text
stringlengths 0
2.53M
|
---|
"""
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize the progress of some task.
Only the horizontal mode is currently supported: the vertical mode is not
yet available.
The progress bar has no interactive elements and is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done)
pb.value = 750
"""
__all__ = ("ProgressBar",)
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
    """Display-only widget visualizing the progress of a task.

    Only the horizontal orientation is supported; see the module
    documentation for details.
    """

    def __init__(self, **kwargs):
        # Backing storage for the ``value`` alias property.
        self._value = 0.0
        super(ProgressBar, self).__init__(**kwargs)

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        # Clamp the requested value into [0, max] and only signal a
        # change (return True) when the stored value actually moved.
        clamped = min(self.max, value)
        if clamped < 0:
            clamped = 0
        if clamped == self._value:
            return
        self._value = clamped
        return True

    value = AliasProperty(_get_value, _set_value)
    """Current value used for the slider.
    :attr:`value` is an :class:`~kivy.properties.AliasProperty` that
    returns the value of the progress bar. If the value is < 0 or >
    :attr:`max`, it will be normalized to those boundaries.
    .. versionchanged:: 1.6.0
        The value is now limited to between 0 and :attr:`max`.
    """

    def get_norm_value(self):
        # Guard against division by zero when max is 0.
        total = self.max
        return self.value / float(total) if total else 0

    def set_norm_value(self, value):
        self.value = value * self.max

    value_normalized = AliasProperty(
        get_norm_value, set_norm_value, bind=("value", "max")
    )
    """Normalized value inside the range 0-1::
        >>> pb = ProgressBar(value=50, max=100)
        >>> pb.value
        50
        >>> pb.value_normalized
        0.5
    :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    """

    max = NumericProperty(100.0)
    """Maximum value allowed for :attr:`value`.
    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
    100.
    """
if __name__ == "__main__":
    from kivy.base import runTouchApp

    # Small interactive demo: a bar that is half full.
    demo = ProgressBar(value=50)
    runTouchApp(demo)
|
"""
Video player
============
.. versionadded:: 1.2.0
The video player widget can be used to play video and let the user control the
play/pausing, volume and position. The widget cannot be customized much because
of the complex assembly of numerous base widgets.
.. image:: images/videoplayer.jpg
:align: center
Annotations
-----------
If you want to display text at a specific time and for a certain duration,
consider annotations. An annotation file has a ".jsa" extension. The player
will automatically load the associated annotation file if it exists.
An annotation file is JSON-based, providing a list of label dictionary items.
The key and value must match one of the :class:`VideoPlayerAnnotation` items.
For example, here is a short version of a jsa file that you can find in
`examples/widgets/softboy.jsa`::
[
{"start": 0, "duration": 2,
"text": "This is an example of annotation"},
{"start": 2, "duration": 2,
"bgcolor": [0.5, 0.2, 0.4, 0.5],
"text": "You can change the background color"}
]
For our softboy.avi example, the result will be:
.. image:: images/videoplayer-annotation.jpg
:align: center
If you want to experiment with annotation files, test with::
python -m kivy.uix.videoplayer examples/widgets/softboy.avi
Fullscreen
----------
The video player can play the video in fullscreen, if
:attr:`VideoPlayer.allow_fullscreen` is activated by a double-tap on
the video. By default, if the video is smaller than the Window, it will be not
stretched.
You can allow stretching by passing custom options to a
:class:`VideoPlayer` instance::
player = VideoPlayer(source='myvideo.avi', state='play',
options={'allow_stretch': True})
End-of-stream behavior
----------------------
You can specify what happens when the video has finished playing by passing an
`eos` (end of stream) directive to the underlying
:class:`~kivy.core.video.VideoBase` class. `eos` can be one of 'stop', 'pause'
or 'loop' and defaults to 'stop'. For example, in order to loop the video::
player = VideoPlayer(source='myvideo.avi', state='play',
options={'eos': 'loop'})
.. note::
The `eos` property of the VideoBase class is a string specifying the
end-of-stream behavior. This property differs from the `eos`
properties of the :class:`VideoPlayer` and
:class:`~kivy.uix.video.Video` classes, whose `eos`
property is simply a boolean indicating that the end of the file has
been reached.
"""
__all__ = ("VideoPlayer", "VideoPlayerAnnotation")
from json import load
from os.path import exists
from kivy.properties import (
ObjectProperty,
StringProperty,
BooleanProperty,
NumericProperty,
DictProperty,
OptionProperty,
)
from kivy.animation import Animation
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.progressbar import ProgressBar
from kivy.uix.label import Label
from kivy.uix.video import Video
from kivy.uix.video import Image
from kivy.factory import Factory
from kivy.logger import Logger
from kivy.clock import Clock
class VideoPlayerVolume(Image):
    """Volume icon: drag vertically to set the volume, tap to toggle mute."""

    video = ObjectProperty(None)

    def on_touch_down(self, touch):
        if not self.collide_point(*touch.pos):
            return False
        touch.grab(self)
        # Remember [volume at grab time, delta applied so far].
        touch.ud[self.uid] = [self.video.volume, 0]
        return True

    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return
        # Vertical distance from the grab origin drives the volume,
        # with a 10px dead zone and a 100px full-scale range.
        delta = abs(touch.y - touch.oy)
        if delta > 10:
            delta = min(delta - 10, 100)
            touch.ud[self.uid][1] = delta
            self.video.volume = delta / 100.0
        return True

    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        delta = abs(touch.y - touch.oy)
        if delta < 10:
            # Movement stayed within the dead zone: treat as a tap and
            # toggle between muted and full volume.
            self.video.volume = 0 if self.video.volume > 0 else 1.0
class VideoPlayerPlayPause(Image):
    """Button image toggling the associated video between play and pause."""

    video = ObjectProperty(None)

    def on_touch_down(self, touch):
        """.. versionchanged:: 1.4.0"""
        if not self.collide_point(*touch.pos):
            return
        # Flip the playback state.
        self.video.state = "pause" if self.video.state == "play" else "play"
        return True
class VideoPlayerStop(Image):
    """Button image stopping playback and rewinding to the start."""

    video = ObjectProperty(None)

    def on_touch_down(self, touch):
        if not self.collide_point(*touch.pos):
            return
        self.video.state = "stop"
        self.video.position = 0
        return True
class VideoPlayerProgressBar(ProgressBar):
    """Progress bar that also lets the user seek by touching/dragging.

    While the user interacts with the bar, a small bubble shows the
    time corresponding to the pending seek position.
    """

    # the Video widget this bar controls
    video = ObjectProperty(None)
    # pending seek position, normalized 0-1, or None when not seeking
    seek = NumericProperty(None, allownone=True)
    # opacity of the time bubble (animated on hide)
    alpha = NumericProperty(1.0)

    def __init__(self, **kwargs):
        super(VideoPlayerProgressBar, self).__init__(**kwargs)
        self.bubble = Factory.Bubble(size=(50, 44))
        self.bubble_label = Factory.Label(text="0:00")
        self.bubble.add_widget(self.bubble_label)
        self.add_widget(self.bubble)
        # Reposition the bubble whenever the bar moves/resizes or a
        # new seek position is set.
        update = self._update_bubble
        fbind = self.fbind
        fbind("pos", update)
        fbind("size", update)
        fbind("seek", update)

    def on_video(self, instance, value):
        self.video.bind(position=self._update_bubble, state=self._showhide_bubble)

    def on_touch_down(self, touch):
        if not self.collide_point(*touch.pos):
            return
        self._show_bubble()
        touch.grab(self)
        self._update_seek(touch.x)
        return True

    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return
        self._update_seek(touch.x)
        return True

    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        # BUGFIX: `if self.seek:` treated a seek to the very beginning
        # (self.seek == 0.0, falsy) as "no seek" and dropped it; test
        # explicitly against None so seeking to position 0 works.
        if self.seek is not None:
            self.video.seek(self.seek)
        self.seek = None
        self._hide_bubble()
        return True

    def _update_seek(self, x):
        # Map an absolute touch x coordinate to a 0-1 fraction of the bar.
        if self.width == 0:
            return
        x = max(self.x, min(self.right, x)) - self.x
        self.seek = x / float(self.width)

    def _show_bubble(self):
        self.alpha = 1
        Animation.stop_all(self, "alpha")

    def _hide_bubble(self):
        # Fade the bubble out over 4 seconds.
        self.alpha = 1.0
        Animation(alpha=0, d=4, t="in_out_expo").start(self)

    def on_alpha(self, instance, value):
        self.bubble.background_color = (1, 1, 1, value)
        self.bubble_label.color = (1, 1, 1, value)

    def _update_bubble(self, *l):
        # Use the pending seek position if any, else the playback position.
        seek = self.seek
        if self.seek is None:
            if self.video.duration == 0:
                seek = 0
            else:
                seek = self.video.position / self.video.duration
        # convert to minutes:seconds
        d = self.video.duration * seek
        minutes = int(d / 60)
        seconds = int(d - (minutes * 60))
        # fix bubble label & position
        self.bubble_label.text = "%d:%02d" % (minutes, seconds)
        self.bubble.center_x = self.x + seek * self.width
        self.bubble.y = self.top

    def _showhide_bubble(self, instance, value):
        if value == "play":
            self._hide_bubble()
        else:
            self._show_bubble()
class VideoPlayerPreview(FloatLayout):
    # Thumbnail image source shown before playback starts.
    source = ObjectProperty(None)
    # The VideoPlayer owning this preview.
    video = ObjectProperty(None)
    # Set after the first tap so the preview triggers playback only once.
    click_done = BooleanProperty(False)

    def on_touch_down(self, touch):
        # The first tap inside the preview starts playback; the touch is
        # always consumed so it doesn't fall through to widgets below.
        if self.collide_point(*touch.pos) and not self.click_done:
            self.click_done = True
            self.video.state = "play"
        return True
class VideoPlayerAnnotation(Label):
    """Annotation class used for creating annotation labels.

    Additional keys are available:

    * bgcolor: [r, g, b, a] - background color of the text box
    * bgsource: 'filename' - background image used for the background text box
    * border: (n, e, s, w) - border used for the background image
    """

    start = NumericProperty(0)
    """Start time of the annotation.
    :attr:`start` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 0.
    """

    duration = NumericProperty(1)
    """Duration of the annotation.
    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 1.
    """

    annotation = DictProperty({})

    def on_annotation(self, instance, ann):
        # Mirror every annotation entry onto the widget as an attribute.
        for key in list(ann):
            setattr(self, key, ann[key])
class VideoPlayer(GridLayout):
    """VideoPlayer class. See module documentation for more information."""

    source = StringProperty("")
    """Source of the video to read.
    :attr:`source` is a :class:`~kivy.properties.StringProperty` and
    defaults to ''.
    .. versionchanged:: 1.4.0
    """

    thumbnail = StringProperty("")
    """Thumbnail of the video to show. If None, VideoPlayer will try to find
    the thumbnail from the :attr:`source` + '.png'.
    :attr:`thumbnail` a :class:`~kivy.properties.StringProperty` and defaults
    to ''.
    .. versionchanged:: 1.4.0
    """

    duration = NumericProperty(-1)
    """Duration of the video. The duration defaults to -1 and is set to the
    real duration when the video is loaded.
    :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
    defaults to -1.
    """

    position = NumericProperty(0)
    """Position of the video between 0 and :attr:`duration`. The position
    is set to the real position when the video is loaded.
    :attr:`position` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    """

    volume = NumericProperty(1.0)
    """Volume of the video in the range 0-1. 1 means full volume and 0 means
    mute.
    :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 1.
    """

    state = OptionProperty("stop", options=("play", "pause", "stop"))
    """String, indicates whether to play, pause, or stop the video::
        # start playing the video at creation
        video = VideoPlayer(source='movie.mkv', state='play')
        # create the video, and start later
        video = VideoPlayer(source='movie.mkv')
        # and later
        video.state = 'play'
    :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
    to 'stop'.
    """

    play = BooleanProperty(False)
    """
    .. deprecated:: 1.4.0
        Use :attr:`state` instead.
    Boolean, indicates whether the video is playing or not. You can start/stop
    the video by setting this property::
        # start playing the video at creation
        video = VideoPlayer(source='movie.mkv', play=True)
        # create the video, and start later
        video = VideoPlayer(source='movie.mkv')
        # and later
        video.play = True
    :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    """

    image_overlay_play = StringProperty(
        "atlas://data/images/defaulttheme/player-play-overlay"
    )
    """Image filename used to show a "play" overlay when the video has not yet
    started.
    :attr:`image_overlay_play` is a
    :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/player-play-overlay'.
    """

    image_loading = StringProperty("data/images/image-loading.gif")
    """Image filename used when the video is loading.
    :attr:`image_loading` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'data/images/image-loading.gif'.
    """

    image_play = StringProperty("atlas://data/images/defaulttheme/media-playback-start")
    """Image filename used for the "Play" button.
    :attr:`image_play` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/media-playback-start'.
    """

    image_stop = StringProperty("atlas://data/images/defaulttheme/media-playback-stop")
    """Image filename used for the "Stop" button.
    :attr:`image_stop` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/media-playback-stop'.
    """

    image_pause = StringProperty(
        "atlas://data/images/defaulttheme/media-playback-pause"
    )
    """Image filename used for the "Pause" button.
    :attr:`image_pause` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/media-playback-pause'.
    """

    image_volumehigh = StringProperty(
        "atlas://data/images/defaulttheme/audio-volume-high"
    )
    """Image filename used for the volume icon when the volume is high.
    :attr:`image_volumehigh` is a :class:`~kivy.properties.StringProperty` and
    defaults to 'atlas://data/images/defaulttheme/audio-volume-high'.
    """

    image_volumemedium = StringProperty(
        "atlas://data/images/defaulttheme/audio-volume-medium"
    )
    """Image filename used for the volume icon when the volume is medium.
    :attr:`image_volumemedium` is a :class:`~kivy.properties.StringProperty`
    and defaults to 'atlas://data/images/defaulttheme/audio-volume-medium'.
    """

    image_volumelow = StringProperty(
        "atlas://data/images/defaulttheme/audio-volume-low"
    )
    """Image filename used for the volume icon when the volume is low.
    :attr:`image_volumelow` is a :class:`~kivy.properties.StringProperty`
    and defaults to 'atlas://data/images/defaulttheme/audio-volume-low'.
    """

    image_volumemuted = StringProperty(
        "atlas://data/images/defaulttheme/audio-volume-muted"
    )
    """Image filename used for the volume icon when the volume is muted.
    :attr:`image_volumemuted` is a :class:`~kivy.properties.StringProperty`
    and defaults to 'atlas://data/images/defaulttheme/audio-volume-muted'.
    """

    annotations = StringProperty("")
    """If set, it will be used for reading annotations box.
    :attr:`annotations` is a :class:`~kivy.properties.StringProperty`
    and defaults to ''.
    """

    fullscreen = BooleanProperty(False)
    """Switch to fullscreen view. This should be used with care. When
    activated, the widget will remove itself from its parent, remove all
    children from the window and will add itself to it. When fullscreen is
    unset, all the previous children are restored and the widget is restored to
    its previous parent.
    .. warning::
        The re-add operation doesn't care about the index position of it's
        children within the parent.
    :attr:`fullscreen` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to False.
    """

    allow_fullscreen = BooleanProperty(True)
    """By default, you can double-tap on the video to make it fullscreen. Set
    this property to False to prevent this behavior.
    :attr:`allow_fullscreen` is a :class:`~kivy.properties.BooleanProperty`
    defaults to True.
    """

    options = DictProperty({})
    """Optional parameters can be passed to a :class:`~kivy.uix.video.Video`
    instance with this property.
    :attr:`options` a :class:`~kivy.properties.DictProperty` and
    defaults to {}.
    """

    # internals
    container = ObjectProperty(None)

    def __init__(self, **kwargs):
        # Lazily created Video widget and preview image; annotations are
        # loaded from the sidecar .jsa file when present.
        self._video = None
        self._image = None
        self._annotations = ""
        self._annotations_labels = []
        super(VideoPlayer, self).__init__(**kwargs)
        self._load_thumbnail()
        self._load_annotations()
        if self.source:
            self._trigger_video_load()

    def _trigger_video_load(self, *largs):
        # Debounce: collapse repeated requests into a single load on the
        # next frame.
        Clock.unschedule(self._do_video_load)
        Clock.schedule_once(self._do_video_load, -1)

    def on_source(self, instance, value):
        # we got a value, try to see if we have an image for it
        self._load_thumbnail()
        self._load_annotations()
        if self._video is not None:
            self._video.unload()
            self._video = None
        if value:
            self._trigger_video_load()

    def on_image_overlay_play(self, instance, value):
        self._image.image_overlay_play = value

    def on_image_loading(self, instance, value):
        self._image.image_loading = value

    def _load_thumbnail(self):
        # Replace the container content with the preview image.
        if not self.container:
            return
        self.container.clear_widgets()
        # get the source, remove extension, and use png
        thumbnail = self.thumbnail
        if not thumbnail:
            filename = self.source.rsplit(".", 1)
            thumbnail = filename[0] + ".png"
        self._image = VideoPlayerPreview(source=thumbnail, video=self)
        self.container.add_widget(self._image)

    def _load_annotations(self):
        # Read the sidecar ".jsa" JSON file (if any) and build one
        # annotation label per entry.
        if not self.container:
            return
        self._annotations_labels = []
        annotations = self.annotations
        if not annotations:
            filename = self.source.rsplit(".", 1)
            annotations = filename[0] + ".jsa"
        if exists(annotations):
            with open(annotations, "r") as fd:
                self._annotations = load(fd)
        if self._annotations:
            for ann in self._annotations:
                self._annotations_labels.append(VideoPlayerAnnotation(annotation=ann))

    def on_state(self, instance, value):
        # Forward the requested state to the underlying Video widget.
        if self._video is not None:
            self._video.state = value

    def _set_state(self, instance, value):
        # Reverse direction: mirror the Video widget's state back here.
        self.state = value

    def _do_video_load(self, *largs):
        self._video = Video(
            source=self.source,
            state=self.state,
            volume=self.volume,
            pos_hint={"x": 0, "y": 0},
            **self.options
        )
        # Keep duration/position/volume in sync with the Video widget;
        # the first texture signals that playback actually started.
        self._video.bind(
            texture=self._play_started,
            duration=self.setter("duration"),
            position=self.setter("position"),
            volume=self.setter("volume"),
            state=self._set_state,
        )

    def on_play(self, instance, value):
        # Deprecated boolean `play` property mapped onto `state`.
        value = "play" if value else "stop"
        return self.on_state(instance, value)

    def on_volume(self, instance, value):
        if not self._video:
            return
        self._video.volume = value

    def on_position(self, instance, value):
        # Show each annotation label only while the playback position is
        # inside its [start, start + duration] window.
        labels = self._annotations_labels
        if not labels:
            return
        for label in labels:
            start = label.start
            duration = label.duration
            if start > value or (start + duration) < value:
                if label.parent:
                    label.parent.remove_widget(label)
            elif label.parent is None:
                self.container.add_widget(label)

    def seek(self, percent):
        """Change the position to a percentage of the duration. Percentage
        must be a value between 0-1.
        .. warning::
            Calling seek() before the video is loaded has no effect.
        """
        if not self._video:
            return
        self._video.seek(percent)

    def _play_started(self, instance, value):
        # First texture is ready: swap the preview image for the video.
        self.container.clear_widgets()
        self.container.add_widget(self._video)

    def on_touch_down(self, touch):
        if not self.collide_point(*touch.pos):
            return False
        if touch.is_double_tap and self.allow_fullscreen:
            self.fullscreen = not self.fullscreen
            return True
        return super(VideoPlayer, self).on_touch_down(touch)

    def on_fullscreen(self, instance, value):
        window = self.get_parent_window()
        if not window:
            Logger.warning(
                "VideoPlayer: Cannot switch to fullscreen, " "window not found."
            )
            # Roll back the property so it reflects reality.
            if value:
                self.fullscreen = False
            return
        if not self.parent:
            Logger.warning("VideoPlayer: Cannot switch to fullscreen, " "no parent.")
            if value:
                self.fullscreen = False
            return
        if value:
            # Save enough layout state to restore the widget afterwards.
            self._fullscreen_state = state = {
                "parent": self.parent,
                "pos": self.pos,
                "size": self.size,
                "pos_hint": self.pos_hint,
                "size_hint": self.size_hint,
                "window_children": window.children[:],
            }
            # remove all window children
            for child in window.children[:]:
                window.remove_widget(child)
            # put the video in fullscreen
            if state["parent"] is not window:
                state["parent"].remove_widget(self)
            window.add_widget(self)
            # ensure the video widget is in 0, 0, and the size will be
            # reajusted
            self.pos = (0, 0)
            self.size = (100, 100)
            self.pos_hint = {}
            self.size_hint = (1, 1)
        else:
            # Restore the saved layout state and window children.
            state = self._fullscreen_state
            window.remove_widget(self)
            for child in state["window_children"]:
                window.add_widget(child)
            self.pos_hint = state["pos_hint"]
            self.size_hint = state["size_hint"]
            self.pos = state["pos"]
            self.size = state["size"]
            if state["parent"] is not window:
                state["parent"].add_widget(self)
if __name__ == "__main__":
    import sys

    from kivy.base import runTouchApp

    # Play the file given on the command line; stop playback on exit.
    demo_player = VideoPlayer(source=sys.argv[1])
    runTouchApp(demo_player)
    if demo_player:
        demo_player.state = "stop"
|
class Battery(object):
    """Battery info facade."""

    @property
    def status(self):
        """Property that contains a dict with the following fields:
        * **isCharging** *(bool)*: Battery is charging
        * **percentage** *(float)*: Battery charge remaining
        .. warning::
            If any of the fields is not readable, it is set as
            None.
        """
        return self.get_state()

    def get_state(self):
        # Delegate to the platform-specific implementation.
        state = self._get_state()
        return state

    # private

    def _get_state(self):
        # Must be overridden by each platform backend.
        raise NotImplementedError()
|
from jnius import autoclass
from plyer.facades import Notification
from plyer.platforms.android import activity, SDK_INT
AndroidString = autoclass("java.lang.String")
Context = autoclass("android.content.Context")
NotificationBuilder = autoclass("android.app.Notification$Builder")
Drawable = autoclass("{}.R$drawable".format(activity.getPackageName()))
class AndroidNotification(Notification):
    """Android backend for the plyer Notification facade."""

    def _get_notification_service(self):
        # Lazily fetch and cache the system NotificationManager.
        if not hasattr(self, "_ns"):
            self._ns = activity.getSystemService(Context.NOTIFICATION_SERVICE)
        return self._ns

    def _notify(self, **kwargs):
        icon = getattr(Drawable, kwargs.get("icon_android", "icon"))

        builder = NotificationBuilder(activity)
        builder.setContentTitle(AndroidString(kwargs.get("title").encode("utf-8")))
        builder.setContentText(AndroidString(kwargs.get("message").encode("utf-8")))
        builder.setTicker(AndroidString(kwargs.get("ticker").encode("utf-8")))
        builder.setSmallIcon(icon)
        builder.setAutoCancel(True)

        # Notification.Builder.build() only exists from API 16 on;
        # older devices use the deprecated getNotification().
        if SDK_INT >= 16:
            built = builder.build()
        else:
            built = builder.getNotification()
        self._get_notification_service().notify(0, built)
def instance():
    """Return a new :class:`AndroidNotification` facade instance."""
    notification = AndroidNotification()
    return notification
|
"""
Mac OS X file chooser
---------------------
"""
from plyer.facades import FileChooser
from pyobjus import autoclass, objc_arr, objc_str
from pyobjus.dylib_manager import load_framework, INCLUDE
load_framework(INCLUDE.AppKit)
NSURL = autoclass("NSURL")
NSOpenPanel = autoclass("NSOpenPanel")
NSSavePanel = autoclass("NSSavePanel")
NSOKButton = 1
class MacFileChooser(object):
    """A native implementation of file chooser dialogs using Apple's API
    through pyobjus.

    Not implemented features:
    * filters (partial, wildcards are converted to extensions if possible.
      Pass the Mac-specific "use_extensions" if you can provide
      Mac OS X-compatible to avoid automatic conversion)
    * multiple (only for save dialog. Available in open dialog)
    * icon
    * preview
    """

    mode = "open"            # "open", "save" or "dir"
    path = None              # initial directory shown by the panel
    multiple = False         # allow multi-selection (open dialog only)
    filters = []             # wildcard strings or (label, *patterns) sequences
    preview = False          # unsupported on this platform
    title = None             # panel window title
    icon = None              # unsupported on this platform
    show_hidden = False      # show hidden files in the panel
    use_extensions = False   # filters are already Mac-compatible extensions

    def __init__(self, **kwargs):
        # Simulate Kivy's behavior: any keyword becomes an attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def run(self):
        """Show the panel and return a list of selected paths, or None
        if the dialog was cancelled.
        """
        panel = None
        if self.mode in ("open", "dir"):
            panel = NSOpenPanel.openPanel()
        else:
            panel = NSSavePanel.savePanel()
        panel.setCanCreateDirectories_(True)
        panel.setCanChooseDirectories_(self.mode == "dir")
        panel.setCanChooseFiles_(self.mode != "dir")
        panel.setShowsHiddenFiles_(self.show_hidden)

        if self.title:
            panel.setTitle_(objc_str(self.title))

        if self.mode != "save" and self.multiple:
            panel.setAllowsMultipleSelection_(True)

        # Mac OS X does not support wildcards unlike the other platforms.
        # This tries to convert wildcards to "extensions" when possible,
        # and sets the panel to also allow other file types, just to be safe.
        if len(self.filters) > 0:
            filthies = []
            for f in self.filters:
                if isinstance(f, str):
                    if not self.use_extensions:
                        if f.strip().endswith("*"):
                            continue
                    pystr = f.strip().split("*")[-1].split(".")[-1]
                    filthies.append(objc_str(pystr))
                else:
                    # BUGFIX: this branch handles (label, *patterns)
                    # sequences but previously called f.strip() on the
                    # whole sequence (an AttributeError) instead of
                    # operating on each pattern `i`.
                    for i in f[1:]:
                        if not self.use_extensions:
                            if i.strip().endswith("*"):
                                continue
                        pystr = i.strip().split("*")[-1].split(".")[-1]
                        filthies.append(objc_str(pystr))

            ftypes_arr = objc_arr(filthies)
            panel.setAllowedFileTypes_(ftypes_arr)
            panel.setAllowsOtherFileTypes_(not self.use_extensions)

        if self.path:
            url = NSURL.fileURLWithPath_(self.path)
            panel.setDirectoryURL_(url)

        if panel.runModal():
            if self.mode == "save" or not self.multiple:
                return [panel.filename().UTF8String()]
            else:
                return [i.UTF8String() for i in panel.filenames()]
        return None
class MacOSXFileChooser(FileChooser):
    """FileChooser implementation for Mac OS X, using pyobjus and the
    native NSOpenPanel/NSSavePanel dialogs (via :class:`MacFileChooser`).
    """

    def _file_selection_dialog(self, **kwargs):
        return MacFileChooser(**kwargs).run()
def instance():
    """Return a new :class:`MacOSXFileChooser` facade instance."""
    chooser = MacOSXFileChooser()
    return chooser
|
'\n jinja2.debug\n ~~~~~~~~~~~~\n\n Implements the debug interface for Jinja. This module does some pretty\n ugly stuff with the Python traceback system in order to achieve tracebacks\n with correct line numbers, locals and contents.\n\n :copyright: (c) 2010 by the Jinja Team.\n :license: BSD, see LICENSE for more details.\n'
import sys
import traceback
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# NOTE(review): this block appears mechanically garbled. The string literal
# below never raises, so neither except branch runs and `raise_helper` is
# left undefined. Upstream jinja2.debug compiled the Python 2 raise statement
# (e.g. via exec/compile) to choose an idiom -- TODO restore from upstream.
try:
    ("raise TypeError, 'foo'")
except SyntaxError:
    raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
    'Proxies a traceback frame.'

    # NOTE(review): several statements in this class appear mechanically
    # garbled -- call expressions have been reduced to bare tuples or
    # parenthesized values. Left byte-identical; restore from upstream
    # jinja2.debug before use.

    def __init__(self, tb):
        self.tb = tb

    def _set_tb_next(self, next):
        if (tb_set_next is not None):
            # garbled: presumably tb_set_next(self.tb, next and next.tb or None)
            (self.tb, ((next and next.tb) or None))
        self._tb_next = next

    def _get_tb_next(self):
        return self._tb_next

    # garbled: presumably property(_get_tb_next, _set_tb_next)
    tb_next = (_get_tb_next, _set_tb_next)
    del _get_tb_next, _set_tb_next

    @property
    def is_jinja_frame(self):
        return ('__jinja_template__' in self.tb.tb_frame.f_globals)

    def __getattr__(self, name):
        # garbled: presumably getattr(self.tb, name)
        return (self.tb, name)
class ProcessedTraceback(object):
    'Holds a Jinja preprocessed traceback for printing or reraising.'

    # NOTE(review): several method bodies appear mechanically garbled --
    # call expressions have been reduced to bare tuples (e.g. render_as_text
    # returns an empty tuple). Left byte-identical; restore from upstream
    # jinja2.debug before use.

    def __init__(self, exc_type, exc_value, frames):
        if (not frames):
            # garbled: presumably raise TypeError('no frames for this traceback?')
            raise ('no frames for this traceback?')
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.frames = frames

    def chain_frames(self):
        'Chains the frames. Requires ctypes or the speedups extension.'
        prev_tb = None
        for tb in self.frames:
            if (prev_tb is not None):
                prev_tb.tb_next = tb
            prev_tb = tb
        prev_tb.tb_next = None

    def render_as_text(self, limit=None):
        'Return a string with the traceback.'
        # garbled: presumably traceback.format_exception(...) joined to a string
        lines = (self.exc_type, self.exc_value, self.frames[0])
        return ()

    def render_as_html(self, full=False):
        'Return a unicode string with the traceback as rendered HTML.'
        from jinja2.debugrenderer import render_traceback
        # garbled: presumably render_traceback(self, full=full) and an encoded
        # text version embedded in the HTML comment
        return ('%s\n\n<!--\n%s\n-->' % ((self), ('utf-8', 'replace')))

    @property
    def is_template_syntax_error(self):
        '`True` if this is a template syntax error.'
        # garbled: presumably isinstance(self.exc_value, TemplateSyntaxError)
        return (self.exc_value, TemplateSyntaxError)

    @property
    def exc_info(self):
        'Exception info tuple with a proxy around the frame objects.'
        return (self.exc_type, self.exc_value, self.frames[0])

    @property
    def standard_exc_info(self):
        'Standard python exc_info for re-raising'
        return (self.exc_type, self.exc_value, self.frames[0].tb)
def make_traceback(exc_info, source_hint=None):
    'Creates a processed traceback object from the exc_info.'
    # NOTE(review): body appears mechanically garbled -- the isinstance()
    # check and the translate_* calls were reduced to bare tuples, so the
    # `if` condition is always truthy here. Left byte-identical.
    (exc_type, exc_value, tb) = exc_info
    if (exc_value, TemplateSyntaxError):  # garbled: isinstance(exc_value, TemplateSyntaxError)
        exc_info = (exc_value, source_hint)  # garbled: translate_syntax_error(...)
        initial_skip = 0
    else:
        initial_skip = 1
    return (exc_info, initial_skip)  # garbled: translate_exception(...)
def translate_syntax_error(error, source=None):
    'Rewrites a syntax error to please traceback systems.'
    error.source = source
    error.translated = True
    exc_info = (error.__class__, error, None)
    filename = error.filename
    if (filename is None):
        filename = '<unknown>'
    # garbled: presumably fake_exc_info(exc_info, filename, error.lineno)
    return (exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
    'If passed an exc_info it will automatically rewrite the exceptions\n    all the way down to the correct line numbers and frames.\n    '
    # NOTE(review): body appears mechanically garbled -- most call
    # expressions were reduced to bare parenthesized values; as written,
    # `for x in (initial_skip)` iterates an int and would raise TypeError.
    # Left byte-identical; restore from upstream jinja2.debug.
    tb = exc_info[2]
    frames = []
    for x in (initial_skip):  # garbled: presumably range(initial_skip)
        if (tb is not None):
            tb = tb.tb_next
    initial_tb = tb
    while (tb is not None):
        if (tb.tb_frame.f_code in internal_code):
            tb = tb.tb_next
            continue
        next = tb.tb_next
        template = ('__jinja_template__')  # garbled: f_globals lookup expected
        if (template is not None):
            lineno = (tb.tb_lineno)  # garbled: template line translation expected
            tb = ((exc_info[:2] + (tb,)), template.filename, lineno)[2]  # garbled: fake_exc_info(...)
            ((tb))  # garbled: presumably frames.append(TracebackFrameProxy(tb))
        tb = next
    if (not frames):
        raise (exc_info[2])  # garbled: re-raise of the original exception expected
    traceback = (exc_info[0], exc_info[1], frames)  # garbled: ProcessedTraceback(...)
    if (tb_set_next is not None):
        ()  # garbled: presumably traceback.chain_frames()
    return traceback
def fake_exc_info(exc_info, filename, lineno):
    'Helper for `translate_exception`.'
    # NOTE(review): body appears mechanically garbled -- compile()/exec()/
    # CodeType(...) calls were reduced to bare tuples. Left byte-identical;
    # restore from upstream jinja2.debug before use.
    (exc_type, exc_value, tb) = exc_info
    if (tb is not None):
        real_locals = ()  # garbled: frame-locals copy expected
        ctx = ('context')  # garbled: lookup of the template context expected
        if ctx:
            locals = ()  # garbled: ctx.get_all() expected
        else:
            locals = {}
        for (name, value) in ():  # garbled: iteration over real_locals expected
            if (('l_') and (value is not missing)):  # garbled: name prefix check expected
                locals[name[2:]] = value
        ('__jinja_exception__', None)  # garbled: locals.pop(...) expected
    else:
        locals = {}
    globals = {'__name__': filename, '__file__': filename, '__jinja_exception__': exc_info[:2], '__jinja_template__': None}
    code = ((('\n' * (lineno - 1)) + raise_helper), filename, 'exec')  # garbled: compile(...) expected
    try:
        if (tb is None):
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if (function == 'root'):
                location = 'top-level template code'
            elif ('block_'):  # garbled: function.startswith('block_') expected
                location = ('block "%s"' % function[6:])
            else:
                location = 'template'
        # garbled: CodeType(...) construction expected
        code = (0, code.co_nlocals, code.co_stacksize, code.co_flags, code.co_code, code.co_consts, code.co_names, code.co_varnames, filename, location, code.co_firstlineno, code.co_lnotab, (), ())
    except:
        raise
    try:
        (code, globals, locals)  # garbled: exec(...) expected
    except:
        exc_info = ()  # garbled: sys.exc_info() expected
        new_tb = exc_info[2].tb_next
    return (exc_info[:2] + (new_tb,))
def _init_ugly_crap():
    'This function implements a few ugly things so that we can patch the\n    traceback objects. The function returned allows resetting `tb_next` on\n    any python traceback object.\n    '
    # NOTE(review): body appears mechanically garbled -- hasattr()/sizeof()/
    # POINTER()/from_address() calls were reduced to bare tuples or values,
    # so e.g. the first `if` condition is always truthy. Left byte-identical;
    # restore from upstream jinja2.debug before use.
    import ctypes
    from types import TracebackType
    if (ctypes.pythonapi, 'Py_InitModule4_64'):  # garbled: a 64-bit check expected
        _Py_ssize_t = ctypes.c_int64
    else:
        _Py_ssize_t = ctypes.c_int

    class _PyObject(ctypes.Structure):
        pass
    _PyObject._fields_ = [('ob_refcnt', _Py_ssize_t), ('ob_type', (_PyObject))]  # garbled: pointer types expected
    if (object.__basicsize__ != (_PyObject)):  # garbled: ctypes.sizeof(...) expected
        # python with trace support uses a bigger object layout
        class _PyObject(ctypes.Structure):
            pass
        _PyObject._fields_ = [('_ob_next', (_PyObject)), ('_ob_prev', (_PyObject)), ('ob_refcnt', _Py_ssize_t), ('ob_type', (_PyObject))]

    class _Traceback(_PyObject):
        pass
    _Traceback._fields_ = [('tb_next', (_Traceback)), ('tb_frame', (_PyObject)), ('tb_lasti', ctypes.c_int), ('tb_lineno', ctypes.c_int)]

    def tb_set_next(tb, next):
        'Set the tb_next attribute of a traceback object.'
        if (not ((tb, TracebackType) and ((next is None) or (next, TracebackType)))):  # garbled: isinstance checks expected
            raise ('tb_set_next arguments must be traceback objects')  # garbled: raise TypeError(...) expected
        obj = ((tb))  # garbled: _Traceback.from_address(id(tb)) expected
        if (tb.tb_next is not None):
            old = ((tb.tb_next))  # garbled: from_address(id(...)) expected
            old.ob_refcnt -= 1
        if (next is None):
            obj.tb_next = ()  # garbled: null pointer expected
        else:
            next = ((next))  # garbled: from_address(id(next)) expected
            next.ob_refcnt += 1
            obj.tb_next = (next)  # garbled: ctypes.pointer(next) expected
    return tb_set_next
# Prefer the compiled speedups implementation; fall back to the ctypes
# version, and finally to None (frame chaining disabled).
try:
    from jinja2._speedups import tb_set_next
except ImportError:
    try:
        tb_set_next = ()  # garbled: presumably _init_ugly_crap()
    except:
        tb_set_next = None
del _init_ugly_crap
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
class EvdevRecipe(CompiledComponentsPythonRecipe):
    """python-for-android recipe building the python-evdev package."""

    name = "evdev"
    version = "v0.4.7"
    url = "https://github.com/gvalkov/python-evdev/archive/{version}.zip"

    # Either Python works; the tuple means "one of these".
    depends = [("python2", "python3")]

    build_cmd = "build"

    # Local patches applied to the evdev sources before building.
    patches = [
        "evcnt.patch",
        "keycnt.patch",
        "remove-uinput.patch",
        "include-dir.patch",
        "evdev-permissions.patch",
    ]

    def get_recipe_env(self, arch=None):
        # evdev's build needs NDKPLATFORM to locate the Android sysroot.
        env = super(EvdevRecipe, self).get_recipe_env(arch)
        env["NDKPLATFORM"] = self.ctx.ndk_platform
        return env


recipe = EvdevRecipe()
|
from pythonforandroid.toolchain import (
CompiledComponentsPythonRecipe,
Recipe,
current_directory,
info,
shprint,
)
from os.path import join
import sh
class PyCryptoRecipe(CompiledComponentsPythonRecipe):
    """python-for-android recipe building PyCrypto against the openssl
    recipe.
    """

    version = "2.6.1"
    url = "https://pypi.python.org/packages/source/p/pycrypto/pycrypto-{version}.tar.gz"
    depends = ["openssl", "python2"]
    site_packages_name = "Crypto"
    patches = ["add_length.patch"]

    def get_recipe_env(self, arch=None):
        """Return a build env pointing compiler and linker at the openssl
        build for `arch`.
        """
        env = super(PyCryptoRecipe, self).get_recipe_env(arch)
        openssl_build_dir = Recipe.get_recipe("openssl", self.ctx).get_build_dir(
            arch.arch
        )
        env["CC"] = "%s -I%s" % (env["CC"], join(openssl_build_dir, "include"))
        # BUGFIX: the two libs dirs used to be string-concatenated into one
        # broken "-L<path>-L<path>" token; emit three separate -L flags.
        env["LDFLAGS"] = env["LDFLAGS"] + " -L{} -L{} -L{}".format(
            self.ctx.get_libs_dir(arch.arch),
            self.ctx.libs_dir,
            openssl_build_dir,
        )
        env["EXTRA_CFLAGS"] = "--host linux-armv"
        env["ac_cv_func_malloc_0_nonnull"] = "yes"
        return env

    def build_compiled_components(self, arch):
        """Run PyCrypto's ./configure before the standard compiled build."""
        info("Configuring compiled components in {}".format(self.name))
        env = self.get_recipe_env(arch)
        with current_directory(self.get_build_dir(arch.arch)):
            configure = sh.Command("./configure")
            shprint(
                configure,
                "--host=arm-eabi",
                "--prefix={}".format(self.ctx.get_python_install_dir()),
                "--enable-shared",
                _env=env,
            )
        super(PyCryptoRecipe, self).build_compiled_components(arch)


recipe = PyCryptoRecipe()
|
from pythonforandroid.recipe import PythonRecipe
from os.path import join
class ZeroconfRecipe(PythonRecipe):
    """Recipe building the pure-python zeroconf package."""

    name = "zeroconf"
    version = "0.17.4"
    url = "https://pypi.python.org/packages/source/z/zeroconf/zeroconf-{version}.tar.gz"
    depends = ["python2", "netifaces", "enum34", "six"]

    def get_recipe_env(self, arch=None):
        """Prepend hostpython's build dir to PYTHONPATH."""
        env = super(ZeroconfRecipe, self).get_recipe_env(arch)
        # TODO: fix hardcoded path
        # This is required to prevent issue with _io.so import.
        host_build_lib = join(
            self.get_recipe("hostpython2", self.ctx).get_build_dir(arch.arch),
            "build",
            "lib.linux-x86_64-2.7",
        )
        env["PYTHONPATH"] = host_build_lib + ":" + env.get("PYTHONPATH", "")
        return env
recipe = ZeroconfRecipe()
|
import sys
if sys.version_info < (3, 0):
import testcase
else:
from . import testcase
#
# Tests elements of the heading levels
#
class TestLevelDepth(testcase.TestCase):
    """Exercises how deep the plugin collects heading levels."""

    title = "Test Level Depth"

    # With toc_level 0 (unlimited), headings down to level 6 are collected.
    def test_level_depth_unlimited(self):
        self.set_settings({"toc_level": 0})
        self.set_text(self.text())
        self.run_plugin()
        self.find("* Heading 1")
        self.find("----- Heading 6")

    # The "toc_level" setting caps how deep headings are collected.
    def test_level_depth_limited(self):
        self.set_settings({"toc_level": 2})
        self.set_text(self.text())
        self.run_plugin()
        level6_missing = self.get_text().find("----- Heading 6") == -1
        if level6_missing:
            self.ok()
        else:
            self.error("Should not find heading level 6")

    def text(self):
        return """
/*
 * TOC
 *
 * Heading 1
 * +++++ Heading 6
 */
// > Heading 1
// >>>>>> Heading 6
"""
|
# NOTE(review): this script appears to be a decompiled/garbled artifact —
# every call expression has been stripped down to a bare parenthesized
# operand (e.g. "parser = ()", "args = ()"), so it cannot run as written.
# The comments below record the apparent intent; TODO: recover the original.
import sys, os, argparse
# presumably os.path.join(os.path.dirname(__file__), '../../buildtools/scripts') — verify
buildtools_dir = (((__file__), '../../buildtools/scripts'))
(buildtools_dir)  # presumably sys.path.append(buildtools_dir)
import PackageData
parser = ()  # presumably argparse.ArgumentParser()
('--outputdir')  # presumably parser.add_argument('--outputdir')
args = ()  # presumably parser.parse_args()
()
missing_file = False
# presumably iterates PackageData's (filename, arcname) pairs — verify
for (filename, arcname) in ():
    if (not (filename)):  # presumably os.path.exists(filename)
        ((filename))  # presumably prints the missing filename
        missing_file = True
    else:
        # Compares the on-disk directory listing with the recorded name,
        # apparently to catch case-sensitivity mismatches.
        (path, fn) = (filename)  # presumably os.path.split(filename)
        files = (path)  # presumably os.listdir(path)
        if (fn not in files):
            ('\n---------------------------------------------------------------------------\n')
            ('FILENAME MISMATCH - Possibly case?\n')
            ((filename))
            ('\n---------------------------------------------------------------------------\n')
            raise
if (not (missing_file == False)):
    raise ()
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Future instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
# Canonical list of feature names; also used to build __all__ just below.
all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010  # nested_scopes
CO_GENERATOR_ALLOWED = 0  # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000  # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000  # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000  # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000  # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000  # unicode string literals
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional, self.mandatory, self.compiler_flag))
# One _Feature per language feature: (OptionalRelease, MandatoryRelease, flag).
nested_scopes = _Feature((2, 1, 0, "beta", 1), (2, 2, 0, "alpha", 0), CO_NESTED)
generators = _Feature(
    (2, 2, 0, "alpha", 1), (2, 3, 0, "final", 0), CO_GENERATOR_ALLOWED
)
division = _Feature((2, 2, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_DIVISION)
absolute_import = _Feature(
    (2, 5, 0, "alpha", 1), (3, 0, 0, "alpha", 0), CO_FUTURE_ABSOLUTE_IMPORT
)
with_statement = _Feature(
    (2, 5, 0, "alpha", 1), (2, 6, 0, "alpha", 0), CO_FUTURE_WITH_STATEMENT
)
print_function = _Feature(
    (2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_PRINT_FUNCTION
)
unicode_literals = _Feature(
    (2, 6, 0, "alpha", 2), (3, 0, 0, "alpha", 0), CO_FUTURE_UNICODE_LITERALS
)
|
"Python interface for the 'lsprof' profiler.\n Compatible with the 'profile' module.\n"
__all__ = ['run', 'runctx', 'help', 'Profile']
import _lsprof
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    # RECONSTRUCTED: the decompiled source had all call targets stripped
    # (e.g. "prof = ()"); restored from the stdlib cProfile implementation.
    prof = Profile()
    result = None
    try:
        try:
            # Profile.run returns the profiler itself.
            prof = prof.run(statement)
        except SystemExit:
            # Let profiled code call sys.exit() without losing the stats.
            pass
    finally:
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result
def runctx(statement, globals, locals, filename=None, sort=-1):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    # RECONSTRUCTED: call targets were stripped in the decompiled source;
    # restored from the stdlib cProfile implementation.
    prof = Profile()
    result = None
    try:
        try:
            prof = prof.runctx(statement, globals, locals)
        except SystemExit:
            # Let profiled code call sys.exit() without losing the stats.
            pass
    finally:
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result
def help():
    """Print a pointer to the profiler documentation."""
    # RECONSTRUCTED: the print calls were stripped in the decompiled source.
    print('Documentation for the profile/cProfile modules can be found ')
    print("in the Python Library Reference, section 'The Python Profiler'.")
class Profile(_lsprof.Profiler):
    """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)

    Builds a profiler object using the specified timer function.
    The default timer is a fast built-in one based on real time.
    For custom timer functions returning integers, time_unit can
    be a float specifying a scale (i.e. how long each integer unit
    is, in seconds).
    """

    # RECONSTRUCTED: all call targets in this class were stripped in the
    # decompiled source (bare "()" expressions); restored from the stdlib
    # cProfile implementation, keeping the intact parts unchanged.

    def print_stats(self, sort=-1):
        """Create a pstats report from the collected data and print it."""
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()

    def dump_stats(self, file):
        """Marshal the snapshot stats dict to *file* for later pstats use."""
        import marshal
        f = open(file, 'wb')
        self.create_stats()
        marshal.dump(self.stats, f)
        f.close()

    def create_stats(self):
        """Stop profiling and snapshot the accumulated data."""
        self.disable()
        self.snapshot_stats()

    def snapshot_stats(self):
        """Convert the raw _lsprof entries into the pstats dict format."""
        entries = self.getstats()
        self.stats = {}
        callersdicts = {}
        # First pass: one stats record per profiled code object.
        for entry in entries:
            func = label(entry.code)
            nc = entry.callcount           # ncalls column of pstats
            cc = nc - entry.reccallcount   # primary (non-recursive) calls
            tt = entry.inlinetime          # tottime column of pstats
            ct = entry.totaltime           # cumtime column of pstats
            callers = {}
            callersdicts[id(entry.code)] = callers
            self.stats[func] = (cc, nc, tt, ct, callers)
        # Second pass: attribute subcall timings to the callee's callers dict.
        for entry in entries:
            if entry.calls:
                func = label(entry.code)
                for subentry in entry.calls:
                    try:
                        callers = callersdicts[id(subentry.code)]
                    except KeyError:
                        continue
                    nc = subentry.callcount
                    cc = nc - subentry.reccallcount
                    tt = subentry.inlinetime
                    ct = subentry.totaltime
                    if func in callers:
                        # Accumulate across multiple call sites.
                        prev = callers[func]
                        nc += prev[0]
                        cc += prev[1]
                        tt += prev[2]
                        ct += prev[3]
                    callers[func] = (nc, cc, tt, ct)

    def run(self, cmd):
        """Profile *cmd* in __main__'s namespace; return self."""
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        """Profile exec of *cmd* in the given namespaces; return self."""
        self.enable()
        try:
            exec(cmd, globals, locals)
        finally:
            self.disable()
        return self

    def runcall(self, func, *args, **kw):
        """Profile a single call of *func* and return its result."""
        self.enable()
        try:
            return func(*args, **kw)
        finally:
            self.disable()
def label(code):
    """Map a code object (or builtin name string) to a pstats key tuple."""
    # RECONSTRUCTED: the isinstance call was stripped in the decompiled
    # source ("if (code, str):" is always truthy); restored per stdlib.
    if isinstance(code, str):
        # Built-in / C function: no file or line ('~' sorts at the end).
        return ('~', 0, code)
    else:
        return (code.co_filename, code.co_firstlineno, code.co_name)
def main():
    """Command-line entry point: profile the script named on sys.argv."""
    # RECONSTRUCTED: option registration and parsing calls were stripped in
    # the decompiled source; restored from the stdlib cProfile.main().
    import os, sys
    from optparse import OptionParser
    usage = 'cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ...'
    parser = OptionParser(usage=usage)
    # Stop option parsing at the script name so its own args pass through.
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
                      help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
                      help="Sort order when printing to stdout, "
                           "based on pstats.Stats class", default=-1)
    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)
    (options, args) = parser.parse_args()
    # Make the profiled script see only its own arguments.
    sys.argv[:] = args
    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {'__file__': progname, '__name__': '__main__', '__package__': None}
        runctx(code, globs, None, options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser
# When executed as a script, run the command-line profiler entry point.
# RECONSTRUCTED: the call target was stripped ("()" alone) in the source.
if __name__ == '__main__':
    main()
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import (
Error,
__version__,
writer,
reader,
register_dialect,
unregister_dialect,
get_dialect,
list_dialects,
field_size_limit,
QUOTE_MINIMAL,
QUOTE_ALL,
QUOTE_NONNUMERIC,
QUOTE_NONE,
__doc__,
)
from _csv import Dialect as _Dialect
try:
from io import StringIO
except ImportError:
from io import StringIO
__all__ = [
"QUOTE_MINIMAL",
"QUOTE_ALL",
"QUOTE_NONNUMERIC",
"QUOTE_NONE",
"Error",
"Dialect",
"__doc__",
"excel",
"excel_tab",
"field_size_limit",
"reader",
"writer",
"register_dialect",
"get_dialect",
"list_dialects",
"Sniffer",
"unregister_dialect",
"__version__",
"DictReader",
"DictWriter",
]
class Dialect:
    """Describe an Excel dialect.

    This must be subclassed (see csv.excel). Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.
    """

    _name = ""
    _valid = False
    # Placeholder attributes; concrete subclasses are expected to override.
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only subclasses count as valid; the abstract base never does.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate the real checking to the C implementation, translating
        # its TypeError into csv.Error (kept for compatibility with py2.3).
        try:
            _Dialect(self)
        except TypeError as err:
            raise Error(str(err))
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""

    delimiter = ","
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = "\r\n"
    quoting = QUOTE_MINIMAL


# Make the dialect available to reader()/writer() under the name "excel".
register_dialect("excel", excel)
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""

    # Identical to excel except for the tab delimiter.
    delimiter = "\t"


register_dialect("excel-tab", excel_tab)
class DictReader:
    """Iterate over CSV rows as dicts keyed by the field names."""

    def __init__(
        self,
        f,
        fieldnames=None,
        restkey=None,
        restval=None,
        dialect="excel",
        *args,
        **kwds
    ):
        self._fieldnames = fieldnames  # keys for the produced dicts
        self.restkey = restkey         # key that collects overflow columns
        self.restval = restval         # filler value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        # Lazily take the first row as the header if none was supplied.
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            self.fieldnames  # accessed only to trigger the header read
        row = next(self.reader)
        self.line_num = self.reader.line_num
        # Unlike the basic reader, skip blank rows entirely rather than
        # producing a dict full of None values.
        while row == []:
            row = next(self.reader)
        result = dict(zip(self.fieldnames, row))
        n_fields = len(self.fieldnames)
        n_cells = len(row)
        if n_fields < n_cells:
            # Long row: stash the extra cells under restkey.
            result[self.restkey] = row[n_fields:]
        elif n_fields > n_cells:
            # Short row: pad the missing fields with restval.
            for missing in self.fieldnames[n_cells:]:
                result[missing] = self.restval
        return result
class DictWriter:
    """Write dicts as CSV rows, ordered by a fixed field-name list."""

    def __init__(
        self,
        f,
        fieldnames,
        restval="",
        extrasaction="raise",
        dialect="excel",
        *args,
        **kwds
    ):
        self.fieldnames = fieldnames  # column order for every row
        self.restval = restval        # filler for keys missing from a dict
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError(
                "extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction
            )
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        # A header row is just each field name mapped to itself.
        self.writerow(dict(zip(self.fieldnames, self.fieldnames)))

    def _dict_to_list(self, rowdict):
        if self.extrasaction == "raise":
            unknown = [key for key in rowdict if key not in self.fieldnames]
            if unknown:
                raise ValueError(
                    "dict contains fields not in fieldnames: " + ", ".join(unknown)
                )
        return [rowdict.get(key, self.restval) for key in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        converted = [self._dict_to_list(rowdict) for rowdict in rowdicts]
        return self.writer.writerows(converted)
# Guard Sniffer's type checking against builds that exclude complex()
# (Sniffer.has_header probes column values with complex() among others).
try:
    complex
except NameError:
    complex = float
class Sniffer:
    """
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    """

    def __init__(self):
        # in case there is more than one possible delimiter
        self.preferred = [",", "\t", ";", " ", ":"]

    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample
        """
        # First try the quote-based heuristic; fall back to the
        # frequency-based one when no quoted fields are present.
        (
            quotechar,
            doublequote,
            delimiter,
            skipinitialspace,
        ) = self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample, delimiters)
        if not delimiter:
            raise Error("Could not determine delimiter")

        # Build a throwaway Dialect subclass carrying the sniffed values.
        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = "\r\n"
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace
        return dialect

    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.
        """
        matches = []
        # Try progressively looser patterns; stop at the first that matches.
        for restr in (
            "(?P<delim>[^\w\n\"'])(?P<space> ?)(?P<quote>[\"']).*?(?P=quote)(?P=delim)", # ,".*?",
            "(?:^|\n)(?P<quote>[\"']).*?(?P=quote)(?P<delim>[^\w\n\"'])(?P<space> ?)", # ".*?",
            # NOTE(review): the extra '>' after (?P<delim> makes this group
            # match a literal '>' plus one more char — looks like a typo
            # inherited from old CPython csv.py; TODO confirm before changing.
            "(?P<delim>>[^\w\n\"'])(?P<space> ?)(?P<quote>[\"']).*?(?P=quote)(?:$|\n)", # ,".*?"
            "(?:^|\n)(?P<quote>[\"']).*?(?P=quote)(?:$|\n)",
        ): # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break
        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ("", False, None, 0)
        # Tally how often each candidate quote/delimiter character appears.
        quotes = {}
        delims = {}
        spaces = 0
        for m in matches:
            n = regexp.groupindex["quote"] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = regexp.groupindex["delim"] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = regexp.groupindex["space"] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1
        # The most frequent candidate wins.
        quotechar = reduce(
            lambda a, b, quotes=quotes: (quotes[a] > quotes[b]) and a or b,
            list(quotes.keys()),
        )
        if delims:
            delim = reduce(
                lambda a, b, delims=delims: (delims[a] > delims[b]) and a or b,
                list(delims.keys()),
            )
            skipinitialspace = delims[delim] == spaces
            if delim == "\n": # most likely a file with a single column
                delim = ""
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ""
            skipinitialspace = 0
        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)"
            % {"delim": delim, "quote": quotechar},
            re.MULTILINE,
        )
        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False
        return (quotechar, doublequote, delim, skipinitialspace)

    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        """
        data = [_f for _f in data.split("\n") if _f]
        ascii = [chr(c) for c in range(127)] # 7-bit ASCII
        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency
            for char in list(charFrequency.keys()):
                items = list(charFrequency[char].items())
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b, items)
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (
                        modes[char][0],
                        modes[char][1]
                        - reduce(lambda a, b: (0, a[1] + b[1]), items)[1],
                    )
                else:
                    modes[char] = items[0]
            # build a list of possible delimiters
            modeList = list(modes.items())
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            # Relax the consistency requirement until some delimiter passes.
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if (v[1] / total) >= consistency and (
                            delimiters is None or k in delimiters
                        ):
                            delims[k] = v
                consistency -= 0.01
            if len(delims) == 1:
                delim = list(delims.keys())[0]
                skipinitialspace = data[0].count(delim) == data[0].count("%c " % delim)
                return (delim, skipinitialspace)
            # analyze another chunkLength lines
            start = end
            end += chunkLength
        if not delims:
            return ("", 0)
        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in list(delims.keys()):
                    skipinitialspace = data[0].count(d) == data[0].count("%c " % d)
                    return (d, skipinitialspace)
        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v, k) for (k, v) in list(delims.items())]
        items.sort()
        delim = items[-1][1]
        skipinitialspace = data[0].count(delim) == data[0].count("%c " % delim)
        return (delim, skipinitialspace)

    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.
        rdr = reader(StringIO(sample), self.sniff(sample))
        header = next(rdr) # assume first row is header
        columns = len(header)
        columnTypes = {}
        for i in range(columns):
            columnTypes[i] = None
        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1
            if len(row) != columns:
                continue # skip rows that have irregular number of columns
            for col in list(columnTypes.keys()):
                # NOTE(review): [int, int, ...] looks like a 2to3 relic of
                # [int, long, ...]; the duplicate is harmless — TODO confirm.
                for thisType in [int, int, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])
                # treat longs as ints
                if thisType == int:
                    thisType = int
                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]
        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in list(columnTypes.items()):
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1
        return hasHeader > 0
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""A package for parsing, handling, and generating email messages."""
__version__ = "4.0.3"

# Public API: both the old CamelCase module names (served via the
# LazyImporter proxies installed below) and the PEP 8 style new names.
__all__ = [
    # Old names
    "base64MIME",
    "Charset",
    "Encoders",
    "Errors",
    "Generator",
    "Header",
    "Iterators",
    "Message",
    "MIMEAudio",
    "MIMEBase",
    "MIMEImage",
    "MIMEMessage",
    "MIMEMultipart",
    "MIMENonMultipart",
    "MIMEText",
    "Parser",
    "quopriMIME",
    "Utils",
    "message_from_string",
    "message_from_file",
    # new names
    "base64mime",
    "charset",
    "encoders",
    "errors",
    "generator",
    "header",
    "iterators",
    "message",
    "mime",
    "parser",
    "quoprimime",
    "utils",
]
# Some convenience routines. Don't import Parser and Message as side-effects
# of importing email since those cascadingly import most of the rest of the
# email package.
def message_from_string(s, *args, **kws):
    """Parse a string into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    # Imported locally to avoid cascading imports at package-import time.
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parsestr(s)
def message_from_file(fp, *args, **kws):
    """Read a file and parse its contents into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    # Imported locally to avoid cascading imports at package-import time.
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parse(fp)
# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
# email 4.0 module names), to old-style names (email 3.0 module names).
import sys
class LazyImporter(object):
    """Module proxy that imports the real email submodule on first access."""

    def __init__(self, module_name):
        self.__name__ = "email." + module_name

    def __getattr__(self, name):
        __import__(self.__name__)
        module = sys.modules[self.__name__]
        # Copy the real module's namespace onto the proxy so subsequent
        # lookups bypass __getattr__ entirely.
        self.__dict__.update(module.__dict__)
        return getattr(module, name)
# Old-style module names that map to lowercased new-style names.
_LOWERNAMES = [
    # email.<old name> -> email.<new name is lowercased old name>
    "Charset",
    "Encoders",
    "Errors",
    "FeedParser",
    "Generator",
    "Header",
    "Iterators",
    "Message",
    "Parser",
    "Utils",
    "base64MIME",
    "quopriMIME",
]
# Old-style MIME class module names that moved under email.mime.
_MIMENAMES = [
    # email.MIME<old name> -> email.mime.<new name is lowercased old name>
    "Audio",
    "Base",
    "Image",
    "Message",
    "Multipart",
    "NonMultipart",
    "Text",
]
# Install proxies so the old names (email.Charset, email.MIMEText, ...)
# resolve to the same lazily imported modules as the new lowercase names.
for _name in _LOWERNAMES:
    importer = LazyImporter(_name.lower())
    sys.modules["email." + _name] = importer
    setattr(sys.modules["email"], _name, importer)

import email.mime

for _name in _MIMENAMES:
    importer = LazyImporter("mime." + _name.lower())
    sys.modules["email.MIME" + _name] = importer
    setattr(sys.modules["email"], "MIME" + _name, importer)
    # Also expose e.g. email.mime.Text alongside email.mime.text.
    setattr(sys.modules["email.mime"], _name, importer)
|
#
# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_hk, codecs
import _multibytecodec as mbc
# The C-implemented codec object supplied by the _codecs_hk module.
codec = _codecs_hk.getcodec("big5hkscs")


class Codec(codecs.Codec):
    # Stateless encode/decode come straight from the C codec.
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record used by the encodings registry."""
    return codecs.CodecInfo(
        name="big5hkscs",
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
#
# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# The C-implemented codec object supplied by the _codecs_iso2022 module.
codec = _codecs_iso2022.getcodec("iso2022_jp_2004")


class Codec(codecs.Codec):
    # Stateless encode/decode come straight from the C codec.
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record used by the encodings registry."""
    return codecs.CodecInfo(
        name="iso2022_jp_2004",
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
""" Python 'utf-8' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs

# Module-level encode is the C-implemented UTF-8 encoder.
encode = codecs.utf_8_encode
def decode(input, errors="strict"):
    """Decode *input* as UTF-8, always consuming the whole buffer."""
    decoded, consumed = codecs.utf_8_decode(input, errors, True)
    return (decoded, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # UTF-8 encoding is stateless: each chunk encodes independently.
        return codecs.utf_8_encode(input, self.errors)[0]


class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # The buffered base class re-feeds split multi-byte sequences.
    _buffer_decode = codecs.utf_8_decode


class StreamWriter(codecs.StreamWriter):
    encode = codecs.utf_8_encode


class StreamReader(codecs.StreamReader):
    decode = codecs.utf_8_decode


### encodings module API


def getregentry():
    """Return the CodecInfo record used by the encodings registry."""
    return codecs.CodecInfo(
        name="utf-8",
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
'Import hook support.\n\nConsistent use of this module will make it possible to change the\ndifferent mechanisms involved in loading modules independently.\n\nWhile the built-in module imp exports interfaces to the built-in\nmodule searching and loading algorithm, and it is possible to replace\nthe built-in function __import__ in order to change the semantics of\nthe import statement, until now it has been difficult to combine the\neffect of different __import__ hacks, like loading modules from URLs\nby rimport.py, or restricted execution by rexec.py.\n\nThis module defines three new concepts:\n\n1) A "file system hooks" class provides an interface to a filesystem.\n\nOne hooks class is defined (Hooks), which uses the interface provided\nby standard modules os and os.path. It should be used as the base\nclass for other hooks classes.\n\n2) A "module loader" class provides an interface to search for a\nmodule in a search path and to load it. It defines a method which\nsearches for a module in a single directory; by overriding this method\none can redefine the details of the search. If the directory is None,\nbuilt-in and frozen modules are searched instead.\n\nTwo module loader class are defined, both implementing the search\nstrategy used by the built-in __import__ function: ModuleLoader uses\nthe imp module\'s find_module interface, while HookableModuleLoader\nuses a file system hooks class to interact with the file system. Both\nuse the imp module\'s load_* interfaces to actually load the module.\n\n3) A "module importer" class provides an interface to import a\nmodule, as well as interfaces to reload and unload a module. 
It also\nprovides interfaces to install and uninstall itself instead of the\ndefault __import__ and reload (and unload) functions.\n\nOne module importer class is defined (ModuleImporter), which uses a\nmodule loader instance passed in (by default HookableModuleLoader is\ninstantiated).\n\nThe classes defined here should be used as base classes for extended\nfunctionality along those lines.\n\nIf a module importer class supports dotted names, its import_module()\nmust return a different value depending on whether it is called on\nbehalf of a "from ... import ..." statement or not. (This is caused\nby the way the __import__ hook is used by the Python interpreter.) It\nwould also do wise to install a different version of reload().\n\n'
from warnings import warnpy3k, warn
('the ihooks module has been removed in Python 3.0')
del warnpy3k
import builtins
import imp
import os
import sys
__all__ = ['BasicModuleLoader', 'Hooks', 'ModuleLoader', 'FancyModuleLoader', 'BasicModuleImporter', 'ModuleImporter', 'install', 'uninstall']
# Default verbosity for _Verbose and its subclasses.
VERBOSE = 0
from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
# Friendlier aliases for the imp module-type constants.
BUILTIN_MODULE = C_BUILTIN
FROZEN_MODULE = PY_FROZEN
class _Verbose:
    """Mix-in that gates note()/message() output on a verbosity flag."""

    def __init__(self, verbose=VERBOSE):
        self.verbose = verbose

    def get_verbose(self):
        return self.verbose

    def set_verbose(self, verbose):
        self.verbose = verbose

    def note(self, *args):
        """Emit a message only when verbosity is enabled."""
        # RECONSTRUCTED: the call target was stripped in the decompiled
        # source ("(*args)"); the original forwards to self.message().
        if self.verbose:
            self.message(*args)

    def message(self, format, *args):
        """Print *format*, %-interpolated with *args* when any are given."""
        # RECONSTRUCTED: print targets were stripped in the decompiled source.
        if args:
            print(format % args)
        else:
            print(format)
class BasicModuleLoader(_Verbose):
    """Basic module loader.

    This provides the same functionality as built-in import. It
    doesn't deal with checking sys.modules -- all it provides is
    find_module() and a load_module(), as well as find_module_in_dir()
    which searches just one directory, and can be overridden by a
    derived class to change the module search algorithm when the basic
    dependency on sys.path is unchanged.

    The interface is a little more convenient than imp's:
    find_module(name, [path]) returns None or 'stuff', and
    load_module(name, stuff) loads the module.
    """

    # RECONSTRUCTED: all call targets in this class were stripped in the
    # decompiled source; restored from the original py2 ihooks module.

    def find_module(self, name, path=None):
        # None stands for the built-in/frozen search (see find_module_in_dir).
        if path is None:
            path = [None] + self.default_path()
        for dir in path:
            stuff = self.find_module_in_dir(name, dir)
            if stuff:
                return stuff
        return None

    def default_path(self):
        return sys.path

    def find_module_in_dir(self, name, dir):
        if dir is None:
            return self.find_builtin_module(name)
        else:
            try:
                return imp.find_module(name, [dir])
            except ImportError:
                return None

    def find_builtin_module(self, name):
        if imp.is_builtin(name):
            return None, '', ('', '', BUILTIN_MODULE)
        if imp.is_frozen(name):
            return None, '', ('', '', FROZEN_MODULE)
        return None

    def load_module(self, name, stuff):
        (file, filename, info) = stuff
        try:
            return imp.load_module(name, file, filename, info)
        finally:
            # Always release the file handle opened by find_module.
            if file:
                file.close()
class Hooks(_Verbose):
    """Hooks into the filesystem and interpreter.

    By deriving a subclass you can redefine your filesystem interface,
    e.g. to merge it with the URL space.

    This base class behaves just like the native filesystem.
    """

    # RECONSTRUCTED: all delegated call targets were stripped in the
    # decompiled source; restored from the original py2 ihooks module,
    # where each method forwards to imp / sys / os.path / os.

    # imp interface
    def get_suffixes(self):
        return imp.get_suffixes()

    def new_module(self, name):
        return imp.new_module(name)

    def is_builtin(self, name):
        return imp.is_builtin(name)

    def init_builtin(self, name):
        return imp.init_builtin(name)

    def is_frozen(self, name):
        return imp.is_frozen(name)

    def init_frozen(self, name):
        return imp.init_frozen(name)

    def get_frozen_object(self, name):
        return imp.get_frozen_object(name)

    def load_source(self, name, filename, file=None):
        return imp.load_source(name, filename, file)

    def load_compiled(self, name, filename, file=None):
        return imp.load_compiled(name, filename, file)

    def load_dynamic(self, name, filename, file=None):
        return imp.load_dynamic(name, filename, file)

    def load_package(self, name, filename, file=None):
        return imp.load_module(name, file, filename, ('', '', PKG_DIRECTORY))

    def add_module(self, name):
        # Reuse an existing entry in the modules dict when present.
        d = self.modules_dict()
        if name in d:
            return d[name]
        d[name] = m = self.new_module(name)
        return m

    # sys interface
    def modules_dict(self):
        return sys.modules

    def default_path(self):
        return sys.path

    def path_split(self, x):
        return os.path.split(x)

    def path_join(self, x, y):
        return os.path.join(x, y)

    def path_isabs(self, x):
        return os.path.isabs(x)

    def path_exists(self, x):
        return os.path.exists(x)

    def path_isdir(self, x):
        return os.path.isdir(x)

    def path_isfile(self, x):
        return os.path.isfile(x)

    def path_islink(self, x):
        return os.path.islink(x)

    def openfile(self, *x):
        return open(*x)

    openfile_error = IOError

    def listdir(self, x):
        return os.listdir(x)

    listdir_error = os.error
# NOTE(review): as above, call targets were stripped to bare tuples
# (decompiler damage); code kept byte-identical -- compare with upstream.
class ModuleLoader(BasicModuleLoader):
    "Default module loader; uses file system hooks.\n\n    By defining suitable hooks, you might be able to load modules from\n    other sources than the file system, e.g. from compressed or\n    encrypted files, tar files or (if you're brave!) URLs.\n\n    "
    def __init__(self, hooks=None, verbose=VERBOSE):
        # Presumably BasicModuleLoader.__init__(self, verbose) and a default
        # Hooks(verbose) -- stripped.
        (self, verbose)
        self.hooks = (hooks or (verbose))
    def default_path(self):
        return ()
    def modules_dict(self):
        return ()
    def get_hooks(self):
        return self.hooks
    def set_hooks(self, hooks):
        self.hooks = hooks
    def find_builtin_module(self, name):
        # NOTE(review): same unreachable-second-branch shape as the base class.
        if (name):
            return (None, '', ('', '', BUILTIN_MODULE))
        if (name):
            return (None, '', ('', '', FROZEN_MODULE))
        return None
    def find_module_in_dir(self, name, dir, allow_packages=1):
        # Look for a package directory first (when allowed), then for a file
        # with any of the registered suffixes.
        if (dir is None):
            return (name)
        if allow_packages:
            fullname = (dir, name)
            if (fullname):
                stuff = ('__init__', fullname, 0)
                if stuff:
                    file = stuff[0]
                    if file:
                        ()
                    return (None, fullname, ('', '', PKG_DIRECTORY))
        for info in ():
            (suff, mode, type) = info
            fullname = (dir, (name + suff))
            try:
                fp = (fullname, mode)
                return (fp, fullname, info)
            except self.hooks.openfile_error:
                raise
        return None
    def load_module(self, name, stuff):
        # Dispatch on the module type recorded in `info`; always close the
        # file (the bare `()` in finally) before returning.
        (file, filename, info) = stuff
        (suff, mode, type) = info
        try:
            if (type == BUILTIN_MODULE):
                return (name)
            if (type == FROZEN_MODULE):
                return (name)
            if (type == C_EXTENSION):
                m = (name, filename, file)
            elif (type == PY_SOURCE):
                m = (name, filename, file)
            elif (type == PY_COMPILED):
                m = (name, filename, file)
            elif (type == PKG_DIRECTORY):
                m = (name, filename, file)
            else:
                raise (('Unrecognized module type (%r) for %s' % (type, name)))
        finally:
            if file:
                ()
        m.__file__ = filename
        return m
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
class FancyModuleLoader(ModuleLoader):
    'Fancy module loader -- parses and execs the code itself.'
    def load_module(self, name, stuff):
        # Resolve packages to their __init__ module, obtain a code object for
        # frozen/compiled/source modules, then exec it in a fresh module dict.
        (file, filename, (suff, mode, type)) = stuff
        realfilename = filename
        path = None
        if (type == PKG_DIRECTORY):
            initstuff = ('__init__', filename, 0)
            if (not initstuff):
                raise (('No __init__ module in package %s' % name))
            (initfile, initfilename, initinfo) = initstuff
            (initsuff, initmode, inittype) = initinfo
            if (inittype not in (PY_COMPILED, PY_SOURCE)):
                if initfile:
                    ()
                raise (('Bad type (%r) for __init__ module in package %s' % (inittype, name)))
            path = [filename]
            file = initfile
            realfilename = initfilename
            type = inittype
        if (type == FROZEN_MODULE):
            code = (name)
        elif (type == PY_COMPILED):
            import marshal
            # Presumably file.read(8) to skip the .pyc header, then
            # marshal.load(file) -- both callees stripped.
            (8)
            code = (file)
        elif (type == PY_SOURCE):
            data = ()
            code = (data, realfilename, 'exec')
        else:
            return (self, name, stuff)
        m = (name)
        if path:
            m.__path__ = path
        m.__file__ = filename
        try:
            (code, m.__dict__)
        except:
            # On any failure, remove the half-initialized module and re-raise.
            d = ()
            if (name in d):
                del d[name]
            raise
        return m
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
class BasicModuleImporter(_Verbose):
    'Basic module importer; uses module loader.\n\n    This provides basic import facilities but no package imports.\n\n    '
    def __init__(self, loader=None, verbose=VERBOSE):
        (self, verbose)
        self.loader = (loader or (None, verbose))
        self.modules = ()
    def get_loader(self):
        return self.loader
    def set_loader(self, loader):
        self.loader = loader
    def get_hooks(self):
        return ()
    def set_hooks(self, hooks):
        return (hooks)
    def import_module(self, name, globals={}, locals={}, fromlist=[]):
        # NOTE(review): mutable default args preserved from the original; a
        # fix would change the public signature semantics -- leave as-is.
        name = (name)
        if (name in self.modules):
            return self.modules[name]
        stuff = (name)
        if (not stuff):
            raise (('No module named %s' % name))
        return (name, stuff)
    def reload(self, module, path=None):
        name = (module.__name__)
        stuff = (name, path)
        if (not stuff):
            raise (('Module %s not found for reload' % name))
        return (name, stuff)
    def unload(self, module):
        del self.modules[(module.__name__)]
    def install(self):
        # Hook our import machinery into the builtins, saving the originals
        # for uninstall().
        self.save_import_module = builtins.__import__
        self.save_reload = builtins.reload
        # NOTE(review): mixes `builtins` and `__builtin__` names -- the
        # stripped call was presumably hasattr(__builtin__, 'unload').
        if (not (__builtin__, 'unload')):
            builtins.unload = None
        self.save_unload = builtins.unload
        builtins.__import__ = self.import_module
        builtins.reload = self.reload
        builtins.unload = self.unload
    def uninstall(self):
        # Restore the builtins saved by install().
        builtins.__import__ = self.save_import_module
        builtins.reload = self.save_reload
        builtins.unload = self.save_unload
        if (not builtins.unload):
            del builtins.unload
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
# The structure mirrors the classic knee.py/ihooks package-import algorithm.
class ModuleImporter(BasicModuleImporter):
    'A module importer that supports packages.'
    def import_module(self, name, globals=None, locals=None, fromlist=None, level=(- 1)):
        # Classic package import: find the head package, load the dotted
        # tail, then ensure names in `fromlist` exist on the result.
        parent = (globals, level)
        (q, tail) = (parent, (name))
        m = (q, tail)
        if (not fromlist):
            return q
        if (m, '__path__'):
            (m, fromlist)
        return m
    def determine_parent(self, globals, level=(- 1)):
        # Work out the package context for a (possibly relative) import from
        # the caller's globals, mimicking CPython's import semantics.
        if ((not globals) or (not level)):
            return None
        pkgname = ('__package__')
        if (pkgname is not None):
            if ((not pkgname) and (level > 0)):
                raise ('Attempted relative import in non-package')
        else:
            modname = ('__name__')
            if (modname is None):
                return None
            if ('__path__' in globals):
                pkgname = modname
            else:
                if ('.' not in modname):
                    if (level > 0):
                        raise ('Attempted relative import in non-package')
                    globals['__package__'] = None
                    return None
                pkgname = ('.')[0]
            globals['__package__'] = pkgname
        if (level > 0):
            # Walk `level - 1` dots up the package name for explicit
            # relative imports.
            dot = (pkgname)
            for x in (level, 1, (- 1)):
                try:
                    dot = ('.', 0, dot)
                except ValueError:
                    raise ('attempted relative import beyond top-level package')
            pkgname = pkgname[:dot]
        try:
            return sys.modules[pkgname]
        except KeyError:
            if (level < 1):
                (("Parent module '%s' not found while handling absolute import" % pkgname), RuntimeWarning, 1)
                return None
            else:
                raise (("Parent module '%s' not loaded, cannot perform relative import" % pkgname))
    def find_head_package(self, parent, name):
        # Split `name` at the first dot and import the head, first relative
        # to `parent`, then as a top-level module.
        if ('.' in name):
            i = ('.')
            head = name[:i]
            tail = name[(i + 1):]
        else:
            head = name
            tail = ''
        if parent:
            qname = ('%s.%s' % (parent.__name__, head))
        else:
            qname = head
        q = (head, qname, parent)
        if q:
            return (q, tail)
        if parent:
            qname = head
            parent = None
            q = (head, qname, parent)
            if q:
                return (q, tail)
        raise (("No module named '%s'" % qname))
    def load_tail(self, q, tail):
        # Import each remaining dotted component relative to its parent.
        m = q
        while tail:
            i = ('.')
            if (i < 0):
                i = (tail)
            (head, tail) = (tail[:i], tail[(i + 1):])
            mname = ('%s.%s' % (m.__name__, head))
            m = (head, mname, m)
            if (not m):
                raise (("No module named '%s'" % mname))
        return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        # Import every name in `fromlist` that is a submodule of package `m`;
        # '*' expands to m.__all__ (once, non-recursively).
        for sub in fromlist:
            if (sub == '*'):
                if (not recursive):
                    try:
                        all = m.__all__
                    except AttributeError:
                        raise
                    else:
                        (m, all, 1)
                continue
            if ((sub != '*') and (not (m, sub))):
                subname = ('%s.%s' % (m.__name__, sub))
                submod = (sub, subname, m)
                if (not submod):
                    raise (("No module named '%s'" % subname))
    def import_it(self, partname, fqname, parent, force_load=0):
        # Import a single component, consulting the module cache first.
        # NOTE(review): the `except KeyError: raise` makes the cache-miss
        # path unreachable -- upstream falls through here; verify.
        if (not partname):
            return parent
        if (not force_load):
            try:
                return self.modules[fqname]
            except KeyError:
                raise
        try:
            path = (parent and parent.__path__)
        except AttributeError:
            return None
        partname = (partname)
        stuff = (partname, path)
        if (not stuff):
            return None
        fqname = (fqname)
        m = (fqname, stuff)
        if parent:
            (parent, partname, m)
        return m
    def reload(self, module):
        # Reload a module, resolving its parent package for dotted names.
        name = (module.__name__)
        if ('.' not in name):
            return (name, name, None)
        i = ('.')
        pname = name[:i]
        parent = self.modules[pname]
        return (name[(i + 1):], name, parent)
# Module-level convenience: a single shared importer instance that can be
# installed into / removed from the builtins.
default_importer = None
current_importer = None
def install(importer=None):
    # Install `importer` (or a default) as the global import hook.
    # NOTE(review): the trailing bare parens were presumably
    # ModuleImporter() and current_importer.install() -- callees stripped.
    global current_importer
    current_importer = (importer or default_importer or ())
    ()
def uninstall():
    # Undo install(); the bare () was presumably current_importer.uninstall().
    global current_importer
    ()
"""Fixer for basestring -> str."""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixBasestring(fixer_base.BaseFix):
    """lib2to3 fixer: rewrite every ``basestring`` reference as ``str``."""

    BM_compatible = True

    PATTERN = "'basestring'"

    def transform(self, node, results):
        # Build the replacement name, carrying over the original node's
        # prefix so surrounding whitespace/comments are preserved.
        replacement = Name("str", prefix=node.prefix)
        return replacement
|
"""Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter
# Local imports
from .. import fixer_base
from ..fixer_util import Name, syms
class FixNonzero(fixer_base.BaseFix):
    """lib2to3 fixer: rename ``__nonzero__`` methods to ``__bool__``."""

    BM_compatible = True

    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='__nonzero__'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        # Swap the captured method name in place, keeping its prefix so the
        # surrounding formatting is untouched.
        method_name = results["name"]
        method_name.replace(Name("__bool__", prefix=method_name.prefix))
|
"MH interface -- purely object-oriented (well, almost)\n\nExecutive summary:\n\nimport mhlib\n\nmh = mhlib.MH() # use default mailbox directory and profile\nmh = mhlib.MH(mailbox) # override mailbox location (default from profile)\nmh = mhlib.MH(mailbox, profile) # override mailbox and profile\n\nmh.error(format, ...) # print error message -- can be overridden\ns = mh.getprofile(key) # profile entry (None if not set)\npath = mh.getpath() # mailbox pathname\nname = mh.getcontext() # name of current folder\nmh.setcontext(name) # set name of current folder\n\nlist = mh.listfolders() # names of top-level folders\nlist = mh.listallfolders() # names of all folders, including subfolders\nlist = mh.listsubfolders(name) # direct subfolders of given folder\nlist = mh.listallsubfolders(name) # all subfolders of given folder\n\nmh.makefolder(name) # create new folder\nmh.deletefolder(name) # delete folder -- must have no subfolders\n\nf = mh.openfolder(name) # new open folder object\n\nf.error(format, ...) 
# same as mh.error(format, ...)\npath = f.getfullname() # folder's full pathname\npath = f.getsequencesfilename() # full pathname of folder's sequences file\npath = f.getmessagefilename(n) # full pathname of message n in folder\n\nlist = f.listmessages() # list of messages in folder (as numbers)\nn = f.getcurrent() # get current message\nf.setcurrent(n) # set current message\nlist = f.parsesequence(seq) # parse msgs syntax into list of messages\nn = f.getlast() # get last message (0 if no messagse)\nf.setlast(n) # set last message (internal use only)\n\ndict = f.getsequences() # dictionary of sequences in folder {name: list}\nf.putsequences(dict) # write sequences back to folder\n\nf.createmessage(n, fp) # add message from file f as number n\nf.removemessages(list) # remove messages in list from folder\nf.refilemessages(list, tofolder) # move messages in list to other folder\nf.movemessage(n, tofolder, ton) # move one message to a given destination\nf.copymessage(n, tofolder, ton) # copy one message to a given destination\n\nm = f.openmessage(n) # new open message object (costs a file descriptor)\nm is a derived class of mimetools.Message(rfc822.Message), with:\ns = m.getheadertext() # text of message's headers\ns = m.getheadertext(pred) # text of message's headers, filtered by pred\ns = m.getbodytext() # text of message's body, decoded\ns = m.getbodytext(0) # text of message's body, not decoded\n"
from warnings import warnpy3k
('the mhlib module has been removed in Python 3.0; use the mailbox module instead')
del warnpy3k
MH_PROFILE = '~/.mh_profile'
PATH = '~/Mail'
MH_SEQUENCES = '.mh_sequences'
FOLDER_PROTECT = 448
import os
import sys
import re
import mimetools
import multifile
import shutil
from bisect import bisect
__all__ = ['MH', 'Error', 'Folder', 'Message']
class Error(Exception):
    # Base exception for all mhlib errors.
    pass
# NOTE(review): as in the rest of this chunk, nearly every call expression has
# been stripped to a bare parenthesised tuple (decompiler damage) -- e.g.
# `(self.path, name)` was presumably os.path.join(...), `(fullname)` was
# os.path.isdir(...). Code kept byte-identical; restore from upstream mhlib.
class MH():
    'Class representing a particular collection of folders.\n    Optional constructor arguments are the pathname for the directory\n    containing the collection, and the MH profile to use.\n    If either is omitted or empty a default is used; the default\n    directory is taken from the MH profile if it is specified there.'
    def __init__(self, path=None, profile=None):
        'Constructor.'
        if (profile is None):
            profile = MH_PROFILE
        self.profile = (profile)
        if (path is None):
            path = ('Path')
        if (not path):
            path = PATH
        if ((not (path)) and (path[0] != '~')):
            path = ('~', path)
        path = (path)
        if (not (path)):
            raise ('MH() path not found')
        self.path = path
    def __repr__(self):
        'String representation.'
        return ('MH(%r, %r)' % (self.path, self.profile))
    def error(self, msg, *args):
        'Routine to print an error.  May be overridden by a derived class.'
        (('MH error: %s\n' % (msg % args)))
    def getprofile(self, key):
        'Return a profile entry, None if not found.'
        return (self.profile, key)
    def getpath(self):
        "Return the path (the name of the collection's directory)."
        return self.path
    def getcontext(self):
        'Return the name of the current folder.'
        context = (((), 'context'), 'Current-Folder')
        if (not context):
            context = 'inbox'
        return context
    def setcontext(self, context):
        'Set the name of the current folder.'
        fn = ((), 'context')
        f = (fn, 'w')
        (('Current-Folder: %s\n' % context))
        ()
    def listfolders(self):
        'Return the names of the top-level folders.'
        folders = []
        path = ()
        for name in (path):
            fullname = (path, name)
            if (fullname):
                (name)
        ()
        return folders
    def listsubfolders(self, name):
        'Return the names of the subfolders in a given folder\n        (prefixed with the given folder name).'
        # Uses the directory's link count as an upper bound on the number of
        # subdirectories so the scan can stop early.
        fullname = (self.path, name)
        nlinks = (fullname).st_nlink
        if (nlinks <= 2):
            return []
        subfolders = []
        subnames = (fullname)
        for subname in subnames:
            fullsubname = (fullname, subname)
            if (fullsubname):
                name_subname = (name, subname)
                (name_subname)
                nlinks = (nlinks - 1)
                if (nlinks <= 2):
                    break
        ()
        return subfolders
    def listallfolders(self):
        'Return the names of all folders and subfolders, recursively.'
        return ('')
    def listallsubfolders(self, name):
        'Return the names of subfolders in a given folder, recursively.'
        fullname = (self.path, name)
        nlinks = (fullname).st_nlink
        if (nlinks <= 2):
            return []
        subfolders = []
        subnames = (fullname)
        for subname in subnames:
            if ((subname[0] == ',') or (subname)):
                continue
            fullsubname = (fullname, subname)
            if (fullsubname):
                name_subname = (name, subname)
                (name_subname)
                if (not (fullsubname)):
                    subsubfolders = (name_subname)
                    subfolders = (subfolders + subsubfolders)
                nlinks = (nlinks - 1)
                if (nlinks <= 2):
                    break
        ()
        return subfolders
    def openfolder(self, name):
        'Return a new Folder object for the named folder.'
        return (self, name)
    def makefolder(self, name):
        'Create a new folder (or raise os.error if it cannot be created).'
        protect = (self.profile, 'Folder-Protect')
        if (protect and (protect)):
            mode = (protect, 8)
        else:
            mode = FOLDER_PROTECT
        (((), name), mode)
    def deletefolder(self, name):
        'Delete a folder.  This removes files in the folder but not\n        subdirectories.  Raise os.error if deleting the folder itself fails.'
        fullname = ((), name)
        for subname in (fullname):
            fullsubname = (fullname, subname)
            try:
                (fullsubname)
            except os.error:
                (('%s not deleted, continuing...' % fullsubname))
        (fullname)
# Pattern matching MH message numbers (positive decimal, no leading zero).
# NOTE(review): the re.compile() call was stripped to bare parens, as was the
# numericprog.match(str) call below; restore from upstream mhlib.
numericprog = ('^[1-9][0-9]*$')
def isnumeric(str):
    # True when the string is a valid message number.
    return ((str) is not None)
# NOTE(review): call expressions throughout this class were stripped to bare
# parenthesised tuples (decompiler damage) -- e.g. `(self.mh.path, self.name)`
# was os.path.join(...), `()` alone was typically f.close() or a no-arg
# method call. Code kept byte-identical; restore from upstream mhlib.
class Folder():
    'Class representing a particular folder.'
    def __init__(self, mh, name):
        'Constructor.'
        self.mh = mh
        self.name = name
        if (not (())):
            raise (('no folder %s' % name))
    def __repr__(self):
        'String representation.'
        return ('Folder(%r, %r)' % (self.mh, self.name))
    def error(self, *args):
        'Error message handler.'
        (*args)
    def getfullname(self):
        'Return the full pathname of the folder.'
        return (self.mh.path, self.name)
    def getsequencesfilename(self):
        "Return the full pathname of the folder's sequences file."
        return ((), MH_SEQUENCES)
    def getmessagefilename(self, n):
        'Return the full pathname of a message in the folder.'
        return ((), (n))
    def listsubfolders(self):
        'Return list of direct subfolders.'
        return (self.name)
    def listallsubfolders(self):
        'Return list of all subfolders.'
        return (self.name)
    def listmessages(self):
        'Return the list of messages currently present in the folder.\n        As a side effect, set self.last to the last message (or 0).'
        messages = []
        match = numericprog.match
        append = messages.append
        for name in (()):
            if (name):
                (name)
        messages = ((int, messages))
        ()
        if messages:
            self.last = messages[(- 1)]
        else:
            self.last = 0
        return messages
    def getsequences(self):
        'Return the set of sequences for the folder.'
        sequences = {}
        fullname = ()
        try:
            f = (fullname, 'r')
        except IOError:
            return sequences
        while 1:
            line = ()
            if (not line):
                break
            fields = (':')
            if ((fields) != 2):
                (('bad sequence in %s: %s' % (fullname, ())))
            key = ()
            value = ()
            sequences[key] = value
        return sequences
    def putsequences(self, sequences):
        'Write the set of sequences back to the folder.'
        fullname = ()
        f = None
        for (key, seq) in ():
            s = ('', ' ')
            (seq)
            if (not f):
                f = (fullname, 'w')
            (('%s: %s\n' % (key, ())))
        if (not f):
            try:
                (fullname)
            except os.error:
                raise
        else:
            ()
    def getcurrent(self):
        'Return the current message.  Raise Error when there is none.'
        seqs = ()
        try:
            return (seqs['cur'])
        except (ValueError, KeyError):
            raise ('no cur message')
    def setcurrent(self, n):
        'Set the current message.'
        ((), 'cur', (n), 0)
    def parsesequence(self, seq):
        'Parse an MH sequence specification into a message list.\n        Attempt to mimic mh-sequence(5) as close as possible.\n        Also attempt to mimic observed behavior regarding which\n        conditions cause which error messages.'
        all = ()
        if (not all):
            raise (('no messages in %s' % self.name))
        if (seq == 'all'):
            return all
        # Handle the "anchor:count" form (e.g. "first:3", "cur:-3").
        i = (':')
        if (i >= 0):
            (head, dir, tail) = (seq[:i], '', seq[(i + 1):])
            if (tail[:1] in '-+'):
                (dir, tail) = (tail[:1], tail[1:])
            if (not (tail)):
                raise (('bad message list %s' % seq))
            try:
                count = (tail)
            except (ValueError, OverflowError):
                count = (all)
            try:
                anchor = (head, all)
            except Error as msg:
                seqs = ()
                if (not (head in seqs)):
                    if (not msg):
                        msg = ('bad message list %s' % seq)
                    raise (()[2])
                msgs = seqs[head]
                if (not msgs):
                    raise (('sequence %s empty' % head))
                if (dir == '-'):
                    return msgs[(- count):]
                else:
                    return msgs[:count]
            else:
                if (not dir):
                    if (head in ('prev', 'last')):
                        dir = '-'
                if (dir == '-'):
                    i = (all, anchor)
                    return all[(0, (i - count)):i]
                else:
                    i = (all, (anchor - 1))
                    return all[i:(i + count)]
        # Handle the "begin-end" range form.
        i = ('-')
        if (i >= 0):
            begin = (seq[:i], all)
            end = (seq[(i + 1):], all)
            i = (all, (begin - 1))
            j = (all, end)
            r = all[i:j]
            if (not r):
                raise (('bad message list %s' % seq))
            return r
        # Single message number or named sequence.
        try:
            n = (seq, all)
        except Error as msg:
            seqs = ()
            if (not (seq in seqs)):
                if (not msg):
                    msg = ('bad message list %s' % seq)
                raise (msg)
            return seqs[seq]
        else:
            if (n not in all):
                if (seq):
                    raise (("message %d doesn't exist" % n))
                else:
                    raise (('no %s message' % seq))
            else:
                return [n]
    def _parseindex(self, seq, all):
        'Internal: parse a message number (or cur, first, etc.).'
        if (seq):
            try:
                return (seq)
            except (OverflowError, ValueError):
                return sys.maxsize
        if (seq in ('cur', '.')):
            return ()
        if (seq == 'first'):
            return all[0]
        if (seq == 'last'):
            return all[(- 1)]
        if (seq == 'next'):
            n = ()
            i = (all, n)
            try:
                return all[i]
            except IndexError:
                raise ('no next message')
        if (seq == 'prev'):
            n = ()
            i = (all, (n - 1))
            if (i == 0):
                raise ('no prev message')
            try:
                return all[(i - 1)]
            except IndexError:
                raise ('no prev message')
        raise (None)
    def openmessage(self, n):
        'Open a message -- returns a Message object.'
        return (self, n)
    def removemessages(self, list):
        'Remove one or more messages -- may raise os.error.'
        # MH convention: a "removed" message is renamed with a comma prefix.
        errors = []
        deleted = []
        for n in list:
            path = (n)
            commapath = ((',' + (n)))
            try:
                (commapath)
            except os.error:
                raise
            try:
                (path, commapath)
            except os.error as msg:
                (msg)
            else:
                (n)
        if deleted:
            (deleted)
        if errors:
            if ((errors) == 1):
                raise (errors[0])
            else:
                raise ('multiple errors:', errors)
    def refilemessages(self, list, tofolder, keepsequences=0):
        "Refile one or more messages -- may raise os.error.\n        'tofolder' is an open folder object."
        errors = []
        refiled = {}
        for n in list:
            ton = (() + 1)
            path = (n)
            topath = (ton)
            try:
                (path, topath)
            except os.error:
                # Rename across devices failed; fall back to copy + unlink.
                try:
                    (path, topath)
                    (path)
                except (IOError, os.error) as msg:
                    (msg)
                    try:
                        (topath)
                    except os.error:
                        raise
                    continue
            (ton)
            refiled[n] = ton
        if refiled:
            if keepsequences:
                (self, (()))
            ((()))
        if errors:
            if ((errors) == 1):
                raise (errors[0])
            else:
                raise ('multiple errors:', errors)
    def _copysequences(self, fromfolder, refileditems):
        'Helper for refilemessages() to copy sequences.'
        fromsequences = ()
        tosequences = ()
        changed = 0
        for (name, seq) in (()):
            try:
                toseq = tosequences[name]
                new = 0
            except KeyError:
                toseq = []
                new = 1
            for (fromn, ton) in refileditems:
                if (fromn in seq):
                    (ton)
                    changed = 1
            if (new and toseq):
                tosequences[name] = toseq
        if changed:
            (tosequences)
    def movemessage(self, n, tofolder, ton):
        'Move one message over a specific destination message,\n        which may or may not already exist.'
        path = (n)
        f = (path)
        ()
        del f
        topath = (ton)
        # Back up any existing destination message before overwriting.
        backuptopath = ((',%d' % ton))
        try:
            (topath, backuptopath)
        except os.error:
            raise
        try:
            (path, topath)
        except os.error:
            ok = 0
            try:
                (None)
                (path, topath)
                ok = 1
            finally:
                if (not ok):
                    try:
                        (topath)
                    except os.error:
                        raise
        (path)
        ([n])
    def copymessage(self, n, tofolder, ton):
        'Copy one message over a specific destination message,\n        which may or may not already exist.'
        path = (n)
        f = (path)
        ()
        del f
        topath = (ton)
        backuptopath = ((',%d' % ton))
        try:
            (topath, backuptopath)
        except os.error:
            raise
        ok = 0
        try:
            (None)
            (path, topath)
            ok = 1
        finally:
            if (not ok):
                try:
                    (topath)
                except os.error:
                    raise
    def createmessage(self, n, txt):
        'Create a message, with text from the open file txt.'
        path = (n)
        backuppath = ((',%d' % n))
        try:
            (path, backuppath)
        except os.error:
            raise
        ok = 0
        BUFSIZE = (16 * 1024)
        try:
            f = (path, 'w')
            while 1:
                buf = (BUFSIZE)
                if (not buf):
                    break
                (buf)
            ()
            ok = 1
        finally:
            if (not ok):
                try:
                    (path)
                except os.error:
                    raise
    def removefromallsequences(self, list):
        "Remove one or more messages from all sequences (including last)\n        -- but not from 'cur'!!!"
        if ((self, 'last') and (self.last in list)):
            del self.last
        sequences = ()
        changed = 0
        for (name, seq) in (()):
            if (name == 'cur'):
                continue
            for n in list:
                if (n in seq):
                    (n)
                    changed = 1
                    if (not seq):
                        del sequences[name]
        if changed:
            (sequences)
    def getlast(self):
        'Return the last message number.'
        if (not (self, 'last')):
            ()
        return self.last
    def setlast(self, last):
        'Set the last message number.'
        if (last is None):
            if (self, 'last'):
                del self.last
        else:
            self.last = last
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
class Message(mimetools.Message):
    def __init__(self, f, n, fp=None):
        'Constructor.'
        self.folder = f
        self.number = n
        if (fp is None):
            path = (n)
            fp = (path, 'r')
        (self, fp)
    def __repr__(self):
        'String representation.'
        return ('Message(%s, %s)' % ((self.folder), self.number))
    def getheadertext(self, pred=None):
        "Return the message's header text as a string.  If an\n        argument is specified, it is used as a filter predicate to\n        decide which headers to return (its argument is the header\n        name converted to lower case)."
        if (pred is None):
            return (self.headers)
        headers = []
        hit = 0
        for line in self.headers:
            # Continuation lines inherit the predicate result of the last
            # header name seen.
            if (not ()):
                i = (':')
                if (i > 0):
                    hit = (())
            if hit:
                (line)
        return (headers)
    def getbodytext(self, decode=1):
        "Return the message's body text as string.  This undoes a\n        Content-Transfer-Encoding, but does not interpret other MIME\n        features (e.g. multipart messages).  To suppress decoding,\n        pass 0 as an argument."
        (self.startofbody)
        encoding = ()
        if ((not decode) or (encoding in ('', '7bit', '8bit', 'binary'))):
            return ()
        # NOTE(review): both import branches are identical -- upstream tried
        # cStringIO first, falling back to StringIO.
        try:
            from io import StringIO
        except ImportError:
            from io import StringIO
        output = ()
        (self.fp, output, encoding)
        return ()
    def getbodyparts(self):
        "Only for multipart messages: return the message's body as a\n        list of SubMessage objects.  Each submessage object behaves\n        (almost) as a Message object."
        if (() != 'multipart'):
            raise ('Content-Type is not multipart/*')
        bdry = ('boundary')
        if (not bdry):
            raise ('multipart/* without boundary param')
        (self.startofbody)
        mf = (self.fp)
        (bdry)
        parts = []
        while (mf):
            n = ('%s.%r' % (self.number, (1 + (parts))))
            part = (self.folder, n, mf)
            (part)
            ()
        return parts
    def getbody(self):
        'Return body, either a string or a list of messages.'
        if (() == 'multipart'):
            return ()
        else:
            return ()
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
class SubMessage(Message):
    def __init__(self, f, n, fp):
        'Constructor.'
        (self, f, n, fp)
        # Eagerly capture the body, since the underlying multifile pointer
        # moves on after construction.
        if (() == 'multipart'):
            self.body = (self)
        else:
            self.body = (self)
        self.bodyencoded = (self)
    def __repr__(self):
        'String representation.'
        (f, n, fp) = (self.folder, self.number, self.fp)
        return ('SubMessage(%s, %s, %s)' % (f, n, fp))
    def getbodytext(self, decode=1):
        if (not decode):
            return self.bodyencoded
        if ((self.body) == ('')):
            return self.body
    def getbodyparts(self):
        if ((self.body) == ([])):
            return self.body
    def getbody(self):
        return self.body
# NOTE(review): call targets stripped (decompiler damage); kept byte-identical.
class IntSet():
    "Class implementing sets of integers.\n\n    This is an efficient representation for sets consisting of several\n    continuous ranges, e.g. 1-100,200-400,402-1000 is represented\n    internally as a list of three pairs: [(1,100), (200,400),\n    (402,1000)]. The internal representation is always kept normalized.\n\n    The constructor has up to three arguments:\n    - the string used to initialize the set (default ''),\n    - the separator between ranges (default ',')\n    - the separator between begin and end of a range (default '-')\n    The separators must be strings (not regexprs) and should be different.\n\n    The tostring() function yields a string that can be passed to another\n    IntSet constructor; __repr__() is a valid IntSet constructor itself.\n    "
    def __init__(self, data=None, sep=',', rng='-'):
        # pairs holds sorted, non-overlapping (lo, hi) inclusive ranges.
        self.pairs = []
        self.sep = sep
        self.rng = rng
        if data:
            (data)
    def reset(self):
        self.pairs = []
    def __cmp__(self, other):
        return (self.pairs, other.pairs)
    def __hash__(self):
        return (self.pairs)
    def __repr__(self):
        return ('IntSet(%r, %r, %r)' % ((), self.sep, self.rng))
    def normalize(self):
        # Merge adjacent/overlapping pairs after sorting.
        ()
        i = 1
        while (i < (self.pairs)):
            (alo, ahi) = self.pairs[(i - 1)]
            (blo, bhi) = self.pairs[i]
            if (ahi >= (blo - 1)):
                self.pairs[(i - 1):(i + 1)] = [(alo, (ahi, bhi))]
            else:
                i = (i + 1)
    def tostring(self):
        s = ''
        for (lo, hi) in self.pairs:
            if (lo == hi):
                t = (lo)
            else:
                t = (((lo) + self.rng) + (hi))
            if s:
                s = (s + (self.sep + t))
            else:
                s = t
        return s
    def tolist(self):
        l = []
        for (lo, hi) in self.pairs:
            m = ((lo, (hi + 1)))
            l = (l + m)
        return l
    def fromlist(self, list):
        for i in list:
            (i)
    def clone(self):
        new = ()
        new.pairs = self.pairs[:]
        return new
    def min(self):
        return self.pairs[0][0]
    def max(self):
        return self.pairs[(- 1)][(- 1)]
    def contains(self, x):
        for (lo, hi) in self.pairs:
            if (lo <= x <= hi):
                return True
        return False
    def append(self, x):
        # Insert x, extending or merging neighbouring ranges as needed.
        for i in ((self.pairs)):
            (lo, hi) = self.pairs[i]
            if (x < lo):
                if ((x + 1) == lo):
                    self.pairs[i] = (x, hi)
                else:
                    (i, (x, x))
                if ((i > 0) and ((x - 1) == self.pairs[(i - 1)][1])):
                    self.pairs[(i - 1):(i + 1)] = [(self.pairs[(i - 1)][0], self.pairs[i][1])]
                return
            if (x <= hi):
                return
        i = ((self.pairs) - 1)
        if (i >= 0):
            (lo, hi) = self.pairs[i]
            if ((x - 1) == hi):
                self.pairs[i] = (lo, x)
                return
        ((x, x))
    def addpair(self, xlo, xhi):
        if (xlo > xhi):
            return
        ((xlo, xhi))
        ()
    def fromstring(self, data):
        new = []
        for part in (self.sep):
            list = []
            for subp in (self.rng):
                s = ()
                ((s))
            if ((list) == 1):
                ((list[0], list[0]))
            elif (((list) == 2) and (list[0] <= list[1])):
                ((list[0], list[1]))
            else:
                raise ('bad data passed to IntSet')
        self.pairs = (self.pairs + new)
        ()
def pickline(file, key, casefold=1):
    # Return the (continued) value of the first "key: value" line in `file`,
    # or None. NOTE(review): regex compile/match and readline calls were
    # stripped to bare parens (decompiler damage); kept byte-identical.
    try:
        f = (file, 'r')
    except IOError:
        return None
    pat = ((key) + ':')
    prog = (pat, (casefold and re.IGNORECASE))
    while 1:
        line = ()
        if (not line):
            break
        if (line):
            text = line[((key) + 1):]
            # Accumulate RFC-822 style continuation lines.
            while 1:
                line = ()
                if ((not line) or (not ())):
                    break
                text = (text + line)
            return ()
    return None
def updateline(file, key, value, casefold=1):
    # Replace (or remove, when value is None) the "key: value" line in `file`,
    # rewriting it atomically via a "~" temp file. NOTE(review): call targets
    # stripped (decompiler damage); kept byte-identical.
    try:
        f = (file, 'r')
        lines = ()
        ()
    except IOError:
        lines = []
    pat = ((key) + ':(.*)\n')
    prog = (pat, (casefold and re.IGNORECASE))
    if (value is None):
        newline = None
    else:
        newline = ('%s: %s\n' % (key, value))
    for i in ((lines)):
        line = lines[i]
        if (line):
            if (newline is None):
                del lines[i]
            else:
                lines[i] = newline
            break
    else:
        if (newline is not None):
            (newline)
    tempfile = (file + '~')
    f = (tempfile, 'w')
    for line in lines:
        (line)
    ()
    (tempfile, file)
def test():
    # Interactive smoke test for the MH classes; exercises folder creation,
    # sequences, and message-list parsing against a throwaway @test mailbox.
    # NOTE(review): call targets stripped (decompiler damage); kept as-is.
    global mh, f
    ('rm -rf $HOME/Mail/@test')
    mh = ()
    def do(s):
        (s)
        ((s))
    ('mh.listfolders()')
    ('mh.listallfolders()')
    testfolders = ['@test', '@test/test1', '@test/test2', '@test/test1/test11', '@test/test1/test12', '@test/test1/test11/test111']
    for t in testfolders:
        (('mh.makefolder(%r)' % (t,)))
    ("mh.listsubfolders('@test')")
    ("mh.listallsubfolders('@test')")
    f = ('@test')
    ('f.listsubfolders()')
    ('f.listallsubfolders()')
    ('f.getsequences()')
    seqs = ()
    seqs['foo'] = ()
    (seqs)
    (seqs)
    ('f.getsequences()')
    for t in (testfolders):
        (('mh.deletefolder(%r)' % (t,)))
    ('mh.getcontext()')
    context = ()
    f = (context)
    ('f.getcurrent()')
    for seq in ('first', 'last', 'cur', '.', 'prev', 'next', 'first:3', 'last:3', 'cur:3', 'cur:-3', 'prev:3', 'next:3', '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3', 'all'):
        try:
            (('f.parsesequence(%r)' % (seq,)))
        except Error as msg:
            ('Error:', msg)
    stuff = ()
    list = ((int, ()))
    (list, '<-- pick')
    ('f.listmessages()')
# Script entry point: run the smoke test when executed directly.
# NOTE(review): the call to test() was stripped to bare parens.
if (__name__ == '__main__'):
    ()
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = [
"NNTP",
"NNTPReplyError",
"NNTPTemporaryError",
"NNTPPermanentError",
"NNTPProtocolError",
"NNTPDataError",
"error_reply",
"error_temp",
"error_perm",
"error_proto",
"error_data",
]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Root of the nntplib exception hierarchy.

    The first positional argument, when given, is remembered as the raw
    server response in ``self.response``.
    """

    def __init__(self, *args):
        Exception.__init__(self, *args)
        if args:
            self.response = args[0]
        else:
            self.response = "No response given"
class NNTPReplyError(NNTPError):
    """Raised on an unexpected [123]xx reply from the server."""
class NNTPTemporaryError(NNTPError):
    """Raised on 4xx (temporary) error responses."""
class NNTPPermanentError(NNTPError):
    """Raised on 5xx (permanent) error responses."""
class NNTPProtocolError(NNTPError):
    """Raised when a response does not begin with a digit in [1-5]."""
class NNTPDataError(NNTPError):
    """Raised on malformed data in a server response."""
# for backwards compatibility: old lowercase exception aliases
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ["100", "215", "220", "221", "222", "224", "230", "231", "282"]
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = "\r\n"
# The class itself
class NNTP:
    def __init__(
        self,
        host,
        port=NNTP_PORT,
        user=None,
        password=None,
        readermode=None,
        usenetrc=True,
    ):
        """Initialize an instance. Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
          connecting.
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port))
        # NOTE(review): the socket file is opened "rb" (bytes) while the
        # protocol helpers compare against the str constant CRLF -- this
        # looks like Python 2 heritage; confirm the target Python version.
        self.file = self.sock.makefile("rb")
        self.debugging = 0
        self.welcome = self.getresp()
        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd("mode reader")
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError as e:
                if user and e.response[:3] == "480":
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise
        # If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc

                credentials = netrc.netrc()
                auth = credentials.authenticators(host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            pass
        # Perform NNRP authentication if needed.
        if user:
            resp = self.shortcmd("authinfo user " + user)
            if resp[:3] == "381":
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd("authinfo pass " + password)
                    if resp[:3] != "281":
                        raise NNTPPermanentError(resp)
        if readermode_afterauth:
            try:
                self.welcome = self.shortcmd("mode reader")
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass

    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging:
print("*welcome*", repr(self.welcome))
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1:
print("*put*", repr(line))
self.sock.sendall(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging:
print("*cmd*", repr(line))
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print("*get*", repr(line))
if not line:
raise EOFError
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging:
print("*resp*", repr(resp))
c = resp[:1]
if c == "4":
raise NNTPTemporaryError(resp)
if c == "5":
raise NNTPPermanentError(resp)
if c not in "123":
raise NNTPProtocolError(resp)
return resp
def getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, str):
openedFile = file = open(file, "w")
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == ".":
break
if line[:2] == "..":
line = line[1:]
if file:
file.write(line + "\n")
else:
list.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp(file)
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd("NEWGROUPS " + date + " " + time, file)
def newnews(self, group, date, time, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of message ids"""
cmd = "NEWNEWS " + group + " " + date + " " + time
return self.longcmd(cmd, file)
def list(self, file=None):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd("LIST", file)
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
resp, lines = self.descriptions(group)
if len(lines) == 0:
return ""
else:
return lines[0][1]
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, raw_lines = self.longcmd("LIST NEWSGROUPS " + group_pattern)
if resp[:3] != "215":
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, raw_lines = self.longcmd("XGTITLE " + group_pattern)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd("GROUP " + name)
if resp[:3] != "211":
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self, file=None):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd("HELP", file)
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != "22":
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ""
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the message id"""
return self.statcmd("STAT " + id)
def __next__(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd("NEXT")
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd("LAST")
def artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line, file)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd("HEAD " + id)
def body(self, id, file=None):
"""Process a BODY command. Argument:
- id: article number or message id
- file: Filename string or file object to store the article in
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body or an empty list
if file was used"""
return self.artcmd("BODY " + id, file)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd("ARTICLE " + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd("SLAVE")
def xhdr(self, hdr, str, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile("^([0-9]+) ?(.*)\n?")
resp, lines = self.longcmd("XHDR " + hdr + " " + str, file)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self, start, end, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd("XOVER " + start + "-" + end, file)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append(
(
elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7],
)
)
except IndexError:
raise NNTPDataError(line)
return resp, xover_lines
def xgtitle(self, group, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd("XGTITLE " + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != "223":
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != "111":
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd("POST")
# Raises error_??? if posting is not allowed
if resp[0] != "3":
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == "\n":
line = line[:-1]
if line[:1] == ".":
line = "." + line
self.putline(line)
self.putline(".")
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd("IHAVE " + id)
# Raises error_??? if the server already has it
if resp[0] != "3":
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == "\n":
line = line[:-1]
if line[:1] == ".":
line = "." + line
self.putline(line)
self.putline(".")
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd("QUIT")
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == "__main__":
    import os

    # Use the server named in $NNTPSERVER if set (slrn convention),
    # otherwise fall back to a local server called 'news'.
    # Bug fix: the original `"news" and os.environ["NNTPSERVER"]` always
    # evaluated the environment lookup and raised KeyError when the
    # variable was unset, so the 'news' fallback never applied.
    newshost = os.environ.get("NNTPSERVER", "news")
    # An unqualified (dotless) host name implies a local server: turn on
    # reader mode so reader-specific commands like 'group' work.
    if newshost.find(".") == -1:
        mode = "readermode"
    else:
        mode = None
    s = NNTP(newshost, readermode=mode)
    resp, count, first, last, name = s.group("comp.lang.python")
    print(resp)
    print("Group", name, "has", count, "articles, range", first, "to", last)
    resp, subs = s.xhdr("subject", first + "-" + last)
    print(resp)
    for item in subs:
        print("%7s %s" % item)
    resp = s.quit()
    print(resp)
|
#
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [
    "match",
    "search",
    "sub",
    "subn",
    "split",
    "findall",
    "compile",
    "purge",
    "template",
    "escape",
    "I",
    "L",
    "M",
    "S",
    "X",
    "U",
    "IGNORECASE",
    "LOCALE",
    "MULTILINE",
    "DOTALL",
    "VERBOSE",
    "UNICODE",
    "error",
]
# NOTE: "finditer" is appended to __all__ below, behind a version check.
__version__ = "2.2.1"
# flags
# Each flag is bound under both its one-letter and its long name.
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE  # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE  # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE  # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE  # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL  # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE  # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE  # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG  # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Apply the pattern at the start of the string; return a match
    object, or None when the beginning of the string does not match."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan through the string for the first place where the pattern
    matches; return a match object, or None if no position matches."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string with repl.
    repl may be a string (backslash escapes are processed) or a
    callable that receives the match object and returns the
    replacement text."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
    """Return the 2-tuple (new_string, number_of_subs_made).
    new_string is string with the leftmost non-overlapping occurrences
    of the pattern replaced by repl; repl may be a string (backslash
    escapes are processed) or a callable that receives the match object
    and returns the replacement text."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
    """Split string on each occurrence of pattern and return the list
    of resulting substrings."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of every non-overlapping match of pattern in
    string.  With one group in the pattern the list holds group texts;
    with several groups it holds tuples of group texts.  Empty matches
    are included in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# finditer() appeared in Python 2.2, hence the version guard; on any
# modern interpreter the condition is always true.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string. For each match, the iterator returns a match object.
        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    "Compile a regular expression pattern into a pattern object."
    compiled = _compile(pattern, flags)
    return compiled
def purge():
    "Empty both internal caches (compiled patterns and templates)."
    for cache in (_cache, _cache_repl):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern (TEMPLATE flag forced on)."
    return _compile(pattern, T | flags)
# Characters that never need escaping inside a pattern.
_alphanum = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")


def escape(pattern):
    "Return pattern with every non-alphanumeric character backslashed."
    pieces = []
    for ch in pattern:
        if ch in _alphanum:
            pieces.append(ch)
        elif ch == "\000":
            # A NUL byte cannot be escaped with a plain backslash.
            pieces.append("\\000")
        else:
            pieces.append("\\" + ch)
    return pattern[:0].join(pieces)
# --------------------------------------------------------------------
# internals
# Compiled-pattern cache: (type, pattern, flags) -> pattern object.
_cache = {}
# Replacement-template cache: (repl, pattern) -> parsed template.
_cache_repl = {}
# Concrete type of compiled patterns (not importable directly).
_pattern_type = type(sre_compile.compile("", 0))
# A cache that reaches this size is cleared wholesale.
_MAXCACHE = 100
def _compile(*key):
    # internal: look up or build the compiled form of (pattern, flags)
    cachekey = (type(key[0]),) + key
    cached = _cache.get(cachekey)
    if cached is not None:
        return cached
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        # Already compiled; flags would be silently ignored, so refuse them.
        if flags:
            raise ValueError("Cannot process flags argument with a compiled pattern")
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError("first argument must be string or compiled pattern")
    try:
        compiled = sre_compile.compile(pattern, flags)
    except error as v:
        raise error(v)  # invalid expression
    if len(_cache) >= _MAXCACHE:
        # Crude eviction: drop everything rather than track usage order.
        _cache.clear()
    _cache[cachekey] = compiled
    return compiled
def _compile_repl(*key):
    # internal: look up or parse the replacement template for (repl, pattern)
    cached = _cache_repl.get(key)
    if cached is not None:
        return cached
    repl, pattern = key
    try:
        parsed = sre_parse.parse_template(repl, pattern)
    except error as v:
        raise error(v)  # invalid expression
    if len(_cache_repl) >= _MAXCACHE:
        # Same wholesale eviction policy as the pattern cache.
        _cache_repl.clear()
    _cache_repl[key] = parsed
    return parsed
def _expand(pattern, match, template):
    # internal: hook backing match.expand
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: helper for pattern.sub/subn
    template = _compile_repl(template, pattern)
    groups, literals = template
    if not groups and len(literals) == 1:
        # No group references: the replacement is a plain literal string.
        return literals[0]

    def filter(match, template=template):
        # Bind the parsed template via a default argument.
        return sre_parse.expand_template(template, match)

    return filter
# register myself for pickling
import copyreg
def _pickle(p):
    # Reduce a compiled pattern to (_compile, (pattern, flags)) so that
    # unpickling simply recompiles it.
    return _compile, (p.pattern, p.flags)
copyreg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    """Experimental lexical scanner: the (pattern, action) pairs of the
    lexicon are combined into a single alternation, and scan() applies
    the action of whichever phrase matches at each position."""
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Wrap each phrase in its own numbered group so that
            # m.lastindex identifies which lexicon entry matched.
            p.append(
                sre_parse.SubPattern(
                    s,
                    [
                        (SUBPATTERN, (len(p) + 1, sre_parse.parse(phrase, flags))),
                    ],
                )
            )
        s.groups = len(p) + 1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Tokenize string; return (list of action results, unmatched rest)."""
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # Zero-width match: stop rather than loop forever.
                break
            action = self.lexicon[m.lastindex - 1][1]
            if hasattr(action, "__call__"):
                # Callable action: invoke it with the scanner and the text.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
|
#
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
    # Decode the first four bytes/chars of c as a big-endian 32-bit value.
    b0, b1, b2, b3 = ord(c[0]), ord(c[1]), ord(c[2]), ord(c[3])
    return b3 + (b2 << 8) + (b1 << 16) + (b0 << 24)
def _accept(prefix):
    # GIMP brush header: header size >= 20 and file-format version == 1.
    header_size = i32(prefix)
    version = i32(prefix[4:8])
    return header_size >= 20 and version == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
    # Identification strings used by the PIL plugin registry.
    format = "GBR"
    format_description = "GIMP brush file"
    def _open(self):
        # Header: consecutive big-endian 32-bit fields (see i32 above).
        header_size = i32(self.fp.read(4))
        version = i32(self.fp.read(4))
        if header_size < 20 or version != 1:
            raise SyntaxError("not a GIMP brush")
        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        # NOTE(review): 'bytes' shadows the builtin; only 1 byte/pixel
        # (greyscale) brushes are accepted.
        bytes = i32(self.fp.read(4))
        if width <= 0 or height <= 0 or bytes != 1:
            raise SyntaxError("not a GIMP brush")
        # Rest of the header is a NUL-terminated comment; drop the NUL.
        comment = self.fp.read(header_size - 20)[:-1]
        self.mode = "L"
        self.size = width, height
        self.info["comment"] = comment
        # Since the brush is so small, we read the data immediately
        self.data = self.fp.read(width * height)
    def load(self):
        # Idempotent: once the data has been consumed there is nothing to do.
        if not self.data:
            return
        # create an image out of the brush data block
        self.im = Image.core.new(self.mode, self.size)
        self.im.fromstring(self.data)
        self.data = ""
#
# registry
# Register the plugin so Image.open() recognizes .gbr files.
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
|
#
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
from PyQt4.QtGui import QImage, qRgb
##
# (Internal) Turns an RGB color into a Qt compatible color integer.
def rgb(r, g, b):
    """Pack an RGB triple into a Qt-compatible color integer."""
    # qRgb() packs the channels; mask off alpha and shift the result
    # into the negative range so it has the bit pattern Qt expects.
    packed = qRgb(r, g, b) & 0xFFFFFF
    return packed - 0x1000000
##
# An PIL image wrapper for Qt. This is a subclass of PyQt4's QImage
# class.
#
# @param im A PIL Image object, or a file name (given either as Python
# string or a PyQt string object).
class ImageQt(QImage):
    """PIL image wrapper for Qt: builds a QImage from a PIL Image object
    or from a file name (Python string or PyQt string object)."""
    def __init__(self, im):
        # Raw pixel buffer for QImage; stays None for "1"/"L"/"P" modes,
        # which fall back to im.tostring() below.
        data = None
        colortable = None
        # handle filename, if given instead of image name
        if hasattr(im, "toUtf8"):
            # FIXME - is this really the best way to do this?
            im = str(im.toUtf8(), "utf-8")
        if Image.isStringType(im):
            im = Image.open(im)
        if im.mode == "1":
            format = QImage.Format_Mono
        elif im.mode == "L":
            format = QImage.Format_Indexed8
            # Greyscale ramp palette.
            colortable = []
            for i in range(256):
                colortable.append(rgb(i, i, i))
        elif im.mode == "P":
            format = QImage.Format_Indexed8
            # Convert the flat PIL palette (r,g,b,r,g,b,...) to Qt colors.
            colortable = []
            palette = im.getpalette()
            for i in range(0, len(palette), 3):
                colortable.append(rgb(*palette[i : i + 3]))
        elif im.mode == "RGB":
            data = im.tostring("raw", "BGRX")
            format = QImage.Format_RGB32
        elif im.mode == "RGBA":
            try:
                data = im.tostring("raw", "BGRA")
            except SystemError:
                # workaround for earlier versions
                r, g, b, a = im.split()
                im = Image.merge("RGBA", (b, g, r, a))
            format = QImage.Format_ARGB32
        else:
            raise ValueError("unsupported image mode %r" % im.mode)
        # must keep a reference, or Qt will crash!
        self.__data = data or im.tostring()
        QImage.__init__(self, self.__data, im.size[0], im.size[1], format)
        if colortable:
            self.setColorTable(colortable)
|
#
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"  # SunImagePlugin version
import Image, ImageFile, ImagePalette
def i16(c):
    # Decode the first two bytes/chars of c as a big-endian 16-bit value.
    hi, lo = ord(c[0]), ord(c[1])
    return lo + (hi << 8)
def i32(c):
    # Decode the first four bytes/chars of c as a big-endian 32-bit value.
    b0, b1, b2, b3 = ord(c[0]), ord(c[1]), ord(c[2]), ord(c[3])
    return b3 + (b2 << 8) + (b1 << 16) + (b0 << 24)
def _accept(prefix):
    # Sun raster files begin with the big-endian magic number 0x59A66A95.
    magic = i32(prefix)
    return magic == 0x59A66A95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
    """Image plugin for Sun raster files (1-, 8- and 24-bit depths,
    plain or RLE-compressed)."""

    format = "SUN"
    format_description = "Sun Raster File"

    def _open(self):
        # HEAD: eight big-endian 32-bit words.
        s = self.fp.read(32)
        if i32(s) != 0x59A66A95:
            raise SyntaxError("not an SUN raster file")
        offset = 32
        self.size = i32(s[4:8]), i32(s[8:12])
        depth = i32(s[12:16])
        if depth == 1:
            self.mode, rawmode = "1", "1;I"
        elif depth == 8:
            self.mode = rawmode = "L"
        elif depth == 24:
            self.mode, rawmode = "RGB", "BGR"
        else:
            raise SyntaxError("unsupported mode")
        compression = i32(s[20:24])
        if i32(s[24:28]) != 0:
            # A colour map follows the header; load it as the palette and
            # advance the pixel-data offset past it.
            length = i32(s[28:32])
            offset = offset + length
            self.palette = ImagePalette.raw("RGB;L", self.fp.read(length))
            if self.mode == "L":
                self.mode = rawmode = "P"
        # Rows are padded to a 32-bit boundary.  Bug fix: use floor
        # division -- with true division (/) the expression produces a
        # float, and `float & int` raises TypeError on Python 3.
        stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3)
        if compression == 1:
            self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))]
        elif compression == 2:
            self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)]
#
# registry
# Register the plugin so Image.open() recognizes .ras files.
Image.register_open("SUN", SunImageFile, _accept)
Image.register_extension("SUN", ".ras")
|
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group of constants defines certificate requirements that one
side is allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import (
SSL_ERROR_ZERO_RETURN,
SSL_ERROR_WANT_READ,
SSL_ERROR_WANT_WRITE,
SSL_ERROR_WANT_X509_LOOKUP,
SSL_ERROR_SYSCALL,
SSL_ERROR_SSL,
SSL_ERROR_WANT_CONNECT,
SSL_ERROR_EOF,
SSL_ERROR_INVALID_ERROR_CODE,
)
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
# Human-readable names for the protocol constants.
_PROTOCOL_NAMES = {
    PROTOCOL_TLSv1: "TLSv1",
    PROTOCOL_SSLv23: "SSLv23",
    PROTOCOL_SSLv3: "SSLv3",
}
# SSLv2 support is compile-time optional in OpenSSL; expose a sentinel so
# the rest of the module can test for its availability.
try:
    from _ssl import PROTOCOL_SSLv2
    _SSLv2_IF_EXISTS = PROTOCOL_SSLv2
except ImportError:
    _SSLv2_IF_EXISTS = None
else:
    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
from socket import socket, _fileobject, _delegate_methods, error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Applied in SSLSocket.__init__ unless the caller passes ciphers explicitly.
_DEFAULT_CIPHERS = "DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2"
class SSLSocket(socket):
    """This class implements a subtype of socket.socket that wraps
    the underlying OS socket in an SSL context when necessary, and
    provides read and write methods over that channel."""
    def __init__(
        self,
        sock,
        keyfile=None,
        certfile=None,
        server_side=False,
        cert_reqs=CERT_NONE,
        ssl_version=PROTOCOL_SSLv23,
        ca_certs=None,
        do_handshake_on_connect=True,
        suppress_ragged_eofs=True,
        ciphers=None,
    ):
        # Take over the low-level _sock of the plain socket we are wrapping.
        socket.__init__(self, _sock=sock._sock)
        # The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
        # methods defined in SSLSocket.
        for attr in _delegate_methods:
            try:
                delattr(self, attr)
            except AttributeError:
                pass
        # Apply the hardened default cipher string unless the caller chose
        # one; SSLv2 is exempt because its handshake needs other ciphers.
        if ciphers is None and ssl_version != _SSLv2_IF_EXISTS:
            ciphers = _DEFAULT_CIPHERS
        # OpenSSL allows the private key to live in the certificate file.
        if certfile and not keyfile:
            keyfile = certfile
        # see if it's connected
        try:
            socket.getpeername(self)
        except socket_error as e:
            if e.errno != errno.ENOTCONN:
                raise
            # no, no connection yet
            self._connected = False
            self._sslobj = None
        else:
            # yes, create the SSL object
            self._connected = True
            self._sslobj = _ssl.sslwrap(
                self._sock,
                server_side,
                keyfile,
                certfile,
                cert_reqs,
                ssl_version,
                ca_certs,
                ciphers,
            )
            if do_handshake_on_connect:
                self.do_handshake()
        # Remember the configuration so accept()/connect() can wrap new
        # sockets identically.
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.ciphers = ciphers
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Count of file objects handed out by makefile(); close() defers the
        # real close until this drops below 1.
        self._makefile_refs = 0
    def read(self, len=1024):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        try:
            return self._sslobj.read(len)
        except SSLError as x:
            # A peer closing without a proper SSL shutdown surfaces as
            # SSL_ERROR_EOF; report a plain EOF when suppression is on.
            if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return ""
            else:
                raise
    def write(self, data):
        """Write DATA to the underlying SSL channel. Returns
        number of bytes of DATA actually transmitted."""
        return self._sslobj.write(data)
    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        return self._sslobj.peer_certificate(binary_form)
    def cipher(self):
        """Return the SSL layer's description of the session cipher, or
        None when no SSL channel is established."""
        if not self._sslobj:
            return None
        else:
            return self._sslobj.cipher()
    def send(self, data, flags=0):
        """Send DATA over the channel.  In SSL mode flags must be 0, and a
        return value of 0 means the SSL layer wants more I/O first (it is
        not an error; retry the send)."""
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s"
                    % self.__class__
                )
            while True:
                try:
                    v = self._sslobj.write(data)
                except SSLError as x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        return 0
                    elif x.args[0] == SSL_ERROR_WANT_WRITE:
                        return 0
                    else:
                        raise
                else:
                    return v
        else:
            return self._sock.send(data, flags)
    def sendto(self, data, flags_or_addr, addr=None):
        """sendto() is only meaningful on the plain (non-SSL) socket."""
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" % self.__class__)
        elif addr is None:
            return self._sock.sendto(data, flags_or_addr)
        else:
            return self._sock.sendto(data, flags_or_addr, addr)
    def sendall(self, data, flags=0):
        """Send all of DATA, looping over send() until every byte is out."""
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s"
                    % self.__class__
                )
            amount = len(data)
            count = 0
            while count < amount:
                v = self.send(data[count:])
                count += v
            return amount
        else:
            return socket.sendall(self, data, flags)
    def recv(self, buflen=1024, flags=0):
        """Receive up to buflen bytes (flags unsupported in SSL mode)."""
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s"
                    % self.__class__
                )
            return self.read(buflen)
        else:
            return self._sock.recv(buflen, flags)
    def recv_into(self, buffer, nbytes=None, flags=0):
        """Receive into a writable buffer; return the number of bytes read."""
        # Default nbytes to the buffer size, or 1024 if the buffer is empty.
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s"
                    % self.__class__
                )
            # The SSL layer has no readinto; read then copy.
            tmp_buffer = self.read(nbytes)
            v = len(tmp_buffer)
            buffer[:v] = tmp_buffer
            return v
        else:
            return self._sock.recv_into(buffer, nbytes, flags)
    def recvfrom(self, buflen=1024, flags=0):
        """recvfrom() is only meaningful on the plain (non-SSL) socket."""
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" % self.__class__)
        else:
            return self._sock.recvfrom(buflen, flags)
    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        """recvfrom_into() is only meaningful on the plain (non-SSL) socket."""
        if self._sslobj:
            raise ValueError(
                "recvfrom_into not allowed on instances of %s" % self.__class__
            )
        else:
            return self._sock.recvfrom_into(buffer, nbytes, flags)
    def pending(self):
        """Return the number of bytes the SSL layer can deliver without
        touching the network (0 when no SSL channel is established)."""
        if self._sslobj:
            return self._sslobj.pending()
        else:
            return 0
    def unwrap(self):
        """Perform the SSL shutdown handshake and detach the SSL object,
        returning what the shutdown yields; raise if there is no wrapper."""
        if self._sslobj:
            s = self._sslobj.shutdown()
            self._sslobj = None
            return s
        else:
            raise ValueError("No SSL wrapper around " + str(self))
    def shutdown(self, how):
        """Shut down the transport; the SSL object is dropped first."""
        self._sslobj = None
        socket.shutdown(self, how)
    def close(self):
        """Close the socket, deferring while makefile() objects remain."""
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1
    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        self._sslobj.do_handshake()
    def _real_connect(self, addr, return_errno):
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call. We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = _ssl.sslwrap(
            self._sock,
            False,
            self.keyfile,
            self.certfile,
            self.cert_reqs,
            self.ssl_version,
            self.ca_certs,
            self.ciphers,
        )
        try:
            if return_errno:
                # connect_ex() reports failure via an errno return value.
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                if self.do_handshake_on_connect:
                    self.do_handshake()
                self._connected = True
            return rc
        except socket_error:
            # Connection failed: drop the SSL object so the socket can be
            # used for another attempt.
            self._sslobj = None
            raise
    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)
    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)
    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        newsock, addr = socket.accept(self)
        try:
            return (
                SSLSocket(
                    newsock,
                    keyfile=self.keyfile,
                    certfile=self.certfile,
                    server_side=True,
                    cert_reqs=self.cert_reqs,
                    ssl_version=self.ssl_version,
                    ca_certs=self.ca_certs,
                    ciphers=self.ciphers,
                    do_handshake_on_connect=self.do_handshake_on_connect,
                    suppress_ragged_eofs=self.suppress_ragged_eofs,
                ),
                addr,
            )
        except socket_error as e:
            # Never leak the accepted socket when wrapping fails.
            newsock.close()
            raise e
    def makefile(self, mode="r", bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection. Just use the code
        from the socket module."""
        self._makefile_refs += 1
        # close=True so as to decrement the reference count when done with
        # the file-like object.
        return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(
    sock,
    keyfile=None,
    certfile=None,
    server_side=False,
    cert_reqs=CERT_NONE,
    ssl_version=PROTOCOL_SSLv23,
    ca_certs=None,
    do_handshake_on_connect=True,
    suppress_ragged_eofs=True,
    ciphers=None,
):
    """Wrap an existing socket in an SSLSocket, forwarding every option
    through unchanged (convenience front-end for the SSLSocket class)."""
    options = dict(
        keyfile=keyfile,
        certfile=certfile,
        server_side=server_side,
        cert_reqs=cert_reqs,
        ssl_version=ssl_version,
        ca_certs=ca_certs,
        do_handshake_on_connect=do_handshake_on_connect,
        suppress_ragged_eofs=suppress_ragged_eofs,
        ciphers=ciphers,
    )
    return SSLSocket(sock, **options)
# some utility functions
def cert_time_to_seconds(cert_time):
    """Takes a date-time string in standard ASN1_print form
    ("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
    a Python time value in seconds past the epoch.

    Bug fix: the timestamp is explicitly GMT, so it must be converted
    with calendar.timegm() (UTC); the previous time.mktime() call
    interpreted the parsed struct_time in the *local* timezone, skewing
    the result by the local UTC offset."""
    import time
    import calendar
    return calendar.timegm(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
# Standard PEM armour lines for X.509 certificates.
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
    """Takes a certificate in binary DER format and returns the
    PEM version of it as a string."""
    if hasattr(base64, "standard_b64encode"):
        # preferred because the older API gets the line length wrong
        encoded = base64.standard_b64encode(der_cert_bytes)
        body = textwrap.fill(encoded, 64)
        return "%s\n%s\n%s\n" % (PEM_HEADER, body, PEM_FOOTER)
    # legacy fallback: encodestring() already appends a trailing newline
    return "%s\n%s%s\n" % (PEM_HEADER, base64.encodestring(der_cert_bytes), PEM_FOOTER)
def PEM_cert_to_DER_cert(pem_cert_string):
    """Takes a certificate in ASCII PEM format and returns the
    DER-encoded version of it as a byte sequence"""
    stripped = pem_cert_string.strip()
    if not pem_cert_string.startswith(PEM_HEADER):
        raise ValueError("Invalid PEM encoding; must start with %s" % PEM_HEADER)
    if not stripped.endswith(PEM_FOOTER):
        raise ValueError("Invalid PEM encoding; must end with %s" % PEM_FOOTER)
    # Everything between the armour lines is the base64 payload.
    payload = stripped[len(PEM_HEADER) : -len(PEM_FOOTER)]
    return base64.decodestring(payload)
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""
    host, port = addr
    # Only validate the peer when the caller supplied trust anchors.
    cert_reqs = CERT_REQUIRED if ca_certs is not None else CERT_NONE
    conn = wrap_socket(
        socket(), ssl_version=ssl_version, cert_reqs=cert_reqs, ca_certs=ca_certs
    )
    conn.connect(addr)
    der_cert = conn.getpeercert(True)
    conn.close()
    return DER_cert_to_PEM_cert(der_cert)
def get_protocol_name(protocol_code):
    """Map a PROTOCOL_* constant to its display name ("<unknown>" if absent)."""
    try:
        return _PROTOCOL_NAMES[protocol_code]
    except KeyError:
        return "<unknown>"
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
    """A replacement for the old socket.ssl function. Designed
    for compability with Python 2.5 and earlier. Will disappear in
    Python 3.0."""
    # Unwrap down to the low-level socket if we were handed a wrapper.
    raw = getattr(sock, "_sock", sock)
    wrapped = _ssl.sslwrap(raw, 0, keyfile, certfile, CERT_NONE, PROTOCOL_SSLv23, None)
    try:
        raw.getpeername()
    except socket_error:
        # not connected yet -- the handshake will happen later
        pass
    else:
        # already connected, so handshake immediately
        wrapped.do_handshake()
    return wrapped
|
"""Main entry point"""
import sys
if sys.argv[0].endswith("__main__.py"):
sys.argv[0] = "python -m unittest"
__unittest = True
from .main import main, TestProgram, USAGE_AS_MAIN
TestProgram.USAGE = USAGE_AS_MAIN
main(module=None)
|
import argparse
from collections import namedtuple
import Image, ImageDraw
import sys
# Axis-aligned rectangle: origin (x, y) plus width/height.
# Bug fix: the namedtuple typename was "Bbox", which disagreed with the
# bound name (breaks repr() and pickling by name); make them match.
BBox = namedtuple("BBox", "x y w h")
# The two groups of regions found in an image: translucent and opaque.
Regions = namedtuple("Regions", "alpha, opaque")
class BlockType:
    """Classification codes for rectangular image regions (see Analyze)."""

    OPAQUE = 1   # every pixel fully opaque (alpha == 255)
    EMPTY = 2    # every pixel fully transparent (alpha == 0)
    ALPHA = 3    # mixture of transparencies
    UNKNOWN = 4  # not yet analyzed
def BlockName(block):
    """Return a human-readable label for a BlockType code."""
    labels = {
        BlockType.OPAQUE: "Opaque",
        BlockType.EMPTY: "Empty",
        BlockType.ALPHA: "Alpha",
    }
    return labels.get(block, "Unknown")
def Analyze(im, bbox):
    """Classify the pixels of *im* inside *bbox*.

    Returns BlockType.OPAQUE when every pixel has alpha 255,
    BlockType.EMPTY when every pixel has alpha 0, and BlockType.ALPHA
    for any mixture or partial transparency.
    """
    pixels = im.load()
    blank = opaque = partial = 0
    for row in range(bbox.y, bbox.y + bbox.h):
        for col in range(bbox.x, bbox.x + bbox.w):
            alpha = pixels[col, row][3]
            if alpha == 0:
                blank += 1
            elif alpha == 255:
                opaque += 1
            else:
                partial += 1
    if partial == 0 and blank == 0:
        return BlockType.OPAQUE
    if partial == 0 and opaque == 0:
        return BlockType.EMPTY
    return BlockType.ALPHA
class QuadTreeNode:
    """Node of a quadtree classifying rectangular regions of an RGBA image.

    A node either carries a definite classification in ``type`` (a
    BlockType code) or is split into four ``children`` covering its
    quadrants.  Construction analyzes eagerly and recursively.
    """

    # Bug fix: a dead ``def __init__(self): pass`` stub used to precede
    # this constructor; it was shadowed immediately and has been removed.
    def __init__(self, im, bbox=None, depth=0, blocksize=32):
        """Analyze *im* inside *bbox* (the whole image when None),
        splitting recursively until a region is uniform or its halves
        would fall below *blocksize* pixels."""
        if bbox is None:
            bbox = BBox(0, 0, im.size[0], im.size[1])
        self.blocksize = blocksize
        self.depth = depth
        self.bbox = bbox
        self.im = im
        self.children = None
        self.type = BlockType.UNKNOWN
        if bbox.w > self.blocksize and bbox.h > self.blocksize:
            # if we are far enough up the tree to consider splitting:
            # generate the child bboxes (// makes the Python 2 integer
            # division explicit and keeps the code Python 3 safe)
            childboxes = [
                BBox(bbox.x, bbox.y, bbox.w // 2, bbox.h // 2),
                BBox(bbox.x + bbox.w // 2, bbox.y, bbox.w - bbox.w // 2, bbox.h // 2),
                BBox(bbox.x, bbox.y + bbox.h // 2, bbox.w // 2, bbox.h - bbox.h // 2),
                BBox(
                    bbox.x + bbox.w // 2,
                    bbox.y + bbox.h // 2,
                    bbox.w - bbox.w // 2,
                    bbox.h - bbox.h // 2,
                ),
            ]
            # figure out the child image types
            childtypes = [Analyze(im, box) for box in childboxes]
            same_type = childtypes[0] == childtypes[1] == childtypes[2] == childtypes[3]
            last_div = bbox.w // 2 < self.blocksize or bbox.h // 2 < self.blocksize
            if same_type and (childtypes[0] != BlockType.ALPHA or last_div):
                # stop iterating, we've hit the bottom
                self.type = childtypes[0]
            else:
                # otherwise, split up the children
                self.children = tuple(
                    QuadTreeNode(im, box, depth + 1, self.blocksize)
                    for box in childboxes
                )
                if (
                    self.children
                    and len(self.children) == 4
                    and self.children[0].type
                    == self.children[1].type
                    == self.children[2].type
                    == self.children[3].type
                    == BlockType.ALPHA
                ):
                    # all four children are plain alpha -- collapse them
                    self.children = None
                    self.type = BlockType.ALPHA
        else:
            self.type = Analyze(im, bbox)

    def __repr__(self):
        if self.children is None:
            return "\t" * self.depth + BlockName(self.type) + " " + str(self.bbox)
        else:
            l = [str(x) for x in self.children]
            return "\t" * self.depth + "->\n" + "\n".join(l)

    def printme(self):
        """Print this node and, recursively, its children (debug helper)."""
        print("\t" * self.depth + BlockName(self.type) + " " + str(self.bbox))
        if self.children:
            for child in self.children:
                child.printme()

    def GetBBox(self, fn):
        """Return the bboxes of every leaf node for which fn(node) is true."""
        if self.children is not None:
            ret = []
            for child in self.children:
                op = child.GetBBox(fn)
                if op is not None:
                    ret.extend(op)
            if len(ret) > 0:
                return ret
        elif fn(self):
            return [self.bbox]
        return []
# fix up the list so that adjacent similarly sized images are joined
def doopt(orig):
    """Run optlist() to a fixed point: stop when a pass merges nothing."""
    current = orig
    while True:
        merged = optlist(current)
        if len(merged) == len(current):
            return merged
        current = merged
def optlist(orig):
    """Single merge pass: join each box with at most one previously seen
    box that shares an edge and matching dimensions (vertical neighbours
    first, then horizontal)."""
    result = []
    for box in orig:
        merged = None
        for candidate in result:
            same_column = candidate.w == box.w and candidate.x == box.x
            vert_adjacent = (candidate.y + candidate.h == box.y
                             or box.y + box.h == candidate.y)
            if same_column and vert_adjacent:
                merged = BBox(candidate.x, min(box.y, candidate.y),
                              candidate.w, box.h + candidate.h)
                break
            same_row = candidate.h == box.h and candidate.y == box.y
            horiz_adjacent = (candidate.x + candidate.w == box.x
                              or box.x + box.w == candidate.x)
            if same_row and horiz_adjacent:
                merged = BBox(min(box.x, candidate.x), candidate.y,
                              candidate.w + box.w, box.h)
                break
        if merged is None:
            result.append(box)
        else:
            # replace the partner with the combined box
            result.remove(candidate)
            result.append(merged)
    return result
def GetImageRegions(img, blocksize=16):
    """Build a quadtree over *img* and return its merged regions as a
    Regions(alpha, opaque) pair."""
    root = QuadTreeNode(img, None, 0, blocksize)

    def merged_boxes(wanted):
        # largest-area-first ordering feeds the merge pass
        boxes = sorted(
            root.GetBBox(lambda node: node.type == wanted),
            key=lambda box: box.w * box.h,
            reverse=True,
        )
        return doopt(boxes)

    opaque = merged_boxes(BlockType.OPAQUE)
    alpha = merged_boxes(BlockType.ALPHA)
    return Regions(alpha, opaque)
|
from os import path as op
# Absolute path three dirname() levels up from this file.
ROOTDIR = op.abspath(op.dirname(op.dirname(op.dirname(__file__))))
|
from aioauth_client import *
import pytest
import asyncio
# NOTE(review): this test module appears to be lossy decompiler output --
# decorator arguments, client constructors and awaited calls were all
# stripped to bare parentheses, so the original behavior cannot be
# recovered from this view. Kept verbatim; restore from the upstream
# aioauth-client test suite before relying on it.
@()
def loop():
    return ()
def test_oauth1(loop):
    twitter = ()
    if (not twitter):
        raise ()
    if (not ('twitter' in ClientRegistry.clients)):
        raise ()
    coro = ()
    (rtoken, rsecret) = (coro)
    if (not rtoken):
        raise ()
    if (not rsecret):
        raise ()
    if (not (twitter.oauth_token == rtoken)):
        raise ()
    if (not (twitter.oauth_token_secret == rsecret)):
        raise ()
    url = ()
    if (not (url == ('https://api.twitter.com/oauth/authorize?oauth_token=%s' % rtoken))):
        raise ()
    coro = ('wrong', rtoken)
    with (web.HTTPBadRequest):
        (coro)
def test_oauth2(loop):
    github = ()
    if (not github):
        raise ()
    if (not ('github' in ClientRegistry.clients)):
        raise ()
    if (not ()):
        raise ()
    coro = ('000')
    with (web.HTTPBadRequest):
        (coro)
#!/usr/bin/env python
import re
from os import path as op
from setuptools import setup
def _read(fname):
try:
return open(op.join(op.dirname(__file__), fname)).read()
except IOError:
return ""
# Pull license/version out of the package source so each is defined in
# exactly one place.
_meta = _read("graphite_beacon/__init__.py")
_license = re.search(r'^__license__\s*=\s*"(.*)"', _meta, re.M).group(1)
_version = re.search(r'^__version__\s*=\s*"(.*)"', _meta, re.M).group(1)
# Runtime dependencies: every non-empty, non-comment line of requirements.txt.
install_requires = [
    l for l in _read("requirements.txt").split("\n") if l and not l.startswith("#")
]
setup(
    name="graphite_beacon",
    version=_version,
    license=_license,
    description=_read("DESCRIPTION"),
    long_description=_read("README.md"),
    platforms=("Any"),
    keywords="graphite alerts monitoring system".split(),  # noqa
    author="Kirill Klenov",
    author_email="horneds@gmail.com",
    url="http://github.com/klen/graphite-beacon",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Natural Language :: Russian",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Testing",
        "Topic :: Utilities",
    ],
    packages=["graphite_beacon"],
    include_package_data=True,
    install_requires=install_requires,
    entry_points={"console_scripts": ["graphite-beacon = graphite_beacon.app:run"]},
)
|
import datetime as dt
import pytest
from peewee import *
# NOTE(review): the remainder of this file appears to be lossy decompiler
# output -- peewee field constructors, mixer.blend() calls and assertion
# helpers were stripped to bare parentheses, so the original logic cannot
# be recovered from this view. Kept verbatim; restore from the upstream
# mixer test suite before use.
db = (':memory:')
class Person(Model):
    name = ()
    status = ()
    created = ()
    birthday = ()
    is_relative = ()
    class Meta():
        database = db
class Pet(Model):
    owner = (Person)
    name = ()
    animal_type = ()
    class Meta():
        database = db
()
()
@pytest.fixture
def mixer():
    from mixer.backend.peewee import mixer
    return mixer
def test_mixer(mixer):
    person = (Person)
    if (not person.name):
        raise ()
    if (not person.id):
        raise ()
    if (not person.birthday):
        raise ()
    if (not (person.status in ('user', 'moderator', 'admin'))):
        raise ()
    pet = (Pet)
    if (not pet.name):
        raise ()
    if (not pet.animal_type):
        raise ()
    if (not pet.owner):
        raise ()
    with ():
        person = (Person)
        if (not person.id):
            raise ()
def test_guard(mixer):
    person = (Person)
    person2 = (Person)
    if (not (person.id == person2.id)):
        raise ()
def test_reload(mixer):
    person = (Person)
    person.name = 'wrong'
    person = (person)
    if (not (person.name == 'true')):
        raise ()
def test_select(mixer):
    person = (Person)
    pet = (Pet)
    if (not (person == pet.owner)):
        raise ()
import re
# Public API of this module.
__all__ = (
    "QueryInfo",
    "QueryReadStoreInfo",
    "JsonRestStoreInfo",
    "JsonQueryRestStoreInfo",
)
class QueryInfoFeatures(object):
    """Feature flags describing what the QueryInfo helpers support."""

    sorting = True   # sort extraction is implemented
    paging = False   # paging extraction is not
class QueryInfo(object):
    """Extracts paging/sorting/filter information from a request.

    Usage (is that the right solution?):

        info = QueryInfo(request)
        info.extract()
        queryset = info.process(Object.objects.all())
    """

    # Class-level defaults. ``filters``/``sorting`` remain here for
    # backward compatibility, but each instance gets its own copies in
    # __init__ (see below).
    start = 0
    end = 25
    filters = {}
    sorting = []  # order_by() entries; a leading "-" means descending
    request = None
    max_count = 25

    def __init__(self, request, max_count=None, **kwargs):
        self.request = request
        # Bug fix: ``filters`` and ``sorting`` used to exist only as
        # mutable class attributes, so every instance (and subclass)
        # appended to the same shared dict/list.  Shadow them with
        # per-instance copies.
        self.filters = {}
        self.sorting = []
        if max_count is not None:
            self.max_count = max_count

    def extract(self):
        """Populate start/end, sorting and filters from the request."""
        self.set_paging()
        self.set_sorting()
        self.set_filters()

    def set_paging(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_sorting(self):
        """Needs to be implemented in a subclass"""
        pass

    def set_filters(self):
        """Needs to be implemented in a subclass"""
        pass

    def process(self, queryset):
        """Apply the extracted filters, ordering and slice to *queryset*."""
        # maybe using Django's paginator
        return queryset.filter(**self.filters).order_by(*self.sorting)[
            self.start : self.end
        ]
class QueryReadStoreInfo(QueryInfo):
    """
    A helper to evaluate a request from a dojox.data.QueryReadStore
    and extracting the following information from it:
    - paging
    - sorting
    - filters
    Parameters could be passed within GET or POST.
    """

    def set_paging(self):
        """Read 'start'/'count' from the request, clamping count to max_count."""
        start = self.request[self.request.method].pop("start", 0)
        # TODO: start = 1???
        count = self.request[self.request.method].pop("count", 25)
        # if not is_number(end): # The dojo combobox may return "Infinity" tsss
        # NOTE(review): is_number is not defined in this module view --
        # presumably a helper imported elsewhere in the file; confirm.
        if not is_number(count) or int(count) > self.max_count:
            count = self.max_count
        self.start = int(start)
        self.end = int(start) + int(count)

    def set_sorting(self):
        # REQUEST['sort']:
        # value: -sort_field (descending) / sort_field (ascending)
        sort_attr = self.request[self.request.method].pop("sort", None)
        if sort_attr:
            self.sorting.append(sort_attr)

    def set_filters(self):
        """Treat every remaining request parameter as a field filter."""
        # Bug fix: the parameters used to be collected into a local dict
        # that was immediately discarded, so process() never filtered.
        # Store them on the instance instead.
        for k, v in list(self.request[self.request.method].items()):
            self.filters[k] = v
class JsonRestStoreInfo(QueryReadStoreInfo):
    """
    A helper to evaluate a request from a dojox.data.JsonRestStoreInfo
    and extracting the following information:
    - paging
    - filters
    The paging parameter is passed within the request header "Range".
    Filters are passed via GET (equal to QueryReadStoreInfo).
    Sorting is just possible with JsonQueryReadStoreInfo.
    """

    def set_paging(self):
        # Receiving the following header:
        #     Range: items=0-24
        # Returning: Content-Range: items 0-24/66
        # Bug fix: the headers live on the request object, not on this
        # helper -- ``self.META`` always raised AttributeError.
        # NOTE(review): with a Django request the key is normally
        # "HTTP_RANGE"; confirm which request wrapper is in use.
        if "RANGE" in self.request.META:
            regexp = re.compile(r"^\s*items=(\d+)-(\d+)", re.I)
            match = regexp.match(self.request.META["RANGE"])
            if match:
                start, end = match.groups()
                start, end = (
                    int(start),
                    int(end) + 1,
                )  # range-end means including that element!
                self.start = start
                count = self.max_count
                if end - start < self.max_count:
                    count = end - start
                self.end = start + count

    def set_sorting(self):
        # sorting is not available in the normal JsonRestStore
        pass
class JsonQueryRestStoreInfo(QueryInfo):
    # Parsed pieces of the trailing jsonpath query; any may remain None
    # when that part is absent from the URL.
    jsonpath = None
    jsonpath_filters = None
    jsonpath_sorting = None
    jsonpath_paging = None

    def __init__(self, request, **kwargs):
        """
        Matching the following example jsonpath:
        /path/[?(@.field1='searchterm*'&@.field2='*search*')][/@['field1'],/@['field2']][0:24]
        The last part of the URL will contain a JSONPath-query:
        [filter][sort][start:end:step]
        """
        path = request.path
        if not path.endswith("/"):
            path = path + "/"
        # assuming that a least one /path/ will be before the jsonpath query
        # and that the character [ initiates and ] ends the jsonpath
        # [ will be removed from the start and ] from the end
        match = re.match(r"^/.*/(\[.*\])/$", path)
        if match:
            self.jsonpath = match.groups()[0]
        if self.jsonpath:
            # now we remove the starting [ and ending ] and also splitting it via ][
            parts = self.jsonpath[1:-1].split("][")
            for part in parts:
                if part.startswith("?"):
                    self.jsonpath_filters = part
                elif re.match(r"^[/\\].*$", part):
                    self.jsonpath_sorting = part
                # [start:end:step]
                elif re.match(r"^\d*:\d*:{0,1}\d*$", part):
                    self.jsonpath_paging = part
        super(JsonQueryRestStoreInfo, self).__init__(request, **kwargs)

    def set_paging(self):
        """Parse the [start:end(:step)] part, e.g. "0:24"."""
        # Robustness fix: the URL may carry no paging part at all; the old
        # code passed None to re.match() and raised TypeError.
        if not self.jsonpath_paging:
            return
        match = re.match(r"^(\d*):(\d*):{0,1}\d*$", self.jsonpath_paging)
        if match:
            start, end = match.groups()
            # Bug fix: the original used the JavaScript-ism
            # ``start.length`` / ``end.length``, which raises
            # AttributeError on Python strings; use len() instead.
            if len(start) == 0:
                start = 0
            if len(end) == 0:
                end = int(start) + self.max_count
            start, end = (
                int(start),
                int(end) + 1,
            )  # second argument means the element should be included!
            self.start = start
            count = self.max_count
            if end - start < self.max_count:
                count = end - start
            self.end = start + count

    def set_sorting(self):
        """Parse the sort part, e.g. /@['field1'],/@['field2']."""
        # Robustness fix: guard against a missing sort part (None).
        if not self.jsonpath_sorting:
            return
        for f in self.jsonpath_sorting.split(",/"):
            m = re.match(r"([\\/])@\['(.*)'\]", f)
            if m:
                direction, field = m.groups()
                # Bug fix: the original assigned to an unused local
                # ``descending`` so every field sorted descending.
                # "/" means ascending (no prefix), "\" means descending.
                sort_prefix = "" if direction == "/" else "-"
                self.sorting.append(sort_prefix + field)

    def set_filters(self):
        # handling ?(@.field1='searchterm*'&@.field2~'*search*')
        # TODO: filter parsing is not implemented yet
        pass
|
from django.conf import settings
def access_model(app_name, model_name, request=None, instance=None):
    """
    Return true to allow access to a given instance of app_name.model_name
    """
    # NOTE(review): this function came through a lossy decompile that
    # stripped the call targets (bare parentheses remained).  The calls
    # below are reconstructed from the obvious intent -- getattr on the
    # settings object, str.find / str.split on each ACL entry.  Confirm
    # against upstream dojango before relying on it.
    acl = getattr(settings, 'DOJANGO_DATAGRID_ACCESS', [])
    for x in acl:
        try:
            if x.find('.') > 0:
                # entry of the form "app.model": both parts must match
                (app, model) = x.split('.')
                if ((app_name == app) and (model_name == model)):
                    return True
            # bare entry: matches either the app or the model name
            elif ((app_name == x) or (model_name == x)):
                return True
        except:
            raise
    return False
def access_model_field(app_name, model_name, field_name, request=None, instance=None):
    """
    Return true to allow access of a given field_name to model
    app_name.model_name given a specific object of said model.
    """
    # Internal bookkeeping attributes are never exposed.
    return field_name not in ('delete', '_state')
#!/usr/bin/env python
# Prefer setuptools, falling back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
VERSION = "0.0.1"
setup(
    name="brukva",
    version=VERSION,
    description="Asynchronous Redis client that works within the Tornado IO loop",
    author="Konstantin Merenkov",
    author_email="kmerenkov@gmail.com",
    license="WTFPL",
    url="http://github.com/kmerenkov/brukva",
    keywords=["Redis", "Tornado"],
    packages=["brukva"],
    test_suite="tests.all_tests",
)
|
"""
Copyright 2009 55 Minutes (http://www.55minutes.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
TOP = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />
<title>Test coverage report</title>
<style type="text/css" media="screen">
a
{
color: #3d707a;
}
a:hover, a:active
{
color: #bf7d18;
}
body
{
font-family: "Lucida Sans Unicode", "Lucida Grande", sans-serif;
font-size: 13px;
}
tr:hover
{
background: #f5f5f5;
}
#content-header
{
margin-left: 50px;
}
#content-header h1
{
font-size: 18px;
margin-bottom: 0;
}
#content-header p
{
font-size: 13px;
margin: 0;
color: #909090;
}
#result-list table
{
font-size: 13px;
background: white;
margin: 15px 50px;
width: 600px;
border-collapse: collapse;
text-align: right;
}
#result-list thead tr.last th,
th.statements
{
border-bottom: 1px solid #6d5e48;
}
th.statements
{
text-align: center;
}
#result-list th
{
padding: 3px 12px;
font-size: 14px;
font-weight: normal;
color: #937F61;
}
#result-list td
{
border-bottom: 1px solid #e0e0e0;
color: #606060;
padding: 6px 12px;
}
#result-list tfoot td
{
color: #937F61;
font-weight: bold;
}
#result-list .normal
{
color: #609030;
}
#result-list .warning
{
color: #d0a000;
}
#result-list .critical
{
color: red;
}
#result-list .module-name
{
text-align: left;
}
.footer-link
{
margin-left: 62px;
}
</style>
</head>
<body>
"""
CONTENT_HEADER = """\
<div id="content-header">
<h1>Test Coverage Report</h1>
<p>Generated: %(test_timestamp)s</p>
<p><img src="coverage_status.png"></p>
</div>
"""
CONTENT_BODY = """\
<div id="result-list">
<table>
<thead>
<tr>
<th> </th>
<th colspan="3" class="statements">Statements</th>
</tr>
<tr class="last">
<th class="module-name">Module</th>
<th>total</th>
<th>executed</th>
<th>excluded</th>
<th>%% covered</th>
</tr>
</thead>
<tfoot>
<tr>
<td class="module-name">Total</td>
<td>%(total_lines)d</td>
<td>%(total_executed)d</td>
<td>%(total_excluded)d</td>
<td>%(overall_covered)0.1f%%</td>
</tr>
</tfoot>
<tbody>
%(module_stats)s
</tbody>
</table>
</div>
"""
MODULE_STAT = """\
<tr>
<td class="module-name"><a href="%(module_link)s">%(module_name)s</a></td>
<td>%(total_count)d</td>
<td>%(executed_count)d</td>
<td>%(excluded_count)d</td>
<td class="%(severity)s">%(percent_covered)0.1f%%</td>
</tr>
"""
EXCEPTIONS_LINK = """\
<div>
<a class="footer-link" href="%(exceptions_link)s">
%(exception_desc)s
</a>
</div>
"""
BOTTOM = """\
</body>
</html>
"""
|
# -*- coding: utf-8 -*-
import mock
import unittest
import tempfile
from opencorpora import cli
class CliTest(unittest.TestCase):
    @mock.patch("opencorpora.cli.urlopen")
    def test_download(self, urlopen):
        """download() should run cleanly against a mocked, empty URL."""
        # Pretend the remote resource is empty so nothing real is fetched.
        urlopen.return_value.read.return_value = ""
        with tempfile.NamedTemporaryFile() as f:
            # Minimal stand-in for the argparse namespace download() reads.
            class Args(object):
                output = f.name
                no_decompress = False
                url = ""
                disambig = False
                quiet = False
            args = Args()
            cli.download(args)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
|
# -*- coding: utf-8; mode: django -*-
import os
import sys
import doctest
from glob import glob
from unittest import TestSuite, defaultTestLoader
# Directory containing this test package's modules.
TESTS_ROOT = os.path.abspath(os.path.dirname(__file__))
def suite():
    """Build the package test suite: the doctest modules plus every test
    module found next to this file."""
    tests = TestSuite()
    tests.addTest(doctest.DocTestSuite("django_any.xunit"))
    tests.addTest(doctest.DocTestSuite("django_any.forms"))
    for filename in glob(os.path.join(TESTS_ROOT, "*.py")):
        if filename.endswith("__init__.py"):
            continue
        base = os.path.splitext(os.path.basename(filename))[0]
        module_name = "django_any.tests.%s" % base
        __import__(module_name)
        module = sys.modules[module_name]
        tests.addTest(defaultTestLoader.loadTestsFromModule(module))
    return tests
|
# Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import slack
import slack.http_client
def list(**kwargs):
    """
    Lists the items starred by a user.
    """
    # NOTE: the name deliberately shadows the builtin ``list`` to mirror
    # the Slack Web API method name (stars.list).
    params = {"token": slack.api_token}
    # Bug fix: the body used to call ``list(kwargs.items())``, but at
    # module scope ``list`` resolves to *this very function* (the builtin
    # is shadowed), which raised TypeError.  Merge the kwargs directly.
    params.update(kwargs)
    return slack.http_client.get("stars.list", params)
|
import unittest
from tests.test_PolyGrid import generic_grid
from polymaze.polygrid import PolyGrid, PolyViz
# NOTE(review): this test module appears to be lossy decompiler output --
# fixture constructors, assertion helpers and patch targets were stripped
# to bare parentheses, so the original behavior cannot be recovered from
# this view. Kept verbatim; restore from the upstream polymaze test suite.
try:
    _assertCountEqual = unittest.TestCase.assertCountEqual
    from unittest import mock
except (AttributeError, ImportError):
    _assertCountEqual = unittest.TestCase.assertItemsEqual
    import mock
class TestPolyViz(unittest.TestCase):
    def test_provides_reference_to_related_grid(self):
        grid = ()
        viz = (grid)
        (viz.grid, grid)
    def test_can_create_new_part_styles(self):
        viz = ()
        any_name = 'asdf'
        any_color = (0, 0, 0, 0)
        (any_name, viz._shape_styles)
        (any_name, viz._edge_styles)
        (any_name, any_color)
        (any_name, any_color)
        (any_name, viz._shape_styles)
        (any_name, viz._edge_styles)
    def test_uses_get_x_style_when_drawing_each_part(self):
        viz = ()
        get_style_names = ('get_shape_style', 'get_edge_style')
        for get_style_name in get_style_names:
            with (viz, get_style_name) as m_getstyle:
                try:
                    ()
                except Exception:
                    raise
                (m_getstyle.called)
    def test_get_x_style_returns_default_when_part_has_no_style_setting(self):
        viz = (())
        edge_default_spec = viz._edge_styles['default']
        shape_default_spec = viz._shape_styles['default']
        shape = ((()))
        edge = ((()))
        (shape.viz_style)
        (edge.viz_style)
        ((shape), shape_default_spec)
        ((edge), edge_default_spec)
    def test_get_x_style_returns_named_style_setting_when_part_has_it(self):
        viz = ()
        shape_style_name = '<<shape>>'
        edge_style_name = '<<edge>>'
        any_color = (1, 2, 3, 4)
        (shape_style_name)
        (edge_style_name)
        shape = ((()))
        edge = ((()))
        shape.viz_style = shape_style_name
        edge.viz_style = edge_style_name
        shape_style_spec = viz._shape_styles[shape_style_name]
        edge_style_spec = viz._edge_styles[edge_style_name]
        ((shape), shape_style_spec)
        ((edge), edge_style_spec)
    def test_image_returns_a_PIL_image(self):
        viz = ()
        im = ()
        ((im, 'crop'))
    def test_image_returns_None_for_empty_grid(self):
        empty_grid = ()
        (((())), 0)
        viz = (empty_grid)
        (())
def generic_viz(grid=None):
    grid = (grid or ())
    return (grid)
from os import path
from onadata.apps.api.tests.viewsets.test_abstract_viewset import TestAbstractViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.libs.utils.image_tools import image_url
class TestAttachmentViewSet(TestAbstractViewSet):
    def setUp(self):
        # Build the viewset entry points and publish a form to attach to.
        super(TestAttachmentViewSet, self).setUp()
        self.retrieve_view = AttachmentViewSet.as_view({"get": "retrieve"})
        self.list_view = AttachmentViewSet.as_view({"get": "list"})
        self._publish_xls_form_to_project()
    def test_retrieve_view(self):
        """Retrieving one attachment returns its metadata, then its bytes."""
        self._submit_transport_instance_w_attachment()
        pk = self.attachment.pk
        # Expected serialized representation of the attachment.
        data = {
            "url": "http://testserver/api/v1/media/%s" % pk,
            "field_xpath": None,
            "download_url": self.attachment.media_file.url,
            "small_download_url": image_url(self.attachment, "small"),
            "medium_download_url": image_url(self.attachment, "medium"),
            "id": pk,
            "xform": self.xform.pk,
            "instance": self.attachment.instance.pk,
            "mimetype": self.attachment.mimetype,
            "filename": self.attachment.media_file.name,
        }
        request = self.factory.get("/", **self.extra)
        response = self.retrieve_view(request, pk=pk)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(isinstance(response.data, dict))
        self.assertEqual(response.data, data)
        # file download
        filename = data["filename"]
        ext = filename[filename.rindex(".") + 1 :]
        request = self.factory.get("/", **self.extra)
        response = self.retrieve_view(request, pk=pk, format=ext)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content_type, "image/jpeg")
    def test_list_view(self):
        """Listing returns the attachments visible to the requester."""
        self._submit_transport_instance_w_attachment()
        request = self.factory.get("/", **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(isinstance(response.data, list))
    def test_list_view_filter_by_xform(self):
        """Filter by xform id: valid -> 200, unknown -> 404, garbage -> 400."""
        self._submit_transport_instance_w_attachment()
        data = {"xform": self.xform.pk}
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(isinstance(response.data, list))
        data["xform"] = 10000000
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 404)
        data["xform"] = "lol"
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 400)
    def test_list_view_filter_by_instance(self):
        """Filter by instance id: valid -> 200, unknown -> 404, garbage -> 400."""
        self._submit_transport_instance_w_attachment()
        data = {"instance": self.attachment.instance.pk}
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(isinstance(response.data, list))
        data["instance"] = 10000000
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 404)
        data["instance"] = "lol"
        request = self.factory.get("/", data, **self.extra)
        response = self.list_view(request)
        self.assertEqual(response.status_code, 400)
def test_direct_image_link(self):
self._submit_transport_instance_w_attachment()
data = {"filename": self.attachment.media_file.name}
request = self.factory.get("/", data, **self.extra)
response = self.retrieve_view(request, pk=self.attachment.pk)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response.data, str))
self.assertEqual(response.data, self.attachment.media_file.url)
data["filename"] = 10000000
request = self.factory.get("/", data, **self.extra)
response = self.retrieve_view(request, pk=self.attachment.instance.pk)
self.assertEqual(response.status_code, 404)
data["filename"] = "lol"
request = self.factory.get("/", data, **self.extra)
response = self.retrieve_view(request, pk=self.attachment.instance.pk)
self.assertEqual(response.status_code, 404)
def test_direct_image_link_uppercase(self):
self._submit_transport_instance_w_attachment(media_file="1335783522564.JPG")
filename = self.attachment.media_file.name
file_base, file_extension = path.splitext(filename)
data = {"filename": file_base + file_extension.upper()}
request = self.factory.get("/", data, **self.extra)
response = self.retrieve_view(request, pk=self.attachment.pk)
self.assertEqual(response.status_code, 200)
self.assertTrue(isinstance(response.data, str))
self.assertEqual(response.data, self.attachment.media_file.url)
|
from rest_framework import viewsets
from onadata.apps.api.permissions import XFormPermissions
from onadata.apps.logger.models.xform import XForm
from onadata.libs import filters
from onadata.libs.mixins.anonymous_user_public_forms_mixin import (
AnonymousUserPublicFormsMixin,
)
from onadata.libs.serializers.stats_serializer import (
StatsSerializer,
StatsInstanceSerializer,
)
class StatsViewSet(AnonymousUserPublicFormsMixin, viewsets.ReadOnlyModelViewSet):
    """
    Stats summary for median, mean, mode, range, max, min.
    A query parameter `method` can be used to limit the results to either
    `mean`, `median`, `mode` or `range` only results.
    Example:
    GET /api/v1/stats/1?
    Response:
    [
        {
            "age":
                {
                    "median": 8,
                    "mean": 23.4,
                    "mode": 23,
                    "range": 24,
                    "max": 28,
                    "min": 4
                },
        ...
    ]
    Example:
    GET /api/v1/stats/1?method=median
    Response:
    [
        {
            "age":
                {
                    "median": 8,
                },
        ...
    ]"""

    lookup_field = "pk"
    queryset = XForm.objects.all()
    filter_backends = (filters.AnonDjangoObjectPermissionFilter,)
    permission_classes = [XFormPermissions]
    serializer_class = StatsSerializer

    def get_serializer_class(self):
        # Detail requests carry the lookup value in the URL kwargs; those
        # get the per-instance stats serializer. List requests fall back
        # to the class default resolved by the base implementation.
        if self.kwargs.get(self.lookup_field) is None:
            return super(StatsViewSet, self).get_serializer_class()
        return StatsInstanceSerializer
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
import onadata.apps.logger.models.xform
import django.contrib.gis.db.models.fields
import jsonfield.fields
from django.conf import settings
import onadata.apps.logger.models.attachment
import taggit.managers
class Migration(migrations.Migration):
    # Initial schema migration for the ``logger`` app: creates the
    # Attachment, Instance, InstanceHistory, Note, SurveyType, XForm and
    # ZiggyInstance models, then wires up their foreign keys and the
    # XForm uniqueness constraints.
    # NOTE: migrations are frozen history -- generated quirks such as the
    # mutable ``default={}`` on Instance.json and the bytes defaults
    # (``b""``) are intentionally left exactly as generated.

    dependencies = [
        ("taggit", "0002_auto_20150616_2121"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Attachment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "media_file",
                    models.FileField(
                        upload_to=onadata.apps.logger.models.attachment.upload_to
                    ),
                ),
                ("mimetype", models.CharField(default=b"", max_length=50, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name="Instance",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("json", jsonfield.fields.JSONField(default={})),
                ("xml", models.TextField()),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_modified", models.DateTimeField(auto_now=True)),
                ("deleted_at", models.DateTimeField(default=None, null=True)),
                (
                    "status",
                    models.CharField(default="submitted_via_web", max_length=20),
                ),
                ("uuid", models.CharField(default="", max_length=249)),
                (
                    "geom",
                    django.contrib.gis.db.models.fields.GeometryCollectionField(
                        srid=4326, null=True
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="InstanceHistory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("xml", models.TextField()),
                ("uuid", models.CharField(default="", max_length=249)),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_modified", models.DateTimeField(auto_now=True)),
                (
                    "xform_instance",
                    models.ForeignKey(
                        related_name="submission_history", to="logger.Instance"
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Note",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("note", models.TextField()),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_modified", models.DateTimeField(auto_now=True)),
                (
                    "instance",
                    models.ForeignKey(related_name="notes", to="logger.Instance"),
                ),
            ],
            options={
                "permissions": (("view_note", "View note"),),
            },
        ),
        migrations.CreateModel(
            name="SurveyType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("slug", models.CharField(unique=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name="XForm",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "xls",
                    models.FileField(
                        null=True, upload_to=onadata.apps.logger.models.xform.upload_to
                    ),
                ),
                ("json", models.TextField(default="")),
                ("description", models.TextField(default="", null=True)),
                ("xml", models.TextField()),
                ("require_auth", models.BooleanField(default=False)),
                ("shared", models.BooleanField(default=False)),
                ("shared_data", models.BooleanField(default=False)),
                ("downloadable", models.BooleanField(default=True)),
                ("allows_sms", models.BooleanField(default=False)),
                ("encrypted", models.BooleanField(default=False)),
                (
                    "sms_id_string",
                    models.SlugField(
                        default=b"",
                        verbose_name="SMS ID",
                        max_length=100,
                        editable=False,
                    ),
                ),
                (
                    "id_string",
                    models.SlugField(verbose_name="ID", max_length=100, editable=False),
                ),
                ("title", models.CharField(max_length=255, editable=False)),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_modified", models.DateTimeField(auto_now=True)),
                ("last_submission_time", models.DateTimeField(null=True, blank=True)),
                ("has_start_time", models.BooleanField(default=False)),
                ("uuid", models.CharField(default="", max_length=32)),
                ("bamboo_dataset", models.CharField(default="", max_length=60)),
                ("instances_with_geopoints", models.BooleanField(default=False)),
                ("num_of_submissions", models.IntegerField(default=0)),
                (
                    "tags",
                    taggit.managers.TaggableManager(
                        to="taggit.Tag",
                        through="taggit.TaggedItem",
                        help_text="A comma-separated list of tags.",
                        verbose_name="Tags",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        related_name="xforms", to=settings.AUTH_USER_MODEL, null=True
                    ),
                ),
            ],
            options={
                "ordering": ("id_string",),
                "verbose_name": "XForm",
                "verbose_name_plural": "XForms",
                "permissions": (
                    ("view_xform", "Can view associated data"),
                    ("report_xform", "Can make submissions to the form"),
                    ("move_xform", "Can move form between projects"),
                    ("transfer_xform", "Can transfer form ownership."),
                ),
            },
        ),
        migrations.CreateModel(
            name="ZiggyInstance",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("entity_id", models.CharField(max_length=249)),
                ("instance_id", models.CharField(unique=True, max_length=249)),
                ("form_instance", models.TextField()),
                ("client_version", models.BigIntegerField(default=None, null=True)),
                ("server_version", models.BigIntegerField()),
                ("form_version", models.CharField(default="1.0", max_length=10)),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_modified", models.DateTimeField(auto_now=True)),
                ("date_deleted", models.DateTimeField(default=None, null=True)),
                (
                    "reporter",
                    models.ForeignKey(
                        related_name="ziggys", to=settings.AUTH_USER_MODEL
                    ),
                ),
                (
                    "xform",
                    models.ForeignKey(
                        related_name="ziggy_submissions", to="logger.XForm", null=True
                    ),
                ),
            ],
        ),
        # Deferred foreign keys: added after all models exist so creation
        # order above does not matter.
        migrations.AddField(
            model_name="instance",
            name="survey_type",
            field=models.ForeignKey(to="logger.SurveyType"),
        ),
        migrations.AddField(
            model_name="instance",
            name="tags",
            field=taggit.managers.TaggableManager(
                to="taggit.Tag",
                through="taggit.TaggedItem",
                help_text="A comma-separated list of tags.",
                verbose_name="Tags",
            ),
        ),
        migrations.AddField(
            model_name="instance",
            name="user",
            field=models.ForeignKey(
                related_name="instances", to=settings.AUTH_USER_MODEL, null=True
            ),
        ),
        migrations.AddField(
            model_name="instance",
            name="xform",
            field=models.ForeignKey(
                related_name="instances", to="logger.XForm", null=True
            ),
        ),
        migrations.AddField(
            model_name="attachment",
            name="instance",
            field=models.ForeignKey(related_name="attachments", to="logger.Instance"),
        ),
        migrations.AlterUniqueTogether(
            name="xform",
            unique_together=set([("user", "id_string"), ("user", "sms_id_string")]),
        ),
    ]
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds the ``uuid`` column to
    # ``odk_logger_instancehistory``. The ``models`` attribute below is
    # South's frozen snapshot of the app's models at migration time --
    # do not edit it retroactively.

    def forwards(self, orm):
        """Add the ``uuid`` CharField column to InstanceHistory."""
        # Adding field 'InstanceHistory.uuid'
        db.add_column(
            "odk_logger_instancehistory",
            "uuid",
            self.gf("django.db.models.fields.CharField")(default="", max_length=249),
            keep_default=False,
        )

    def backwards(self, orm):
        """Drop the ``uuid`` column added by :meth:`forwards`."""
        # Deleting field 'InstanceHistory.uuid'
        db.delete_column("odk_logger_instancehistory", "uuid")

    models = {
        "auth.group": {
            "Meta": {"object_name": "Group"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "80"},
            ),
            "permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
        },
        "auth.permission": {
            "Meta": {
                "ordering": "('content_type__app_label', 'content_type__model', 'codename')",
                "unique_together": "(('content_type', 'codename'),)",
                "object_name": "Permission",
            },
            "codename": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "content_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['contenttypes.ContentType']"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "50"}),
        },
        "auth.user": {
            "Meta": {"object_name": "User"},
            "date_joined": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "email": (
                "django.db.models.fields.EmailField",
                [],
                {"max_length": "75", "blank": "True"},
            ),
            "first_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "groups": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {"to": "orm['auth.Group']", "symmetrical": "False", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "is_active": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "is_staff": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "is_superuser": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "last_login": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "last_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "password": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "128"},
            ),
            "user_permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
            "username": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "30"},
            ),
        },
        "contenttypes.contenttype": {
            "Meta": {
                "ordering": "('name',)",
                "unique_together": "(('app_label', 'model'),)",
                "object_name": "ContentType",
                "db_table": "'django_content_type'",
            },
            "app_label": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "model": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
        },
        "odk_logger.attachment": {
            "Meta": {"object_name": "Attachment"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "instance": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'attachments'", "to": "orm['odk_logger.Instance']"},
            ),
            "media_file": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100"},
            ),
        },
        "odk_logger.instance": {
            "Meta": {"object_name": "Instance"},
            "date": ("django.db.models.fields.DateField", [], {"null": "True"}),
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "deleted_at": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "None", "null": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "start_time": (
                "django.db.models.fields.DateTimeField",
                [],
                {"null": "True"},
            ),
            "status": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u'submitted_via_web'", "max_length": "20"},
            ),
            "survey_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['odk_logger.SurveyType']"},
            ),
            "user": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'surveys'", "null": "True", "to": "orm['auth.User']"},
            ),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "249"},
            ),
            "xform": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'surveys'",
                    "null": "True",
                    "to": "orm['odk_logger.XForm']",
                },
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "odk_logger.instancehistory": {
            "Meta": {"object_name": "InstanceHistory"},
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "249"},
            ),
            "xform_instance": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'submission_history'",
                    "to": "orm['odk_logger.Instance']",
                },
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "odk_logger.surveytype": {
            "Meta": {"object_name": "SurveyType"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "slug": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
        },
        "odk_logger.xform": {
            "Meta": {
                "ordering": "('id_string',)",
                "unique_together": "(('user', 'id_string'),)",
                "object_name": "XForm",
            },
            "bamboo_dataset": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "60"},
            ),
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "description": (
                "django.db.models.fields.TextField",
                [],
                {"default": "u''", "null": "True"},
            ),
            "downloadable": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "has_start_time": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "id_string": (
                "django.db.models.fields.SlugField",
                [],
                {"max_length": "50", "db_index": "True"},
            ),
            "is_crowd_form": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "json": ("django.db.models.fields.TextField", [], {"default": "u''"}),
            "shared": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "shared_data": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "title": ("django.db.models.fields.CharField", [], {"max_length": "64"}),
            "user": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'xforms'", "null": "True", "to": "orm['auth.User']"},
            ),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "32"},
            ),
            "xls": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100", "null": "True"},
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
    }

    # NOTE(review): tables above use the ``odk_logger`` prefix while
    # complete_apps says "logger" -- presumably reflects an app rename;
    # verify against the app's migration history.
    complete_apps = ["logger"]
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: widens ``XForm.title`` from max_length 64
    # to 255 (and reverts on backwards). The ``models`` attribute is
    # South's frozen snapshot of the app's models at migration time --
    # do not edit it retroactively.

    def forwards(self, orm):
        """Widen the ``title`` column of logger_xform to 255 characters."""
        # Changing field 'XForm.title'
        db.alter_column(
            "logger_xform",
            "title",
            self.gf("django.db.models.fields.CharField")(max_length=255),
        )

    def backwards(self, orm):
        """Shrink ``title`` back to 64 characters (may truncate data)."""
        # Changing field 'XForm.title'
        db.alter_column(
            "logger_xform",
            "title",
            self.gf("django.db.models.fields.CharField")(max_length=64),
        )

    models = {
        "auth.group": {
            "Meta": {"object_name": "Group"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "80"},
            ),
            "permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
        },
        "auth.permission": {
            "Meta": {
                "ordering": "(u'content_type__app_label', u'content_type__model', u'codename')",
                "unique_together": "((u'content_type', u'codename'),)",
                "object_name": "Permission",
            },
            "codename": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "content_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['contenttypes.ContentType']"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "50"}),
        },
        "auth.user": {
            "Meta": {"object_name": "User"},
            "date_joined": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "email": (
                "django.db.models.fields.EmailField",
                [],
                {"max_length": "75", "blank": "True"},
            ),
            "first_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "groups": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {"to": "orm['auth.Group']", "symmetrical": "False", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "is_active": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "is_staff": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "is_superuser": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "last_login": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "last_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "password": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "128"},
            ),
            "user_permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
            "username": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "30"},
            ),
        },
        "contenttypes.contenttype": {
            "Meta": {
                "ordering": "('name',)",
                "unique_together": "(('app_label', 'model'),)",
                "object_name": "ContentType",
                "db_table": "'django_content_type'",
            },
            "app_label": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "model": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
        },
        "logger.attachment": {
            "Meta": {"object_name": "Attachment"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "instance": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'attachments'", "to": "orm['logger.Instance']"},
            ),
            "media_file": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100"},
            ),
            "mimetype": (
                "django.db.models.fields.CharField",
                [],
                {"default": "''", "max_length": "50", "blank": "True"},
            ),
        },
        "logger.instance": {
            "Meta": {"object_name": "Instance"},
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "deleted_at": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "None", "null": "True"},
            ),
            "geom": (
                "django.contrib.gis.db.models.fields.GeometryCollectionField",
                [],
                {"null": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "json": ("jsonfield.fields.JSONField", [], {"default": "{}"}),
            "status": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u'submitted_via_web'", "max_length": "20"},
            ),
            "survey_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['logger.SurveyType']"},
            ),
            "user": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'instances'",
                    "null": "True",
                    "to": "orm['auth.User']",
                },
            ),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "249"},
            ),
            "xform": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'instances'",
                    "null": "True",
                    "to": "orm['logger.XForm']",
                },
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "logger.instancehistory": {
            "Meta": {"object_name": "InstanceHistory"},
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "249"},
            ),
            "xform_instance": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'submission_history'",
                    "to": "orm['logger.Instance']",
                },
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "logger.note": {
            "Meta": {"object_name": "Note"},
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "instance": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'notes'", "to": "orm['logger.Instance']"},
            ),
            "note": ("django.db.models.fields.TextField", [], {}),
        },
        "logger.surveytype": {
            "Meta": {"object_name": "SurveyType"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "slug": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "100"},
            ),
        },
        "logger.xform": {
            "Meta": {
                "ordering": "('id_string',)",
                "unique_together": "(('user', 'id_string'), ('user', 'sms_id_string'))",
                "object_name": "XForm",
            },
            "allows_sms": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "bamboo_dataset": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "60"},
            ),
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "description": (
                "django.db.models.fields.TextField",
                [],
                {"default": "u''", "null": "True"},
            ),
            "downloadable": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "encrypted": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "has_start_time": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "id_string": (
                "django.db.models.fields.SlugField",
                [],
                {"max_length": "100"},
            ),
            "instances_with_geopoints": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "is_crowd_form": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "json": ("django.db.models.fields.TextField", [], {"default": "u''"}),
            "last_submission_time": (
                "django.db.models.fields.DateTimeField",
                [],
                {"null": "True", "blank": "True"},
            ),
            "num_of_submissions": (
                "django.db.models.fields.IntegerField",
                [],
                {"default": "-1"},
            ),
            "shared": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "shared_data": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "sms_id_string": (
                "django.db.models.fields.SlugField",
                [],
                {"default": "''", "max_length": "100"},
            ),
            "title": ("django.db.models.fields.CharField", [], {"max_length": "255"}),
            "user": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'xforms'", "null": "True", "to": "orm['auth.User']"},
            ),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "32"},
            ),
            "xls": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100", "null": "True"},
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "logger.ziggyinstance": {
            "Meta": {"object_name": "ZiggyInstance"},
            "client_version": (
                "django.db.models.fields.BigIntegerField",
                [],
                {"default": "None", "null": "True"},
            ),
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_deleted": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "None", "null": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "entity_id": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "249"},
            ),
            "form_instance": ("django.db.models.fields.TextField", [], {}),
            "form_version": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u'1.0'", "max_length": "10"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "instance_id": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "249"},
            ),
            "reporter": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'ziggys'", "to": "orm['auth.User']"},
            ),
            "server_version": ("django.db.models.fields.BigIntegerField", [], {}),
            "xform": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'ziggy_submissions'",
                    "null": "True",
                    "to": "orm['logger.XForm']",
                },
            ),
        },
        "taggit.tag": {
            "Meta": {"object_name": "Tag"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "100"},
            ),
            "slug": (
                "django.db.models.fields.SlugField",
                [],
                {"unique": "True", "max_length": "100"},
            ),
        },
        "taggit.taggeditem": {
            "Meta": {"object_name": "TaggedItem"},
            "content_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "u'taggit_taggeditem_tagged_items'",
                    "to": "orm['contenttypes.ContentType']",
                },
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "object_id": (
                "django.db.models.fields.IntegerField",
                [],
                {"db_index": "True"},
            ),
            "tag": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "u'taggit_taggeditem_items'",
                    "to": "orm['taggit.Tag']",
                },
            ),
        },
    }

    complete_apps = ["logger"]
|
import re
import urllib.request, urllib.error, urllib.parse
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
class Section(dict):
    """
    A class used to represent a section of a page. A section should
    have certain fields. 'level' denotes how nested this section is in
    the document, like h1, h2, etc. 'id' is a string used to link to
    this section. 'title' will be printed at the top of the
    section. 'content' is the html that will be printed as the meat of
    the section. Notice that we use the 'section.html' template to
    render a section as html, and the url provides a link that will be
    used in the page's table of contents.
    """

    # Keys every well-formed section dict is expected to carry.
    FIELDS = ['level', 'id', 'title', 'content']

    def to_html(self):
        # Defect fixed: the render call had been stripped to a bare tuple;
        # restore rendering through the section template, as described in
        # the class docstring.
        return render_to_string('section.html', self)

    def url(self):
        # Anchor link for this section, used in the table of contents.
        return '<a href="#%(id)s">%(title)s</a>' % self
class TreeNode(list):
    """
    This simple tree class will be used to construct the table of
    contents for the page.

    A node is a list of its children; `value` holds the node's payload
    and `parent` points back up the tree.
    """

    def __init__(self, value=None, parent=None):
        self.value = value
        self.parent = parent
        # Defect fixed: the stripped `(self)` call is restored as the
        # list base-class initializer (start with no children).
        list.__init__(self)

    def add_child(self, value):
        """Create a child node holding `value`, append it to this node's
        children and return it."""
        # Defect fixed: the constructor and append calls had been
        # stripped to bare tuples; restore them.
        child = TreeNode(value, self)
        self.append(child)
        return child
class GoogleDoc(object):
    """
    Structure for dealing with a Google Document.

    Most use cases will initialize a GoogleDoc by passing a url to the
    init. This should be a public url that links to an html version of
    the document (publish the document to the web and copy the url).

    The primary method this class provides is 'to_html', which renders
    this document as html in the Twitter Bootstrap format.

    NOTE(review): throughout this class, call expressions appear to have
    had their callable stripped by a source transformation (bare
    parenthesised argument lists remain). The probable original call is
    noted inline; none of these methods are functional as-is -- confirm
    against the upstream project before relying on them.
    """

    def __init__(self, url=None):
        if (url is not None):
            (url)  # presumably self.set_html_from_url(url) -- confirm

    def set_html_from_url(self, url):
        # Fetch the published document and load its html.
        f = (url)  # presumably urllib2.urlopen(url) -- confirm
        (())  # presumably self.set_html(f.read()) -- confirm
        ()  # presumably f.close() -- confirm

    def set_html(self, html):
        """
        When setting the html for this Google Document we do two things:

        1. Extract the content from the html: a regular expression pulls
           the meat of the document out of the body of the html and cuts
           off the footer Google adds automatically.

        2. Extract the various sections from the content: a regular
           expression looks for h1, h2, ... tags to split the document
           into sections. Note: it is important to use the heading text
           styles in the Google Document so this code splits correctly.
        """
        self._html = html
        ()  # presumably self._extract_content() -- confirm
        ()  # presumably self._extract_sections() -- confirm

    def _extract_content(self):
        # Grab everything between <body> and Google's auto-added footer.
        m = ('<body>(.*)</div><div id="footer">', self._html, re.DOTALL)  # presumably re.search(...) -- confirm
        self._content = (1)  # presumably m.group(1) -- confirm
        ()  # presumably self._fix_image_urls() -- confirm

    def _fix_image_urls(self):
        """Make relative paths for images absolute."""
        def repl(m):
            # presumably m.group(1).replace('src="',
            # 'src="https://docs.google.com/document/') -- confirm
            return ('src="', 'src="https://docs.google.com/document/', (1))
        self._content = ('(<img[^>]*>)', repl, self._content)  # presumably re.sub(...) -- confirm

    def _extract_sections(self):
        """
        Split the content into Section objects using the header markup
        Google emits, e.g.:

            <h3 class="c1"><a name="h.699ffpepx6zs"></a><span>Hello World
            </span></h3>

        re.split with a grouped pattern interleaves the captured groups
        with the text between matches (see
        http://docs.python.org/library/re.html#re.split). The ugly id
        Google generates is replaced with a slugified version of the
        section title for pretty urls.
        """
        self._sections = []
        header = '<h(?P<level>\\d) class="[^"]+"><a name="(?P<id>[^"]+)"></a><span>(?P<title>[^<]+)</span></h\\d>'
        l = (header, self._content)  # presumably re.split(header, self._content) -- confirm
        (0)  # presumably l.pop(0), dropping text before the first header -- confirm
        while l:
            section = ()  # presumably Section(level=int(l.pop(0)), id=l.pop(0), title=l.pop(0), content=l.pop(0)) -- confirm
            section['id'] = (section['title'])  # presumably slugify(section['title']) -- confirm
            if (section['level'] >= 1):
                (section)  # presumably self._sections.append(section) -- confirm

    def _construct_section_tree(self):
        """
        Google Documents doesn't nest lists in its table of contents, so
        instead of reusing what it provides we build a tree of sections
        keyed on each section's level; the tree is later used to
        construct the table-of-contents html.
        """
        self._section_tree = (())  # presumably TreeNode(Section(level=0)) -- confirm
        current_node = self._section_tree
        for section in self._sections:
            # Climb back up until this section nests under current_node.
            while (section['level'] <= current_node.value['level']):
                current_node = current_node.parent
            # Insert empty filler sections for skipped heading levels.
            while (section['level'] > (current_node.value['level'] + 1)):
                empty_section = ()  # presumably Section(level=current_node.value['level'] + 1) -- confirm
                current_node = (empty_section)  # presumably current_node.add_child(empty_section) -- confirm
            if (not (section['level'] == (current_node.value['level'] + 1))):
                raise ()  # exception type stripped -- confirm upstream
            current_node = (section)  # presumably current_node.add_child(section) -- confirm

    def _navigation_list(self, node=None):
        """
        Return an html representation of the table of contents for this
        document, built recursively: a list item per tree element, plus
        an unordered list when the node has children.
        """
        if (node is None):
            ()  # presumably self._construct_section_tree() -- confirm
            return (self._section_tree)  # presumably self._navigation_list(self._section_tree) -- confirm
        result = ''
        if (('title' in node.value) and ('id' in node.value)):
            result += ('<li>%s</li>' % ())  # presumably node.value.url() -- confirm
        if ((node) > 0):  # presumably len(node) > 0 -- confirm
            result += ('<ul>%s</ul>' % ([(child) for child in node]))  # presumably ''.join(self._navigation_list(child) for child in node) -- confirm
        return result

    def _navigation_html(self):
        """Render the navigation html as a Twitter Bootstrap section."""
        return ('section.html', {'level': 1, 'id': 'contents', 'title': 'Contents', 'content': ()})  # presumably render_to_string(...) with self._navigation_list() -- confirm

    def to_html(self):
        """Return a cleaned up HTML representation of this document."""
        return ('google_doc.html', {'nav': (), 'content': ([() for s in self._sections])})  # presumably render_to_string(...) joining s.to_html() -- confirm
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``MetaData.file_hash`` (nullable
    CharField, max_length=50) to the ``main_metadata`` table."""

    def forwards(self, orm):
        # Adding field 'MetaData.file_hash'
        db.add_column(
            "main_metadata",
            "file_hash",
            self.gf("django.db.models.fields.CharField")(
                max_length=50, null=True, blank=True
            ),
            keep_default=False,
        )

    def backwards(self, orm):
        # Deleting field 'MetaData.file_hash'
        db.delete_column("main_metadata", "file_hash")

    # Frozen ORM snapshot generated by South at migration-creation time.
    # Do not edit by hand; it reflects the model state as of this
    # migration, not the current models.
    models = {
        "auth.group": {
            "Meta": {"object_name": "Group"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "80"},
            ),
            "permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
        },
        "auth.permission": {
            "Meta": {
                "ordering": "(u'content_type__app_label', u'content_type__model', u'codename')",
                "unique_together": "((u'content_type', u'codename'),)",
                "object_name": "Permission",
            },
            "codename": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "content_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['contenttypes.ContentType']"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "50"}),
        },
        "auth.user": {
            "Meta": {"object_name": "User"},
            "date_joined": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "email": (
                "django.db.models.fields.EmailField",
                [],
                {"max_length": "75", "blank": "True"},
            ),
            "first_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "groups": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {"to": "orm['auth.Group']", "symmetrical": "False", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "is_active": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "is_staff": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "is_superuser": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "last_login": (
                "django.db.models.fields.DateTimeField",
                [],
                {"default": "datetime.datetime.now"},
            ),
            "last_name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "password": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "128"},
            ),
            "user_permissions": (
                "django.db.models.fields.related.ManyToManyField",
                [],
                {
                    "to": "orm['auth.Permission']",
                    "symmetrical": "False",
                    "blank": "True",
                },
            ),
            "username": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "30"},
            ),
        },
        "contenttypes.contenttype": {
            "Meta": {
                "ordering": "('name',)",
                "unique_together": "(('app_label', 'model'),)",
                "object_name": "ContentType",
                "db_table": "'django_content_type'",
            },
            "app_label": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "100"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "model": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
        },
        "logger.xform": {
            "Meta": {
                "ordering": "('id_string',)",
                "unique_together": "(('user', 'id_string'), ('user', 'sms_id_string'))",
                "object_name": "XForm",
            },
            "allows_sms": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "bamboo_dataset": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "60"},
            ),
            "date_created": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now_add": "True", "blank": "True"},
            ),
            "date_modified": (
                "django.db.models.fields.DateTimeField",
                [],
                {"auto_now": "True", "blank": "True"},
            ),
            "description": (
                "django.db.models.fields.TextField",
                [],
                {"default": "u''", "null": "True"},
            ),
            "downloadable": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "True"},
            ),
            "encrypted": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "has_start_time": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "id_string": (
                "django.db.models.fields.SlugField",
                [],
                {"max_length": "100"},
            ),
            "instances_with_geopoints": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "json": ("django.db.models.fields.TextField", [], {"default": "u''"}),
            "last_submission_time": (
                "django.db.models.fields.DateTimeField",
                [],
                {"null": "True", "blank": "True"},
            ),
            "num_of_submissions": (
                "django.db.models.fields.IntegerField",
                [],
                {"default": "0"},
            ),
            "require_auth": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "shared": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "shared_data": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "sms_id_string": (
                "django.db.models.fields.SlugField",
                [],
                {"default": "''", "max_length": "100"},
            ),
            "title": ("django.db.models.fields.CharField", [], {"max_length": "255"}),
            "user": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"related_name": "'xforms'", "null": "True", "to": "orm['auth.User']"},
            ),
            "uuid": (
                "django.db.models.fields.CharField",
                [],
                {"default": "u''", "max_length": "32"},
            ),
            "xls": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100", "null": "True"},
            ),
            "xml": ("django.db.models.fields.TextField", [], {}),
        },
        "main.metadata": {
            "Meta": {
                "unique_together": "(('xform', 'data_type', 'data_value'),)",
                "object_name": "MetaData",
            },
            "data_file": (
                "django.db.models.fields.files.FileField",
                [],
                {"max_length": "100", "null": "True", "blank": "True"},
            ),
            "data_file_type": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "null": "True", "blank": "True"},
            ),
            "data_type": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255"},
            ),
            "data_value": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255"},
            ),
            "file_hash": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "50", "null": "True", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "xform": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['logger.XForm']"},
            ),
        },
        "main.tokenstoragemodel": {
            "Meta": {"object_name": "TokenStorageModel"},
            "id": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "'google_id'",
                    "primary_key": "True",
                    "to": "orm['auth.User']",
                },
            ),
            "token": ("django.db.models.fields.TextField", [], {}),
        },
        "main.userprofile": {
            "Meta": {"object_name": "UserProfile"},
            "address": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "city": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "country": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "2", "blank": "True"},
            ),
            "created_by": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {"to": "orm['auth.User']", "null": "True", "blank": "True"},
            ),
            "description": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "home_page": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "num_of_submissions": (
                "django.db.models.fields.IntegerField",
                [],
                {"default": "0"},
            ),
            "organization": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "phonenumber": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "30", "blank": "True"},
            ),
            "require_auth": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "twitter": (
                "django.db.models.fields.CharField",
                [],
                {"max_length": "255", "blank": "True"},
            ),
            "user": (
                "django.db.models.fields.related.OneToOneField",
                [],
                {
                    "related_name": "'profile'",
                    "unique": "True",
                    "to": "orm['auth.User']",
                },
            ),
        },
        "taggit.tag": {
            "Meta": {"object_name": "Tag"},
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "100"},
            ),
            "slug": (
                "django.db.models.fields.SlugField",
                [],
                {"unique": "True", "max_length": "100"},
            ),
        },
        "taggit.taggeditem": {
            "Meta": {"object_name": "TaggedItem"},
            "content_type": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "u'taggit_taggeditem_tagged_items'",
                    "to": "orm['contenttypes.ContentType']",
                },
            ),
            "id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "object_id": (
                "django.db.models.fields.IntegerField",
                [],
                {"db_index": "True"},
            ),
            "tag": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "related_name": "u'taggit_taggeditem_items'",
                    "to": "orm['taggit.Tag']",
                },
            ),
        },
    }

    complete_apps = ["main"]
|
import unittest
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from onadata.apps.main.views import profile
class TestUserProfile(TestCase):
    """Tests for user registration and the public profile view."""

    def setUp(self):
        # BUG FIX: this hook was named `setup`, which unittest/Django
        # never calls -- it must be `setUp` so the fixture runs before
        # each test.
        self.client = Client()
        self.assertEqual(User.objects.count(), 0)

    def _login_user_and_profile(self, extra_post_data=None):
        """Register (and implicitly log in) a user via the registration
        form, overlaying ``extra_post_data`` on the default post data.

        Stores the response on ``self.response`` and, when registration
        succeeded, the created user on ``self.user``.
        """
        # BUG FIX: the default was a shared mutable dict ({}), which
        # leaks state between calls; use the None sentinel instead.
        if extra_post_data is None:
            extra_post_data = {}
        post_data = {
            "username": "bob",
            "email": "bob@columbia.edu",
            "password1": "bobbob",
            "password2": "bobbob",
            "name": "Bob",
            "city": "Bobville",
            "country": "US",
            "organization": "Bob Inc.",
            "home_page": "bob.com",
            "twitter": "boberama",
        }
        url = "/accounts/register/"
        post_data.update(extra_post_data)
        self.response = self.client.post(url, post_data)
        try:
            self.user = User.objects.get(username=post_data["username"])
        except User.DoesNotExist:
            # Registration may legitimately fail (e.g. invalid-username
            # tests); callers check User.objects.count() instead.
            pass

    def test_create_user_with_given_name(self):
        self._login_user_and_profile()
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(self.user.username, "bob")

    def test_create_user_profile_for_user(self):
        self._login_user_and_profile()
        self.assertEqual(self.response.status_code, 302)
        user_profile = self.user.profile
        self.assertEqual(user_profile.city, "Bobville")
        self.assertTrue(hasattr(user_profile, "metadata"))

    def test_disallow_non_alpha_numeric(self):
        # None of these usernames should pass the registration form's
        # validation, so the user count must not change.
        invalid_usernames = [
            "b ob",
            "b.o.b.",
            "b-ob",
            "b!",
            "@bob",
            "bob@bob.com",
            "bob$",
            "b&o&b",
            "bob?",
            "#bob",
            "(bob)",
            "b*ob",
            "%s % bob",
        ]
        users_before = User.objects.count()
        for username in invalid_usernames:
            self._login_user_and_profile({"username": username})
            self.assertEqual(User.objects.count(), users_before)

    def test_disallow_reserved_name(self):
        users_before = User.objects.count()
        self._login_user_and_profile({"username": "admin"})
        self.assertEqual(User.objects.count(), users_before)

    def test_404_if_user_does_not_exist(self):
        response = self.client.get(reverse(profile, kwargs={"username": "nonuser"}))
        self.assertEqual(response.status_code, 404)

    @unittest.skip("We don't use twitter in kobocat tests")
    def test_show_single_at_sign_in_twitter_link(self):
        self._login_user_and_profile()
        response = self.client.get(reverse(profile, kwargs={"username": "bob"}))
        self.assertContains(response, ">@boberama")
        # add the @ sign
        self.user.profile.twitter = "@boberama"
        self.user.profile.save()
        response = self.client.get(reverse(profile, kwargs={"username": "bob"}))
        self.assertContains(response, ">@boberama")
|
# -*- coding: utf-8 -*-
# Create your views here.
|
from collections import OrderedDict
from itertools import chain
import time
from django.conf import settings
from pandas.core.frame import DataFrame
try:
from pandas.io.parsers import ExcelWriter
except ImportError as e:
from pandas import ExcelWriter
from pyxform.survey_element import SurveyElement
from pyxform.section import Section, RepeatingSection
from pyxform.question import Question
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.libs.exceptions import NoRecordsFoundError
from onadata.libs.utils.common_tags import ID, XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION, UUID, SUBMISSION_TIME, NA_REP, BAMBOO_DATASET_ID, DELETEDAT, TAGS, NOTES, SUBMITTED_BY
from onadata.libs.utils.export_tools import question_types_to_exclude
xform_instances = settings.MONGO_DB.instances
MULTIPLE_SELECT_BIND_TYPE = 'select'
GEOPOINT_BIND_TYPE = 'geopoint'
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
DEFAULT_GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
def get_valid_sheet_name(sheet_name, existing_name_list):
    """Return a sheet name that fits Excel's length limit and does not
    collide with any name in ``existing_name_list``.

    The name is truncated to XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS;
    collisions are resolved by appending an increasing integer suffix,
    re-truncating the base so the suffixed name stays within the limit.

    NOTE(review): call targets in this function had been stripped by a
    source transformation; the len()/str()/formatting calls below are
    reconstructed -- confirm against the upstream project.
    """
    new_sheet_name = sheet_name[:XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS]
    i = 1
    generated_name = new_sheet_name
    while generated_name in existing_name_list:
        digit_length = len(str(i))
        allowed_name_len = XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS - digit_length
        # Leave room for the numeric suffix.
        if len(generated_name) > allowed_name_len:
            generated_name = generated_name[:allowed_name_len]
        generated_name = '%s%s' % (generated_name, i)
        i += 1
    return generated_name
def remove_dups_from_list_maintain_order(l):
    """Return a copy of ``l`` with duplicates removed, keeping the first
    occurrence of each item in its original position.

    NOTE(review): the call target had been stripped; reconstructed as
    OrderedDict.fromkeys, whose keys are unique and insertion-ordered
    (OrderedDict is already imported at the top of this module).
    """
    return list(OrderedDict.fromkeys(l))
def get_prefix_from_xpath(xpath):
    """Return the group prefix of ``xpath`` -- everything up to and
    including the last '/' -- or None when there is no group component.

    >>> get_prefix_from_xpath('group/question')  # -> 'group/'
    >>> get_prefix_from_xpath('question')        # -> None

    NOTE(review): call targets had been stripped; str/rsplit/len and the
    ValueError below are reconstructed -- confirm against upstream.
    """
    xpath = str(xpath)
    parts = xpath.rsplit('/', 1)
    if len(parts) == 1:
        # No '/': the xpath is not nested inside any group.
        return None
    elif len(parts) == 2:
        return '%s/' % parts[0]
    else:
        # Defensive: rsplit(maxsplit=1) can only yield 1 or 2 parts.
        raise ValueError('%s cannot be prefixed, it returns %s' % (xpath, str(parts)))
class AbstractDataFrameBuilder(object):
    """
    Group functionality used by any DataFrameBuilder i.e. XLS, CSV and
    KML.

    NOTE(review): throughout this class, call expressions have had their
    callable stripped by a source transformation (bare parenthesised
    argument lists remain). Probable originals are noted inline; none of
    these methods are functional as-is -- confirm against upstream.
    """

    # Mongo bookkeeping columns excluded from exports.
    IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ID, ATTACHMENTS, GEOLOCATION, BAMBOO_DATASET_ID, DELETEDAT, SUBMITTED_BY]
    # Columns appended to every exported record.
    ADDITIONAL_COLUMNS = [UUID, SUBMISSION_TIME, TAGS, NOTES]
    BINARY_SELECT_MULTIPLES = False

    def __init__(self, username, id_string, filter_query=None, group_delimiter=DEFAULT_GROUP_DELIMITER, split_select_multiples=True, binary_select_multiples=False):
        self.username = username
        self.id_string = id_string
        self.filter_query = filter_query
        self.group_delimiter = group_delimiter
        self.split_select_multiples = split_select_multiples
        self.BINARY_SELECT_MULTIPLES = binary_select_multiples
        ()  # presumably self._setup() -- confirm

    def _setup(self):
        self.dd = ()  # presumably DataDictionary lookup by username/id_string -- confirm
        self.select_multiples = (self.dd)  # presumably self._collect_select_multiples(self.dd) -- confirm
        self.gps_fields = (self.dd)  # presumably self._collect_gps_fields(self.dd) -- confirm

    @classmethod
    def _fields_to_select(cls, dd):
        # presumably the abbreviated xpaths of all Question elements -- confirm
        return [() for c in () if (c, Question)]

    @classmethod
    def _collect_select_multiples(cls, dd):
        # presumably a dict mapping each select-multiple question's xpath
        # to the xpaths of its choices -- confirm
        return ([((), [() for c in e.children]) for e in () if (('type') == 'select')])

    @classmethod
    def _split_select_multiples(cls, record, select_multiples, binary_select_multiples=False):
        """
        Split a record's select-multiple answers into one column per
        choice.

        Prefix contains the xpath and slash if we are within a repeat so
        that we can figure out which select multiples belong to which
        repeat.
        """
        for (key, choices) in (()):  # presumably select_multiples.items() -- confirm
            selections = []
            if (key in record):
                selections = [('%s/%s' % (key, r)) for r in (' ')]  # presumably record[key].split(' ') -- confirm
                (key)  # presumably record.pop(key) -- confirm
            if (not binary_select_multiples):
                (([(choice, (choice in selections)) for choice in choices]))  # presumably record.update(dict(...)) with booleans -- confirm
            else:
                YES = 1
                NO = 0
                (([(choice, (YES if (choice in selections) else NO)) for choice in choices]))  # presumably record.update(dict(...)) with 1/0 -- confirm
        # Recurse into repeat groups (lists of dicts) so nested select
        # multiples are split as well.
        for (record_key, record_item) in (()):  # presumably record.items() -- confirm
            if ((record_item) == list):  # presumably type(record_item) == list -- confirm
                for list_item in record_item:
                    if ((list_item) == dict):  # presumably type(list_item) == dict -- confirm
                        (list_item, select_multiples)  # presumably cls._split_select_multiples(...) -- confirm
        return record

    @classmethod
    def _collect_gps_fields(cls, dd):
        # presumably the abbreviated xpaths of all geopoint questions -- confirm
        return [() for e in () if (('type') == 'geopoint')]

    @classmethod
    def _tag_edit_string(cls, record):
        """
        Turns a list of tags into a string representation.
        """
        if ('_tags' in record):
            tags = []
            for tag in record['_tags']:
                if ((',' in tag) and (' ' in tag)):
                    (('"%s"' % tag))  # presumably tags.append('"%s"' % tag) -- confirm
                else:
                    (tag)  # presumably tags.append(tag) -- confirm
            ({'_tags': ((tags))})  # presumably record.update({'_tags': ', '.join(sorted(tags))}) -- confirm

    @classmethod
    def _split_gps_fields(cls, record, gps_fields):
        # Expand 'lat lng alt precision' strings into separate columns.
        updated_gps_fields = {}
        for (key, value) in ():  # presumably record.items() -- confirm
            if ((key in gps_fields) and (value, str)):  # presumably isinstance(value, str) -- confirm
                gps_xpaths = (key)  # presumably the additional geopoint xpaths for key -- confirm
                gps_parts = ([(xpath, None) for xpath in gps_xpaths])  # presumably dict(...) defaulting to None -- confirm
                parts = (' ')  # presumably value.split(' ') -- confirm
                if ((parts) == 4):  # presumably len(parts) == 4 -- confirm
                    gps_parts = (((gps_xpaths, parts)))  # presumably dict(zip(gps_xpaths, parts)) -- confirm
                (gps_parts)  # presumably updated_gps_fields.update(gps_parts) -- confirm
            elif ((value) == list):  # presumably type(value) == list -- confirm
                for list_item in value:
                    if ((list_item) == dict):
                        (list_item, gps_fields)  # presumably cls._split_gps_fields(...) -- confirm
        (updated_gps_fields)  # presumably record.update(updated_gps_fields) -- confirm

    def _query_mongo(self, query='{}', start=0, limit=ParsedInstance.DEFAULT_LIMIT, fields='[]', count=False):
        # Run the count query first so empty result sets fail fast.
        count_args = {'username': self.username, 'id_string': self.id_string, 'query': query, 'fields': '[]', 'sort': '{}', 'count': True}
        count_object = ()  # presumably ParsedInstance.query_mongo(**count_args) -- confirm
        record_count = count_object[0]['count']
        if (record_count == 0):
            raise ('No records found for your query')  # presumably NoRecordsFoundError(...) -- confirm
        if count:
            return record_count
        else:
            query_args = {'username': self.username, 'id_string': self.id_string, 'query': query, 'fields': fields, 'sort': '{}', 'start': start, 'limit': limit, 'count': False}
            cursor = ()  # presumably ParsedInstance.query_mongo(**query_args) -- confirm
            return cursor
class XLSDataFrameBuilder(AbstractDataFrameBuilder):
    """
    Generate structures from mongo and DataDictionary for a
    DataFrameXLSWriter.

    This builder can choose to query the data in batches and write to a
    single ExcelWriter object using multiple instances of
    DataFrameXLSWriter.

    NOTE(review): as in the rest of this module, call expressions have
    had their callable stripped (bare argument lists remain). Probable
    originals are noted inline -- confirm against upstream.
    """

    # Bookkeeping columns linking repeat sheets back to the parent row.
    INDEX_COLUMN = '_index'
    PARENT_TABLE_NAME_COLUMN = '_parent_table_name'
    PARENT_INDEX_COLUMN = '_parent_index'
    EXTRA_COLUMNS = [INDEX_COLUMN, PARENT_TABLE_NAME_COLUMN, PARENT_INDEX_COLUMN]
    # Excel format limits.
    SHEET_NAME_MAX_CHARS = 30
    XLS_SHEET_COUNT_LIMIT = 255
    XLS_COLUMN_COUNT_MAX = 255
    CURRENT_INDEX_META = 'current_index'

    def __init__(self, username, id_string, filter_query=None, group_delimiter=DEFAULT_GROUP_DELIMITER, split_select_multiples=True, binary_select_multiples=False):
        # presumably delegates to AbstractDataFrameBuilder.__init__ -- confirm
        (username, id_string, filter_query, group_delimiter, split_select_multiples, binary_select_multiples)

    def _setup(self):
        ()  # presumably the base class _setup() -- confirm
        ()  # presumably self._generate_sections() -- confirm

    def export_to(self, file_path, batchsize=1000):
        # Query mongo in batches and write each section to its own sheet.
        self.xls_writer = (file_path)  # presumably ExcelWriter(file_path) -- confirm
        record_count = ()  # presumably self._query_mongo(count=True) -- confirm
        start = 0
        header = True
        while (start < record_count):
            cursor = (self.filter_query)  # presumably a paged self._query_mongo(...) -- confirm
            data = (cursor)  # presumably self._format_for_dataframe(cursor) -- confirm
            for (section_name, section) in ():  # presumably self.sections.items() -- confirm
                records = data[section_name]
                if ((records) > 0):  # presumably len(records) > 0 -- confirm
                    columns = section['columns']
                    if (self.group_delimiter != DEFAULT_GROUP_DELIMITER):
                        columns = [(('/')) for col in columns]  # presumably re-joined with the configured delimiter -- confirm
                    columns = (columns + self.EXTRA_COLUMNS)
                    writer = (records, columns)  # presumably XLSDataFrameWriter(records, columns) -- confirm
                    (self.xls_writer, section_name)  # presumably writer.write_to_excel(...) -- confirm
            header = False
            start += batchsize
            (0.1)  # presumably time.sleep(0.1) between batches -- confirm
        ()  # presumably self.xls_writer.save() -- confirm

    def _format_for_dataframe(self, cursor):
        """
        Format each record for consumption by a dataframe.

        Returns a dictionary with the key being the name of the sheet,
        and values a list of dicts to feed into a DataFrame.
        """
        data = (((section_name, []) for section_name in (())))  # presumably dict((name, []) for name in self.sections) -- confirm
        main_section = self.sections[self.survey_name]
        main_sections_columns = main_section['columns']
        for record in cursor:
            (data[self.survey_name], record, main_sections_columns, self.survey_name)  # presumably self._add_data_for_section(...) -- confirm
            parent_index = main_section[self.CURRENT_INDEX_META]
            for (sheet_name, section) in ():  # presumably self.sections.items() -- confirm
                if (sheet_name != self.survey_name):
                    xpath = section['xpath']
                    columns = section['columns']
                    if (xpath in record):
                        repeat_records = record[xpath]
                        for repeat_record in repeat_records:
                            (data[sheet_name], repeat_record, columns, sheet_name, parent_index, self.survey_name)  # presumably self._add_data_for_section(...) -- confirm
        return data

    def _add_data_for_section(self, data_section, record, columns, section_name, parent_index=(- 1), parent_table_name=None):
        # Append one row dict to data_section and bump the per-section
        # running index used to link repeat rows to their parent.
        ({})  # presumably data_section.append({}) -- confirm
        self.sections[section_name][self.CURRENT_INDEX_META] += 1
        index = self.sections[section_name][self.CURRENT_INDEX_META]
        if self.split_select_multiples:
            record = (record, self.select_multiples)  # presumably self._split_select_multiples(...) -- confirm
        (record, self.gps_fields)  # presumably self._split_gps_fields(...) -- confirm
        for column in columns:
            data_value = None
            try:
                data_value = record[column]
            except KeyError:
                raise
            ({((('/')) if (self.group_delimiter != DEFAULT_GROUP_DELIMITER) else column): data_value})  # presumably data_section[-1].update(...) with re-delimited key -- confirm
        ({XLSDataFrameBuilder.INDEX_COLUMN: index, XLSDataFrameBuilder.PARENT_INDEX_COLUMN: parent_index, XLSDataFrameBuilder.PARENT_TABLE_NAME_COLUMN: parent_table_name})  # presumably data_section[-1].update(...) -- confirm
        (([(column, (record[column] if (column in record) else None)) for column in self.ADDITIONAL_COLUMNS]))  # presumably data_section[-1].update(dict(...)) -- confirm

    def _generate_sections(self):
        """
        Split survey questions into separate sections for each xls sheet
        and columns for each section.
        """
        self.sections = ()  # presumably OrderedDict() -- confirm
        self.select_multiples = {}
        survey_element = self.dd.survey
        self.survey_name = (survey_element.name, (()))  # presumably get_valid_sheet_name(name, self.sections.keys()) -- confirm
        (self.survey_name, (), False)  # presumably self._create_section(...) -- confirm
        (self.survey_name, ())  # presumably self._build_sections_recursive(self.survey_name, ...) -- confirm
        for section_name in self.sections:
            self.sections[section_name]['columns'] += self.ADDITIONAL_COLUMNS
        ()  # presumably self.get_exceeds_xls_limits() -- confirm

    def _build_sections_recursive(self, section_name, element, is_repeating=False):
        """
        Build a section's children and recurse any repeating sections to
        build those as a separate section.
        """
        for child in element.children:
            if (child, Section):  # presumably isinstance(child, Section) -- confirm
                new_is_repeating = (child, RepeatingSection)  # presumably isinstance(...) -- confirm
                new_section_name = section_name
                if new_is_repeating:
                    # Repeats get their own sheet.
                    new_section_name = (child.name, (()))  # presumably get_valid_sheet_name(...) -- confirm
                    (new_section_name, (), True)  # presumably self._create_section(...) -- confirm
                (new_section_name, child, new_is_repeating)  # presumably recurse -- confirm
            else:
                child_bind_type = ('type')  # presumably child's bind type lookup -- confirm
                if ((child, Question) and (not (child.type)) and (not (child_bind_type == MULTIPLE_SELECT_BIND_TYPE))):
                    (section_name, child)  # presumably self._add_column_to_section(...) -- confirm
                elif (child_bind_type == MULTIPLE_SELECT_BIND_TYPE):
                    # Track choices so select multiples can be split.
                    self.select_multiples[()] = [() for option in child.children]
                    if self.split_select_multiples:
                        for option in child.children:
                            (section_name, option)  # presumably self._add_column_to_section(...) -- confirm
                    else:
                        (section_name, child)  # presumably self._add_column_to_section(...) -- confirm
                if (child_bind_type == GEOPOINT_BIND_TYPE):
                    for xpath in (()):  # presumably the additional geopoint xpaths -- confirm
                        (section_name, xpath)  # presumably self._add_column_to_section(...) -- confirm

    def get_exceeds_xls_limits(self):
        # Lazily compute (and cache) whether this export would exceed
        # Excel's sheet-count or per-sheet column-count limits.
        if (not (self, 'exceeds_xls_limits')):  # presumably hasattr(self, 'exceeds_xls_limits') -- confirm
            self.exceeds_xls_limits = False
            if ((self.sections) > self.XLS_SHEET_COUNT_LIMIT):  # presumably len(self.sections) -- confirm
                self.exceeds_xls_limits = True
            else:
                for section in ():  # presumably self.sections.values() -- confirm
                    if ((section['columns']) > self.XLS_COLUMN_COUNT_MAX):  # presumably len(...) -- confirm
                        self.exceeds_xls_limits = True
                        break
        return self.exceeds_xls_limits

    def _create_section(self, section_name, xpath, is_repeat):
        # Register a fresh section (one per output sheet).
        self.sections[section_name] = {'name': section_name, 'xpath': xpath, 'columns': [], 'is_repeat': is_repeat, self.CURRENT_INDEX_META: 0}

    def _add_column_to_section(self, sheet_name, column):
        # Accepts either a SurveyElement (its xpath is used) or a plain
        # string xpath; de-duplicates within the section.
        section = self.sections[sheet_name]
        xpath = None
        if (column, SurveyElement):  # presumably isinstance(...) -- confirm
            xpath = ()  # presumably column's abbreviated xpath -- confirm
        elif (column, str):  # presumably isinstance(...) -- confirm
            xpath = column
        if (not xpath):
            raise ()  # exception type stripped -- confirm upstream
        if (xpath not in section['columns']):
            (xpath)  # presumably section['columns'].append(xpath) -- confirm
class CSVDataFrameBuilder(AbstractDataFrameBuilder):
    """
    Builds flat CSV exports of mongo records, flattening repeat groups
    into indexed columns.

    NOTE(review): as in the rest of this module, call expressions have
    had their callable stripped (bare argument lists remain). Probable
    originals are noted inline -- confirm against upstream.
    """

    def __init__(self, username, id_string, filter_query=None, group_delimiter=DEFAULT_GROUP_DELIMITER, split_select_multiples=True, binary_select_multiples=False):
        # presumably delegates to AbstractDataFrameBuilder.__init__ -- confirm
        (username, id_string, filter_query, group_delimiter, split_select_multiples, binary_select_multiples)
        self.ordered_columns = ()  # presumably OrderedDict() -- confirm

    def _setup(self):
        ()  # presumably the base class _setup() -- confirm

    @classmethod
    def _reindex(cls, key, value, ordered_columns, parent_prefix=None):
        """
        Flatten list columns by appending an index, otherwise return as
        is.
        """
        d = {}
        if (((value) is list) and ((value) > 0) and (key != NOTES) and (key != ATTACHMENTS)):  # presumably type(value) is list and len(value) > 0 -- confirm
            for (index, item) in (value):  # presumably enumerate(value) -- confirm
                index += 1  # 1-based indices in the generated column names
                if ((item) is dict):  # presumably type(item) is dict -- confirm
                    for (nested_key, nested_val) in ():  # presumably item.items() -- confirm
                        # Build e.g. 'repeat[2]/question' style xpaths.
                        xpaths = [('%s[%s]' % (nested_key[:((key) + (key))], index)), nested_key[(((key) + (key)) + 1):]]  # presumably slices based on len(key) -- confirm
                        xpaths = ('/')  # presumably a '/'-join/split reshaping of xpaths -- confirm
                        new_prefix = xpaths[:(- 1)]
                        if ((nested_val) is list):  # presumably type(nested_val) is list -- confirm
                            # Nested repeat: recurse with the new prefix.
                            ((nested_key, nested_val, ordered_columns, new_prefix))  # presumably d.update(cls._reindex(...)) -- confirm
                        else:
                            if parent_prefix:
                                xpaths[0:(parent_prefix)] = parent_prefix  # presumably len(parent_prefix) -- confirm
                            new_xpath = (xpaths)  # presumably '/'.join(xpaths) -- confirm
                            # Track newly generated columns in order.
                            if (key in (())):  # presumably ordered_columns.keys() -- confirm
                                if (new_xpath not in ordered_columns[key]):
                                    (new_xpath)  # presumably ordered_columns[key].append(new_xpath) -- confirm
                            d[new_xpath] = nested_val
                else:
                    d[key] = value
        elif (key == NOTES):
            d[key] = (value)  # presumably a joined string representation of the notes -- confirm
        elif (key == ATTACHMENTS):
            d[key] = []
        else:
            d[key] = value
        return d

    @classmethod
    def _build_ordered_columns(cls, survey_element, ordered_columns, is_repeating_section=False):
        """
        Build a flat ordered dict of column groups.

        is_repeating_section ensures that child questions of repeating
        sections are not considered columns.
        """
        for child in survey_element.children:
            if (child, Section):  # presumably isinstance(child, Section) -- confirm
                child_is_repeating = False
                if (child, RepeatingSection):  # presumably isinstance(...) -- confirm
                    ordered_columns[()] = []  # presumably keyed on child's abbreviated xpath -- confirm
                    child_is_repeating = True
                (child, ordered_columns, child_is_repeating)  # presumably recurse -- confirm
            elif ((child, Question) and (not (child.type)) and (not is_repeating_section)):
                ordered_columns[()] = None  # presumably keyed on child's abbreviated xpath -- confirm

    def _format_for_dataframe(self, cursor):
        # Flatten each mongo record into one dict per output row.
        if self.split_select_multiples:
            for (key, choices) in (()):  # presumably self.select_multiples.items() -- confirm
                self.ordered_columns[key] = (choices)  # presumably remove_dups_from_list_maintain_order(choices) -- confirm
        for key in self.gps_fields:
            gps_xpaths = (key)  # presumably the additional geopoint xpaths for key -- confirm
            self.ordered_columns[key] = ([key] + gps_xpaths)
        data = []
        for record in cursor:
            if self.split_select_multiples:
                record = (record, self.select_multiples, self.BINARY_SELECT_MULTIPLES)  # presumably self._split_select_multiples(...) -- confirm
            (record, self.gps_fields)  # presumably self._split_gps_fields(...) -- confirm
            (record)  # presumably self._tag_edit_string(record) -- confirm
            flat_dict = {}
            for (key, value) in ():  # presumably record.items() -- confirm
                reindexed = (key, value, self.ordered_columns)  # presumably self._reindex(...) -- confirm
                (reindexed)  # presumably flat_dict.update(reindexed) -- confirm
            if (self.group_delimiter != DEFAULT_GROUP_DELIMITER):
                flat_dict = ((((('/')), v) for (k, v) in ()))  # presumably dict re-keyed with the configured delimiter -- confirm
            (flat_dict)  # presumably data.append(flat_dict) -- confirm
        return data

    def export_to(self, file_or_path, data_frame_max_size=30000):
        from math import ceil
        # Page the mongo query so very large exports stay within memory.
        record_count = ()  # presumably self._query_mongo(count=True) -- confirm
        self.ordered_columns = ()  # presumably OrderedDict() -- confirm
        (self.dd.survey, self.ordered_columns)  # presumably self._build_ordered_columns(...) -- confirm
        datas = []
        num_data_frames = ((((record_count) / (data_frame_max_size))))  # presumably int(ceil(float(record_count) / data_frame_max_size)) -- confirm
        for i in (num_data_frames):  # presumably range(num_data_frames) -- confirm
            cursor = (self.filter_query)  # presumably a paged self._query_mongo(...) -- confirm
            data = (cursor)  # presumably self._format_for_dataframe(cursor) -- confirm
            (data)  # presumably datas.append(data) -- confirm
        columns = (([([xpath] if (cols is None) else cols) for (xpath, cols) in ()]))  # presumably a flattened list over self.ordered_columns.items() -- confirm
        if (self.group_delimiter != DEFAULT_GROUP_DELIMITER):
            columns = [(('/')) for col in columns]  # presumably re-joined with the configured delimiter -- confirm
        columns += [col for col in self.ADDITIONAL_COLUMNS]
        header = True
        if (file_or_path, 'read'):  # presumably hasattr(file_or_path, 'read') -- confirm
            csv_file = file_or_path
            close = False
        else:
            csv_file = (file_or_path, 'wb')  # presumably open(file_or_path, 'wb') -- confirm
            close = True
        for data in datas:
            writer = (data, columns)  # presumably CSVDataFrameWriter(data, columns) -- confirm
            (csv_file)  # presumably writer.write_to_csv(csv_file, header=header) -- confirm
            header = False
        if close:
            ()  # presumably csv_file.close() -- confirm
class XLSDataFrameWriter(object):
    """Thin wrapper turning a batch of record dicts into a DataFrame and
    writing it to one sheet of an ExcelWriter.

    NOTE(review): call targets in this class had been stripped by a
    source transformation; the DataFrame/to_excel calls below are
    reconstructed -- confirm against the upstream project.
    """

    def __init__(self, records, columns):
        # `columns` fixes the column order of the output sheet.
        self.dataframe = DataFrame(records, columns=columns)

    def write_to_excel(self, excel_writer, sheet_name, header=False, index=False):
        """Write this writer's frame into ``excel_writer`` under
        ``sheet_name``; headers and the index column are off by default
        because the caller manages headers across batches."""
        self.dataframe.to_excel(
            excel_writer, sheet_name=sheet_name, header=header, index=index
        )
class CSVDataFrameWriter(object):
    """Wrapper writing export records to CSV, dropping the internal
    mongo bookkeeping columns.

    NOTE(review): call targets in this class had been stripped by a
    source transformation; the DataFrame/to_csv calls and the empty-
    records exception below are reconstructed -- confirm upstream.
    """

    def __init__(self, records, columns):
        # TODO: raise a domain-specific exception for empty record sets
        # and handle it in the view.
        if not len(records) > 0:
            raise ValueError('no records to export')
        self.dataframe = DataFrame(records, columns=columns)
        # Remove bookkeeping columns that should never be exported.
        for col in AbstractDataFrameBuilder.IGNORED_COLUMNS:
            if col in self.dataframe.columns:
                del self.dataframe[col]

    def write_to_csv(self, csv_file, header=True, index=False):
        """Write the frame to ``csv_file`` using the configured NA
        placeholder (settings.NA_REP, falling back to the module
        default)."""
        na_rep = getattr(settings, 'NA_REP', NA_REP)
        self.dataframe.to_csv(
            csv_file, na_rep=na_rep, header=header, index=index
        )
from django.core.urlresolvers import reverse
from guardian.shortcuts import assign_perm, remove_perm
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.viewer.views import instance
class TestInstanceView(TestBase):
    """Permission tests for the viewer ``instance`` view."""

    def setUp(self):
        TestBase.setUp(self)
        self._create_user_and_login()
        self._publish_transportation_form_and_submit_instance()
        self.url = reverse(
            instance,
            kwargs={"username": self.user.username, "id_string": self.xform.id_string},
        )

    def test_instance_view(self):
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_restrict_for_anon(self):
        response = self.anon.get(self.url)
        self.assertEqual(response.status_code, 403)

    def test_restrict_for_not_owner(self):
        self._create_user_and_login("alice")
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 403)

    def test_allow_if_shared(self):
        self.xform.shared_data = True
        self.xform.save()
        response = self.anon.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_allow_if_user_given_permission(self):
        self._create_user_and_login("alice")
        assign_perm("change_xform", self.user, self.xform)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)

    def test_disallow_if_user_permission_revoked(self):
        self._create_user_and_login("alice")
        assign_perm("change_xform", self.user, self.xform)
        # BUG FIX: the pre-revocation response was fetched but never
        # asserted; check it so the test proves the grant was effective
        # before verifying the revocation.
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        remove_perm("change_xform", self.user, self.xform)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 403)
|
import pytz
from datetime import datetime
from django.conf import settings
# 10,000,000 bytes
DEFAULT_CONTENT_LENGTH = getattr(settings, "DEFAULT_CONTENT_LENGTH", 10000000)
class OpenRosaHeadersMixin(object):
    """Mixin producing the standard OpenRosa 1.0 response headers."""

    def get_openrosa_headers(self, request, location=True):
        """Return the OpenRosa header dict for ``request``.

        Includes the RFC-1123-style Date (in the configured time zone),
        the protocol version, and the accepted content length; when
        ``location`` is True, a Location header echoing the request's
        absolute URI is added as well.
        """
        local_tz = pytz.timezone(settings.TIME_ZONE)
        timestamp = datetime.now(local_tz).strftime("%a, %d %b %Y %H:%M:%S %Z")
        headers = {
            "Date": timestamp,
            "X-OpenRosa-Version": "1.0",
            "X-OpenRosa-Accept-Content-Length": DEFAULT_CONTENT_LENGTH,
        }
        if location:
            headers["Location"] = request.build_absolute_uri(request.path)
        return headers
|
from rest_framework import serializers
from rest_framework.exceptions import ParseError
class TagListSerializer(serializers.Field):
    """DRF field (de)serializing a list of tag names."""

    def to_internal_value(self, data):
        """Validate incoming data; anything but a list is rejected."""
        # IDIOM FIX: isinstance replaces the exact `type(...) is not
        # list` check, so list subclasses are accepted too.
        if not isinstance(data, list):
            raise ParseError("expected a list of data")
        return data

    def to_representation(self, obj):
        """Render a tag manager/queryset as a plain list of names."""
        if obj is None:
            return super(TagListSerializer, self).to_representation(obj)
        if not isinstance(obj, list):
            # Not already a list: assume a related manager/queryset and
            # flatten it to its tag names.
            return list(obj.values_list("name", flat=True))
        return obj
|
from datetime import date, datetime
import os
import pytz
import re
import tempfile
import traceback
from xml.dom import Node
from xml.parsers.expat import ExpatError
from dict2xml import dict2xml
from django.conf import settings
from django.core.exceptions import ValidationError, PermissionDenied
from django.core.files.storage import get_storage_class
from django.core.mail import mail_admins
from django.core.servers.basehttp import FileWrapper
from django.contrib.auth.models import User
from django.db import IntegrityError, transaction
from django.db.models.signals import pre_delete
from django.http import HttpResponse, HttpResponseNotFound, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.utils.encoding import DjangoUnicodeDecodeError
from django.utils.translation import ugettext as _
from django.utils import timezone
from modilabs.utils.subprocess_timeout import ProcessTimedOut
from pyxform.errors import PyXFormError
from pyxform.xform2json import create_survey_element_from_xml
import sys
from onadata.apps.main.models import UserProfile
from onadata.apps.logger.models import Attachment
from onadata.apps.logger.models import Instance
from onadata.apps.logger.models.instance import (
FormInactiveError,
InstanceHistory,
get_id_string_from_xml_str,
)
from onadata.apps.logger.models import XForm
from onadata.apps.logger.models.xform import XLSFormError
from onadata.apps.logger.xform_instance_parser import (
InstanceEmptyError,
InstanceInvalidUserError,
InstanceMultipleNodeError,
DuplicateInstance,
clean_and_parse_xml,
get_uuid_from_xml,
get_deprecated_uuid_from_xml,
get_submission_date_from_xml,
)
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.parsed_instance import (
_remove_from_mongo,
xform_instances,
ParsedInstance,
)
from onadata.libs.utils import common_tags
from onadata.libs.utils.model_tools import queryset_iterator, set_uuid
# Header names/values required by the OpenRosa request/response protocol.
OPEN_ROSA_VERSION_HEADER = "X-OpenRosa-Version"
HTTP_OPEN_ROSA_VERSION_HEADER = "HTTP_X_OPENROSA_VERSION"
OPEN_ROSA_VERSION = "1.0"
# Defaults applied to every OpenRosa response.
DEFAULT_CONTENT_TYPE = "text/xml; charset=utf-8"
DEFAULT_CONTENT_LENGTH = settings.DEFAULT_CONTENT_LENGTH
# Matches the <formhub><uuid>...</uuid></formhub> block embedded in
# submission XML; group 1 captures the form's uuid.
uuid_regex = re.compile(
    r"<formhub>\s*<uuid>\s*([^<]+)\s*</uuid>\s*</formhub>", re.DOTALL
)
# MongoDB collection holding the parsed submission documents.
mongo_instances = settings.MONGO_DB.instances
def _get_instance(xml, new_uuid, submitted_by, status, xform):
    """Create a new Instance for *xml*, or update an existing one when
    this submission is an edit (its deprecated uuid matches a stored
    instance).

    Edits require edit permission and archive the previous XML in
    InstanceHistory before overwriting it.
    """
    # check if its an edit submission: the deprecatedID in the XML points
    # at the instance being replaced
    old_uuid = get_deprecated_uuid_from_xml(xml)
    instances = Instance.objects.filter(uuid=old_uuid)
    if instances:
        # edits
        check_edit_submission_permissions(submitted_by, xform)
        instance = instances[0]
        # keep an audit copy of the pre-edit XML
        InstanceHistory.objects.create(
            xml=instance.xml, xform_instance=instance, uuid=old_uuid
        )
        instance.xml = xml
        instance.uuid = new_uuid
        instance.save()
    else:
        # new submission
        instance = Instance.objects.create(
            xml=xml, user=submitted_by, status=status, xform=xform
        )
    return instance
def dict2xform(jsform, form_id):
    """Serialize the dict *jsform* as an XML instance document.

    The payload is wrapped in a single root element named *form_id*
    that also carries an ``id`` attribute of the same value.
    """
    root = {"form_id": form_id}
    header = "<?xml version='1.0' ?>\n<%(form_id)s id='%(form_id)s'>\n" % root
    footer = "\n</%(form_id)s>" % root
    return "".join([header, dict2xml(jsform), footer])
def get_uuid_from_submission(xml):
    """Extract the formhub uuid embedded in submission *xml*.

    Returns the uuid captured from the first
    ``<formhub><uuid>...</uuid></formhub>`` block, or ``None`` when the
    XML carries no such block.
    """
    # parse UUID from uploaded XML; regex.split yields
    # [prefix, captured_uuid, suffix] on a match, so length > 1 means the
    # uuid group was captured.
    split_xml = uuid_regex.split(xml)
    # A conditional expression replaces the old ``len(...) and x or None``
    # idiom, which would wrongly yield None for any falsy (for example
    # whitespace-only) capture.
    return split_xml[1] if len(split_xml) > 1 else None
def get_xform_from_submission(xml, username, uuid=None):
    """Resolve the XForm a submission belongs to.

    Lookup order: the formhub uuid passed in (or embedded in the XML),
    then the form's id_string scoped to *username*.

    :raises InstanceInvalidUserError: when neither a username nor a uuid
        is available.
    :raises Http404: when the id_string lookup finds no form.
    """
    # check alternative form submission ids
    uuid = uuid or get_uuid_from_submission(xml)
    if not username and not uuid:
        raise InstanceInvalidUserError()
    if uuid:
        # try find the form by its uuid which is the ideal condition
        if XForm.objects.filter(uuid=uuid).count() > 0:
            xform = XForm.objects.get(uuid=uuid)
            return xform
    # fall back to the id_string declared inside the XML itself
    id_string = get_id_string_from_xml_str(xml)
    return get_object_or_404(XForm, id_string__exact=id_string, user__username=username)
def _has_edit_xform_permission(xform, user):
    """Return True when *user* holds the ``change_xform`` object
    permission on *xform*; any non XForm/User pair is denied."""
    if not (isinstance(xform, XForm) and isinstance(user, User)):
        return False
    return user.has_perm("logger.change_xform", xform)
def check_edit_submission_permissions(request_user, xform):
    """Raise PermissionDenied unless *request_user* may edit submissions
    of *xform*.

    Only enforced for authenticated users when the form owner's profile
    requires authentication; anonymous edits fall through untouched.
    """
    if xform and request_user and request_user.is_authenticated():
        # get_or_create guards against owners that have no profile yet
        requires_auth = UserProfile.objects.get_or_create(user=xform.user)[
            0
        ].require_auth
        has_edit_perms = _has_edit_xform_permission(xform, request_user)
        if requires_auth and not has_edit_perms:
            raise PermissionDenied(
                _(
                    "%(request_user)s is not allowed to make edit submissions "
                    "to %(form_user)s's %(form_title)s form."
                    % {
                        "request_user": request_user,
                        "form_user": xform.user,
                        "form_title": xform.title,
                    }
                )
            )
def check_submission_permissions(request, xform):
    """Check that permission is required and the request user has permission.

    The user does not have permission iff:
        * the user is authed,
        * either the profile or the form require auth,
        * the xform user is not submitting.

    Since we have a username, the Instance creation logic will
    handle checking for the form's existence by its id_string.

    :returns: None.
    :raises: PermissionDenied based on the above criteria.
    """
    profile = UserProfile.objects.get_or_create(user=xform.user)[0]
    # "/submission" is the OpenRosa endpoint, which always enforces auth
    if (
        request
        and (
            profile.require_auth or xform.require_auth or request.path == "/submission"
        )
        and xform.user != request.user
        and not request.user.has_perm("report_xform", xform)
    ):
        raise PermissionDenied(
            _(
                "%(request_user)s is not allowed to make submissions "
                "to %(form_user)s's %(form_title)s form."
                % {
                    "request_user": request.user,
                    "form_user": xform.user,
                    "form_title": xform.title,
                }
            )
        )
def save_submission(
    xform, xml, media_files, new_uuid, submitted_by, status, date_created_override
):
    """Persist one submission: the Instance, its media Attachments and
    its ParsedInstance record.

    :param date_created_override: optional datetime; falls back to the
        submission date embedded in the XML.  Naive values are assumed
        to be UTC.
    :returns: the saved Instance.
    """
    if not date_created_override:
        date_created_override = get_submission_date_from_xml(xml)
    instance = _get_instance(xml, new_uuid, submitted_by, status, xform)
    for f in media_files:
        Attachment.objects.get_or_create(
            instance=instance, media_file=f, mimetype=f.content_type
        )
    # override date created if required
    if date_created_override:
        if not timezone.is_aware(date_created_override):
            # default to utc?
            date_created_override = timezone.make_aware(
                date_created_override, timezone.utc
            )
        instance.date_created = date_created_override
        instance.save()
    if instance.xform is not None:
        # creation already parses; only re-save (synchronously) when the
        # ParsedInstance pre-existed.  NOTE: `async` kwarg is Python 2 era.
        pi, created = ParsedInstance.objects.get_or_create(instance=instance)
        if not created:
            pi.save(async=False)
    return instance
def create_instance(
    username,
    xml_file,
    media_files,
    status="submitted_via_web",
    uuid=None,
    date_created_override=None,
    request=None,
):
    """
    Create (or deduplicate) an Instance from an uploaded submission.

    I used to check if this file had been submitted already, I've
    taken this out because it was too slow. Now we're going to create
    a way for an admin to mark duplicate instances. This should
    simplify things a bit.
    Submission cases:
        If there is a username and no uuid, submitting an old ODK form.
        If there is a username and a uuid, submitting a new ODK form.

    :raises DuplicateInstance: for an exact XML duplicate of a form that
        records a start time, or when the submission uuid already exists
        (after its extra attachments are saved).
    """
    with transaction.atomic():
        instance = None
        submitted_by = (
            request.user if request and request.user.is_authenticated() else None
        )
        if username:
            username = username.lower()
        xml = xml_file.read()
        xform = get_xform_from_submission(xml, username, uuid)
        check_submission_permissions(request, xform)
        # exact byte-for-byte duplicates for the same form owner
        existing_instance_count = Instance.objects.filter(
            xml=xml, xform__user=xform.user
        ).count()
        if existing_instance_count > 0:
            existing_instance = Instance.objects.filter(
                xml=xml, xform__user=xform.user
            )[0]
            if not existing_instance.xform or existing_instance.xform.has_start_time:
                # Ignore submission as a duplicate IFF
                # * a submission's XForm collects start time
                # * the submitted XML is an exact match with one that
                #   has already been submitted for that user.
                raise DuplicateInstance()
        # get new and deprecated uuid's
        new_uuid = get_uuid_from_xml(xml)
        duplicate_instances = Instance.objects.filter(uuid=new_uuid)
        if duplicate_instances:
            # ensure we have saved the extra attachments
            for f in media_files:
                Attachment.objects.get_or_create(
                    instance=duplicate_instances[0],
                    media_file=f,
                    mimetype=f.content_type,
                )
        else:
            instance = save_submission(
                xform,
                xml,
                media_files,
                new_uuid,
                submitted_by,
                status,
                date_created_override,
            )
            return instance
    if duplicate_instances:
        # We are now outside the atomic block, so we can raise an exception
        # without rolling back the extra attachments we created earlier
        # NB: Since `ATOMIC_REQUESTS` is set at the database level, everything
        # could still be rolled back if the calling view fails to handle an
        # exception
        raise DuplicateInstance()
def safe_create_instance(username, xml_file, media_files, uuid, request):
    """Create an instance and catch exceptions.

    Each known failure mode is mapped to the matching OpenRosa HTTP
    response (400/403/404/405, or 202 for a duplicate submission).

    :returns: A list [error, instance] where error is None if there was no
        error.
    """
    error = instance = None
    try:
        instance = create_instance(
            username, xml_file, media_files, uuid=uuid, request=request
        )
    except InstanceInvalidUserError:
        error = OpenRosaResponseBadRequest(_("Username or ID required."))
    except InstanceEmptyError:
        error = OpenRosaResponseBadRequest(
            _("Received empty submission. No instance was created")
        )
    except FormInactiveError:
        error = OpenRosaResponseNotAllowed(_("Form is not active"))
    except XForm.DoesNotExist:
        error = OpenRosaResponseNotFound(_("Form does not exist on this account"))
    except ExpatError as e:
        error = OpenRosaResponseBadRequest(_("Improperly formatted XML."))
    except DuplicateInstance:
        # 202 Accepted: tell the client the data is already stored
        response = OpenRosaResponse(_("Duplicate submission"))
        response.status_code = 202
        response["Location"] = request.build_absolute_uri(request.path)
        error = response
    except PermissionDenied as e:
        error = OpenRosaResponseForbidden(e)
    except InstanceMultipleNodeError as e:
        error = OpenRosaResponseBadRequest(e)
    except DjangoUnicodeDecodeError:
        error = OpenRosaResponseBadRequest(
            _("File likely corrupted during " "transmission, please try later.")
        )
    return [error, instance]
def report_exception(subject, info, exc_info=None):
    """Report an error to the site admins, or to stdout when running in
    debug/testing mode.

    When *exc_info* (a ``sys.exc_info()`` triple) is given, the message
    contains the exception class, value and the full traceback;
    otherwise *info* is used verbatim.
    """
    if exc_info:
        exc_class, exc_value = exc_info[:2]
        message = _("Exception in request:" " %(class)s: %(error)s") % {
            "class": exc_class.__name__,
            "error": exc_value,
        }
        message += "".join(traceback.format_exception(*exc_info))
    else:
        message = "%s" % info
    if settings.DEBUG or settings.TESTING_MODE:
        # don't spam the admins while developing/testing
        sys.stdout.write("Subject: %s\n" % subject)
        sys.stdout.write("Message: %s\n" % message)
    else:
        mail_admins(subject=subject, message=message)
def response_with_mimetype_and_name(
    mimetype,
    name,
    extension=None,
    show_date=True,
    file_path=None,
    use_local_filesystem=False,
    full_mime=False,
):
    """Build a download response, optionally streaming a stored file.

    :param mimetype: mime subtype, wrapped as ``application/<mimetype>``
        unless *full_mime* is True (then used verbatim).
    :param name: base filename for Content-Disposition; ``None`` yields a
        bare ``attachment;`` disposition.
    :param extension: filename extension; defaults to *mimetype*.
    :param show_date: append today's date to the filename.
    :param file_path: when given, stream this file from default storage,
        or from the local filesystem if *use_local_filesystem*.
    :returns: a StreamingHttpResponse/HttpResponse, or
        HttpResponseNotFound when the file cannot be opened.
    """
    if extension is None:
        extension = mimetype
    if not full_mime:
        mimetype = "application/%s" % mimetype
    if file_path:
        try:
            if not use_local_filesystem:
                default_storage = get_storage_class()()
                wrapper = FileWrapper(default_storage.open(file_path))
                response = StreamingHttpResponse(wrapper, content_type=mimetype)
                response["Content-Length"] = default_storage.size(file_path)
            else:
                # Open in binary mode: these are file downloads (zip, xls,
                # pdf, ...); text mode would corrupt them on some platforms
                # and make the byte count disagree with Content-Length.
                wrapper = FileWrapper(open(file_path, "rb"))
                response = StreamingHttpResponse(wrapper, content_type=mimetype)
                response["Content-Length"] = os.path.getsize(file_path)
        except IOError:
            response = HttpResponseNotFound(_("The requested file could not be found."))
    else:
        response = HttpResponse(content_type=mimetype)
    response["Content-Disposition"] = disposition_ext_and_date(
        name, extension, show_date
    )
    return response
def disposition_ext_and_date(name, extension, show_date=True):
    """Build a Content-Disposition header value for an attachment.

    Returns bare ``"attachment;"`` when *name* is None.  Otherwise the
    filename is ``<name>[_YYYY_MM_DD].<extension>``, with the date suffix
    appended unless *show_date* is False.
    """
    if name is None:
        return "attachment;"
    filename = name
    if show_date:
        filename = "%s_%s" % (filename, date.today().strftime("%Y_%m_%d"))
    return "attachment; filename=%s.%s" % (filename, extension)
def store_temp_file(data):
    """Write *data* into an anonymous temporary file and return it.

    The file is rewound to offset 0 so callers can read the data back
    immediately.  The previous implementation closed the file in a
    ``finally`` block, which always returned a closed, unusable handle;
    the file is now closed only when writing fails.

    :param data: bytes to store.
    :returns: an open, rewound temporary file object.
    """
    tmp = tempfile.TemporaryFile()
    try:
        tmp.write(data)
        tmp.seek(0)
    except Exception:
        # don't leak the handle if the write/seek blows up
        tmp.close()
        raise
    return tmp
def publish_form(callback):
    """Invoke *callback* (which should create/update a form) and translate
    known failure modes into ``{"type": "alert-error", "text": ...}``
    dicts suitable for direct display in the UI.

    Unknown exceptions are re-raised unless they carry ODK Validate
    output (see the final handler).
    """
    try:
        return callback()
    except (PyXFormError, XLSFormError) as e:
        return {"type": "alert-error", "text": str(e)}
    except IntegrityError as e:
        return {
            "type": "alert-error",
            "text": _("Form with this id or SMS-keyword already exists."),
        }
    except ValidationError as e:
        # on clone invalid URL
        return {
            "type": "alert-error",
            "text": _("Invalid URL format."),
        }
    except AttributeError as e:
        # form.publish returned None, not sure why...
        return {"type": "alert-error", "text": str(e)}
    except ProcessTimedOut as e:
        # catch timeout errors
        return {
            "type": "alert-error",
            "text": _("Form validation timeout, please try again."),
        }
    except Exception as e:
        # TODO: Something less horrible. This masks storage backend
        # `ImportError`s and who knows what else
        # ODK validation errors are vanilla errors and it masks a lot of regular
        # errors if we try to catch it so let's catch it, BUT reraise it
        # if we don't see typical ODK validation error messages in it.
        # NOTE(review): `e.message` is Python 2 only.
        if "ODK Validate Errors" not in e.message:
            raise
        # error in the XLS file; show an error to the user
        return {"type": "alert-error", "text": str(e)}
def publish_xls_form(xls_file, user, id_string=None):
    """Create or update a DataDictionary from *xls_file*.

    When *id_string* is given the matching form owned by *user* is
    updated in place; otherwise a brand-new DataDictionary is created.
    """
    if not id_string:
        # brand-new form
        return DataDictionary.objects.create(user=user, xls=xls_file)
    # replace the spreadsheet on an existing form
    data_dictionary = DataDictionary.objects.get(user=user, id_string=id_string)
    data_dictionary.xls = xls_file
    data_dictionary.save()
    return data_dictionary
def publish_xml_form(xml_file, user, id_string=None):
    """Create or update a DataDictionary from a raw XForm XML file.

    The XML is parsed (via pyxform) into its JSON representation; the
    form's uuid is (re)generated and written back into the XML before
    saving.  With *id_string*, the existing form owned by *user* is
    updated; otherwise a new DataDictionary is created.
    """
    xml = xml_file.read()
    survey = create_survey_element_from_xml(xml)
    form_json = survey.to_json()
    if id_string:
        # update an existing form in place
        dd = DataDictionary.objects.get(user=user, id_string=id_string)
        dd.xml = xml
        dd.json = form_json
        dd._mark_start_time_boolean()
        set_uuid(dd)
        dd._set_uuid_in_xml()
    else:
        dd = DataDictionary(user=user, xml=xml, json=form_json)
        dd._mark_start_time_boolean()
        set_uuid(dd)
        # new forms derive their root node name from the file name
        dd._set_uuid_in_xml(file_name=xml_file.name)
    dd.save()
    return dd
class BaseOpenRosaResponse(HttpResponse):
    """HttpResponse pre-populated with the headers required by the
    OpenRosa protocol (version, date, accepted content length)."""

    # OpenRosa submissions answer 201 Created on success
    status_code = 201

    def __init__(self, *args, **kwargs):
        super(BaseOpenRosaResponse, self).__init__(*args, **kwargs)
        self[OPEN_ROSA_VERSION_HEADER] = OPEN_ROSA_VERSION
        # RFC 1123-style date, rendered in the server's configured timezone
        tz = pytz.timezone(settings.TIME_ZONE)
        dt = datetime.now(tz).strftime("%a, %d %b %Y %H:%M:%S %Z")
        self["Date"] = dt
        self["X-OpenRosa-Accept-Content-Length"] = DEFAULT_CONTENT_LENGTH
        self["Content-Type"] = DEFAULT_CONTENT_TYPE
class OpenRosaResponse(BaseOpenRosaResponse):
    """OpenRosa response whose body is wrapped in the standard
    ``<OpenRosaResponse><message>`` XML envelope."""

    status_code = 201

    def __init__(self, *args, **kwargs):
        super(OpenRosaResponse, self).__init__(*args, **kwargs)
        # wrap content around xml; self.content holds whatever body was
        # passed to the constructor
        self.content = (
            """<?xml version='1.0' encoding='UTF-8' ?>
<OpenRosaResponse xmlns="http://openrosa.org/http/response">
        <message nature="">%s</message>
</OpenRosaResponse>"""
            % self.content
        )
class OpenRosaResponseNotFound(OpenRosaResponse):
    # 404: the requested form/resource does not exist
    status_code = 404
class OpenRosaResponseBadRequest(OpenRosaResponse):
    # 400: malformed or incomplete submission
    status_code = 400
class OpenRosaResponseNotAllowed(OpenRosaResponse):
    # 405: the form exists but is not accepting submissions
    status_code = 405
class OpenRosaResponseForbidden(OpenRosaResponse):
    # 403: the requesting user lacks permission
    status_code = 403
def inject_instanceid(xml_str, uuid):
    """Ensure *xml_str* carries a ``<meta><instanceID>`` element.

    A document that already contains a uuid is returned untouched;
    otherwise the ``meta`` and ``instanceID`` elements are created as
    needed, the text ``uuid:<uuid>`` is appended, and the re-serialized
    XML is returned.
    """
    if get_uuid_from_xml(xml_str) is not None:
        # already identifies itself — leave it alone
        return xml_str
    xml = clean_and_parse_xml(xml_str)
    children = xml.childNodes
    if children.length == 0:
        raise ValueError(_("XML string must have a survey element."))
    # check if we have a meta tag under the survey root
    survey_node = children.item(0)
    meta_tags = [
        n
        for n in survey_node.childNodes
        if n.nodeType == Node.ELEMENT_NODE and n.tagName.lower() == "meta"
    ]
    if meta_tags:
        meta_tag = meta_tags[0]
    else:
        meta_tag = xml.createElement("meta")
        xml.documentElement.appendChild(meta_tag)
    # check if we have an instanceID tag inside meta
    uuid_tags = [
        n
        for n in meta_tag.childNodes
        if n.nodeType == Node.ELEMENT_NODE and n.tagName == "instanceID"
    ]
    if uuid_tags:
        uuid_tag = uuid_tags[0]
    else:
        uuid_tag = xml.createElement("instanceID")
        meta_tag.appendChild(uuid_tag)
    # insert the uuid text and re-serialize
    uuid_tag.appendChild(xml.createTextNode("uuid:%s" % uuid))
    return xml.toxml()
def update_mongo_for_xform(xform, only_update_missing=True):
    """Re-sync *xform*'s submissions into the mongo instances collection.

    With *only_update_missing* (the default) only instances absent from
    mongo are re-parsed; otherwise the form's mongo records are wiped and
    every instance is re-synced.  Progress is written to stdout and mongo
    is fsynced every 1000 records.
    """
    instance_ids = set([i.id for i in Instance.objects.only("id").filter(xform=xform)])
    sys.stdout.write("Total no of instances: %d\n" % len(instance_ids))
    mongo_ids = set()
    user = xform.user
    # mongo documents are keyed on "<username>_<id_string>"
    userform_id = "%s_%s" % (user.username, xform.id_string)
    if only_update_missing:
        sys.stdout.write("Only updating missing mongo instances\n")
        mongo_ids = set(
            [
                rec[common_tags.ID]
                for rec in mongo_instances.find(
                    {common_tags.USERFORM_ID: userform_id}, {common_tags.ID: 1}
                )
            ]
        )
        sys.stdout.write("Total no of mongo instances: %d\n" % len(mongo_ids))
        # get the difference
        instance_ids = instance_ids.difference(mongo_ids)
    else:
        # clear mongo records
        mongo_instances.remove({common_tags.USERFORM_ID: userform_id})
    # get instances
    sys.stdout.write("Total no of instances to update: %d\n" % len(instance_ids))
    instances = Instance.objects.only("id").in_bulk([id for id in instance_ids])
    total = len(instances)
    done = 0
    for id, instance in list(instances.items()):
        # saving the ParsedInstance (synchronously) re-parses into mongo
        (pi, created) = ParsedInstance.objects.get_or_create(instance=instance)
        pi.save(async=False)
        done += 1
        # if 1000 records are done, flush mongo
        if (done % 1000) == 0:
            sys.stdout.write("Updated %d records, flushing MongoDB...\n" % done)
            settings.MONGO_CONNECTION.admin.command({"fsync": 1})
        progress = "\r%.2f %% done..." % ((float(done) / float(total)) * 100)
        sys.stdout.write(progress)
        sys.stdout.flush()
    # flush mongo again when done
    settings.MONGO_CONNECTION.admin.command({"fsync": 1})
    sys.stdout.write(
        "\nUpdated %s\n------------------------------------------\n" % xform.id_string
    )
def mongo_sync_status(remongo=False, update_all=False, user=None, xform=None):
    """Check the status of records in the mysql db versus mongodb. At a
    minimum, return a report (string) of the results.

    Optionally, take action to correct the differences, based on these
    parameters, if present and defined:

    remongo    -> if True, update the records missing in mongodb
                  (default: False)
    update_all -> if True, update all the relevant records (default: False)
    user       -> if specified, apply only to the forms for the given user
                  (default: None)
    xform      -> if specified, apply only to the given form (default: None)
    """
    qs = XForm.objects.only("id_string", "user").select_related("user")
    if user and not xform:
        qs = qs.filter(user=user)
    elif user and xform:
        qs = qs.filter(user=user, id_string=xform.id_string)
    else:
        qs = qs.all()
    total = qs.count()
    found = 0
    done = 0
    total_to_remongo = 0
    report_string = ""
    # NOTE(review): the loop variables below shadow the `xform`/`user`
    # parameters; intentional, since the queryset was already built.
    for xform in queryset_iterator(qs, 100):
        # get the count
        user = xform.user
        instance_count = Instance.objects.filter(xform=xform).count()
        userform_id = "%s_%s" % (user.username, xform.id_string)
        mongo_count = mongo_instances.find(
            {common_tags.USERFORM_ID: userform_id}
        ).count()
        if instance_count != mongo_count or update_all:
            line = (
                "user: %s, id_string: %s\nInstance count: %d\t"
                "Mongo count: %d\n---------------------------------"
                "-----\n"
                % (user.username, xform.id_string, instance_count, mongo_count)
            )
            report_string += line
            found += 1
            total_to_remongo += instance_count - mongo_count
            # should we remongo
            # NOTE(review): this condition reduces to `if remongo:` — the
            # second disjunct already implies the first.
            if remongo or (remongo and update_all):
                if update_all:
                    sys.stdout.write(
                        "Updating all records for %s\n--------------------"
                        "---------------------------\n" % xform.id_string
                    )
                else:
                    sys.stdout.write(
                        "Updating missing records for %s\n----------------"
                        "-------------------------------\n" % xform.id_string
                    )
                update_mongo_for_xform(xform, only_update_missing=not update_all)
        done += 1
        sys.stdout.write("%.2f %% done ...\r" % ((float(done) / float(total)) * 100))
    # only show stats if we are not updating mongo, the update function
    # will show progress
    if not remongo:
        line = (
            "Total # of forms out of sync: %d\n"
            "Total # of records to remongo: %d\n" % (found, total_to_remongo)
        )
        report_string += line
    return report_string
def remove_xform(xform):
    """Delete *xform*, its related models and its mongo records.

    The ParsedInstance pre_delete signal is disconnected first so each
    instance deletion does not also issue a per-record mongo remove; the
    form's records are removed from mongo in one query instead.
    """
    # disconnect parsed instance pre delete signal
    pre_delete.disconnect(_remove_from_mongo, sender=ParsedInstance)
    # delete instances from mongo db (j=True waits for the journal commit)
    query = {
        ParsedInstance.USERFORM_ID: "%s_%s" % (xform.user.username, xform.id_string)
    }
    xform_instances.remove(query, j=True)
    # delete xform, and all related models
    xform.delete()
    # reconnect parsed instance pre delete signal?
    pre_delete.connect(_remove_from_mongo, sender=ParsedInstance)
|
# coding:utf-8
import os
import posixpath # TODO: Windows?
import logging
import pkg_resources
from cactus.utils.packaging import pkg_walk
logger = logging.getLogger(__name__)
def bootstrap_from_package(path):
    """Copy the packaged "skeleton" site template into *path*.

    Walks the skeleton tree bundled with the ``cactus`` package,
    recreating its directory structure under *path* and copying every
    file byte-for-byte.
    """
    for dir_, sub_dirs, filenames in pkg_walk("cactus", "skeleton"):
        # map the package-relative directory onto the destination path
        base_path = os.path.join(path, dir_.split("skeleton", 1)[1].lstrip("/"))
        for sub_dir in sub_dirs:
            dir_path = os.path.join(base_path, sub_dir)
            logger.debug("Creating {0}".format(dir_path))
            os.makedirs(dir_path)
        for filename in filenames:
            resource_path = posixpath.join(dir_, filename)
            file_path = os.path.join(base_path, filename)
            logger.debug("Copying {0} to {1}".format(resource_path, file_path))
            # resource_string returns the resource's bytes directly and,
            # unlike resource_stream(...).read(), does not leak an open
            # stream handle.
            with open(file_path, "wb") as f:
                f.write(pkg_resources.resource_string("cactus", resource_path))
|
import logging
from cactus.listener.polling import PollingListener
logger = logging.getLogger(__name__)
# Prefer the native FSEvents-based listener (macOS); if the bindings are
# missing or fail to load on this platform, fall back to polling.
try:
    from cactus.listener.mac import FSEventsListener as Listener
except (ImportError, OSError):
    logger.debug(
        "Failed to load FSEventsListener, falling back to PollingListener",
        exc_info=True,
    )
    Listener = PollingListener
|
import os
import os.path
import shutil
import tempfile
# Feature-detect working symlink support: the API must both exist and be
# usable (on Windows os.symlink can exist yet fail without the symlink
# privilege).
has_symlink = False
compat_test_dir = tempfile.mkdtemp()
src = os.path.join(compat_test_dir, "src")
dst = os.path.join(compat_test_dir, "dst")
try:
    with open(src, "w"):
        pass
    try:
        os.symlink(src, dst)
    except (AttributeError, OSError):
        # AttributeError if symlink is not available (Python <= 3.2 on Windows)
        # OSError if we don't have the symlink privilege (on Windows)
        pass  # Leave has_symlink false
    else:
        has_symlink = True
finally:
    # Always remove the probe directory, even if the probe itself raises;
    # previously a failure here leaked the temporary directory.
    shutil.rmtree(compat_test_dir)
|
# coding:utf-8
import os
from cactus.tests import SiteTestCase
class TestLegacyContext(SiteTestCase):
    """Verify the legacy template context variables (STATIC_URL, ROOT_URL,
    PAGE_URL) render as relative paths, both with the default layout and
    with pretty URLs enabled."""

    def setUp(self):
        super(TestLegacyContext, self).setUp()
        # One page per context variable, at the site root and one level deep.
        os.mkdir(os.path.join(self.site.page_path, "test"))
        with open(os.path.join(self.site.page_path, "static.html"), "w") as f:
            f.write("{{ STATIC_URL }}")
        with open(os.path.join(self.site.page_path, "test", "static.html"), "w") as f:
            f.write("{{ STATIC_URL }}")
        with open(os.path.join(self.site.page_path, "root.html"), "w") as f:
            f.write("{{ ROOT_URL }}")
        with open(os.path.join(self.site.page_path, "test", "root.html"), "w") as f:
            f.write("{{ ROOT_URL }}")
        with open(os.path.join(self.site.page_path, "page.html"), "w") as f:
            f.write("{{ PAGE_URL }}")

    def test_context(self):
        """Default build: URLs are relative to each page's directory."""
        self.site.build()
        with open(os.path.join(self.site.build_path, "static.html")) as f:
            self.assertEqual(f.read(), "./static")
        with open(os.path.join(self.site.build_path, "test", "static.html")) as f:
            self.assertEqual(f.read(), "../static")
        with open(os.path.join(self.site.build_path, "root.html")) as f:
            self.assertEqual(f.read(), ".")
        with open(os.path.join(self.site.build_path, "test", "root.html")) as f:
            self.assertEqual(f.read(), "..")
        with open(os.path.join(self.site.build_path, "page.html")) as f:
            self.assertEqual(f.read(), "page.html")

    def test_pretty_urls(self):
        """Pretty URLs: pages become <name>/index.html, one level deeper."""
        self.site.prettify_urls = True
        self.site.build()
        with open(
            os.path.join(self.site.build_path, "test", "static", "index.html")
        ) as f:
            self.assertEqual(f.read(), "../../static")
        with open(os.path.join(self.site.build_path, "root", "index.html")) as f:
            self.assertEqual(f.read(), "..")
        with open(
            os.path.join(self.site.build_path, "test", "root", "index.html")
        ) as f:
            self.assertEqual(f.read(), "../..")
        with open(os.path.join(self.site.build_path, "page", "index.html")) as f:
            self.assertEqual(f.read(), "page/")
|
# -*- coding: utf-8 -*-
from flask_apputils.routing import get_router, TemplateBlueprint
# Blueprint whose views render templates named after their endpoints.
web = TemplateBlueprint("web", __name__)
# Router helper binding URL rules to same-named functions in this module.
route = get_router(web, __name__)
route("/", "index")
route("/home", "home")
def index():
    """View bound to the site root ("/")."""
    return "Index Page"
def home():
    """View bound to "/home"."""
    return "Home Page"
|
import unittest
from functools import wraps
from flask import Flask
from flask.ext.jsontools import jsonapi, FlaskJsonClient
from flask.ext.jsontools import MethodView, methodview, RestfulView
def stupid(f):
    """No-op decorator used to prove decorator stacking keeps working."""

    @wraps(f)
    def wrapper(*args, **kwargs):
        # delegate straight through without touching args or result
        return f(*args, **kwargs)

    return wrapper
class CrudView(MethodView):
    """MethodView whose GET routing depends on whether `id` was captured:
    list() serves collection GETs, get() serves item GETs, and custom()
    answers a non-standard CUSTOM verb on items.  The @stupid wrappers
    verify that foreign decorators don't break @methodview dispatch."""

    decorators = (jsonapi,)

    @stupid
    @methodview("GET", ifnset=("id",))
    def list(self):  # listing
        return [1, 2, 3]

    @methodview("GET", ifset="id")
    @stupid
    def get(self, id):
        return id

    @stupid
    @methodview("CUSTOM", ifset="id")
    @stupid
    def custom(self, id):
        return True
class RestView(RestfulView):
    """RestfulView exposing the standard CRUD verbs plus two custom HTTP
    methods; each handler returns a sentinel payload the tests assert on."""

    decorators = (jsonapi,)

    def list(self):
        return [1, 2, 3]

    def create(self):
        return "ok"

    def get(self, id):
        return id

    def replace(self, id):
        return "re"

    def update(self, id):
        return "up"

    def delete(self, id):
        return "del"

    @methodview("CUSTOM", ifset="id")
    def custom(self, id):
        return ":)"

    @methodview("CUSTOM2", ifset="id")
    def custom2(self, id):
        return ":))"
class RestViewSubclass(RestView):
    """Subclass used to prove routing still works through inheritance."""

    # single-column primary key used by RestfulView routing
    primary_key = ("id",)
    custom2 = None  # override: disables the CUSTOM2 handler (expect 405)
class ViewsTest(unittest.TestCase):
    """End-to-end tests for MethodView/RestfulView routing, both at the
    dispatch-matching level and via real Flask test-client requests."""

    def setUp(self):
        app = Flask(__name__)
        app.test_client_class = FlaskJsonClient
        app.debug = app.testing = True
        CrudView.route_as_view(app, "user", ("/user/", "/user/<int:id>"))
        RestViewSubclass.route_as_view(
            app, "rest", ("/api/", "/api/<int:id>")
        )  # subclass should work as well
        self.app = app

    def _testRequest(self, method, path, expected_code, expected_response=None):
        """Test a request to the app

        :param method: HTTP method
        :param path:
        :type path:
        :param expected_code:
        :type expected_code:
        :param expected_response:
        :type expected_response:
        :return:
        :rtype:
        """
        with self.app.test_client() as c:
            rv = c.open(path, method=method)
            self.assertEqual(rv.status_code, expected_code)
            if expected_response is not None:
                self.assertEqual(rv.get_json(), expected_response)

    def test_method_view(self):
        """Test MethodView(), low-level testing"""
        # matches(verb, captured-route-params) drives the dispatch table
        self.assertTrue(CrudView.list._methodview.matches("GET", {"a"}))
        self.assertFalse(CrudView.list._methodview.matches("GET", {"id", "a"}))
        self.assertTrue(CrudView.get._methodview.matches("GET", {"id", "a"}))
        self.assertFalse(CrudView.get._methodview.matches("GET", {"a"}))
        self.assertTrue(CrudView.custom._methodview.matches("CUSTOM", {"id", "a"}))
        self.assertFalse(CrudView.custom._methodview.matches("CUSTOM", {"a"}))
        self.assertFalse(CrudView.custom._methodview.matches("????", {"a"}))

    def test_method_view_requests(self):
        """Test MethodView with real requests"""
        self._testRequest("GET", "/user/", 200, [1, 2, 3])
        self._testRequest("GET", "/user/999", 200, 999)
        self._testRequest("CUSTOM", "/user/", 405)  # No method (by us)
        self._testRequest("CUSTOM", "/user/999", 200, True)
        self._testRequest("UNKNOWN", "/user/999", 405)  # No method (by flask)

    def test_restful_view_requests(self):
        """Test RestfulView with real requests"""
        self._testRequest("GET", "/api/", 200, [1, 2, 3])
        self._testRequest("POST", "/api/", 200, "ok")
        self._testRequest("GET", "/api/999", 200, 999)
        self._testRequest("PUT", "/api/999", 200, "re")
        self._testRequest("POST", "/api/999", 200, "up")
        self._testRequest("DELETE", "/api/999", 200, "del")
        self._testRequest("CUSTOM", "/api/999", 200, ":)")
        self._testRequest("CUSTOM2", "/api/999", 405)  # it was overridden by `None`
        self._testRequest("PATCH", "/api/999", 405)
        self._testRequest("PUT", "/api/", 405)
        self._testRequest("PATCH", "/api/", 405)
        self._testRequest("DELETE", "/api/", 405)
        self._testRequest("CUSTOM", "/api/", 405)
        self._testRequest("CUSTOM2", "/api/", 405)
|
import sys
import os
import mechanize
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import re
import time
import random
import urllib.parse
import signal
import selenium.webdriver
import codecs
class SkyScraper:
def __init__(self, config):
self.CONFIG = config
self.num_done = 0
def write_queue(self, queue):
with open(
self.CONFIG["collections_path"] + self.CONFIG["collection_name"] + ".queue",
"w",
) as f:
f.write("\n".join(queue))
sys.exit(0)
def shouldCrawl(self, url):
if all([x not in url for x in self.CONFIG["crawlFilterStrings"]]):
if any([x in url for x in self.CONFIG["crawlRequiredStrings"]]):
return True
return False
def shouldIndex(self, url):
if all([x not in url for x in self.CONFIG["indexFilterStrings"]]):
if (
any(
[
condition in url
for condition in self.CONFIG["indexRequiredStrings"]
]
)
or not self.CONFIG["indexRequiredStrings"]
):
return True
return False
def crawl(self, webCrawler, queue, done):
try:
index = []
if "mechanize" in webCrawler.__module__:
links = [link.url for link in webCrawler.links()]
elif "selenium" in webCrawler.__module__:
links = [
link.get_attribute("href")
for link in webCrawler.find_elements_by_xpath("//*[@href]")
]
for link in links:
url = urllib.parse.urljoin(self.CONFIG["host"], link)
if self.CONFIG["host"] not in url:
continue
if (url not in done) & (url not in queue):
# index
if self.shouldIndex(url):
self.log_url(url)
if self.CONFIG["writeHTML"]:
index.append(url)
# crawl
if self.shouldCrawl(url):
queue.append(url)
except Exception as e:
print(e)
if "mechanize" in webCrawler.__module__:
print("error", webCrawler.geturl())
elif "selenium" in webCrawler.__module__:
print("error", webCrawler.current_url)
for url in index:
self.save_response(webCrawler, url)
return queue
def log_url(self, url):
with open(
self.CONFIG["collections_path"] + self.CONFIG["collection_name"] + ".urls",
"a",
) as f:
f.write(url + "\n")
def log_error(self, url, e):
with open(
self.CONFIG["collections_path"]
+ self.CONFIG["collection_name"]
+ ".errorurls",
"a",
) as f:
f.write(e + ": " + url + "\n")
def save_response(self, webCrawler, url):
# Handle response
if "mechanize" in webCrawler.__module__:
response = webCrawler.open(url)
webCrawler._factory.is_html = True
responseHeader = str(response.info())
html_response = response.read()
elif "selenium" in webCrawler.__module__:
webCrawler.get(url)
responseHeader = "simple"
html_response = webCrawler.page_source
fname = urllib.request.url2pathname(url).split("/")[-1]
if ("." not in fname) | (".ashx" in fname) | (".aspx" in fname):
if "msword" in responseHeader:
extension = ".doc"
elif "pdf" in responseHeader:
extension = ".pdf"
elif "excel" in responseHeader:
print("RESP", responseHeader)
extension = ".xls"
elif "powerpoint" in responseHeader:
extension = ".ppt"
elif "text/html" in responseHeader:
extension = ".aspx"
else:
extension = ""
fname = ".".join(fname.split(".")[:-1]) + extension
else:
extension = ".html"
save_location = url.replace(
self.CONFIG["host"],
self.CONFIG["collections_path"]
+ self.CONFIG["collection_name"]
+ "/"
+ self.CONFIG["collection_name"],
)
save_location.replace("//", "/")
save_location = re.sub("[^a-zA-Z0-9_/-]", "", save_location)
save_location = "/".join(save_location.split("/")[:-1]) + "/"
if not os.path.exists(save_location):
os.makedirs(save_location)
binaryExtensions = [".pdf", ".doc", ".xlsx", ".ppt", ".aspx"]
if extension in binaryExtensions:
with open(save_location + fname.replace(".aspx", ".html"), "wb") as f:
f.write(html_response)
print("written " + save_location + fname)
else:
with codecs.open(save_location + fname, "w", "utf-8-sig") as f:
f.write(html_response)
print("written " + save_location + fname)
self.num_done += 1
def continue_from_log(self):
fname = (
self.CONFIG["collections_path"] + self.CONFIG["collection_name"] + ".urls"
)
if os.path.exists(fname):
with open(fname) as f:
done_so_far = f.read().strip().split("\n")
print("done_so_far:", len(done_so_far))
return done_so_far
else:
with open(fname, "w") as f:
f.write("")
return []
def continue_from_queue(self):
fname = (
self.CONFIG["collections_path"] + self.CONFIG["collection_name"] + ".queue"
)
if os.path.exists(fname):
with open(fname) as f:
queue = f.read().strip().split("\n")
print("queue_len:", len(queue))
return queue
else:
with open(fname, "w") as f:
f.write("")
print("starting from:", self.CONFIG["startURLs"])
return self.CONFIG["startURLs"]
def scrape(self):
self.validate_config()
if self.CONFIG["browser"] == "M":
webCrawler = mechanize.Browser()
webCrawler.set_handle_robots(False)
loginInfoFields = [
"usernameField",
"usernameValue",
"passwordField",
"passwordValue",
"loginURL",
]
if any([field in self.CONFIG for field in loginInfoFields]):
if not all([field in self.CONFIG for field in loginInfoFields]):
raise Exception("")
webCrawler.open(self.CONFIG["loginURL"])
webCrawler.select_form(nr=0)
webCrawler.form[self.CONFIG["usernameField"]] = self.CONFIG[
"usernameValue"
]
webCrawler.form[self.CONFIG["passwordField"]] = self.CONFIG[
"passwordValue"
]
res = webCrawler.submit()
print(webCrawler.open(self.CONFIG["host"]).read())
elif self.CONFIG["browser"] == "S":
path_to_chromedriver = "/Users/pascal/Downloads/chromedriver"
webCrawler = selenium.webdriver.Chrome(executable_path=path_to_chromedriver)
# Nice login handling FOR PORTAL MEDIQ
# webCrawler.get('https://portal.mediq.nl/login.aspx')
# z = raw_input('Waiting for filling in Login. Hit ENTER')
self.makeStandardizedBrowser(webCrawler)
# Lists that contain the URLs that were visited and those that are still enqueued
done = self.continue_from_log()
queue = self.continue_from_queue()
self.num_done = len(done)
while queue and self.num_done < self.CONFIG["maximum_number_of_documents"]:
random.shuffle(queue)
try:
url = urllib.parse.urljoin(self.CONFIG["host"], queue.pop(0))
if url:
signal.signal(signal.SIGINT, lambda s, f: self.write_queue(queue))
if url in done:
print("ALREADY DONE", url)
continue
logString = (
"done: "
+ str(len(set(done)))
+ " left: "
+ str(len(queue))
+ " now: "
+ url
)
if self.CONFIG["browser"] == "S":
webCrawler.get(url)
else:
webCrawler.open(url)
queue = self.crawl(webCrawler, queue, done)
done.append(url)
print(logString)
except Exception as e:
self.log_error(url, str(e))
with open(
self.CONFIG["collections_path"]
+ self.CONFIG["collection_name"]
+ ".queue",
"w",
) as f:
f.write("\n".join(queue))
print(str(e), ":", url)
time.sleep(self.CONFIG["wait_between_url_visits_in_seconds"])
def makeStandardizedBrowser(self, browserObj):
    """Apply the crawler's standard settings to a browser object.

    Only mechanize browsers need adjustment: they get the bot's
    identifying User-Agent header. Other browser types (e.g. selenium)
    are left untouched.
    """
    if isinstance(browserObj, mechanize.Browser):
        user_agent = ("User-agent", "Jibes WatsonBot (+pvkooten@jibes.nl)")
        browserObj.addheaders = [user_agent]
def validate_config(self):
    """Validate and normalize ``self.CONFIG``.

    Checks that all required keys are present, appends the built-in
    image/anchor filters to the crawl/index filter lists, and fills in
    defaults for the optional settings.

    :raises Exception: naming every missing required parameter.
    """
    required = [
        "host",
        "collections_path",
        "collection_name",
        "startURLs",
        "crawlFilterStrings",
        "crawlRequiredStrings",
        "indexFilterStrings",
        "indexRequiredStrings",
    ]
    missing = [key for key in required if key not in self.CONFIG]
    # Bug fix: report missing keys BEFORE touching the filter lists.
    # The original extended them first, which raised a bare KeyError
    # (instead of the intended message) when a filter key was absent.
    if missing:
        raise Exception("Missing REQUIRED parameter(s): " + ", ".join(missing))
    # Never crawl or index raster images or in-page anchors.
    for key in ("crawlFilterStrings", "indexFilterStrings"):
        self.CONFIG[key].extend([".jpeg", ".jpg", "#"])
    optional = {
        "maximum_number_of_documents": 10000,
        "wait_between_url_visits_in_seconds": 1,
        "writeHTML": True,
        "browser": "M",
    }
    for key, default in optional.items():
        self.CONFIG.setdefault(key, default)
|
#!/usr/bin/env python
"""
:mod:`vkmusic`
~~~~~~~~~~~~~~~
A micro tool for export audio from `vk.com <https://vk.com>`_.
It's based on `VK_API <https://github.com/python273/vk_api>`_
by Kirill Python <mikeking568@gmail.com>,
`Requests <python-requests.org>`_.
:copyright: (c) 2013 by Andrii Korzh.
:license: BSD, see LICENSE for more details.
"""
__author__ = "Andrii Korzh <Andrii.Korzh@gmail.com>"
__date__ = "03.26.15"
__version__ = "0.1.3"
import argparse
import datetime
from getpass import getpass
import os
import time
import sys
import requests
from vk_api import VkApi
def connect(login, password):
    """Initialize connection with `vk.com <https://vk.com>`_ and try to authorize user with given credentials.

    :param login: user login e. g. email, phone number
    :type login: str
    :param password: user password
    :type password: str
    :return: :mod:`vk_api.vk_api.VkApi` connection
    :rtype: :mod:`VkApi`
    """
    # Authorization happens inside the VkApi constructor itself.
    return VkApi(login, password)
def get_albums(connection):
    """Fetch the audio-album list for the currently authorized user.

    :param connection: :class:`vk_api.vk_api.VkApi` connection
    :return: the ``audio.getAlbums`` API response, or ``None`` on failure
        (the error is printed).
    """
    try:
        response = connection.method("audio.getAlbums")
    except Exception as exc:
        print(exc)
        return None
    return response
def get_audios(connection):
    """Fetch the audio list of the currently authorized user.

    :param connection: :class:`vk_api.vk_api.VkApi` connection
    :return: the ``audio.get`` API response, or ``None`` on failure
        (the error is printed).
    """
    try:
        response = connection.method("audio.get")
    except Exception as exc:
        print(exc)
        return None
    return response
def download(audio, output, title):
    """Download one audio track into *output* as ``<title>.mp3``.

    :param audio: audio item dict with a direct download ``url`` key
    :param output: destination directory (created if missing)
    :param title: file-name stem, without extension
    :return: ``None``; the download is skipped if the file already exists
    """
    if not os.path.exists(output):
        os.makedirs(output)
    filename = "%s.mp3" % title
    path = os.path.join(output, filename)
    # Bug fix: the skip check previously tested bare `filename` (relative to
    # the CWD) instead of the destination path, so existing files in the
    # output directory were re-downloaded every run. The HTTP request is
    # also issued only after the check, to avoid a pointless fetch.
    if os.path.isfile(path):
        return None
    r = requests.get(audio["url"])
    with open(path, "wb") as f:
        for buf in r.iter_content(1024):
            if buf:
                f.write(buf)
def get_title(audio):
    """Return a display title of the form ``<artist> - <title>``."""
    return "{} - {}".format(audio["artist"], audio["title"])
def main():
    """CLI entry point: authorize on vk.com and export the user's audios.

    Parses command-line arguments, lists every audio with its duration,
    then downloads them one by one with a progress line on stdout.
    """
    parser = argparse.ArgumentParser(description="")
    # Bug fix: ArgumentParser(version=...) only existed in Python 2's
    # argparse; under Python 3 it raises TypeError. Use the dedicated
    # "version" action instead (same `--version` behavior).
    parser.add_argument(
        "--version", action="version", version="%(prog)s " + __version__
    )
    parser.add_argument(
        "-o", "--output", help="output path to store photos", default="~/Music"
    )
    parser.add_argument("username", help="vk.com username")
    parser.add_argument("-p", "--password", help="vk.com password")
    parser.add_argument(
        "-s", "--sort", help="sort by artist folder", action="store_true"
    )
    args = parser.parse_args()
    # expand user path if necessary
    if args.output.startswith("~"):
        args.output = os.path.expanduser(args.output)
    start_time = datetime.datetime.now()
    try:
        password = args.password or getpass("Password: ")
        # Initialize vk.com connection
        connection = connect(args.username, password)
        # Request list of audios
        audios_response = get_audios(connection)
        audios_count = audios_response["count"]
        audios = audios_response["items"]
        print("Found %s album%s:" % (audios_count, "s" if audios_count > 1 else ""))
        if len(audios):
            print(
                "Found %s audio%s:\n"
                % (audios_count, "s" if audios_count > 1 else "")
            )
            print("%3s. %-80s %s" % ("No", "Title", "Duration"))
            for ix, audio in enumerate(audios, start=1):
                print(
                    "%3d. %-80s %s"
                    % (
                        ix,
                        get_title(audio),
                        datetime.timedelta(seconds=audio["duration"]),
                    )
                )
            print("\r")
            processed = 1
            prev_s_len = 0  # A length of the previous output line.
            for audio in audios:
                percent = round(float(processed) / float(audios_count) * 100, 2)
                output_s = "\rExporting %-75s %s of %s (%d%%)" % (
                    get_title(audio),
                    processed,
                    audios_count,
                    percent,
                )
                # Pad with spaces to clear the previous line's tail.
                # It's ok to multiply by negative here.
                output_s += " " * (prev_s_len - len(output_s))
                sys.stdout.write(output_s)
                prev_s_len = len(output_s)
                sys.stdout.flush()
                output = os.path.join(args.output, audio["artist"] if args.sort else "")
                title = audio["title"] if args.sort else get_title(audio)
                try:
                    download(audio, output, title)
                except Exception:
                    # Best effort: skip a failed track after a short pause.
                    time.sleep(1)
                processed += 1
                if processed % 50 == 0:
                    # Throttle periodically to avoid hammering the server.
                    time.sleep(5)
        else:
            print("\nNo audios found! Exiting...")
            sys.exit(0)
    except Exception as e:
        print(e)
        sys.exit(1)
    except KeyboardInterrupt:
        print("\nVKMusic exporting stopped by keyboard")
        sys.exit(0)
    finally:
        print("\nDone in %s" % (datetime.datetime.now() - start_time))


if __name__ == "__main__":
    main()
|
import sys
from .sempai import SempaiLoader

# Register SempaiLoader as a meta-path import hook so the interpreter
# consults it on every import (presumably to load data files as modules
# via SempaiLoader — see .sempai for the actual find/load logic).
sys.meta_path.append(SempaiLoader)
|
#!/usr/bin/env python
from distutils.core import setup

# The original author of this library is
# Ken Robertson ken@invalidlogic.com
# This version is a fork on github maintained by
# Andre Graf andre@dergraf.org.
#
# Ken's library version number depended on
# the version number of python-bert. This is
# not suitable for an automated installation
# using pip which will automatically install
# the dependencies listed in 'install_requires'.
# For this reason I manually set the version
# number.
__version__ = "1.0.0"

setup(
    name="ernie",
    version=__version__,
    description="BERT-Ernie Library",
    author="Ken Robertson",
    author_email="ken@invalidlogic.com",
    url="http://github.com/krobertson/python-ernie",
    packages=["ernie"],
    classifiers=[
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    # The bert dependency is resolved via the dependency_links entry below.
    install_requires=["bert>=2.0.0"],
    dependency_links=[
        "http://github.com/samuel/python-bert/tarball/master#egg=bert-2.0.0"
    ],
)
|
"""Converts input data for the writing assignment 4 to a conventional form
used in all programming assignments
"""
import csv
# Input rating matrix exported from the course material (see module docstring).
filename = "recsys-data-sample-rating-matrix.csv"
# Output files in the conventional id-based format used by the assignments.
ratings_file = "ratings.csv"
movies_file = "movies.csv"
def write_table(filename, rows):
    """Write *rows* (an iterable of tuples) to *filename* as CSV.

    Fix: the original opened the file without a context manager, leaking
    the handle if a write raised; it also carried a stale "# write movies"
    comment although it is used for both tables.
    """
    with open(filename, "w") as out:
        writer = csv.writer(out, delimiter=",")
        writer.writerows(rows)
ratings = []
movies = []
# Python 3 migration: csv expects text mode — the original used the removed
# "rbU" file mode and the Python 2-only reader.next() method. newline="" is
# the documented way to open csv files on Python 3.
with open(filename, newline="") as csv_source:
    csv_reader = csv.reader(csv_source, delimiter=",")
    # read user ids, skip the 0th element ("")
    user_ids = [int(u) for u in next(csv_reader)[1:]]
    # read rating lines
    for r in csv_reader:
        # some dances to keep ':'s in the movie title
        movie_bits = r[0].split(":")
        movie_id, movie_title = int(movie_bits[0]), ":".join(movie_bits[1:]).strip()
        movies.append((movie_id, movie_title))
        # Empty cells mean "no rating" and are skipped.
        ratings.extend(
            [
                (int(user_id), movie_id, float(rating))
                for (user_id, rating) in zip(user_ids, r[1:])
                if rating
            ]
        )
write_table(ratings_file, ratings)
write_table(movies_file, movies)
|
# Package version string (pre-release 2 of 1.1.5).
version = "1.1.5.pre2"
|
"""
Created on Dec 10, 2014
@author: stefan
"""
# Paths to the STP and cryptominisat executable
PATH_STP = "../stp/stp"
PATH_CRYPTOMINISAT = "../cryptominisat/cryptominisat4"
PATH_BOOLECTOR = "../boolector_new/boolector/boolector"
# Maximum weight for characteristics to search for
MAX_WEIGHT = 1000
# Maximum number of characteristics to search for a differential
MAX_CHARACTERISTICS = 10000000
|
from netmiko.ssh_connection import SSHConnection


class BrocadeFastironSSH(SSHConnection):
    """Placeholder for Brocade FastIron"""

    def __init__(self, *args, **kwargs):
        # Driver not implemented yet: fail loudly on instantiation rather
        # than silently inheriting generic SSHConnection behavior.
        raise NotImplementedError
|
# 64 KiB - 1; presumably the maximum chunk size for a single channel
# read — confirm against the call sites that use it.
MAX_BUFFER = 65535
# ASCII backspace control character.
BACKSPACE_CHAR = "\x08"
|
"\nDelay the Travis CI testing for Python versions so that they don't interfere with each other\n"
import re
import os
import time
import sys
# Stagger unit in seconds; each interpreter version sleeps a multiple of it.
TRAVIS_DELAY = 0
def main():
    """Delay the Travis CI testing for Python versions so that they don't
    interfere with each other, then run the test script.

    NOTE(review): this block was recovered from decompiled output in which
    the print/sleep/system/exit calls had been stripped to bare tuple and
    parenthesis expressions; the calls below are restored from that context.
    """
    # Build a "major.minor" string so the '^X.Y' patterns can match it.
    python_version = "%s.%s" % (sys.version_info[0], sys.version_info[1])
    if re.match("^3.5", python_version):
        total_delay = 0 * TRAVIS_DELAY
        print("Python 3.5 found")
        print(total_delay)
        time.sleep(total_delay)
    elif re.match("^3.4", python_version):
        total_delay = 1 * TRAVIS_DELAY
        print("Python 3.4 found")
        print(total_delay)
        time.sleep(total_delay)
    elif re.match("^3.3", python_version):
        total_delay = 2 * TRAVIS_DELAY
        print("Python 3.3 found")
        print(total_delay)
        time.sleep(total_delay)
    elif re.match("^2.7", python_version):
        total_delay = 3 * TRAVIS_DELAY
        print("Python 2.7 found")
        print(total_delay)
        time.sleep(total_delay)
    elif re.match("^2.6", python_version):
        total_delay = 4 * TRAVIS_DELAY
        print("Python 2.6 found")
        print(total_delay)
        time.sleep(total_delay)
    return_code = os.system("sh travis_test.sh")
    # Propagate the test script's success/failure to the CI runner.
    if return_code != 0:
        sys.exit(1)
    else:
        sys.exit(0)


if __name__ == "__main__":
    main()
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use settings_local.py
from funfactory.settings_base import *
from funfactory.utils import get_middleware, get_apps

# Name of the top-level module where you put all your apps.
# If you did not install Playdoh with the funfactory installer script
# you may need to edit this value. See the docs about installing from a
# clone.
PROJECT_MODULE = "jstestnet"

# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
    "css": {
        "common": (
            "css/minimalist/style.css",
            "css/style.css",
        ),
    },
    "js": {
        "common": (
            "js/libs/jquery.js",
            "js/libs/json2.js",
            "js/libs/socket.io.js",
        ),
        "system": ("js/system.js",),
        "work": ("js/work.js",),
    },
}

# Defines the views served for root URLs.
ROOT_URLCONF = "%s.urls" % PROJECT_MODULE

INSTALLED_APPS = list(get_apps([])) + [
    "django.contrib.admin",
    # Application base, containing global templates.
    "jstestnet.base",
    # Local apps
    "jstestnet.system",
    "jstestnet.work",
]

MIDDLEWARE_CLASSES = get_middleware(
    [
        "funfactory.middleware.LocaleURLMiddleware",
    ]
)

# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
    "admin",
    "registration",
]
# Tells the extract script what files to look for L10n in and what function
# handles the extraction. The Tower library expects this.
# Bug fix: a stray trailing comma previously wrapped this list in a 1-tuple,
# so DOMAIN_METHODS["messages"] was a tuple containing the list instead of
# the list of (pattern, extractor) pairs the extract script iterates.
DOMAIN_METHODS["messages"] = [
    (
        "%s/**.py" % PROJECT_MODULE,
        "tower.management.commands.extract.extract_tower_python",
    ),
    (
        "%s/**/templates/**.html" % PROJECT_MODULE,
        "tower.management.commands.extract.extract_tower_template",
    ),
    (
        "templates/**.html",
        "tower.management.commands.extract.extract_tower_template",
    ),
]
# paths that don't require a locale prefix
# NOTE: locale middleware has been disabled.
# SUPPORTED_NONLOCALES = (
#     'img',
#     'media',
#     'robots.txt',
#     'system',
#     'work',
#     'admin-contrib',
# )

# # Use this if you have localizable HTML files:
# DOMAIN_METHODS['lhtml'] = [
#     ('**/templates/**.lhtml',
#      'tower.management.commands.extract.extract_tower_template'),
# ]

# # Use this if you have localizable JS files:
# DOMAIN_METHODS['javascript'] = [
#     # Make sure that this won't pull in strings from external libraries you
#     # may use.
#     ('media/js/**.js', 'javascript'),
# ]

# For bcrypt only:
HMAC_KEYS = {
    #'2012-06-01': 'Example of shared key',
}

# Use sha 256 by default but support any other algorithm:
BASE_PASSWORD_HASHERS = (
    "django_sha2.hashers.SHA256PasswordHasher",
    "django_sha2.hashers.BcryptHMACCombinedPasswordVerifier",
    "django_sha2.hashers.SHA512PasswordHasher",
    "django.contrib.auth.hashers.SHA1PasswordHasher",
    "django.contrib.auth.hashers.MD5PasswordHasher",
    "django.contrib.auth.hashers.UnsaltedMD5PasswordHasher",
)
from django_sha2 import get_password_hashers

PASSWORD_HASHERS = get_password_hashers(BASE_PASSWORD_HASHERS, HMAC_KEYS)

# logging is available via funfactory.settings_base's star import.
LOGGING = dict(
    loggers=dict(playdoh={"level": logging.DEBUG}, jstestnet={"level": logging.INFO})
)

# When True, always provide CSRF protection for anonymous users.
# This is required to get admin logins to work w/ django-session-csrf.
ANON_ALWAYS = True

# This is an experimental feature still in the works.
# When this is True you need gevent and greenlet installed.
# See compiled.txt.
USE_SOCKET_IO = False
|
from prettytable import PrettyTable
from .common import *
from .. import colors
def process_commit(to_commit):
    """Upload local file changes to Polygon (without making a Polygon commit).

    :param to_commit: list of names/paths restricting which files are sent;
        an empty list means "commit every local file".
    Prints a per-file status table when finished and saves the session.
    """
    if not load_session() or global_vars.problem.sessionId is None:
        fatal("No session known. Use relogin or init first.")
    files = global_vars.problem.local_files
    polygon_files = global_vars.problem.get_all_files_list()
    table = PrettyTable(["File type", "Polygon name", "Local path", "Status"])
    for file in files:
        # Find the Polygon-side counterpart of this local file, if any.
        polygon_file = None
        if file.polygon_filename:
            for p in polygon_files:
                if p.name == file.polygon_filename:
                    polygon_file = p
        # A file is selected when any of its known names/paths matches.
        need_file = (
            (polygon_file is not None and polygon_file.name in to_commit)
            or file.name in to_commit
            or file.filename in to_commit
            or file.get_path() in to_commit
        )
        if to_commit and not need_file:
            continue
        if polygon_file is None:
            # No counterpart on Polygon: upload the file as new.
            file.polygon_filename = None
            # NOTE(review): polygon_filename is cleared on the line above, so
            # this "Returned" branch looks unreachable — confirm whether the
            # clearing was meant to happen after the check.
            if file.polygon_filename:
                status = colors.success("Returned")
                print(("polygon_file for %s was removed. Adding it back." % file.name))
            else:
                status = colors.success("New")
                print(("Adding new file %s to polygon" % file.name))
            if not file.upload():
                status = colors.error("Error")
            table.add_row([file.type, file.polygon_filename, file.get_path(), status])
        else:
            polygon_text = polygon_file.get_content()
            old_path = file.get_internal_path()
            status = ""
            # `while True` + `break` is used as a single-pass block so each
            # outcome below can bail out after setting exactly one status.
            while True:
                try:
                    old_text = open(old_path, "rb").read()
                except IOError:
                    # No cached copy: the local view of Polygon is stale.
                    status = colors.warning("Outdated")
                    print(("file %s is outdated: update first" % file.name))
                    break
                if polygon_text.splitlines() != old_text.splitlines():
                    # Polygon changed since our last update: refuse to clobber.
                    status = colors.warning("Outdated")
                    print(("file %s is outdated: update first" % file.name))
                    break
                new_text = open(file.get_path(), "rb").read()
                if polygon_text.splitlines() == new_text.splitlines():
                    status = colors.info("Not changed")
                    print(("file %s not changed" % file.name))
                    break
                print(("uploading file %s" % file.name))
                if file.update():
                    status = colors.success("Modified")
                else:
                    status = colors.error("Error")
                break
            table.add_row([file.type, file.polygon_filename, file.get_path(), status])
    print(table)
    save_session()
def add_parser(subparsers):
    """Register the ``commit`` subcommand on *subparsers*."""
    commit_parser = subparsers.add_parser(
        "commit",
        help="Put all local changes to polygon. Not making a commit in polygon",
    )
    commit_parser.add_argument("file", nargs="*", help="List of files to commit")
    # Dispatch: the CLI driver calls options.func(options).
    commit_parser.set_defaults(func=lambda options: process_commit(options.file))
|
# Setup script for BeautifulSoup: runs the unit tests first and refuses to
# build a source distribution from a tree whose tests fail.
from distutils.core import setup
import unittest
import warnings

warnings.filterwarnings("ignore", "Unknown distribution option")
import sys

# patch distutils if it can't cope with the "classifiers" keyword
if sys.version < "2.2.3":
    from distutils.dist import DistributionMetadata

    DistributionMetadata.classifiers = None
    DistributionMetadata.download_url = None
from BeautifulSoup import __version__

# Make sure all the tests complete.
import BeautifulSoupTests

loader = unittest.TestLoader()
result = unittest.TestResult()
suite = loader.loadTestsFromModule(BeautifulSoupTests)
suite.run(result)
if not result.wasSuccessful():
    print("Unit tests have failed!")
    for l in result.errors, result.failures:
        for case, error in l:
            print("-" * 80)
            desc = case.shortDescription()
            if desc:
                print(desc)
            print(error)
    print(
        """If you see an error like: "'ascii' codec can't encode character...", see\nthe Beautiful Soup documentation:\n http://www.crummy.com/software/BeautifulSoup/documentation.html#Why%20can't%20Beautiful%20Soup%20print%20out%20the%20non-ASCII%20characters%20I%20gave%20it?"""
    )
    print(
        "This might or might not be a problem depending on what you plan to do with\nBeautiful Soup."
    )
    # Bug fix: guard against IndexError when the script is run with no
    # command-line arguments (sys.argv[1] used to be read unconditionally).
    if len(sys.argv) > 1 and sys.argv[1] == "sdist":
        print()
        print("I'm not going to make a source distribution since the tests don't pass.")
        sys.exit(1)
setup(
    name="BeautifulSoup",
    version=__version__,
    py_modules=["BeautifulSoup", "BeautifulSoupTests"],
    scripts=[
        "testall.sh",
        "to3.sh",
        ## Whatever, I'll fix this later.
        #'README', 'CHANGELOG',
        # 'BeautifulSoup.py.3.diff', 'BeautifulSoupTests.py.3.diff'],
    ],
    description="HTML/XML parser for quick-turnaround applications like screen-scraping.",
    author="Leonard Richardson",
    author_email="leonardr@segfault.org",
    long_description="""Beautiful Soup parses arbitrarily invalid SGML and provides a variety of methods and Pythonic idioms for iterating and searching the parse tree.""",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Python Software Foundation License",
        "Programming Language :: Python",
        "Topic :: Text Processing :: Markup :: HTML",
        "Topic :: Text Processing :: Markup :: XML",
        "Topic :: Text Processing :: Markup :: SGML",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    url="http://www.crummy.com/software/BeautifulSoup/",
    license="BSD",
    download_url="http://www.crummy.com/software/BeautifulSoup/download/",
)
# Send announce to:
#   python-announce@python.org
#   python-list@python.org
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import __main__
class Event(object):
    """
    Event is a simple publish/subscribe based event dispatcher

    It sets itself to the __main__ function. In order to use it,
    you must import it and __main__
    """

    def __init__(self):
        # Each subscription is a dict with "event", "callback" and "args".
        self.events = []

    def subscribe(self, event, callback, args=None):
        """
        Subscribe *callback* to *event*; exact duplicate subscriptions
        (same event, callback and args) are ignored.
        """
        # Idiom fix: `not x in seq` -> `x not in seq`.
        subscription = {"event": event, "callback": callback, "args": args}
        if subscription not in self.events:
            self.events.append(subscription)

    def unsubscribe(self, event, callback, args=None):
        """
        Remove a previously registered subscription, if present.
        """
        subscription = {"event": event, "callback": callback, "args": args}
        if subscription in self.events:
            self.events.remove(subscription)

    def fire_event(self, event=None):
        """
        Invoke every callback registered for *event*.

        ``args`` may be a list (splatted positionally), a dict (splatted
        as keyword arguments), ``None`` (no arguments), or any other
        value (passed as the single positional argument).
        """
        for subscription in self.events:
            if subscription["event"] != event:
                continue
            callback = subscription["callback"]
            args = subscription["args"]
            # Idiom fix: isinstance()/`is None` instead of
            # `type(x) == type([])` and `x == None`.
            if isinstance(args, list):
                callback(*args)
            elif isinstance(args, dict):
                callback(**args)
            elif args is None:
                callback()
            else:
                callback(args)
"""
Assign to the event class to __main__
"""
__main__.AEU_Events = Event()
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "j.s@google.com (Jeff Scudder)"
import unittest
import atom.service
import atom.mock_http_core
import gdata.test_config as conf
class AtomServiceUnitTest(unittest.TestCase):
    """Unit tests for URI construction and URL parsing in atom.service.

    Assertions were tightened from assertTrue(a == b) to assertEqual,
    assertIn, assertIsInstance, etc. for clearer failure messages; the
    checks themselves are unchanged.
    """

    def testBuildUriWithNoParams(self):
        x = atom.service.BuildUri("/base/feeds/snippets")
        self.assertEqual(x, "/base/feeds/snippets")

    def testBuildUriWithParams(self):
        # Add parameters to a URI
        x = atom.service.BuildUri(
            "/base/feeds/snippets", url_params={"foo": "bar", "bq": "digital camera"}
        )
        self.assertEqual(x, "/base/feeds/snippets?foo=bar&bq=digital+camera")
        self.assertTrue(x.startswith("/base/feeds/snippets"))
        self.assertEqual(x.count("?"), 1)
        self.assertEqual(x.count("&"), 1)
        self.assertLess(x.index("?"), x.index("&"))
        self.assertIn("bq=digital+camera", x)
        # Add parameters to a URI that already has parameters
        x = atom.service.BuildUri(
            "/base/feeds/snippets?bq=digital+camera",
            url_params={"foo": "bar", "max-results": "250"},
        )
        self.assertTrue(x.startswith("/base/feeds/snippets?bq=digital+camera"))
        self.assertEqual(x.count("?"), 1)
        self.assertEqual(x.count("&"), 2)
        self.assertLess(x.index("?"), x.index("&"))
        self.assertIn("max-results=250", x)
        self.assertIn("foo=bar", x)

    def testBuildUriWithoutParameterEscaping(self):
        x = atom.service.BuildUri(
            "/base/feeds/snippets",
            url_params={"foo": " bar", "bq": "digital camera"},
            escape_params=False,
        )
        self.assertIn("foo= bar", x)
        self.assertIn("bq=digital camera", x)

    def testParseHttpUrl(self):
        atom_service = atom.service.AtomService("code.google.com")
        self.assertEqual(atom_service.server, "code.google.com")
        (host, port, ssl, path) = atom.service.ProcessUrl(
            atom_service, "http://www.google.com/service/subservice?name=value"
        )
        self.assertEqual(ssl, False)
        self.assertEqual(host, "www.google.com")
        self.assertEqual(port, 80)
        self.assertEqual(path, "/service/subservice?name=value")

    def testParseHttpUrlWithPort(self):
        atom_service = atom.service.AtomService("code.google.com")
        self.assertEqual(atom_service.server, "code.google.com")
        (host, port, ssl, path) = atom.service.ProcessUrl(
            atom_service,
            "http://www.google.com:12/service/subservice?name=value&newname=newvalue",
        )
        self.assertEqual(ssl, False)
        self.assertEqual(host, "www.google.com")
        self.assertEqual(port, 12)
        self.assertTrue(path.startswith("/service/subservice?"))
        # Both query parameters must appear after the "?" (order may vary).
        self.assertGreaterEqual(path.find("name=value"), len("/service/subservice?"))
        self.assertGreaterEqual(
            path.find("newname=newvalue"), len("/service/subservice?")
        )

    def testParseHttpsUrl(self):
        atom_service = atom.service.AtomService("code.google.com")
        self.assertEqual(atom_service.server, "code.google.com")
        (host, port, ssl, path) = atom.service.ProcessUrl(
            atom_service,
            "https://www.google.com/service/subservice?name=value&newname=newvalue",
        )
        self.assertEqual(ssl, True)
        self.assertEqual(host, "www.google.com")
        self.assertEqual(port, 443)
        self.assertTrue(path.startswith("/service/subservice?"))
        self.assertGreaterEqual(path.find("name=value"), len("/service/subservice?"))
        self.assertGreaterEqual(
            path.find("newname=newvalue"), len("/service/subservice?")
        )

    def testParseHttpsUrlWithPort(self):
        atom_service = atom.service.AtomService("code.google.com")
        self.assertEqual(atom_service.server, "code.google.com")
        (host, port, ssl, path) = atom.service.ProcessUrl(
            atom_service,
            "https://www.google.com:13981/service/subservice?name=value&newname=newvalue",
        )
        self.assertEqual(ssl, True)
        self.assertEqual(host, "www.google.com")
        self.assertEqual(port, 13981)
        self.assertTrue(path.startswith("/service/subservice?"))
        self.assertGreaterEqual(path.find("name=value"), len("/service/subservice?"))
        self.assertGreaterEqual(
            path.find("newname=newvalue"), len("/service/subservice?")
        )

    def testSetBasicAuth(self):
        client = atom.service.AtomService()
        client.UseBasicAuth("foo", "bar")
        token = client.token_store.find_token("http://")
        self.assertIsInstance(token, atom.service.BasicAuthToken)
        self.assertEqual(token.auth_header, "Basic Zm9vOmJhcg==")
        # Empty credentials still produce a (degenerate) auth header.
        client.UseBasicAuth("", "")
        token = client.token_store.find_token("http://")
        self.assertIsInstance(token, atom.service.BasicAuthToken)
        self.assertEqual(token.auth_header, "Basic Og==")

    def testProcessUrlWithStringForService(self):
        (server, port, ssl, uri) = atom.service.ProcessUrl(
            service="www.google.com", url="/base/feeds/items"
        )
        self.assertEqual(server, "www.google.com")
        self.assertEqual(port, 80)
        self.assertEqual(ssl, False)
        self.assertTrue(uri.startswith("/base/feeds/items"))
        client = atom.service.AtomService()
        client.server = "www.google.com"
        client.ssl = True
        (server, port, ssl, uri) = atom.service.ProcessUrl(
            service=client, url="/base/feeds/items"
        )
        self.assertEqual(server, "www.google.com")
        self.assertEqual(ssl, True)
        self.assertTrue(uri.startswith("/base/feeds/items"))
        (server, port, ssl, uri) = atom.service.ProcessUrl(
            service=None, url="https://www.google.com/base/feeds/items"
        )
        self.assertEqual(server, "www.google.com")
        self.assertEqual(port, 443)
        self.assertEqual(ssl, True)
        self.assertTrue(uri.startswith("/base/feeds/items"))

    def testHostHeaderContainsNonDefaultPort(self):
        client = atom.service.AtomService()
        client.http_client.v2_http_client = atom.mock_http_core.EchoHttpClient()
        response = client.Get("http://example.com")
        self.assertEqual(response.getheader("Echo-Host"), "example.com:None")
        response = client.Get("https://example.com")
        self.assertEqual(response.getheader("Echo-Host"), "example.com:None")
        response = client.Get("https://example.com:8080")
        self.assertEqual(response.getheader("Echo-Host"), "example.com:8080")
        response = client.Get("http://example.com:1234")
        self.assertEqual(response.getheader("Echo-Host"), "example.com:1234")
def suite():
    # Build a unittest suite from this module's single test case.
    return conf.build_suite([AtomServiceUnitTest])


if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = "j.s@google.com (Jeff Scudder)"
import unittest
import gdata.data
from gdata import test_data
import gdata.test_config as conf
import atom.core
import atom.data
# Minimal v2 contacts feed: two entries, one with a full link set (photo,
# self, edit) and one carrying only an edit link — used by the feed tests.
SIMPLE_V2_FEED_TEST_DATA = """<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:gd='http://schemas.google.com/g/2005'
gd:etag='W/"CUMBRHo_fip7ImA9WxRbGU0."'>
<title>Elizabeth Bennet's Contacts</title>
<link rel='next' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/.../more' />
<entry gd:etag='"Qn04eTVSLyp7ImA9WxRbGEUORAQ."'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9e</id>
<title>Fitzwilliam</title>
<link rel='http://schemas.google.com/contacts/2008/rel#photo'
type='image/*'
href='http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/c9e'
gd:etag='"KTlcZWs1bCp7ImBBPV43VUV4LXEZCXERZAc."' />
<link rel='self' type='application/atom+xml'
href='Changed to ensure we are really getting the edit URL.'/>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9e'/>
</entry>
<entry gd:etag='"123456"'>
<link rel='edit' href='http://example.com/1' />
</entry>
</feed>"""

# Google Base entry with labels, alternate/license links and an id padded
# with whitespace (exercises id cleaning in GDataEntryTest).
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
def parse(xml_string, target_class):
    """Deserialize *xml_string* into an instance of *target_class*."""
    element = atom.core.xml_element_from_string(xml_string, target_class)
    return element
class StartIndexTest(unittest.TestCase):
    """Round-trip serialization tests for gdata.data.StartIndex."""

    def setUp(self):
        self.start_index = gdata.data.StartIndex()

    def testToAndFromString(self):
        self.start_index.text = "1"
        # assertEqual yields a useful diff on failure (was assertTrue(a == b)).
        self.assertEqual(self.start_index.text, "1")
        new_start_index = parse(self.start_index.ToString(), gdata.data.StartIndex)
        self.assertEqual(self.start_index.text, new_start_index.text)
class ItemsPerPageTest(unittest.TestCase):
    """Round-trip serialization tests for gdata.data.ItemsPerPage."""

    def setUp(self):
        self.items_per_page = gdata.data.ItemsPerPage()

    def testToAndFromString(self):
        self.items_per_page.text = "10"
        # assertEqual yields a useful diff on failure (was assertTrue(a == b)).
        self.assertEqual(self.items_per_page.text, "10")
        new_items_per_page = parse(
            self.items_per_page.ToString(), gdata.data.ItemsPerPage
        )
        self.assertEqual(self.items_per_page.text, new_items_per_page.text)
class GDataEntryTest(unittest.TestCase):
    """Tests that GDEntry cleans whitespace/namespace noise from raw XML."""

    def testIdShouldBeCleaned(self):
        entry = parse(XML_ENTRY_1, gdata.data.GDEntry)
        tree = parse(XML_ENTRY_1, atom.core.XmlElement)
        # Bug fix: the closing parenthesis was misplaced, so the namespace
        # URI was compared to entry.id.text and the resulting bool passed as
        # the namespace argument. Mirrors testGeneratorShouldBeCleaned: the
        # raw id (padded with whitespace in XML_ENTRY_1) must differ from
        # the cleaned entry id.
        self.assertFalse(
            tree.get_elements("id", "http://www.w3.org/2005/Atom")[0].text
            == entry.id.text
        )
        self.assertEqual(entry.get_id(), "http://www.google.com/test/id/url")

    def testGeneratorShouldBeCleaned(self):
        feed = parse(test_data.GBASE_FEED, gdata.data.GDFeed)
        tree = parse(test_data.GBASE_FEED, atom.core.XmlElement)
        self.assertFalse(
            tree.get_elements("generator", "http://www.w3.org/2005/Atom")[0].text
            == feed.get_generator()
        )
        self.assertEqual(feed.get_generator(), "GoogleBase")

    def testAllowsEmptyId(self):
        entry = gdata.data.GDEntry()
        try:
            entry.id = atom.data.Id()
        except AttributeError:
            self.fail("Empty id should not raise an attribute error.")
class LinkFinderTest(unittest.TestCase):
    """Tests for link lookup helpers on a parsed GDEntry."""

    def setUp(self):
        self.entry = parse(XML_ENTRY_1, gdata.data.GDEntry)

    def testLinkFinderGetsLicenseLink(self):
        # assertIsInstance reports the actual type on failure
        # (was assertEqual(isinstance(...), True)).
        self.assertIsInstance(self.entry.FindLicenseLink(), str)
        self.assertEqual(
            self.entry.FindLicenseLink(),
            "http://creativecommons.org/licenses/by-nc/2.5/rdf",
        )

    def testLinkFinderGetsAlternateLink(self):
        self.assertIsInstance(self.entry.FindAlternateLink(), str)
        self.assertEqual(
            self.entry.FindAlternateLink(), "http://www.provider-host.com/123456789"
        )
class GDataFeedTest(unittest.TestCase):
    """Tests for GDFeed parsing and opensearch element access."""

    def testCorrectConversionToElementTree(self):
        test_feed = parse(test_data.GBASE_FEED, gdata.data.GDFeed)
        # assertIsNotNone/assertGreater replace assertTrue(... is not None)
        # and assertTrue(len(...) > 0) for better failure output.
        self.assertIsNotNone(test_feed.total_results)
        opensearch_ns = "http://a9.com/-/spec/opensearchrss/1.0/"
        self.assertIsNotNone(test_feed.get_elements("totalResults", opensearch_ns))
        self.assertGreater(
            len(test_feed.get_elements("totalResults", opensearch_ns)), 0
        )

    def testAllowsEmptyId(self):
        feed = gdata.data.GDFeed()
        try:
            feed.id = atom.data.Id()
        except AttributeError:
            self.fail("Empty id should not raise an attribute error.")
class BatchEntryTest(unittest.TestCase):
    """Round-trip tests for gdata.data.BatchEntry serialization."""

    def testCorrectConversionFromAndToString(self):
        expected_id = (
            "http://www.google.com/base/feeds/items/" "2173859253842813008"
        )
        batch_entry = parse(test_data.BATCH_ENTRY, gdata.data.BatchEntry)
        # Parsed fields must match the fixture.
        self.assertEqual(batch_entry.batch_id.text, "itemB")
        self.assertEqual(batch_entry.id.text, expected_id)
        self.assertEqual(batch_entry.batch_operation.type, "insert")
        self.assertEqual(batch_entry.batch_status.code, "201")
        self.assertEqual(batch_entry.batch_status.reason, "Created")
        # Serialize and re-parse; every batch field must survive the trip.
        reparsed = parse(str(batch_entry), gdata.data.BatchEntry)
        self.assertEqual(batch_entry.batch_id.text, reparsed.batch_id.text)
        self.assertEqual(batch_entry.id.text, reparsed.id.text)
        self.assertEqual(
            batch_entry.batch_operation.type, reparsed.batch_operation.type
        )
        self.assertEqual(batch_entry.batch_status.code, reparsed.batch_status.code)
        self.assertEqual(batch_entry.batch_status.reason, reparsed.batch_status.reason)
class BatchFeedTest(unittest.TestCase):
    """Tests BatchFeed parsing and the AddBatchEntry/AddInsert/AddDelete/
    AddQuery/AddUpdate helpers."""
    def setUp(self):
        # A fresh, empty feed plus an example entry for the helper tests.
        self.batch_feed = gdata.data.BatchFeed()
        self.example_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/1"), text="This is a test"
        )
    def testConvertRequestFeed(self):
        """A request feed round-trips through to_string with entries intact."""
        batch_feed = parse(test_data.BATCH_FEED_REQUEST, gdata.data.BatchFeed)
        self.assertEqual(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assertTrue(isinstance(entry, gdata.data.BatchEntry))
        self.assertEqual(batch_feed.title.text, "My Batch Feed")
        new_feed = parse(batch_feed.to_string(), gdata.data.BatchFeed)
        self.assertEqual(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assertTrue(isinstance(entry, gdata.data.BatchEntry))
        self.assertEqual(new_feed.title.text, "My Batch Feed")
    def testConvertResultFeed(self):
        """A result feed round-trips with per-entry batch status intact."""
        batch_feed = parse(test_data.BATCH_FEED_RESULT, gdata.data.BatchFeed)
        self.assertEqual(len(batch_feed.entry), 4)
        for entry in batch_feed.entry:
            self.assertTrue(isinstance(entry, gdata.data.BatchEntry))
            if entry.id.text == (
                "http://www.google.com/base/feeds/items/" "2173859253842813008"
            ):
                self.assertEqual(entry.batch_operation.type, "insert")
                self.assertEqual(entry.batch_id.text, "itemB")
                self.assertEqual(entry.batch_status.code, "201")
                self.assertEqual(entry.batch_status.reason, "Created")
        self.assertEqual(batch_feed.title.text, "My Batch")
        new_feed = parse(str(batch_feed), gdata.data.BatchFeed)
        self.assertEqual(len(new_feed.entry), 4)
        for entry in new_feed.entry:
            self.assertTrue(isinstance(entry, gdata.data.BatchEntry))
            if entry.id.text == (
                "http://www.google.com/base/feeds/items/" "2173859253842813008"
            ):
                self.assertEqual(entry.batch_operation.type, "insert")
                self.assertEqual(entry.batch_id.text, "itemB")
                self.assertEqual(entry.batch_status.code, "201")
                self.assertEqual(entry.batch_status.reason, "Created")
        self.assertEqual(new_feed.title.text, "My Batch")
    def testAddBatchEntry(self):
        """AddBatchEntry argument combinations and their precedence rules."""
        # Neither an entry nor an id URL: must raise.
        try:
            self.batch_feed.AddBatchEntry(batch_id_string="a")
            self.fail("AddBatchEntry with neither entry or URL should raise Error")
        except gdata.data.MissingRequiredParameters:
            pass
        # A URL alone creates an entry with an auto-numbered batch id ("0").
        new_entry = self.batch_feed.AddBatchEntry(id_url_string="http://example.com/1")
        self.assertEqual(len(self.batch_feed.entry), 1)
        self.assertEqual(self.batch_feed.entry[0].get_id(), "http://example.com/1")
        self.assertEqual(self.batch_feed.entry[0].batch_id.text, "0")
        self.assertEqual(new_entry.id.text, "http://example.com/1")
        self.assertEqual(new_entry.batch_id.text, "0")
        # An explicit batch_id_string overrides the auto counter.
        to_add = gdata.data.BatchEntry(id=atom.data.Id(text="originalId"))
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add, batch_id_string="foo")
        self.assertEqual(new_entry.batch_id.text, "foo")
        self.assertEqual(new_entry.id.text, "originalId")
        # batch_id_string also overrides a batch id already on the entry,
        # while the entry's own atom id wins over id_url_string.
        to_add = gdata.data.BatchEntry(
            id=atom.data.Id(text="originalId"), batch_id=gdata.data.BatchId(text="bar")
        )
        new_entry = self.batch_feed.AddBatchEntry(
            entry=to_add, id_url_string="newId", batch_id_string="foo"
        )
        self.assertEqual(new_entry.batch_id.text, "foo")
        self.assertEqual(new_entry.id.text, "originalId")
        # Without batch_id_string, the entry's existing batch id is kept.
        to_add = gdata.data.BatchEntry(
            id=atom.data.Id(text="originalId"), batch_id=gdata.data.BatchId(text="bar")
        )
        new_entry = self.batch_feed.AddBatchEntry(entry=to_add, id_url_string="newId")
        self.assertEqual(new_entry.batch_id.text, "bar")
        self.assertEqual(new_entry.id.text, "originalId")
        # operation_string overrides the entry's existing batch operation.
        to_add = gdata.data.BatchEntry(
            id=atom.data.Id(text="originalId"),
            batch_id=gdata.data.BatchId(text="bar"),
            batch_operation=gdata.data.BatchOperation(type=gdata.data.BATCH_INSERT),
        )
        self.assertEqual(to_add.batch_operation.type, gdata.data.BATCH_INSERT)
        new_entry = self.batch_feed.AddBatchEntry(
            entry=to_add,
            id_url_string="newId",
            batch_id_string="foo",
            operation_string=gdata.data.BATCH_UPDATE,
        )
        self.assertEqual(new_entry.batch_operation.type, gdata.data.BATCH_UPDATE)
    def testAddInsert(self):
        """AddInsert forces the insert operation and numbers entries in order."""
        first_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/1"), text="This is a test1"
        )
        self.batch_feed.AddInsert(first_entry)
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_INSERT
        )
        self.assertEqual(self.batch_feed.entry[0].batch_id.text, "0")
        second_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/2"), text="This is a test2"
        )
        self.batch_feed.AddInsert(second_entry, batch_id_string="foo")
        self.assertEqual(
            self.batch_feed.entry[1].batch_operation.type, gdata.data.BATCH_INSERT
        )
        self.assertEqual(self.batch_feed.entry[1].batch_id.text, "foo")
        third_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/3"), text="This is a test3"
        )
        third_entry.batch_operation = gdata.data.BatchOperation(
            type=gdata.data.BATCH_DELETE
        )
        # Add an entry with a delete operation already assigned.
        self.batch_feed.AddInsert(third_entry)
        # The batch entry should not have the original operation, it should
        # have been changed to an insert.
        self.assertEqual(
            self.batch_feed.entry[2].batch_operation.type, gdata.data.BATCH_INSERT
        )
        self.assertEqual(self.batch_feed.entry[2].batch_id.text, "2")
    def testAddDelete(self):
        """AddDelete accepts either an existing entry or a bare URL."""
        # Try deleting an entry
        delete_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/1"), text="This is a test"
        )
        self.batch_feed.AddDelete(entry=delete_entry)
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_DELETE
        )
        self.assertEqual(self.batch_feed.entry[0].get_id(), "http://example.com/1")
        self.assertEqual(self.batch_feed.entry[0].text, "This is a test")
        # Try deleting a URL
        self.batch_feed.AddDelete(url_string="http://example.com/2")
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_DELETE
        )
        self.assertEqual(self.batch_feed.entry[1].id.text, "http://example.com/2")
        self.assertTrue(self.batch_feed.entry[1].text is None)
    def testAddQuery(self):
        """AddQuery accepts either an existing entry or a bare URL."""
        # Try querying with an existing batch entry
        delete_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/1")
        )
        self.batch_feed.AddQuery(entry=delete_entry)
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_QUERY
        )
        self.assertEqual(self.batch_feed.entry[0].get_id(), "http://example.com/1")
        # Try querying a URL
        self.batch_feed.AddQuery(url_string="http://example.com/2")
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_QUERY
        )
        self.assertEqual(self.batch_feed.entry[1].id.text, "http://example.com/2")
    def testAddUpdate(self):
        """AddUpdate marks an existing entry with the update operation."""
        # Try updating an entry
        delete_entry = gdata.data.BatchEntry(
            id=atom.data.Id(text="http://example.com/1"), text="This is a test"
        )
        self.batch_feed.AddUpdate(entry=delete_entry)
        self.assertEqual(
            self.batch_feed.entry[0].batch_operation.type, gdata.data.BATCH_UPDATE
        )
        self.assertEqual(self.batch_feed.entry[0].get_id(), "http://example.com/1")
        self.assertEqual(self.batch_feed.entry[0].text, "This is a test")
class ExtendedPropertyTest(unittest.TestCase):
    """Tests XML-blob storage on gd:extendedProperty."""
    def testXmlBlobRoundTrip(self):
        """A blob set on the property survives serialization and reparsing."""
        prop = gdata.data.ExtendedProperty(name="blobby")
        prop.SetXmlBlob('<some_xml attr="test"/>')
        reparsed = parse(prop.ToString(), gdata.data.ExtendedProperty)
        for candidate in (prop, reparsed):
            blob = candidate.GetXmlBlob()
            self.assertEqual(blob.tag, "some_xml")
            self.assertTrue(blob.namespace is None)
            self.assertEqual(blob.attributes["attr"], "test")
    def testGettersShouldReturnNoneWithNoBlob(self):
        """GetXmlBlob on a property without a blob returns None."""
        prop = gdata.data.ExtendedProperty(name="no blob")
        self.assertTrue(prop.GetXmlBlob() is None)
    def testGettersReturnCorrectTypes(self):
        """GetXmlBlob returns an XmlElement whose to_string() is a str."""
        prop = gdata.data.ExtendedProperty(name="has blob")
        prop.SetXmlBlob('<some_xml attr="test"/>')
        blob = prop.GetXmlBlob()
        self.assertTrue(isinstance(blob, atom.core.XmlElement))
        self.assertTrue(isinstance(blob.to_string(), str))
class FeedLinkTest(unittest.TestCase):
    """Parsing of the gd:feedLink element."""
    def testCorrectFromStringType(self):
        """parse() yields a FeedLink and preserves the countHint attribute."""
        xml = '<feedLink xmlns="http://schemas.google.com/g/2005" countHint="5"/>'
        link = parse(xml, gdata.data.FeedLink)
        self.assertTrue(isinstance(link, gdata.data.FeedLink))
        self.assertEqual(link.count_hint, "5")
class SimpleV2FeedTest(unittest.TestCase):
    """Parsing checks against a small, hand-written GData v2 feed."""
    def test_parsing_etags_and_edit_url(self):
        """ETags and edit/next links are exposed on the feed and its entries."""
        feed = atom.core.parse(SIMPLE_V2_FEED_TEST_DATA, gdata.data.GDFeed)
        # General parsing assertions.
        self.assertEqual(
            feed.get_elements("title")[0].text, "Elizabeth Bennet's Contacts"
        )
        self.assertEqual(len(feed.entry), 2)
        for entry in feed.entry:
            self.assertTrue(isinstance(entry, gdata.data.GDEntry))
        self.assertEqual(feed.entry[0].GetElements("title")[0].text, "Fitzwilliam")
        self.assertEqual(
            feed.entry[0].get_elements("id")[0].text,
            "http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9e",
        )
        # ETags checks.
        self.assertEqual(feed.etag, 'W/"CUMBRHo_fip7ImA9WxRbGU0."')
        self.assertEqual(feed.entry[0].etag, '"Qn04eTVSLyp7ImA9WxRbGEUORAQ."')
        self.assertEqual(feed.entry[1].etag, '"123456"')
        # Look for Edit URLs (both snake_case and CamelCase aliases are used
        # deliberately, to cover both spellings of the API).
        self.assertEqual(
            feed.entry[0].find_edit_link(),
            "http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9e",
        )
        self.assertEqual(feed.entry[1].FindEditLink(), "http://example.com/1")
        # Look for Next URLs.
        self.assertEqual(
            feed.find_next_link(), "http://www.google.com/m8/feeds/contacts/.../more"
        )
    # NOTE(review): method name has a typo ("defauls"); left unchanged so any
    # external references to this test name keep working.
    def test_constructor_defauls(self):
        """Default values of GDFeed/GDEntry/Link, and keyword construction."""
        feed = gdata.data.GDFeed()
        self.assertTrue(feed.etag is None)
        self.assertEqual(feed.link, [])
        self.assertEqual(feed.entry, [])
        entry = gdata.data.GDEntry()
        self.assertTrue(entry.etag is None)
        self.assertEqual(entry.link, [])
        link = atom.data.Link()
        self.assertTrue(link.href is None)
        self.assertTrue(link.rel is None)
        link1 = atom.data.Link(href="http://example.com", rel="test")
        self.assertEqual(link1.href, "http://example.com")
        self.assertEqual(link1.rel, "test")
        link2 = atom.data.Link(href="http://example.org/", rel="alternate")
        entry = gdata.data.GDEntry(etag="foo", link=[link1, link2])
        feed = gdata.data.GDFeed(etag="12345", entry=[entry])
        self.assertEqual(feed.etag, "12345")
        self.assertEqual(len(feed.entry), 1)
        self.assertEqual(feed.entry[0].etag, "foo")
        self.assertEqual(len(feed.entry[0].link), 2)
class DataClassSanityTest(unittest.TestCase):
    """Structural sanity checks over every element class in gdata.data."""
    def test_basic_element_structure(self):
        """Run conf.check_data_classes over the full list of element classes."""
        # NOTE(review): gdata.data.Country appears twice in this list
        # (harmless — it is simply checked twice).
        conf.check_data_classes(
            self,
            [
                gdata.data.TotalResults,
                gdata.data.StartIndex,
                gdata.data.ItemsPerPage,
                gdata.data.ExtendedProperty,
                gdata.data.GDEntry,
                gdata.data.GDFeed,
                gdata.data.BatchId,
                gdata.data.BatchOperation,
                gdata.data.BatchStatus,
                gdata.data.BatchEntry,
                gdata.data.BatchInterrupted,
                gdata.data.BatchFeed,
                gdata.data.EntryLink,
                gdata.data.FeedLink,
                gdata.data.AdditionalName,
                gdata.data.Comments,
                gdata.data.Country,
                gdata.data.Email,
                gdata.data.FamilyName,
                gdata.data.Im,
                gdata.data.GivenName,
                gdata.data.NamePrefix,
                gdata.data.NameSuffix,
                gdata.data.FullName,
                gdata.data.Name,
                gdata.data.OrgDepartment,
                gdata.data.OrgName,
                gdata.data.OrgSymbol,
                gdata.data.OrgTitle,
                gdata.data.Organization,
                gdata.data.When,
                gdata.data.Who,
                gdata.data.OriginalEvent,
                gdata.data.PhoneNumber,
                gdata.data.PostalAddress,
                gdata.data.Rating,
                gdata.data.Recurrence,
                gdata.data.RecurrenceException,
                gdata.data.Reminder,
                gdata.data.Agent,
                gdata.data.HouseName,
                gdata.data.Street,
                gdata.data.PoBox,
                gdata.data.Neighborhood,
                gdata.data.City,
                gdata.data.Subregion,
                gdata.data.Region,
                gdata.data.Postcode,
                gdata.data.Country,
                gdata.data.FormattedAddress,
                gdata.data.StructuredPostalAddress,
                gdata.data.Where,
                gdata.data.AttendeeType,
                gdata.data.AttendeeStatus,
            ],
        )
    def test_member_values(self):
        """Spot-check qualified names and class attributes on a few classes."""
        self.assertEqual(
            gdata.data.TotalResults._qname,
            "{http://a9.com/-/spec/opensearch/1.1/}totalResults",
        )
        self.assertEqual(
            gdata.data.RecurrenceException._qname,
            "{http://schemas.google.com/g/2005}recurrenceException",
        )
        self.assertEqual(gdata.data.RecurrenceException.specialized, "specialized")
def suite():
    """Build the test suite; each TestCase class is listed exactly once.

    BUG FIX: StartIndexTest was listed twice (its tests ran twice per suite
    run) and DataClassSanityTest was missing from the suite entirely.
    """
    return conf.build_suite(
        [
            StartIndexTest,
            GDataEntryTest,
            LinkFinderTest,
            GDataFeedTest,
            BatchEntryTest,
            BatchFeedTest,
            ExtendedPropertyTest,
            FeedLinkTest,
            SimpleV2FeedTest,
            DataClassSanityTest,
        ]
    )
if __name__ == "__main__":
    unittest.main()
|
# performs a simple device inquiry, followed by a remote name request of each
# discovered device
import os
import sys
import struct
import bluetooth._bluetooth as bluez
def printpacket(pkt):
    """Print *pkt* as space-separated two-digit hex bytes, ending with a newline.

    BUG FIX: iterating a ``bytes`` object in Python 3 yields ``int`` values,
    so the old ``struct.unpack("B", c)`` raised TypeError; the int can be
    formatted directly.
    """
    for c in pkt:
        sys.stdout.write("%02x " % c)
    print()
def read_inquiry_mode(sock):
    """Query the controller's inquiry mode over the HCI socket *sock*.

    Returns the current mode byte, or -1 on failure.  The socket's event
    filter is saved on entry and restored before returning.
    """
    # save current filter
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # Setup socket filter to receive only events related to the
    # read_inquiry_mode command
    flt = bluez.hci_filter_new()
    opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
            bluez.OCF_READ_INQUIRY_MODE)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
    bluez.hci_filter_set_opcode(flt, opcode)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    # first read the current inquiry mode.
    bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
            bluez.OCF_READ_INQUIRY_MODE )
    pkt = sock.recv(255)
    # "xxxxxxBB" skips the 6 leading bytes of the command-complete event and
    # unpacks the status byte followed by the mode byte.
    status,mode = struct.unpack("xxxxxxBB", pkt)
    if status != 0: mode = -1
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    return mode
def write_inquiry_mode(sock, mode):
    """Set the controller's inquiry mode to *mode* over the HCI socket *sock*.

    Returns 0 on success, -1 on failure.  The socket's event filter is saved
    on entry and restored before returning.
    """
    # save current filter
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # Setup socket filter to receive only events related to the
    # write_inquiry_mode command
    flt = bluez.hci_filter_new()
    opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL,
            bluez.OCF_WRITE_INQUIRY_MODE)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
    bluez.hci_filter_set_opcode(flt, opcode)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    # send the command!
    bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL,
            bluez.OCF_WRITE_INQUIRY_MODE, struct.pack("B", mode) )
    pkt = sock.recv(255)
    # "xxxxxxB" skips the 6 leading bytes of the command-complete event and
    # unpacks the status byte.
    status = struct.unpack("xxxxxxB", pkt)[0]
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    if status != 0: return -1
    return 0
def device_inquiry_with_with_rssi(sock):
    """Run an HCI inquiry on *sock*, printing and returning (addr, rssi) pairs.

    Devices that only report a plain inquiry result (no RSSI) get an RSSI of
    -1.  The socket's event filter is saved on entry and restored on return.
    """
    # save current filter
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # perform a device inquiry on bluetooth device #0
    # The inquiry should last 8 * 1.28 = 10.24 seconds
    # before the inquiry is performed, bluez should flush its cache of
    # previously discovered devices
    flt = bluez.hci_filter_new()
    bluez.hci_filter_all_events(flt)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    duration = 4  # in units of 1.28 seconds
    max_responses = 255
    # 0x9e8b33 (little-endian below) is the General Inquiry Access Code.
    cmd_pkt = struct.pack("BBBBB", 0x33, 0x8b, 0x9e, duration, max_responses)
    bluez.hci_send_cmd(sock, bluez.OGF_LINK_CTL, bluez.OCF_INQUIRY, cmd_pkt)
    results = []
    done = False
    while not done:
        pkt = sock.recv(255)
        ptype, event, plen = struct.unpack("BBB", pkt[:3])
        if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
            pkt = pkt[3:]
            # BUG FIX: indexing bytes yields an int in Python 3, so
            # struct.unpack("B", pkt[0]) raised TypeError.  Use the int
            # directly, and slice one byte for the signed RSSI unpack.
            nrsp = pkt[0]
            for i in range(nrsp):
                addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
                rssi = struct.unpack("b", pkt[1+13*nrsp+i:2+13*nrsp+i])[0]
                results.append( ( addr, rssi ) )
                print("[%s] RSSI: [%d]" % (addr, rssi))
        elif event == bluez.EVT_INQUIRY_COMPLETE:
            done = True
        elif event == bluez.EVT_CMD_STATUS:
            status, ncmd, opcode = struct.unpack("BBH", pkt[3:7])
            if status != 0:
                print("uh oh...")
                printpacket(pkt[3:7])
                done = True
        elif event == bluez.EVT_INQUIRY_RESULT:
            pkt = pkt[3:]
            nrsp = pkt[0]  # same Python 3 bytes-indexing fix as above
            for i in range(nrsp):
                addr = bluez.ba2str( pkt[1+6*i:1+6*i+6] )
                results.append( ( addr, -1 ) )
                print("[%s] (no RRSI)" % addr)
        else:
            print("unrecognized packet type 0x%02x" % ptype)
            print("event ", event)
    # restore old filter
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    return results
# Script entry: open HCI device 0, ensure inquiry-with-RSSI mode, then scan.
dev_id = 0
try:
    sock = bluez.hci_open_dev(dev_id)
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; narrowed so only real errors print this message.
    print("error accessing bluetooth device...")
    sys.exit(1)
try:
    mode = read_inquiry_mode(sock)
except Exception as e:
    print("error reading inquiry mode. ")
    print("Are you sure this a bluetooth 1.2 device?")
    print(e)
    sys.exit(1)
print("current inquiry mode is %d" % mode)
if mode != 1:
    # Mode 1 = inquiry result with RSSI; switch the controller over.
    print("writing inquiry mode...")
    try:
        result = write_inquiry_mode(sock, 1)
    except Exception as e:
        print("error writing inquiry mode. Are you sure you're root?")
        print(e)
        sys.exit(1)
    if result != 0:
        print("error while setting inquiry mode")
        print("result: %d" % result)
device_inquiry_with_with_rssi(sock)
|
# -*- test-case-name: twisted.conch.test.test_checkers -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Provide L{ICredentialsChecker} implementations to be used in Conch protocols.
"""
import os, base64, binascii, errno
try:
import pwd
except ImportError:
pwd = None
else:
import crypt
try:
# get this from http://www.twistedmatrix.com/users/z3p/files/pyshadow-0.2.tar.gz
import shadow
except:
shadow = None
try:
from twisted.cred import pamauth
except ImportError:
pamauth = None
from zope.interface import implements, providedBy
from twisted.conch import error
from twisted.conch.ssh import keys
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword, ISSHPrivateKey
from twisted.cred.error import UnauthorizedLogin, UnhandledCredentials
from twisted.internet import defer
from twisted.python import failure, reflect, log
from twisted.python.util import runAsEffectiveUser
from twisted.python.filepath import FilePath
def verifyCryptedPassword(crypted, pw):
    """Return True if plaintext *pw* matches the crypt(3) hash *crypted*.

    BUG FIXES: the old code raised IndexError on an empty hash string, and it
    hard-coded the ``$1$`` (md5-crypt) prefix for every modular-format hash,
    so ``$5$``/``$6$`` (SHA) hashes could never verify.  ``crypt.crypt``
    accepts the full hashed value as the salt argument and uses whatever
    scheme the hash declares, which handles every format uniformly.
    """
    if not crypted:
        # An empty stored hash can never match; also avoids IndexError.
        return False
    return crypt.crypt(pw, crypted) == crypted
class UNIXPasswordDatabase:
    """Checks IUsernamePassword credentials against the local UNIX accounts.

    Consults /etc/passwd via the C{pwd} module first, then the shadow
    database via the optional C{shadow} module when that did not succeed.
    """
    credentialInterfaces = (IUsernamePassword,)
    implements(ICredentialsChecker)
    def requestAvatarId(self, credentials):
        """Return a Deferred firing the username on success, or an
        UnauthorizedLogin failure otherwise."""
        if pwd:
            try:
                cryptedPass = pwd.getpwnam(credentials.username)[1]
            except KeyError:
                return defer.fail(UnauthorizedLogin("invalid username"))
            else:
                # "*" and "x" are placeholders meaning the real hash lives
                # in the shadow database; fall through in that case.
                if cryptedPass not in ["*", "x"] and verifyCryptedPassword(
                    cryptedPass, credentials.password
                ):
                    return defer.succeed(credentials.username)
        if shadow:
            # Reading the shadow database needs effective uid/gid 0;
            # escalate temporarily and restore the saved ids afterwards.
            gid = os.getegid()
            uid = os.geteuid()
            os.setegid(0)
            os.seteuid(0)
            try:
                shadowPass = shadow.getspnam(credentials.username)[1]
            except KeyError:
                os.setegid(gid)
                os.seteuid(uid)
                return defer.fail(UnauthorizedLogin("invalid username"))
            os.setegid(gid)
            os.seteuid(uid)
            if verifyCryptedPassword(shadowPass, credentials.password):
                return defer.succeed(credentials.username)
            return defer.fail(UnauthorizedLogin("invalid password"))
        return defer.fail(UnauthorizedLogin("unable to verify password"))
class SSHPublicKeyDatabase:
    """
    Checker that authenticates SSH public keys, based on public keys listed in
    authorized_keys and authorized_keys2 files in user .ssh/ directories.
    """
    credentialInterfaces = (ISSHPrivateKey,)
    implements(ICredentialsChecker)
    def requestAvatarId(self, credentials):
        """Return a Deferred firing the username if *credentials* carry a
        known key and a valid signature; errbacks with UnauthorizedLogin
        otherwise."""
        d = defer.maybeDeferred(self.checkKey, credentials)
        d.addCallback(self._cbRequestAvatarId, credentials)
        d.addErrback(self._ebRequestAvatarId)
        return d
    def _cbRequestAvatarId(self, validKey, credentials):
        """
        Check whether the credentials themselves are valid, now that we know
        if the key matches the user.
        @param validKey: A boolean indicating whether or not the public key
            matches a key in the user's authorized_keys file.
        @param credentials: The credentials offered by the user.
        @type credentials: L{ISSHPrivateKey} provider
        @raise UnauthorizedLogin: (as a failure) if the key does not match the
            user in C{credentials}. Also raised if the user provides an invalid
            signature.
        @raise ValidPublicKey: (as a failure) if the key matches the user but
            the credentials do not include a signature. See
            L{error.ValidPublicKey} for more information.
        @return: The user's username, if authentication was successful.
        """
        if not validKey:
            return failure.Failure(UnauthorizedLogin("invalid key"))
        if not credentials.signature:
            return failure.Failure(error.ValidPublicKey())
        else:
            try:
                pubKey = keys.Key.fromString(credentials.blob)
                if pubKey.verify(credentials.signature, credentials.sigData):
                    return credentials.username
            except: # any error should be treated as a failed login
                log.err()
                return failure.Failure(UnauthorizedLogin("error while verifying key"))
        return failure.Failure(UnauthorizedLogin("unable to verify key"))
    def getAuthorizedKeysFiles(self, credentials):
        """
        Return a list of L{FilePath} instances for I{authorized_keys} files
        which might contain information about authorized keys for the given
        credentials.
        On OpenSSH servers, the default location of the file containing the
        list of authorized public keys is
        U{$HOME/.ssh/authorized_keys<http://www.openbsd.org/cgi-bin/man.cgi?query=sshd_config>}.
        I{$HOME/.ssh/authorized_keys2} is also returned, though it has been
        U{deprecated by OpenSSH since
        2001<http://marc.info/?m=100508718416162>}.
        @return: A list of L{FilePath} instances to files with the authorized keys.
        """
        pwent = pwd.getpwnam(credentials.username)
        root = FilePath(pwent.pw_dir).child(".ssh")
        files = ["authorized_keys", "authorized_keys2"]
        return [root.child(f) for f in files]
    def checkKey(self, credentials):
        """
        Retrieve files containing authorized keys and check against user
        credentials.
        """
        uid, gid = os.geteuid(), os.getegid()
        ouid, ogid = pwd.getpwnam(credentials.username)[2:4]
        for filepath in self.getAuthorizedKeysFiles(credentials):
            if not filepath.exists():
                continue
            try:
                lines = filepath.open()
            except IOError as e:
                # Permission denied: retry the open as the key's owner.
                if e.errno == errno.EACCES:
                    lines = runAsEffectiveUser(ouid, ogid, filepath.open)
                else:
                    raise
            for l in lines:
                # authorized_keys format: options/type, base64 key, comment.
                l2 = l.split()
                if len(l2) < 2:
                    continue
                try:
                    # NOTE(review): base64.decodestring was removed in
                    # Python 3.9 — decodebytes is the modern name; confirm
                    # the target interpreter before changing.
                    if base64.decodestring(l2[1]) == credentials.blob:
                        return True
                except binascii.Error:
                    continue
        return False
    def _ebRequestAvatarId(self, f):
        """Translate any non-UnauthorizedLogin failure into a generic
        UnauthorizedLogin, logging the original."""
        if not f.check(UnauthorizedLogin):
            log.msg(f)
            return failure.Failure(UnauthorizedLogin("unable to get avatar id"))
        return f
class SSHProtocolChecker:
    """
    SSHProtocolChecker is a checker that requires multiple authentications
    to succeed.  To add a checker, call my registerChecker method with
    the checker and the interface.
    After each successful authenticate, I call my areDone method with the
    avatar id.  To get a list of the successful credentials for an avatar id,
    use C{SSHProcotolChecker.successfulCredentials[avatarId]}.  If L{areDone}
    returns True, the authentication has succeeded.
    """
    implements(ICredentialsChecker)
    def __init__(self):
        # Maps credential interface -> checker responsible for it.
        self.checkers = {}
        # Maps avatar id -> list of credentials that already succeeded.
        self.successfulCredentials = {}
    def get_credentialInterfaces(self):
        """Return the credential interfaces for which a checker is registered."""
        return list(self.checkers.keys())
    credentialInterfaces = property(get_credentialInterfaces)
    def registerChecker(self, checker, *credentialInterfaces):
        """Register *checker* for the given interfaces (defaulting to the
        checker's own credentialInterfaces when none are passed)."""
        if not credentialInterfaces:
            credentialInterfaces = checker.credentialInterfaces
        for credentialInterface in credentialInterfaces:
            self.checkers[credentialInterface] = checker
    def requestAvatarId(self, credentials):
        """
        Part of the L{ICredentialsChecker} interface.  Called by a portal with
        some credentials to check if they'll authenticate a user.  We check the
        interfaces that the credentials provide against our list of acceptable
        checkers.  If one of them matches, we ask that checker to verify the
        credentials.  If they're valid, we call our L{_cbGoodAuthentication}
        method to continue.
        @param credentials: the credentials the L{Portal} wants us to verify
        """
        ifac = providedBy(credentials)
        for i in ifac:
            c = self.checkers.get(i)
            if c is not None:
                d = defer.maybeDeferred(c.requestAvatarId, credentials)
                return d.addCallback(self._cbGoodAuthentication, credentials)
        return defer.fail(
            UnhandledCredentials(
                "No checker for %s" % ", ".join(map(reflect.qual, ifac))
            )
        )
    def _cbGoodAuthentication(self, avatarId, credentials):
        """
        Called if a checker has verified the credentials.  We call our
        L{areDone} method to see if the whole of the successful authentications
        are enough.  If they are, we return the avatar ID returned by the first
        checker.
        """
        if avatarId not in self.successfulCredentials:
            self.successfulCredentials[avatarId] = []
        self.successfulCredentials[avatarId].append(credentials)
        if self.areDone(avatarId):
            # All required factors passed; clean up the bookkeeping entry.
            del self.successfulCredentials[avatarId]
            return avatarId
        else:
            # Signal the transport that more authentication rounds are needed.
            raise error.NotEnoughAuthentication()
    def areDone(self, avatarId):
        """
        Override to determine if the authentication is finished for a given
        avatarId.
        @param avatarId: the avatar returned by the first checker.  For
            this checker to function correctly, all the checkers must
            return the same avatar ID.
        """
        return True
|
'\nImplementation module for the I{cftp} command.\n'
import os, sys, getpass, struct, tty, fcntl, stat
import fnmatch, pwd, glob
from twisted.conch.client import connect, default, options
from twisted.conch.ssh import connection, common
from twisted.conch.ssh import channel, filetransfer
from twisted.protocols import basic
from twisted.internet import reactor, stdio, defer, utils
from twisted.python import log, usage, failure
class ClientOptions(options.ConchOptions):
    # NOTE(review): this class (and the rest of this module) appears to be
    # decompiler output with most call expressions stripped — bodies contain
    # bare tuples/parentheses where method calls once were.  It cannot run
    # as-is; restore from upstream twisted.conch.scripts.cftp rather than
    # patching in place.
    synopsis = 'Usage: cftp [options] [user@]host\n cftp [options] [user@]host[:dir[/]]\n cftp [options] [user@]host[:file [localfile]]\n'
    longdesc = 'cftp is a client for logging into a remote machine and executing commands to send and receive file information'
    optParameters = [['buffersize', 'B', 32768, 'Size of the buffer to use for sending/receiving.'], ['batchfile', 'b', None, "File to read commands from, or '-' for stdin."], ['requests', 'R', 5, 'Number of requests to make before waiting for a reply.'], ['subsystem', 's', 'sftp', 'Subsystem/server program to connect to.']]
    zsh_altArgDescr = {'buffersize': 'Size of send/receive buffer (default: 32768)'}
    zsh_extras = ['2::localfile:{if [[ $words[1] == *:* ]]; then; _files; fi}']
    def parseArgs(self, host, localPath=None):
        # Split an optional ":remotePath" suffix off the host argument.
        self['remotePath'] = ''
        if (':' in host):
            # presumably was host.split(':', 1) with an rstrip('/') on the
            # remote path — TODO confirm against upstream
            (host, self['remotePath']) = (':', 1)
            ('/')
        self['host'] = host
        self['localPath'] = localPath
def run():
    # NOTE(review): corrupted decompiler output — option parsing, logging
    # setup and the connect/reactor-run calls have been stripped to bare
    # parentheses.  Restore from upstream twisted.conch.scripts.cftp.
    args = sys.argv[1:]
    if ('-l' in args):
        # Reorder a "-l user" pair to the front of the argument list.
        i = ('-l')
        args = (args[i:(i + 2)] + args)
        del args[(i + 2):(i + 4)]
    options = ()
    try:
        (args)
    except usage.UsageError as u:
        (('ERROR: %s' % u))
        (1)
    if options['log']:
        # Route twisted logging to stderr while keeping stdout for output.
        realout = sys.stdout
        (sys.stderr)
        sys.stdout = realout
    else:
        ()
    (options)
    ()
def handleError():
    # NOTE(review): corrupted decompiler output — the reactor-stop and
    # traceback-logging calls were stripped.  Sets the module-level exit
    # status to 2 and re-raises the active exception.
    global exitStatus
    exitStatus = 2
    try:
        ()
    except:
        raise
    (())
    raise
def doConnect(options):
    # NOTE(review): corrupted decompiler output — the user@host split,
    # getpass/int conversions and the connect() call were stripped to bare
    # tuples.  Restore from upstream twisted.conch.scripts.cftp.
    if ('@' in options['host']):
        # presumably options['host'].split('@', 1) — TODO confirm
        (options['user'], options['host']) = ('@', 1)
    host = options['host']
    if (not options['user']):
        options['user'] = ()
    if (not options['port']):
        options['port'] = 22
    else:
        options['port'] = (options['port'])
    host = options['host']
    port = options['port']
    conn = ()
    conn.options = options
    vhk = default.verifyHostKey
    uao = (options['user'], options, conn)
    (_ebExit)
def _ebExit(f):
    # NOTE(review): corrupted decompiler output — the hasattr() check, the
    # error print and the reactor-stop call were stripped.  Intended as a
    # final errback that reports the failure and stops the reactor.
    if (f.value, 'value'):
        s = f.value.value
    else:
        s = (f)
    (s)
    try:
        ()
    except:
        raise
def _ignore(*args):
pass
class FileWrapper():
    # NOTE(review): corrupted decompiler output — `(0, 2)` was presumably
    # f.seek(0, 2), `self.size = ()` presumably f.tell(), and __getattr__
    # presumably returned getattr(self.f, attr).  TODO confirm against
    # upstream twisted.conch.scripts.cftp before use.
    def __init__(self, f):
        self.f = f
        self.total = 0.0
        (0, 2)
        self.size = ()
    def __getattr__(self, attr):
        return (self.f, attr)
class StdioClient(basic.LineReceiver):
_pwd = pwd
ps = 'cftp> '
delimiter = '\n'
reactor = reactor
def __init__(self, client, f=None):
self.client = client
self.currentDirectory = ''
self.file = f
self.useProgressBar = (((not f) and 1) or 0)
def connectionMade(self):
(self._cbSetCurDir)
def _cbSetCurDir(self, path):
self.currentDirectory = path
()
def lineReceived(self, line):
if self.client.transport.localClosed:
return
(('got line %s' % (line)))
line = ()
if (not line):
()
return
if (self.file and ('-')):
self.ignoreErrors = 1
line = line[1:]
else:
self.ignoreErrors = 0
d = (line)
if (d is not None):
(self._cbCommand)
(self._ebCommand)
def _dispatchCommand(self, line):
if (' ' in line):
(command, rest) = (' ', 1)
rest = ()
else:
(command, rest) = (line, '')
if ('!'):
f = self.cmd_EXEC
rest = ()
else:
command = ()
(('looking up cmd %s' % command))
f = (self, ('cmd_%s' % command), None)
if (f is not None):
return (f, rest)
else:
(((("No command called `%s'" % command))))
()
def _printFailure(self, f):
(f)
e = (NotImplementedError, filetransfer.SFTPError, OSError, IOError)
if (e == NotImplementedError):
((''))
elif (e == filetransfer.SFTPError):
(('remote error %i: %s\n' % (f.value.code, f.value.message)))
elif (e in (OSError, IOError)):
(('local error %i: %s\n' % (f.value.errno, f.value.strerror)))
def _newLine(self):
if self.client.transport.localClosed:
return
(self.ps)
self.ignoreErrors = 0
if self.file:
l = ()
if (not l):
()
else:
(l)
(())
def _cbCommand(self, result):
if (result is not None):
(result)
if (not ('\n')):
('\n')
()
def _ebCommand(self, f):
(f)
if (self.file and (not self.ignoreErrors)):
()
()
def cmd_CD(self, path):
(path, rest) = (path)
if (not ('/')):
path += '/'
newPath = ((path and (self.currentDirectory, path)) or '')
d = (newPath)
(self._cbCd)
(self._ebCommand)
return d
def _cbCd(self, directory):
()
d = (directory.name)
(self._cbCurDir)
return d
def _cbCurDir(self, path):
self.currentDirectory = path
def cmd_CHGRP(self, rest):
(grp, rest) = (None, 1)
(path, rest) = (rest)
grp = (grp)
d = (path)
(self._cbSetUsrGrp, path)
return d
def cmd_CHMOD(self, rest):
(mod, rest) = (None, 1)
(path, rest) = (rest)
mod = (mod, 8)
d = (path, {'permissions': mod})
(_ignore)
return d
def cmd_CHOWN(self, rest):
(usr, rest) = (None, 1)
(path, rest) = (rest)
usr = (usr)
d = (path)
(self._cbSetUsrGrp, path)
return d
def _cbSetUsrGrp(self, attrs, path, usr=None, grp=None):
new = {}
new['uid'] = (((usr is not None) and usr) or attrs['uid'])
new['gid'] = (((grp is not None) and grp) or attrs['gid'])
d = (path, new)
(_ignore)
return d
def cmd_GET(self, rest):
(remote, rest) = (rest)
if (('*' in remote) or ('?' in remote)):
if rest:
(local, rest) = (rest)
if (not (local)):
return 'Wildcard get with non-directory target.'
else:
local = ''
d = (remote)
(self._cbGetMultiple, local)
return d
if rest:
(local, rest) = (rest)
else:
local = (remote)[1]
((remote, local))
lf = (local, 'w', 0)
path = (self.currentDirectory, remote)
d = (path, filetransfer.FXF_READ, {})
(self._cbGetOpenFile, lf)
(self._ebCloseLf, lf)
return d
def _cbGetMultiple(self, files, local):
return (None, files, local)
def _cbGetMultipleNext(self, res, files, local):
if (res, failure.Failure):
(res)
elif res:
(res)
if (not ('\n')):
('\n')
if (not files):
return
f = (0)[0]
lf = ((local, (f)[1]), 'w', 0)
path = (self.currentDirectory, f)
d = (path, filetransfer.FXF_READ, {})
(self._cbGetOpenFile, lf)
(self._ebCloseLf, lf)
(self._cbGetMultipleNext, files, local)
return d
def _ebCloseLf(self, f, lf):
()
return f
def _cbGetOpenFile(self, rf, lf):
return (self._cbGetFileSize, rf, lf)
def _cbGetFileSize(self, attrs, rf, lf):
if (not (attrs['permissions'])):
()
()
return ("Can't get non-regular file: %s" % rf.name)
rf.size = attrs['size']
bufferSize = self.client.transport.conn.options['buffersize']
numRequests = self.client.transport.conn.options['requests']
rf.total = 0.0
dList = []
chunks = []
startTime = ()
for i in (numRequests):
d = ('', rf, lf, chunks, 0, bufferSize, startTime)
(d)
dl = (dList)
(self._cbGetDone, rf, lf)
return dl
def _getNextChunk(self, chunks):
end = 0
for chunk in chunks:
if (end == 'eof'):
return
if (end != chunk[0]):
i = (chunk)
(i, (end, chunk[0]))
return (end, (chunk[0] - end))
end = chunk[1]
bufSize = (self.client.transport.conn.options['buffersize'])
((end, (end + bufSize)))
return (end, bufSize)
def _cbGetRead(self, data, rf, lf, chunks, start, size, startTime):
if (data and (data, failure.Failure)):
(('get read err: %s' % data))
reason = data
(EOFError)
i = ((start, (start + size)))
del chunks[i]
(i, (start, 'eof'))
elif data:
(('get read data: %i' % (data)))
(start)
(data)
if ((data) != size):
(('got less than we asked for: %i < %i' % ((data), size)))
i = ((start, (start + size)))
del chunks[i]
(i, (start, (start + (data))))
rf.total += (data)
if self.useProgressBar:
(rf, startTime)
chunk = (chunks)
if (not chunk):
return
else:
(start, length) = chunk
(('asking for %i -> %i' % (start, (start + length))))
d = (start, length)
(self._cbGetRead, rf, lf, chunks, start, length, startTime)
return d
def _cbGetDone(self, ignored, rf, lf):
('get done')
()
()
if self.useProgressBar:
('\n')
return ('Transferred %s to %s' % (rf.name, lf.name))
def cmd_PUT(self, rest):
(local, rest) = (rest)
if (('*' in local) or ('?' in local)):
if rest:
(remote, rest) = (rest)
path = (self.currentDirectory, remote)
d = (path)
(self._cbPutTargetAttrs, remote, local)
return d
else:
remote = ''
files = (local)
return (None, files, remote)
if rest:
(remote, rest) = (rest)
else:
remote = (local)[1]
lf = (local, 'r')
path = (self.currentDirectory, remote)
flags = ((filetransfer.FXF_WRITE | filetransfer.FXF_CREAT) | filetransfer.FXF_TRUNC)
d = (path, flags, {})
(self._cbPutOpenFile, lf)
(self._ebCloseLf, lf)
return d
def _cbPutTargetAttrs(self, attrs, path, local):
if (not (attrs['permissions'])):
return 'Wildcard put with non-directory target.'
return (None, files, path)
def _cbPutMultipleNext(self, res, files, path):
if (res, failure.Failure):
(res)
elif res:
(res)
if (not ('\n')):
('\n')
f = None
while (files and (not f)):
try:
f = (0)
lf = (f, 'r')
except:
(())
f = None
if (not f):
return
name = (f)[1]
remote = (self.currentDirectory, path, name)
((name, remote, path))
flags = ((filetransfer.FXF_WRITE | filetransfer.FXF_CREAT) | filetransfer.FXF_TRUNC)
d = (remote, flags, {})
(self._cbPutOpenFile, lf)
(self._ebCloseLf, lf)
(self._cbPutMultipleNext, files, path)
return d
def _cbPutOpenFile(self, rf, lf):
numRequests = self.client.transport.conn.options['requests']
if self.useProgressBar:
lf = (lf)
dList = []
chunks = []
startTime = ()
for i in (numRequests):
d = (None, rf, lf, chunks, startTime)
if d:
(d)
dl = (dList)
(self._cbPutDone, rf, lf)
return dl
def _cbPutWrite(self, ignored, rf, lf, chunks, startTime):
chunk = (chunks)
(start, size) = chunk
(start)
data = (size)
if self.useProgressBar:
lf.total += (data)
(lf, startTime)
if data:
d = (start, data)
(self._cbPutWrite, rf, lf, chunks, startTime)
return d
else:
return
def _cbPutDone(self, ignored, rf, lf):
()
()
if self.useProgressBar:
('\n')
return ('Transferred %s to %s' % (lf.name, rf.name))
def cmd_LCD(self, path):
(path)
def cmd_LN(self, rest):
(linkpath, rest) = (rest)
(targetpath, rest) = (rest)
(linkpath, targetpath) = [(self.currentDirectory, x) for x in (linkpath, targetpath)]
return (_ignore)
def cmd_LS(self, rest):
options = []
rest = ()
while (rest and rest[0] and (rest[0][0] == '-')):
opts = (0)[1:]
for o in opts:
if (o == 'l'):
('verbose')
elif (o == 'a'):
('all')
rest = (rest)
(path, rest) = (rest)
if (not path):
fullPath = (self.currentDirectory + '/')
else:
fullPath = (self.currentDirectory, path)
d = (fullPath)
(self._cbDisplayFiles, options)
return d
def _cbDisplayFiles(self, files, options):
()
if ('all' not in options):
files = [f for f in files if (not ('.'))]
if ('verbose' in options):
lines = [f[1] for f in files]
else:
lines = [f[0] for f in files]
if (not lines):
return None
else:
return (lines)
def cmd_MKDIR(self, path):
(path, rest) = (path)
path = (self.currentDirectory, path)
return (_ignore)
def cmd_RMDIR(self, path):
(path, rest) = (path)
path = (self.currentDirectory, path)
return (_ignore)
def cmd_LMKDIR(self, path):
(('mkdir %s' % path))
def cmd_RM(self, path):
(path, rest) = (path)
path = (self.currentDirectory, path)
return (_ignore)
def cmd_LLS(self, rest):
(('ls %s' % rest))
def cmd_RENAME(self, rest):
(oldpath, rest) = (rest)
(newpath, rest) = (rest)
(oldpath, newpath) = [(self.currentDirectory, x) for x in (oldpath, newpath)]
return (_ignore)
def cmd_EXIT(self, ignored):
()
cmd_QUIT = cmd_EXIT
def cmd_VERSION(self, ignored):
return ('SFTP version %i' % self.client.version)
def cmd_HELP(self, ignored):
return "Available commands:\ncd path Change remote directory to 'path'.\nchgrp gid path Change gid of 'path' to 'gid'.\nchmod mode path Change mode of 'path' to 'mode'.\nchown uid path Change uid of 'path' to 'uid'.\nexit Disconnect from the server.\nget remote-path [local-path] Get remote file.\nhelp Get a list of available commands.\nlcd path Change local directory to 'path'.\nlls [ls-options] [path] Display local directory listing.\nlmkdir path Create local directory.\nln linkpath targetpath Symlink remote file.\nlpwd Print the local working directory.\nls [-l] [path] Display remote directory listing.\nmkdir path Create remote directory.\nprogress Toggle progress bar.\nput local-path [remote-path] Put local file.\npwd Print the remote working directory.\nquit Disconnect from the server.\nrename oldpath newpath Rename remote file.\nrmdir path Remove remote directory.\nrm path Remove remote file.\nversion Print the SFTP version.\n? Synonym for 'help'.\n"
def cmd_PWD(self, ignored):
return self.currentDirectory
def cmd_LPWD(self, ignored):
return ()
def cmd_PROGRESS(self, ignored):
self.useProgressBar = (not self.useProgressBar)
return ('%ssing progess bar.' % ((self.useProgressBar and 'U') or 'Not u'))
def cmd_EXEC(self, rest):
"\n Run C{rest} using the user's shell (or /bin/sh if they do not have\n one).\n "
shell = (())[6]
if (not shell):
shell = '/bin/sh'
if rest:
cmds = ['-c', rest]
return (shell, cmds)
else:
(shell)
def _remoteGlob(self, fullPath):
(('looking up %s' % fullPath))
(head, tail) = (fullPath)
if (('*' in tail) or ('?' in tail)):
glob = 1
else:
glob = 0
if (tail and (not glob)):
d = (fullPath)
(self._cbOpenList, '')
(self._ebNotADirectory, head, tail)
else:
d = (head)
(self._cbOpenList, tail)
return d
def _cbOpenList(self, directory, glob):
files = []
d = ()
(self._cbReadFile, files, directory, glob)
return d
def _ebNotADirectory(self, reason, path, glob):
d = (path)
(self._cbOpenList, glob)
return d
def _cbReadFile(self, files, l, directory, glob):
if (not (files, failure.Failure)):
if glob:
([f for f in files if (f[0], glob)])
else:
(files)
d = ()
(self._cbReadFile, l, directory, glob)
return d
else:
reason = files
(EOFError)
()
return l
def _abbrevSize(self, size):
_abbrevs = [((1 << 50), 'PB'), ((1 << 40), 'TB'), ((1 << 30), 'GB'), ((1 << 20), 'MB'), ((1 << 10), 'kB'), (1, 'B')]
for (factor, suffix) in _abbrevs:
if (size > factor):
break
return (('%.1f' % (size / factor)) + suffix)
def _abbrevTime(self, t):
if (t > 3600):
hours = ((t / 3600))
t -= (3600 * hours)
mins = ((t / 60))
t -= (60 * mins)
return ('%i:%02i:%02i' % (hours, mins, t))
else:
mins = ((t / 60))
t -= (60 * mins)
return ('%02i:%02i' % (mins, t))
def _printProgressBar(self, f, startTime):
"\n Update a console progress bar on this L{StdioClient}'s transport, based\n on the difference between the start time of the operation and the\n current time according to the reactor, and appropriate to the size of\n the console window.\n\n @param f: a wrapper around the file which is being written or read\n @type f: L{FileWrapper}\n\n @param startTime: The time at which the operation being tracked began.\n @type startTime: C{float}\n "
diff = (() - startTime)
total = f.total
try:
winSize = ('4H', (0, tty.TIOCGWINSZ, '12345679'))
except IOError:
winSize = [None, 80]
if (diff == 0.0):
speed = 0.0
else:
speed = (total / diff)
if speed:
timeLeft = ((f.size - total) / speed)
else:
timeLeft = 0
front = f.name
back = ('%3i%% %s %sps %s ' % (((total / f.size) * 100), (total), (speed), (timeLeft)))
spaces = ((winSize[1] - (((front) + (back)) + 1)) * ' ')
(('\r%s%s%s' % (front, spaces, back)))
def _getFilename(self, line):
()
if (not line):
return (None, '')
if (line[0] in '\'"'):
ret = []
line = (line)
try:
for i in (1, (line)):
c = line[i]
if (c == line[0]):
return ((ret), ())
elif (c == '\\'):
del line[i]
if (line[i] not in '\'"\\'):
raise (('bad quote: \\%s' % line[i]))
(line[i])
else:
(line[i])
except IndexError:
raise ('unterminated quote')
ret = (None, 1)
if ((ret) == 1):
return (ret[0], '')
else:
return ret
StdioClient.__dict__['cmd_?'] = StdioClient.cmd_HELP
class SSHConnection(connection.SSHConnection):
def serviceStarted(self):
(())
class SSHSession(channel.SSHChannel):
name = 'session'
def channelOpen(self, foo):
(('session %s open' % self.id))
if ('/'):
request = 'exec'
else:
request = 'subsystem'
d = (self, request, (self.conn.options['subsystem']))
(self._cbSubsystem)
(_ebExit)
def _cbSubsystem(self, result):
self.client = ()
(self)
self.dataReceived = self.client.dataReceived
f = None
if self.conn.options['batchfile']:
fn = self.conn.options['batchfile']
if (fn != '-'):
f = (fn)
self.stdio = ((self.client, f))
def extReceived(self, t, data):
if (t == connection.EXTENDED_DATA_STDERR):
(('got %s stderr data' % (data)))
(data)
()
def eofReceived(self):
('got eof')
()
def closeReceived(self):
(('remote side closed %s' % self))
(self)
def closed(self):
try:
()
except:
raise
def stopWriting(self):
()
def startWriting(self):
()
if (__name__ == '__main__'):
() |
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.scripts.ckeygen}.
"""
import sys
from io import StringIO
try:
import Crypto
import pyasn1
except ImportError:
skip = "PyCrypto and pyasn1 required for twisted.conch.scripts.ckeygen."
else:
from twisted.conch.ssh.keys import Key
from twisted.conch.scripts.ckeygen import printFingerprint, _saveKey
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from twisted.conch.test.keydata import publicRSA_openssh, privateRSA_openssh
class KeyGenTests(TestCase):
    """
    Tests for various functions used to implement the I{ckeygen} script.
    """

    def setUp(self):
        """
        Replace C{sys.stdout} with a L{StringIO} so each test can inspect
        exactly what was printed.
        """
        self.stdout = StringIO()
        self.patch(sys, "stdout", self.stdout)

    def test_printFingerprint(self):
        """
        L{printFingerprint} writes a line to standard out giving the number of
        bits of the key, its fingerprint, and the basename of the file from it
        was read.
        """
        keyPath = self.mktemp()
        FilePath(keyPath).setContent(publicRSA_openssh)
        printFingerprint({"filename": keyPath})
        expected = "768 3d:13:5f:cb:c9:79:8a:93:06:27:65:bc:3d:0b:8f:af temp\n"
        self.assertEqual(self.stdout.getvalue(), expected)

    def test_saveKey(self):
        """
        L{_saveKey} writes the private and public parts of a key to two
        different files and writes a report of this to standard out.
        """
        parent = FilePath(self.mktemp())
        parent.makedirs()
        keyPath = parent.child("id_rsa").path
        key = Key.fromString(privateRSA_openssh)
        _saveKey(key.keyObject, {"filename": keyPath, "pass": "passphrase"})
        report = (
            "Your identification has been saved in %s\n"
            "Your public key has been saved in %s.pub\n"
            "The key fingerprint is:\n"
            "3d:13:5f:cb:c9:79:8a:93:06:27:65:bc:3d:0b:8f:af\n" % (keyPath, keyPath)
        )
        self.assertEqual(self.stdout.getvalue(), report)
        reloaded = key.fromString(
            parent.child("id_rsa").getContent(), None, "passphrase"
        )
        self.assertEqual(reloaded, key)
        self.assertEqual(
            Key.fromString(parent.child("id_rsa.pub").getContent()), key.public()
        )
|
# -*- test-case-name: twisted.test.test_digestauth -*-
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Calculations for HTTP Digest authentication.
@see: U{http://www.faqs.org/rfcs/rfc2617.html}
"""
from twisted.python.hashlib import md5, sha1
# The digest math
# Maps algorithm names, as they appear in a Digest challenge, to hash
# constructors.  Note that "sha" is RFC 2617's name for SHA-1.
algorithms = {
    "md5": md5,
    # md5-sess is more complicated than just another algorithm.  It requires
    # H(A1) state to be remembered from the first WWW-Authenticate challenge
    # issued and re-used to process any Authorization header in response to
    # that WWW-Authenticate challenge.  It is *not* correct to simply
    # recalculate H(A1) each time an Authorization header is received.  Read
    # RFC 2617, section 3.2.2.2 and do not try to make DigestCredentialFactory
    # support this unless you completely understand it. -exarkun
    "md5-sess": md5,
    "sha": sha1,
}
# DigestCalcHA1
def calcHA1(
    pszAlg, pszUserName, pszRealm, pszPassword, pszNonce, pszCNonce, preHA1=None
):
    """
    Compute H(A1) from RFC 2617.
    @param pszAlg: The name of the algorithm to use to calculate the digest.
        Currently supported are md5, md5-sess, and sha.
    @param pszUserName: The username
    @param pszRealm: The realm
    @param pszPassword: The password
    @param pszNonce: The nonce
    @param pszCNonce: The cnonce
    @param preHA1: If available this is a str containing a previously
        calculated H(A1) as a hex string. If this is given then the values for
        pszUserName, pszRealm, and pszPassword must be C{None} and are ignored.
    """
    if preHA1 and (pszUserName or pszRealm or pszPassword):
        raise TypeError(
            (
                "preHA1 is incompatible with the pszUserName, "
                "pszRealm, and pszPassword arguments"
            )
        )

    if preHA1 is None:
        # No precomputed H(A1): hash username:realm:password.
        digester = algorithms[pszAlg]()
        for piece in (pszUserName, ":", pszRealm, ":", pszPassword):
            digester.update(piece)
        HA1 = digester.digest()
    else:
        # A precomputed H(A1) was supplied as a hex string.
        HA1 = preHA1.decode("hex")

    if pszAlg == "md5-sess":
        # md5-sess folds the nonce and cnonce into the session key.
        digester = algorithms[pszAlg]()
        for piece in (HA1, ":", pszNonce, ":", pszCNonce):
            digester.update(piece)
        HA1 = digester.digest()

    return HA1.encode("hex")
def calcHA2(algo, pszMethod, pszDigestUri, pszQop, pszHEntity):
    """
    Compute H(A2) from RFC 2617.

    @param algo: The name of the algorithm to use to calculate the digest.
        Currently supported are md5, md5-sess, and sha.
    @param pszMethod: The request method.
    @param pszDigestUri: The request URI.
    @param pszQop: The Quality-of-Protection value.
    @param pszHEntity: The hash of the entity body or C{None} if C{pszQop} is
        not C{'auth-int'}.
    @return: The hash of the A2 value for the calculation of the response
        digest, hex-encoded.
    """
    m = algorithms[algo]()
    m.update(pszMethod)
    m.update(":")
    m.update(pszDigestUri)
    if pszQop == "auth-int":
        # auth-int additionally covers the hash of the entity body.
        m.update(":")
        m.update(pszHEntity)
    # Python 2 hex-encoding of the binary digest.
    return m.digest().encode("hex")
def calcResponse(HA1, HA2, algo, pszNonce, pszNonceCount, pszCNonce, pszQop):
    """
    Compute the digest for the given parameters.

    @param HA1: The H(A1) value, as computed by L{calcHA1}.
    @param HA2: The H(A2) value, as computed by L{calcHA2}.
    @param algo: The name of the algorithm to use to calculate the digest.
        Currently supported are md5, md5-sess, and sha.
    @param pszNonce: The challenge nonce.
    @param pszNonceCount: The (client) nonce count value for this response.
    @param pszCNonce: The client nonce.
    @param pszQop: The Quality-of-Protection value.
    @return: The response digest as a hex string.
    """
    m = algorithms[algo]()
    m.update(HA1)
    m.update(":")
    m.update(pszNonce)
    m.update(":")
    if pszNonceCount and pszCNonce:
        # qop-style response (RFC 2617 section 3.2.2.1): the digest also
        # covers the nonce count, client nonce, and qop value.
        m.update(pszNonceCount)
        m.update(":")
        m.update(pszCNonce)
        m.update(":")
        m.update(pszQop)
        m.update(":")
    m.update(HA2)
    respHash = m.digest().encode("hex")
    return respHash
|
# -*- test-case-name: twisted.test.test_internet -*-
# $Id: default.py,v 1.90 2004/01/06 22:35:22 warner Exp $
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Deprecated module that used to contain SelectReactor and PosixReactorBase
Maintainer: Itamar Shtull-Trauring
"""
import warnings
warnings.warn(
"twisted.internet.default is deprecated. Use posixbase or selectreactor instead.",
category=DeprecationWarning,
)
# Backwards compat
from posixbase import PosixReactorBase
from selectreactor import SelectReactor, install
__all__ = ["install", "PosixReactorBase", "SelectReactor"]
|
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Select reactor
Maintainer: Itamar Shtull-Trauring
"""
from time import sleep
import sys
import select
from errno import EINTR, EBADF
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import error
from twisted.internet import posixbase
from twisted.python import log
from twisted.python.runtime import platformType
def win32select(r, w, e, timeout=None):
    """
    Win32 select wrapper.

    Windows' select() only understands sockets, returns immediately when
    given three empty lists, and does not process signals while blocked;
    this wrapper papers over those quirks.
    """
    if not r and not w:
        # Nothing to wait on: emulate the wait with a plain sleep, since
        # windows select() exits immediately when no sockets
        delay = 0.01 if timeout is None else min(timeout, 0.001)
        sleep(delay)
        return [], [], []
    # windows doesn't process 'signals' inside select(), so we set a max
    # time or ctrl-c will never be recognized
    if timeout is None or timeout > 0.5:
        timeout = 0.5
    readable, writable, errored = select.select(r, w, w, timeout)
    # Connection failures show up on the error set; fold them into writes.
    return readable, writable + errored, []
# Pick the platform-appropriate select() implementation once at import time.
if platformType == "win32":
    _select = win32select
else:
    _select = select.select
# Exceptions that doSelect might return frequently; shared instances so the
# hot path does not allocate.
_NO_FILENO = error.ConnectionFdescWentAway("Handler has no fileno method")
_NO_FILEDESC = error.ConnectionFdescWentAway("Filedescriptor went away")
class SelectReactor(posixbase.PosixReactorBase):
    """
    A select() based reactor - runs on all POSIX platforms and on Win32.

    @ivar _reads: A dictionary mapping L{FileDescriptor} instances to arbitrary
        values (this is essentially a set). Keys in this dictionary will be
        checked for read events.
    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to
        arbitrary values (this is essentially a set). Keys in this dictionary
        will be checked for writability.
    """

    # zope.interface declaration (old class-advice style).
    implements(IReactorFDSet)

    def __init__(self):
        """
        Initialize file descriptor tracking dictionaries and the base class.
        """
        self._reads = {}
        self._writes = {}
        posixbase.PosixReactorBase.__init__(self)

    def _preenDescriptors(self):
        # Called after select() rejects one of our descriptors: probe each
        # tracked selectable individually and evict the broken ones.
        log.msg("Malformed file descriptor found. Preening lists.")
        readers = list(self._reads.keys())
        writers = list(self._writes.keys())
        self._reads.clear()
        self._writes.clear()
        for selDict, selList in ((self._reads, readers), (self._writes, writers)):
            for selectable in selList:
                try:
                    # Zero-timeout probe: raises if the fd is invalid.
                    select.select([selectable], [selectable], [selectable], 0)
                except Exception as e:
                    log.msg("bad descriptor %s" % selectable)
                    self._disconnectSelectable(selectable, e, False)
                else:
                    selDict[selectable] = 1

    def doSelect(self, timeout):
        """
        Run one iteration of the I/O monitor loop.
        This will run all selectables who had input or output readiness
        waiting for them.
        """
        while 1:
            try:
                r, w, ignored = _select(
                    list(self._reads.keys()), list(self._writes.keys()), [], timeout
                )
                break
            except ValueError as ve:
                # Possibly a file descriptor has gone negative?
                log.err()
                self._preenDescriptors()
            except TypeError as te:
                # Something *totally* invalid (object w/o fileno, non-integral
                # result) was passed
                log.err()
                self._preenDescriptors()
            except (select.error, IOError) as se:
                # select(2) encountered an error
                if se.args[0] in (0, 2):
                    # windows does this if it got an empty list
                    if (not self._reads) and (not self._writes):
                        return
                    else:
                        raise
                elif se.args[0] == EINTR:
                    # Interrupted by a signal; give control back to the loop.
                    return
                elif se.args[0] == EBADF:
                    # A descriptor went bad under us; weed it out and retry.
                    self._preenDescriptors()
                else:
                    # OK, I really don't know what's going on. Blow up.
                    raise
        # Bind hot attributes to locals before the dispatch loop.
        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, fdset in (
            (r, "doRead", self._reads),
            (w, "doWrite", self._writes),
        ):
            for selectable in selectables:
                # if this was disconnected in another thread, kill it.
                # ^^^^ --- what the !@#*? serious! -exarkun
                if selectable not in fdset:
                    continue
                # This for pausing input when we're not ready for more.
                # NOTE(review): the last argument is the *builtin* C{dict},
                # not C{fdset}.  _doReadOrWrite ignores that parameter, so
                # this is harmless, but confirm before "fixing" it.
                _logrun(selectable, _drdw, selectable, method, dict)

    doIteration = doSelect

    def _doReadOrWrite(self, selectable, method, dict):
        # Invoke doRead/doWrite on the selectable; any truthy return value
        # (or raised exception) is a reason to disconnect it.  The C{dict}
        # parameter is unused and shadows the builtin; see doSelect.
        try:
            why = getattr(selectable, method)()
            handfn = getattr(selectable, "fileno", None)
            if not handfn:
                why = _NO_FILENO
            elif handfn() == -1:
                why = _NO_FILEDESC
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")

    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        self._reads[reader] = 1

    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        self._writes[writer] = 1

    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        if reader in self._reads:
            del self._reads[reader]

    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        if writer in self._writes:
            del self._writes[writer]

    def removeAll(self):
        # Remove every tracked descriptor and return the removed selectables.
        return self._removeAll(self._reads, self._writes)

    def getReaders(self):
        return list(self._reads.keys())

    def getWriters(self):
        return list(self._writes.keys())
def install():
    """Configure the twisted mainloop to be run using the select() reactor."""
    from twisted.internet.main import installReactor

    installReactor(SelectReactor())
__all__ = ["install"]
|
# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
from twisted.trial import unittest
from twisted.python.runtime import platform
from twisted.python.util import sibpath
from twisted.internet.utils import getProcessOutputAndValue
# Skip message for tests that need spawnProcess on Windows when pywin32 is
# not installed; None means "do not skip".
skipWindowsNopywin32 = None
if platform.isWindows():
    try:
        import win32process
    except ImportError:
        skipWindowsNopywin32 = (
            "On windows, spawnProcess is not available "
            "in the absence of win32process."
        )
class QtreactorTestCase(unittest.TestCase):
    """
    Tests for L{twisted.internet.qtreactor}.
    """

    def test_importQtreactor(self):
        """
        Attempting to import L{twisted.internet.qtreactor} should raise an
        C{ImportError} indicating that C{qtreactor} is no longer a part of
        Twisted.
        """
        # Block any installed third-party qtreactor so the stub's error path
        # is exercised deterministically.
        # NOTE(review): this sys.modules entry is never restored, so it leaks
        # into later tests -- consider self.addCleanup to undo it.
        sys.modules["qtreactor"] = None
        from twisted.plugins.twisted_qtstub import errorMessage

        try:
            import twisted.internet.qtreactor
        except ImportError as e:
            self.assertEqual(str(e), errorMessage)
|
"lore scripts"
|
'\nImplementation module for the I{mailmail} command.\n'
import os
import sys
import rfc822
import getpass
from configparser import ConfigParser
try:
import io as StringIO
except:
import io
from twisted.internet import reactor
from twisted.mail import smtp
GLOBAL_CFG = '/etc/mailmail'
LOCAL_CFG = ('~/.twisted/mailmail')
SMARTHOST = '127.0.0.1'
ERROR_FMT = 'Subject: Failed Message Delivery\n\n Message delivery failed. The following occurred:\n\n %s\n--\nThe Twisted sendmail application.\n'
def log(message, *args):
    """
    Write a formatted message, terminated by a newline, to standard error.

    NOTE(review): the call target on this line was lost when the file was
    mangled; stderr matches how mailmail reports errors elsewhere (e.g.
    C{run}'s 'Permission denied' path) -- confirm against upstream.

    @param message: the message, optionally containing C{%} directives.
    @param args: values interpolated into C{message} when given.
    """
    sys.stderr.write(str(message) % args + '\n')
class Options():
    """
    Container for parsed mailmail command-line options; attributes are filled
    in by C{parseOptions}.

    @type to: C{list} of C{str}
    @ivar to: The addresses to which to deliver this message.

    @type sender: C{str}
    @ivar sender: The address from which this message is being sent.

    @type body: C{file}
    @ivar body: The object from which the message is to be read.
    """
def getlogin():
try:
return ()
except:
return ()
_unsupportedOption = ('Unsupported option.')
def parseOptions(argv):
o = ()
o.to = [e for e in argv if (not ('-'))]
o.sender = ()
if ('-bp' in argv):
raise _unsupportedOption
if ('-bs' in argv):
raise _unsupportedOption
if ('-F' in argv):
o.sender = argv[(('-F') + 1)]
(o.sender)
if (('-i' in argv) or ('-oi' in argv)):
raise _unsupportedOption
if ('-odb' in argv):
o.background = True
else:
o.background = False
if ('-odf' in argv):
o.background = False
else:
o.background = True
if (('-oep' in argv) or ('-ep' in argv)):
o.printErrors = True
else:
o.printErrors = False
if ('-om' in argv):
raise _unsupportedOption
if ('-t' in argv):
o.recipientsFromHeaders = True
o.excludeAddresses = o.to
o.to = []
else:
o.recipientsFromHeaders = False
o.exludeAddresses = []
requiredHeaders = {'from': [], 'to': [], 'cc': [], 'bcc': [], 'date': []}
headers = []
buffer = ()
while 1:
write = 1
line = ()
if (not ()):
break
hdrs = (': ', 1)
hdr = ()
if (o.recipientsFromHeaders and (hdr in ('to', 'cc', 'bcc'))):
([a[1] for a in (hdrs[1]).addresslist])
if (hdr == 'bcc'):
write = 0
elif (hdr == 'from'):
o.sender = (hdrs[1])[1]
if (hdr in requiredHeaders):
(hdrs[1])
if write:
(line)
if (not requiredHeaders['from']):
(('From: %s\r\n' % (o.sender,)))
if (not requiredHeaders['to']):
if (not o.to):
raise ('No recipients specified.')
(('To: %s\r\n' % ((o.to),)))
if (not requiredHeaders['date']):
(('Date: %s\r\n' % ((),)))
(line)
if o.recipientsFromHeaders:
for a in o.excludeAddresses:
try:
(a)
except:
raise
(0, 0)
o.body = ((() + ()))
return o
class Configuration():
    """
    Aggregated mailmail configuration, as loaded from the config files.

    @ivar allowUIDs: A list of UIDs which are allowed to send mail.
    @ivar allowGIDs: A list of GIDs which are allowed to send mail.
    @ivar denyUIDs: A list of UIDs which are not allowed to send mail.
    @ivar denyGIDs: A list of GIDs which are not allowed to send mail.

    @type defaultAccess: C{bool}
    @ivar defaultAccess: C{True} if access will be allowed when no other
        access control rule matches or C{False} if it will be denied in that
        case.

    @ivar useraccess: Either C{'allow'} to check C{allowUID} first or
        C{'deny'} to check C{denyUID} first.

    @ivar groupaccess: Either C{'allow'} to check C{allowGID} first or
        C{'deny'} to check C{denyGID} first.

    @ivar identities: A C{dict} mapping hostnames to credentials to use when
        sending mail to that host.

    @ivar smarthost: C{None} or a hostname through which all outgoing mail
        will be sent.

    @ivar domain: C{None} or the hostname with which to identify ourselves
        when connecting to an MTA.
    """

    def __init__(self):
        # Start with empty access-control lists; each attribute gets its own
        # fresh list object.
        for listName in ('allowUIDs', 'denyUIDs', 'allowGIDs', 'denyGIDs'):
            setattr(self, listName, [])
        # Check the deny lists before the allow lists by default.
        self.useraccess = 'deny'
        self.groupaccess = 'deny'
        self.identities = {}
        self.smarthost = None
        self.domain = None
        # Permit sending when no rule matches.
        self.defaultAccess = True
def loadConfig(path):
c = ()
if (not (path, os.R_OK)):
return c
p = ()
(path)
au = c.allowUIDs
du = c.denyUIDs
ag = c.allowGIDs
dg = c.denyGIDs
for (section, a, d) in (('useraccess', au, du), ('groupaccess', ag, dg)):
if (section):
for (mode, L) in (('allow', a), ('deny', d)):
if ((section, mode) and (section, mode)):
for id in (','):
try:
id = (id)
except ValueError:
('Illegal %sID in [%s] section: %s', (), section, id)
else:
(id)
order = (section, 'order')
order = ((str.split, ((str.lower, (',')))))
if (order[0] == 'allow'):
(c, section, 'allow')
else:
(c, section, 'deny')
if ('identity'):
for (host, up) in ('identity'):
parts = (':', 1)
if ((parts) != 2):
('Illegal entry in [identity] section: %s', up)
continue
p.identities[host] = parts
if ('addresses'):
if ('addresses', 'smarthost'):
c.smarthost = ('addresses', 'smarthost')
if ('addresses', 'default_domain'):
c.domain = ('addresses', 'default_domain')
return c
def success(result):
()
failed = None
def failure(f):
global failed
()
failed = f
def sendmail(host, options, ident):
d = (host, options.sender, options.to, options.body)
(success, failure)
()
def senderror(failure, options):
recipient = [options.sender]
sender = ('"Internally Generated Message (%s)"<postmaster@%s>' % (sys.argv[0], smtp.DNSNAME))
error = ()
()
body = ((ERROR_FMT % ()))
d = ('localhost', sender, recipient, body)
((lambda _: ()))
def deny(conf):
    """
    Decide whether the invoking user may send mail, based on the UID/GID
    allow/deny lists in C{conf}.

    The mangled source had the id lookups stripped to C{()}; the values
    compared against the UID/GID lists must be the caller's real ids, so
    C{os.getuid()}/C{os.getgid()} are restored here.

    @param conf: a L{Configuration} instance.
    @return: C{True} if access is denied, C{False} if it is allowed.
    """
    uid = os.getuid()
    gid = os.getgid()

    # useraccess/groupaccess select which list is consulted first.
    if conf.useraccess == 'deny':
        if uid in conf.denyUIDs:
            return True
        if uid in conf.allowUIDs:
            return False
    else:
        if uid in conf.allowUIDs:
            return False
        if uid in conf.denyUIDs:
            return True

    if conf.groupaccess == 'deny':
        if gid in conf.denyGIDs:
            return True
        if gid in conf.allowGIDs:
            return False
    else:
        if gid in conf.allowGIDs:
            return False
        if gid in conf.denyGIDs:
            return True

    # No rule matched: fall back to the configured default.
    return not conf.defaultAccess
def run():
o = (sys.argv[1:])
gConf = (GLOBAL_CFG)
lConf = (LOCAL_CFG)
if ((gConf) or (lConf)):
('Permission denied')
return
host = (lConf.smarthost or gConf.smarthost or SMARTHOST)
ident = ()
(lConf.identities)
if lConf.domain:
smtp.DNSNAME = lConf.domain
elif gConf.domain:
smtp.DNSNAME = gConf.domain
(host, o, ident)
if failed:
if o.printErrors:
()
raise (1)
else:
(failed, o) |
# -*- test-case-name: twisted.names.test -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from zope.interface import implements
from twisted.names import dns
from twisted.python import failure, log
from twisted.internet import interfaces, defer
from . import common
class CacheResolver(common.ResolverBase):
    """A resolver that serves records from a local, memory cache."""

    implements(interfaces.IResolver)

    cache = None

    def __init__(self, cache=None, verbose=0):
        common.ResolverBase.__init__(self)

        if cache is None:
            cache = {}
        # Maps dns.Query -> (insertion time, (answers, authority, additional)).
        self.cache = cache
        self.verbose = verbose
        # Maps dns.Query -> IDelayedCall that will expire the entry.
        self.cancel = {}

    def __setstate__(self, state):
        """
        Restore pickled state, dropping any cache entry that contains a
        record whose TTL has already elapsed.
        """
        self.__dict__ = state

        now = time.time()
        for k, (when, (ans, add, ns)) in list(self.cache.items()):
            diff = now - when
            for rec in ans + add + ns:
                if rec.ttl < diff:
                    # One expired record invalidates the whole entry.
                    del self.cache[k]
                    break

    def __getstate__(self):
        # Pending expiry timers cannot be pickled; cancel them all first.
        for c in list(self.cancel.values()):
            c.cancel()
        self.cancel.clear()
        return self.__dict__

    def _lookup(self, name, cls, type, timeout):
        """
        Serve a query from the cache, failing with C{dns.DomainError} on a
        miss.  Returned records have their TTLs reduced by the entry's age.
        """
        now = time.time()
        q = dns.Query(name, type, cls)
        try:
            when, (ans, auth, add) = self.cache[q]
        except KeyError:
            if self.verbose > 1:
                log.msg("Cache miss for " + repr(name))
            return defer.fail(failure.Failure(dns.DomainError(name)))
        else:
            if self.verbose:
                log.msg("Cache hit for " + repr(name))
            diff = now - when

            def _age(records):
                # Copies with the TTL reduced by the entry's age.
                return [
                    dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload)
                    for r in records
                ]

            return defer.succeed((_age(ans), _age(auth), _age(add)))

    def lookupAllRecords(self, name, timeout=None):
        return defer.fail(failure.Failure(dns.DomainError(name)))

    def cacheResult(self, query, payload):
        """
        Cache a lookup result and schedule its expiry after the smallest TTL
        found in the payload.
        """
        if self.verbose > 1:
            log.msg("Adding %r to cache" % query)

        self.cache[query] = (time.time(), payload)

        if query in self.cancel:
            self.cancel[query].cancel()

        s = list(payload[0]) + list(payload[1]) + list(payload[2])
        if not s:
            # The original indexed s[0] unconditionally, raising IndexError
            # for an all-empty payload.  With no records there is no TTL to
            # schedule against; leave the entry without an expiry timer.
            return
        m = min(r.ttl for r in s)

        from twisted.internet import reactor

        self.cancel[query] = reactor.callLater(m, self.clearEntry, query)

    def clearEntry(self, query):
        # Called by the expiry timer; raises KeyError if invoked for an
        # entry that was already cleared.
        del self.cache[query]
        del self.cancel[query]
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.news import news, database
from twisted.application import strports
from twisted.python import usage, log
class DBOptions(usage.Options):
    """
    Options for a database-backed (DB-API 2.0) news storage backend.
    """

    optParameters = [
        ["module", None, "pyPgSQL.PgSQL", "DB-API 2.0 module to use"],
        ["dbhost", None, "localhost", "Host where database manager is listening"],
        ["dbuser", None, "news", "Username with which to connect to database"],
        ["database", None, "news", "Database name to use"],
        [
            "schema",
            None,
            "schema.sql",
            "File to which to write SQL schema initialisation",
        ],
        # XXX - Hrm.
        ["groups", "g", "groups.list", "File containing group list"],
        ["servers", "s", "servers.list", "File containing server list"],
    ]

    def postOptions(self):
        """
        Read the group and server lists, emit the SQL schema file, and set up
        C{self.db} as the database storage backend.
        """
        # XXX - Hmmm.
        # Context managers close the list files and the schema file even on
        # error; the original leaked all three handles.
        with open(self["groups"]) as groupFile:
            self["groups"] = [
                g.strip() for g in groupFile.readlines() if not g.startswith("#")
            ]
        with open(self["servers"]) as serverFile:
            self["servers"] = [
                s.strip() for s in serverFile.readlines() if not s.startswith("#")
            ]
        try:
            __import__(self["module"])
        except ImportError:
            # Non-fatal: the DB-API module is only needed at runtime.
            log.msg("Warning: Cannot import %s" % (self["module"],))
        with open(self["schema"], "w") as f:
            f.write(
                database.NewsStorageAugmentation.schema
                + "\n"
                + database.makeGroupSQL(self["groups"])
                + "\n"
                + database.makeOverviewSQL()
            )
        info = {
            "host": self["dbhost"],
            "user": self["dbuser"],
            "database": self["database"],
            "dbapiName": self["module"],
        }
        self.db = database.NewsStorageAugmentation(info)
class PickleOptions(usage.Options):
    """
    Options for a pickle-backed news storage backend.
    """

    optParameters = [
        ["file", None, "news.pickle", "File to which to save pickle"],
        # XXX - Hrm.
        ["groups", "g", "groups.list", "File containing group list"],
        ["servers", "s", "servers.list", "File containing server list"],
        ["moderators", "m", "moderators.list", "File containing moderators list"],
    ]
    subCommands = None

    def postOptions(self):
        """
        Read the group, server, and moderator lists and set up C{self.db} as
        the pickle storage backend.
        """
        # XXX - Hmmm.
        filename = self["file"]
        # Context managers close each list file even on error; the original
        # leaked all three handles.
        with open(self["groups"]) as groupFile:
            self["groups"] = [
                g.strip() for g in groupFile.readlines() if not g.startswith("#")
            ]
        with open(self["servers"]) as serverFile:
            self["servers"] = [
                s.strip() for s in serverFile.readlines() if not s.startswith("#")
            ]
        with open(self["moderators"]) as moderatorFile:
            self["moderators"] = [
                s.split() for s in moderatorFile.readlines() if not s.startswith("#")
            ]
        self.db = database.PickleStorage(filename, self["groups"], self["moderators"])
class Options(usage.Options):
    synopsis = "[options]"

    # Placeholders; real per-instance containers are created in __init__.
    groups = None
    servers = None
    subscriptions = None

    optParameters = [
        ["port", "p", "119", "Listen port"],
        ["interface", "i", "", "Interface to which to bind"],
        ["datadir", "d", "news.db", "Root data storage path"],
        ["mailhost", "m", "localhost", "Host of SMTP server to use"],
    ]
    zsh_actions = {"datadir": "_dirs", "mailhost": "_hosts"}

    def __init__(self):
        usage.Options.__init__(self)
        # Fresh mutable containers per instance.
        self.groups, self.servers, self.subscriptions = [], [], []

    def opt_group(self, group):
        """The name of a newsgroup to carry."""
        self.groups.append([group, None])

    def opt_moderator(self, moderator):
        """The email of the moderator for the most recently passed group."""
        self.groups[-1][1] = moderator

    def opt_server(self, server):
        """The address of a Usenet server to pass messages to and receive messages from."""
        self.servers.append(server)

    def opt_subscription(self, group):
        """A newsgroup to list as a recommended subscription."""
        self.subscriptions.append(group)
def makeService(config):
    """
    Build the NNTP service described by C{config}.

    Raises L{usage.UsageError} when no newsgroups were specified.
    """
    if not len(config.groups):
        raise usage.UsageError("No newsgroups specified")

    db = database.NewsShelf(config["mailhost"], config["datadir"])
    for name, moderator in config.groups:
        if moderator:
            db.addGroup(name, "m")
            db.addModerator(name, moderator)
        else:
            db.addGroup(name, "y")

    for subscription in config.subscriptions:
        # NOTE(review): looks like a leftover debug print — confirm before
        # removing, as it has always echoed subscriptions to stdout.
        print(subscription)
        db.addSubscription(subscription)

    description = config["port"]
    if config["interface"]:
        # Add a warning here
        description += ":interface=" + config["interface"]
    return strports.service(description, news.UsenetServerFactory(db, config.servers))
|
'\nAn FTP protocol implementation\n\n@author: Itamar Shtull-Trauring\n@author: Jp Calderone\n@author: Andrew Bennetts\n'
import os
import time
import re
import operator
import stat
import errno
import fnmatch
import warnings
from functools import reduce
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import Interface, implements
from twisted import copyright
from twisted.internet import reactor, interfaces, protocol, error, defer
from twisted.protocols import basic, policies
from twisted.python import log, failure, filepath
from twisted.python.compat import reduce
from twisted.cred import error as cred_error, portal, credentials, checkers
# FTP reply-code identifiers (RFC 959).  Several distinct situations share a
# numeric code on the wire, so those identifiers carry a ".1"/".2" suffix to
# keep the RESPONSE keys unique (the suffix is never sent to the client).
RESTART_MARKER_REPLY = '100'
SERVICE_READY_IN_N_MINUTES = '120'
DATA_CNX_ALREADY_OPEN_START_XFR = '125'
FILE_STATUS_OK_OPEN_DATA_CNX = '150'
CMD_OK = '200.1'
TYPE_SET_OK = '200.2'
ENTERING_PORT_MODE = '200.3'
CMD_NOT_IMPLMNTD_SUPERFLUOUS = '202'
SYS_STATUS_OR_HELP_REPLY = '211'
DIR_STATUS = '212'
FILE_STATUS = '213'
HELP_MSG = '214'
NAME_SYS_TYPE = '215'
SVC_READY_FOR_NEW_USER = '220.1'
WELCOME_MSG = '220.2'
SVC_CLOSING_CTRL_CNX = '221'
GOODBYE_MSG = '221'
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = '225'
CLOSING_DATA_CNX = '226'
TXFR_COMPLETE_OK = '226'
ENTERING_PASV_MODE = '227'
ENTERING_EPSV_MODE = '229'
USR_LOGGED_IN_PROCEED = '230.1'
GUEST_LOGGED_IN_PROCEED = '230.2'
REQ_FILE_ACTN_COMPLETED_OK = '250'
PWD_REPLY = '257.1'
MKD_REPLY = '257.2'
USR_NAME_OK_NEED_PASS = '331.1'
GUEST_NAME_OK_NEED_EMAIL = '331.2'
NEED_ACCT_FOR_LOGIN = '332'
REQ_FILE_ACTN_PENDING_FURTHER_INFO = '350'
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = '421.1'
TOO_MANY_CONNECTIONS = '421.2'
CANT_OPEN_DATA_CNX = '425'
CNX_CLOSED_TXFR_ABORTED = '426'
REQ_ACTN_ABRTD_FILE_UNAVAIL = '450'
REQ_ACTN_ABRTD_LOCAL_ERR = '451'
REQ_ACTN_ABRTD_INSUFF_STORAGE = '452'
SYNTAX_ERR = '500'
SYNTAX_ERR_IN_ARGS = '501'
CMD_NOT_IMPLMNTD = '502'
BAD_CMD_SEQ = '503'
CMD_NOT_IMPLMNTD_FOR_PARAM = '504'
NOT_LOGGED_IN = '530.1'
AUTH_FAILURE = '530.2'
NEED_ACCT_FOR_STOR = '532'
FILE_NOT_FOUND = '550.1'
PERMISSION_DENIED = '550.2'
ANON_USER_DENIED = '550.3'
IS_NOT_A_DIR = '550.4'
REQ_ACTN_NOT_TAKEN = '550.5'
FILE_EXISTS = '550.6'
IS_A_DIR = '550.7'
PAGE_TYPE_UNK = '551'
EXCEEDED_STORAGE_ALLOC = '552'
FILENAME_NOT_ALLOWED = '553'

# Wire-format templates for each reply code.  '%s' slots are filled in by
# FTP.reply()/FTPCmdError.response().
RESPONSE = {
    # -- 100's --
    RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm',
    SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
    DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
    FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
    # -- 200's --
    CMD_OK: '200 Command OK',
    TYPE_SET_OK: '200 Type set to %s.',
    ENTERING_PORT_MODE: '200 PORT OK',
    CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
    SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
    DIR_STATUS: '212 %s',
    FILE_STATUS: '213 %s',
    HELP_MSG: '214 help: %s',
    NAME_SYS_TYPE: '215 UNIX Type: L8',
    WELCOME_MSG: '220 %s',
    SVC_READY_FOR_NEW_USER: '220 Service ready',
    GOODBYE_MSG: '221 Goodbye.',
    DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
    CLOSING_DATA_CNX: '226 Abort successful',
    TXFR_COMPLETE_OK: '226 Transfer Complete.',
    ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
    ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).',
    USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
    GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
    REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK',
    PWD_REPLY: '257 "%s"',
    MKD_REPLY: '257 "%s" created',
    # -- 300's --
    'userotp': '331 Response to %s.',
    USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
    GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
    REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
    # -- 400's --
    SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
    TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
    CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
    CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
    # Fixed: this entry's string literal was broken across two physical
    # lines by an unescaped newline, which is a SyntaxError.
    REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
    # -- 500's --
    SYNTAX_ERR: '500 Syntax error: %s',
    SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
    CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
    BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
    CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
    NOT_LOGGED_IN: '530 Please login with USER and PASS.',
    AUTH_FAILURE: '530 Sorry, Authentication failed.',
    NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
    FILE_NOT_FOUND: '550 %s: No such file or directory.',
    PERMISSION_DENIED: '550 %s: Permission denied.',
    ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
    IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
    FILE_EXISTS: '550 %s: File exists',
    IS_A_DIR: '550 %s: is a directory',
    REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
    EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
    FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed',
}
class InvalidPath(Exception):
    """
    Internal exception used to signify an error during parsing a path.
    """
def toSegments(cwd, path):
    """
    Normalize a path, as represented by a list of strings each
    representing one segment of the path.

    Reconstructed: the decompiled residue had the call targets stripped
    (path.startswith / path.split / segs.pop / segs.append / InvalidPath).

    @param cwd: current working directory, as a list of segments
    @param path: path to resolve, absolute if it starts with '/'
    @raise InvalidPath: on '..' escaping the root, or on a segment
        containing NUL or '/'.
    """
    if path.startswith('/'):
        segs = []
    else:
        segs = cwd[:]

    for s in path.split('/'):
        if s == '.' or s == '':
            continue
        elif s == '..':
            if segs:
                segs.pop()
            else:
                raise InvalidPath(cwd, path)
        elif '\x00' in s or '/' in s:
            raise InvalidPath(cwd, path)
        else:
            segs.append(s)
    return segs
def errnoToFailure(e, path):
    """
    Map C{OSError} and C{IOError} to standard FTP errors.

    Reconstructed: the decompiled residue had the call targets stripped;
    each branch wraps the matching FTPCmdError subclass in a failed
    Deferred via L{defer.fail}.

    @param e: an C{errno} value
    @param path: the path involved, passed to the error for the reply text
    @return: a failed L{defer.Deferred}
    """
    if e == errno.ENOENT:
        return defer.fail(FileNotFoundError(path))
    elif e == errno.EACCES or e == errno.EPERM:
        return defer.fail(PermissionDeniedError(path))
    elif e == errno.ENOTDIR:
        return defer.fail(IsNotADirectoryError(path))
    elif e == errno.EEXIST:
        return defer.fail(FileExistsError(path))
    elif e == errno.EISDIR:
        return defer.fail(IsADirectoryError(path))
    else:
        # No specific mapping: fail with the current exception context.
        return defer.fail()
class FTPCmdError(Exception):
    """
    Generic exception for FTP commands.

    Subclasses set C{errorCode} to one of the reply-code identifiers;
    the constructor arguments are interpolated into that code's
    RESPONSE template by L{response}.
    """
    def __init__(self, *msg):
        # Reconstructed: the decompiled residue stripped this call target.
        Exception.__init__(self, *msg)
        self.errorMessage = msg

    def response(self):
        """
        Generate a FTP response message for this error.
        """
        return RESPONSE[self.errorCode] % self.errorMessage
class FileNotFoundError(FTPCmdError):
    """
    Raised when trying to access a non existent file or directory.
    """
    errorCode = FILE_NOT_FOUND
class AnonUserDeniedError(FTPCmdError):
    """
    Raised when an anonymous user issues a command that will alter the
    filesystem
    """
    errorCode = ANON_USER_DENIED

    def __init__(self):
        # Reconstructed: the stripped call was the base-class constructor.
        # The ANON_USER_DENIED template has no '%s' slot, so None is only
        # a placeholder argument.
        FTPCmdError.__init__(self, None)
class PermissionDeniedError(FTPCmdError):
    """
    Raised when access is attempted to a resource to which access is
    not allowed.
    """
    errorCode = PERMISSION_DENIED
class IsNotADirectoryError(FTPCmdError):
    """
    Raised when RMD is called on a path that isn't a directory.
    """
    errorCode = IS_NOT_A_DIR
class FileExistsError(FTPCmdError):
    """
    Raised when attempted to override an existing resource.
    """
    errorCode = FILE_EXISTS
class IsADirectoryError(FTPCmdError):
    """
    Raised when DELE is called on a path that is a directory.
    """
    errorCode = IS_A_DIR
class CmdSyntaxError(FTPCmdError):
    """
    Raised when a command syntax is wrong.
    """
    errorCode = SYNTAX_ERR
class CmdArgSyntaxError(FTPCmdError):
    """
    Raised when a command is called with wrong value or a wrong number of
    arguments.
    """
    errorCode = SYNTAX_ERR_IN_ARGS
class CmdNotImplementedError(FTPCmdError):
    """
    Raised when an unimplemented command is given to the server.
    """
    errorCode = CMD_NOT_IMPLMNTD
class CmdNotImplementedForArgError(FTPCmdError):
    """
    Raised when the handling of a parameter for a command is not implemented by
    the server.
    """
    errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM
class FTPError(Exception):
    """General FTP error, outside the numbered-reply hierarchy."""
class PortConnectionError(Exception):
    """Raised when the data-connection (PORT/PASV) attempt fails."""
class BadCmdSequenceError(FTPCmdError):
    """
    Raised when a client sends a series of commands in an illogical sequence.
    """
    errorCode = BAD_CMD_SEQ
class AuthorizationError(FTPCmdError):
    """
    Raised when client authentication fails.
    """
    errorCode = AUTH_FAILURE
def debugDeferred(self, *_):
    """
    Log the arguments a Deferred passed through, for debugging.

    Reconstructed: the decompiled residue stripped the call target;
    presumably log.msg with debug=True — TODO confirm against upstream.
    """
    log.msg('debugDeferred(): %s' % str(_), debug=True)
# Month abbreviations for LIST-style date formatting; index 0 is unused so
# that struct_time.tm_mon (1-12) can index the list directly.
_months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class DTP(object, protocol.Protocol):
    """
    The Data Transfer Process: the per-transfer data channel paired with
    the FTP protocol interpreter.

    Reconstructed from decompiled residue in which the call targets were
    stripped; method-by-method structure and all literals match the
    residue, but the restored call sites should be diffed against
    upstream twisted.protocols.ftp before trusting edge cases.
    """
    implements(interfaces.IConsumer)

    isConnected = False

    _cons = None
    _onConnLost = None
    _buffer = None

    def connectionMade(self):
        self.isConnected = True
        # Tell the factory's waiter that the data channel is up.
        self.factory.deferred.callback(None)
        self._buffer = []

    def connectionLost(self, reason):
        self.isConnected = False
        if self._onConnLost is not None:
            self._onConnLost.callback(None)

    def sendLine(self, line):
        self.transport.write(line + '\r\n')

    def _formatOneListResponse(self, name, size, directory, permissions,
                               hardlinks, modified, owner, group):
        """Format one ls(1)-style line for a LIST response."""
        def formatMode(mode):
            # 256 >> n walks the rwxrwxrwx permission bits high-to-low.
            return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-'
                            for n in range(9)])

        def formatDate(mtime):
            now = time.gmtime()
            info = {
                'month': _months[mtime.tm_mon],
                'day': mtime.tm_mday,
                'year': mtime.tm_year,
                'hour': mtime.tm_hour,
                'minute': mtime.tm_min,
            }
            # ls convention: show the year instead of the time for entries
            # from a different year.
            if now.tm_year != mtime.tm_year:
                return '%(month)s %(day)02d %(year)5d' % info
            else:
                return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info

        format = ('%(directory)s%(permissions)s%(hardlinks)4d '
                  '%(owner)-9s %(group)-9s %(size)15d %(date)12s '
                  '%(name)s')
        return format % {
            'directory': directory and 'd' or '-',
            'permissions': formatMode(permissions),
            'hardlinks': hardlinks,
            'owner': owner[:8],
            'group': group[:8],
            'size': size,
            'date': formatDate(time.gmtime(modified)),
            'name': name,
        }

    def sendListResponse(self, name, response):
        self.sendLine(self._formatOneListResponse(name, *response))

    # -- IConsumer: proxy straight through to the transport --
    def registerProducer(self, producer, streaming):
        return self.transport.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.transport.unregisterProducer()
        self.transport.loseConnection()

    def write(self, data):
        if self.isConnected:
            return self.transport.write(data)
        raise Exception('Crap damn crap damn crap damn')

    def _conswrite(self, bytes):
        try:
            self._cons.write(bytes)
        except:
            self._onConnLost.errback()

    def dataReceived(self, bytes):
        if self._cons is not None:
            self._conswrite(bytes)
        else:
            # No consumer yet: buffer until registerConsumer is called.
            self._buffer.append(bytes)

    def _unregConsumer(self, ignored):
        self._cons.unregisterProducer()
        self._cons = None
        del self._onConnLost
        return ignored

    def registerConsumer(self, cons):
        assert self._cons is None
        self._cons = cons
        self._cons.registerProducer(self, True)
        # Flush anything that arrived before the consumer was attached.
        for chunk in self._buffer:
            self._cons.write(chunk)
        self._buffer = None
        if self.isConnected:
            self._onConnLost = d = defer.Deferred()
            d.addBoth(self._unregConsumer)
            return d
        else:
            self._cons.unregisterProducer()
            self._cons = None
            return defer.succeed(None)

    # -- IProducer: proxy straight through to the transport --
    def resumeProducing(self):
        self.transport.resumeProducing()

    def pauseProducing(self):
        self.transport.pauseProducing()

    def stopProducing(self):
        self.transport.stopProducing()
class DTPFactory(protocol.ClientFactory):
    """
    Client factory for I{data transfer process} protocols.

    @ivar peerCheck: perform checks to make sure the ftp-pi's peer is the same
        as the dtp's
    @ivar pi: a reference to this factory's protocol interpreter

    @ivar _state: Indicates the current state of the DTPFactory. Initially,
        this is L{_IN_PROGRESS}. If the connection fails or times out, it is
        L{_FAILED}. If the connection succeeds before the timeout, it is
        L{_FINISHED}.

    Reconstructed from decompiled residue with stripped call targets;
    verify against upstream twisted.protocols.ftp before trusting details.
    """
    # Sentinel state tokens; only identity comparisons are used.
    _IN_PROGRESS = object()
    _FAILED = object()
    _FINISHED = object()

    _state = _IN_PROGRESS

    peerCheck = False

    def __init__(self, pi, peerHost=None, reactor=None):
        """Constructor
        @param pi: this factory's protocol interpreter
        @param peerHost: if peerCheck is True, this is the tuple that the
            generated instance will use to perform security checks
        """
        self.pi = pi
        self.peerHost = peerHost
        # Fired with None on connection, errbacked on failure/timeout.
        self.deferred = defer.Deferred()
        self.delayedCall = None
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

    def buildProtocol(self, addr):
        log.msg('DTPFactory.buildProtocol', debug=True)
        if self._state is not self._IN_PROGRESS:
            return None
        self._state = self._FINISHED
        self.cancelTimeout()
        p = DTP()
        p.factory = self
        p.pi = self.pi
        self.pi.dtpInstance = p
        return p

    def stopFactory(self):
        log.msg('dtpFactory.stopFactory', debug=True)
        self.cancelTimeout()

    def timeoutFactory(self):
        log.msg('timed out waiting for DTP connection')
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED
        d = self.deferred
        self.deferred = None
        d.errback(PortConnectionError(defer.TimeoutError('DTPFactory timeout')))

    def cancelTimeout(self):
        if self.delayedCall is not None and self.delayedCall.active():
            log.msg('cancelling DTP timeout', debug=True)
            self.delayedCall.cancel()

    def setTimeout(self, seconds):
        log.msg('DTPFactory.setTimeout set to %s seconds' % seconds)
        self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory)

    def clientConnectionFailed(self, connector, reason):
        if self._state is not self._IN_PROGRESS:
            return
        self._state = self._FAILED
        d = self.deferred
        self.deferred = None
        d.errback(PortConnectionError(reason))
class ASCIIConsumerWrapper(object):
    """
    Wraps a consumer, translating the platform line separator to the FTP
    ASCII-mode separator (CRLF) on the way through.

    Reconstructed: the decompiled residue stripped len()/str.replace and
    flattened the platform assertion.
    """
    def __init__(self, cons):
        self.cons = cons
        self.registerProducer = cons.registerProducer
        self.unregisterProducer = cons.unregisterProducer

        assert os.linesep == "\r\n" or len(os.linesep) == 1, (
            "Unsupported platform (yea right like this even exists)")

        # Already CRLF: pass bytes straight through, skipping the replace.
        if os.linesep == "\r\n":
            self.write = cons.write

    def write(self, bytes):
        return self.cons.write(bytes.replace(os.linesep, "\r\n"))
class FileConsumer(object):
    """
    A consumer for FTP input that writes data to a file.

    @ivar fObj: a file object opened for writing, used to write data received.
    @type fObj: C{file}

    Reconstructed: the decompiled residue stripped the fObj method calls
    and flattened the streaming assertion.
    """
    implements(interfaces.IConsumer)

    def __init__(self, fObj):
        self.fObj = fObj

    def registerProducer(self, producer, streaming):
        self.producer = producer
        # Only push (streaming) producers are supported.
        assert streaming

    def unregisterProducer(self):
        self.producer = None
        self.fObj.close()

    def write(self, bytes):
        self.fObj.write(bytes)
class FTPOverflowProtocol(basic.LineReceiver):
    """FTP mini-protocol for when there are too many connections."""
    def connectionMade(self):
        # Reconstructed: send the 421 reply and hang up immediately.
        self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS])
        self.transport.loseConnection()
class FTP(object, basic.LineReceiver, policies.TimeoutMixin):
'\n Protocol Interpreter for the File Transfer Protocol\n\n @ivar state: The current server state. One of L{UNAUTH},\n L{INAUTH}, L{AUTHED}, L{RENAMING}.\n\n @ivar shell: The connected avatar\n @ivar binary: The transfer mode. If false, ASCII.\n @ivar dtpFactory: Generates a single DTP for this session\n @ivar dtpPort: Port returned from listenTCP\n @ivar listenFactory: A callable with the signature of\n L{twisted.internet.interfaces.IReactorTCP.listenTCP} which will be used\n to create Ports for passive connections (mainly for testing).\n\n @ivar passivePortRange: iterator used as source of passive port numbers.\n @type passivePortRange: C{iterator}\n '
disconnected = False
(UNAUTH, INAUTH, AUTHED, RENAMING) = ((4))
dtpTimeout = 10
portal = None
shell = None
dtpFactory = None
dtpPort = None
dtpInstance = None
binary = True
passivePortRange = (0, 1)
listenFactory = reactor.listenTCP
def reply(self, key, *args):
msg = (RESPONSE[key] % args)
(msg)
def connectionMade(self):
self.state = self.UNAUTH
(self.timeOut)
(WELCOME_MSG, self.factory.welcomeMessage)
def connectionLost(self, reason):
if self.dtpFactory:
()
(None)
if ((self.shell, 'logout') and (self.shell.logout is not None)):
()
self.shell = None
self.transport = None
def timeoutConnection(self):
()
def lineReceived(self, line):
()
()
def processFailed(err):
if (FTPCmdError):
(())
elif ((TypeError) and (('takes exactly') != (- 1))):
(SYNTAX_ERR, ('%s requires an argument.' % (cmd,)))
else:
('Unexpected FTP error')
(err)
(REQ_ACTN_NOT_TAKEN, 'internal server error')
def processSucceeded(result):
if (result, tuple):
(*result)
elif (result is not None):
(result)
def allDone(ignored):
if (not self.disconnected):
()
spaceIndex = (' ')
if (spaceIndex != (- 1)):
cmd = line[:spaceIndex]
args = (line[(spaceIndex + 1):],)
else:
cmd = line
args = ()
d = (self.processCommand, cmd, *args)
(processSucceeded, processFailed)
(log.err)
from twisted.internet import reactor
(0, d.addBoth, allDone)
def processCommand(self, cmd, *params):
cmd = ()
if (self.state == self.UNAUTH):
if (cmd == 'USER'):
return (*params)
elif (cmd == 'PASS'):
return (BAD_CMD_SEQ, 'USER required before PASS')
else:
return NOT_LOGGED_IN
elif (self.state == self.INAUTH):
if (cmd == 'PASS'):
return (*params)
else:
return (BAD_CMD_SEQ, 'PASS required after USER')
elif (self.state == self.AUTHED):
method = (self, ('ftp_' + cmd), None)
if (method is not None):
return (*params)
return ((cmd))
elif (self.state == self.RENAMING):
if (cmd == 'RNTO'):
return (*params)
else:
return (BAD_CMD_SEQ, 'RNTO required after RNFR')
def getDTPPort(self, factory):
'\n Return a port for passive access, using C{self.passivePortRange}\n attribute.\n '
for portn in self.passivePortRange:
try:
dtpPort = (portn, factory)
except error.CannotListenError:
continue
else:
return dtpPort
raise ('', portn, ('No port available in range %s' % (self.passivePortRange,)))
def ftp_USER(self, username):
'\n First part of login. Get the username the peer wants to\n authenticate as.\n '
if (not username):
return (('USER requires an argument'))
self._user = username
self.state = self.INAUTH
if (self.factory.allowAnonymous and (self._user == self.factory.userAnonymous)):
return GUEST_NAME_OK_NEED_EMAIL
else:
return (USR_NAME_OK_NEED_PASS, username)
def ftp_PASS(self, password):
'\n Second part of login. Get the password the peer wants to\n authenticate with.\n '
if (self.factory.allowAnonymous and (self._user == self.factory.userAnonymous)):
creds = ()
reply = GUEST_LOGGED_IN_PROCEED
else:
creds = (self._user, password)
reply = USR_LOGGED_IN_PROCEED
del self._user
def _cbLogin(xxx_todo_changeme):
(interface, avatar, logout) = xxx_todo_changeme
if (not (interface is IFTPShell)):
raise ('The realm is busted, jerk.')
self.shell = avatar
self.logout = logout
self.workingDirectory = []
self.state = self.AUTHED
return reply
def _ebLogin(failure):
(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials)
self.state = self.UNAUTH
raise AuthorizationError
d = (creds, None, IFTPShell)
(_cbLogin, _ebLogin)
return d
def ftp_PASV(self):
'Request for a passive connection\n\n from the rfc::\n\n This command requests the server-DTP to "listen" on a data port\n (which is not its default data port) and to wait for a connection\n rather than initiate one upon receipt of a transfer command. The\n response to this command includes the host and port address this\n server is listening on.\n '
if (self.dtpFactory is not None):
()
self.dtpFactory = ()
(self.dtpTimeout)
self.dtpPort = (self.dtpFactory)
host = ().host
port = ().port
(ENTERING_PASV_MODE, (host, port))
return ((lambda ign: None))
def ftp_PORT(self, address):
addr = ((int, (',')))
ip = ('%d.%d.%d.%d' % (addr[:4]))
port = ((addr[4] << 8) | addr[5])
if (self.dtpFactory is not None):
()
self.dtpFactory = ()
(self.dtpTimeout)
self.dtpPort = (ip, port, self.dtpFactory)
def connected(ignored):
return ENTERING_PORT_MODE
def connFailed(err):
(PortConnectionError)
return CANT_OPEN_DATA_CNX
return (connected, connFailed)
def ftp_LIST(self, path=''):
"This command causes a list to be sent from the server to the\n passive DTP. If the pathname specifies a directory or other\n group of files, the server should transfer a list of files\n in the specified directory. If the pathname specifies a\n file then the server should send current information on the\n file. A null argument implies the user's current working or\n default directory.\n "
if ((self.dtpInstance is None) or (not self.dtpInstance.isConnected)):
return (('must send PORT or PASV before RETR'))
if (path == '-a'):
path = ''
if (path == '-aL'):
path = ''
if (path == '-L'):
path = ''
if (path == '-la'):
path = ''
def gotListing(results):
(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, attrs) in results:
(name, attrs)
()
return (TXFR_COMPLETE_OK,)
try:
segments = (self.workingDirectory, path)
except InvalidPath:
return ((path))
d = (segments, ('size', 'directory', 'permissions', 'hardlinks', 'modified', 'owner', 'group'))
(gotListing)
return d
def ftp_NLST(self, path):
'\n This command causes a directory listing to be sent from the server to\n the client. The pathname should specify a directory or other\n system-specific file group descriptor. An empty path implies the current\n working directory. If the path is non-existent, send nothing. If the\n path is to a file, send only the file name.\n\n @type path: C{str}\n @param path: The path for which a directory listing should be returned.\n\n @rtype: L{Deferred}\n @return: a L{Deferred} which will be fired when the listing request\n is finished.\n '
if ((self.dtpInstance is None) or (not self.dtpInstance.isConnected)):
return (('must send PORT or PASV before RETR'))
try:
segments = (self.workingDirectory, path)
except InvalidPath:
return ((path))
def cbList(results):
'\n Send, line by line, each file in the directory listing, and then\n close the connection.\n\n @type results: A C{list} of C{tuple}. The first element of each\n C{tuple} is a C{str} and the second element is a C{list}.\n @param results: The names of the files in the directory.\n\n @rtype: C{tuple}\n @return: A C{tuple} containing the status code for a successful\n transfer.\n '
(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, ignored) in results:
(name)
()
return (TXFR_COMPLETE_OK,)
def cbGlob(results):
(DATA_CNX_ALREADY_OPEN_START_XFR)
for (name, ignored) in results:
if (name, segments[(- 1)]):
(name)
()
return (TXFR_COMPLETE_OK,)
def listErr(results):
'\n RFC 959 specifies that an NLST request may only return directory\n listings. Thus, send nothing and just close the connection.\n\n @type results: L{Failure}\n @param results: The L{Failure} wrapping a L{FileNotFoundError} that\n occurred while trying to list the contents of a nonexistent\n directory.\n\n @rtype: C{tuple}\n @returns: A C{tuple} containing the status code for a successful\n transfer.\n '
()
return (TXFR_COMPLETE_OK,)
if (segments and (('*' in segments[(- 1)]) or ('?' in segments[(- 1)]) or (('[' in segments[(- 1)]) and (']' in segments[(- 1)])))):
d = (segments[:(- 1)])
(cbGlob)
else:
d = (segments)
(cbList)
(listErr)
return d
def ftp_CWD(self, path):
try:
segments = (self.workingDirectory, path)
except InvalidPath:
return ((path))
def accessGranted(result):
self.workingDirectory = segments
return (REQ_FILE_ACTN_COMPLETED_OK,)
return (accessGranted)
def ftp_CDUP(self):
return ('..')
def ftp_PWD(self):
return (PWD_REPLY, ('/' + (self.workingDirectory)))
def ftp_RETR(self, path):
if (self.dtpInstance is None):
raise ('PORT or PASV required before RETR')
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
(None)
def enableTimeout(result):
(self.factory.timeOut)
return result
if (not self.binary):
cons = (self.dtpInstance)
else:
cons = self.dtpInstance
def cbSent(result):
return (TXFR_COMPLETE_OK,)
def ebSent(err):
('Unexpected error attempting to transmit file to client:')
(err)
return (CNX_CLOSED_TXFR_ABORTED,)
def cbOpened(file):
if self.dtpInstance.isConnected:
(DATA_CNX_ALREADY_OPEN_START_XFR)
else:
(FILE_STATUS_OK_OPEN_DATA_CNX)
d = (cons)
(cbSent, ebSent)
return d
def ebOpened(err):
if (not (PermissionDeniedError, FileNotFoundError, IsNotADirectoryError)):
('Unexpected error attempting to open file for transmission:')
(err)
if (FTPCmdError):
return (err.value.errorCode, (newsegs))
return (FILE_NOT_FOUND, (newsegs))
d = (newsegs)
(cbOpened, ebOpened)
(enableTimeout)
return d
def ftp_STOR(self, path):
if (self.dtpInstance is None):
raise ('PORT or PASV required before STOR')
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
(None)
def enableTimeout(result):
(self.factory.timeOut)
return result
def cbSent(result):
return (TXFR_COMPLETE_OK,)
def ebSent(err):
('Unexpected error receiving file from client:')
(err)
return (CNX_CLOSED_TXFR_ABORTED,)
def cbConsumer(cons):
if (not self.binary):
cons = (cons)
d = (cons)
if self.dtpInstance.isConnected:
(DATA_CNX_ALREADY_OPEN_START_XFR)
else:
(FILE_STATUS_OK_OPEN_DATA_CNX)
return d
def cbOpened(file):
d = ()
(cbConsumer)
((lambda ignored: ()))
(cbSent, ebSent)
return d
def ebOpened(err):
if (not (PermissionDeniedError, FileNotFoundError, IsNotADirectoryError)):
('Unexpected error attempting to open file for upload:')
(err)
if (err.value, FTPCmdError):
return (err.value.errorCode, (newsegs))
return (FILE_NOT_FOUND, (newsegs))
d = (newsegs)
(cbOpened, ebOpened)
(enableTimeout)
return d
def ftp_SIZE(self, path):
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
def cbStat(xxx_todo_changeme1):
(size,) = xxx_todo_changeme1
return (FILE_STATUS, (size))
return (cbStat)
def ftp_MDTM(self, path):
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
def cbStat(xxx_todo_changeme2):
(modified,) = xxx_todo_changeme2
return (FILE_STATUS, ('%Y%m%d%H%M%S', (modified)))
return (cbStat)
def ftp_TYPE(self, type):
p = ()
if p:
f = (self, ('type_' + p[0]), None)
if (f is not None):
return (p[1:])
return (p)
return (SYNTAX_ERR,)
def type_A(self, code):
if ((code == '') or (code == 'N')):
self.binary = False
return (TYPE_SET_OK, ('A' + code))
else:
return ((code))
def type_I(self, code):
if (code == ''):
self.binary = True
return (TYPE_SET_OK, 'I')
else:
return ((code))
def type_UNKNOWN(self, code):
return ((code))
def ftp_SYST(self):
return NAME_SYS_TYPE
def ftp_STRU(self, structure):
p = ()
if (p == 'F'):
return (CMD_OK,)
return ((structure))
def ftp_MODE(self, mode):
p = ()
if (p == 'S'):
return (CMD_OK,)
return ((mode))
def ftp_MKD(self, path):
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
return ((lambda ign: (MKD_REPLY, path)))
def ftp_RMD(self, path):
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
return ((lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,)))
def ftp_DELE(self, path):
try:
newsegs = (self.workingDirectory, path)
except InvalidPath:
return ((path))
return ((lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,)))
def ftp_NOOP(self):
return (CMD_OK,)
def ftp_RNFR(self, fromName):
self._fromName = fromName
self.state = self.RENAMING
return (REQ_FILE_ACTN_PENDING_FURTHER_INFO,)
def ftp_RNTO(self, toName):
fromName = self._fromName
del self._fromName
self.state = self.AUTHED
try:
fromsegs = (self.workingDirectory, fromName)
tosegs = (self.workingDirectory, toName)
except InvalidPath:
return ((fromName))
return ((lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,)))
def ftp_QUIT(self):
(GOODBYE_MSG)
()
self.disconnected = True
def cleanupDTP(self):
'call when DTP connection exits'
('cleanupDTP')
(self.dtpPort)
(dtpPort, self.dtpPort) = (self.dtpPort, None)
if (dtpPort):
()
elif (dtpPort):
()
elif (not False):
raise (('dtpPort should be an IListeningPort or IConnector, instead is %r' % (dtpPort,)))
()
self.dtpFactory = None
if (self.dtpInstance is not None):
self.dtpInstance = None
class FTPFactory(policies.LimitTotalConnectionsFactory):
    """
    A factory for producing ftp protocol instances

    @ivar timeOut: the protocol interpreter's idle timeout time in seconds,
        default is 600 seconds.

    @ivar passivePortRange: value forwarded to C{protocol.passivePortRange}.
    @type passivePortRange: C{iterator}

    Reconstructed: the decompiled residue stripped the base-class call
    targets and the range() around passivePortRange.
    """
    protocol = FTP
    overflowProtocol = FTPOverflowProtocol
    allowAnonymous = True
    userAnonymous = 'anonymous'
    timeOut = 600

    welcomeMessage = ('Twisted %s FTP Server' % (copyright.version,))

    # Port 0 only: ask the OS for an ephemeral passive port by default.
    passivePortRange = range(0, 1)

    def __init__(self, portal=None, userAnonymous='anonymous'):
        self.portal = portal
        self.userAnonymous = userAnonymous
        self.instances = []

    def buildProtocol(self, addr):
        # The base class enforces the connection limit and returns a
        # wrapped protocol (or None / the overflow protocol).
        p = policies.LimitTotalConnectionsFactory.buildProtocol(self, addr)
        if p is not None:
            p.wrappedProtocol.portal = self.portal
            p.wrappedProtocol.timeOut = self.timeOut
            p.wrappedProtocol.passivePortRange = self.passivePortRange
        return p

    def stopFactory(self):
        # Clear per-instance timeouts so the reactor shuts down cleanly.
        [p.setTimeout(None) for p in self.instances if p.timeOut is not None]
        policies.LimitTotalConnectionsFactory.stopFactory(self)
class IFTPShell(Interface):
'\n An abstraction of the shell commands used by the FTP protocol for\n a given user account.\n\n All path names must be absolute.\n '
def makeDirectory(path):
'\n Create a directory.\n\n @param path: The path, as a list of segments, to create\n @type path: C{list} of C{unicode}\n\n @return: A Deferred which fires when the directory has been\n created, or which fails if the directory cannot be created.\n '
def removeDirectory(path):
'\n Remove a directory.\n\n @param path: The path, as a list of segments, to remove\n @type path: C{list} of C{unicode}\n\n @return: A Deferred which fires when the directory has been\n removed, or which fails if the directory cannot be removed.\n '
def removeFile(path):
'\n Remove a file.\n\n @param path: The path, as a list of segments, to remove\n @type path: C{list} of C{unicode}\n\n @return: A Deferred which fires when the file has been\n removed, or which fails if the file cannot be removed.\n '
def rename(fromPath, toPath):
'\n Rename a file or directory.\n\n @param fromPath: The current name of the path.\n @type fromPath: C{list} of C{unicode}\n\n @param toPath: The desired new name of the path.\n @type toPath: C{list} of C{unicode}\n\n @return: A Deferred which fires when the path has been\n renamed, or which fails if the path cannot be renamed.\n '
def access(path):
'\n Determine whether access to the given path is allowed.\n\n @param path: The path, as a list of segments\n\n @return: A Deferred which fires with None if access is allowed\n or which fails with a specific exception type if access is\n denied.\n '
def stat(path, keys=()):
'\n Retrieve information about the given path.\n\n This is like list, except it will never return results about\n child paths.\n '
def list(path, keys=()):
"\n Retrieve information about the given path.\n\n If the path represents a non-directory, the result list should\n have only one entry with information about that non-directory.\n Otherwise, the result list should have an element for each\n child of the directory.\n\n @param path: The path, as a list of segments, to list\n @type path: C{list} of C{unicode}\n\n @param keys: A tuple of keys desired in the resulting\n dictionaries.\n\n @return: A Deferred which fires with a list of (name, list),\n where the name is the name of the entry as a unicode string\n and each list contains values corresponding to the requested\n keys. The following are possible elements of keys, and the\n values which should be returned for them:\n\n - C{'size'}: size in bytes, as an integer (this is kinda required)\n\n - C{'directory'}: boolean indicating the type of this entry\n\n - C{'permissions'}: a bitvector (see os.stat(foo).st_mode)\n\n - C{'hardlinks'}: Number of hard links to this entry\n\n - C{'modified'}: number of seconds since the epoch since entry was\n modified\n\n - C{'owner'}: string indicating the user owner of this entry\n\n - C{'group'}: string indicating the group owner of this entry\n "
def openForReading(path):
'\n @param path: The path, as a list of segments, to open\n @type path: C{list} of C{unicode}\n\n @rtype: C{Deferred} which will fire with L{IReadFile}\n '
def openForWriting(path):
'\n @param path: The path, as a list of segments, to open\n @type path: C{list} of C{unicode}\n\n @rtype: C{Deferred} which will fire with L{IWriteFile}\n '
class IReadFile(Interface):
'\n A file out of which bytes may be read.\n '
def send(consumer):
'\n Produce the contents of the given path to the given consumer. This\n method may only be invoked once on each provider.\n\n @type consumer: C{IConsumer}\n\n @return: A Deferred which fires when the file has been\n consumed completely.\n '
class IWriteFile(Interface):
'\n A file into which bytes may be written.\n '
def receive():
'\n Create a consumer which will write to this file. This method may\n only be invoked once on each provider.\n\n @rtype: C{Deferred} of C{IConsumer}\n '
def close():
'\n Perform any post-write work that needs to be done. This method may\n only be invoked once on each provider, and will always be invoked\n after receive().\n\n @rtype: C{Deferred} of anything: the value is ignored. The FTP client\n will not see their upload request complete until this Deferred has\n been fired.\n '
def _getgroups(uid):
    """Return the primary and supplementary groups for the given UID.

    @type uid: C{int}

    @return: C{list} of C{int} group IDs; the primary group first,
        followed by every group whose member list names this user.
    """
    # Local imports keep this block self-contained; upstream Twisted
    # imports pwd/grp conditionally at module level (POSIX only).
    import pwd
    import grp
    result = []
    pwent = pwd.getpwuid(uid)
    result.append(pwent.pw_gid)
    for grent in grp.getgrall():
        if pwent.pw_name in grent.gr_mem:
            result.append(grent.gr_gid)
    return result
def _testPermissions(uid, gid, spath, mode='r'):
    """
    checks to see if uid has proper permissions to access path with mode

    @type uid: C{int}
    @param uid: numeric user id

    @type gid: C{int}
    @param gid: numeric group id

    @type spath: C{str}
    @param spath: the path on the server to test

    @type mode: C{str}
    @param mode: 'r' or 'w' (read or write)

    @rtype: C{bool}
    @return: True if the given credentials have the specified form of
        access to the given path
    """
    # Select the permission bits and os.access() mode for the request.
    if mode == 'r':
        usr = stat.S_IRUSR
        grp = stat.S_IRGRP
        oth = stat.S_IROTH
        amode = os.R_OK
    elif mode == 'w':
        usr = stat.S_IWUSR
        grp = stat.S_IWGRP
        oth = stat.S_IWOTH
        amode = os.W_OK
    else:
        raise ValueError("Invalid mode %r: must specify 'r' or 'w'" % (mode,))
    access = False
    if os.path.exists(spath):
        if uid == 0:
            # root always passes the permission-bit check.
            access = True
        else:
            s = os.stat(spath)
            if (usr & s.st_mode) and (uid == s.st_uid):
                access = True
            elif (grp & s.st_mode) and (gid in _getgroups(uid)):
                access = True
            elif (oth & s.st_mode):
                access = True
    if access:
        # The filesystem bits may grant access to the FTP user while the
        # process we run as still cannot reach the path; double-check.
        if not os.access(spath, amode):
            access = False
            log.msg(('Filesystem grants permission to UID %d but it is '
                     'inaccessible to me running as UID %d'
                     % (uid, os.getuid())))
    return access
class FTPAnonymousShell(object):
'\n An anonymous implementation of IFTPShell\n\n @type filesystemRoot: L{twisted.python.filepath.FilePath}\n @ivar filesystemRoot: The path which is considered the root of\n this shell.\n '
(IFTPShell)
def __init__(self, filesystemRoot):
self.filesystemRoot = filesystemRoot
def _path(self, path):
return (filepath.FilePath.child, path, self.filesystemRoot)
def makeDirectory(self, path):
return (())
def removeDirectory(self, path):
return (())
def removeFile(self, path):
return (())
def rename(self, fromPath, toPath):
return (())
def receive(self, path):
path = (path)
return (())
def openForReading(self, path):
'\n Open C{path} for reading.\n\n @param path: The path, as a list of segments, to open.\n @type path: C{list} of C{unicode}\n @return: A L{Deferred} is returned that will fire with an object\n implementing L{IReadFile} if the file is successfully opened. If\n C{path} is a directory, or if an exception is raised while trying\n to open the file, the L{Deferred} will fire with an error.\n '
p = (path)
if ():
return ((path))
try:
f = ('r')
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return ((f))
def openForWriting(self, path):
'\n Reject write attempts by anonymous users with\n L{PermissionDeniedError}.\n '
return (('STOR not allowed'))
def access(self, path):
p = (path)
if (not ()):
return ((path))
try:
()
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return (None)
def stat(self, path, keys=()):
p = (path)
if ():
try:
statResult = (p, keys)
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return (statResult)
else:
return ((lambda res: res[0][1]))
def list(self, path, keys=()):
'\n Return the list of files at given C{path}, adding C{keys} stat\n informations if specified.\n\n @param path: the directory or file to check.\n @type path: C{str}\n\n @param keys: the list of desired metadata\n @type keys: C{list} of C{str}\n '
filePath = (path)
if ():
entries = ()
fileEntries = [(p) for p in entries]
elif ():
entries = [(*(self.filesystemRoot))]
fileEntries = [filePath]
else:
return ((path))
results = []
for (fileName, filePath) in (entries, fileEntries):
ent = []
((fileName, ent))
if keys:
try:
((filePath, keys))
except (IOError, OSError) as e:
return (e.errno, fileName)
except:
return ()
return (results)
def _statNode(self, filePath, keys):
'\n Shortcut method to get stat info on a node.\n\n @param filePath: the node to stat.\n @type filePath: C{filepath.FilePath}\n\n @param keys: the stat keys to get.\n @type keys: C{iterable}\n '
()
return [(filePath.statinfo) for k in keys]
_stat_size = ('st_size')
_stat_permissions = ('st_mode')
_stat_hardlinks = ('st_nlink')
_stat_modified = ('st_mtime')
def _stat_owner(self, st):
if (pwd is not None):
try:
return (st.st_uid)[0]
except KeyError:
raise
return (st.st_uid)
def _stat_group(self, st):
if (grp is not None):
try:
return (st.st_gid)[0]
except KeyError:
raise
return (st.st_gid)
def _stat_directory(self, st):
return ((st.st_mode & stat.S_IFDIR))
class _FileReader(object):
(IReadFile)
def __init__(self, fObj):
self.fObj = fObj
self._send = False
def _close(self, passthrough):
self._send = True
()
return passthrough
def send(self, consumer):
if (not (not self._send)):
raise ('Can only call IReadFile.send *once* per instance')
self._send = True
d = (self.fObj, consumer)
(self._close)
return d
class FTPShell(FTPAnonymousShell):
'\n An authenticated implementation of L{IFTPShell}.\n '
def makeDirectory(self, path):
p = (path)
try:
()
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return (None)
def removeDirectory(self, path):
p = (path)
if ():
return ((path))
try:
(p.path)
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return (None)
def removeFile(self, path):
p = (path)
if ():
return ((path))
try:
()
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
else:
return (None)
def rename(self, fromPath, toPath):
fp = (fromPath)
tp = (toPath)
try:
(fp.path, tp.path)
except (IOError, OSError) as e:
return (e.errno, fromPath)
except:
return ()
else:
return (None)
def openForWriting(self, path):
'\n Open C{path} for writing.\n\n @param path: The path, as a list of segments, to open.\n @type path: C{list} of C{unicode}\n @return: A L{Deferred} is returned that will fire with an object\n implementing L{IWriteFile} if the file is successfully opened. If\n C{path} is a directory, or if an exception is raised while trying\n to open the file, the L{Deferred} will fire with an error.\n '
p = (path)
if ():
return ((path))
try:
fObj = ('w')
except (IOError, OSError) as e:
return (e.errno, path)
except:
return ()
return ((fObj))
class _FileWriter(object):
(IWriteFile)
def __init__(self, fObj):
self.fObj = fObj
self._receive = False
def receive(self):
if (not (not self._receive)):
raise ('Can only call IWriteFile.receive *once* per instance')
self._receive = True
return ((self.fObj))
def close(self):
return (None)
class BaseFTPRealm():
'\n Base class for simple FTP realms which provides an easy hook for specifying\n the home directory for each user.\n '
(portal.IRealm)
def __init__(self, anonymousRoot):
self.anonymousRoot = (anonymousRoot)
def getHomeDirectory(self, avatarId):
'\n Return a L{FilePath} representing the home directory of the given\n avatar. Override this in a subclass.\n\n @param avatarId: A user identifier returned from a credentials checker.\n @type avatarId: C{str}\n\n @rtype: L{FilePath}\n '
raise (('%r did not override getHomeDirectory' % (self.__class__,)))
def requestAvatar(self, avatarId, mind, *interfaces):
for iface in interfaces:
if (iface is IFTPShell):
if (avatarId is checkers.ANONYMOUS):
avatar = (self.anonymousRoot)
else:
avatar = ((avatarId))
return (IFTPShell, avatar, (avatar, 'logout', (lambda : None)))
raise ('Only IFTPShell interface is supported by this realm')
class FTPRealm(BaseFTPRealm):
'\n @type anonymousRoot: L{twisted.python.filepath.FilePath}\n @ivar anonymousRoot: Root of the filesystem to which anonymous\n users will be granted access.\n\n @type userHome: L{filepath.FilePath}\n @ivar userHome: Root of the filesystem containing user home directories.\n '
def __init__(self, anonymousRoot, userHome='/home'):
(self, anonymousRoot)
self.userHome = (userHome)
def getHomeDirectory(self, avatarId):
'\n Use C{avatarId} as a single path segment to construct a child of\n C{self.userHome} and return that child.\n '
return (avatarId)
class SystemFTPRealm(BaseFTPRealm):
'\n L{SystemFTPRealm} uses system user account information to decide what the\n home directory for a particular avatarId is.\n\n This works on POSIX but probably is not reliable on Windows.\n '
def getHomeDirectory(self, avatarId):
'\n Return the system-defined home directory of the system user account with\n the name C{avatarId}.\n '
path = (('~' + avatarId))
if ('~'):
raise ()
return (path)
class ConnectionLost(FTPError):
    # Raised (via the "FTP connection lost" errbacks in FTPClientBasic._fail)
    # when the control connection goes away before queued commands complete.
    pass
class CommandFailed(FTPError):
    # Raised when the server answers a command with an error response code
    # (4xx/5xx); per the FTPClient docstring it wraps the response lines.
    pass
class BadResponse(FTPError):
    # Presumably raised from lineReceived's "Server sent invalid response
    # code" branch -- the failing call is mangled here; confirm upstream.
    pass
class UnexpectedResponse(FTPError):
    # NOTE(review): appears tied to lineReceived's branch where a complete
    # response arrives while nextDeferred is None (no command waiting) --
    # the raising call is mangled in this copy; confirm against upstream.
    pass
class UnexpectedData(FTPError):
    # NOTE(review): appears tied to SenderProtocol.dataReceived, which
    # rejects data on a send-only connection -- the raising call is mangled
    # in this copy; confirm against upstream.
    pass
class FTPCommand():
    """One queued FTP command plus the deferred that tracks its response.

    NOTE(review): statements in this copy are mangled -- call targets have
    been stripped to bare parenthesised operands.  ``self.deferred = ()``
    was presumably ``defer.Deferred()`` and the bare ``(failure)`` in
    C{fail} presumably errbacked that deferred; confirm against the
    upstream twisted.protocols.ftp source before relying on this code.
    """
    def __init__(self, text=None, public=0):
        # Command line to send, e.g. "RETR path"; may be filled in later
        # (the PORT command's text is generated just before sending).
        self.text = text
        self.deferred = ()
        self.ready = 1
        self.public = public
        self.transferDeferred = None
    def fail(self, failure):
        # Only commands flagged public report failures to their deferred.
        if self.public:
            (failure)
class ProtocolWrapper(protocol.Protocol):
    """Forwarding wrapper that fires a deferred when the connection closes.

    NOTE(review): method bodies in this copy are mangled -- the statements
    below are no-op expressions as written.  Presumably each delegated to
    ``self.original`` (makeConnection/dataReceived/connectionLost) and
    C{connectionLost} then called back ``self.deferred``; confirm against
    the upstream twisted.protocols.ftp source.
    """
    def __init__(self, original, deferred):
        # original: the wrapped Protocol; deferred: fired on disconnect.
        self.original = original
        self.deferred = deferred
    def makeConnection(self, transport):
        (transport)
    def dataReceived(self, data):
        (data)
    def connectionLost(self, reason):
        (reason)
        (None)
class SenderProtocol(protocol.Protocol):
(interfaces.IFinishableConsumer)
def __init__(self):
self.connectedDeferred = ()
self.deferred = ()
def dataReceived(self, data):
raise ('Received data from the server on a send-only data-connection')
def makeConnection(self, transport):
(self, transport)
(self)
def connectionLost(self, reason):
if (error.ConnectionDone):
('connection done')
else:
(reason)
def write(self, data):
(data)
def registerProducer(self, producer, streaming):
'\n Register the given producer with our transport.\n '
(producer, streaming)
def unregisterProducer(self):
'\n Unregister the previously registered producer.\n '
()
def finish(self):
()
def decodeHostPort(line):
    """Decode an FTP response specifying a host and port.

    @return: a 2-tuple of (host, port).
    """
    # Strip everything except digits, commas and spaces, then parse the six
    # comma-separated numbers (h1,h2,h3,h4,p1,p2) of RFC 959's HOST-PORT.
    abcdef = re.sub('[^0-9, ]', '', line)
    parsed = [int(p.strip()) for p in abcdef.split(',')]
    for x in parsed:
        if (x < 0) or (x > 255):
            raise ValueError('Out of range', line, x)
    (a, b, c, d, e, f) = parsed
    host = '%s.%s.%s.%s' % (a, b, c, d)
    # Port is transmitted as high byte, low byte.
    port = (int(e) << 8) + int(f)
    return (host, port)
def encodeHostPort(host, port):
    """Encode a dotted-quad host and a port as RFC 959 HOST-PORT text.

    @param host: dotted-quad IPv4 address, e.g. C{'1.2.3.4'}
    @param port: C{int} port number

    @return: C{str} of six comma-separated numbers (h1,h2,h3,h4,p1,p2).
    """
    # Port is sent as high byte followed by low byte.
    numbers = host.split('.') + [str(port >> 8), str(port % 256)]
    return ','.join(numbers)
def _unwrapFirstError(failure):
    """Unwrap a C{defer.FirstError} raised by a fail-fast DeferredList.

    @param failure: a L{Failure} expected to wrap L{defer.FirstError}.
    @return: the underlying sub-failure that actually occurred.
    """
    # trap() re-raises if the failure is not a FirstError.
    failure.trap(defer.FirstError)
    return failure.value.subFailure
class FTPDataPortFactory(protocol.ServerFactory):
'Factory for data connections that use the PORT command\n\n (i.e. "active" transfers)\n '
noisy = 0
def buildProtocol(self, addr):
self.protocol.factory = self
()
return self.protocol
class FTPClientBasic(basic.LineReceiver):
'\n Foundations of an FTP client.\n '
debug = False
def __init__(self):
self.actionQueue = []
self.greeting = None
self.nextDeferred = (self._cb_greeting)
(self.fail)
self.response = []
self._failed = 0
def fail(self, error):
'\n Give an error to any queued deferreds.\n '
(error)
def _fail(self, error):
'\n Errback all queued deferreds.\n '
if self._failed:
return error
self._failed = 1
if self.nextDeferred:
try:
((('FTP connection lost', error)))
except defer.AlreadyCalledError:
raise
for ftpCommand in self.actionQueue:
((('FTP connection lost', error)))
return error
def _cb_greeting(self, greeting):
self.greeting = greeting
def sendLine(self, line):
'\n (Private) Sends a line, unless line is None.\n '
if (line is None):
return
(self, line)
def sendNextCommand(self):
'\n (Private) Processes the next command in the queue.\n '
ftpCommand = ()
if (ftpCommand is None):
self.nextDeferred = None
return
if (not ftpCommand.ready):
(0, ftpCommand)
(1.0, self.sendNextCommand)
self.nextDeferred = None
return
if (ftpCommand.text == 'PORT'):
(ftpCommand)
if self.debug:
(('<-- %s' % ftpCommand.text))
self.nextDeferred = ftpCommand.deferred
(ftpCommand.text)
def queueCommand(self, ftpCommand):
"\n Add an FTPCommand object to the queue.\n\n If it's the only thing in the queue, and we are connected and we aren't\n waiting for a response of an earlier command, the command will be sent\n immediately.\n\n @param ftpCommand: an L{FTPCommand}\n "
(ftpCommand)
if (((self.actionQueue) == 1) and (self.transport is not None) and (self.nextDeferred is None)):
()
def queueStringCommand(self, command, public=1):
"\n Queues a string to be issued as an FTP command\n\n @param command: string of an FTP command to queue\n @param public: a flag intended for internal use by FTPClient. Don't\n change it unless you know what you're doing.\n\n @return: a L{Deferred} that will be called when the response to the\n command has been received.\n "
ftpCommand = (command, public)
(ftpCommand)
return ftpCommand.deferred
def popCommandQueue(self):
'\n Return the front element of the command queue, or None if empty.\n '
if self.actionQueue:
return (0)
else:
return None
def queueLogin(self, username, password):
"\n Login: send the username, send the password.\n\n If the password is C{None}, the PASS command won't be sent. Also, if\n the response to the USER command has a response code of 230 (User logged\n in), then PASS won't be sent either.\n "
deferreds = []
userDeferred = (('USER ' + username))
(userDeferred)
if (password is not None):
passwordCmd = (('PASS ' + password))
(passwordCmd)
(passwordCmd.deferred)
def cancelPasswordIfNotNeeded(response):
if ('230'):
(passwordCmd)
return response
(cancelPasswordIfNotNeeded)
for deferred in deferreds:
(self.fail)
((lambda x: None))
def lineReceived(self, line):
'\n (Private) Parses the response messages from the FTP server.\n '
if self.debug:
(('--> %s' % line))
(line)
codeIsValid = ('\\d{3} ', line)
if (not codeIsValid):
return
code = line[0:3]
if (code[0] == '1'):
return
if (self.nextDeferred is None):
((self.response))
return
response = self.response
self.response = []
if (code[0] in ('2', '3')):
(response)
elif (code[0] in ('4', '5')):
(((response)))
else:
(('Server sent invalid response code %s' % (code,)))
(((response)))
()
def connectionLost(self, reason):
(reason)
class _PassiveConnectionFactory(protocol.ClientFactory):
noisy = False
def __init__(self, protoInstance):
self.protoInstance = protoInstance
def buildProtocol(self, ignored):
self.protoInstance.factory = self
return self.protoInstance
def clientConnectionFailed(self, connector, reason):
e = ('Connection Failed', reason)
(e)
class FTPClient(FTPClientBasic):
'\n L{FTPClient} is a client implementation of the FTP protocol which\n exposes FTP commands as methods which return L{Deferred}s.\n\n Each command method returns a L{Deferred} which is called back when a\n successful response code (2xx or 3xx) is received from the server or\n which is error backed if an error response code (4xx or 5xx) is received\n from the server or if a protocol violation occurs. If an error response\n code is received, the L{Deferred} fires with a L{Failure} wrapping a\n L{CommandFailed} instance. The L{CommandFailed} instance is created\n with a list of the response lines received from the server.\n\n See U{RFC 959<http://www.ietf.org/rfc/rfc959.txt>} for error code\n definitions.\n\n Both active and passive transfers are supported.\n\n @ivar passive: See description in __init__.\n '
connectFactory = reactor.connectTCP
def __init__(self, username='anonymous', password='twisted@twistedmatrix.com', passive=1):
'\n Constructor.\n\n I will login as soon as I receive the welcome message from the server.\n\n @param username: FTP username\n @param password: FTP password\n @param passive: flag that controls if I use active or passive data\n connections. You can also change this after construction by\n assigning to C{self.passive}.\n '
(self)
(username, password)
self.passive = passive
def fail(self, error):
'\n Disconnect, and also give an error to any queued deferreds.\n '
()
(error)
def receiveFromConnection(self, commands, protocol):
'\n Retrieves a file or listing generated by the given command,\n feeding it to the given protocol.\n\n @param commands: list of strings of FTP commands to execute then receive\n the results of (e.g. C{LIST}, C{RETR})\n @param protocol: A L{Protocol} B{instance} e.g. an\n L{FTPFileListProtocol}, or something that can be adapted to one.\n Typically this will be an L{IConsumer} implementation.\n\n @return: L{Deferred}.\n '
protocol = (protocol)
wrapper = (protocol, ())
return (commands, wrapper)
def queueLogin(self, username, password):
'\n Login: send the username, send the password, and\n set retrieval mode to binary\n '
(self, username, password)
d = ('TYPE I')
(self.fail)
((lambda x: None))
def sendToConnection(self, commands):
'\n XXX\n\n @return: A tuple of two L{Deferred}s:\n - L{Deferred} L{IFinishableConsumer}. You must call\n the C{finish} method on the IFinishableConsumer when the file\n is completely transferred.\n - L{Deferred} list of control-connection responses.\n '
s = ()
r = (commands, s)
return (s.connectedDeferred, r)
def _openDataConnection(self, commands, protocol):
'\n This method returns a DeferredList.\n '
cmds = [(command) for command in commands]
cmdsDeferred = ([cmd.deferred for cmd in cmds])
(_unwrapFirstError)
if self.passive:
_mutable = [None]
def doPassive(response):
'Connect to the port specified in the response to PASV'
(host, port) = (response[(- 1)][4:])
f = (protocol)
_mutable[0] = (host, port, f)
pasvCmd = ('PASV')
(pasvCmd)
(self.fail)
results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
d = (results)
(_unwrapFirstError)
def close(x, m=_mutable):
(m[0] and ())
return x
(close)
else:
portCmd = ('PORT')
portCmd.transferDeferred = protocol.deferred
portCmd.protocol = protocol
(portCmd.transferDeferred.errback)
(portCmd)
portCmd.loseConnection = (lambda result: result)
portCmd.fail = (lambda error: error)
((lambda e, pc=portCmd: ((e) or e)))
results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
d = (results)
(_unwrapFirstError)
for cmd in cmds:
(cmd)
return d
def generatePortCommand(self, portCmd):
'\n (Private) Generates the text of a given PORT command.\n '
factory = ()
factory.protocol = portCmd.protocol
listener = (0, factory)
factory.port = listener
def listenerFail(error, listener=listener):
if listener.connected:
()
return error
portCmd.fail = listenerFail
host = ().host
port = ().port
portCmd.text = ('PORT ' + (host, port))
def escapePath(self, path):
'\n Returns a FTP escaped path (replace newlines with nulls).\n '
return ('\n', '\x00')
def retrieveFile(self, path, protocol, offset=0):
"\n Retrieve a file from the given path\n\n This method issues the 'RETR' FTP command.\n\n The file is fed into the given Protocol instance. The data connection\n will be passive if self.passive is set.\n\n @param path: path to file that you wish to receive.\n @param protocol: a L{Protocol} instance.\n @param offset: offset to start downloading from\n\n @return: L{Deferred}\n "
cmds = [('RETR ' + (path))]
if offset:
(0, ('REST ' + (offset)))
return (cmds, protocol)
retr = retrieveFile
def storeFile(self, path, offset=0):
"\n Store a file at the given path.\n\n This method issues the 'STOR' FTP command.\n\n @return: A tuple of two L{Deferred}s:\n - L{Deferred} L{IFinishableConsumer}. You must call\n the C{finish} method on the IFinishableConsumer when the file\n is completely transferred.\n - L{Deferred} list of control-connection responses.\n "
cmds = [('STOR ' + (path))]
if offset:
(0, ('REST ' + (offset)))
return (cmds)
stor = storeFile
def rename(self, pathFrom, pathTo):
'\n Rename a file.\n\n This method issues the I{RNFR}/I{RNTO} command sequence to rename\n C{pathFrom} to C{pathTo}.\n\n @param: pathFrom: the absolute path to the file to be renamed\n @type pathFrom: C{str}\n\n @param: pathTo: the absolute path to rename the file to.\n @type pathTo: C{str}\n\n @return: A L{Deferred} which fires when the rename operation has\n succeeded or failed. If it succeeds, the L{Deferred} is called\n back with a two-tuple of lists. The first list contains the\n responses to the I{RNFR} command. The second list contains the\n responses to the I{RNTO} command. If either I{RNFR} or I{RNTO}\n fails, the L{Deferred} is errbacked with L{CommandFailed} or\n L{BadResponse}.\n @rtype: L{Deferred}\n\n @since: 8.2\n '
renameFrom = (('RNFR ' + (pathFrom)))
renameTo = (('RNTO ' + (pathTo)))
fromResponse = []
result = ()
((lambda toResponse: (fromResponse, toResponse)))
def ebFrom(failure):
()
(failure)
(fromResponse.extend, ebFrom)
(result)
return result
def list(self, path, protocol):
"\n Retrieve a file listing into the given protocol instance.\n\n This method issues the 'LIST' FTP command.\n\n @param path: path to get a file listing for.\n @param protocol: a L{Protocol} instance, probably a\n L{FTPFileListProtocol} instance. It can cope with most common file\n listing formats.\n\n @return: L{Deferred}\n "
if (path is None):
path = ''
return ([('LIST ' + (path))], protocol)
def nlst(self, path, protocol):
"\n Retrieve a short file listing into the given protocol instance.\n\n This method issues the 'NLST' FTP command.\n\n NLST (should) return a list of filenames, one per line.\n\n @param path: path to get short file listing for.\n @param protocol: a L{Protocol} instance.\n "
if (path is None):
path = ''
return ([('NLST ' + (path))], protocol)
def cwd(self, path):
"\n Issues the CWD (Change Working Directory) command. It's also\n available as changeDirectory, which parses the result.\n\n @return: a L{Deferred} that will be called when done.\n "
return (('CWD ' + (path)))
def changeDirectory(self, path):
'\n Change the directory on the server and parse the result to determine\n if it was successful or not.\n\n @type path: C{str}\n @param path: The path to which to change.\n\n @return: a L{Deferred} which will be called back when the directory\n change has succeeded or errbacked if an error occurrs.\n '
('FTPClient.changeDirectory is deprecated in Twisted 8.2 and newer. Use FTPClient.cwd instead.')
def cbResult(result):
if (result[(- 1)][:3] != '250'):
return ((result))
return True
return (cbResult)
def makeDirectory(self, path):
'\n Make a directory\n\n This method issues the MKD command.\n\n @param path: The path to the directory to create.\n @type path: C{str}\n\n @return: A L{Deferred} which fires when the server responds. If the\n directory is created, the L{Deferred} is called back with the\n server response. If the server response indicates the directory\n was not created, the L{Deferred} is errbacked with a L{Failure}\n wrapping L{CommandFailed} or L{BadResponse}.\n @rtype: L{Deferred}\n\n @since: 8.2\n '
return (('MKD ' + (path)))
def removeFile(self, path):
'\n Delete a file on the server.\n\n L{removeFile} issues a I{DELE} command to the server to remove the\n indicated file. Note that this command cannot remove a directory.\n\n @param path: The path to the file to delete. May be relative to the\n current dir.\n @type path: C{str}\n\n @return: A L{Deferred} which fires when the server responds. On error,\n it is errbacked with either L{CommandFailed} or L{BadResponse}. On\n success, it is called back with a list of response lines.\n @rtype: L{Deferred}\n\n @since: 8.2\n '
return (('DELE ' + (path)))
def cdup(self):
'\n Issues the CDUP (Change Directory UP) command.\n\n @return: a L{Deferred} that will be called when done.\n '
return ('CDUP')
def pwd(self):
'\n Issues the PWD (Print Working Directory) command.\n\n The L{getDirectory} does the same job but automatically parses the\n result.\n\n @return: a L{Deferred} that will be called when done. It is up to the\n caller to interpret the response, but the L{parsePWDResponse} method\n in this module should work.\n '
return ('PWD')
def getDirectory(self):
'\n Returns the current remote directory.\n\n @return: a L{Deferred} that will be called back with a C{str} giving\n the remote directory or which will errback with L{CommandFailed}\n if an error response is returned.\n '
def cbParse(result):
try:
if (((' ', 1)[0]) != 257):
raise ValueError
except (IndexError, ValueError):
return ((result))
path = (result[0])
if (path is None):
return ((result))
return path
return (cbParse)
def quit(self):
'\n Issues the I{QUIT} command.\n\n @return: A L{Deferred} that fires when the server acknowledges the\n I{QUIT} command. The transport should not be disconnected until\n this L{Deferred} fires.\n '
return ('QUIT')
class FTPFileListProtocol(basic.LineReceiver):
"Parser for standard FTP file listings\n\n This is the evil required to match::\n\n -rw-r--r-- 1 root other 531 Jan 29 03:26 README\n\n If you need different evil for a wacky FTP server, you can\n override either C{fileLinePattern} or C{parseDirectoryLine()}.\n\n It populates the instance attribute self.files, which is a list containing\n dicts with the following keys (examples from the above line):\n - filetype: e.g. 'd' for directories, or '-' for an ordinary file\n - perms: e.g. 'rw-r--r--'\n - nlinks: e.g. 1\n - owner: e.g. 'root'\n - group: e.g. 'other'\n - size: e.g. 531\n - date: e.g. 'Jan 29 03:26'\n - filename: e.g. 'README'\n - linktarget: e.g. 'some/file'\n\n Note that the 'date' value will be formatted differently depending on the\n date. Check U{http://cr.yp.to/ftp.html} if you really want to try to parse\n it.\n\n @ivar files: list of dicts describing the files in this listing\n "
fileLinePattern = ('^(?P<filetype>.)(?P<perms>.{9})\\s+(?P<nlinks>\\d*)\\s*(?P<owner>\\S+)\\s+(?P<group>\\S+)\\s+(?P<size>\\d+)\\s+(?P<date>...\\s+\\d+\\s+[\\d:]+)\\s+(?P<filename>([^ ]|\\\\ )*?)( -> (?P<linktarget>[^\\r]*))?\\r?$')
delimiter = '\n'
def __init__(self):
self.files = []
def lineReceived(self, line):
d = (line)
if (d is None):
(line)
else:
(d)
def parseDirectoryLine(self, line):
'Return a dictionary of fields, or None if line cannot be parsed.\n\n @param line: line of text expected to contain a directory entry\n @type line: str\n\n @return: dict\n '
match = (line)
if (match is None):
return None
else:
d = ()
d['filename'] = ('\\ ', ' ')
d['nlinks'] = (d['nlinks'])
d['size'] = (d['size'])
if d['linktarget']:
d['linktarget'] = ('\\ ', ' ')
return d
def addFile(self, info):
'Append file information dictionary to the list of known files.\n\n Subclasses can override or extend this method to handle file\n information differently without affecting the parsing of data\n from the server.\n\n @param info: dictionary containing the parsed representation\n of the file information\n @type info: dict\n '
(info)
def unknownLine(self, line):
'Deal with received lines which could not be parsed as file\n information.\n\n Subclasses can override this to perform any special processing\n needed.\n\n @param line: unparsable line as received\n @type line: str\n '
pass
def parsePWDResponse(response):
    """Returns the path from a response to a PWD command.

    Responses typically look like::

        257 "/home/andrew" is current directory.

    For this example, I will return C{'/home/andrew'}.

    If I can't find the path, I return C{None}.
    """
    match = re.search('"(.*)"', response)
    if match:
        return match.groups()[0]
    else:
        return None
# -*- test-case-name: twisted.test.test_roots -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted Python Roots: an abstract hierarchy representation for Twisted.
Maintainer: Glyph Lefkowitz
"""
# System imports
import types
from twisted.python import reflect
class NotSupportedError(NotImplementedError):
    """Raised when a tree-manipulation operation is attempted on a
    collection that does not support it.
    """
class Request:
    """An abstract representation of a request for an entity.

    A Request also functions as its own response: it is responded to by
    calling self.write(data) until no data remains, then calling
    self.finish().
    """

    # String name of the wire protocol being responded to (e.g. HTTP, FTP).
    wireProtocol = None

    def write(self, data):
        """Add some data to the response to this request."""
        raise NotImplementedError("%s.write" % reflect.qual(self.__class__))

    def finish(self):
        """The response to this request is finished; flush all data to the network stream."""
        raise NotImplementedError("%s.finish" % reflect.qual(self.__class__))
class Entity:
    """A terminal (childless) object in a hierarchy.

    Represents a null interface; certain non-instance objects (notably
    strings and integers) are treated as Entities.  The methods here are
    suggested rather than required, and are emulated per-protocol for
    types which do not implement them.
    """

    def render(self, request):
        """Produce a stream of bytes for the request by calling
        request.write() and request.finish().
        """
        raise NotImplementedError("%s.render" % reflect.qual(self.__class__))
class Collection:
    """A static collection of named entities.

    Entities may be stored directly (putEntity) or generated on demand by
    subclasses (getDynamicEntity); static entries shadow dynamic ones.
    """

    def __init__(self, entities=None):
        """Initialize me."""
        self.entities = entities if entities is not None else {}

    def getStaticEntity(self, name):
        """Get an entity that was added to me using putEntity.

        This method will return 'None' if it fails.
        """
        return self.entities.get(name)

    def getDynamicEntity(self, name, request):
        """Subclass this to generate an entity on demand.

        This method should return 'None' if it fails.
        """

    def getEntity(self, name, request):
        """Retrieve an entity from me.

        Static retrieval is attempted first, so static entities obscure
        dynamic ones; 'None' is returned when neither lookup succeeds.
        """
        found = self.getStaticEntity(name)
        if found is None:
            found = self.getDynamicEntity(name, request)
        return found

    def putEntity(self, name, entity):
        """Store a static reference on 'name' for 'entity'.

        Raises a KeyError if the operation fails.
        """
        self.entities[name] = entity

    def delEntity(self, name):
        """Remove a static reference for 'name'.

        Raises a KeyError if the operation fails.
        """
        del self.entities[name]

    def storeEntity(self, name, request):
        """Store an entity for 'name', based on the content of 'request'."""
        raise NotSupportedError("%s.storeEntity" % reflect.qual(self.__class__))

    def removeEntity(self, name, request):
        """Remove an entity for 'name', based on the content of 'request'."""
        raise NotSupportedError("%s.removeEntity" % reflect.qual(self.__class__))

    def listStaticEntities(self):
        """All (name, entity) pairs I hold static references to.

        See getStaticEntity.
        """
        return list(self.entities.items())

    def listDynamicEntities(self, request):
        """All (name, entity) pairs I can generate on demand.

        See getDynamicEntity.
        """
        return []

    def listEntities(self, request):
        """Every (name, entity) pair I contain, static and dynamic.

        See getEntity.
        """
        return self.listStaticEntities() + self.listDynamicEntities(request)

    def listStaticNames(self):
        """Names of all entities I hold static references to.

        See getStaticEntity.
        """
        return list(self.entities.keys())

    def listDynamicNames(self):
        """Names of all entities I can generate on demand.

        See getDynamicEntity.
        """
        return []

    def listNames(self, request):
        """Every name for entities I contain.

        See getEntity.
        """
        return self.listStaticNames()
class ConstraintViolation(Exception):
    """Signals that a name or entity constraint rejected an operation."""
class Constrained(Collection):
    """A collection that has constraints on its names and/or entities."""

    def nameConstraint(self, name):
        """Decide whether an entity may be stored under the given name.

        Return 1 when the constraint is satisfied; otherwise return 0 or
        raise a descriptive ConstraintViolation.
        """
        return 1

    def entityConstraint(self, entity):
        """Decide whether the given entity may be added to me.

        Return 1 when the constraint is satisfied; otherwise return 0 or
        raise a descriptive ConstraintViolation.
        """
        return 1

    def reallyPutEntity(self, name, entity):
        # Store without re-checking constraints (used once checks pass).
        Collection.putEntity(self, name, entity)

    def putEntity(self, name, entity):
        """Store an entity if it meets both constraints.

        Otherwise raise a ConstraintViolation.
        """
        # Name is checked first, matching the documented failure order.
        if not self.nameConstraint(name):
            raise ConstraintViolation("Name constraint violated.")
        if not self.entityConstraint(entity):
            raise ConstraintViolation("Entity constraint violated.")
        self.reallyPutEntity(name, entity)
class Locked(Constrained):
    """A collection that can be locked against adding further entities."""

    # Starts unlocked; lock() flips this permanently.
    locked = 0

    def lock(self):
        """Prevent any further entities from being added."""
        self.locked = 1

    def entityConstraint(self, entity):
        """Accept any entity while unlocked; reject everything once locked."""
        if self.locked:
            return False
        return True
class Homogenous(Constrained):
    """A homogenous collection of entities.

    I will only contain entities that are an instance of the class or type
    specified by my 'entityType' attribute.
    """

    # types.InstanceType (the old-style instance type) was removed in
    # Python 3; fall back to 'object' there so importing this module does
    # not fail.  On Python 2 the behavior is unchanged.
    entityType = getattr(types, "InstanceType", object)

    def entityConstraint(self, entity):
        """Accept 'entity' only if it is an instance of my entityType.

        @raise ConstraintViolation: if the entity is of the wrong type.
        """
        if isinstance(entity, self.entityType):
            return 1
        else:
            raise ConstraintViolation(
                "%s of incorrect type (%s)" % (entity, self.entityType)
            )

    def getNameType(self):
        """Return the human-readable label for names in this collection."""
        return "Name"

    def getEntityType(self):
        """Return the name of the type my entities must be instances of."""
        return self.entityType.__name__
|
# -*- test-case-name: twisted.test.test_paths.ZipFilePathTestCase -*-
# Copyright (c) 2006-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains partial re-implementations of FilePath, pending some
specification of formal interfaces it is a duck-typing attempt to emulate them
for certain restricted uses.
See the constructor for ZipArchive for use.
"""
__metaclass__ = type
import os
import time
import errno
from twisted.python.zipstream import ChunkingZipFile
from twisted.python.filepath import FilePath, _PathHelper
# using FilePath here exclusively rather than os to make sure that we don't do
# anything OS-path-specific here.
ZIP_PATH_SEP = "/" # In zipfiles, "/" is universally used as the
# path separator, regardless of platform.
class ZipPath(_PathHelper):
    """
    I represent a file or directory contained within a zip file.
    """
    def __init__(self, archive, pathInArchive):
        """
        Don't construct me directly. Use ZipArchive.child().
        @param archive: a ZipArchive instance.
        @param pathInArchive: a ZIP_PATH_SEP-separated string.
        """
        self.archive = archive
        self.pathInArchive = pathInArchive
        # self.path pretends to be os-specific because that's the way the
        # 'zipimport' module does it.
        self.path = os.path.join(
            archive.zipfile.filename, *(self.pathInArchive.split(ZIP_PATH_SEP))
        )
    def __cmp__(self, other):
        # Order by (archive, pathInArchive); non-ZipPath comparisons are
        # deferred to the other operand.
        # NOTE(review): __cmp__ and the cmp() builtin are Python 2 only;
        # under Python 3 this method is never invoked -- confirm the
        # intended interpreter version before relying on ordering.
        if not isinstance(other, ZipPath):
            return NotImplemented
        return cmp(
            (self.archive, self.pathInArchive), (other.archive, other.pathInArchive)
        )
    def __repr__(self):
        # Render an OS-style path rooted at the archive file on disk.
        parts = [os.path.abspath(self.archive.path)]
        parts.extend(self.pathInArchive.split(ZIP_PATH_SEP))
        path = os.sep.join(parts)
        # NOTE(review): the 'string-escape' codec exists only on Python 2.
        return "ZipPath('%s')" % (path.encode("string-escape"),)
    def parent(self):
        # The parent of a top-level entry is the archive object itself.
        splitup = self.pathInArchive.split(ZIP_PATH_SEP)
        if len(splitup) == 1:
            return self.archive
        return ZipPath(self.archive, ZIP_PATH_SEP.join(splitup[:-1]))
    def child(self, path):
        """
        Return a new ZipPath representing a path in C{self.archive} which is
        a child of this path.
        @note: Requesting the C{".."} (or other special name) child will not
            cause L{InsecurePath} to be raised since these names do not have
            any special meaning inside a zip archive. Be particularly
            careful with the C{path} attribute (if you absolutely must use
            it) as this means it may include special names with special
            meaning outside of the context of a zip archive.
        """
        return ZipPath(self.archive, ZIP_PATH_SEP.join([self.pathInArchive, path]))
    def sibling(self, path):
        # A sibling is simply a child of this path's parent.
        return self.parent().child(path)
    # preauthChild = child
    def exists(self):
        # An entry "exists" if it is either a directory prefix or a file
        # entry in the archive.
        return self.isdir() or self.isfile()
    def isdir(self):
        # Directories are exactly the keys of the archive's childmap.
        return self.pathInArchive in self.archive.childmap
    def isfile(self):
        # Files are the actual member entries recorded in the zipfile.
        return self.pathInArchive in self.archive.zipfile.NameToInfo
    def islink(self):
        # Zip archives have no notion of symbolic links.
        return False
    def listdir(self):
        # Mirror os.listdir's errors: ENOTDIR for a file, ENOENT for a
        # missing entry.
        if self.exists():
            if self.isdir():
                return list(self.archive.childmap[self.pathInArchive].keys())
            else:
                raise OSError(errno.ENOTDIR, "Leaf zip entry listed")
        else:
            raise OSError(errno.ENOENT, "Non-existent zip entry listed")
    def splitext(self):
        """
        Return a value similar to that returned by os.path.splitext.
        """
        # This happens to work out because of the fact that we use OS-specific
        # path separators in the constructor to construct our fake 'path'
        # attribute.
        return os.path.splitext(self.path)
    def basename(self):
        # Last ZIP_PATH_SEP-separated component of the in-archive path.
        return self.pathInArchive.split(ZIP_PATH_SEP)[-1]
    def dirname(self):
        # XXX NOTE: This API isn't a very good idea on filepath, but it's even
        # less meaningful here.
        return self.parent().path
    def open(self):
        # Delegate to ChunkingZipFile to get a readable file-like object.
        return self.archive.zipfile.readfile(self.pathInArchive)
    def restat(self):
        # No cached stat information to refresh for zip entries.
        pass
    def getAccessTime(self):
        """
        Retrieve this file's last access-time. This is the same as the last access
        time for the archive.
        @return: a number of seconds since the epoch
        """
        return self.archive.getAccessTime()
    def getModificationTime(self):
        """
        Retrieve this file's last modification time. This is the time of
        modification recorded in the zipfile.
        @return: a number of seconds since the epoch.
        """
        # ZipInfo.date_time is a 6-tuple; pad to the 9-tuple mktime needs.
        return time.mktime(
            self.archive.zipfile.NameToInfo[self.pathInArchive].date_time + (0, 0, 0)
        )
    def getStatusChangeTime(self):
        """
        Retrieve this file's last modification time. This name is provided for
        compatibility, and returns the same value as getmtime.
        @return: a number of seconds since the epoch.
        """
        return self.getModificationTime()
class ZipArchive(ZipPath):
    """I am a FilePath-like object which can wrap a zip archive as if it were a
    directory.
    """
    # An archive is its own 'archive', so ZipPath methods work unchanged.
    archive = property(lambda self: self)
    def __init__(self, archivePathname):
        """Create a ZipArchive, treating the archive at archivePathname as a zip file.
        @param archivePathname: a str, naming a path in the filesystem.
        """
        self.zipfile = ChunkingZipFile(archivePathname)
        self.path = archivePathname
        self.pathInArchive = ""
        # zipfile is already wasting O(N) memory on cached ZipInfo instances,
        # so there's no sense in trying to do this lazily or intelligently
        self.childmap = {} # map parent: list of children
        for name in self.zipfile.namelist():
            name = name.split(ZIP_PATH_SEP)
            for x in range(len(name)):
                # Quirky but deliberate negative indexing: when x == 0,
                # name[-0] is name[0] and name[:-0] is the empty list, so
                # the first path component is registered under the root
                # (""); for x >= 1 each ancestor directory is registered
                # under its own parent.
                child = name[-x]
                parent = ZIP_PATH_SEP.join(name[:-x])
                if parent not in self.childmap:
                    self.childmap[parent] = {}
                self.childmap[parent][child] = 1
            # NOTE(review): this rebind of 'parent' appears to be a leftover
            # with no effect -- 'parent' is not read after the inner loop.
            parent = ""
    def child(self, path):
        """
        Create a ZipPath pointing at a path within the archive.
        @param path: a str with no path separators in it, either '/' or the
            system path separator, if it's different.
        """
        return ZipPath(self, path)
    def exists(self):
        """
        Returns true if the underlying archive exists.
        """
        return FilePath(self.zipfile.filename).exists()
    def getAccessTime(self):
        """
        Return the archive file's last access time.
        """
        return FilePath(self.zipfile.filename).getAccessTime()
    def getModificationTime(self):
        """
        Return the archive file's modification time.
        """
        return FilePath(self.zipfile.filename).getModificationTime()
    def getStatusChangeTime(self):
        """
        Return the archive file's status change time.
        """
        return FilePath(self.zipfile.filename).getStatusChangeTime()
    def __repr__(self):
        return "ZipArchive(%r)" % (os.path.abspath(self.path),)
__all__ = ["ZipArchive", "ZipPath"]
|
# -*- test-case-name: twisted.trial.test.test_script -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os, random, gc, time, warnings
from twisted.internet import defer
from twisted.application import app
from twisted.python import usage, reflect, failure
from twisted.python.filepath import FilePath
from twisted import plugin
from twisted.python.util import spewer
from twisted.python.compat import set
from twisted.trial import runner, itrial, reporter
# Yea, this is stupid. Leave it for for command-line compatibility for a
# while, though.
# Maps every accepted --tbformat option value onto one of the canonical
# traceback-format names ("default", "brief", "verbose") used internally.
TBFORMAT_MAP = {
    "plain": "default",
    "default": "default",
    "emacs": "brief",
    "brief": "brief",
    "cgitb": "verbose",
    "verbose": "verbose",
}
def _parseLocalVariables(line):
"""Accepts a single line in Emacs local variable declaration format and
returns a dict of all the variables {name: value}.
Raises ValueError if 'line' is in the wrong format.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
paren = "-*-"
start = line.find(paren) + len(paren)
end = line.rfind(paren)
if start == -1 or end == -1:
raise ValueError("%r not a valid local variable declaration" % (line,))
items = line[start:end].split(";")
localVars = {}
for item in items:
if len(item.strip()) == 0:
continue
split = item.split(":")
if len(split) != 2:
raise ValueError("%r contains invalid declaration %r" % (line, item))
localVars[split[0].strip()] = split[1].strip()
return localVars
def loadLocalVariables(filename):
    """Accepts a filename and attempts to load the Emacs variable declarations
    from that file, simulating what Emacs does.

    See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
    """
    # The py2-only 'file()' builtin is gone in Python 3; use open() with a
    # context manager so the handle is closed even if readline() fails.
    # Emacs only examines the first lines of a file for local variables.
    with open(filename, "r") as f:
        lines = [f.readline(), f.readline()]
    for line in lines:
        try:
            return _parseLocalVariables(line)
        except ValueError:
            pass
    return {}
def getTestModules(filename):
    """Return the comma-separated test module names declared in 'filename's
    Emacs local variables, or an empty list when none are declared.
    """
    declared = loadLocalVariables(filename).get("test-case-name")
    if declared is None:
        return []
    return declared.split(",")
def isTestFile(filename):
    """Returns true if 'filename' looks like a file containing unit tests.
    False otherwise. Doesn't care whether filename exists.
    """
    base = os.path.basename(filename)
    root, extension = os.path.splitext(base)
    return base.startswith("test_") and extension == ".py"
def _zshReporterAction():
    """Build the zsh completion action string listing all reporter plugin
    long-option names.
    """
    names = [p.longOpt for p in plugin.getPlugins(itrial.IReporter)]
    return "(%s)" % (" ".join(names),)
# Command-line option definitions and parsing for the trial test runner.
# usage.Options discovers opt_* methods and the optFlags/optParameters
# declarations reflectively, so names and ordering here are significant.
class Options(usage.Options, app.ReactorSelectionMixin):
    synopsis = """%s [options] [[file|package|module|TestCase|testmethod]...]
    """ % (
        os.path.basename(sys.argv[0]),
    )
    longdesc = (
        "trial loads and executes a suite of unit tests, obtained "
        "from modules, packages and files listed on the command line."
    )
    optFlags = [
        ["help", "h"],
        [
            "rterrors",
            "e",
            "realtime errors, print out tracebacks as " "soon as they occur",
        ],
        [
            "debug",
            "b",
            "Run tests in the Python debugger. Will load "
            "'.pdbrc' from current directory if it exists.",
        ],
        [
            "debug-stacktraces",
            "B",
            "Report Deferred creation and " "callback stack traces",
        ],
        [
            "nopm",
            None,
            "don't automatically jump into debugger for " "postmorteming of exceptions",
        ],
        ["dry-run", "n", "do everything but run the tests"],
        [
            "force-gc",
            None,
            "Have Trial run gc.collect() before and " "after each test case.",
        ],
        ["profile", None, "Run tests under the Python profiler"],
        ["unclean-warnings", None, "Turn dirty reactor errors into warnings"],
        ["until-failure", "u", "Repeat test until it fails"],
        ["no-recurse", "N", "Don't recurse into packages"],
        ["help-reporters", None, "Help on available output plugins (reporters)"],
    ]
    optParameters = [
        ["logfile", "l", "test.log", "log file name"],
        ["random", "z", None, "Run tests in random order using the specified seed"],
        [
            "temp-directory",
            None,
            "_trial_temp",
            "Path to use as working directory for tests.",
        ],
        [
            "reporter",
            None,
            "verbose",
            "The reporter to use for this test run. See --help-reporters for "
            "more info.",
        ],
    ]
    # zsh shell-completion metadata consumed by Twisted's completion machinery.
    zsh_actions = {"tbformat": "(plain emacs cgitb)", "reporter": _zshReporterAction}
    zsh_actionDescr = {"logfile": "log file name", "random": "random seed"}
    zsh_extras = ["*:file|module|package|TestCase|testMethod:_files -g '*.py'"]
    # Reporter class used when no plugin reporter is selected.
    fallbackReporter = reporter.TreeReporter
    # Extra positional arguments collected via -x/--extra (None until used).
    extra = None
    # trace.Trace instance once --coverage is enabled, else None.
    tracer = None
    def __init__(self):
        # "tests" accumulates names/paths of everything to run; a set so
        # duplicates on the command line are collapsed.
        self["tests"] = set()
        usage.Options.__init__(self)
    def coverdir(self):
        """
        Return a L{FilePath} representing the directory into which coverage
        results should be written.
        """
        coverdir = "coverage"
        result = FilePath(self["temp-directory"]).child(coverdir)
        print("Setting coverage directory to %s." % (result.path,))
        return result
    def opt_coverage(self):
        """
        Generate coverage information in the I{coverage} file in the
        directory specified by the I{trial-temp} option.
        """
        import trace
        # Start tracing immediately so even import-time code is counted.
        self.tracer = trace.Trace(count=1, trace=0)
        sys.settrace(self.tracer.globaltrace)
    def opt_testmodule(self, filename):
        "Filename to grep for test cases (-*- test-case-name)"
        # If the filename passed to this parameter looks like a test module
        # we just add that to the test suite.
        #
        # If not, we inspect it for an Emacs buffer local variable called
        # 'test-case-name'. If that variable is declared, we try to add its
        # value to the test suite as a module.
        #
        # This parameter allows automated processes (like Buildbot) to pass
        # a list of files to Trial with the general expectation of "these files,
        # whatever they are, will get tested"
        if not os.path.isfile(filename):
            sys.stderr.write("File %r doesn't exist\n" % (filename,))
            return
        filename = os.path.abspath(filename)
        if isTestFile(filename):
            self["tests"].add(filename)
        else:
            self["tests"].update(getTestModules(filename))
    def opt_spew(self):
        """Print an insanely verbose log of everything that happens. Useful
        when debugging freezes or locks in complex code."""
        sys.settrace(spewer)
    def opt_help_reporters(self):
        synopsis = (
            "Trial's output can be customized using plugins called "
            "Reporters. You can\nselect any of the following "
            "reporters using --reporter=<foo>\n"
        )
        print(synopsis)
        for p in plugin.getPlugins(itrial.IReporter):
            print(" ", p.longOpt, "\t", p.description)
        print()
        # Listing reporters is a terminal action, like --help.
        sys.exit(0)
    def opt_disablegc(self):
        """Disable the garbage collector"""
        gc.disable()
    def opt_tbformat(self, opt):
        """Specify the format to display tracebacks with. Valid formats are
        'plain', 'emacs', and 'cgitb' which uses the nicely verbose stdlib
        cgitb.text function"""
        try:
            self["tbformat"] = TBFORMAT_MAP[opt]
        except KeyError:
            raise usage.UsageError("tbformat must be 'plain', 'emacs', or 'cgitb'.")
    def opt_extra(self, arg):
        """
        Add an extra argument. (This is a hack necessary for interfacing with
        emacs's `gud'.)
        """
        if self.extra is None:
            self.extra = []
        self.extra.append(arg)
    opt_x = opt_extra
    def opt_recursionlimit(self, arg):
        """see sys.setrecursionlimit()"""
        try:
            sys.setrecursionlimit(int(arg))
        except (TypeError, ValueError):
            raise usage.UsageError("argument to recursionlimit must be an integer")
    def opt_random(self, option):
        try:
            self["random"] = int(option)
        except ValueError:
            raise usage.UsageError("Argument to --random must be a positive integer")
        else:
            if self["random"] < 0:
                raise usage.UsageError(
                    "Argument to --random must be a positive integer"
                )
            elif self["random"] == 0:
                # 0 means "pick a seed for me" -- derive one from the clock.
                self["random"] = int(time.time() * 100)
    def opt_without_module(self, option):
        """
        Fake the lack of the specified modules, separated with commas.
        """
        for module in option.split(","):
            if module in sys.modules:
                warnings.warn(
                    "Module '%s' already imported, " "disabling anyway." % (module,),
                    category=RuntimeWarning,
                )
            # A None entry in sys.modules makes future imports raise.
            sys.modules[module] = None
    def parseArgs(self, *args):
        self["tests"].update(args)
        if self.extra is not None:
            self["tests"].update(self.extra)
    def _loadReporterByName(self, name):
        # Resolve a --reporter long-option name to its reporter class.
        for p in plugin.getPlugins(itrial.IReporter):
            qual = "%s.%s" % (p.module, p.klass)
            if p.longOpt == name:
                return reflect.namedAny(qual)
        raise usage.UsageError(
            "Only pass names of Reporter plugins to "
            "--reporter. See --help-reporters for "
            "more info."
        )
    def postOptions(self):
        # Only load reporters now, as opposed to any earlier, to avoid letting
        # application-defined plugins muck up reactor selecting by importing
        # t.i.reactor and causing the default to be installed.
        self["reporter"] = self._loadReporterByName(self["reporter"])
        if "tbformat" not in self:
            self["tbformat"] = "default"
        if self["nopm"]:
            if not self["debug"]:
                raise usage.UsageError("you must specify --debug when using " "--nopm ")
            failure.DO_POST_MORTEM = False
def _initialDebugSetup(config):
    """Turn on Failure and Deferred debugging according to the parsed
    options, before anything else runs.
    """
    # do this part of debug setup first for easy debugging of import failures
    debugging = config["debug"]
    if debugging:
        failure.startDebugMode()
    if debugging or config["debug-stacktraces"]:
        defer.setDebugging(True)
def _getSuite(config):
    """Load and return the test suite named by the parsed trial options."""
    shouldRecurse = not config["no-recurse"]
    return _getLoader(config).loadByNames(config["tests"], shouldRecurse)
def _getLoader(config):
    """Build a TestLoader configured for ordering and suite type from the
    parsed trial options.
    """
    loader = runner.TestLoader()
    seed = config["random"]
    if seed:
        rng = random.Random()
        rng.seed(seed)
        # Sort key is random per test, shuffling the run order.
        loader.sorter = lambda x: rng.random()
        print("Running tests shuffled with seed %d\n" % seed)
    if not config["until-failure"]:
        loader.suiteFactory = runner.DestructiveTestSuite
    return loader
def _makeRunner(config):
    """Construct a TrialRunner from the parsed trial options.

    --dry-run takes precedence over --debug when both are given.
    """
    if config["dry-run"]:
        mode = runner.TrialRunner.DRY_RUN
    elif config["debug"]:
        mode = runner.TrialRunner.DEBUG
    else:
        mode = None
    return runner.TrialRunner(
        config["reporter"],
        mode=mode,
        profile=config["profile"],
        logfile=config["logfile"],
        tracebackFormat=config["tbformat"],
        realTimeErrors=config["rterrors"],
        uncleanWarnings=config["unclean-warnings"],
        workingDirectory=config["temp-directory"],
        forceGarbageCollection=config["force-gc"],
    )
def run():
    """Parse the command line, run the requested test suite, and exit the
    process with 0 on success or 1 on failure.
    """
    # With no arguments at all, behave as if --help were given.
    if len(sys.argv) == 1:
        sys.argv.append("--help")
    config = Options()
    try:
        config.parseOptions()
    except usage.error as ue:
        raise SystemExit("%s: %s" % (sys.argv[0], ue))
    _initialDebugSetup(config)
    trialRunner = _makeRunner(config)
    suite = _getSuite(config)
    if config["until-failure"]:
        test_result = trialRunner.runUntilFailure(suite)
    else:
        test_result = trialRunner.run(suite)
    if config.tracer:
        # Stop coverage tracing before writing out the results.
        sys.settrace(None)
        results = config.tracer.results()
        results.write_results(
            show_missing=1, summary=False, coverdir=config.coverdir().path
        )
    # Exit status 0 iff the whole run succeeded.
    sys.exit(not test_result.wasSuccessful())
|
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) 2007 Twisted Matrix Laboratories.
# See LICENSE for details.
# Don't change the docstring, it's part of the tests
"""
I'm a test drop-in. The plugin system's unit tests use me. No one
else should.
"""
from zope.interface import classProvides
from twisted.plugin import IPlugin
from twisted.test.test_plugin import ITestPlugin, ITestPlugin2
class TestPlugin:
    """
    A plugin used solely for testing purposes.
    """

    classProvides(ITestPlugin, IPlugin)

    @staticmethod
    def test1():
        pass
class AnotherTestPlugin:
    """
    Another plugin used solely for testing purposes.
    """

    classProvides(ITestPlugin2, IPlugin)

    @staticmethod
    def test():
        pass
class ThirdTestPlugin:
    """
    Another plugin used solely for testing purposes.
    """

    classProvides(ITestPlugin2, IPlugin)

    @staticmethod
    def test():
        pass
|
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.reflect module.
"""
import weakref, os
from ihooks import ModuleImporter
try:
from collections import deque
except ImportError:
deque = None
from twisted.trial import unittest
from twisted.python import reflect, util
from twisted.python.versions import Version
# Tests for reflect.Settable's callable keyword-attribute assignment.
class SettableTest(unittest.TestCase):
    def setUp(self):
        self.setter = reflect.Settable()
    def tearDown(self):
        del self.setter
    def testSet(self):
        # Calling a Settable with keywords sets same-named attributes.
        self.setter(a=1, b=2)
        self.assertEqual(self.setter.a, 1)
        self.assertEqual(self.setter.b, 2)
# Fixture exercising reflect.Accessor's set_/get_/del_ hook methods.
class AccessorTester(reflect.Accessor):
    def set_x(self, x):
        # Setting x also records the value in y before really storing x.
        self.y = x
        self.reallySet("x", x)
    def get_z(self):
        # Reading z sets q as a side effect and always yields 1.
        self.q = 1
        return 1
    def del_z(self):
        # Deleting z actually removes the q attribute set by get_z.
        self.reallyDel("q")
class PropertyAccessorTester(reflect.PropertyAccessor):
    """
    Test class to check L{reflect.PropertyAccessor} functionalities.
    """
    # Class-level default for r; set_r redirects writes into s instead.
    r = 0
    def set_r(self, r):
        self.s = r
    def set_x(self, x):
        # Same hook pattern as AccessorTester: mirror into y, then store.
        self.y = x
        self.reallySet("x", x)
    def get_z(self):
        self.q = 1
        return 1
    def del_z(self):
        self.reallyDel("q")
# Tests for reflect.Accessor via the AccessorTester fixture above.
class AccessorTest(unittest.TestCase):
    def setUp(self):
        self.tester = AccessorTester()
    def testSet(self):
        # set_x stores x and mirrors the value into y.
        self.tester.x = 1
        self.assertEqual(self.tester.x, 1)
        self.assertEqual(self.tester.y, 1)
    def testGet(self):
        # get_z returns 1 and sets q as a side effect.
        self.assertEqual(self.tester.z, 1)
        self.assertEqual(self.tester.q, 1)
    def testDel(self):
        self.tester.z
        self.assertEqual(self.tester.q, 1)
        # del_z removes q, not z itself.
        del self.tester.z
        self.assertEqual(hasattr(self.tester, "q"), 0)
        self.tester.x = 1
        del self.tester.x
        self.assertEqual(hasattr(self.tester, "x"), 0)
class PropertyAccessorTest(AccessorTest):
    """
    Tests for L{reflect.PropertyAccessor}, using L{PropertyAccessorTester}.
    """
    def setUp(self):
        self.tester = PropertyAccessorTester()
    def test_setWithDefaultValue(self):
        """
        If an attribute is present in the class, it can be retrieved by
        default.
        """
        self.assertEqual(self.tester.r, 0)
        # set_r stores into s, so reading r still yields the class default.
        self.tester.r = 1
        self.assertEqual(self.tester.r, 0)
        self.assertEqual(self.tester.s, 1)
    def test_getValueInDict(self):
        """
        The attribute value can be overriden by directly modifying the value in
        C{__dict__}.
        """
        self.tester.__dict__["r"] = 10
        self.assertEqual(self.tester.r, 10)
    def test_notYetInDict(self):
        """
        If a getter is defined on an attribute but without any default value,
        it raises C{AttributeError} when trying to access it.
        """
        self.assertRaises(AttributeError, getattr, self.tester, "x")
class LookupsTestCase(unittest.TestCase):
    """
    Tests for L{namedClass}, L{namedModule}, and L{namedAny}.
    """
    def test_namedClassLookup(self):
        """
        L{namedClass} should return the class object for the name it is passed.
        """
        self.assertIdentical(
            reflect.namedClass("twisted.python.reflect.Summer"), reflect.Summer
        )
    def test_namedModuleLookup(self):
        """
        L{namedModule} should return the module object for the name it is
        passed.
        """
        self.assertIdentical(reflect.namedModule("twisted.python.reflect"), reflect)
    def test_namedAnyPackageLookup(self):
        """
        L{namedAny} should return the package object for the name it is passed.
        """
        import twisted.python
        self.assertIdentical(reflect.namedAny("twisted.python"), twisted.python)
    def test_namedAnyModuleLookup(self):
        """
        L{namedAny} should return the module object for the name it is passed.
        """
        self.assertIdentical(reflect.namedAny("twisted.python.reflect"), reflect)
    def test_namedAnyClassLookup(self):
        """
        L{namedAny} should return the class object for the name it is passed.
        """
        self.assertIdentical(
            reflect.namedAny("twisted.python.reflect.Summer"), reflect.Summer
        )
    def test_namedAnyAttributeLookup(self):
        """
        L{namedAny} should return the object an attribute of a non-module,
        non-package object is bound to for the name it is passed.
        """
        # Note - not assertEqual because unbound method lookup creates a new
        # object every time. This is a foolishness of Python's object
        # implementation, not a bug in Twisted.
        self.assertEqual(
            reflect.namedAny("twisted.python.reflect.Summer.reallySet"),
            reflect.Summer.reallySet,
        )
    def test_namedAnySecondAttributeLookup(self):
        """
        L{namedAny} should return the object an attribute of an object which
        itself was an attribute of a non-module, non-package object is bound to
        for the name it is passed.
        """
        self.assertIdentical(
            reflect.namedAny("twisted.python.reflect.Summer.reallySet.__doc__"),
            reflect.Summer.reallySet.__doc__,
        )
    def test_importExceptions(self):
        """
        Exceptions raised by modules which L{namedAny} causes to be imported
        should pass through L{namedAny} to the caller.
        """
        self.assertRaises(
            ZeroDivisionError, reflect.namedAny, "twisted.test.reflect_helper_ZDE"
        )
        # Make sure that this behavior is *consistent* for 2.3, where there is
        # no post-failed-import cleanup
        self.assertRaises(
            ZeroDivisionError, reflect.namedAny, "twisted.test.reflect_helper_ZDE"
        )
        self.assertRaises(
            ValueError, reflect.namedAny, "twisted.test.reflect_helper_VE"
        )
        # Modules which themselves raise ImportError when imported should result in an ImportError
        self.assertRaises(
            ImportError, reflect.namedAny, "twisted.test.reflect_helper_IE"
        )
    def test_attributeExceptions(self):
        """
        If segments on the end of a fully-qualified Python name represents
        attributes which aren't actually present on the object represented by
        the earlier segments, L{namedAny} should raise an L{AttributeError}.
        """
        self.assertRaises(
            AttributeError, reflect.namedAny, "twisted.nosuchmoduleintheworld"
        )
        # ImportError behaves somewhat differently between "import
        # extant.nonextant" and "import extant.nonextant.nonextant", so test
        # the latter as well.
        self.assertRaises(
            AttributeError, reflect.namedAny, "twisted.nosuch.modulein.theworld"
        )
        self.assertRaises(
            AttributeError,
            reflect.namedAny,
            "twisted.python.reflect.Summer.nosuchattributeintheworld",
        )
    def test_invalidNames(self):
        """
        Passing a name which isn't a fully-qualified Python name to L{namedAny}
        should result in one of the following exceptions:
        - L{InvalidName}: the name is not a dot-separated list of Python objects
        - L{ObjectNotFound}: the object doesn't exist
        - L{ModuleNotFound}: the object doesn't exist and there is only one
          component in the name
        """
        err = self.assertRaises(
            reflect.ModuleNotFound, reflect.namedAny, "nosuchmoduleintheworld"
        )
        self.assertEqual(str(err), "No module named 'nosuchmoduleintheworld'")
        # This is a dot-separated list, but it isn't valid!
        err = self.assertRaises(
            reflect.ObjectNotFound, reflect.namedAny, "@#$@(#.!@(#!@#"
        )
        self.assertEqual(str(err), "'@#$@(#.!@(#!@#' does not name an object")
        err = self.assertRaises(
            reflect.ObjectNotFound, reflect.namedAny, "tcelfer.nohtyp.detsiwt"
        )
        self.assertEqual(str(err), "'tcelfer.nohtyp.detsiwt' does not name an object")
        err = self.assertRaises(reflect.InvalidName, reflect.namedAny, "")
        self.assertEqual(str(err), "Empty module name")
        # Leading, trailing, and doubled dots are all invalid names.
        for invalidName in [".twisted", "twisted.", "twisted..python"]:
            err = self.assertRaises(reflect.InvalidName, reflect.namedAny, invalidName)
            self.assertEqual(
                str(err),
                "name must be a string giving a '.'-separated list of Python "
                "identifiers, not %r" % (invalidName,),
            )
class ImportHooksLookupTests(LookupsTestCase):
    """
    Tests for lookup methods in the presence of L{ihooks}-style import hooks.
    Runs all of the tests from L{LookupsTestCase} after installing a custom
    import hook.
    """
    # NOTE(review): the ihooks module is Python 2 only (removed in Python 3),
    # so this class can only run on a py2 interpreter -- confirm target.
    def setUp(self):
        """
        Perturb the normal import behavior subtly by installing an import
        hook. No custom behavior is provided, but this adds some extra
        frames to the call stack, which L{namedAny} must be able to account
        for.
        """
        self.importer = ModuleImporter()
        self.importer.install()
    def tearDown(self):
        """
        Uninstall the custom import hook.
        """
        self.importer.uninstall()
# Tests for reflect.objgrep's reference-path reporting.  objgrep returns
# strings describing how the target is reachable: "[i]" list/tuple index,
# "{value}" dict key, ".attr" attribute, "()" weakref dereference.
class ObjectGrep(unittest.TestCase):
    def test_dictionary(self):
        """
        Test references search through a dictionnary, as a key or as a value.
        """
        o = object()
        d1 = {None: o}
        d2 = {o: None}
        self.assertIn("[None]", reflect.objgrep(d1, o, reflect.isSame))
        self.assertIn("{None}", reflect.objgrep(d2, o, reflect.isSame))
    def test_list(self):
        """
        Test references search through a list.
        """
        o = object()
        L = [None, o]
        self.assertIn("[1]", reflect.objgrep(L, o, reflect.isSame))
    def test_tuple(self):
        """
        Test references search through a tuple.
        """
        o = object()
        T = (o, None)
        self.assertIn("[0]", reflect.objgrep(T, o, reflect.isSame))
    def test_instance(self):
        """
        Test references search through an object attribute.
        """
        class Dummy:
            pass
        o = object()
        d = Dummy()
        d.o = o
        self.assertIn(".o", reflect.objgrep(d, o, reflect.isSame))
    def test_weakref(self):
        """
        Test references search through a weakref object.
        """
        class Dummy:
            pass
        o = Dummy()
        w1 = weakref.ref(o)
        self.assertIn("()", reflect.objgrep(w1, o, reflect.isSame))
    def test_boundMethod(self):
        """
        Test references search through method special attributes.
        """
        class Dummy:
            def dummy(self):
                pass
        o = Dummy()
        m = o.dummy
        # objgrep reports the py2-style ".im_self"/".im_class"/".im_func"
        # path labels even though the values are read via __self__/__func__.
        self.assertIn(".im_self", reflect.objgrep(m, m.__self__, reflect.isSame))
        self.assertIn(
            ".im_class", reflect.objgrep(m, m.__self__.__class__, reflect.isSame)
        )
        self.assertIn(".im_func", reflect.objgrep(m, m.__func__, reflect.isSame))
    def test_everything(self):
        """
        Test references search using complex set of objects.
        """
        class Dummy:
            def method(self):
                pass
        o = Dummy()
        D1 = {(): "baz", None: "Quux", o: "Foosh"}
        L = [None, (), D1, 3]
        T = (L, {}, Dummy())
        D2 = {0: "foo", 1: "bar", 2: T}
        i = Dummy()
        i.attr = D2
        m = i.method
        w = weakref.ref(m)
        # Path: weakref -> bound method self -> attr dict -> tuple -> list
        # -> dict key.
        self.assertIn(
            "().im_self.attr[2][0][2]{'Foosh'}", reflect.objgrep(w, o, reflect.isSame)
        )
    def test_depthLimit(self):
        """
        Test the depth of references search.
        """
        a = []
        b = [a]
        c = [a, b]
        d = [a, c]
        self.assertEqual(["[0]"], reflect.objgrep(d, a, reflect.isSame, maxDepth=1))
        self.assertEqual(
            ["[0]", "[1][0]"], reflect.objgrep(d, a, reflect.isSame, maxDepth=2)
        )
        self.assertEqual(
            ["[0]", "[1][0]", "[1][1][0]"],
            reflect.objgrep(d, a, reflect.isSame, maxDepth=3),
        )
    def test_deque(self):
        """
        Test references search through a deque object. Only for Python > 2.3.
        """
        o = object()
        D = deque()
        D.append(None)
        D.append(o)
        self.assertIn("[1]", reflect.objgrep(D, o, reflect.isSame))
    # deque is None only when collections.deque could not be imported
    # (ancient Pythons); skip the deque test in that case.
    if deque is None:
        test_deque.skip = "Deque not available"
# Tests for reflect.getClass on old-style and new-style classes.
class GetClass(unittest.TestCase):
    def testOld(self):
        class OldClass:
            pass
        old = OldClass()
        # On Python 2 the class of an old-style class is "classobj"/"class".
        self.assertIn(reflect.getClass(OldClass).__name__, ("class", "classobj"))
        self.assertEqual(reflect.getClass(old).__name__, "OldClass")
    def testNew(self):
        class NewClass(object):
            pass
        new = NewClass()
        self.assertEqual(reflect.getClass(NewClass).__name__, "type")
        self.assertEqual(reflect.getClass(new).__name__, "NewClass")
class Breakable(object):
    """
    An object whose string conversions can be made to raise RuntimeError on
    demand, for exercising safe_str/safe_repr error paths.
    """

    # Flip these on an instance (or subclass) to break the conversion.
    breakRepr = False
    breakStr = False

    def __str__(self):
        if not self.breakStr:
            return "<Breakable>"
        raise RuntimeError("str!")

    def __repr__(self):
        if not self.breakRepr:
            return "Breakable()"
        raise RuntimeError("repr!")
# A metaclass whose __name__ access can be made to raise, for exercising
# safe_repr/safe_str when even the class name is unobtainable.
class BrokenType(Breakable, type):
    breakName = False
    def get___name__(self):
        if self.breakName:
            raise RuntimeError("no name")
        return "BrokenType"
    __name__ = property(get___name__)
# Base class with a broken metaclass and broken str/repr, the worst case
# for safe_repr/safe_str.
class BTBase(Breakable, metaclass=BrokenType):
    breakRepr = True
    breakStr = True
# Accessing __class__ on an instance raises AttributeError (there is no
# 'not_class' attribute), so safe_* must fall back to type().
class NoClassAttr(Breakable):
    __class__ = property(lambda x: x.not_class)
class SafeRepr(unittest.TestCase):
    """
    Tests for L{reflect.safe_repr} function.
    """
    def test_workingRepr(self):
        """
        L{reflect.safe_repr} produces the same output as C{repr} on a working
        object.
        """
        x = [1, 2, 3]
        self.assertEqual(reflect.safe_repr(x), repr(x))
    def test_brokenRepr(self):
        """
        L{reflect.safe_repr} returns a string with class name, address, and
        traceback when the repr call failed.
        """
        b = Breakable()
        b.breakRepr = True
        bRepr = reflect.safe_repr(b)
        self.assertIn("Breakable instance at 0x", bRepr)
        # Check that the file is in the repr, but without the extension as it
        # can be .py/.pyc
        self.assertIn(os.path.splitext(__file__)[0], bRepr)
        self.assertIn("RuntimeError: repr!", bRepr)
    def test_brokenStr(self):
        """
        L{reflect.safe_repr} isn't affected by a broken C{__str__} method.
        """
        b = Breakable()
        b.breakStr = True
        self.assertEqual(reflect.safe_repr(b), repr(b))
    def test_brokenClassRepr(self):
        # Even a class whose metaclass breaks repr must not raise.
        class X(BTBase):
            breakRepr = True
        reflect.safe_repr(X)
        reflect.safe_repr(X())
    def test_unsignedID(self):
        """
        L{unsignedID} is used to print ID of the object in case of error, not
        standard ID value which can be negative.
        """
        class X(BTBase):
            breakRepr = True
        ids = {X: 100}
        def fakeID(obj):
            try:
                return ids[obj]
            except (TypeError, KeyError):
                return id(obj)
        # Restore the original ID function when the test finishes.
        self.addCleanup(util.setIDFunction, util.setIDFunction(fakeID))
        xRepr = reflect.safe_repr(X)
        # 100 rendered in hex is 0x64.
        self.assertIn("0x64", xRepr)
    def test_brokenClassStr(self):
        class X(BTBase):
            breakStr = True
        reflect.safe_repr(X)
        reflect.safe_repr(X())
    def test_brokenClassAttribute(self):
        """
        If an object raises an exception when accessing its C{__class__}
        attribute, L{reflect.safe_repr} uses C{type} to retrieve the class
        object.
        """
        b = NoClassAttr()
        b.breakRepr = True
        bRepr = reflect.safe_repr(b)
        self.assertIn("NoClassAttr instance at 0x", bRepr)
        self.assertIn(os.path.splitext(__file__)[0], bRepr)
        self.assertIn("RuntimeError: repr!", bRepr)
    def test_brokenClassNameAttribute(self):
        """
        If a class raises an exception when accessing its C{__name__} attribute
        B{and} when calling its C{__str__} implementation, L{reflect.safe_repr}
        returns 'BROKEN CLASS' instead of the class name.
        """
        class X(BTBase):
            breakName = True
        xRepr = reflect.safe_repr(X())
        self.assertIn("<BROKEN CLASS AT 0x", xRepr)
        self.assertIn(os.path.splitext(__file__)[0], xRepr)
        self.assertIn("RuntimeError: repr!", xRepr)
class SafeStr(unittest.TestCase):
    """
    Tests for L{reflect.safe_str} function.
    """
    def test_workingStr(self):
        x = [1, 2, 3]
        self.assertEqual(reflect.safe_str(x), str(x))
    def test_brokenStr(self):
        # Must not raise even when __str__ does.
        b = Breakable()
        b.breakStr = True
        reflect.safe_str(b)
    def test_brokenRepr(self):
        # Must not raise even when __repr__ does.
        b = Breakable()
        b.breakRepr = True
        reflect.safe_str(b)
    def test_brokenClassStr(self):
        class X(BTBase):
            breakStr = True
        reflect.safe_str(X)
        reflect.safe_str(X())
    def test_brokenClassRepr(self):
        class X(BTBase):
            breakRepr = True
        reflect.safe_str(X)
        reflect.safe_str(X())
    def test_brokenClassAttribute(self):
        """
        If an object raises an exception when accessing its C{__class__}
        attribute, L{reflect.safe_str} uses C{type} to retrieve the class
        object.
        """
        b = NoClassAttr()
        b.breakStr = True
        bStr = reflect.safe_str(b)
        self.assertIn("NoClassAttr instance at 0x", bStr)
        self.assertIn(os.path.splitext(__file__)[0], bStr)
        self.assertIn("RuntimeError: str!", bStr)
    def test_brokenClassNameAttribute(self):
        """
        If a class raises an exception when accessing its C{__name__} attribute
        B{and} when calling its C{__str__} implementation, L{reflect.safe_str}
        returns 'BROKEN CLASS' instead of the class name.
        """
        class X(BTBase):
            breakName = True
        xStr = reflect.safe_str(X())
        self.assertIn("<BROKEN CLASS AT 0x", xStr)
        self.assertIn(os.path.splitext(__file__)[0], xStr)
        self.assertIn("RuntimeError: str!", xStr)
class FilenameToModule(unittest.TestCase):
    """
    Test L{reflect.filenameToModuleName} detection.
    """

    def test_directory(self):
        """
        A package directory maps to its basename, with or without a trailing
        path separator.
        """
        path = os.path.join("twisted", "test")
        self.assertEqual(reflect.filenameToModuleName(path), "test")
        self.assertEqual(
            reflect.filenameToModuleName(path + os.path.sep), "test"
        )

    def test_file(self):
        """
        A source file maps to its module name, without the extension.
        """
        path = os.path.join("twisted", "test", "test_reflect.py")
        self.assertEqual(reflect.filenameToModuleName(path), "test_reflect")
class FullyQualifiedNameTests(unittest.TestCase):
    """
    Test for L{reflect.fullyQualifiedName}.
    """

    def _checkFullyQualifiedName(self, obj, expected):
        """
        Assert that the fully qualified name of C{obj} equals C{expected}.
        """
        self.assertEqual(reflect.fullyQualifiedName(obj), expected)

    def test_package(self):
        """
        Packages and subpackages are named by their full dotted path.
        """
        import twisted
        import twisted.python

        self._checkFullyQualifiedName(twisted, "twisted")
        self._checkFullyQualifiedName(twisted.python, "twisted.python")

    def test_module(self):
        """
        A module inside a package is named by package plus module name.
        """
        import twisted.trial.unittest

        self._checkFullyQualifiedName(reflect, "twisted.python.reflect")
        self._checkFullyQualifiedName(
            twisted.trial.unittest, "twisted.trial.unittest"
        )

    def test_class(self):
        """
        A class is named by its module plus the class name.
        """
        self._checkFullyQualifiedName(
            reflect.Settable, "twisted.python.reflect.Settable"
        )

    def test_function(self):
        """
        A function is named by its module plus the function name.
        """
        self._checkFullyQualifiedName(
            reflect.fullyQualifiedName, "twisted.python.reflect.fullyQualifiedName"
        )

    def test_method(self):
        """
        A method is named by its module, class and method name, whether
        accessed on the class or on an instance.
        """
        expected = "twisted.python.reflect.PropertyAccessor.reallyDel"
        self._checkFullyQualifiedName(reflect.PropertyAccessor.reallyDel, expected)
        self._checkFullyQualifiedName(reflect.PropertyAccessor().reallyDel, expected)
class DeprecationTestCase(unittest.TestCase):
    """
    Test deprecations in twisted.python.reflect
    """

    def test_macro(self):
        """
        L{reflect.macro} is deprecated since Twisted 8.2.0 but still works.
        """
        deprecatedSince = Version("Twisted", 8, 2, 0)
        result = self.callDeprecated(
            deprecatedSince, reflect.macro, "test", __file__, "test = 1"
        )
        self.assertEqual(result, 1)
|
# Copyright (c) 2006 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.python.zshcomp
"""
import os, os.path
from io import StringIO
from twisted.trial import unittest
from twisted.python import zshcomp, usage
class ZshcompTestCase(unittest.TestCase):
    """
    Tests for the zsh completion function builder in twisted/python/zshcomp.py
    """

    def test_buildAll(self):
        """
        Build all the completion functions for twisted commands - no errors
        should be raised
        """
        dirname = self.mktemp()
        os.mkdir(dirname)
        skippedCmds = [x[0] for x in zshcomp.makeCompFunctionFiles(dirname)]
        # verify a zsh function was created for each twisted command
        for info in zshcomp.generateFor:
            if info[0] in skippedCmds:
                continue
            funcPath = os.path.join(dirname, "_" + info[0])
            self.assertTrue(os.path.exists(funcPath))

    def test_accumulateMetadata(self):
        """
        Test that the zsh_* variables you can place on Option classes gets
        picked up correctly
        """
        opts = TestOptions2()
        ag = zshcomp.ArgumentsGenerator("dummy_cmd", opts, "dummy_value")
        # Metadata dicts accumulate across the class hierarchy, with the
        # subclass entries overriding the base class.
        altArgDescr = TestOptions.zsh_altArgDescr.copy()
        altArgDescr.update(TestOptions2.zsh_altArgDescr)
        actionDescr = TestOptions.zsh_actionDescr.copy()
        actionDescr.update(TestOptions2.zsh_actionDescr)
        # assertEqual replaces the deprecated failUnlessEquals alias.
        self.assertEqual(ag.altArgDescr, altArgDescr)
        self.assertEqual(ag.actionDescr, actionDescr)
        self.assertEqual(ag.multiUse, TestOptions.zsh_multiUse)
        self.assertEqual(ag.mutuallyExclusive, TestOptions.zsh_mutuallyExclusive)
        self.assertEqual(ag.actions, TestOptions.zsh_actions)
        self.assertEqual(ag.extras, TestOptions.zsh_extras)

    def test_accumulateAdditionalOptions(self):
        """
        Test that we pick up options that are only defined by having an
        appropriately named method on your Options class,
        e.g. def opt_foo(self, foo)
        """
        opts = TestOptions2()
        ag = zshcomp.ArgumentsGenerator("dummy_cmd", opts, "dummy_value")
        self.assertTrue("nocrash" in ag.optFlags_d and "nocrash" in ag.optAll_d)
        self.assertTrue("difficulty" in ag.optParams_d and "difficulty" in ag.optAll_d)

    def test_verifyZshNames(self):
        """
        Test that using a parameter/flag name that doesn't exist
        will raise an error
        """
        class TmpOptions(TestOptions2):
            zsh_actions = {"detaill": "foo"}  # Note typo of detail

        opts = TmpOptions()
        self.assertRaises(
            ValueError, zshcomp.ArgumentsGenerator, "dummy_cmd", opts, "dummy_value"
        )

    def test_zshCode(self):
        """
        Generate a completion function, and test the textual output
        against a known correct output
        """
        cmd_name = "testprog"
        opts = CodeTestOptions()
        f = StringIO()
        b = zshcomp.Builder(cmd_name, opts, f)
        b.write()
        # io.StringIO has no reset() method (that was the old cStringIO
        # API); rewind with seek(0) before reading the generated code back.
        f.seek(0)
        self.assertEqual(f.read(), testOutput1)

    def test_skipBuild(self):
        """
        Test that makeCompFunctionFiles skips building for commands whose
        script module cannot be imported
        """
        generateFor = [("test_cmd", "no.way.your.gonna.import.this", "Foo")]
        skips = zshcomp.makeCompFunctionFiles("out_dir", generateFor, {})
        # no exceptions should be raised. hooray.
        self.assertEqual(len(skips), 1)
        self.assertEqual(len(skips[0]), 2)
        self.assertEqual(skips[0][0], "test_cmd")
        self.assertTrue(isinstance(skips[0][1], ImportError))
        # Flush only the logged ImportError; the original spuriously passed
        # `self` as an extra "error type" argument.
        self.flushLoggedErrors(ImportError)
class TestOptions(usage.Options):
    """
    Command-line options for an imaginary game
    """

    # Flag options: [long name, short name, description].
    optFlags = [
        ["fokker", "f", "Select the Fokker Dr.I as your dogfighter aircraft"],
        ["albatros", "a", "Select the Albatros D-III as your dogfighter aircraft"],
        ["spad", "s", "Select the SPAD S.VII as your dogfighter aircraft"],
        ["bristol", "b", "Select the Bristol Scout as your dogfighter aircraft"],
        ["physics", "p", "Enable secret Twisted physics engine"],
        ["jam", "j", "Enable a small chance that your machine guns will jam!"],
        ["verbose", "v", "Verbose logging (may be specified more than once)"],
    ]
    # Parameter options: [long name, short name, description, default value].
    optParameters = [
        ["pilot-name", None, "What's your name, Ace?", "Manfred von Richthofen"],
        ["detail", "d", "Select the level of rendering detail (1-5)", "3"],
    ]
    # zsh metadata consumed by zshcomp.ArgumentsGenerator:
    # replacement argument descriptions, keyed by option name.
    zsh_altArgDescr = {"physics": "Twisted-Physics", "detail": "Rendering detail level"}
    # descriptions shown for completion actions.
    zsh_actionDescr = {"detail": "Pick your detail"}
    # options that may be given more than once.
    zsh_multiUse = ["verbose"]
    # groups of options that may not be combined.
    zsh_mutuallyExclusive = [["fokker", "albatros", "spad", "bristol"]]
    # completion actions for option values.
    zsh_actions = {"detail": "(1 2 3 4 5)"}
    # extra completions for positional arguments.
    zsh_extras = [":saved game file to load:_files"]
class TestOptions2(TestOptions):
    """
    Extend the options and zsh metadata provided by TestOptions. zshcomp must
    accumulate options and metadata from all classes in the hierarchy so this
    is important for testing
    """

    # Additional flags/parameters merged with those of TestOptions.
    optFlags = [["no-stalls", None, "Turn off the ability to stall your aircraft"]]
    optParameters = [
        ["reality-level", None, "Select the level of physics reality (1-5)", "5"]
    ]
    # Subclass zsh metadata; accumulated on top of the TestOptions dicts.
    zsh_altArgDescr = {"no-stalls": "Can't stall your plane"}
    zsh_actionDescr = {"reality-level": "Physics reality level"}

    # Options defined only via the opt_* method naming convention; zshcomp
    # must discover these as well as the declarative optFlags/optParameters.
    def opt_nocrash(self):
        """Select that you can't crash your plane"""

    def opt_difficulty(self, difficulty):
        """How tough are you? (1-10)"""
def _accuracyAction():
return "(1 2 3)"
class CodeTestOptions(usage.Options):
    """
    Command-line options for an imaginary program
    """

    # Flag options: [long name, short name, description].
    optFlags = [
        ["color", "c", "Turn on color output"],
        ["gray", "g", "Turn on gray-scale output"],
        ["verbose", "v", "Verbose logging (may be specified more than once)"],
    ]
    # Parameter options: [long name, short name, description, default value].
    optParameters = [
        ["optimization", None, "Select the level of optimization (1-5)", "5"],
        ["accuracy", "a", "Select the level of accuracy (1-3)", "3"],
    ]
    # zsh metadata; exercised verbatim by ZshcompTestCase.test_zshCode.
    zsh_altArgDescr = {"color": "Color on", "optimization": "Optimization level"}
    zsh_actionDescr = {"optimization": "Optimization?", "accuracy": "Accuracy?"}
    zsh_multiUse = ["verbose"]
    zsh_mutuallyExclusive = [["color", "gray"]]
    # An action may also be a callable returning the action string.
    zsh_actions = {"optimization": "(1 2 3 4 5)", "accuracy": _accuracyAction}
    zsh_extras = [":output file:_files"]
# Expected zsh completion-function text for CodeTestOptions; compared
# verbatim against the Builder output in ZshcompTestCase.test_zshCode.
testOutput1 = """#compdef testprog
_arguments -s -A "-*" \\
':output file:_files' \\
'(--accuracy)-a[3]:Accuracy?:(1 2 3)' \\
'(-a)--accuracy=[3]:Accuracy?:(1 2 3)' \\
'(--gray -g --color)-c[Color on]' \\
'(--gray -g -c)--color[Color on]' \\
'(--color -c --gray)-g[Turn on gray-scale output]' \\
'(--color -c -g)--gray[Turn on gray-scale output]' \\
'--help[Display this help and exit.]' \\
'--optimization=[Optimization level]:Optimization?:(1 2 3 4 5)' \\
'*-v[Verbose logging (may be specified more than once)]' \\
'*--verbose[Verbose logging (may be specified more than once)]' \\
'--version[version]' \\
&& return 0
"""
|
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the output generated by trial.
"""
import os, io
from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
def runTrial(*args):
    """
    Run trial with the given command-line arguments and return the text the
    runner wrote to its output stream.

    @param args: command-line argument strings for trial.
    @return: the captured runner output as a string.
    """
    from twisted.trial import reporter

    config = trial.Options()
    config.parseOptions(args)
    output = io.StringIO()
    myRunner = runner.TrialRunner(
        reporter.VerboseTextReporter,
        stream=output,
        workingDirectory=config["temp-directory"],
    )
    suite = trial._getSuite(config)
    # Only the textual output matters here; the result object was previously
    # bound to an unused local.
    myRunner.run(suite)
    return output.getvalue()
class TestImportErrors(packages.SysPathManglingTest):
    """
    Run trial as though from the command line and verify that its textual
    output matches expectations.
    """

    debug = False
    parent = "_testImportErrors"

    def runTrial(self, *args):
        return runTrial("--temp-directory", self.mktemp(), *args)

    def _print(self, stuff):
        print(stuff)
        return stuff

    def failUnlessIn(self, container, containee, *args, **kwargs):
        # Argument order flipped relative to the base class so this can be
        # chained as a callback; the container is passed through.
        super(TestImportErrors, self).failUnlessIn(
            containee, container, *args, **kwargs
        )
        return container

    def failIfIn(self, container, containee, *args, **kwargs):
        # Same callback-friendly argument order as failUnlessIn above.
        super(TestImportErrors, self).failIfIn(containee, container, *args, **kwargs)
        return container

    def test_trialRun(self):
        self.runTrial()

    def test_nonexistentModule(self):
        out = self.runTrial("twisted.doesntexist")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "twisted.doesntexist")
        return out

    def test_nonexistentPackage(self):
        out = self.runTrial("doesntexist")
        self.failUnlessIn(out, "doesntexist")
        self.failUnlessIn(out, "ModuleNotFound")
        self.failUnlessIn(out, "[ERROR]")
        return out

    def test_nonexistentPackageWithModule(self):
        out = self.runTrial("doesntexist.barney")
        self.failUnlessIn(out, "doesntexist.barney")
        self.failUnlessIn(out, "ObjectNotFound")
        self.failUnlessIn(out, "[ERROR]")
        return out

    def test_badpackage(self):
        out = self.runTrial("badpackage")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "badpackage")
        self.failIfIn(out, "IOError")
        return out

    def test_moduleInBadpackage(self):
        out = self.runTrial("badpackage.test_module")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "badpackage.test_module")
        self.failIfIn(out, "IOError")
        return out

    def test_badmodule(self):
        out = self.runTrial("package.test_bad_module")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "package.test_bad_module")
        self.failIfIn(out, "IOError")
        self.failIfIn(out, "<module ")
        return out

    def test_badimport(self):
        out = self.runTrial("package.test_import_module")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "package.test_import_module")
        self.failIfIn(out, "IOError")
        self.failIfIn(out, "<module ")
        return out

    def test_recurseImport(self):
        out = self.runTrial("package")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "test_bad_module")
        self.failUnlessIn(out, "test_import_module")
        self.failIfIn(out, "<module ")
        self.failIfIn(out, "IOError")
        return out

    def test_recurseImportErrors(self):
        out = self.runTrial("package2")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "package2")
        self.failUnlessIn(out, "test_module")
        self.failUnlessIn(out, "No module named frotz")
        self.failIfIn(out, "<module ")
        self.failIfIn(out, "IOError")
        return out

    def test_nonRecurseImportErrors(self):
        out = self.runTrial("-N", "package2")
        self.failUnlessIn(out, "[ERROR]")
        self.failUnlessIn(out, "No module named frotz")
        self.failIfIn(out, "<module ")
        return out

    def test_regularRun(self):
        out = self.runTrial("package.test_module")
        self.failIfIn(out, "[ERROR]")
        self.failIfIn(out, "IOError")
        self.failUnlessIn(out, "OK")
        self.failUnlessIn(out, "PASSED (successes=1)")
        return out

    def test_filename(self):
        self.mangleSysPath(self.oldPath)
        out = self.runTrial(os.path.join(self.parent, "package", "test_module.py"))
        self.failIfIn(out, "[ERROR]")
        self.failIfIn(out, "IOError")
        self.failUnlessIn(out, "OK")
        self.failUnlessIn(out, "PASSED (successes=1)")
        return out

    def test_dosFile(self):
        ## XXX -- not really an output test, more of a script test
        self.mangleSysPath(self.oldPath)
        out = self.runTrial(os.path.join(self.parent, "package", "test_dos_module.py"))
        self.failIfIn(out, "[ERROR]")
        self.failIfIn(out, "IOError")
        self.failUnlessIn(out, "OK")
        self.failUnlessIn(out, "PASSED (successes=1)")
        return out
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""I hold HTML generation helpers.
"""
from twisted.python import log
# t.w imports
from twisted.web import resource
import traceback, string
from io import StringIO
from microdom import escape
def PRE(text):
    """
    Wrap <pre> tags around some text and HTML-escape it.
    """
    escaped = escape(text)
    return "<pre>" + escaped + "</pre>"
def UL(lst):
    """
    Render each element of C{lst} as an item in an HTML unordered list and
    return the complete list markup as a string.
    """
    items = ["<li> %s</li>\n" % element for element in lst]
    return "<ul>\n" + "".join(items) + "</ul>"
def linkList(lst):
    """
    Render a sequence of C{(href, label)} pairs as an HTML unordered list of
    hyperlinks and return the markup as a string.
    """
    items = ['<li> <a href="%s">%s</a></li>\n' % pair for pair in lst]
    return "<ul>\n" + "".join(items) + "</ul>"
def output(func, *args, **kw):
    """output(func, *args, **kw) -> html string

    Either return the result of a function (which presumably returns an
    HTML-legal string) or a sparse HTMLized error message and a message
    in the server log.
    """
    try:
        return func(*args, **kw)
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # propagate instead of being rendered as an HTML error page.
        log.msg("Error calling %r:" % (func,))
        log.err()
        return PRE("An error occurred.")
|