Dataset columns (all string-valued, with observed length ranges):

rem      1 - 322k chars    (code removed by the commit)
add      0 - 2.05M chars   (code added by the commit)
context  4 - 228k chars    (surrounding source context)
meta     156 - 215 chars   (commit hash and source file path)

Each record below lists rem, add, context, and meta in that order.
'`' : 49, '\\' : 51, ' :' : 59,
'`' : 49, '\\' : 51, ',' : 59,
def _get_key_value(self, keyval): # A - Z / a - z _char_key = {'a' : 38, 'b' : 56, 'c' : 54, 'd' : 40, 'e' : 26, 'f' : 41, 'g' : 42, 'h' : 43, 'i' : 31, 'j' : 44, 'k' : 45, 'l' : 46, 'm' : 58, 'n' : 57, 'o' : 32, 'p' : 33, 'q' : 24, 'r' : 27, 's' : 39, 't' : 28, 'u' : 30, 'v' : 55, 'w' : 25, 'x' : 53, 'y' : 29, 'z' : 52} # 0 - 9 _digit_key = {'0' : 19, '1' : 10, '2' : 11, '3' : 12, '4' : 13, '5' : 14, '6' : 15, '7' : 16, '8' : 17, '9' : 18} # Symbols _symbol_key_val = {'-' : 20, '=' : 21, '[' : 34, ']' : 35, ';' : 47, '\'' : 48, '`' : 49, '\\' : 51, ' :' : 59, '.' : 60, '/' : 61, ' ' : 65} _symbol_shift_key_val = {'!' : 10, '@' : 11, '#' : 12, '$' : 13, '%' : 14, '^' : 15, '&' : 16, '*' : 17, '(' : 18, ')' : 19, '_' : 20, '+' : 21, '{' : 34, '}' : 35, ':' : 47, '"' :48, '~' : 49, '|' : 51, '<' : 59, '>' : 60, '?' : 61}
ef98c102f96c2679122dad78a63e493f90a64fef /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11266/ef98c102f96c2679122dad78a63e493f90a64fef/keypress_actions.py
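This first record repairs a typo'd dictionary key in an LDTP keycode table: on a US X11 layout, unshifted keycode 59 is the comma key, while ':' is already produced by shift plus keycode 47 (';') in the shift map, so ' :' was a stray entry. A minimal sketch of how such a table resolves a character to a (keycode, shift) pair; the helper name and the abridged maps are illustrative, not from the file:

    def to_keycode(char):
        # Abridged US-layout tables; keycode values follow the record.
        symbol_key = {',': 59, '.': 60, '/': 61, ';': 47}
        shift_symbol_key = {':': 47, '<': 59, '>': 60, '?': 61}
        if char in symbol_key:
            return symbol_key[char], False   # (keycode, needs shift)
        if char in shift_symbol_key:
            return shift_symbol_key[char], True
        raise KeyError(char)

    print(to_keycode(','))   # (59, False)
    print(to_keycode(':'))   # (47, True)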
for child in self._list_objects (child_obj):
for child in self._list_objects(child_obj):
def selectitem(self, window_name, object_name, item_name): ''' Select combo box / layered pane item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
56b39c18988dc66883bbb9d9a03c4460dcb1e53c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11266/56b39c18988dc66883bbb9d9a03c4460dcb1e53c/combo_box.py
for child in self._list_objects (child_obj):
for child in self._list_objects(child_obj):
def selectindex(self, window_name, object_name, item_index): ''' Select combo box item / layered pane based on index @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_index: Item index to select @type object_name: integer
56b39c18988dc66883bbb9d9a03c4460dcb1e53c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11266/56b39c18988dc66883bbb9d9a03c4460dcb1e53c/combo_box.py
for child in self._list_objects (child_obj):
for child in self._list_objects(child_obj):
def getallitem(self, window_name, object_name): ''' Select combo box item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string
56b39c18988dc66883bbb9d9a03c4460dcb1e53c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11266/56b39c18988dc66883bbb9d9a03c4460dcb1e53c/combo_box.py
for child in self._list_objects (child_obj):
for child in self._list_objects(child_obj):
def verifyselect(self, window_name, object_name, item_name): ''' Verify the item selected in combo box @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string
56b39c18988dc66883bbb9d9a03c4460dcb1e53c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11266/56b39c18988dc66883bbb9d9a03c4460dcb1e53c/combo_box.py
return HttpResponse(json.dumps(response),
return HttpResponse(json.dumps(response, indent=2),
def geotag(request): """ accepts a block of text, extracts addresses, locations and places and geocodes them. """ # XXX this is very brutal and wacky looking... # it re-uses as much of the existing way of doing things # as possible without regard to time costs or instanity of # interface. Once this has a more clear form, a more # optimized way of attacking this could be devised if needed. text = request.REQUEST.get('q', '').strip() pre = '<geotagger:location>' post = '</geotagger:location>' text = tag_addresses(text, pre=pre, post=post) text = location_tagger(pre=pre, post=post)(text) text = place_tagger(pre=pre, post=post)(text) all_pat = re.compile('%s(.*?)%s' % (pre, post)) results = [] all_locations = [] for loc in all_pat.findall(text): try: all_locations.append(loc) results += _build_geocoder_results(loc) except DoesNotExist: pass response = {'locations': results, 'searched': all_locations} return HttpResponse(json.dumps(response), mimetype="application/json")
506a796eb4d1609d8803ee728b37f268026c7749 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/506a796eb4d1609d8803ee728b37f268026c7749/views.py
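The geotag view's extraction strategy is worth spelling out: each tagger wraps its hits in sentinel tags, and a single non-greedy regex then pulls every tagged span back out. A self-contained sketch of that round trip (the sample text is made up):

    import re

    pre = '<geotagger:location>'
    post = '</geotagger:location>'
    text = 'Fire reported at %s123 Main St., Boston%s late Tuesday.' % (pre, post)

    all_pat = re.compile('%s(.*?)%s' % (pre, post))
    print(all_pat.findall(text))   # ['123 Main St., Boston']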
Block.objects.exclude(right_city=SHORT_NAME.upper()).exclude(left_city=SHORT_NAME.upper()).delete()
Block.objects.exclude(right_city=settings.SHORT_NAME.upper()).exclude(left_city=settings.SHORT_NAME.upper()).delete()
def update_block_numbers(): Block.objects.exclude(right_city=SHORT_NAME.upper()).exclude(left_city=SHORT_NAME.upper()).delete() for b in Block.objects.all(): (from_num, to_num) = make_block_numbers(b.left_from_num, b.left_to_num, b.right_from_num, b.right_to_num) if b.from_num != from_num and b.to_num != to_num: b.from_num = from_num b.to_num = to_num b.save()
4cf41664f1cab2cac49f504dd1e5f107a11cf172 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/4cf41664f1cab2cac49f504dd1e5f107a11cf172/fix_block_numbers.py
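The fix_block_numbers record repairs a NameError: SHORT_NAME was referenced as a bare name but lives on the Django settings object. A hedged sketch of the corrected access; the Block import path is assumed from ebpub's layout:

    from django.conf import settings
    from ebpub.streets.models import Block   # assumed import path

    city = settings.SHORT_NAME.upper()
    Block.objects.exclude(right_city=city).exclude(left_city=city).delete()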
item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000")
item.item_date = datetime.datetime(*e.updated_parsed[:6])
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000") item.pub_date = datetime.datetime(*e.updated_parsed[:6]) try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
66766fe0d86caeee8131276221d8f8b09ce87754 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/66766fe0d86caeee8131276221d8f8b09ce87754/add_events.py
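Two date paths appear in this add_events record: dtstart arrives as a fixed-format string, parsed with strptime (the trailing '+0000' is matched literally by the format), while pub_date is built from feedparser's pre-parsed struct_time. A runnable sketch of both, with illustrative values:

    import datetime
    import time

    item_date = datetime.datetime.strptime('2010-06-01 18:00:00 +0000',
                                           '%Y-%m-%d %H:%M:%S +0000')

    updated_parsed = time.gmtime()                    # feedparser supplies this
    pub_date = datetime.datetime(*updated_parsed[:6])
    print(item_date, pub_date)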
try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
def main(argv=None): url = 'http://calendar.boston.com/search?acat=&cat=&commit=Search&new=n&rss=1&search=true&sort=0&srad=20&srss=50&ssrss=5&st=event&st_select=any&svt=text&swhat=&swhen=today&swhere=&trim=1' schema = 'events' try: schema = Schema.objects.get(slug=schema) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema sys.exit(0) f = feedparser.parse(url) geocoder = SmartGeocoder() for e in f.entries: try: item = NewsItem.objects.get(title=e.title, description=e.description) except NewsItem.DoesNotExist: item = NewsItem() item.schema = schema item.title = e.title item.description = e.description item.url = e.link item.location_name = e['x-calconnect-street'] item.item_date = datetime.datetime.strptime(e.dtstart, "%Y-%m-%d %H:%M:%S +0000") item.pub_date = datetime.datetime(*e.updated_parsed[:6]) try: add = geocoder.geocode(item.location_name) item.location = add['point'] item.block = add['block'] except: pass item.save() print "Added: %s" % item.title
66766fe0d86caeee8131276221d8f8b09ce87754 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/66766fe0d86caeee8131276221d8f8b09ce87754/add_events.py
status = "Updated"
status = "updated"
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
49bc1a4bf4bb16d3f2ed1c01f930c1c22416dd8d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/49bc1a4bf4bb16d3f2ed1c01f930c1c22416dd8d/add_events.py
status = "Added"
status = "added"
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
49bc1a4bf4bb16d3f2ed1c01f930c1c22416dd8d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/49bc1a4bf4bb16d3f2ed1c01f930c1c22416dd8d/add_events.py
summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
15524532af524afea969a9afaffe2a98105d5777 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/15524532af524afea969a9afaffe2a98105d5777/seeclickfix_retrieval.py
kwargs.update(dict( description=list_record['summary_detail']['value'], location_name=location_name, location=location, ))
summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] content = address_re.sub('', content) content = rating_re.sub('', content) kwargs.update(dict(description=content, location=location, ))
def save(self, old_record, list_record, detail_record): kwargs = self.pk_fields(list_record) summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
15524532af524afea969a9afaffe2a98105d5777 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/15524532af524afea969a9afaffe2a98105d5777/seeclickfix_retrieval.py
block = location = None if 'location' not in kwargs:
block = kwargs.get('block') location = kwargs.get('location') location_name = kwargs.get('location_name') assert location or location_name, "At least one of location or location_name must be provided" if location is None:
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
5a90202aeded824785a2f1fea2c81c4c232757b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/5a90202aeded824785a2f1fea2c81c4c232757b4/newsitem_list_detail.py
location=kwargs.get('location', location), location_name=kwargs['location_name'],
location=location, location_name=location_name,
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
5a90202aeded824785a2f1fea2c81c4c232757b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/5a90202aeded824785a2f1fea2c81c4c232757b4/newsitem_list_detail.py
block=kwargs.get('block', block),
block=block,
def create_newsitem(self, attributes, **kwargs): """ Creates and saves a NewsItem with the given kwargs. Returns the new NewsItem.
5a90202aeded824785a2f1fea2c81c4c232757b4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/5a90202aeded824785a2f1fea2c81c4c232757b4/newsitem_list_detail.py
def __init__(self, shapefile, city=None, layer_id=0):
def __init__(self, shapefile, city=None, layer_id=0, encoding='utf8', verbose=False): self.verbose = verbose self.encoding = encoding
def __init__(self, shapefile, city=None, layer_id=0): ds = DataSource(shapefile) self.layer = ds[layer_id] self.city = city and city or Metro.objects.get_current().name self.fcc_pat = re.compile('^(' + '|'.join(VALID_FCC_PREFIXES) + ')\d$')
a3554d50609f678e201e43c5b85d34d74b5a60e0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/a3554d50609f678e201e43c5b85d34d74b5a60e0/blocks.py
def install_aggdraw(options): """ workaround for broken aggdraw on certain platforms, may require additional fixes for 64 bit plaforms, unclear. """ os.chdir(options.env_root) sh('env CFLAGS=-fpermissive %s/bin/pip install aggdraw' % options.env_root)
eafcbf64eb345018f0302fcc926a6091cc3cca83 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/eafcbf64eb345018f0302fcc926a6091cc3cca83/pavement.py
response = TileResponse(render_tile(layername, z, x, y, extension=extension))
response = TileResponse(render_tile(layername, z, x, y, extension='png'))
def get_tile(request, version, layername, z, x, y, extension='png'): 'Returns a map tile in the requested format' z, x, y = int(z), int(x), int(y) response = TileResponse(render_tile(layername, z, x, y, extension=extension)) return response(extension)
fd503b650f206ae092c102019ef2469e91934650 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/fd503b650f206ae092c102019ef2469e91934650/views.py
self.params['is_multi'] = False geom_type = value.geom_type.upper()
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
cd0c9da3738122a2c01aa6f8bfc338e309ad332e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/cd0c9da3738122a2c01aa6f8bfc338e309ad332e/admin.py
self.params['geom_type'] = OGRGeomType(value.geom_type) if geom_type == 'LINESTRING': self.params['is_linestring'] = True elif geom_type == 'POLYGON': self.params['is_polygon'] = True elif geom_type == 'MULTIPOLYGON': self.params['is_polygon'] = True self.params['is_multi'] = False elif geom_type == 'POINT': self.params['is_point'] = True elif geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'GEOMETRYCOLLECTION'): self.params['is_collection']=True if value.geom_type.upper() == 'GEOMETRYCOLLECTION': self.params['collection_type'] = 'Any' else: self.params['collection_type'] = OGRGeomType(value.geom_type.upper().replace('MULTI', ''))
self.params['geom_type'] = OGRGeomType(value.geom_type) if value.geom_type.upper() in ('LINESTRING', 'MULTILINESTRING'): self.params['is_linestring'] = True elif value.geom_type.upper() in ('POLYGON', 'MULTIPOLYGON'): self.params['is_polygon'] = True elif value.geom_type.upper() in ('POINT', 'MULTIPOINT'): self.params['is_point'] = True if value.geom_type.upper() in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION'): self.params['is_collection']=True if value.geom_type.upper() == 'GEOMETRYCOLLECTION': self.params['collection_type'] = 'Any' else: self.params['collection_type'] = OGRGeomType(value.geom_type.upper().replace('MULTI', ''))
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
cd0c9da3738122a2c01aa6f8bfc338e309ad332e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/cd0c9da3738122a2c01aa6f8bfc338e309ad332e/admin.py
if self.params['is_unknown']: self.params['geom_type'] = OGRGeomType('GEOMETRYCOLLECTION') self.params['is_collection']=True self.params['collection_type'] = 'Any'
if self.params['is_unknown']: self.params['geom_type'] = OGRGeomType('GEOMETRYCOLLECTION') self.params['is_collection']=True self.params['collection_type'] = 'Any'
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
cd0c9da3738122a2c01aa6f8bfc338e309ad332e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/cd0c9da3738122a2c01aa6f8bfc338e309ad332e/admin.py
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs)
cd0c9da3738122a2c01aa6f8bfc338e309ad332e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/cd0c9da3738122a2c01aa6f8bfc338e309ad332e/admin.py
user_settings_module = '%s.settings' % (options.app, options.user_settings)
user_settings_module = '%s.settings' % options.app
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
b898b6a591bc1adbd7166425ac3368d7f5797c3f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/b898b6a591bc1adbd7166425ac3368d7f5797c3f/pavement.py
__import__(settings_module)
__import__(user_settings_module)
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
b898b6a591bc1adbd7166425ac3368d7f5797c3f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/b898b6a591bc1adbd7166425ac3368d7f5797c3f/pavement.py
return sys.modules[settings_module]
return sys.modules[user_settings_module]
def get_app_settings(options): settings_module = '%s.settings_default' % options.app user_settings_module = '%s.settings' % (options.app, options.user_settings) try: __import__(settings_module) except: exit_with_traceback("Problem with %s or %s, see above" % (settings_module, user_settings_module)) return sys.modules[settings_module]
b898b6a591bc1adbd7166425ac3368d7f5797c3f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/b898b6a591bc1adbd7166425ac3368d7f5797c3f/pavement.py
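The three pavement.py records above fix get_app_settings in concert: the module name is built without the stray second format argument (a two-item tuple against a single '%s' raises TypeError), the user settings module is the one imported, and it is also the one returned. The sys.modules lookup matters because __import__('pkg.mod') returns the top-level package rather than the submodule. A condensed sketch of the corrected flow:

    import sys

    def get_app_settings(app):
        user_settings_module = '%s.settings' % app
        __import__(user_settings_module)          # returns the top-level package
        return sys.modules[user_settings_module]  # so fetch the submodule here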
item = NewsItem.objects.get(schema__id=schema.id, title=entry.title, description=entry.description)
item = NewsItem.objects.get(schema__id=schema.id, url=entry.link)
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(schema__id=schema.id, title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. point = entry.get('georss_point') or entry.get('point') if point: x, y = point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
41f016572216a36d3af500427fdadb6d4f62ba97 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/41f016572216a36d3af500427fdadb6d4f62ba97/add_news.py
addcount = updatecount = 0
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
31ebdee79524d4fe974befc2b00a3de71a881e3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/31ebdee79524d4fe974befc2b00a3de71a881e3d/add_events.py
item = NewsItem.objects.get(title=entry.title, description=entry.description)
item = NewsItem.objects.get(title=title, schema=schema)
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
31ebdee79524d4fe974befc2b00a3de71a881e3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/31ebdee79524d4fe974befc2b00a3de71a881e3d/add_events.py
addcount += 1 except NewsItem.MultipleObjectsReturned: logger.warn("Multiple entries matched title %r, event titles are not unique?" % title) continue
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
31ebdee79524d4fe974befc2b00a3de71a881e3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/31ebdee79524d4fe974befc2b00a3de71a881e3d/add_events.py
item.title = convert_entities(entry.title)
item.title = title
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
31ebdee79524d4fe974befc2b00a3de71a881e3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/31ebdee79524d4fe974befc2b00a3de71a881e3d/add_events.py
logger.info("add_events finished")
logger.info("add_events finished: %d added, %d updated" % (addcount, updatecount))
def main(): """ Download Calendar RSS feed and update database """ logger.info("Starting add_events") url = """http://calendar.boston.com/search?acat=&cat=&commit=Search\
31ebdee79524d4fe974befc2b00a3de71a881e3d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/31ebdee79524d4fe974befc2b00a3de71a881e3d/add_events.py
return self.schema.slug
return (self.schema.slug,)
def natural_key(self): return self.schema.slug
0078aa690bf0b1508e07cdd380682afee2704558 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/0078aa690bf0b1508e07cdd380682afee2704558/models.py
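The natural_key change enforces a Django serialization contract: deserialization unpacks the key with get_by_natural_key(*key), so natural_key() must return a tuple; a bare string would be splatted character by character. Hedged sketch, with a stand-in model name and the field from the record:

    from django.db import models

    class SchemaAware(models.Model):        # hypothetical stand-in model
        schema = models.ForeignKey('Schema', on_delete=models.CASCADE)

        def natural_key(self):
            return (self.schema.slug,)      # a tuple, as the framework unpacks it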
item = NewsItem.objects.get(title=entry.title, description=entry.description)
item = NewsItem.objects.get(schema__id=schema.id, title=entry.title, description=entry.description)
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
8784b516d6fbe9940e580877cac0558941b88d25 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8784b516d6fbe9940e580877cac0558941b88d25/add_news.py
if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ')
point = entry.get('georss_point') or entry.get('point') if point: x, y = point.split(' ')
def main(argv=None): logger.info("Starting add_news") if argv: url = argv[0] else: url = 'http://search.boston.com/search/api?q=*&sort=-articleprintpublicationdate&subject=massachusetts&scope=bonzai' schema_slug = 'local-news' try: schema = Schema.objects.get(slug=schema_slug) except Schema.DoesNotExist: print "Schema (%s): DoesNotExist" % schema_slug sys.exit(1) f = feedparser.parse(url) for entry in f.entries: try: item = NewsItem.objects.get(title=entry.title, description=entry.description) print "Already have %r (id %d)" % (item.title, item.id) except NewsItem.DoesNotExist: item = NewsItem() try: item.schema = schema item.title = convert_entities(entry.title) item.description = convert_entities(entry.description) item.url = entry.link item.location_name = entry.get('x-calconnect-street') or entry.get('georss_featurename') item.item_date = datetime.datetime(*entry.updated_parsed[:6]) item.pub_date = datetime.datetime(*entry.updated_parsed[:6]) # feedparser bug: depending on which parser it magically uses, # we either get the xml namespace in the key name, or we don't. if 'point' in entry: x,y = entry.point.split(' ') elif 'georss_point' in entry: x,y = entry.georss_point.split(' ') else: # Fall back on geocoding. text = item.title + ' ' + item.description try: x, y = quick_dirty_fallback_geocode(text, parse=True) except GeocodingException: logger.debug("Geocoding exception on %r:" % text) log_exception(level=logging.DEBUG) continue if None in (x, y): logger.debug("couldn't geocode '%s...'" % item.title[:30]) continue item.location = Point((float(y), float(x))) if item.location.x == 0.0 and item.location.y == 0.0: # There's a lot of these. Maybe attempt to # parse and geocode if we haven't already? logger.info("Skipping %r as it has bad location 0,0" % item.title) continue if not item.location_name: # Fall back to reverse-geocoding. from ebpub.geocoder import reverse try: block, distance = reverse.reverse_geocode(item.location) logger.debug(" Reverse-geocoded point to %r" % block.pretty_name) item.location_name = block.pretty_name except reverse.ReverseGeocodeError: logger.debug(" Failed to reverse geocode %s for %r" % (item.location.wkt, item.title)) item.location_name = u'' item.save() logger.info("Saved: %s" % item.title) except: logger.error("Warning: couldn't save %r. Traceback:" % item.title) log_exception() logger.info("Finished add_news")
8784b516d6fbe9940e580877cac0558941b88d25 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8784b516d6fbe9940e580877cac0558941b88d25/add_news.py
def auto(options): # determine the root of the virutal env options.env_root = os.path.abspath(os.environ.get('VIRTUAL_ENV', '.')) # XXX better test. if not os.path.exists(os.path.join(options.env_root, 'bin', 'paver')): print "It does not appear that your virutal environment is activated or that you are in its root." print "please activate your environment and try again." sys.exit(0) print "Using virtual env %s" % options.env_root
7c4ad48307decdfd8ffdf8e71d7757acdf24813a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7c4ad48307decdfd8ffdf8e71d7757acdf24813a/pavement.py
newsitem_qs = kwargs.get('newsitem_qs') or NewsItem.objects.all()
newsitem_qs = kwargs.get('newsitem_qs') if newsitem_qs is None: newsitem_qs = NewsItem.objects.all()
def get_place_info_for_request(request, *args, **kwargs): """ A utility function that abstracts getting commonly used location-related information: a place, its type, a queryset of intersecting NewsItems, a bbox, nearby locations, etc. """ info = dict(bbox=None, nearby_locations=[], location=None, is_block=False, block_radius=None, is_saved=False, pid='', #place_wkt = '', # Unused? cookies_to_set={}, ) saved_place_lookup={} newsitem_qs = kwargs.get('newsitem_qs') or NewsItem.objects.all() info['place'] = place = url_to_place(*args, **kwargs) nearby = Location.objects.filter(location_type__is_significant=True) nearby = nearby.select_related().exclude(id=place.id) nearby = nearby.order_by('location_type__id', 'name') if place.location is None: # No geometry. info['bbox'] = get_metro()['extent'] saved_place_lookup = {'location__id': place.id} info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) elif isinstance(place, Block): info['is_block'] = True xy_radius, block_radius, cookies_to_set = block_radius_value(request) search_buf = make_search_buffer(place.location.centroid, block_radius) info['nearby_locations'] = nearby.filter( location__bboverlaps=search_buf ) info['bbox'] = search_buf.extent saved_place_lookup = {'block__id': place.id} info['block_radius'] = block_radius info['cookies_to_set'] = cookies_to_set info['newsitem_qs'] = newsitem_qs.filter( location__bboverlaps=search_buf) info['pid'] = make_pid(place, block_radius) else: # If the location is a point, or very small, we want to expand # the area we care about via make_search_buffer(). But if # it's not, we probably want the extent of its geometry. # Let's just take the union to cover both cases. info['location'] = place saved_place_lookup = {'location__id': place.id} search_buf = make_search_buffer(place.location.centroid, 3) search_buf = search_buf.union(place.location) info['bbox'] = search_buf.extent nearby = nearby.filter(location__bboverlaps=search_buf) info['nearby_locations'] = nearby.exclude(id=place.id) info['newsitem_qs'] = newsitem_qs.filter( newsitemlocation__location__id=place.id) # TODO: place_wkt is unused? preserved from the old generic_place_page() #info['place_wkt'] = place.location.simplify(tolerance=0.001, # preserve_topology=True) info['pid'] = make_pid(place) # Determine whether this is a saved place. if not request.user.is_anonymous(): saved_place_lookup['user_id'] = request.user.id # TODO: request.user.id should not do a DB lookup info['is_saved'] = SavedPlace.objects.filter(**saved_place_lookup).count() return info
0eb82220b2ea53474ee7ebb1c2f2688d4f8aea65 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/0eb82220b2ea53474ee7ebb1c2f2688d4f8aea65/views.py
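The newsitem_qs change guards against QuerySet falsiness: an empty queryset is falsy, so "kwargs.get('newsitem_qs') or NewsItem.objects.all()" silently widens a deliberately empty queryset to every item. A small demonstration of the difference:

    from ebpub.db.models import NewsItem   # assumed import path

    def pick(newsitem_qs=None):
        if newsitem_qs is None:            # replace only a truly missing value
            newsitem_qs = NewsItem.objects.all()
        return newsitem_qs

    empty = NewsItem.objects.none()
    assert pick(empty) is empty            # preserved; 'or' would discard it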
def test_make_pid__block__not_enough_args(self):
def test_make_pid__block__default_radius(self):
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
1fb467b2215e3c862b855d7801460eebbe66b767 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/1fb467b2215e3c862b855d7801460eebbe66b767/tests.py
self.assertRaises(TypeError, make_pid, b)
self.assertEqual(make_pid(b), 'b:%d.8' % b.id)
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(TypeError, make_pid, b)
1fb467b2215e3c862b855d7801460eebbe66b767 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/1fb467b2215e3c862b855d7801460eebbe66b767/tests.py
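These tests.py records track make_pid gaining a default block radius: calling it with just a block no longer raises TypeError and instead yields 'b:<id>.8'. A hedged sketch of a signature consistent with the updated assertion; the real implementation lives elsewhere in these sources:

    def make_pid(place, block_radius=8):   # default inferred from the test
        if hasattr(place, 'from_num'):     # crude block check, for the sketch only
            return 'b:%d.%s' % (place.id, block_radius)
        return 'l:%d' % place.id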
objects = models.Manager() public_objects = SchemaManager()
objects = SchemaManager() public_objects = SchemaPublicManager()
def get_query_set(self): return super(SchemaManager, self).get_query_set().filter(is_public=True)
baec0a715b07fe756e828610e80c9ceba55ce318 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/baec0a715b07fe756e828610e80c9ceba55ce318/models.py
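This models.py record swaps in named manager classes so objects and public_objects each get explicit behavior (the next record does the same for Location). The underlying pattern is a Manager subclass that narrows the default queryset; the record uses the Django 1.x-era spelling get_query_set, which modern Django names get_queryset. A generic sketch of the split, with a stand-in class name:

    from django.db import models

    class PublicManager(models.Manager):   # stands in for SchemaPublicManager
        def get_queryset(self):
            return super(PublicManager, self).get_queryset().filter(is_public=True)

    class Schema(models.Model):
        is_public = models.BooleanField(default=False)
        objects = models.Manager()         # everything
        public_objects = PublicManager()   # only public rows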
objects = models.GeoManager()
objects = LocationManager()
def url(self): return '/locations/%s/' % self.slug
baec0a715b07fe756e828610e80c9ceba55ce318 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/baec0a715b07fe756e828610e80c9ceba55ce318/models.py
'b:12;1' (block ID 12, 1-block radius)
'b:12.1' (block ID 12, 1-block radius)
def parse_pid(pid): """ Returns a tuple of (place, block_radius, xy_radius), where block_radius and xy_radius are None for Locations. PID examples: 'b:12;1' (block ID 12, 1-block radius) 'l:32' (location ID 32) """ try: place_type, place_id = pid.split(':') if place_type == 'b': place_id, block_radius = place_id.split('.') place_id = int(place_id) except (KeyError, ValueError): raise Http404('Invalid place') if place_type == 'b': try: xy_radius = BLOCK_RADIUS_CHOICES[block_radius] except KeyError: raise Http404('Invalid radius') return (get_object_or_404(Block, id=place_id), block_radius, xy_radius) elif place_type == 'l': return (get_object_or_404(Location, id=place_id), None, None) else: raise Http404
66b1fc0c3b6f67b54be6247708e23ed666d15b34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/66b1fc0c3b6f67b54be6247708e23ed666d15b34/views.py
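The parse_pid fix is docstring-only, but it matters: the parser splits a block pid on '.', so the documented example 'b:12;1' could never parse. Round-trip sketch of the accepted format:

    pid = 'b:12.1'                      # block 12, 1-block radius
    place_type, rest = pid.split(':')
    place_id, block_radius = rest.split('.')
    print(place_type, int(place_id), block_radius)   # b 12 1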
JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID).
JSON -- expects request.GET['pid'] (a location ID) and request.GET['s'] (a schema ID). Returns a JSON mapping containing {'bunches': {scale: [list of clusters]}, 'ids': [list of newsitem ids]} where clusters are represented as [[list of newsitem IDs], [center x, y]] NB: the list of all newsitem IDs should be the union of all IDs in all the clusters.
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID). """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') place, block_radius, xy_radius = parse_pid(request.GET.get('pid', '')) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
66b1fc0c3b6f67b54be6247708e23ed666d15b34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/66b1fc0c3b6f67b54be6247708e23ed666d15b34/views.py
place, block_radius, xy_radius = parse_pid(request.GET.get('pid', ''))
pid = request.GET.get('pid', '') place, block_radius, xy_radius = parse_pid(pid)
def ajax_place_newsitems(request): """ JSON -- expects request.GET['pid'] and request.GET['s'] (a schema ID). """ try: s = Schema.public_objects.get(id=int(request.GET['s'])) except (KeyError, ValueError, Schema.DoesNotExist): raise Http404('Invalid Schema') place, block_radius, xy_radius = parse_pid(request.GET.get('pid', '')) if isinstance(place, Block): search_buffer = make_search_buffer(place.location.centroid, block_radius) newsitem_qs = NewsItem.objects.filter(location__bboverlaps=search_buffer) else: newsitem_qs = NewsItem.objects.filter(newsitemlocation__location__id=place.id) # Make the JSON output. Note that we have to call dumps() twice because the # bunches are a special case. ni_list = list(newsitem_qs.filter(schema__id=s.id).order_by('-item_date')[:50]) bunches = simplejson.dumps(cluster_newsitems(ni_list, 26), cls=ClusterJSON) id_list = simplejson.dumps([ni.id for ni in ni_list]) return HttpResponse('{"bunches": %s, "ids": %s}' % (bunches, id_list), mimetype="application/javascript")
66b1fc0c3b6f67b54be6247708e23ed666d15b34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/66b1fc0c3b6f67b54be6247708e23ed666d15b34/views.py
sh("django-admin.py dbshell --settings=%s < ../../ebpub/ebpub/db/sql/location.sql" % settings_mod)
def sync_all(options): """Use django-admin to initialize all our databases. """ settings_mod = "%s.settings" % options.app settings = get_app_settings(options) for dbname in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) for dbname in settings.DATABASES.keys(): if dbname not in settings.DATABASE_SYNC_ORDER: sh("django-admin.py syncdb --settings=%s --database=%s --noinput" % (settings_mod, dbname)) # Need workaround here for # http://developer.openblockproject.org/ticket/74 because geometry # columns don't exist yet at the time that Django loads an app's # custom sql. Maybe just re-run the sqlcustom stuff and ignore # errors?
3886244a81ebde06b601dca345223d03f5d72497 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/3886244a81ebde06b601dca345223d03f5d72497/pavement.py
yield self.get_html(self.url)
max_per_page = 1000 max_pages = 10 delta = datetime.datetime.now() - self.last_updated_time() hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24)) for page in range(1, max_pages + 1): url = LIST_URL + '&start=%d&page=%d&num_results=%d' % ( hours_ago, page, max_per_page) yield self.get_html(url)
def list_pages(self): yield self.get_html(self.url)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
unique_fields = self.unique_fields(list_record) qs = NewsItem.objects.filter(schema__id=self.schema.id, **unique_fields)
qs = NewsItem.objects.filter(schema__id=self.schema.id) qs = qs.by_attribute(self.schema_fields['guid'], list_record['id'])
def existing_record(self, list_record): unique_fields = self.unique_fields(list_record) qs = NewsItem.objects.filter(schema__id=self.schema.id, **unique_fields) try: return qs[0] except IndexError: return None
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
kwargs = self.unique_fields(list_record)
if old_record is not None: self.logger.info("Stopping, we've already seen %s" % old_record) raise StopScraping()
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
location = Point((float(list_record['geo_long']), float(list_record['geo_lat'])))
kwargs = get_unique_fields(list_record) location = self.get_location(list_record)
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
print "skipping %r as it has bad location 0,0" % list_record['title']
self.logger.warn("skipping %r as it has bad location 0,0" % list_record['title'])
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
attributes = None
attributes = {'guid': list_record['id']}
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
attributes = {'rating': rating}
attributes['rating'] = rating
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
kwargs.update(dict(description=content, location=location, )) if old_record: self.update_existing(old_record, kwargs, attributes) else: self.create_newsitem(attributes=attributes, **kwargs)
kwargs['description'] = content
def save(self, old_record, list_record, detail_record): kwargs = self.unique_fields(list_record)
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
def unique_fields(self, list_record): import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
kwargs['location'] = location
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
return dict(item_date=date, location_name=location_name, title=list_record['title'], )
self.create_or_update(old_record, attributes, **kwargs)
def unique_fields(self, list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely idenfity # an article. import datetime date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u''
7fa37ba874d94c12b5efdfb819de1a05f663c91f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/7fa37ba874d94c12b5efdfb819de1a05f663c91f/seeclickfix_retrieval.py
self.assertRaises(AssertionError, make_pid, b)
self.assertRaises(TypeError, make_pid, b)
def test_make_pid__block__not_enough_args(self): b = self._makeBlock() self.assertRaises(AssertionError, make_pid, b)
777a4ff331beb34f53f1d2220232f9f4a71ab870 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/777a4ff331beb34f53f1d2220232f9f4a71ab870/tests.py
def existing_record(self, record): return None
def existing_record(self, list_record): pk_fields = self.pk_fields(list_record) qs = NewsItem.objects.filter(schema__id=self.schema.id, **pk_fields) try: return qs[0] except IndexError: return None
def existing_record(self, record): # TODO return None
f84a6d66bcffb81e9231219272d5018326876ba1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/f84a6d66bcffb81e9231219272d5018326876ba1/seeclickfix_retrieval.py
content = list_record['summary']
content = list_record['summary']
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
f84a6d66bcffb81e9231219272d5018326876ba1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/f84a6d66bcffb81e9231219272d5018326876ba1/seeclickfix_retrieval.py
title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
return dict(item_date=date, title=u'SeeClickFix: ' + list_record['title'], )
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
f84a6d66bcffb81e9231219272d5018326876ba1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/f84a6d66bcffb81e9231219272d5018326876ba1/seeclickfix_retrieval.py
if (location.x, location.y) == (0,0, 0.0): print "skipping %r as it has bad location 0,0" % title return self.create_newsitem( attributes=None, title=u'SeeClickFix: ' + title, description=list_record['summary_detail']['value'], item_date=date, location_name=location_name, location=location, )
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
f84a6d66bcffb81e9231219272d5018326876ba1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/f84a6d66bcffb81e9231219272d5018326876ba1/seeclickfix_retrieval.py
from ebdata.retrieval import log_debug
def save(self, old_record, list_record, detail_record): summary_detail = list_record['summary_detail']['value'] content = list_record['summary'] # remove address and rating from content, i guess. content = content.replace(summary_detail, '') import re address_re = re.compile(r'Address: (.*?)<br />') addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' import datetime date = datetime.date(*list_record['updated_parsed'][:3]) title = list_record['title'] location = Point((float(list_record['geo_lat']), float(list_record['geo_long'])))
f84a6d66bcffb81e9231219272d5018326876ba1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/f84a6d66bcffb81e9231219272d5018326876ba1/seeclickfix_retrieval.py
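A coordinate-order caveat runs through this commit's records: GEOS points are (x, y), that is (longitude, latitude), so the old save()'s Point((geo_lat, geo_long)) transposes the coordinates, and later revisions build the point longitude-first. Sketch with GeoDjango, using illustrative Boston coordinates:

    from django.contrib.gis.geos import Point

    lat, lng = 42.3601, -71.0589
    location = Point(lng, lat)          # x = longitude first, y = latitude
    print(location.x, location.y)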
return '/%s/by-date/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day)
year = self.item_date.year month = self.item_date.month day = self.item_date.day slug = self.schema.slug return '/%(slug)s/by-date/%(year)s-%(month)s-%(day)s,%(year)s-%(month)s-%(day)s/' % locals()
def item_date_url(self): return '/%s/by-date/%s/%s/%s/' % (self.schema.slug, self.item_date.year, self.item_date.month, self.item_date.day)
b7941a625bd17ff879d4fd7e25f10e81f98d78b3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/b7941a625bd17ff879d4fd7e25f10e81f98d78b3/models.py
return tree.body.text.strip()
if tree.body.text: return tree.body.text.strip() else: return u''
def preprocess_to_string(*args, **kw): """ like make_tree_and_preprocess() but returns a string. """ tree = make_tree_and_preprocess(*args, **kw) return tree.body.text.strip()
42360caf8fb37750ad107073d22ac9dfdf3c8f57 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/42360caf8fb37750ad107073d22ac9dfdf3c8f57/treeutils.py
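The treeutils fix guards lxml's .text attribute, which is None whenever <body> begins with a child element rather than bare text; calling .strip() on it raises AttributeError. Minimal reproduction of the failure mode and the guard:

    from lxml import html

    tree = html.document_fromstring('<html><body><p>hi</p></body></html>')
    text = tree.body.text                     # None: body starts with <p>
    print(text.strip() if text else u'')      # guarded, as in the fix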
return u'User %s: %u' % (self.user_id, self.name())
return u'User %d: %s' % (self.user_id, self.name())
def __unicode__(self): return u'User %s: %u' % (self.user_id, self.name())
9901e9cc82e2ab02dcbe2066b6bdbd6468e8247c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/9901e9cc82e2ab02dcbe2066b6bdbd6468e8247c/models.py
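The __unicode__ fix pairs each conversion with the right argument type: '%u' (an old alias of '%d') applied to the name string raises TypeError, while the numeric id was being rendered with '%s'. Quick check:

    user_id, name = 42, u'alice'
    print(u'User %d: %s' % (user_id, name))   # OK
    # u'User %s: %u' % (user_id, name)        # TypeError: %u needs a number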
def test_address_dir_northwest(self): # There was a typo in the regex for this, but mysteriously it still worked self.assertParses('123 Northwest Main St.', [('123 Northwest Main St.', '')])
9cd05fd7d6a243935a765fc5a4ce657a9862e557 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/9cd05fd7d6a243935a765fc5a4ce657a9862e557/tests.py
print len(geo_entries)
def geo_example(request): import feedparser from ebdata.nlp.addresses import parse_addresses from ebpub.geocoder.base import AddressGeocoder feed_url = 'http://www.bpdnews.com/index.xml' feed = feedparser.parse(feed_url) geocoder = AddressGeocoder() geo_entries = [] for entry in feed.entries: addresses = parse_addresses(entry.description) point = None while not point: for address in addresses: try: location = geocoder.geocode(address[0]) point = location['point'] break except Exception: pass if not point: point = -1 if point and point is not -1: entry['point'] = point geo_entries.append(entry) print len(geo_entries) return render_to_response('db/geo_example.html', {'entries': geo_entries })
e8758d033637fab28b8b33051d8a013b73217e17 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/e8758d033637fab28b8b33051d8a013b73217e17/views.py
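One more wrinkle in the geo_example record: "point is not -1" only behaves because CPython interns small integers, and identity is the wrong test for a numeric sentinel anyway. A None sentinel states the intent directly; geocode_stub below is a hypothetical stand-in:

    def geocode_stub(address):          # hypothetical stand-in geocoder
        raise ValueError('no match')

    point = None
    for address in ['123 Main St.']:
        try:
            point = geocode_stub(address)
            break
        except Exception:
            continue
    if point is None:                   # a None sentinel, not 'is -1'
        print('could not geocode')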
max_per_page = 200 max_pages = 10
max_per_page = 500 max_pages = 4
def list_pages(self): # Fetch the feed, paginating if necessary. # See API docs at # http://help.seeclickfix.com/faqs/api/listing-issues max_per_page = 200 max_pages = 10
97a52757a27606c092907cae58b1231589874383 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/97a52757a27606c092907cae58b1231589874383/seeclickfix_retrieval.py
import pprint for info in SeeClickFixNewsFeedScraper().raw_data(): pprint.pprint(info['detail'])
SeeClickFixNewsFeedScraper().display_data()
def save(self, old_record, list_record, detail_record): attributes = detail_record.pop('attributes', None) self.create_or_update(old_record, attributes, **detail_record)
97a52757a27606c092907cae58b1231589874383 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/97a52757a27606c092907cae58b1231589874383/seeclickfix_retrieval.py
def get_unique_fields(list_record):
class SeeClickFixNewsFeedScraper(RssListDetailScraper, NewsItemListDetailScraper): """ For all of these methods, see docstrings in ebdata.retrieval.scrapers.list_detail.ListDetailScraper """
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], ) class SeeClickFixNewsFeedScraper(RssListDetailScraper, NewsItemListDetailScraper):
def get_unique_fields(list_record): # not necessarily primary key, but for this script's purposes # these are the fields that in combination uniquely identify # an article. # TODO: 'id' is all we need for uniqueness, but what i'm doing # here is really cleaning? date = datetime.date(*list_record['updated_parsed'][:3]) summary_detail = list_record['summary_detail']['value'] addr = address_re.search(summary_detail) if addr: addr = addr.group(1) location_name = ', '.join([part.strip() for part in addr.split(',')]) else: location_name = u'' return dict(id=list_record['id'], item_date=date, location_name=location_name, title=list_record['title'], )
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
has_detail = False
has_detail = True
def get_unique_fields(list_record):
    # not necessarily primary key, but for this script's purposes
    # these are the fields that in combination uniquely identify
    # an article.
    # TODO: 'id' is all we need for uniqueness, but what i'm doing
    # here is really cleaning?
    date = datetime.date(*list_record['updated_parsed'][:3])
    summary_detail = list_record['summary_detail']['value']
    addr = address_re.search(summary_detail)
    if addr:
        addr = addr.group(1)
        location_name = ', '.join([part.strip() for part in addr.split(',')])
    else:
        location_name = u''
    return dict(id=list_record['id'],
                item_date=date,
                location_name=location_name,
                title=list_record['title'],
                )
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
max_per_page = 1000
max_per_page = 200
def list_pages(self):
    # See API docs at
    # http://help.seeclickfix.com/faqs/api/listing-issues
    # paginate if necessary.
    max_per_page = 1000
    max_pages = 10
    # First, figure out how long it's been since the last scrape;
    # seeclickfix has a 'start' option in hours.
    # We'll discard microseconds and round up.
    # The idea is not to be precise, but to get everything we haven't
    # seen yet and not much that we have seen.
    delta = datetime.datetime.now() - self.last_updated_time()
    hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24))
    for page in range(1, max_pages + 1):
        url = LIST_URL + '&start=%d&page=%d&num_results=%d' % (
            hours_ago, page, max_per_page)
        yield self.get_html(url)
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
def list_pages(self):
    # See API docs at
    # http://help.seeclickfix.com/faqs/api/listing-issues
    # paginate if necessary.
    max_per_page = 1000
    max_pages = 10
    # First, figure out how long it's been since the last scrape;
    # seeclickfix has a 'start' option in hours.
    # We'll discard microseconds and round up.
    # The idea is not to be precise, but to get everything we haven't
    # seen yet and not much that we have seen.
    delta = datetime.datetime.now() - self.last_updated_time()
    hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24))
    for page in range(1, max_pages + 1):
        url = LIST_URL + '&start=%d&page=%d&num_results=%d' % (
            hours_ago, page, max_per_page)
        yield self.get_html(url)
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
url = LIST_URL + '&start=%d&page=%d&num_results=%d' % (
feed_url = FEED_URL + '&start=%d&page=%d&num_results=%d' % (
def list_pages(self):
    # See API docs at
    # http://help.seeclickfix.com/faqs/api/listing-issues
    # paginate if necessary.
    max_per_page = 1000
    max_pages = 10
    # First, figure out how long it's been since the last scrape;
    # seeclickfix has a 'start' option in hours.
    # We'll discard microseconds and round up.
    # The idea is not to be precise, but to get everything we haven't
    # seen yet and not much that we have seen.
    delta = datetime.datetime.now() - self.last_updated_time()
    hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24))
    for page in range(1, max_pages + 1):
        url = LIST_URL + '&start=%d&page=%d&num_results=%d' % (
            hours_ago, page, max_per_page)
        yield self.get_html(url)
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
yield self.get_html(url)
yield self.fetch_data(feed_url)
def list_pages(self):
    # See API docs at
    # http://help.seeclickfix.com/faqs/api/listing-issues
    # paginate if necessary.
    max_per_page = 1000
    max_pages = 10
    # First, figure out how long it's been since the last scrape;
    # seeclickfix has a 'start' option in hours.
    # We'll discard microseconds and round up.
    # The idea is not to be precise, but to get everything we haven't
    # seen yet and not much that we have seen.
    delta = datetime.datetime.now() - self.last_updated_time()
    hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24))
    for page in range(1, max_pages + 1):
        url = LIST_URL + '&start=%d&page=%d&num_results=%d' % (
            hours_ago, page, max_per_page)
        yield self.get_html(url)
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
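The list_pages variants above derive the API's 'start' parameter (in hours) from the time of the last scrape. Since timedelta.seconds holds only the sub-day remainder, the days must be folded back in as hours before rounding up. A worked example with made-up timestamps:

# Worked example of the hours_ago computation used above.
import datetime, math

last_run = datetime.datetime(2010, 6, 1, 10, 30)
now = datetime.datetime(2010, 6, 3, 9, 15)
delta = now - last_run                     # 1 day, 22:45:00
# delta.seconds == 81900 (22h45m); delta.days == 1
hours_ago = math.ceil((delta.seconds / 3600.0) + (delta.days * 24))
print hours_ago                            # 47.0 -- covers the whole gap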
def existing_record(self, list_record):
    qs = NewsItem.objects.filter(schema__id=self.schema.id)
    qs = qs.by_attribute(self.schema_fields['guid'], list_record['id'])
def existing_record(self, cleaned_list_record):
    url = cleaned_list_record['id'].replace('https:', 'http:')
    qs = NewsItem.objects.filter(schema__id=self.schema.id, url=url)
def existing_record(self, list_record):
    qs = NewsItem.objects.filter(schema__id=self.schema.id)
    qs = qs.by_attribute(self.schema_fields['guid'], list_record['id'])
    try:
        return qs[0]
    except IndexError:
        return None
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
if old_record is not None:
    self.logger.info("Stopping, we've already seen %s" % old_record)
    raise StopScraping()
def save(self, old_record, list_record, detail_record):
    if old_record is not None:
        self.logger.info("Stopping, we've already seen %s" % old_record)
        raise StopScraping()
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
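The save() override above treats the first already-seen record as a signal to stop the whole run, which is what makes the scrape incremental: in a newest-first feed, everything after a known item is known too. A stripped-down sketch of that idiom (names are illustrative; in ebdata it is the scraper's update loop that catches StopScraping):

# Sketch: abort a newest-first scrape at the first known record.
class StopScraping(Exception):
    """Raised to end a scrape once known records are reached."""

def update(records, is_known):
    seen = []
    try:
        for record in records:       # assumes newest-first ordering
            if is_known(record):
                raise StopScraping() # everything older is known too
            seen.append(record)
    except StopScraping:
        pass
    return seen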
kwargs = get_unique_fields(list_record)
location = self.get_location(list_record)
if (location.x, location.y) == (0.0, 0.0):
    self.logger.warn("skipping %r as it has bad location 0,0" % list_record['title'])
    return
summary_detail = list_record['summary_detail']['value']
content = list_record['summary']
content = address_re.sub('', content)
rating = rating_re.search(content)
attributes = {'guid': list_record['id']}
if rating:
    rating = int(rating.group(1))
    attributes['rating'] = rating
    content = rating_re.sub('', content)
content = preprocess_to_string(content, drop_tags=('p', 'br', 'b',))
kwargs['description'] = content
kwargs['location'] = location
self.create_or_update(old_record, attributes, **kwargs)
attributes = detail_record.pop('attributes', None)
self.create_or_update(old_record, attributes, **detail_record)
def save(self, old_record, list_record, detail_record):
    if old_record is not None:
        self.logger.info("Stopping, we've already seen %s" % old_record)
        raise StopScraping()
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
from ebdata.retrieval import log_debug
SeeClickFixNewsFeedScraper().update()
TESTING = True
if TESTING:
    from ebdata.retrieval import log_debug
    import pprint
    for info in SeeClickFixNewsFeedScraper().raw_data():
        pprint.pprint(info['detail'])
else:
    SeeClickFixNewsFeedScraper().update()
def save(self, old_record, list_record, detail_record):
    if old_record is not None:
        self.logger.info("Stopping, we've already seen %s" % old_record)
        raise StopScraping()
8800415fdfe5733eacf7b02f35bafd51005ee12a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10327/8800415fdfe5733eacf7b02f35bafd51005ee12a/seeclickfix_retrieval.py
time = datetime.utcnow() - timedelta(minutes = 30)
time = datetime.utcnow()
def testBoatWindVersusWeather(self):
    settings = Settings()
    weather = Weather()
    weather.load(settings)
    wind = Wind(weather)
f98b1dce1e0444566a60599bd93916d3b6ee8e40 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/f98b1dce1e0444566a60599bd93916d3b6ee8e40/test_boatwind.py
off_track_angle = normalize_angle_pipi(track[0] - heading)
off_track_angle = normalize_angle_pipi(heading - track[0])
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
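This fix swaps the operands of the subtraction, which negates the off-track angle. normalize_angle_pipi itself is not defined anywhere in these records; a conventional wrap-into-(-pi, pi] helper consistent with how it is used here might look like the following sketch (the body is an assumption, not the project's actual implementation):

# Assumed sketch of an angle-wrapping helper like normalize_angle_pipi.
import math

def normalize_angle_pipi(angle):
    """Map any angle in radians onto the interval (-pi, pi]."""
    angle = math.fmod(angle, 2 * math.pi)
    if angle > math.pi:
        angle -= 2 * math.pi
    elif angle <= -math.pi:
        angle += 2 * math.pi
    return angle

# Away from the +/-pi boundary, swapping the operands just negates the
# result: normalize_angle_pipi(a - b) == -normalize_angle_pipi(b - a).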
+ 60 * math.cos(0.8 * off_track_angle) ^ 2
+ 60 * math.pow(math.cos(0.8 * off_track_angle), 2)
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
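The replaced line is a genuine bug fix, not a style change: in Python, ^ is bitwise XOR, defined only for integers, so applying it to a cosine raises TypeError rather than squaring it. A small demonstration:

# Why ^ cannot square a float in Python.
import math

x = math.cos(0.5)
try:
    y = x ^ 2                # TypeError: ^ is XOR, integers only
except TypeError:
    y = math.pow(x, 2)       # the intended square
    # equivalently: y = x ** 2  or  y = x * x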
allowed_off_track = waypoint.range + off_track_mult * math.sqrt(bearing[1])
allowed_off_track = off_track_mult * math.sqrt(bearing[1])
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
cs = math.cos(track[0] - bearing[0])
cos_approach_angle = math.cos(track[0] - bearing[0])
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
if abs(off_track) > allowed_off_track or cs < 0.72:
if abs(off_track) > allowed_off_track or cos_approach_angle < 0.72:
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
track_angle = normalize_angle_pipi(heading - track[0])
if (off_track > 0) == (track_angle > 0):
off_bearing_angle = normalize_angle_pipi(heading - bearing[0])
if (off_track > 0) == (off_bearing_angle > 0):
def handle_tacking_and_gybing(self, heading, bearing):
    wind = self.boat.condition.wind
    wind_angle = normalize_angle_pipi(wind[0] - heading)
    track, waypoint = self.router.get_active_segment()
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
track_angle = normalize_angle_pipi(heading - track[0])
off_bearing_angle = normalize_angle_pipi(heading - bearing[0])
def prevent_beaching(self, heading, look_ahead = None):
    if look_ahead == None:
        look_ahead = 250
    # We'll construct a future course line...
    boat_position = self.boat.position
    # ... project it ahead...
    sail_vector = PolarVector(heading, look_ahead)
    future_position = boat_position + sail_vector
    sail_line = (self.boat.position, sail_vector, future_position)
    # Check if the projected line hits land...
    if self.map.hit(sail_line):
        # ... and if so, tack or gybe away from it
        wind = self.boat.condition.wind
        wind_angle = normalize_angle_pipi(wind[0] - heading)
        self.__log("Tacked/gybed to avoid hitting land")
        return True, normalize_angle_2pi(heading + 2 * wind_angle)
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
if (off_track > 0) == (track_angle > 0):
if (off_track > 0) == (off_bearing_angle > 0):
def prevent_beaching(self, heading, look_ahead = None):
    if look_ahead == None:
        look_ahead = 250
    # We'll construct a future course line...
    boat_position = self.boat.position
    # ... project it ahead...
    sail_vector = PolarVector(heading, look_ahead)
    future_position = boat_position + sail_vector
    sail_line = (self.boat.position, sail_vector, future_position)
    # Check if the projected line hits land...
    if self.map.hit(sail_line):
        # ... and if so, tack or gybe away from it
        wind = self.boat.condition.wind
        wind_angle = normalize_angle_pipi(wind[0] - heading)
        self.__log("Tacked/gybed to avoid hitting land")
        return True, normalize_angle_2pi(heading + 2 * wind_angle)
2e97057ce325b75b197091005db1da0ca47c323d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/10981/2e97057ce325b75b197091005db1da0ca47c323d/sailor.py
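The heading + 2 * wind_angle return value in prevent_beaching above reflects the current course across the wind direction, so the boat ends up on the opposite tack with the same apparent wind angle. A worked example with illustrative numbers:

# Geometry of the avoidance turn: mirror the course across the wind.
import math

heading = math.radians(30.0)
wind_dir = math.radians(75.0)
wind_angle = wind_dir - heading              # wind 45 deg off the bow
new_heading = heading + 2 * wind_angle       # 120 deg
print math.degrees(wind_dir - new_heading)   # -45.0: same angle, other tack
# (the real code additionally wraps the result with normalize_angle_2pi)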
elif elem.tag == '{'+dcterms+'}license'
elif elem.tag == '{'+dcterms+'}license':
def readmeta(item,meta):
    for elem in meta:
        if elem.tag.find(dc):
            if elem.tag == '{'+dc+'}date':
                try:
                    item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f")
                except ValueError, e:
                    if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5
                        item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S")
                    else:
                        remain = str(e)[26:]
                        if remain == 'Z':
                            item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f")
                        else:
                            date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f")
                            delta = remain.split(':')
                            item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1]))
            elif elem.tag == '{'+dc+'}type':
                if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset':
                    raise FileFormatError
            elif elem.tag == '{'+dc+'}format':
                if elem.text != 'application/swatchbook':
                    raise FileFormatError
            elif '{'+xml+'}lang' in elem.attrib:
                exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)")
            else:
                exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)")
        elif elem.tag == '{'+dcterms+'}license'
            item.info.license = xmlunescape(elem.attrib['{'+rdf+'}resource'])
ac63689e58d5fcb5e96eb1a67b5421fb54c6f8a8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/ac63689e58d5fcb5e96eb1a67b5421fb54c6f8a8/sbz.py
xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="http://purl.org/dc/elements/1.1/"\n xmlns:cc="http://creativecommons.org/ns#"\n xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">\n'
xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:cc="'+cc+'"\n xmlns:rdf="'+rdf+'">\n'
def write(swatchbook):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="http://purl.org/dc/elements/1.1/"\n xmlns:cc="http://creativecommons.org/ns#"\n xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">\n'
    xml += sbz.writemeta(swatchbook.info)
    xml += ' <swatches>\n'
    for id in swatchbook.swatches:
        if isinstance(swatchbook.swatches[id], Color):
            swatch = swatchbook.swatches[id]
            xml += ' <color'
            if 'spot' in swatch.usage:
                xml += ' spot="1"'
            xml += '>\n'
            xml += sbz.writemeta(swatch.info,2)
            for value in swatch.values:
                xml += ' <values model="'+value[0]+'"'
                if value[1]:
                    xml += ' space="'+value[1]+'"'
                xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n'
            for extra in swatch.extra:
                xml += ' <extra type="'+xmlescape(extra)+'">'
                if swatch.extra[extra]:
                    xml += xmlescape(unicode(swatch.extra[extra]))
                xml += '</extra>\n'
            xml += ' </color>\n'
    xml += ' </swatches>\n'
    if len(swatchbook.book.items) > 0:
        xml += ' <book'
        for display in swatchbook.book.display:
            if swatchbook.book.display[display]:
                xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"'
        xml += '>\n'
        xml += unicode(sbz.writem(swatchbook.book.items),'utf-8')
        xml += ' </book>\n'
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in swatchbook.profiles:
        zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile)
    zip.close()
    tf.seek(0)
    return tf.read()
7b5f67bf5b56e854dad1732e375162afa036a6be /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/7b5f67bf5b56e854dad1732e375162afa036a6be/sbz.py
if lang == 0:
    xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n'
else:
    xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n'
if book.info[info][lang]:
    if lang == 0:
        xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n'
    else:
        xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n'
def write(book):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n'
    for info in book.info:
        if isinstance(book.info[info],dict):
            for lang in book.info[info]:
                if lang == 0:
                    xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n'
                else:
                    xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n'
        else:
            xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n'
    for display in book.display:
        xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n'
    xml += unicode(sbz.writem(book.items,0),'utf-8')
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in book.profiles: #TODO: check if exists
        zip.write(book.profiles[profile].uri,'profiles/'+profile)
    zip.close()
    tf.seek(0)
    return tf.read()
ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923/sbz.py
xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n'
if book.info[info]:
    xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n'
def write(book):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n'
    for info in book.info:
        if isinstance(book.info[info],dict):
            for lang in book.info[info]:
                if lang == 0:
                    xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n'
                else:
                    xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n'
        else:
            xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n'
    for display in book.display:
        xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n'
    xml += unicode(sbz.writem(book.items,0),'utf-8')
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in book.profiles: #TODO: check if exists
        zip.write(book.profiles[profile].uri,'profiles/'+profile)
    zip.close()
    tf.seek(0)
    return tf.read()
ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923/sbz.py
xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n'
if book.display[display]:
    xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n'
def write(book):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.2">\n'
    for info in book.info:
        if isinstance(book.info[info],dict):
            for lang in book.info[info]:
                if lang == 0:
                    xml += ' <info type="'+info+'">'+xmlescape(book.info[info][0])+'</info>\n'
                else:
                    xml += ' <info type="'+info+'" lang="'+lang+'">'+xmlescape(book.info[info][lang])+'</info>\n'
        else:
            xml += ' <info type="'+info+'">'+xmlescape(book.info[info])+'</info>\n'
    for display in book.display:
        xml += ' <display type="'+display+'">'+str(book.display[display])+'</display>\n'
    xml += unicode(sbz.writem(book.items,0),'utf-8')
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in book.profiles: #TODO: check if exists
        zip.write(book.profiles[profile].uri,'profiles/'+profile)
    zip.close()
    tf.seek(0)
    return tf.read()
ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/ef44432f2b2eb6e9bc145c2a1a8bc71f23ee4923/sbz.py
elif elem.tag.find(cc):
    exec("item.info."+elem.tag[(len(cc)+2):]+" = xmlunescape(elem.text)")
elif elem.tag == '{'+dcterms+'}license'
    item.info.license = xmlunescape(elem.attrib['{'+rdf+'}resource'])
def readmeta(item,meta):
    for elem in meta:
        if elem.tag.find(dc):
            if elem.tag == '{'+dc+'}date':
                try:
                    item.info.date = datetime.strptime(elem.text,"%Y-%m-%dT%H:%M:%S.%f")
                except ValueError, e:
                    if str(e) == "'f' is a bad directive in format '%Y-%m-%dT%H:%M:%S.%f'": # Python 2.5
                        item.info.date = datetime.strptime(elem.text.split('.')[0],"%Y-%m-%dT%H:%M:%S")
                    else:
                        remain = str(e)[26:]
                        if remain == 'Z':
                            item.info.date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f")
                        else:
                            date = datetime.strptime(elem.text[:-len(remain)],"%Y-%m-%dT%H:%M:%S.%f")
                            delta = remain.split(':')
                            item.info.date = date - timedelta(hours=int(delta[0]),minutes=int(delta[1]))
            elif elem.tag == '{'+dc+'}type':
                if elem.attrib['{'+rdf+'}resource'] != 'http://purl.org/dc/dcmitype/Dataset':
                    raise FileFormatError
            elif elem.tag == '{'+dc+'}format':
                if elem.text != 'application/swatchbook':
                    raise FileFormatError
            elif '{'+xml+'}lang' in elem.attrib:
                exec("item.info."+elem.tag[(len(dc)+2):]+"_l10n[elem.attrib['{'+xml+'}lang']] = xmlunescape(elem.text)")
            else:
                exec("item.info."+elem.tag[(len(dc)+2):]+" = xmlunescape(elem.text)")
        elif elem.tag.find(cc):
            exec("item.info."+elem.tag[(len(cc)+2):]+" = xmlunescape(elem.text)")
124ac0b7c33e87468c0b62c963c1861595ca7d19 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/124ac0b7c33e87468c0b62c963c1861595ca7d19/sbz.py
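The readmeta variants above match ElementTree tags in Clark notation, '{namespace-uri}localname'. Note in passing that str.find returns -1 (a truthy value) on a miss, which is why namespace checks are usually written as equality or prefix tests, as in this small sketch:

# Matching namespaced tags the way ElementTree reports them.
from xml.etree import ElementTree

dc = 'http://purl.org/dc/elements/1.1/'
elem = ElementTree.fromstring(
    '<dc:date xmlns:dc="%s">2010-01-01T00:00:00</dc:date>' % dc)

print elem.tag                         # {http://purl.org/dc/elements/1.1/}date
print elem.tag == '{'+dc+'}date'       # True: exact Clark-notation match
print elem.tag.startswith('{'+dc+'}')  # True: namespace membership test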
xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:cc="'+cc+'"\n xmlns:rdf="'+rdf+'">\n'
xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:dcterms="'+dcterms+'"\n xmlns:rdf="'+rdf+'">\n'
def write(swatchbook):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:cc="'+cc+'"\n xmlns:rdf="'+rdf+'">\n'
    xml += sbz.writemeta(swatchbook.info)
    xml += ' <swatches>\n'
    for id in swatchbook.swatches:
        if isinstance(swatchbook.swatches[id], Color):
            swatch = swatchbook.swatches[id]
            xml += ' <color'
            if 'spot' in swatch.usage:
                xml += ' spot="1"'
            xml += '>\n'
            xml += sbz.writemeta(swatch.info,2)
            for value in swatch.values:
                xml += ' <values model="'+value[0]+'"'
                if value[1]:
                    xml += ' space="'+value[1]+'"'
                xml += '>'+' '.join(str(round(x,16)) for x in swatch.values[value])+'</values>\n'
            for extra in swatch.extra:
                xml += ' <extra type="'+xmlescape(extra)+'">'
                if swatch.extra[extra]:
                    xml += xmlescape(unicode(swatch.extra[extra]))
                xml += '</extra>\n'
            xml += ' </color>\n'
    xml += ' </swatches>\n'
    if len(swatchbook.book.items) > 0:
        xml += ' <book'
        for display in swatchbook.book.display:
            if swatchbook.book.display[display]:
                xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"'
        xml += '>\n'
        xml += unicode(sbz.writem(swatchbook.book.items),'utf-8')
        xml += ' </book>\n'
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in swatchbook.profiles:
        zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile)
    zip.close()
    tf.seek(0)
    return tf.read()
124ac0b7c33e87468c0b62c963c1861595ca7d19 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/124ac0b7c33e87468c0b62c963c1861595ca7d19/sbz.py
xml += ' '*(offset+2)+'<cc:license rdf:resource="'+xmlescape(meta.license)+'" />\n'
xml += ' '*(offset+2)+'<dcterms:license rdf:resource="'+xmlescape(meta.license)+'" />\n'
def writemeta(meta,offset=0):
    xml = u''
    if offset == 0:
        xml += ' <dc:format>application/swatchbook</dc:format>\n <dc:type rdf:resource="http://purl.org/dc/dcmitype/Dataset" />\n'
    if meta.date:
        xml += ' '*(offset+2)+'<dc:date>'+meta.date.isoformat()+'Z</dc:date>\n'
    for dc in meta.dc:
        info = eval('meta.'+dc)
        if len(info) > 0:
            xml += ' '*(offset+2)+'<dc:'+dc+'>'+xmlescape(info)+'</dc:'+dc+'>\n'
        if meta.dc[dc][0]:
            info_l10n = eval('meta.'+dc+'_l10n')
            for lang in info_l10n:
                xml += ' '*(offset+2)+'<dc:'+dc+' xml:lang="'+lang+'">'+xmlescape(info_l10n[lang])+'</dc:'+dc+'>\n'
    if meta.license > '':
        xml += ' '*(offset+2)+'<cc:license rdf:resource="'+xmlescape(meta.license)+'" />\n'
    if xml > u'':
        return ' '*(offset+1)+'<metadata>\n'+xml+' '*(offset+1)+'</metadata>\n'
    else:
        return u''
124ac0b7c33e87468c0b62c963c1861595ca7d19 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/124ac0b7c33e87468c0b62c963c1861595ca7d19/sbz.py
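The paired changes above (the root element's xmlns declarations in one record, the license element's cc-to-dcterms prefix in this one) have to travel together: a prefix used in the body must be declared on the root, or the file is not namespace-well-formed. A minimal check, using the conventional DCMI and RDF namespace URIs as an assumption, since the records don't show the module-level bindings, and a placeholder license URL:

# A body prefix must be declared on the root to parse at all.
from xml.etree import ElementTree

dcterms = 'http://purl.org/dc/terms/'                 # assumed binding
rdf = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'   # assumed binding

doc = ('<SwatchBook xmlns:dcterms="'+dcterms+'" xmlns:rdf="'+rdf+'">'
       '<dcterms:license rdf:resource="http://example.org/license"/>'
       '</SwatchBook>')

ElementTree.fromstring(doc)  # parses; drop xmlns:dcterms from the root
                             # and this raises an "unbound prefix" error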
for elem in material:
    if elem.tag == 'values':
        values = map(eval,elem.text.split())
        if 'space' in elem.attrib:
            sitem.values[(elem.attrib['model'],unicode(elem.attrib['space']))] = values
        else:
            sitem.values[(elem.attrib['model'],False)] = values
    elif elem.tag == 'metadata':
        sbz.readmeta(sitem,elem)
    elif elem.tag == 'extra':
        sitem.extra[xmlunescape(elem.attrib['type'])] = xmlunescape(elem.text)
if sitem.info.identifier > '':
    id = sitem.info.identifier
else:
    raise FileFormatError
for elem in material:
    if elem.tag == 'values':
        values = map(eval,elem.text.split())
        if 'space' in elem.attrib:
            sitem.values[(elem.attrib['model'],unicode(elem.attrib['space']))] = values
        else:
            sitem.values[(elem.attrib['model'],False)] = values
    elif elem.tag == 'metadata':
        sbz.readmeta(sitem,elem)
    elif elem.tag == 'extra':
        sitem.extra[xmlunescape(elem.attrib['type'])] = xmlunescape(elem.text)
if sitem.info.identifier > '':
    id = sitem.info.identifier
else:
    raise FileFormatError
def readmaterial(material,swatchbook):
    if material.tag == 'color':
        sitem = Color(swatchbook)
        if 'usage' in material.attrib:
            sitem.usage = material.attrib['usage'].split(',')
    elif material.tag == 'pattern':
        sitem = Pattern(swatchbook)
    for elem in material:
        if elem.tag == 'values':
            values = map(eval,elem.text.split())
            if 'space' in elem.attrib:
                sitem.values[(elem.attrib['model'],unicode(elem.attrib['space']))] = values
            else:
                sitem.values[(elem.attrib['model'],False)] = values
        elif elem.tag == 'metadata':
            sbz.readmeta(sitem,elem)
        elif elem.tag == 'extra':
            sitem.extra[xmlunescape(elem.attrib['type'])] = xmlunescape(elem.text)
    if sitem.info.identifier > '':
        id = sitem.info.identifier
    else:
        raise FileFormatError
    swatchbook.materials[id] = sitem
bf60ecce22088a961f55263604160f2c61cb02e6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/bf60ecce22088a961f55263604160f2c61cb02e6/sbz.py
material = swatchbook.materials[id]
material = swatchbook.materials[id]
def write(swatchbook):
    xml = '<?xml version="1.0" encoding="UTF-8"?>\n<SwatchBook version="0.7"\n xmlns:dc="'+dc+'"\n xmlns:dcterms="'+dcterms+'"\n xmlns:rdf="'+rdf+'">\n'
    xml += sbz.writemeta(swatchbook.info)
    xml += ' <materials>\n'
    for id in swatchbook.materials:
        material = swatchbook.materials[id]
        if isinstance(swatchbook.materials[id], Color):
            xml += ' <color'
            if len(material.usage) > 0:
                xml += ' usage="'+(','.join(material.usage))+'"'
            xml += '>\n'
            xml += sbz.writemeta(material.info,2)
            for value in material.values:
                xml += ' <values model="'+value[0]+'"'
                if value[1]:
                    xml += ' space="'+value[1]+'"'
                xml += '>'+' '.join(str(round(x,16)) for x in material.values[value])+'</values>\n'
            for extra in material.extra:
                xml += ' <extra type="'+xmlescape(extra)+'">'
                if material.extra[extra]:
                    xml += xmlescape(unicode(material.extra[extra]))
                xml += '</extra>\n'
            xml += ' </color>\n'
        elif isinstance(swatchbook.materials[id], Pattern):
            xml += ' <pattern>\n'
            xml += sbz.writemeta(material.info,2)
            for extra in material.extra:
                xml += ' <extra type="'+xmlescape(extra)+'">'
                if material.extra[extra]:
                    xml += xmlescape(unicode(material.extra[extra]))
                xml += '</extra>\n'
            xml += ' </pattern>\n'
    xml += ' </materials>\n'
    if len(swatchbook.book.items) > 0:
        xml += ' <book'
        for display in swatchbook.book.display:
            if swatchbook.book.display[display]:
                xml += ' '+display+'="'+str(swatchbook.book.display[display])+'"'
        xml += '>\n'
        xml += unicode(sbz.writem(swatchbook.book.items),'utf-8')
        xml += ' </book>\n'
    xml += '</SwatchBook>\n'
    tf = open(tempfile.mkstemp()[1],"w+b")
    zip = ZipFile(tf,'w',ZIP_DEFLATED)
    zip.writestr('swatchbook.xml',xml.encode('utf-8'))
    for profile in swatchbook.profiles:
        zip.write(swatchbook.profiles[profile].uri,'profiles/'+profile)
    def addfiles(dir):
        for s in sorted(os.listdir(dir)):
            file = os.path.join(dir,s)
            if os.path.isdir(file) and file not in ('.','..'):
                addfiles(file)
            else:
                zip.write(file,file[len(swatchbook.tmpdir):])
    addfiles(swatchbook.tmpdir)
    zip.close()
    tf.seek(0)
    return tf.read()
bf60ecce22088a961f55263604160f2c61cb02e6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9936/bf60ecce22088a961f55263604160f2c61cb02e6/sbz.py