Dataset Preview
The full dataset viewer is not available; only a preview of the rows is shown. The viewer gave up after 6 attempts; the last attempt raised huggingface_hub.utils._errors.HfHubHTTPError (error code: UnexpectedError).


Columns: id (int64), prompt (string), repo (string), completion (string)
0
# complete multi-line body codes for that function<fim_prefix># ''' Train a k - nearest neighbors classifier for face recognition. Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir example tree structure) Structure: <train_dir>/ β”œβ”€β”€ <person1>/ β”‚ β”œβ”€β”€ <somename1>.jpeg β”‚ β”œβ”€β”€ <somename2>.jpeg β”‚ β”œβ”€β”€ ... β”œβ”€β”€ <person2>/ β”‚ β”œβ”€β”€ <somename1>.jpeg β”‚ └── <somename2>.jpeg └── ... :param model_save_path: (optional) path to save model on disk :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified :param knn_algo: (optional) underlying data structure to support knn.default is ball_tree :param verbose: verbosity of training :return: returns knn classifier that was trained on the given data. ''' def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):<fim_suffix><fim_middle>
ageitgey/face_recognition
X = [] y = [] for class_dir in os.listdir(train_dir): if not os.path.isdir(os.path.join(train_dir, class_dir)): continue for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)): image = face_recognition.load_image_file(img_path) face_bounding_boxes = face_recognition.face_locations(image) if len(face_bounding_boxes) != 1: if verbose: print('Image {} not suitable for training: {}'.format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else 'Found more than one face')) else: X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0]) y.append(class_dir) if n_neighbors is None: n_neighbors = int(round(math.sqrt(len(X)))) if verbose: print('Chose n_neighbors automatically:', n_neighbors) knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance') knn_clf.fit(X, y) if model_save_path is not None: with open(model_save_path, 'wb') as f: pickle.dump(knn_clf, f) return knn_clf
1
# complete multi-line body codes for that function<fim_prefix># ''' Predicts faces in a given image using a KNN classifier. Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_save_path must be knn_clf. :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance of mis-classifying an unknown person as a known one. :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...]. For faces of unrecognized persons, the name 'unknown' will be returned. ''' def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):<fim_suffix><fim_middle>
ageitgey/face_recognition
if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS: raise Exception('Invalid image path: {}'.format(X_img_path)) if knn_clf is None and model_path is None: raise Exception('Must supply knn classifier either thourgh knn_clf or model_path') if knn_clf is None: with open(model_path, 'rb') as f: knn_clf = pickle.load(f) X_img = face_recognition.load_image_file(X_img_path) X_face_locations = face_recognition.face_locations(X_img) if len(X_face_locations) == 0: return [] faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations) closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1) are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))] return [(pred, loc) if rec else ('unknown', loc) for (pred, loc, rec) in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
2
# complete multi-line body codes for that function<fim_prefix># ''' Show the face recognition results visually. Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return: ''' def show_prediction_labels_on_image(img_path, predictions):<fim_suffix><fim_middle>
ageitgey/face_recognition
pil_image = Image.open(img_path).convert('RGB') draw = ImageDraw.Draw(pil_image) for (name, (top, right, bottom, left)) in predictions: draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255)) name = name.encode('UTF-8') (text_width, text_height) = draw.textsize(name) draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255)) draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255)) del draw pil_image.show()
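The three rows above (ids 0-2) together make up the k-nearest-neighbors face-recognition example: train a classifier, predict on a new image, and draw the results. A minimal usage sketch, assuming the three functions are importable from a local module (the module name face_recognition_knn and the file paths below are hypothetical) and that the training directory follows the documented layout:

# Hypothetical module and paths; the function bodies are the ones shown in the rows above.
from face_recognition_knn import train, predict, show_prediction_labels_on_image

# Train once and persist the classifier to disk.
knn_clf = train("knn_examples/train", model_save_path="trained_knn_model.clf", n_neighbors=2)

# Recognize faces in a test image using the saved model.
predictions = predict("knn_examples/test/alice1.jpg", model_path="trained_knn_model.clf")
for name, (top, right, bottom, left) in predictions:
    print("Found {} at ({}, {})".format(name, left, top))

# Overlay boxes and names on the image.
show_prediction_labels_on_image("knn_examples/test/alice1.jpg", predictions)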
3
# complete multi-line body codes for that function<fim_prefix># ''' Convert a dlib rect object to a plain tuple in ( top right bottom left Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order :param rect: a dlib 'rect' object :return: a plain tuple representation of the rect in (top, right, bottom, left) order ''' def _rect_to_css(rect):<fim_suffix><fim_middle>
ageitgey/face_recognition
return (rect.top(), rect.right(), rect.bottom(), rect.left())
4
# complete multi-line body codes for that function<fim_prefix># ''' Trim the given CSS tuple to the bounds of the image. Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain tuple representation of the rect in (top, right, bottom, left) order ''' def _trim_css_to_bounds(css, image_shape):<fim_suffix><fim_middle>
ageitgey/face_recognition
return (max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0))
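The helper in the row above (id 4) just clamps a (top, right, bottom, left) tuple to the image bounds. A self-contained check, with the one-line body reproduced from the row and a made-up image shape:

def _trim_css_to_bounds(css, image_shape):
    # Clamp (top, right, bottom, left) to [0, height] / [0, width].
    return (max(css[0], 0), min(css[1], image_shape[1]),
            min(css[2], image_shape[0]), max(css[3], 0))

image_shape = (480, 640, 3)  # height, width, channels of a hypothetical image
print(_trim_css_to_bounds((-10, 700, 500, -5), image_shape))  # (0, 640, 480, 0)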
5
# complete multi-line body codes for that function<fim_prefix># ''' Given a list of face encodings compare them to a known face encoding and get a euclidean distance for each comparison face. Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compare: A face encoding to compare against :return: A numpy ndarray with the distance for each face in the same order as the 'faces' array ''' def face_distance(face_encodings, face_to_compare):<fim_suffix><fim_middle>
ageitgey/face_recognition
if len(face_encodings) == 0: return np.empty(0) return np.linalg.norm(face_encodings - face_to_compare, axis=1)
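face_distance (id 5) is a plain Euclidean norm over 128-dimensional encodings. A self-contained sketch with random vectors standing in for real face encodings:

import numpy as np

def face_distance(face_encodings, face_to_compare):
    # Euclidean distance between each known encoding and the candidate.
    if len(face_encodings) == 0:
        return np.empty(0)
    return np.linalg.norm(face_encodings - face_to_compare, axis=1)

rng = np.random.default_rng(0)
known = rng.normal(size=(3, 128))                   # three stand-in 128-d encodings
candidate = known[0] + 0.01 * rng.normal(size=128)
print(face_distance(known, candidate))              # first distance is by far the smallest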
6
# complete multi-line body codes for that function<fim_prefix># ''' Loads an image file into a numpy array. Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as numpy array ''' def load_image_file(file, mode='RGB'):<fim_suffix><fim_middle>
ageitgey/face_recognition
im = PIL.Image.open(file) if mode: im = im.convert(mode) return np.array(im)
7
# complete multi-line body codes for that function<fim_prefix># ''' Returns an array of bounding boxes of human faces in a image. Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of dlib 'rect' objects of found face locations ''' def _raw_face_locations(img, number_of_times_to_upsample=1, model='hog'):<fim_suffix><fim_middle>
ageitgey/face_recognition
if model == 'cnn': return cnn_face_detector(img, number_of_times_to_upsample) else: return face_detector(img, number_of_times_to_upsample)
8
# complete multi-line body codes for that function<fim_prefix># ''' Returns an array of bounding boxes of human faces in a image. Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but faster on CPUs. "cnn" is a more accurate deep-learning model which is GPU/CUDA accelerated (if available). The default is "hog". :return: A list of tuples of found face locations in css (top, right, bottom, left) order ''' def face_locations(img, number_of_times_to_upsample=1, model='hog'):<fim_suffix><fim_middle>
ageitgey/face_recognition
if model == 'cnn': return [_trim_css_to_bounds(_rect_to_css(face.rect), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, 'cnn')] else: return [_trim_css_to_bounds(_rect_to_css(face), img.shape) for face in _raw_face_locations(img, number_of_times_to_upsample, model)]
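The rows with ids 7-8 wrap dlib's HOG and CNN detectors behind face_locations. A hedged usage sketch through the public face_recognition API (assumes the package is installed and that the image path, which is made up here, points at a real photo):

import face_recognition

image = face_recognition.load_image_file("my_picture.jpg")  # hypothetical path

# HOG is the CPU-friendly default; pass model="cnn" for the GPU-accelerated detector.
boxes = face_recognition.face_locations(image, number_of_times_to_upsample=1, model="hog")
for top, right, bottom, left in boxes:
    print("Face at top={}, right={}, bottom={}, left={}".format(top, right, bottom, left))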
9
# complete multi-line body codes for that function<fim_prefix># ''' Returns a 2d array of bounding boxes of human faces in a given image using the cnn face detectors. Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren't using a GPU, you don't need this function. :param img: A list of images (each as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param batch_size: How many images to include in each GPU processing batch. :return: A list of tuples of found face locations in css (top, right, bottom, left) order ''' def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):<fim_suffix><fim_middle>
ageitgey/face_recognition
def convert_cnn_detections_to_css(detections): return [_trim_css_to_bounds(_rect_to_css(face.rect), images[0].shape) for face in detections] raw_detections_batched = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size) return list(map(convert_cnn_detections_to_css, raw_detections_batched))
10
# complete multi-line body codes for that function<fim_prefix># ''' Given an image returns a dict of face feature locations Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model: Optional - which model to use. "large" (default) or "small" which only returns 5 points but is faster. :return: A list of dicts of face feature locations (eyes, nose, etc) ''' def face_landmarks(face_image, face_locations=None, model='large'):<fim_suffix><fim_middle>
ageitgey/face_recognition
landmarks = _raw_face_landmarks(face_image, face_locations, model) landmarks_as_tuples = [[(p.x, p.y) for p in landmark.parts()] for landmark in landmarks] if model == 'large': return [{'chin': points[0:17], 'left_eyebrow': points[17:22], 'right_eyebrow': points[22:27], 'nose_bridge': points[27:31], 'nose_tip': points[31:36], 'left_eye': points[36:42], 'right_eye': points[42:48], 'top_lip': points[48:55] + [points[64]] + [points[63]] + [points[62]] + [points[61]] + [points[60]], 'bottom_lip': points[54:60] + [points[48]] + [points[60]] + [points[67]] + [points[66]] + [points[65]] + [points[64]]} for points in landmarks_as_tuples] elif model == 'small': return [{'nose_tip': [points[4]], 'left_eye': points[2:4], 'right_eye': points[0:2]} for points in landmarks_as_tuples] else: raise ValueError("Invalid landmarks model type. Supported models are ['small', 'large'].")
11
# complete multi-line body codes for that function<fim_prefix># ''' Given an image returns the 128 - dimensional face encoding for each face in the image. Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) ''' def face_encodings(face_image, known_face_locations=None, num_jitters=1):<fim_suffix><fim_middle>
ageitgey/face_recognition
raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model='small') return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
12
# complete multi-line body codes for that function<fim_prefix># ''' Parses a string into a base - level structure type. Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted string and case-insensitive strings. >>> _parse_datatype_string("int ") IntegerType >>> _parse_datatype_string("INT ") IntegerType >>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ") StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true))) >>> _parse_datatype_string("a DOUBLE, b STRING") StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true))) >>> _parse_datatype_string("a: array< short>") StructType(List(StructField(a,ArrayType(ShortType,true),true))) >>> _parse_datatype_string(" map<string , string > ") MapType(StringType,StringType,true) >>> # Error cases >>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... >>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ParseException:... ''' def _parse_datatype_string(s):<fim_suffix><fim_middle>
apache/spark
sc = SparkContext._active_spark_context def from_ddl_schema(type_str): return _parse_datatype_json_string(sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json()) def from_ddl_datatype(type_str): return _parse_datatype_json_string(sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json()) try: return from_ddl_schema(s) except Exception as e: try: return from_ddl_datatype(s) except: try: return from_ddl_datatype('struct<%s>' % s.strip()) except: raise e
13
# complete multi-line body codes for that function<fim_prefix># ''' Return the Catalyst datatype from the size of integers. Return the Catalyst datatype from the size of integers. ''' def _int_size_to_type(size):<fim_suffix><fim_middle>
apache/spark
if size <= 8: return ByteType if size <= 16: return ShortType if size <= 32: return IntegerType if size <= 64: return LongType
14
# complete multi-line body codes for that function<fim_prefix># ''' Infer the DataType from obj Infer the DataType from obj ''' def _infer_type(obj):<fim_suffix><fim_middle>
apache/spark
if obj is None: return NullType() if hasattr(obj, '__UDT__'): return obj.__UDT__ dataType = _type_mappings.get(type(obj)) if dataType is DecimalType: return DecimalType(38, 18) elif dataType is not None: return dataType() if isinstance(obj, dict): for (key, value) in obj.items(): if key is not None and value is not None: return MapType(_infer_type(key), _infer_type(value), True) return MapType(NullType(), NullType(), True) elif isinstance(obj, list): for v in obj: if v is not None: return ArrayType(_infer_type(obj[0]), True) return ArrayType(NullType(), True) elif isinstance(obj, array): if obj.typecode in _array_type_mappings: return ArrayType(_array_type_mappings[obj.typecode](), False) else: raise TypeError('not supported type: array(%s)' % obj.typecode) else: try: return _infer_schema(obj) except TypeError: raise TypeError('not supported type: %s' % type(obj))
15
# complete multi-line body codes for that function<fim_prefix># ''' Infer the schema from dict namedtuple or object Infer the schema from dict/namedtuple/object ''' def _infer_schema(row, names=None):<fim_suffix><fim_middle>
apache/spark
if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, (tuple, list)): if hasattr(row, '__fields__'): items = zip(row.__fields__, tuple(row)) elif hasattr(row, '_fields'): items = zip(row._fields, tuple(row)) else: if names is None: names = ['_%d' % i for i in range(1, len(row) + 1)] elif len(names) < len(row): names.extend(('_%d' % i for i in range(len(names) + 1, len(row) + 1))) items = zip(names, row) elif hasattr(row, '__dict__'): items = sorted(row.__dict__.items()) else: raise TypeError('Can not infer schema for type: %s' % type(row)) fields = [StructField(k, _infer_type(v), True) for (k, v) in items] return StructType(fields)
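The two rows above (ids 14-15) are PySpark's schema-inference helpers. They are private, but createDataFrame exercises them; a sketch assuming a local Spark installation:

from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[1]").appName("infer-schema-demo").getOrCreate()

rows = [Row(name="Alice", age=11, scores=[1.0, 2.0]),
        Row(name="Bob", age=12, scores=[3.5])]

# The schema is inferred from the Row objects via _infer_schema/_infer_type.
df = spark.createDataFrame(rows)
df.printSchema()   # prints the inferred StructType (field order can vary across Spark versions)
spark.stop()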
16
# complete multi-line body codes for that function<fim_prefix># ''' Return whether there is NullType in dt Return whether there is NullType in `dt` or not ''' def _has_nulltype(dt):<fim_suffix><fim_middle>
apache/spark
if isinstance(dt, StructType): return any((_has_nulltype(f.dataType) for f in dt.fields)) elif isinstance(dt, ArrayType): return _has_nulltype(dt.elementType) elif isinstance(dt, MapType): return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType) else: return isinstance(dt, NullType)
17
# complete multi-line body codes for that function<fim_prefix># ''' Create a converter to drop the names of fields in obj Create a converter to drop the names of fields in obj ''' def _create_converter(dataType):<fim_suffix><fim_middle>
apache/spark
if not _need_converter(dataType): return lambda x: x if isinstance(dataType, ArrayType): conv = _create_converter(dataType.elementType) return lambda row: [conv(v) for v in row] elif isinstance(dataType, MapType): kconv = _create_converter(dataType.keyType) vconv = _create_converter(dataType.valueType) return lambda row: dict(((kconv(k), vconv(v)) for (k, v) in row.items())) elif isinstance(dataType, NullType): return lambda x: None elif not isinstance(dataType, StructType): return lambda x: x names = [f.name for f in dataType.fields] converters = [_create_converter(f.dataType) for f in dataType.fields] convert_fields = any((_need_converter(f.dataType) for f in dataType.fields)) def convert_struct(obj): if obj is None: return if isinstance(obj, (tuple, list)): if convert_fields: return tuple((conv(v) for (v, conv) in zip(obj, converters))) else: return tuple(obj) if isinstance(obj, dict): d = obj elif hasattr(obj, '__dict__'): d = obj.__dict__ else: raise TypeError('Unexpected obj type: %s' % type(obj)) if convert_fields: return tuple([conv(d.get(name)) for (name, conv) in zip(names, converters)]) else: return tuple([d.get(name) for name in names]) return convert_struct
18
# complete multi-line body codes for that function<fim_prefix># ''' Returns a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. Make a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. This verifier also checks the value of obj against datatype and raises a ValueError if it's not within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is not checked, so it will become infinity when cast to Java float if it overflows. >>> _make_type_verifier(StructType([]))(None) >>> _make_type_verifier(StringType())("") >>> _make_type_verifier(LongType())(0) >>> _make_type_verifier(ArrayType(ShortType()))(list(range(3))) >>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({}) >>> _make_type_verifier(StructType([]))(()) >>> _make_type_verifier(StructType([]))([]) >>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> # Check if numeric values are within the allowed range. >>> _make_type_verifier(ByteType())(12) >>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier( ... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1}) Traceback (most recent call last): ... ValueError:... >>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False) >>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... ''' def _make_type_verifier(dataType, nullable=True, name=None):<fim_suffix><fim_middle>
apache/spark
if name is None: new_msg = lambda msg: msg new_name = lambda n: 'field %s' % n else: new_msg = lambda msg: '%s: %s' % (name, msg) new_name = lambda n: 'field %s in %s' % (n, name) def verify_nullability(obj): if obj is None: if nullable: return True else: raise ValueError(new_msg('This field is not nullable, but got None')) else: return False _type = type(dataType) def assert_acceptable_types(obj): assert _type in _acceptable_types, new_msg('unknown datatype: %s for object %r' % (dataType, obj)) def verify_acceptable_types(obj): if type(obj) not in _acceptable_types[_type]: raise TypeError(new_msg('%s can not accept object %r in type %s' % (dataType, obj, type(obj)))) if isinstance(dataType, StringType): verify_value = lambda _: _ elif isinstance(dataType, UserDefinedType): verifier = _make_type_verifier(dataType.sqlType(), name=name) def verify_udf(obj): if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType): raise ValueError(new_msg('%r is not an instance of type %r' % (obj, dataType))) verifier(dataType.toInternal(obj)) verify_value = verify_udf elif isinstance(dataType, ByteType): def verify_byte(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -128 or obj > 127: raise ValueError(new_msg('object of ByteType out of range, got: %s' % obj)) verify_value = verify_byte elif isinstance(dataType, ShortType): def verify_short(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -32768 or obj > 32767: raise ValueError(new_msg('object of ShortType out of range, got: %s' % obj)) verify_value = verify_short elif isinstance(dataType, IntegerType): def verify_integer(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) if obj < -2147483648 or obj > 2147483647: raise ValueError(new_msg('object of IntegerType out of range, got: %s' % obj)) verify_value = verify_integer elif isinstance(dataType, ArrayType): element_verifier = _make_type_verifier(dataType.elementType, dataType.containsNull, name='element in array %s' % name) def verify_array(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for i in obj: element_verifier(i) verify_value = verify_array elif isinstance(dataType, MapType): key_verifier = _make_type_verifier(dataType.keyType, False, name='key of map %s' % name) value_verifier = _make_type_verifier(dataType.valueType, dataType.valueContainsNull, name='value of map %s' % name) def verify_map(obj): assert_acceptable_types(obj) verify_acceptable_types(obj) for (k, v) in obj.items(): key_verifier(k) value_verifier(v) verify_value = verify_map elif isinstance(dataType, StructType): verifiers = [] for f in dataType.fields: verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name)) verifiers.append((f.name, verifier)) def verify_struct(obj): assert_acceptable_types(obj) if isinstance(obj, dict): for (f, verifier) in verifiers: verifier(obj.get(f)) elif isinstance(obj, Row) and getattr(obj, '__from_dict__', False): for (f, verifier) in verifiers: verifier(obj[f]) elif isinstance(obj, (tuple, list)): if len(obj) != len(verifiers): raise ValueError(new_msg('Length of object (%d) does not match with length of fields (%d)' % (len(obj), len(verifiers)))) for (v, (_, verifier)) in zip(obj, verifiers): verifier(v) elif hasattr(obj, '__dict__'): d = obj.__dict__ for (f, verifier) in verifiers: verifier(d.get(f)) else: raise TypeError(new_msg('StructType can not accept object %r in type %s' % (obj, type(obj)))) verify_value = verify_struct else: def verify_default(obj): assert_acceptable_types(obj) 
verify_acceptable_types(obj) verify_value = verify_default def verify(obj): if not verify_nullability(obj): verify_value(obj) return verify
19
# complete multi-line body codes for that function<fim_prefix># ''' Convert Spark data type to Arrow type Convert Spark data type to pyarrow type ''' def to_arrow_type(dt):<fim_suffix><fim_middle>
apache/spark
import pyarrow as pa if type(dt) == BooleanType: arrow_type = pa.bool_() elif type(dt) == ByteType: arrow_type = pa.int8() elif type(dt) == ShortType: arrow_type = pa.int16() elif type(dt) == IntegerType: arrow_type = pa.int32() elif type(dt) == LongType: arrow_type = pa.int64() elif type(dt) == FloatType: arrow_type = pa.float32() elif type(dt) == DoubleType: arrow_type = pa.float64() elif type(dt) == DecimalType: arrow_type = pa.decimal128(dt.precision, dt.scale) elif type(dt) == StringType: arrow_type = pa.string() elif type(dt) == BinaryType: arrow_type = pa.binary() elif type(dt) == DateType: arrow_type = pa.date32() elif type(dt) == TimestampType: arrow_type = pa.timestamp('us', tz='UTC') elif type(dt) == ArrayType: if type(dt.elementType) in [StructType, TimestampType]: raise TypeError('Unsupported type in conversion to Arrow: ' + str(dt)) arrow_type = pa.list_(to_arrow_type(dt.elementType)) elif type(dt) == StructType: if any((type(field.dataType) == StructType for field in dt)): raise TypeError('Nested StructType not supported in conversion to Arrow') fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in dt] arrow_type = pa.struct(fields) else: raise TypeError('Unsupported type in conversion to Arrow: ' + str(dt)) return arrow_type
20
# complete multi-line body codes for that function<fim_prefix># ''' Convert a Spark schema from Spark to Arrow Convert a schema from Spark to Arrow ''' def to_arrow_schema(schema):<fim_suffix><fim_middle>
apache/spark
import pyarrow as pa fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable) for field in schema] return pa.schema(fields)
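The rows with ids 19-20 map Spark SQL types onto pyarrow types. A self-contained sketch that mirrors a slice of that mapping without importing the private helpers (assumes pyarrow and pyspark are installed):

import pyarrow as pa
from pyspark.sql.types import (IntegerType, StringType, StructField, StructType,
                               TimestampType)

# The same correspondences the rows above encode:
# IntegerType -> int32, StringType -> string, TimestampType -> timestamp('us', tz='UTC').
mapping = {IntegerType: pa.int32(), StringType: pa.string(),
           TimestampType: pa.timestamp('us', tz='UTC')}

spark_schema = StructType([
    StructField("id", IntegerType(), nullable=False),
    StructField("name", StringType(), nullable=True),
    StructField("ts", TimestampType(), nullable=True),
])

arrow_fields = [pa.field(f.name, mapping[type(f.dataType)], nullable=f.nullable)
                for f in spark_schema]
print(pa.schema(arrow_fields))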
21
# complete multi-line body codes for that function<fim_prefix># ''' Convert a pyarrow type to Spark data type. Convert pyarrow type to Spark data type. ''' def from_arrow_type(at):<fim_suffix><fim_middle>
apache/spark
import pyarrow.types as types if types.is_boolean(at): spark_type = BooleanType() elif types.is_int8(at): spark_type = ByteType() elif types.is_int16(at): spark_type = ShortType() elif types.is_int32(at): spark_type = IntegerType() elif types.is_int64(at): spark_type = LongType() elif types.is_float32(at): spark_type = FloatType() elif types.is_float64(at): spark_type = DoubleType() elif types.is_decimal(at): spark_type = DecimalType(precision=at.precision, scale=at.scale) elif types.is_string(at): spark_type = StringType() elif types.is_binary(at): spark_type = BinaryType() elif types.is_date32(at): spark_type = DateType() elif types.is_timestamp(at): spark_type = TimestampType() elif types.is_list(at): if types.is_timestamp(at.value_type): raise TypeError('Unsupported type in conversion from Arrow: ' + str(at)) spark_type = ArrayType(from_arrow_type(at.value_type)) elif types.is_struct(at): if any((types.is_struct(field.type) for field in at)): raise TypeError('Nested StructType not supported in conversion from Arrow: ' + str(at)) return StructType([StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in at]) else: raise TypeError('Unsupported type in conversion from Arrow: ' + str(at)) return spark_type
22
# complete multi-line body codes for that function<fim_prefix># ''' Convert schema from Arrow to Spark. Convert schema from Arrow to Spark. ''' def from_arrow_schema(arrow_schema):<fim_suffix><fim_middle>
apache/spark
return StructType([StructField(field.name, from_arrow_type(field.type), nullable=field.nullable) for field in arrow_schema])
23
# complete multi-line body codes for that function<fim_prefix># ''' Convert timezone aware timestamps to timezone - naive in the specified timezone or local timezone. Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is returned. :param s: pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series that have been converted to tz-naive ''' def _check_series_localize_timestamps(s, timezone):<fim_suffix><fim_middle>
apache/spark
from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() from pandas.api.types import is_datetime64tz_dtype tz = timezone or _get_local_timezone() if is_datetime64tz_dtype(s.dtype): return s.dt.tz_convert(tz).dt.tz_localize(None) else: return s
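The row above (id 23) strips time-zone information by converting a tz-aware series to the target zone and then dropping the zone. The core pandas operation, shown stand-alone (the time-zone names are just examples):

import pandas as pd

s = pd.Series(pd.to_datetime(["2019-01-01 12:00", "2019-06-01 12:00"]).tz_localize("UTC"))
print(s.dtype)       # datetime64[ns, UTC]

# The same convert-then-drop step used by _check_series_localize_timestamps.
naive = s.dt.tz_convert("America/Los_Angeles").dt.tz_localize(None)
print(naive.dtype)   # datetime64[ns]
print(naive)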
24
# complete multi-line body codes for that function<fim_prefix># ''' Convert timezone aware timestamps to timezone - naive in the specified timezone or local timezone - naive in the specified timezone or local timezone - naive in the specified timezone. Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone :param pdf: pandas.DataFrame :param timezone: the timezone to convert. if None then use local timezone :return pandas.DataFrame where any timezone aware columns have been converted to tz-naive ''' def _check_dataframe_localize_timestamps(pdf, timezone):<fim_suffix><fim_middle>
apache/spark
from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() for (column, series) in pdf.iteritems(): pdf[column] = _check_series_localize_timestamps(series, timezone) return pdf
25
# complete multi-line body codes for that function<fim_prefix># ''' Convert a tz - naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage. Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage :param s: a pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone ''' def _check_series_convert_timestamps_internal(s, timezone):<fim_suffix><fim_middle>
apache/spark
from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype if is_datetime64_dtype(s.dtype): tz = timezone or _get_local_timezone() return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC') elif is_datetime64tz_dtype(s.dtype): return s.dt.tz_convert('UTC') else: return s
26
# complete multi-line body codes for that function<fim_prefix># ''' Convert timestamp to timezone - naive in the specified timezone or local timezone. Convert timestamp to timezone-naive in the specified timezone or local timezone :param s: a pandas.Series :param from_timezone: the timezone to convert from. if None then use local timezone :param to_timezone: the timezone to convert to. if None then use local timezone :return pandas.Series where if it is a timestamp, has been converted to tz-naive ''' def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):<fim_suffix><fim_middle>
apache/spark
from pyspark.sql.utils import require_minimum_pandas_version require_minimum_pandas_version() import pandas as pd from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype from_tz = from_timezone or _get_local_timezone() to_tz = to_timezone or _get_local_timezone() if is_datetime64tz_dtype(s.dtype): return s.dt.tz_convert(to_tz).dt.tz_localize(None) elif is_datetime64_dtype(s.dtype) and from_tz != to_tz: return s.apply(lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None) if ts is not pd.NaT else pd.NaT) else: return s
27
# complete multi-line body codes for that function<fim_prefix># ''' Constructs a new StructType object by adding new elements to the list of fields. Construct a StructType by adding new elements to it to define the schema. The method accepts either: a) A single parameter which is a StructField object. b) Between 2 and 4 parameters as (name, data_type, nullable (optional), metadata(optional). The data_type parameter may be either a String or a DataType object. >>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None) >>> struct2 = StructType([StructField("f1", StringType(), True), \\ ... StructField("f2", StringType(), True, None)]) >>> struct1 == struct2 True >>> struct1 = StructType().add(StructField("f1", StringType(), True)) >>> struct2 = StructType([StructField("f1", StringType(), True)]) >>> struct1 == struct2 True >>> struct1 = StructType().add("f1", "string", True) >>> struct2 = StructType([StructField("f1", StringType(), True)]) >>> struct1 == struct2 True :param field: Either the name of the field or a StructField object :param data_type: If present, the DataType of the StructField to create :param nullable: Whether the field to add should be nullable (default True) :param metadata: Any additional metadata (default None) :return: a new updated StructType ''' def add(self, field, data_type=None, nullable=True, metadata=None):<fim_suffix><fim_middle>
apache/spark
if isinstance(field, StructField): self.fields.append(field) self.names.append(field.name) else: if isinstance(field, str) and data_type is None: raise ValueError('Must specify DataType if passing name of struct_field to create.') if isinstance(data_type, str): data_type_f = _parse_datatype_json_value(data_type) else: data_type_f = data_type self.fields.append(StructField(field, data_type_f, nullable, metadata)) self.names.append(field) self._needConversion = [f.needConversion() for f in self] self._needSerializeAnyField = any(self._needConversion) return self
28
# complete multi-line body codes for that function<fim_prefix># ''' Cache the sqlType into class because it s heavy used in toInternal. Cache the sqlType() into class, because it's heavy used in `toInternal`. ''' def _cachedSqlType(cls):<fim_suffix><fim_middle>
apache/spark
if not hasattr(cls, '_cached_sql_type'): cls._cached_sql_type = cls.sqlType() return cls._cached_sql_type
29
# complete multi-line body codes for that function<fim_prefix># ''' Converts the table into a dict. Return as an dict :param recursive: turns the nested Row as dict (default: False). >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11} True >>> row = Row(key=1, value=Row(name='a', age=2)) >>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')} True >>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}} True ''' def asDict(self, recursive=False):<fim_suffix><fim_middle>
apache/spark
if not hasattr(self, '__fields__'): raise TypeError('Cannot convert a Row class into dict') if recursive: def conv(obj): if isinstance(obj, Row): return obj.asDict(True) elif isinstance(obj, list): return [conv(o) for o in obj] elif isinstance(obj, dict): return dict(((k, conv(v)) for (k, v) in obj.items())) else: return obj return dict(zip(self.__fields__, (conv(o) for o in self))) else: return dict(zip(self.__fields__, self))
30
# complete multi-line body codes for that function<fim_prefix># ''' Returns the summary of the LinearRegressionModel. Gets summary (e.g. residuals, mse, r-squared ) of model on training set. An exception is thrown if `trainingSummary is None`. ''' def summary(self):<fim_suffix><fim_middle>
apache/spark
if self.hasSummary: return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary) else: raise RuntimeError('No training summary available for this %s' % self.__class__.__name__)
31
# complete multi-line body codes for that function<fim_prefix># ''' Evaluates the model on a test dataset. Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` ''' def evaluate(self, dataset):<fim_suffix><fim_middle>
apache/spark
if not isinstance(dataset, DataFrame): raise ValueError('dataset must be a DataFrame but got %s.' % type(dataset)) java_lr_summary = self._call_java('evaluate', dataset) return LinearRegressionSummary(java_lr_summary)
32
# complete multi-line body codes for that function<fim_prefix># ''' Returns a GeneralizedLinearRegressionTrainingSummary object for this training set. Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`. ''' def summary(self):<fim_suffix><fim_middle>
apache/spark
if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary(super(GeneralizedLinearRegressionModel, self).summary) else: raise RuntimeError('No training summary available for this %s' % self.__class__.__name__)
33
# complete multi-line body codes for that function<fim_prefix># ''' Evaluates the model on a test dataset. Evaluates the model on a test dataset. :param dataset: Test dataset to evaluate model on, where dataset is an instance of :py:class:`pyspark.sql.DataFrame` ''' def evaluate(self, dataset):<fim_suffix><fim_middle>
apache/spark
if not isinstance(dataset, DataFrame): raise ValueError('dataset must be a DataFrame but got %s.' % type(dataset)) java_glr_summary = self._call_java('evaluate', dataset) return GeneralizedLinearRegressionSummary(java_glr_summary)
34
# complete multi-line body codes for that function<fim_prefix># ''' Get all the directories that are local Get all the directories ''' def _get_local_dirs(sub):<fim_suffix><fim_middle>
apache/spark
path = os.environ.get('SPARK_LOCAL_DIRS', '/tmp') dirs = path.split(',') if len(dirs) > 1: rnd = random.Random(os.getpid() + id(dirs)) random.shuffle(dirs, rnd.random) return [os.path.join(d, 'python', str(os.getpid()), sub) for d in dirs]
35
# complete multi-line body codes for that function<fim_prefix># ''' Choose one directory for spill by number n Choose one directory for spill by number n ''' def _get_spill_dir(self, n):<fim_suffix><fim_middle>
apache/spark
return os.path.join(self.localdirs[n % len(self.localdirs)], str(n))
36
# complete multi-line body codes for that function<fim_prefix># ''' Combine the items by creator and combiner Combine the items by creator and combiner ''' def mergeValues(self, iterator):<fim_suffix><fim_middle>
apache/spark
(creator, comb) = (self.agg.createCombiner, self.agg.mergeValue) (c, data, pdata, hfun, batch) = (0, self.data, self.pdata, self._partition, self.batch) limit = self.memory_limit for (k, v) in iterator: d = pdata[hfun(k)] if pdata else data d[k] = comb(d[k], v) if k in d else creator(v) c += 1 if c >= batch: if get_used_memory() >= limit: self._spill() limit = self._next_limit() batch /= 2 c = 0 else: batch *= 1.5 if get_used_memory() >= limit: self._spill()
37
# complete multi-line body codes for that function<fim_prefix># ''' Merge a set of keys and values by merging them into a single object. Merge (K,V) pair by mergeCombiner ''' def mergeCombiners(self, iterator, limit=None):<fim_suffix><fim_middle>
apache/spark
if limit is None: limit = self.memory_limit (comb, hfun, objsize) = (self.agg.mergeCombiners, self._partition, self._object_size) (c, data, pdata, batch) = (0, self.data, self.pdata, self.batch) for (k, v) in iterator: d = pdata[hfun(k)] if pdata else data d[k] = comb(d[k], v) if k in d else v if not limit: continue c += objsize(v) if c > batch: if get_used_memory() > limit: self._spill() limit = self._next_limit() batch /= 2 c = 0 else: batch *= 1.5 if limit and get_used_memory() >= limit: self._spill()
38
# complete multi-line body codes for that function<fim_prefix># ''' This function will dump already partitioned data into disks. It will dump the data into the disks and the memory used by the memory. dump already partitioned data into disks. It will dump the data in batch for better performance. ''' def _spill(self):<fim_suffix><fim_middle>
apache/spark
global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self.pdata: streams = [open(os.path.join(path, str(i)), 'wb') for i in range(self.partitions)] for (k, v) in self.data.items(): h = self._partition(k) self.serializer.dump_stream([(k, v)], streams[h]) for s in streams: DiskBytesSpilled += s.tell() s.close() self.data.clear() self.pdata.extend([{} for i in range(self.partitions)]) else: for i in range(self.partitions): p = os.path.join(path, str(i)) with open(p, 'wb') as f: self.serializer.dump_stream(iter(self.pdata[i].items()), f) self.pdata[i].clear() DiskBytesSpilled += os.path.getsize(p) self.spills += 1 gc.collect() MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
39
# complete multi-line body codes for that function<fim_prefix># ''' Return all items as iterator Return all merged items as iterator ''' def items(self):<fim_suffix><fim_middle>
apache/spark
if not self.pdata and (not self.spills): return iter(self.data.items()) return self._external_items()
40
# complete multi-line body codes for that function<fim_prefix># ''' Return all partitioned items as iterator Return all partitioned items as iterator ''' def _external_items(self):<fim_suffix><fim_middle>
apache/spark
assert not self.data if any(self.pdata): self._spill() self.pdata = [] try: for i in range(self.partitions): for v in self._merged_items(i): yield v self.data.clear() for j in range(self.spills): path = self._get_spill_dir(j) os.remove(os.path.join(path, str(i))) finally: self._cleanup()
41
# complete multi-line body codes for that function<fim_prefix># ''' Merge the partitioned items and return the as iterator merge the partitioned items and return the as iterator If one partition can not be fit in memory, then them will be partitioned and merged recursively. ''' def _recursive_merged_items(self, index):<fim_suffix><fim_middle>
apache/spark
subdirs = [os.path.join(d, 'parts', str(index)) for d in self.localdirs] m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs, self.scale * self.partitions, self.partitions, self.batch) m.pdata = [{} for _ in range(self.partitions)] limit = self._next_limit() for j in range(self.spills): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb') as f: m.mergeCombiners(self.serializer.load_stream(f), 0) if get_used_memory() > limit: m._spill() limit = self._next_limit() return m._external_items()
42
# complete multi-line body codes for that function<fim_prefix># ''' Choose one directory for spill by number n Choose one directory for spill by number n ''' def _get_path(self, n):<fim_suffix><fim_middle>
apache/spark
d = self.local_dirs[n % len(self.local_dirs)] if not os.path.exists(d): os.makedirs(d) return os.path.join(d, str(n))
43
# complete multi-line body codes for that function<fim_prefix># ''' Sort the elements in iterator do external sort when the memory is below the limit. Sort the elements in iterator, do external sort when the memory goes above the limit. ''' def sorted(self, iterator, key=None, reverse=False):<fim_suffix><fim_middle>
apache/spark
global MemoryBytesSpilled, DiskBytesSpilled (batch, limit) = (100, self._next_limit()) (chunks, current_chunk) = ([], []) iterator = iter(iterator) while True: chunk = list(itertools.islice(iterator, batch)) current_chunk.extend(chunk) if len(chunk) < batch: break used_memory = get_used_memory() if used_memory > limit: current_chunk.sort(key=key, reverse=reverse) path = self._get_path(len(chunks)) with open(path, 'wb') as f: self.serializer.dump_stream(current_chunk, f) def load(f): for v in self.serializer.load_stream(f): yield v f.close() chunks.append(load(open(path, 'rb'))) current_chunk = [] MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20 DiskBytesSpilled += os.path.getsize(path) os.unlink(path) elif not chunks: batch = min(int(batch * 1.5), 10000) current_chunk.sort(key=key, reverse=reverse) if not chunks: return current_chunk if current_chunk: chunks.append(iter(current_chunk)) return heapq.merge(chunks, key=key, reverse=reverse)
44
# complete multi-line body codes for that function<fim_prefix># ''' dump the values into disk dump the values into disk ''' def _spill(self):<fim_suffix><fim_middle>
apache/spark
global MemoryBytesSpilled, DiskBytesSpilled if self._file is None: self._open_file() used_memory = get_used_memory() pos = self._file.tell() self._ser.dump_stream(self.values, self._file) self.values = [] gc.collect() DiskBytesSpilled += self._file.tell() - pos MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
45
# complete multi-line body codes for that function<fim_prefix># ''' Dump already partitioned data into disks. dump already partitioned data into disks. ''' def _spill(self):<fim_suffix><fim_middle>
apache/spark
global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self.pdata: streams = [open(os.path.join(path, str(i)), 'wb') for i in range(self.partitions)] self._sorted = len(self.data) < self.SORT_KEY_LIMIT if self._sorted: self.serializer = self.flattened_serializer() for k in sorted(self.data.keys()): h = self._partition(k) self.serializer.dump_stream([(k, self.data[k])], streams[h]) else: for (k, v) in self.data.items(): h = self._partition(k) self.serializer.dump_stream([(k, v)], streams[h]) for s in streams: DiskBytesSpilled += s.tell() s.close() self.data.clear() self.pdata.extend([{} for i in range(self.partitions)]) else: for i in range(self.partitions): p = os.path.join(path, str(i)) with open(p, 'wb') as f: if self._sorted: sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0)) self.serializer.dump_stream(sorted_items, f) else: self.serializer.dump_stream(self.pdata[i].items(), f) self.pdata[i].clear() DiskBytesSpilled += os.path.getsize(p) self.spills += 1 gc.collect() MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
46
# complete multi-line body codes for that function<fim_prefix># ''' Load a partition from disk then sort and group by key load a partition from disk, then sort and group by key ''' def _merge_sorted_items(self, index):<fim_suffix><fim_middle>
apache/spark
def load_partition(j): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb', 65536) as f: for v in self.serializer.load_stream(f): yield v disk_items = [load_partition(j) for j in range(self.spills)] if self._sorted: sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0)) else: ser = self.flattened_serializer() sorter = ExternalSorter(self.memory_limit, ser) sorted_items = sorter.sorted(itertools.chain(*disk_items), key=operator.itemgetter(0)) return ((k, vs) for (k, vs) in GroupByKey(sorted_items))
47
# complete multi-line body codes for that function<fim_prefix># ''' This function is called by the worker process. Called by a worker process after the fork(). ''' def worker(sock, authenticated):<fim_suffix><fim_middle>
apache/spark
signal.signal(SIGHUP, SIG_DFL) signal.signal(SIGCHLD, SIG_DFL) signal.signal(SIGTERM, SIG_DFL) signal.signal(SIGINT, signal.default_int_handler) infile = os.fdopen(os.dup(sock.fileno()), 'rb', 65536) outfile = os.fdopen(os.dup(sock.fileno()), 'wb', 65536) if not authenticated: client_secret = UTF8Deserializer().loads(infile) if os.environ['PYTHON_WORKER_FACTORY_SECRET'] == client_secret: write_with_length('ok'.encode('utf-8'), outfile) outfile.flush() else: write_with_length('err'.encode('utf-8'), outfile) outfile.flush() sock.close() return 1 exit_code = 0 try: worker_main(infile, outfile) except SystemExit as exc: exit_code = compute_real_exit_code(exc.code) finally: try: outfile.flush() except Exception: pass return exit_code
48
# complete multi-line body codes for that function<fim_prefix># ''' This function returns consistent hash code for builtin types and tuple with None. This function returns consistent hash code for builtin types, especially for None and tuple with None. The algorithm is similar to that one used by CPython 2.7 >>> portable_hash(None) 0 >>> portable_hash((None, 1)) & 0xffffffff 219750521 ''' def portable_hash(x):<fim_suffix><fim_middle>
apache/spark
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ: raise Exception('Randomness of hash of string should be disabled via PYTHONHASHSEED') if x is None: return 0 if isinstance(x, tuple): h = 3430008 for i in x: h ^= portable_hash(i) h *= 1000003 h &= sys.maxsize h ^= len(x) if h == -1: h = -2 return int(h) return hash(x)
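portable_hash (id 48) is small enough to reproduce and test directly. The sketch below drops the PYTHONHASHSEED guard so it runs as-is; hashing of None, integers, and tuples of them is deterministic anyway, so the doctest values still hold:

import sys

def portable_hash(x):
    # Same algorithm as the row above, minus the PYTHONHASHSEED check.
    if x is None:
        return 0
    if isinstance(x, tuple):
        h = 3430008
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)

print(portable_hash(None))                    # 0
print(portable_hash((None, 1)) & 0xffffffff)  # 219750521, as in the docstring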
49
# complete multi-line body codes for that function<fim_prefix># ''' Parse a memory string in the format supported by Java and return the value in MiB. Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MiB >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048 ''' def _parse_memory(s):<fim_suffix><fim_middle>
apache/spark
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024} if s[-1].lower() not in units: raise ValueError('invalid format: ' + s) return int(float(s[:-1]) * units[s[-1].lower()])
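The memory-string parser from the row above (id 49), reproduced verbatim and exercised on the documented cases plus one extra:

def _parse_memory(s):
    # Convert a JVM-style memory string (e.g. "512m", "2g") to MiB.
    units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
    if s[-1].lower() not in units:
        raise ValueError('invalid format: ' + s)
    return int(float(s[:-1]) * units[s[-1].lower()])

print(_parse_memory("256m"))  # 256
print(_parse_memory("2g"))    # 2048
print(_parse_memory("512k"))  # 0 -- kilobytes round down to whole MiB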
50
# complete multi-line body codes for that function<fim_prefix># ''' Ignore the u prefix of string in doc tests Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3 ''' def ignore_unicode_prefix(f):<fim_suffix><fim_middle>
apache/spark
if sys.version >= '3': literal_re = re.compile("(\\W|^)[uU](['])", re.UNICODE) f.__doc__ = literal_re.sub('\\1\\2', f.__doc__) return f
51
# complete multi-line body codes for that function<fim_prefix># ''' Persist this RDD with the default storage level. Persist this RDD with the default storage level (C{MEMORY_ONLY}). ''' def cache(self):<fim_suffix><fim_middle>
apache/spark
self.is_cached = True self.persist(StorageLevel.MEMORY_ONLY) return self
52
# complete multi-line body codes for that function<fim_prefix># ''' Set this RDD s storage level to persist its values across operations . Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_ONLY}). >>> rdd = sc.parallelize(["b", "a", "c"]) >>> rdd.persist().is_cached True ''' def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):<fim_suffix><fim_middle>
apache/spark
self.is_cached = True javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel) self._jrdd.persist(javaStorageLevel) return self
53
# complete multi-line body codes for that function<fim_prefix># ''' Mark the RDD as non - persistent and remove all blocks for the current entry from memory and disk. Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. ''' def unpersist(self, blocking=False):<fim_suffix><fim_middle>
apache/spark
self.is_cached = False self._jrdd.unpersist(blocking) return self
54
# complete multi-line body codes for that function<fim_prefix># ''' Gets the name of the file to which this RDD was checkpointed. Gets the name of the file to which this RDD was checkpointed Not defined if RDD is checkpointed locally. ''' def getCheckpointFile(self):<fim_suffix><fim_middle>
apache/spark
checkpointFile = self._jrdd.rdd().getCheckpointFile() if checkpointFile.isDefined(): return checkpointFile.get()
55
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD by applying a function to each element of this RDD. Return a new RDD by applying a function to each element of this RDD. >>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)] ''' def map(self, f, preservesPartitioning=False):<fim_suffix><fim_middle>
apache/spark
def func(_, iterator): return map(fail_on_stopiteration(f), iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
56
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD by first applying a function to all elements of this RDD and then flattening the results. Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect()) [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)] ''' def flatMap(self, f, preservesPartitioning=False):<fim_suffix><fim_middle>
apache/spark
def func(s, iterator): return chain.from_iterable(map(fail_on_stopiteration(f), iterator)) return self.mapPartitionsWithIndex(func, preservesPartitioning)
57
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD by applying a function to each partition of this RDD. Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] ''' def mapPartitions(self, f, preservesPartitioning=False):<fim_suffix><fim_middle>
apache/spark
def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning)
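The rows with ids 55-57 are thin wrappers over mapPartitionsWithIndex. A hedged sketch assuming a local SparkContext, mirroring the doctests:

from pyspark import SparkContext

sc = SparkContext("local[1]", "rdd-demo")
rdd = sc.parallelize([1, 2, 3, 4], 2)

print(sorted(rdd.map(lambda x: (x, 1)).collect()))           # [(1, 1), (2, 1), (3, 1), (4, 1)]
print(sorted(rdd.flatMap(lambda x: range(1, x)).collect()))  # [1, 1, 1, 2, 2, 3]

def part_sum(iterator):
    # One value per partition, as in the mapPartitions doctest.
    yield sum(iterator)

print(rdd.mapPartitions(part_sum).collect())                 # [3, 7]
sc.stop()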
58
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD by applying a function to each partition of this RDD while tracking the index of the original partition. Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithSplit(f).sum() 6 ''' def mapPartitionsWithSplit(self, f, preservesPartitioning=False):<fim_suffix><fim_middle>
apache/spark
warnings.warn('mapPartitionsWithSplit is deprecated; use mapPartitionsWithIndex instead', DeprecationWarning, stacklevel=2) return self.mapPartitionsWithIndex(f, preservesPartitioning)
59
# complete multi-line body codes for that function<fim_prefix># ''' Return an RDD containing the distinct elements in this RDD. Return a new RDD containing the distinct elements in this RDD. >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect()) [1, 2, 3] ''' def distinct(self, numPartitions=None):<fim_suffix><fim_middle>
apache/spark
return self.map(lambda x: (x, None)).reduceByKey(lambda x, _: x, numPartitions).map(lambda x: x[0])
60
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD with the specified fraction of the total number of elements in this RDD. Return a sampled subset of this RDD. :param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without replacement: probability that each element is chosen; fraction must be [0, 1] with replacement: expected number of times each element is chosen; fraction must be >= 0 :param seed: seed for the random number generator .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. >>> rdd = sc.parallelize(range(100), 4) >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14 True ''' def sample(self, withReplacement, fraction, seed=None):<fim_suffix><fim_middle>
apache/spark
assert fraction >= 0.0, 'Negative fraction value: %s' % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
61
# complete multi-line body codes for that function<fim_prefix># ''' Randomly splits this RDD with the provided weights. Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1) >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17) >>> len(rdd1.collect() + rdd2.collect()) 500 >>> 150 < rdd1.count() < 250 True >>> 250 < rdd2.count() < 350 True ''' def randomSplit(self, weights, seed=None):<fim_suffix><fim_middle>
apache/spark
s = float(sum(weights))
cweights = [0.0]
for w in weights:
    cweights.append(cweights[-1] + w / s)
if seed is None:
    seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True) for (lb, ub) in zip(cweights, cweights[1:])]
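The completion above first turns the weights into cumulative boundaries in [0, 1]; a small stand-alone illustration of that normalization step (hypothetical weights, no SparkContext involved):

weights = [2, 3]                      # hypothetical split weights
s = float(sum(weights))
cweights = [0.0]
for w in weights:
    cweights.append(cweights[-1] + w / s)
# Each split keeps the elements whose per-element random draw falls in [lb, ub).
print(list(zip(cweights, cweights[1:])))  # [(0.0, 0.4), (0.4, 1.0)]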
62
# complete multi-line body codes for that function<fim_prefix># ''' Return a fixed - size sampled subset of this RDD. Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> rdd = sc.parallelize(range(0, 10)) >>> len(rdd.takeSample(True, 20, 1)) 20 >>> len(rdd.takeSample(False, 5, 2)) 5 >>> len(rdd.takeSample(False, 15, 3)) 10 ''' def takeSample(self, withReplacement, num, seed=None):<fim_suffix><fim_middle>
apache/spark
numStDev = 10.0
if num < 0:
    raise ValueError('Sample size cannot be negative.')
elif num == 0:
    return []
initialCount = self.count()
if initialCount == 0:
    return []
rand = random.Random(seed)
if not withReplacement and num >= initialCount:
    samples = self.collect()
    rand.shuffle(samples)
    return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
    raise ValueError('Sample size cannot be greater than %d.' % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
while len(samples) < num:
    seed = rand.randint(0, sys.maxsize)
    samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
63
# complete multi-line body codes for that function<fim_prefix># ''' Compute the sampling rate for a specific sample size. Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such that - when sampling with replacement, we're drawing each data point with prob_i ~ Pois(q), where we want to guarantee Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to total), i.e. the failure rate of not having a sufficiently large sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee 0.9999 success rate for num > 12, but we need a slightly larger q (9 empirically determined). - when sampling without replacement, we're drawing each data point with prob_i ~ Binomial(total, fraction) and our choice of q guarantees 1-delta, or 0.9999 success rate, where success rate is defined the same as in sampling with replacement. ''' def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):<fim_suffix><fim_middle>
apache/spark
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
    numStDev = 5
    if sampleSizeLowerBound < 12:
        numStDev = 9
    return fraction + numStDev * sqrt(fraction / total)
else:
    delta = 5e-05
    gamma = -log(delta) / total
    return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
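To see what the oversampling formula above actually produces, here is the same arithmetic reproduced locally with hypothetical numbers (the helper name compute_fraction is illustrative only):

from math import sqrt, log

def compute_fraction(sample_size, total, with_replacement):
    # Same arithmetic as the completion above, usable for a quick sanity check.
    fraction = float(sample_size) / total
    if with_replacement:
        num_std_dev = 9 if sample_size < 12 else 5
        return fraction + num_std_dev * sqrt(fraction / total)
    delta = 5e-05
    gamma = -log(delta) / total
    return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))

print(round(compute_fraction(20, 1000, True), 4))   # ~0.0424, noticeably above the naive 0.02
print(round(compute_fraction(20, 1000, False), 4))  # ~0.0521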
64
# complete multi-line body codes for that function<fim_prefix># ''' Return the union of this RDD and another RDD. Return the union of this RDD and another one. >>> rdd = sc.parallelize([1, 1, 2, 3]) >>> rdd.union(rdd).collect() [1, 1, 2, 3, 1, 1, 2, 3] ''' def union(self, other):<fim_suffix><fim_middle>
apache/spark
if self._jrdd_deserializer == other._jrdd_deserializer:
    rdd = RDD(self._jrdd.union(other._jrdd), self.ctx, self._jrdd_deserializer)
else:
    self_copy = self._reserialize()
    other_copy = other._reserialize()
    rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx, self.ctx.serializer)
if self.partitioner == other.partitioner and self.getNumPartitions() == rdd.getNumPartitions():
    rdd.partitioner = self.partitioner
return rdd
65
# complete multi-line body codes for that function<fim_prefix># ''' Return the intersection of this RDD and another RDD. Return the intersection of this RDD and another one. The output will not contain any duplicate elements, even if the input RDDs did. .. note:: This method performs a shuffle internally. >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5]) >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8]) >>> rdd1.intersection(rdd2).collect() [1, 2, 3] ''' def intersection(self, other):<fim_suffix><fim_middle>
apache/spark
return self.map(lambda v: (v, None)).cogroup(other.map(lambda v: (v, None))).filter(lambda k_vs: all(k_vs[1])).keys()
66
# complete multi-line body codes for that function<fim_prefix># ''' Repartition the RDD according to the given partitioner and within each resulting partition sort records by their keys. Repartition the RDD according to the given partitioner and, within each resulting partition, sort records by their keys. >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]) >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True) >>> rdd2.glom().collect() [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]] ''' def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash, ascending=True, keyfunc=lambda x: x):<fim_suffix><fim_middle>
apache/spark
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get('spark.python.worker.memory', '512m'))
serializer = self._jrdd_deserializer

def sortPartition(iterator):
    sort = ExternalSorter(memory * 0.9, serializer).sorted
    return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=not ascending))

return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
67
# complete multi-line body codes for that function<fim_prefix># ''' Sorts this RDD by key. Sorts this RDD, which is assumed to consist of (key, value) pairs. >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortByKey().first() ('1', 3) >>> sc.parallelize(tmp).sortByKey(True, 1).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortByKey(True, 2).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)] >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)]) >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect() [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)] ''' def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):<fim_suffix><fim_middle>
apache/spark
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer

def sortPartition(iterator):
    sort = ExternalSorter(memory * 0.9, serializer).sorted
    return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=not ascending))

if numPartitions == 1:
    if self.getNumPartitions() > 1:
        self = self.coalesce(1)
    return self.mapPartitions(sortPartition, True)
rddSize = self.count()
if not rddSize:
    return self
maxSampleSize = numPartitions * 20.0
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)] for i in range(0, numPartitions - 1)]

def rangePartitioner(k):
    p = bisect.bisect_left(bounds, keyfunc(k))
    if ascending:
        return p
    else:
        return numPartitions - 1 - p

return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
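The range-partitioning step in the completion above samples keys, derives boundary keys, and places each key with bisect; a local sketch of that bounds/bisect idea on toy keys (the names samples, bounds, which_partition are illustrative):

import bisect

samples = sorted(['a', 'c', 'f', 'k', 'p', 'x'])        # pretend these keys were sampled from the data
numPartitions = 3
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)] for i in range(numPartitions - 1)]  # ['f', 'p']

def which_partition(key):
    # Keys below the first bound go to partition 0, and so on.
    return bisect.bisect_left(bounds, key)

print([which_partition(k) for k in ['b', 'g', 'q']])    # [0, 1, 2]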
68
# complete multi-line body codes for that function<fim_prefix># ''' Sorts this RDD by the given keyfunc. Sorts this RDD by the given keyfunc >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect() [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)] >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect() [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)] ''' def sortBy(self, keyfunc, ascending=True, numPartitions=None):<fim_suffix><fim_middle>
apache/spark
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
69
# complete multi-line body codes for that function<fim_prefix># ''' Return the Cartesian product of this RDD and another RDD. Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and C{b} is in C{other}. >>> rdd = sc.parallelize([1, 2]) >>> sorted(rdd.cartesian(rdd).collect()) [(1, 1), (1, 2), (2, 1), (2, 2)] ''' def cartesian(self, other):<fim_suffix><fim_middle>
apache/spark
deserializer = CartesianDeserializer(self._jrdd_deserializer, other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
70
# complete multi-line body codes for that function<fim_prefix># ''' Return an RDD of grouped items by a function. Return an RDD of grouped items. >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8]) >>> result = rdd.groupBy(lambda x: x % 2).collect() >>> sorted([(x, sorted(y)) for (x, y) in result]) [(0, [2, 8]), (1, [1, 1, 3, 5])] ''' def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
71
# complete multi-line body codes for that function<fim_prefix># ''' Return an RDD of strings from a shell command. Return an RDD created by piping elements to a forked external process. >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect() [u'1', u'2', u'', u'3'] :param checkCode: whether or not to check the return value of the shell command. ''' def pipe(self, command, env=None, checkCode=False):<fim_suffix><fim_middle>
apache/spark
if env is None:
    env = dict()

def func(iterator):
    pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

    def pipe_objs(out):
        for obj in iterator:
            s = unicode(obj).rstrip('\n') + '\n'
            out.write(s.encode('utf-8'))
        out.close()
    Thread(target=pipe_objs, args=[pipe.stdin]).start()

    def check_return_code():
        pipe.wait()
        if checkCode and pipe.returncode:
            raise Exception("Pipe function `%s' exited with error code %d" % (command, pipe.returncode))
        else:
            for i in range(0):
                yield i
    return (x.rstrip(b'\n').decode('utf-8') for x in chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
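A stand-alone sketch of the same fork-and-stream pattern using only the standard library; it assumes a Unix-like system where the cat command exists, and the helper name pipe_lines is invented for illustration:

import shlex
from subprocess import Popen, PIPE
from threading import Thread

def pipe_lines(command, lines):
    proc = Popen(shlex.split(command), stdin=PIPE, stdout=PIPE)

    def feed():
        # Write each line to the child's stdin from a separate thread, then close it.
        for line in lines:
            proc.stdin.write((line.rstrip('\n') + '\n').encode('utf-8'))
        proc.stdin.close()

    Thread(target=feed).start()
    return [x.rstrip(b'\n').decode('utf-8') for x in proc.stdout]

print(pipe_lines('cat', ['1', '2', '', '3']))  # ['1', '2', '', '3']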
72
# complete multi-line body codes for that function<fim_prefix># ''' Applies a function to all elements of this RDD. Applies a function to all elements of this RDD. >>> def f(x): print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f) ''' def foreach(self, f):<fim_suffix><fim_middle>
apache/spark
f = fail_on_stopiteration(f)

def processPartition(iterator):
    for x in iterator:
        f(x)
    return iter([])
self.mapPartitions(processPartition).count()
73
# complete multi-line body codes for that function<fim_prefix># ''' Applies a function to each partition of this RDD. Applies a function to each partition of this RDD. >>> def f(iterator): ... for x in iterator: ... print(x) >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f) ''' def foreachPartition(self, f):<fim_suffix><fim_middle>
apache/spark
def func(it):
    r = f(it)
    try:
        return iter(r)
    except TypeError:
        return iter([])
self.mapPartitions(func).count()
74
# complete multi-line body codes for that function<fim_prefix># ''' Returns a list containing all of the elements in this RDD. Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. ''' def collect(self):<fim_suffix><fim_middle>
apache/spark
with SCCallSiteSync(self.context) as css:
    sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
75
# complete multi-line body codes for that function<fim_prefix># ''' Reduces the elements of this RDD using the specified commutative and an associative binary operator. Currently reduces partitions locally. Reduces the elements of this RDD using the specified commutative and associative binary operator. Currently reduces partitions locally. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add) 15 >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add) 10 >>> sc.parallelize([]).reduce(add) Traceback (most recent call last): ... ValueError: Can not reduce() empty RDD ''' def reduce(self, f):<fim_suffix><fim_middle>
apache/spark
f = fail_on_stopiteration(f)

def func(iterator):
    iterator = iter(iterator)
    try:
        initial = next(iterator)
    except StopIteration:
        return
    yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
    return reduce(f, vals)
raise ValueError('Can not reduce() empty RDD')
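The two-level shape of the completion above — reduce each partition, then reduce the partial results on the driver — can be sketched locally with toy "partitions" (purely illustrative, no Spark needed):

from functools import reduce as freduce
from operator import add

partitions = [[1, 2], [3, 4, 5], []]                    # toy partitioned data; one partition is empty
partials = [freduce(add, p) for p in partitions if p]   # per-"partition" results, skipping empty ones
print(freduce(add, partials))                           # 15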
76
# complete multi-line body codes for that function<fim_prefix># ''' Reduces the elements of this RDD in a multi - level tree pattern. Reduces the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeReduce(add) -5 >>> rdd.treeReduce(add, 1) -5 >>> rdd.treeReduce(add, 2) -5 >>> rdd.treeReduce(add, 5) -5 >>> rdd.treeReduce(add, 10) -5 ''' def treeReduce(self, f, depth=2):<fim_suffix><fim_middle>
apache/spark
if depth < 1:
    raise ValueError('Depth cannot be smaller than 1 but got %d.' % depth)
zeroValue = (None, True)

def op(x, y):
    if x[1]:
        return y
    elif y[1]:
        return x
    else:
        return (f(x[0], y[0]), False)
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
    raise ValueError('Cannot reduce empty RDD.')
return reduced[0]
77
# complete multi-line body codes for that function<fim_prefix># ''' Folds the elements of each partition into a single value. Aggregate the elements of each partition, and then the results for all the partitions, using a given associative function and a neutral "zero value." The function C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. This behaves somewhat differently from fold operations implemented for non-distributed collections in functional languages like Scala. This fold operation may be applied to partitions individually, and then fold those results into the final result, rather than apply the fold to each element sequentially in some defined ordering. For functions that are not commutative, the result may differ from that of a fold applied to a non-distributed collection. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add) 15 ''' def fold(self, zeroValue, op):<fim_suffix><fim_middle>
apache/spark
op = fail_on_stopiteration(op)

def func(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = op(acc, obj)
    yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
78
# complete multi-line body codes for that function<fim_prefix># ''' Aggregate the elements of each partition and then the results for all the partitions using a given combine functions and a neutral zeroValue value. Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. The first function (seqOp) can return a different result type, U, than the type of this RDD. Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0) ''' def aggregate(self, zeroValue, seqOp, combOp):<fim_suffix><fim_middle>
apache/spark
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)

def func(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = seqOp(acc, obj)
    yield acc
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
79
# complete multi-line body codes for that function<fim_prefix># ''' This function aggregates the elements of this RDD in a multi - level tree. Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5 ''' def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):<fim_suffix><fim_middle>
apache/spark
if depth < 1:
    raise ValueError('Depth cannot be smaller than 1 but got %d.' % depth)
if self.getNumPartitions() == 0:
    return zeroValue

def aggregatePartition(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = seqOp(acc, obj)
    yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
while numPartitions > scale + numPartitions / scale:
    numPartitions /= scale
    curNumPartitions = int(numPartitions)

    def mapPartition(i, iterator):
        for obj in iterator:
            yield (i % curNumPartitions, obj)
    partiallyAggregated = partiallyAggregated.mapPartitionsWithIndex(mapPartition).reduceByKey(combOp, curNumPartitions).values()
return partiallyAggregated.reduce(combOp)
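A local, single-process sketch of the multi-level tree idea used above: partial results are combined in ever smaller groups before the final combine. The helper name tree_combine and the fan-in value are invented for illustration:

from functools import reduce as freduce
from operator import add

def tree_combine(partials, comb, fanin=2):
    # Repeatedly combine groups of `fanin` partial results until only a handful remain.
    while len(partials) > fanin:
        grouped = [partials[i:i + fanin] for i in range(0, len(partials), fanin)]
        partials = [freduce(comb, g) for g in grouped]
    return freduce(comb, partials)

print(tree_combine([1, 2, 3, 4, 5, 6, 7, 8], add))  # 36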
80
# complete multi-line body codes for that function<fim_prefix># ''' Find the maximum item in this RDD. Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0 ''' def max(self, key=None):<fim_suffix><fim_middle>
apache/spark
if key is None:
    return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
81
# complete multi-line body codes for that function<fim_prefix># ''' Find the minimum item in this RDD. Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 ''' def min(self, key=None):<fim_suffix><fim_middle>
apache/spark
if key is None:
    return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
82
# complete multi-line body codes for that function<fim_prefix># ''' Return the sum of the elements in this RDD. Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0 ''' def sum(self):<fim_suffix><fim_middle>
apache/spark
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
83
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD with the mean variance and count of the elements in one operation. Return a L{StatCounter} object that captures the mean, variance and count of the RDD's elements in one operation. ''' def stats(self):<fim_suffix><fim_middle>
apache/spark
def redFunc(left_counter, right_counter):
    return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
84
# complete multi-line body codes for that function<fim_prefix># ''' Compute a histogram of the given buckets. Compute a histogram using the provided buckets. The buckets are all open to the right except for the last which is closed. e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50], which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1 and 50 we would have a histogram of 1,0,1. If your histogram is evenly spaced (e.g. [0, 10, 20, 30]), this can be switched from an O(log n) inseration to O(1) per element (where n is the number of buckets). Buckets must be sorted, not contain any duplicates, and have at least two elements. If `buckets` is a number, it will generate buckets which are evenly spaced between the minimum and maximum of the RDD. For example, if the min value is 0 and the max is 100, given `buckets` as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must be at least 1. An exception is raised if the RDD contains infinity. If the elements in the RDD do not vary (max == min), a single bucket will be used. The return value is a tuple of buckets and histogram. >>> rdd = sc.parallelize(range(51)) >>> rdd.histogram(2) ([0, 25, 50], [25, 26]) >>> rdd.histogram([0, 5, 25, 50]) ([0, 5, 25, 50], [5, 20, 26]) >>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets ([0, 15, 30, 45, 60], [15, 15, 15, 6]) >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"]) >>> rdd.histogram(("a", "b", "c")) (('a', 'b', 'c'), [2, 2]) ''' def histogram(self, buckets):<fim_suffix><fim_middle>
apache/spark
if isinstance(buckets, int):
    if buckets < 1:
        raise ValueError('number of buckets must be >= 1')

    def comparable(x):
        if x is None:
            return False
        if type(x) is float and isnan(x):
            return False
        return True
    filtered = self.filter(comparable)

    def minmax(a, b):
        return (min(a[0], b[0]), max(a[1], b[1]))
    try:
        (minv, maxv) = filtered.map(lambda x: (x, x)).reduce(minmax)
    except TypeError as e:
        if ' empty ' in str(e):
            raise ValueError('can not generate buckets from empty RDD')
        raise
    if minv == maxv or buckets == 1:
        return ([minv, maxv], [filtered.count()])
    try:
        inc = (maxv - minv) / buckets
    except TypeError:
        raise TypeError('Can not generate buckets with non-number in RDD')
    if isinf(inc):
        raise ValueError('Can not generate buckets with infinite value')
    inc = int(inc)
    if inc * buckets != maxv - minv:
        inc = (maxv - minv) * 1.0 / buckets
    buckets = [i * inc + minv for i in range(buckets)]
    buckets.append(maxv)
    even = True
elif isinstance(buckets, (list, tuple)):
    if len(buckets) < 2:
        raise ValueError('buckets should have more than one value')
    if any((i is None or (isinstance(i, float) and isnan(i)) for i in buckets)):
        raise ValueError('can not have None or NaN in buckets')
    if sorted(buckets) != list(buckets):
        raise ValueError('buckets should be sorted')
    if len(set(buckets)) != len(buckets):
        raise ValueError('buckets should not contain duplicated values')
    minv = buckets[0]
    maxv = buckets[-1]
    even = False
    inc = None
    try:
        steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
    except TypeError:
        pass
    else:
        if max(steps) - min(steps) < 1e-10:
            even = True
            inc = (maxv - minv) / (len(buckets) - 1)
else:
    raise TypeError('buckets should be a list or tuple or number(int or long)')

def histogram(iterator):
    counters = [0] * len(buckets)
    for i in iterator:
        if i is None or (type(i) is float and isnan(i)) or i > maxv or (i < minv):
            continue
        t = int((i - minv) / inc) if even else bisect.bisect_right(buckets, i) - 1
        counters[t] += 1
    last = counters.pop()
    counters[-1] += last
    return [counters]

def mergeCounters(a, b):
    return [i + j for (i, j) in zip(a, b)]
return (buckets, self.mapPartitions(histogram).reduce(mergeCounters))
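The per-element bucket lookup in the completion above is constant-time division for evenly spaced buckets and a bisect for uneven ones; a toy check of both index computations (the names even_index and uneven_index are illustrative):

import bisect

buckets = [0, 15, 30, 45, 60]                       # evenly spaced bucket edges
inc = (buckets[-1] - buckets[0]) / (len(buckets) - 1)

def even_index(v):
    return int((v - buckets[0]) / inc)              # O(1) division path

def uneven_index(v):
    return bisect.bisect_right(buckets, v) - 1      # O(log n) bisect path

print(even_index(31), uneven_index(31))             # 2 2  (both land in [30, 45))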
85
# complete multi-line body codes for that function<fim_prefix># ''' Return the count of each unique value in this RDD as a dictionary of = > count Return the count of each unique value in this RDD as a dictionary of (value, count) pairs. >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items()) [(1, 2), (2, 3)] ''' def countByValue(self):<fim_suffix><fim_middle>
apache/spark
def countPartition(iterator):
    counts = defaultdict(int)
    for obj in iterator:
        counts[obj] += 1
    yield counts

def mergeMaps(m1, m2):
    for (k, v) in m2.items():
        m1[k] += v
    return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
86
# complete multi-line body codes for that function<fim_prefix># ''' Return the top N elements from an RDD. Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2] ''' def top(self, num, key=None):<fim_suffix><fim_middle>
apache/spark
def topIterator(iterator):
    yield heapq.nlargest(num, iterator, key=key)

def merge(a, b):
    return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
87
# complete multi-line body codes for that function<fim_prefix># ''' Take the N elements from an RDD ordered in ascending order or as is specified by the optional key function. Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4] ''' def takeOrdered(self, num, key=None):<fim_suffix><fim_middle>
apache/spark
def merge(a, b):
    return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
88
# complete multi-line body codes for that function<fim_prefix># ''' Take the first num elements of the RDD. Take the first num elements of the RDD. It works by first scanning one partition, and use the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93] ''' def take(self, num):<fim_suffix><fim_middle>
apache/spark
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
    numPartsToTry = 1
    if partsScanned > 0:
        if len(items) == 0:
            numPartsToTry = partsScanned * 4
        else:
            numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
            numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
    left = num - len(items)

    def takeUpToNumLeft(iterator):
        iterator = iter(iterator)
        taken = 0
        while taken < left:
            try:
                yield next(iterator)
            except StopIteration:
                return
            taken += 1
    p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
    res = self.context.runJob(self, takeUpToNumLeft, p)
    items += res
    partsScanned += numPartsToTry
return items[:num]
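The escalation rule in the completion above — scan one partition first, quadruple when nothing was found, otherwise interpolate from the hit rate — can be traced locally; the helper name parts_to_try is invented for illustration:

def parts_to_try(num_wanted, items_found, parts_scanned):
    # Mirrors the partition-growth rule used above, outside of Spark.
    if parts_scanned == 0:
        return 1
    if items_found == 0:
        return parts_scanned * 4
    estimate = int(1.5 * num_wanted * parts_scanned / items_found) - parts_scanned
    return min(max(estimate, 1), parts_scanned * 4)

print(parts_to_try(3, 0, 0))   # 1  -> first pass scans a single partition
print(parts_to_try(3, 0, 1))   # 4  -> nothing found yet, quadruple the scan
print(parts_to_try(3, 1, 5))   # 17 -> estimate from the hit rate so far, capped at 4x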
89
# complete multi-line body codes for that function<fim_prefix># ''' Save a Python RDD of key - value pairs to any Hadoop file system using the new Hadoop OutputFormat API. Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are converted for output using either user specified converters or, by default, L{org.apache.spark.api.python.JavaToWritableConverter}. :param conf: Hadoop job configuration, passed in as a dict :param keyConverter: (None by default) :param valueConverter: (None by default) ''' def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):<fim_suffix><fim_middle>
apache/spark
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, True)
90
# complete multi-line body codes for that function<fim_prefix># ''' Save the current RDD to a new Hadoop file using the new API. Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types will be inferred if not specified. Keys and values are converted for output using either user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The C{conf} is applied on top of the base Hadoop conf associated with the SparkContext of this RDD to create a merged Hadoop MapReduce job configuration for saving the data. :param path: path to Hadoop file :param outputFormatClass: fully qualified classname of Hadoop OutputFormat (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.IntWritable", None by default) :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.Text", None by default) :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop job configuration, passed in as a dict (None by default) ''' def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, conf=None):<fim_suffix><fim_middle>
apache/spark
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf)
91
# complete multi-line body codes for that function<fim_prefix># ''' Save the current RDD to a sequence file. Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the L{org.apache.hadoop.io.Writable} types that we convert from the RDD's key and value types. The mechanism is as follows: 1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects. 2. Keys and values of this Java RDD are converted to Writables and written out. :param path: path to sequence file :param compressionCodecClass: (None by default) ''' def saveAsSequenceFile(self, path, compressionCodecClass=None):<fim_suffix><fim_middle>
apache/spark
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True, path, compressionCodecClass)
92
# complete multi-line body codes for that function<fim_prefix># ''' Save this RDD as a PickleFile. Save this RDD as a SequenceFile of serialized objects. The serializer used is L{pyspark.serializers.PickleSerializer}, default batch size is 10. >>> tmpFile = NamedTemporaryFile(delete=True) >>> tmpFile.close() >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3) >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect()) ['1', '2', 'rdd', 'spark'] ''' def saveAsPickleFile(self, path, batchSize=10):<fim_suffix><fim_middle>
apache/spark
if batchSize == 0:
    ser = AutoBatchedSerializer(PickleSerializer())
else:
    ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
93
# complete multi-line body codes for that function<fim_prefix># ''' Save this RDD as a text file using string representations of elements. Save this RDD as a text file, using string representations of elements. @param path: path to text file @param compressionCodecClass: (None by default) string i.e. "org.apache.hadoop.io.compress.GzipCodec" >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name) >>> from fileinput import input >>> from glob import glob >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n' Empty lines are tolerated when saving to text files. >>> tempFile2 = NamedTemporaryFile(delete=True) >>> tempFile2.close() >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name) >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*")))) '\\n\\n\\nbar\\nfoo\\n' Using compressionCodecClass >>> tempFile3 = NamedTemporaryFile(delete=True) >>> tempFile3.close() >>> codec = "org.apache.hadoop.io.compress.GzipCodec" >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec) >>> from fileinput import input, hook_compressed >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)) >>> b''.join(result).decode('utf-8') u'bar\\nfoo\\n' ''' def saveAsTextFile(self, path, compressionCodecClass=None):<fim_suffix><fim_middle>
apache/spark
def func(split, iterator):
    for x in iterator:
        if not isinstance(x, (unicode, bytes)):
            x = unicode(x)
        if isinstance(x, unicode):
            x = x.encode('utf-8')
        yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
    compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
94
# complete multi-line body codes for that function<fim_prefix># ''' Return a new RDD with the values for each key using an associative and commutative reduce function. Merge the values for each key using an associative and commutative reduce function. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. Output will be partitioned with C{numPartitions} partitions, or the default parallelism level if C{numPartitions} is not specified. Default partitioner is hash-partition. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKey(add).collect()) [('a', 2), ('b', 1)] ''' def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
95
# complete multi-line body codes for that function<fim_prefix># ''' Return a new DStream with the values for each key using an associative and commutative reduce function. Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)] ''' def reduceByKeyLocally(self, func):<fim_suffix><fim_middle>
apache/spark
func = fail_on_stopiteration(func)

def reducePartition(iterator):
    m = {}
    for (k, v) in iterator:
        m[k] = func(m[k], v) if k in m else v
    yield m

def mergeMaps(m1, m2):
    for (k, v) in m2.items():
        m1[k] = func(m1[k], v) if k in m1 else v
    return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
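A local sketch of the per-partition dict building and dict merging used above, on toy pairs and without Spark (the helper names are illustrative):

def reduce_partition(pairs, func):
    m = {}
    for k, v in pairs:
        m[k] = func(m[k], v) if k in m else v
    return m

def merge_maps(m1, m2, func):
    for k, v in m2.items():
        m1[k] = func(m1[k], v) if k in m1 else v
    return m1

p1 = reduce_partition([('a', 1), ('b', 1)], lambda x, y: x + y)
p2 = reduce_partition([('a', 1)], lambda x, y: x + y)
print(sorted(merge_maps(p1, p2, lambda x, y: x + y).items()))  # [('a', 2), ('b', 1)]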
96
# complete multi-line body codes for that function<fim_prefix># ''' Return a copy of the RDD partitioned by the specified partitioner. Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0 ''' def partitionBy(self, numPartitions, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
    return self
outputSerializer = self.ctx._unbatched_serializer
limit = _parse_memory(self.ctx._conf.get('spark.python.worker.memory', '512m')) / 2

def add_shuffle_key(split, iterator):
    buckets = defaultdict(list)
    (c, batch) = (0, min(10 * numPartitions, 1000))
    for (k, v) in iterator:
        buckets[partitionFunc(k) % numPartitions].append((k, v))
        c += 1
        if c % 1000 == 0 and get_used_memory() > limit or c > batch:
            (n, size) = (len(buckets), 0)
            for split in list(buckets.keys()):
                yield pack_long(split)
                d = outputSerializer.dumps(buckets[split])
                del buckets[split]
                yield d
                size += len(d)
            avg = int(size / n) >> 20
            if avg < 1:
                batch *= 1.5
            elif avg > 10:
                batch = max(int(batch / 1.5), 1)
            c = 0
    for (split, items) in buckets.items():
        yield pack_long(split)
        yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
    pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
    jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions, id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
97
# complete multi-line body codes for that function<fim_prefix># ''' This function returns an RDD of elements from the first entry in the RDD that are combined with the second entry in the RDD. Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])] ''' def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

def combineLocally(iterator):
    merger = ExternalMerger(agg, memory * 0.9, serializer)
    merger.mergeValues(iterator)
    return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

def _mergeCombiners(iterator):
    merger = ExternalMerger(agg, memory, serializer)
    merger.mergeCombiners(iterator)
    return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
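The three user-supplied functions in the docstring above (createCombiner, mergeValue, mergeCombiners) can be exercised locally without Spark; a sketch with toy pairs split over two pretend partitions (all helper names are illustrative):

def combine_partition(pairs, create_combiner, merge_value):
    # Map-side pass: build a combiner per key within one "partition".
    combined = {}
    for k, v in pairs:
        combined[k] = merge_value(combined[k], v) if k in combined else create_combiner(v)
    return combined

def merge_partitions(d1, d2, merge_combiners):
    # Reduce-side pass: merge the per-partition combiners.
    for k, c in d2.items():
        d1[k] = merge_combiners(d1[k], c) if k in d1 else c
    return d1

part1 = combine_partition([('a', 1), ('b', 1)], lambda v: [v], lambda c, v: c + [v])
part2 = combine_partition([('a', 2)], lambda v: [v], lambda c, v: c + [v])
print(sorted(merge_partitions(part1, part2, lambda c1, c2: c1 + c2).items()))
# [('a', [1, 2]), ('b', [1])]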
98
# complete multi-line body codes for that function<fim_prefix># ''' Aggregate the values of each key using given combine functions and a neutral zero value. Aggregate the values of each key, using given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. Thus, we need one operation for merging a V into a U and one operation for merging two U's, The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U. ''' def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
def createZero():
    return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
99
# complete multi-line body codes for that function<fim_prefix># ''' Return a new table with the values for each key in the table grouped by func. Merge the values for each key using an associative function "func" and a neutral "zeroValue" which may be added to the result an arbitrary number of times, and must not change the result (e.g., 0 for addition, or 1 for multiplication.). >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> from operator import add >>> sorted(rdd.foldByKey(0, add).collect()) [('a', 2), ('b', 1)] ''' def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):<fim_suffix><fim_middle>
apache/spark
def createZero():
    return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions, partitionFunc)
End of preview.