Dataset columns (type and observed value range):

    repository_name             string      lengths 5 to 67
    func_path_in_repository     string      lengths 4 to 234
    func_name                   string      lengths 0 to 153
    whole_func_string           string      lengths 52 to 3.87M
    language                    string      6 distinct values
    func_code_string            string      lengths 52 to 3.87M
    func_code_tokens            sequence
    func_documentation_string   string      lengths 1 to 46.9k
    func_documentation_tokens   sequence
    split_name                  string      1 distinct value
    func_code_url               string      lengths 85 to 339
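This column layout matches the CodeSearchNet-style code/documentation corpora published on the Hugging Face Hub. The sketch below shows how rows with this schema could be loaded and inspected, assuming the datasets library is installed and the corpus is available under the identifier code_search_net with a python configuration (the identifier and config name are assumptions, not stated on this page):

from datasets import load_dataset

# Assumed identifier and config; substitute whatever name this corpus is published under.
ds = load_dataset("code_search_net", "python", split="train")

row = ds[0]
print(row["repository_name"], row["func_path_in_repository"], row["func_name"])
print(row["func_documentation_string"][:200])
print(row["func_code_url"])

The records below are sample rows from the train split.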
apache/spark
python/pyspark/sql/session.py
SparkSession._inferSchema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
    """
    Infer schema from an RDD of Row or tuple.

    :param rdd: an RDD of Row or tuple
    :param samplingRatio: sampling ratio, or no sampling (default)
    :return: :class:`pyspark.sql.types.StructType`
    """
    first = rdd.first()
    if not first:
        raise ValueError("The first row in RDD is empty, "
                         "can not infer schema")
    if type(first) is dict:
        warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                      "Use pyspark.sql.Row instead")

    if samplingRatio is None:
        schema = _infer_schema(first, names=names)
        if _has_nulltype(schema):
            for row in rdd.take(100)[1:]:
                schema = _merge_type(schema, _infer_schema(row, names=names))
                if not _has_nulltype(schema):
                    break
            else:
                raise ValueError("Some of types cannot be determined by the "
                                 "first 100 rows, please try again with sampling")
    else:
        if samplingRatio < 0.99:
            rdd = rdd.sample(False, float(samplingRatio))
        schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
    return schema
python
[ "def", "_inferSchema", "(", "self", ",", "rdd", ",", "samplingRatio", "=", "None", ",", "names", "=", "None", ")", ":", "first", "=", "rdd", ".", "first", "(", ")", "if", "not", "first", ":", "raise", "ValueError", "(", "\"The first row in RDD is empty, \"", "\"can not infer schema\"", ")", "if", "type", "(", "first", ")", "is", "dict", ":", "warnings", ".", "warn", "(", "\"Using RDD of dict to inferSchema is deprecated. \"", "\"Use pyspark.sql.Row instead\"", ")", "if", "samplingRatio", "is", "None", ":", "schema", "=", "_infer_schema", "(", "first", ",", "names", "=", "names", ")", "if", "_has_nulltype", "(", "schema", ")", ":", "for", "row", "in", "rdd", ".", "take", "(", "100", ")", "[", "1", ":", "]", ":", "schema", "=", "_merge_type", "(", "schema", ",", "_infer_schema", "(", "row", ",", "names", "=", "names", ")", ")", "if", "not", "_has_nulltype", "(", "schema", ")", ":", "break", "else", ":", "raise", "ValueError", "(", "\"Some of types cannot be determined by the \"", "\"first 100 rows, please try again with sampling\"", ")", "else", ":", "if", "samplingRatio", "<", "0.99", ":", "rdd", "=", "rdd", ".", "sample", "(", "False", ",", "float", "(", "samplingRatio", ")", ")", "schema", "=", "rdd", ".", "map", "(", "lambda", "row", ":", "_infer_schema", "(", "row", ",", "names", ")", ")", ".", "reduce", "(", "_merge_type", ")", "return", "schema" ]
Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType`
[ "Infer", "schema", "from", "an", "RDD", "of", "Row", "or", "tuple", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L382-L412
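A minimal sketch of how this helper's two inference paths are reached through the public createDataFrame API, assuming an active SparkSession bound to the name spark (the data values are illustrative):

from pyspark.sql import Row

rows = [Row(name="Alice", age=None)] + [Row(name="Bob", age=i) for i in range(1000)]
rdd = spark.sparkContext.parallelize(rows)

# Default path: the schema comes from the first row, and up to 100 rows are
# merged in only if some field is still NullType.
df_default = spark.createDataFrame(rdd)

# Sampling path: samplingRatio < 0.99 samples the RDD and merges the schema
# inferred from every sampled row.
df_sampled = spark.createDataFrame(rdd, samplingRatio=0.1)
df_sampled.printSchema()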
apache/spark
python/pyspark/sql/session.py
SparkSession._createFromRDD
def _createFromRDD(self, rdd, schema, samplingRatio):
    """
    Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
    """
    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchema(rdd, samplingRatio, names=schema)
        converter = _create_converter(struct)
        rdd = rdd.map(converter)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct

    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

    # convert python objects to sql data
    rdd = rdd.map(schema.toInternal)
    return rdd, schema
python
[ "def", "_createFromRDD", "(", "self", ",", "rdd", ",", "schema", ",", "samplingRatio", ")", ":", "if", "schema", "is", "None", "or", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "struct", "=", "self", ".", "_inferSchema", "(", "rdd", ",", "samplingRatio", ",", "names", "=", "schema", ")", "converter", "=", "_create_converter", "(", "struct", ")", "rdd", "=", "rdd", ".", "map", "(", "converter", ")", "if", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "i", ",", "name", "in", "enumerate", "(", "schema", ")", ":", "struct", ".", "fields", "[", "i", "]", ".", "name", "=", "name", "struct", ".", "names", "[", "i", "]", "=", "name", "schema", "=", "struct", "elif", "not", "isinstance", "(", "schema", ",", "StructType", ")", ":", "raise", "TypeError", "(", "\"schema should be StructType or list or None, but got: %s\"", "%", "schema", ")", "# convert python objects to sql data", "rdd", "=", "rdd", ".", "map", "(", "schema", ".", "toInternal", ")", "return", "rdd", ",", "schema" ]
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
[ "Create", "an", "RDD", "for", "DataFrame", "from", "an", "existing", "RDD", "returns", "the", "RDD", "and", "schema", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L414-L433
apache/spark
python/pyspark/sql/session.py
SparkSession._createFromLocal
def _createFromLocal(self, data, schema):
    """
    Create an RDD for DataFrame from a list or pandas.DataFrame, returns
    the RDD and schema.
    """
    # make sure data could consumed multiple times
    if not isinstance(data, list):
        data = list(data)

    if schema is None or isinstance(schema, (list, tuple)):
        struct = self._inferSchemaFromList(data, names=schema)
        converter = _create_converter(struct)
        data = map(converter, data)
        if isinstance(schema, (list, tuple)):
            for i, name in enumerate(schema):
                struct.fields[i].name = name
                struct.names[i] = name
        schema = struct

    elif not isinstance(schema, StructType):
        raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

    # convert python objects to sql data
    data = [schema.toInternal(row) for row in data]
    return self._sc.parallelize(data), schema
python
[ "def", "_createFromLocal", "(", "self", ",", "data", ",", "schema", ")", ":", "# make sure data could consumed multiple times", "if", "not", "isinstance", "(", "data", ",", "list", ")", ":", "data", "=", "list", "(", "data", ")", "if", "schema", "is", "None", "or", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "struct", "=", "self", ".", "_inferSchemaFromList", "(", "data", ",", "names", "=", "schema", ")", "converter", "=", "_create_converter", "(", "struct", ")", "data", "=", "map", "(", "converter", ",", "data", ")", "if", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "i", ",", "name", "in", "enumerate", "(", "schema", ")", ":", "struct", ".", "fields", "[", "i", "]", ".", "name", "=", "name", "struct", ".", "names", "[", "i", "]", "=", "name", "schema", "=", "struct", "elif", "not", "isinstance", "(", "schema", ",", "StructType", ")", ":", "raise", "TypeError", "(", "\"schema should be StructType or list or None, but got: %s\"", "%", "schema", ")", "# convert python objects to sql data", "data", "=", "[", "schema", ".", "toInternal", "(", "row", ")", "for", "row", "in", "data", "]", "return", "self", ".", "_sc", ".", "parallelize", "(", "data", ")", ",", "schema" ]
Create an RDD for DataFrame from a list or pandas.DataFrame, returns the RDD and schema.
[ "Create", "an", "RDD", "for", "DataFrame", "from", "a", "list", "or", "pandas", ".", "DataFrame", "returns", "the", "RDD", "and", "schema", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L435-L459
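_createFromLocal backs createDataFrame whenever plain local Python data is passed in; a short sketch of that path through the public API, again assuming a SparkSession named spark:

# A local list of tuples plus a list of column names goes through _createFromLocal:
# the struct is inferred from the values, then its fields are renamed.
df = spark.createDataFrame([("Alice", 1), ("Bob", 2)], ["name", "age"])
df.printSchema()
df.show()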
apache/spark
python/pyspark/sql/session.py
SparkSession._get_numpy_record_dtype
def _get_numpy_record_dtype(self, rec):
    """
    Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
    the dtypes of fields in a record so they can be properly loaded into Spark.
    :param rec: a numpy record to check field dtypes
    :return corrected dtype for a numpy.record or None if no correction needed
    """
    import numpy as np
    cur_dtypes = rec.dtype
    col_names = cur_dtypes.names
    record_type_list = []
    has_rec_fix = False
    for i in xrange(len(cur_dtypes)):
        curr_type = cur_dtypes[i]
        # If type is a datetime64 timestamp, convert to microseconds
        # NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
        # conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
        if curr_type == np.dtype('datetime64[ns]'):
            curr_type = 'datetime64[us]'
            has_rec_fix = True
        record_type_list.append((str(col_names[i]), curr_type))
    return np.dtype(record_type_list) if has_rec_fix else None
python
[ "def", "_get_numpy_record_dtype", "(", "self", ",", "rec", ")", ":", "import", "numpy", "as", "np", "cur_dtypes", "=", "rec", ".", "dtype", "col_names", "=", "cur_dtypes", ".", "names", "record_type_list", "=", "[", "]", "has_rec_fix", "=", "False", "for", "i", "in", "xrange", "(", "len", "(", "cur_dtypes", ")", ")", ":", "curr_type", "=", "cur_dtypes", "[", "i", "]", "# If type is a datetime64 timestamp, convert to microseconds", "# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,", "# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417", "if", "curr_type", "==", "np", ".", "dtype", "(", "'datetime64[ns]'", ")", ":", "curr_type", "=", "'datetime64[us]'", "has_rec_fix", "=", "True", "record_type_list", ".", "append", "(", "(", "str", "(", "col_names", "[", "i", "]", ")", ",", "curr_type", ")", ")", "return", "np", ".", "dtype", "(", "record_type_list", ")", "if", "has_rec_fix", "else", "None" ]
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct the dtypes of fields in a record so they can be properly loaded into Spark. :param rec: a numpy record to check field dtypes :return corrected dtype for a numpy.record or None if no correction needed
[ "Used", "when", "converting", "a", "pandas", ".", "DataFrame", "to", "Spark", "using", "to_records", "()", "this", "will", "correct", "the", "dtypes", "of", "fields", "in", "a", "record", "so", "they", "can", "be", "properly", "loaded", "into", "Spark", ".", ":", "param", "rec", ":", "a", "numpy", "record", "to", "check", "field", "dtypes", ":", "return", "corrected", "dtype", "for", "a", "numpy", ".", "record", "or", "None", "if", "no", "correction", "needed" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L461-L482
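The datetime64[ns] correction described above can be reproduced with plain numpy and pandas outside Spark; a standalone sketch (the frame contents are made up for illustration):

import numpy as np
import pandas as pd

pdf = pd.DataFrame({"ts": pd.date_range("2024-01-01", periods=3, freq="D"),
                    "x": [1, 2, 3]})
rec = pdf.to_records(index=False)
print(rec.dtype)  # the 'ts' field comes out as datetime64[ns]

# Re-cast nanosecond timestamps to microseconds so that tolist() yields
# datetime objects instead of raw integers (the SPARK-22417 note above).
fixed = np.dtype([(name,
                   'datetime64[us]' if rec.dtype[name] == np.dtype('datetime64[ns]')
                   else rec.dtype[name])
                  for name in rec.dtype.names])
print([r.astype(fixed).tolist() for r in rec][0])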
apache/spark
python/pyspark/sql/session.py
SparkSession._convert_from_pandas
def _convert_from_pandas(self, pdf, schema, timezone):
    """
    Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
    :return list of records
    """
    if timezone is not None:
        from pyspark.sql.types import _check_series_convert_timestamps_tz_local
        copied = False
        if isinstance(schema, StructType):
            for field in schema:
                # TODO: handle nested timestamps, such as ArrayType(TimestampType())?
                if isinstance(field.dataType, TimestampType):
                    s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
                    if s is not pdf[field.name]:
                        if not copied:
                            # Copy once if the series is modified to prevent the original
                            # Pandas DataFrame from being updated
                            pdf = pdf.copy()
                            copied = True
                        pdf[field.name] = s
        else:
            for column, series in pdf.iteritems():
                s = _check_series_convert_timestamps_tz_local(series, timezone)
                if s is not series:
                    if not copied:
                        # Copy once if the series is modified to prevent the original
                        # Pandas DataFrame from being updated
                        pdf = pdf.copy()
                        copied = True
                    pdf[column] = s

    # Convert pandas.DataFrame to list of numpy records
    np_records = pdf.to_records(index=False)

    # Check if any columns need to be fixed for Spark to infer properly
    if len(np_records) > 0:
        record_dtype = self._get_numpy_record_dtype(np_records[0])
        if record_dtype is not None:
            return [r.astype(record_dtype).tolist() for r in np_records]

    # Convert list of numpy records to python lists
    return [r.tolist() for r in np_records]
python
[ "def", "_convert_from_pandas", "(", "self", ",", "pdf", ",", "schema", ",", "timezone", ")", ":", "if", "timezone", "is", "not", "None", ":", "from", "pyspark", ".", "sql", ".", "types", "import", "_check_series_convert_timestamps_tz_local", "copied", "=", "False", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "for", "field", "in", "schema", ":", "# TODO: handle nested timestamps, such as ArrayType(TimestampType())?", "if", "isinstance", "(", "field", ".", "dataType", ",", "TimestampType", ")", ":", "s", "=", "_check_series_convert_timestamps_tz_local", "(", "pdf", "[", "field", ".", "name", "]", ",", "timezone", ")", "if", "s", "is", "not", "pdf", "[", "field", ".", "name", "]", ":", "if", "not", "copied", ":", "# Copy once if the series is modified to prevent the original", "# Pandas DataFrame from being updated", "pdf", "=", "pdf", ".", "copy", "(", ")", "copied", "=", "True", "pdf", "[", "field", ".", "name", "]", "=", "s", "else", ":", "for", "column", ",", "series", "in", "pdf", ".", "iteritems", "(", ")", ":", "s", "=", "_check_series_convert_timestamps_tz_local", "(", "series", ",", "timezone", ")", "if", "s", "is", "not", "series", ":", "if", "not", "copied", ":", "# Copy once if the series is modified to prevent the original", "# Pandas DataFrame from being updated", "pdf", "=", "pdf", ".", "copy", "(", ")", "copied", "=", "True", "pdf", "[", "column", "]", "=", "s", "# Convert pandas.DataFrame to list of numpy records", "np_records", "=", "pdf", ".", "to_records", "(", "index", "=", "False", ")", "# Check if any columns need to be fixed for Spark to infer properly", "if", "len", "(", "np_records", ")", ">", "0", ":", "record_dtype", "=", "self", ".", "_get_numpy_record_dtype", "(", "np_records", "[", "0", "]", ")", "if", "record_dtype", "is", "not", "None", ":", "return", "[", "r", ".", "astype", "(", "record_dtype", ")", ".", "tolist", "(", ")", "for", "r", "in", "np_records", "]", "# Convert list of numpy records to python lists", "return", "[", "r", ".", "tolist", "(", ")", "for", "r", "in", "np_records", "]" ]
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame :return list of records
[ "Convert", "a", "pandas", ".", "DataFrame", "to", "list", "of", "records", "that", "can", "be", "used", "to", "make", "a", "DataFrame", ":", "return", "list", "of", "records" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L484-L525
apache/spark
python/pyspark/sql/session.py
SparkSession._create_from_pandas_with_arrow
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
    """
    Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
    to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
    data types will be used to coerce the data in Pandas to Arrow conversion.
    """
    from pyspark.serializers import ArrowStreamPandasSerializer
    from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
    from pyspark.sql.utils import require_minimum_pandas_version, \
        require_minimum_pyarrow_version

    require_minimum_pandas_version()
    require_minimum_pyarrow_version()

    from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
    import pyarrow as pa

    # Create the Spark schema from list of names passed in with Arrow types
    if isinstance(schema, (list, tuple)):
        arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
        struct = StructType()
        for name, field in zip(schema, arrow_schema):
            struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
        schema = struct

    # Determine arrow types to coerce data when creating batches
    if isinstance(schema, StructType):
        arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
    elif isinstance(schema, DataType):
        raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
    else:
        # Any timestamps must be coerced to be compatible with Spark
        arrow_types = [to_arrow_type(TimestampType())
                       if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                       for t in pdf.dtypes]

    # Slice the DataFrame to be batched
    step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
    pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

    # Create list of Arrow (columns, type) for serializer dump_stream
    arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
                  for pdf_slice in pdf_slices]

    jsqlContext = self._wrapped._jsqlContext

    safecheck = self._wrapped._conf.arrowSafeTypeConversion()
    col_by_name = True  # col by name only applies to StructType columns, can't happen here
    ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

    def reader_func(temp_filename):
        return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

    def create_RDD_server():
        return self._jvm.ArrowRDDServer(jsqlContext)

    # Create Spark DataFrame from Arrow stream file, using one batch per partition
    jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
    jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
python
[ "def", "_create_from_pandas_with_arrow", "(", "self", ",", "pdf", ",", "schema", ",", "timezone", ")", ":", "from", "pyspark", ".", "serializers", "import", "ArrowStreamPandasSerializer", "from", "pyspark", ".", "sql", ".", "types", "import", "from_arrow_type", ",", "to_arrow_type", ",", "TimestampType", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", ",", "require_minimum_pyarrow_version", "require_minimum_pandas_version", "(", ")", "require_minimum_pyarrow_version", "(", ")", "from", "pandas", ".", "api", ".", "types", "import", "is_datetime64_dtype", ",", "is_datetime64tz_dtype", "import", "pyarrow", "as", "pa", "# Create the Spark schema from list of names passed in with Arrow types", "if", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "arrow_schema", "=", "pa", ".", "Schema", ".", "from_pandas", "(", "pdf", ",", "preserve_index", "=", "False", ")", "struct", "=", "StructType", "(", ")", "for", "name", ",", "field", "in", "zip", "(", "schema", ",", "arrow_schema", ")", ":", "struct", ".", "add", "(", "name", ",", "from_arrow_type", "(", "field", ".", "type", ")", ",", "nullable", "=", "field", ".", "nullable", ")", "schema", "=", "struct", "# Determine arrow types to coerce data when creating batches", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "arrow_types", "=", "[", "to_arrow_type", "(", "f", ".", "dataType", ")", "for", "f", "in", "schema", ".", "fields", "]", "elif", "isinstance", "(", "schema", ",", "DataType", ")", ":", "raise", "ValueError", "(", "\"Single data type %s is not supported with Arrow\"", "%", "str", "(", "schema", ")", ")", "else", ":", "# Any timestamps must be coerced to be compatible with Spark", "arrow_types", "=", "[", "to_arrow_type", "(", "TimestampType", "(", ")", ")", "if", "is_datetime64_dtype", "(", "t", ")", "or", "is_datetime64tz_dtype", "(", "t", ")", "else", "None", "for", "t", "in", "pdf", ".", "dtypes", "]", "# Slice the DataFrame to be batched", "step", "=", "-", "(", "-", "len", "(", "pdf", ")", "//", "self", ".", "sparkContext", ".", "defaultParallelism", ")", "# round int up", "pdf_slices", "=", "(", "pdf", "[", "start", ":", "start", "+", "step", "]", "for", "start", "in", "xrange", "(", "0", ",", "len", "(", "pdf", ")", ",", "step", ")", ")", "# Create list of Arrow (columns, type) for serializer dump_stream", "arrow_data", "=", "[", "[", "(", "c", ",", "t", ")", "for", "(", "_", ",", "c", ")", ",", "t", "in", "zip", "(", "pdf_slice", ".", "iteritems", "(", ")", ",", "arrow_types", ")", "]", "for", "pdf_slice", "in", "pdf_slices", "]", "jsqlContext", "=", "self", ".", "_wrapped", ".", "_jsqlContext", "safecheck", "=", "self", ".", "_wrapped", ".", "_conf", ".", "arrowSafeTypeConversion", "(", ")", "col_by_name", "=", "True", "# col by name only applies to StructType columns, can't happen here", "ser", "=", "ArrowStreamPandasSerializer", "(", "timezone", ",", "safecheck", ",", "col_by_name", ")", "def", "reader_func", "(", "temp_filename", ")", ":", "return", "self", ".", "_jvm", ".", "PythonSQLUtils", ".", "readArrowStreamFromFile", "(", "jsqlContext", ",", "temp_filename", ")", "def", "create_RDD_server", "(", ")", ":", "return", "self", ".", "_jvm", ".", "ArrowRDDServer", "(", "jsqlContext", ")", "# Create Spark DataFrame from Arrow stream file, using one batch per partition", "jrdd", "=", "self", ".", "_sc", ".", "_serialize_to_jvm", "(", "arrow_data", ",", "ser", ",", "reader_func", ",", "create_RDD_server", ")", "jdf", "=", "self", ".", "_jvm", 
".", "PythonSQLUtils", ".", "toDataFrame", "(", "jrdd", ",", "schema", ".", "json", "(", ")", ",", "jsqlContext", ")", "df", "=", "DataFrame", "(", "jdf", ",", "self", ".", "_wrapped", ")", "df", ".", "_schema", "=", "schema", "return", "df" ]
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the data types will be used to coerce the data in Pandas to Arrow conversion.
[ "Create", "a", "DataFrame", "from", "a", "given", "pandas", ".", "DataFrame", "by", "slicing", "it", "into", "partitions", "converting", "to", "Arrow", "data", "then", "sending", "to", "the", "JVM", "to", "parallelize", ".", "If", "a", "schema", "is", "passed", "in", "the", "data", "types", "will", "be", "used", "to", "coerce", "the", "data", "in", "Pandas", "to", "Arrow", "conversion", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L527-L588
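The Arrow path is normally reached by turning on the Arrow configuration before calling createDataFrame; a sketch assuming the Spark 2.x configuration keys referenced elsewhere in this file and an installed pyarrow:

import pandas as pd

spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Optional: fall back to the non-Arrow path instead of raising if conversion fails.
spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")

pdf = pd.DataFrame({"name": ["Alice", "Bob"],
                    "ts": pd.to_datetime(["2024-01-01", "2024-01-02"])})
# Sliced into roughly defaultParallelism batches and shipped to the JVM as Arrow data.
df = spark.createDataFrame(pdf)
df.printSchema()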
apache/spark
python/pyspark/sql/session.py
SparkSession._create_shell_session
def _create_shell_session():
    """
    Initialize a SparkSession for a pyspark shell session. This is called from shell.py
    to make error handling simpler without needing to declare local variables in that
    script, which would expose those to users.
    """
    import py4j
    from pyspark.conf import SparkConf
    from pyspark.context import SparkContext
    try:
        # Try to access HiveConf, it will raise exception if Hive is not added
        conf = SparkConf()
        if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
            SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
            return SparkSession.builder\
                .enableHiveSupport()\
                .getOrCreate()
        else:
            return SparkSession.builder.getOrCreate()
    except (py4j.protocol.Py4JError, TypeError):
        if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
            warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                          "please make sure you build spark with hive")

    return SparkSession.builder.getOrCreate()
python
[ "def", "_create_shell_session", "(", ")", ":", "import", "py4j", "from", "pyspark", ".", "conf", "import", "SparkConf", "from", "pyspark", ".", "context", "import", "SparkContext", "try", ":", "# Try to access HiveConf, it will raise exception if Hive is not added", "conf", "=", "SparkConf", "(", ")", "if", "conf", ".", "get", "(", "'spark.sql.catalogImplementation'", ",", "'hive'", ")", ".", "lower", "(", ")", "==", "'hive'", ":", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "hadoop", ".", "hive", ".", "conf", ".", "HiveConf", "(", ")", "return", "SparkSession", ".", "builder", ".", "enableHiveSupport", "(", ")", ".", "getOrCreate", "(", ")", "else", ":", "return", "SparkSession", ".", "builder", ".", "getOrCreate", "(", ")", "except", "(", "py4j", ".", "protocol", ".", "Py4JError", ",", "TypeError", ")", ":", "if", "conf", ".", "get", "(", "'spark.sql.catalogImplementation'", ",", "''", ")", ".", "lower", "(", ")", "==", "'hive'", ":", "warnings", ".", "warn", "(", "\"Fall back to non-hive support because failing to access HiveConf, \"", "\"please make sure you build spark with hive\"", ")", "return", "SparkSession", ".", "builder", ".", "getOrCreate", "(", ")" ]
Initialize a SparkSession for a pyspark shell session. This is called from shell.py to make error handling simpler without needing to declare local variables in that script, which would expose those to users.
[ "Initialize", "a", "SparkSession", "for", "a", "pyspark", "shell", "session", ".", "This", "is", "called", "from", "shell", ".", "py", "to", "make", "error", "handling", "simpler", "without", "needing", "to", "declare", "local", "variables", "in", "that", "script", "which", "would", "expose", "those", "to", "users", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L591-L615
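Outside the shell helper, the same Hive-or-in-memory choice is made explicitly through the session builder; a minimal sketch (the app name is illustrative):

from pyspark.sql import SparkSession

# What the shell helper attempts first: a Hive-backed catalog.
spark = (SparkSession.builder
         .appName("example")  # illustrative
         .config("spark.sql.catalogImplementation", "hive")
         .enableHiveSupport()
         .getOrCreate())

# If HiveConf is not on the classpath, drop enableHiveSupport() and the config
# above to get the in-memory catalog, mirroring the fallback branch.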
apache/spark
python/pyspark/sql/session.py
SparkSession.createDataFrame
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
    """
    Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

    When ``schema`` is a list of column names, the type of each column
    will be inferred from ``data``.

    When ``schema`` is ``None``, it will try to infer the schema (column names and types)
    from ``data``, which should be an RDD of :class:`Row`,
    or :class:`namedtuple`, or :class:`dict`.

    When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
    the real data, or an exception will be thrown at runtime. If the given schema is not
    :class:`pyspark.sql.types.StructType`, it will be wrapped into a
    :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
    each record will also be wrapped into a tuple, which can be converted to row later.

    If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
    rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

    :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
        etc.), or :class:`list`, or :class:`pandas.DataFrame`.
    :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
        column names, default is ``None``.  The data type string format equals to
        :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
        omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
        ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
        ``int`` as a short name for ``IntegerType``.
    :param samplingRatio: the sample ratio of rows used for inferring
    :param verifySchema: verify data types of every row against schema.
    :return: :class:`DataFrame`

    .. versionchanged:: 2.1
       Added verifySchema.

    .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.

    >>> l = [('Alice', 1)]
    >>> spark.createDataFrame(l).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> spark.createDataFrame(l, ['name', 'age']).collect()
    [Row(name=u'Alice', age=1)]

    >>> d = [{'name': 'Alice', 'age': 1}]
    >>> spark.createDataFrame(d).collect()
    [Row(age=1, name=u'Alice')]

    >>> rdd = sc.parallelize(l)
    >>> spark.createDataFrame(rdd).collect()
    [Row(_1=u'Alice', _2=1)]
    >>> df = spark.createDataFrame(rdd, ['name', 'age'])
    >>> df.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql import Row
    >>> Person = Row('name', 'age')
    >>> person = rdd.map(lambda r: Person(*r))
    >>> df2 = spark.createDataFrame(person)
    >>> df2.collect()
    [Row(name=u'Alice', age=1)]

    >>> from pyspark.sql.types import *
    >>> schema = StructType([
    ...    StructField("name", StringType(), True),
    ...    StructField("age", IntegerType(), True)])
    >>> df3 = spark.createDataFrame(rdd, schema)
    >>> df3.collect()
    [Row(name=u'Alice', age=1)]

    >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
    [Row(name=u'Alice', age=1)]
    >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
    [Row(0=1, 1=2)]

    >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
    [Row(a=u'Alice', b=1)]
    >>> rdd = rdd.map(lambda row: row[1])
    >>> spark.createDataFrame(rdd, "int").collect()
    [Row(value=1)]
    >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Py4JJavaError: ...
    """
    SparkSession._activeSession = self
    self._jvm.SparkSession.setActiveSession(self._jsparkSession)
    if isinstance(data, DataFrame):
        raise TypeError("data is already a DataFrame")

    if isinstance(schema, basestring):
        schema = _parse_datatype_string(schema)
    elif isinstance(schema, (list, tuple)):
        # Must re-encode any unicode strings to be consistent with StructField names
        schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]

    try:
        import pandas
        has_pandas = True
    except Exception:
        has_pandas = False
    if has_pandas and isinstance(data, pandas.DataFrame):
        from pyspark.sql.utils import require_minimum_pandas_version
        require_minimum_pandas_version()

        if self._wrapped._conf.pandasRespectSessionTimeZone():
            timezone = self._wrapped._conf.sessionLocalTimeZone()
        else:
            timezone = None

        # If no schema supplied by user then get the names of columns only
        if schema is None:
            schema = [str(x) if not isinstance(x, basestring) else
                      (x.encode('utf-8') if not isinstance(x, str) else x)
                      for x in data.columns]

        if self._wrapped._conf.arrowEnabled() and len(data) > 0:
            try:
                return self._create_from_pandas_with_arrow(data, schema, timezone)
            except Exception as e:
                from pyspark.util import _exception_message
                if self._wrapped._conf.arrowFallbackEnabled():
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.enabled' is set to true; however, "
                        "failed by the reason below:\n  %s\n"
                        "Attempting non-optimization as "
                        "'spark.sql.execution.arrow.fallback.enabled' is set to "
                        "true." % _exception_message(e))
                    warnings.warn(msg)
                else:
                    msg = (
                        "createDataFrame attempted Arrow optimization because "
                        "'spark.sql.execution.arrow.enabled' is set to true, but has reached "
                        "the error below and will not continue because automatic fallback "
                        "with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
                        "false.\n  %s" % _exception_message(e))
                    warnings.warn(msg)
                    raise
        data = self._convert_from_pandas(data, schema, timezone)

    if isinstance(schema, StructType):
        verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            return obj
    elif isinstance(schema, DataType):
        dataType = schema
        schema = StructType().add("value", schema)

        verify_func = _make_type_verifier(
            dataType, name="field value") if verifySchema else lambda _: True

        def prepare(obj):
            verify_func(obj)
            return obj,
    else:
        prepare = lambda obj: obj

    if isinstance(data, RDD):
        rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
    else:
        rdd, schema = self._createFromLocal(map(prepare, data), schema)
    jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
    jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
    df = DataFrame(jdf, self._wrapped)
    df._schema = schema
    return df
python
[ "def", "createDataFrame", "(", "self", ",", "data", ",", "schema", "=", "None", ",", "samplingRatio", "=", "None", ",", "verifySchema", "=", "True", ")", ":", "SparkSession", ".", "_activeSession", "=", "self", "self", ".", "_jvm", ".", "SparkSession", ".", "setActiveSession", "(", "self", ".", "_jsparkSession", ")", "if", "isinstance", "(", "data", ",", "DataFrame", ")", ":", "raise", "TypeError", "(", "\"data is already a DataFrame\"", ")", "if", "isinstance", "(", "schema", ",", "basestring", ")", ":", "schema", "=", "_parse_datatype_string", "(", "schema", ")", "elif", "isinstance", "(", "schema", ",", "(", "list", ",", "tuple", ")", ")", ":", "# Must re-encode any unicode strings to be consistent with StructField names", "schema", "=", "[", "x", ".", "encode", "(", "'utf-8'", ")", "if", "not", "isinstance", "(", "x", ",", "str", ")", "else", "x", "for", "x", "in", "schema", "]", "try", ":", "import", "pandas", "has_pandas", "=", "True", "except", "Exception", ":", "has_pandas", "=", "False", "if", "has_pandas", "and", "isinstance", "(", "data", ",", "pandas", ".", "DataFrame", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "if", "self", ".", "_wrapped", ".", "_conf", ".", "pandasRespectSessionTimeZone", "(", ")", ":", "timezone", "=", "self", ".", "_wrapped", ".", "_conf", ".", "sessionLocalTimeZone", "(", ")", "else", ":", "timezone", "=", "None", "# If no schema supplied by user then get the names of columns only", "if", "schema", "is", "None", ":", "schema", "=", "[", "str", "(", "x", ")", "if", "not", "isinstance", "(", "x", ",", "basestring", ")", "else", "(", "x", ".", "encode", "(", "'utf-8'", ")", "if", "not", "isinstance", "(", "x", ",", "str", ")", "else", "x", ")", "for", "x", "in", "data", ".", "columns", "]", "if", "self", ".", "_wrapped", ".", "_conf", ".", "arrowEnabled", "(", ")", "and", "len", "(", "data", ")", ">", "0", ":", "try", ":", "return", "self", ".", "_create_from_pandas_with_arrow", "(", "data", ",", "schema", ",", "timezone", ")", "except", "Exception", "as", "e", ":", "from", "pyspark", ".", "util", "import", "_exception_message", "if", "self", ".", "_wrapped", ".", "_conf", ".", "arrowFallbackEnabled", "(", ")", ":", "msg", "=", "(", "\"createDataFrame attempted Arrow optimization because \"", "\"'spark.sql.execution.arrow.enabled' is set to true; however, \"", "\"failed by the reason below:\\n %s\\n\"", "\"Attempting non-optimization as \"", "\"'spark.sql.execution.arrow.fallback.enabled' is set to \"", "\"true.\"", "%", "_exception_message", "(", "e", ")", ")", "warnings", ".", "warn", "(", "msg", ")", "else", ":", "msg", "=", "(", "\"createDataFrame attempted Arrow optimization because \"", "\"'spark.sql.execution.arrow.enabled' is set to true, but has reached \"", "\"the error below and will not continue because automatic fallback \"", "\"with 'spark.sql.execution.arrow.fallback.enabled' has been set to \"", "\"false.\\n %s\"", "%", "_exception_message", "(", "e", ")", ")", "warnings", ".", "warn", "(", "msg", ")", "raise", "data", "=", "self", ".", "_convert_from_pandas", "(", "data", ",", "schema", ",", "timezone", ")", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "verify_func", "=", "_make_type_verifier", "(", "schema", ")", "if", "verifySchema", "else", "lambda", "_", ":", "True", "def", "prepare", "(", "obj", ")", ":", "verify_func", "(", "obj", ")", "return", "obj", "elif", "isinstance", "(", "schema", ",", "DataType", ")", ":", 
"dataType", "=", "schema", "schema", "=", "StructType", "(", ")", ".", "add", "(", "\"value\"", ",", "schema", ")", "verify_func", "=", "_make_type_verifier", "(", "dataType", ",", "name", "=", "\"field value\"", ")", "if", "verifySchema", "else", "lambda", "_", ":", "True", "def", "prepare", "(", "obj", ")", ":", "verify_func", "(", "obj", ")", "return", "obj", ",", "else", ":", "prepare", "=", "lambda", "obj", ":", "obj", "if", "isinstance", "(", "data", ",", "RDD", ")", ":", "rdd", ",", "schema", "=", "self", ".", "_createFromRDD", "(", "data", ".", "map", "(", "prepare", ")", ",", "schema", ",", "samplingRatio", ")", "else", ":", "rdd", ",", "schema", "=", "self", ".", "_createFromLocal", "(", "map", "(", "prepare", ",", "data", ")", ",", "schema", ")", "jrdd", "=", "self", ".", "_jvm", ".", "SerDeUtil", ".", "toJavaArray", "(", "rdd", ".", "_to_java_object_rdd", "(", ")", ")", "jdf", "=", "self", ".", "_jsparkSession", ".", "applySchemaToPythonRDD", "(", "jrdd", ".", "rdd", "(", ")", ",", "schema", ".", "json", "(", ")", ")", "df", "=", "DataFrame", "(", "jdf", ",", "self", ".", "_wrapped", ")", "df", ".", "_schema", "=", "schema", "return", "df" ]
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`. When ``schema`` is a list of column names, the type of each column will be inferred from ``data``. When ``schema`` is ``None``, it will try to infer the schema (column names and types) from ``data``, which should be an RDD of :class:`Row`, or :class:`namedtuple`, or :class:`dict`. When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match the real data, or an exception will be thrown at runtime. If the given schema is not :class:`pyspark.sql.types.StructType`, it will be wrapped into a :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value", each record will also be wrapped into a tuple, which can be converted to row later. If schema inference is needed, ``samplingRatio`` is used to determined the ratio of rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``. :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean, etc.), or :class:`list`, or :class:`pandas.DataFrame`. :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of column names, default is ``None``. The data type string format equals to :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use ``int`` as a short name for ``IntegerType``. :param samplingRatio: the sample ratio of rows used for inferring :param verifySchema: verify data types of every row against schema. :return: :class:`DataFrame` .. versionchanged:: 2.1 Added verifySchema. .. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental. >>> l = [('Alice', 1)] >>> spark.createDataFrame(l).collect() [Row(_1=u'Alice', _2=1)] >>> spark.createDataFrame(l, ['name', 'age']).collect() [Row(name=u'Alice', age=1)] >>> d = [{'name': 'Alice', 'age': 1}] >>> spark.createDataFrame(d).collect() [Row(age=1, name=u'Alice')] >>> rdd = sc.parallelize(l) >>> spark.createDataFrame(rdd).collect() [Row(_1=u'Alice', _2=1)] >>> df = spark.createDataFrame(rdd, ['name', 'age']) >>> df.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql import Row >>> Person = Row('name', 'age') >>> person = rdd.map(lambda r: Person(*r)) >>> df2 = spark.createDataFrame(person) >>> df2.collect() [Row(name=u'Alice', age=1)] >>> from pyspark.sql.types import * >>> schema = StructType([ ... StructField("name", StringType(), True), ... StructField("age", IntegerType(), True)]) >>> df3 = spark.createDataFrame(rdd, schema) >>> df3.collect() [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP [Row(name=u'Alice', age=1)] >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP [Row(0=1, 1=2)] >>> spark.createDataFrame(rdd, "a: string, b: int").collect() [Row(a=u'Alice', b=1)] >>> rdd = rdd.map(lambda row: row[1]) >>> spark.createDataFrame(rdd, "int").collect() [Row(value=1)] >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Py4JJavaError: ...
[ "Creates", "a", ":", "class", ":", "DataFrame", "from", "an", ":", "class", ":", "RDD", "a", "list", "or", "a", ":", "class", ":", "pandas", ".", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L619-L787
apache/spark
python/pyspark/sql/session.py
SparkSession.sql
def sql(self, sqlQuery):
    """Returns a :class:`DataFrame` representing the result of the given query.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
    >>> df2.collect()
    [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
    """
    return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
python
[ "def", "sql", "(", "self", ",", "sqlQuery", ")", ":", "return", "DataFrame", "(", "self", ".", "_jsparkSession", ".", "sql", "(", "sqlQuery", ")", ",", "self", ".", "_wrapped", ")" ]
Returns a :class:`DataFrame` representing the result of the given query. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1") >>> df2.collect() [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
[ "Returns", "a", ":", "class", ":", "DataFrame", "representing", "the", "result", "of", "the", "given", "query", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L791-L801
apache/spark
python/pyspark/sql/session.py
SparkSession.table
def table(self, tableName):
    """Returns the specified table as a :class:`DataFrame`.

    :return: :class:`DataFrame`

    >>> df.createOrReplaceTempView("table1")
    >>> df2 = spark.table("table1")
    >>> sorted(df.collect()) == sorted(df2.collect())
    True
    """
    return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
python
[ "def", "table", "(", "self", ",", "tableName", ")", ":", "return", "DataFrame", "(", "self", ".", "_jsparkSession", ".", "table", "(", "tableName", ")", ",", "self", ".", "_wrapped", ")" ]
Returns the specified table as a :class:`DataFrame`. :return: :class:`DataFrame` >>> df.createOrReplaceTempView("table1") >>> df2 = spark.table("table1") >>> sorted(df.collect()) == sorted(df2.collect()) True
[ "Returns", "the", "specified", "table", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L804-L814
apache/spark
python/pyspark/sql/session.py
SparkSession.streams
def streams(self): """Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` StreamingQueries active on `this` context. .. note:: Evolving. :return: :class:`StreamingQueryManager` """ from pyspark.sql.streaming import StreamingQueryManager return StreamingQueryManager(self._jsparkSession.streams())
python
def streams(self): """Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` StreamingQueries active on `this` context. .. note:: Evolving. :return: :class:`StreamingQueryManager` """ from pyspark.sql.streaming import StreamingQueryManager return StreamingQueryManager(self._jsparkSession.streams())
[ "def", "streams", "(", "self", ")", ":", "from", "pyspark", ".", "sql", ".", "streaming", "import", "StreamingQueryManager", "return", "StreamingQueryManager", "(", "self", ".", "_jsparkSession", ".", "streams", "(", ")", ")" ]
Returns a :class:`StreamingQueryManager` that allows managing all the :class:`StreamingQuery` instances active on `this` context. .. note:: Evolving. :return: :class:`StreamingQueryManager`
[ "Returns", "a", ":", "class", ":", "StreamingQueryManager", "that", "allows", "managing", "all", "the", ":", "class", ":", "StreamingQuery", "StreamingQueries", "active", "on", "this", "context", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L842-L851
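A minimal sketch of using the :class:`StreamingQueryManager` returned by ``spark.streams``, assuming an active ``SparkSession`` named ``spark``; the ``rate`` source, ``memory`` sink and query name are used only for illustration:

sdf = spark.readStream.format("rate").load()
query = sdf.writeStream.format("memory").queryName("streams_demo").start()
for q in spark.streams.active:                 # all queries active on this session
    print(q.id, q.name, q.isActive)
print(spark.streams.get(query.id).isActive)    # look a query up again by its id
query.stop()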
apache/spark
python/pyspark/sql/session.py
SparkSession.stop
def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() # We should clean the default session up. See SPARK-23228. self._jvm.SparkSession.clearDefaultSession() self._jvm.SparkSession.clearActiveSession() SparkSession._instantiatedSession = None SparkSession._activeSession = None
python
def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() # We should clean the default session up. See SPARK-23228. self._jvm.SparkSession.clearDefaultSession() self._jvm.SparkSession.clearActiveSession() SparkSession._instantiatedSession = None SparkSession._activeSession = None
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_sc", ".", "stop", "(", ")", "# We should clean the default session up. See SPARK-23228.", "self", ".", "_jvm", ".", "SparkSession", ".", "clearDefaultSession", "(", ")", "self", ".", "_jvm", ".", "SparkSession", ".", "clearActiveSession", "(", ")", "SparkSession", ".", "_instantiatedSession", "=", "None", "SparkSession", ".", "_activeSession", "=", "None" ]
Stop the underlying :class:`SparkContext`.
[ "Stop", "the", "underlying", ":", "class", ":", "SparkContext", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L854-L862
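A minimal sketch of the create/use/stop lifecycle around ``SparkSession.stop``, assuming ``pyspark`` is importable; the application name is illustrative:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("stop-demo").getOrCreate()
try:
    print(spark.range(10).count())
finally:
    # stop() shuts down the underlying SparkContext and clears the default/active
    # session (see SPARK-23228), so a later getOrCreate() builds a fresh session.
    spark.stop()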
apache/spark
python/pyspark/status.py
StatusTracker.getJobInfo
def getJobInfo(self, jobId): """ Returns a :class:`SparkJobInfo` object, or None if the job info could not be found or was garbage collected. """ job = self._jtracker.getJobInfo(jobId) if job is not None: return SparkJobInfo(jobId, job.stageIds(), str(job.status()))
python
def getJobInfo(self, jobId): """ Returns a :class:`SparkJobInfo` object, or None if the job info could not be found or was garbage collected. """ job = self._jtracker.getJobInfo(jobId) if job is not None: return SparkJobInfo(jobId, job.stageIds(), str(job.status()))
[ "def", "getJobInfo", "(", "self", ",", "jobId", ")", ":", "job", "=", "self", ".", "_jtracker", ".", "getJobInfo", "(", "jobId", ")", "if", "job", "is", "not", "None", ":", "return", "SparkJobInfo", "(", "jobId", ",", "job", ".", "stageIds", "(", ")", ",", "str", "(", "job", ".", "status", "(", ")", ")", ")" ]
Returns a :class:`SparkJobInfo` object, or None if the job info could not be found or was garbage collected.
[ "Returns", "a", ":", "class", ":", "SparkJobInfo", "object", "or", "None", "if", "the", "job", "info", "could", "not", "be", "found", "or", "was", "garbage", "collected", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/status.py#L78-L85
apache/spark
python/pyspark/status.py
StatusTracker.getStageInfo
def getStageInfo(self, stageId): """ Returns a :class:`SparkStageInfo` object, or None if the stage info could not be found or was garbage collected. """ stage = self._jtracker.getStageInfo(stageId) if stage is not None: # TODO: fetch them in batch for better performance attrs = [getattr(stage, f)() for f in SparkStageInfo._fields[1:]] return SparkStageInfo(stageId, *attrs)
python
def getStageInfo(self, stageId): """ Returns a :class:`SparkStageInfo` object, or None if the stage info could not be found or was garbage collected. """ stage = self._jtracker.getStageInfo(stageId) if stage is not None: # TODO: fetch them in batch for better performance attrs = [getattr(stage, f)() for f in SparkStageInfo._fields[1:]] return SparkStageInfo(stageId, *attrs)
[ "def", "getStageInfo", "(", "self", ",", "stageId", ")", ":", "stage", "=", "self", ".", "_jtracker", ".", "getStageInfo", "(", "stageId", ")", "if", "stage", "is", "not", "None", ":", "# TODO: fetch them in batch for better performance", "attrs", "=", "[", "getattr", "(", "stage", ",", "f", ")", "(", ")", "for", "f", "in", "SparkStageInfo", ".", "_fields", "[", "1", ":", "]", "]", "return", "SparkStageInfo", "(", "stageId", ",", "*", "attrs", ")" ]
Returns a :class:`SparkStageInfo` object, or None if the stage info could not be found or was garbage collected.
[ "Returns", "a", ":", "class", ":", "SparkStageInfo", "object", "or", "None", "if", "the", "stage", "info", "could", "not", "be", "found", "or", "was", "garbage", "collected", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/status.py#L87-L96
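A minimal sketch combining ``StatusTracker.getJobInfo`` and ``StatusTracker.getStageInfo``, assuming an active ``SparkContext`` named ``sc``; a job is run first so the tracker has something to report:

sc.parallelize(range(1000), 4).count()
tracker = sc.statusTracker()
for job_id in tracker.getJobIdsForGroup():       # jobs not attached to a job group
    job = tracker.getJobInfo(job_id)
    if job is None:                              # info may have been garbage collected
        continue
    print("job", job.jobId, job.status)
    for stage_id in job.stageIds:
        stage = tracker.getStageInfo(stage_id)
        if stage is not None:
            print("  stage", stage.stageId, stage.numTasks, stage.numCompletedTasks)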
apache/spark
python/pyspark/serializers.py
_restore
def _restore(name, fields, value): """ Restore an object of namedtuple""" k = (name, fields) cls = __cls.get(k) if cls is None: cls = collections.namedtuple(name, fields) __cls[k] = cls return cls(*value)
python
def _restore(name, fields, value): """ Restore an object of namedtuple""" k = (name, fields) cls = __cls.get(k) if cls is None: cls = collections.namedtuple(name, fields) __cls[k] = cls return cls(*value)
[ "def", "_restore", "(", "name", ",", "fields", ",", "value", ")", ":", "k", "=", "(", "name", ",", "fields", ")", "cls", "=", "__cls", ".", "get", "(", "k", ")", "if", "cls", "is", "None", ":", "cls", "=", "collections", ".", "namedtuple", "(", "name", ",", "fields", ")", "__cls", "[", "k", "]", "=", "cls", "return", "cls", "(", "*", "value", ")" ]
Restore an object of namedtuple
[ "Restore", "an", "object", "of", "namedtuple" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L578-L585
apache/spark
python/pyspark/serializers.py
_hack_namedtuple
def _hack_namedtuple(cls): """ Make class generated by namedtuple picklable """ name = cls.__name__ fields = cls._fields def __reduce__(self): return (_restore, (name, fields, tuple(self))) cls.__reduce__ = __reduce__ cls._is_namedtuple_ = True return cls
python
def _hack_namedtuple(cls): """ Make class generated by namedtuple picklable """ name = cls.__name__ fields = cls._fields def __reduce__(self): return (_restore, (name, fields, tuple(self))) cls.__reduce__ = __reduce__ cls._is_namedtuple_ = True return cls
[ "def", "_hack_namedtuple", "(", "cls", ")", ":", "name", "=", "cls", ".", "__name__", "fields", "=", "cls", ".", "_fields", "def", "__reduce__", "(", "self", ")", ":", "return", "(", "_restore", ",", "(", "name", ",", "fields", ",", "tuple", "(", "self", ")", ")", ")", "cls", ".", "__reduce__", "=", "__reduce__", "cls", ".", "_is_namedtuple_", "=", "True", "return", "cls" ]
Make class generated by namedtuple picklable
[ "Make", "class", "generated", "by", "namedtuple", "picklable" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L588-L597
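A minimal sketch of the reduce protocol that ``_hack_namedtuple`` installs and ``_restore`` serves; ``_restore`` is a private helper of ``pyspark.serializers`` and is imported here only for illustration:

from pyspark.serializers import _restore

# A hijacked namedtuple pickles as (_restore, (class name, field names, values)), so
# the receiving process can rebuild it without importing the original class.
print(_restore("Person", ("name", "age"), ("Alice", 1)))  # Person(name='Alice', age=1)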
apache/spark
python/pyspark/serializers.py
_hijack_namedtuple
def _hijack_namedtuple(): """ Hack namedtuple() to make it picklable """ # hijack only one time if hasattr(collections.namedtuple, "__hijack"): return global _old_namedtuple # or it will put in closure global _old_namedtuple_kwdefaults # or it will put in closure too def _copy_func(f): return types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) def _kwdefaults(f): # __kwdefaults__ contains the default values of keyword-only arguments which are # introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple # are as below: # # - Does not exist in Python 2. # - Returns None in <= Python 3.5.x. # - Returns a dictionary containing the default values to the keys from Python 3.6.x # (See https://bugs.python.org/issue25628). kargs = getattr(f, "__kwdefaults__", None) if kargs is None: return {} else: return kargs _old_namedtuple = _copy_func(collections.namedtuple) _old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple) def namedtuple(*args, **kwargs): for k, v in _old_namedtuple_kwdefaults.items(): kwargs[k] = kwargs.get(k, v) cls = _old_namedtuple(*args, **kwargs) return _hack_namedtuple(cls) # replace namedtuple with the new one collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple collections.namedtuple.__code__ = namedtuple.__code__ collections.namedtuple.__hijack = 1 # hack the cls already generated by namedtuple. # Those created in other modules can be pickled as normal, # so only hack those in __main__ module for n, o in sys.modules["__main__"].__dict__.items(): if (type(o) is type and o.__base__ is tuple and hasattr(o, "_fields") and "__reduce__" not in o.__dict__): _hack_namedtuple(o)
python
def _hijack_namedtuple(): """ Hack namedtuple() to make it picklable """ # hijack only one time if hasattr(collections.namedtuple, "__hijack"): return global _old_namedtuple # or it will put in closure global _old_namedtuple_kwdefaults # or it will put in closure too def _copy_func(f): return types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) def _kwdefaults(f): # __kwdefaults__ contains the default values of keyword-only arguments which are # introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple # are as below: # # - Does not exist in Python 2. # - Returns None in <= Python 3.5.x. # - Returns a dictionary containing the default values to the keys from Python 3.6.x # (See https://bugs.python.org/issue25628). kargs = getattr(f, "__kwdefaults__", None) if kargs is None: return {} else: return kargs _old_namedtuple = _copy_func(collections.namedtuple) _old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple) def namedtuple(*args, **kwargs): for k, v in _old_namedtuple_kwdefaults.items(): kwargs[k] = kwargs.get(k, v) cls = _old_namedtuple(*args, **kwargs) return _hack_namedtuple(cls) # replace namedtuple with the new one collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple collections.namedtuple.__code__ = namedtuple.__code__ collections.namedtuple.__hijack = 1 # hack the cls already generated by namedtuple. # Those created in other modules can be pickled as normal, # so only hack those in __main__ module for n, o in sys.modules["__main__"].__dict__.items(): if (type(o) is type and o.__base__ is tuple and hasattr(o, "_fields") and "__reduce__" not in o.__dict__): _hack_namedtuple(o)
[ "def", "_hijack_namedtuple", "(", ")", ":", "# hijack only one time", "if", "hasattr", "(", "collections", ".", "namedtuple", ",", "\"__hijack\"", ")", ":", "return", "global", "_old_namedtuple", "# or it will put in closure", "global", "_old_namedtuple_kwdefaults", "# or it will put in closure too", "def", "_copy_func", "(", "f", ")", ":", "return", "types", ".", "FunctionType", "(", "f", ".", "__code__", ",", "f", ".", "__globals__", ",", "f", ".", "__name__", ",", "f", ".", "__defaults__", ",", "f", ".", "__closure__", ")", "def", "_kwdefaults", "(", "f", ")", ":", "# __kwdefaults__ contains the default values of keyword-only arguments which are", "# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple", "# are as below:", "#", "# - Does not exist in Python 2.", "# - Returns None in <= Python 3.5.x.", "# - Returns a dictionary containing the default values to the keys from Python 3.6.x", "# (See https://bugs.python.org/issue25628).", "kargs", "=", "getattr", "(", "f", ",", "\"__kwdefaults__\"", ",", "None", ")", "if", "kargs", "is", "None", ":", "return", "{", "}", "else", ":", "return", "kargs", "_old_namedtuple", "=", "_copy_func", "(", "collections", ".", "namedtuple", ")", "_old_namedtuple_kwdefaults", "=", "_kwdefaults", "(", "collections", ".", "namedtuple", ")", "def", "namedtuple", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "k", ",", "v", "in", "_old_namedtuple_kwdefaults", ".", "items", "(", ")", ":", "kwargs", "[", "k", "]", "=", "kwargs", ".", "get", "(", "k", ",", "v", ")", "cls", "=", "_old_namedtuple", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_hack_namedtuple", "(", "cls", ")", "# replace namedtuple with the new one", "collections", ".", "namedtuple", ".", "__globals__", "[", "\"_old_namedtuple_kwdefaults\"", "]", "=", "_old_namedtuple_kwdefaults", "collections", ".", "namedtuple", ".", "__globals__", "[", "\"_old_namedtuple\"", "]", "=", "_old_namedtuple", "collections", ".", "namedtuple", ".", "__globals__", "[", "\"_hack_namedtuple\"", "]", "=", "_hack_namedtuple", "collections", ".", "namedtuple", ".", "__code__", "=", "namedtuple", ".", "__code__", "collections", ".", "namedtuple", ".", "__hijack", "=", "1", "# hack the cls already generated by namedtuple.", "# Those created in other modules can be pickled as normal,", "# so only hack those in __main__ module", "for", "n", ",", "o", "in", "sys", ".", "modules", "[", "\"__main__\"", "]", ".", "__dict__", ".", "items", "(", ")", ":", "if", "(", "type", "(", "o", ")", "is", "type", "and", "o", ".", "__base__", "is", "tuple", "and", "hasattr", "(", "o", ",", "\"_fields\"", ")", "and", "\"__reduce__\"", "not", "in", "o", ".", "__dict__", ")", ":", "_hack_namedtuple", "(", "o", ")" ]
Hack namedtuple() to make it picklable
[ "Hack", "namedtuple", "()", "to", "make", "it", "picklable" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L600-L651
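A minimal sketch of the effect of the hijack, assuming an active ``SparkContext`` named ``sc``: because ``collections.namedtuple`` is patched when ``pyspark`` is imported, classes defined afterwards, even in ``__main__``, can be pickled and shipped to workers.

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])   # defined after pyspark was imported
points = sc.parallelize([Point(1, 2), Point(3, 4)])
print(points.map(lambda p: p.x + p.y).collect())   # [3, 7]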
apache/spark
python/pyspark/serializers.py
ArrowCollectSerializer.load_stream
def load_stream(self, stream): """ Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields a list of indices that can be used to put the RecordBatches in the correct order. """ # load the batches for batch in self.serializer.load_stream(stream): yield batch # load the batch order indices num = read_int(stream) batch_order = [] for i in xrange(num): index = read_int(stream) batch_order.append(index) yield batch_order
python
def load_stream(self, stream): """ Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields a list of indices that can be used to put the RecordBatches in the correct order. """ # load the batches for batch in self.serializer.load_stream(stream): yield batch # load the batch order indices num = read_int(stream) batch_order = [] for i in xrange(num): index = read_int(stream) batch_order.append(index) yield batch_order
[ "def", "load_stream", "(", "self", ",", "stream", ")", ":", "# load the batches", "for", "batch", "in", "self", ".", "serializer", ".", "load_stream", "(", "stream", ")", ":", "yield", "batch", "# load the batch order indices", "num", "=", "read_int", "(", "stream", ")", "batch_order", "=", "[", "]", "for", "i", "in", "xrange", "(", "num", ")", ":", "index", "=", "read_int", "(", "stream", ")", "batch_order", ".", "append", "(", "index", ")", "yield", "batch_order" ]
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields a list of indices that can be used to put the RecordBatches in the correct order.
[ "Load", "a", "stream", "of", "un", "-", "ordered", "Arrow", "RecordBatches", "where", "the", "last", "iteration", "yields", "a", "list", "of", "indices", "that", "can", "be", "used", "to", "put", "the", "RecordBatches", "in", "the", "correct", "order", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L200-L215
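A minimal sketch of the reordering step that consumers of ``ArrowCollectSerializer.load_stream`` perform, using plain strings in place of real Arrow record batches; the last element yielded by the stream is the index list.

loaded = ["batch_c", "batch_a", "batch_b", [1, 2, 0]]   # arrival order, then indices
batches, batch_order = loaded[:-1], loaded[-1]
# batch_order[k] is the arrival position of the batch that belongs at slot k.
print([batches[i] for i in batch_order])                # ['batch_a', 'batch_b', 'batch_c']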
apache/spark
python/pyspark/serializers.py
ArrowStreamPandasSerializer._create_batch
def _create_batch(self, series): """ Create an Arrow record batch from the given pandas.Series or list of Series, with optional type. :param series: A single pandas.Series, list of Series, or list of (series, arrow_type) :return: Arrow RecordBatch """ import pandas as pd import pyarrow as pa from pyspark.sql.types import _check_series_convert_timestamps_internal # Make input conform to [(series1, type1), (series2, type2), ...] if not isinstance(series, (list, tuple)) or \ (len(series) == 2 and isinstance(series[1], pa.DataType)): series = [series] series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series) def create_array(s, t): mask = s.isnull() # Ensure timestamp series are in expected form for Spark internal representation if t is not None and pa.types.is_timestamp(t): s = _check_series_convert_timestamps_internal(s.fillna(0), self._timezone) # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2 return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False) try: array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck) except pa.ArrowException as e: error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \ "Array (%s). It can be caused by overflows or other unsafe " + \ "conversions warned by Arrow. Arrow safe type check can be " + \ "disabled by using SQL config " + \ "`spark.sql.execution.pandas.arrowSafeTypeConversion`." raise RuntimeError(error_msg % (s.dtype, t), e) return array arrs = [] for s, t in series: if t is not None and pa.types.is_struct(t): if not isinstance(s, pd.DataFrame): raise ValueError("A field of type StructType expects a pandas.DataFrame, " "but got: %s" % str(type(s))) # Input partition and result pandas.DataFrame empty, make empty Arrays with struct if len(s) == 0 and len(s.columns) == 0: arrs_names = [(pa.array([], type=field.type), field.name) for field in t] # Assign result columns by schema name if user labeled with strings elif self._assign_cols_by_name and any(isinstance(name, basestring) for name in s.columns): arrs_names = [(create_array(s[field.name], field.type), field.name) for field in t] # Assign result columns by position else: arrs_names = [(create_array(s[s.columns[i]], field.type), field.name) for i, field in enumerate(t)] struct_arrs, struct_names = zip(*arrs_names) arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names)) else: arrs.append(create_array(s, t)) return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
python
def _create_batch(self, series): """ Create an Arrow record batch from the given pandas.Series or list of Series, with optional type. :param series: A single pandas.Series, list of Series, or list of (series, arrow_type) :return: Arrow RecordBatch """ import pandas as pd import pyarrow as pa from pyspark.sql.types import _check_series_convert_timestamps_internal # Make input conform to [(series1, type1), (series2, type2), ...] if not isinstance(series, (list, tuple)) or \ (len(series) == 2 and isinstance(series[1], pa.DataType)): series = [series] series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series) def create_array(s, t): mask = s.isnull() # Ensure timestamp series are in expected form for Spark internal representation if t is not None and pa.types.is_timestamp(t): s = _check_series_convert_timestamps_internal(s.fillna(0), self._timezone) # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2 return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False) try: array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck) except pa.ArrowException as e: error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \ "Array (%s). It can be caused by overflows or other unsafe " + \ "conversions warned by Arrow. Arrow safe type check can be " + \ "disabled by using SQL config " + \ "`spark.sql.execution.pandas.arrowSafeTypeConversion`." raise RuntimeError(error_msg % (s.dtype, t), e) return array arrs = [] for s, t in series: if t is not None and pa.types.is_struct(t): if not isinstance(s, pd.DataFrame): raise ValueError("A field of type StructType expects a pandas.DataFrame, " "but got: %s" % str(type(s))) # Input partition and result pandas.DataFrame empty, make empty Arrays with struct if len(s) == 0 and len(s.columns) == 0: arrs_names = [(pa.array([], type=field.type), field.name) for field in t] # Assign result columns by schema name if user labeled with strings elif self._assign_cols_by_name and any(isinstance(name, basestring) for name in s.columns): arrs_names = [(create_array(s[field.name], field.type), field.name) for field in t] # Assign result columns by position else: arrs_names = [(create_array(s[s.columns[i]], field.type), field.name) for i, field in enumerate(t)] struct_arrs, struct_names = zip(*arrs_names) arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names)) else: arrs.append(create_array(s, t)) return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
[ "def", "_create_batch", "(", "self", ",", "series", ")", ":", "import", "pandas", "as", "pd", "import", "pyarrow", "as", "pa", "from", "pyspark", ".", "sql", ".", "types", "import", "_check_series_convert_timestamps_internal", "# Make input conform to [(series1, type1), (series2, type2), ...]", "if", "not", "isinstance", "(", "series", ",", "(", "list", ",", "tuple", ")", ")", "or", "(", "len", "(", "series", ")", "==", "2", "and", "isinstance", "(", "series", "[", "1", "]", ",", "pa", ".", "DataType", ")", ")", ":", "series", "=", "[", "series", "]", "series", "=", "(", "(", "s", ",", "None", ")", "if", "not", "isinstance", "(", "s", ",", "(", "list", ",", "tuple", ")", ")", "else", "s", "for", "s", "in", "series", ")", "def", "create_array", "(", "s", ",", "t", ")", ":", "mask", "=", "s", ".", "isnull", "(", ")", "# Ensure timestamp series are in expected form for Spark internal representation", "if", "t", "is", "not", "None", "and", "pa", ".", "types", ".", "is_timestamp", "(", "t", ")", ":", "s", "=", "_check_series_convert_timestamps_internal", "(", "s", ".", "fillna", "(", "0", ")", ",", "self", ".", "_timezone", ")", "# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2", "return", "pa", ".", "Array", ".", "from_pandas", "(", "s", ",", "mask", "=", "mask", ")", ".", "cast", "(", "t", ",", "safe", "=", "False", ")", "try", ":", "array", "=", "pa", ".", "Array", ".", "from_pandas", "(", "s", ",", "mask", "=", "mask", ",", "type", "=", "t", ",", "safe", "=", "self", ".", "_safecheck", ")", "except", "pa", ".", "ArrowException", "as", "e", ":", "error_msg", "=", "\"Exception thrown when converting pandas.Series (%s) to Arrow \"", "+", "\"Array (%s). It can be caused by overflows or other unsafe \"", "+", "\"conversions warned by Arrow. 
Arrow safe type check can be \"", "+", "\"disabled by using SQL config \"", "+", "\"`spark.sql.execution.pandas.arrowSafeTypeConversion`.\"", "raise", "RuntimeError", "(", "error_msg", "%", "(", "s", ".", "dtype", ",", "t", ")", ",", "e", ")", "return", "array", "arrs", "=", "[", "]", "for", "s", ",", "t", "in", "series", ":", "if", "t", "is", "not", "None", "and", "pa", ".", "types", ".", "is_struct", "(", "t", ")", ":", "if", "not", "isinstance", "(", "s", ",", "pd", ".", "DataFrame", ")", ":", "raise", "ValueError", "(", "\"A field of type StructType expects a pandas.DataFrame, \"", "\"but got: %s\"", "%", "str", "(", "type", "(", "s", ")", ")", ")", "# Input partition and result pandas.DataFrame empty, make empty Arrays with struct", "if", "len", "(", "s", ")", "==", "0", "and", "len", "(", "s", ".", "columns", ")", "==", "0", ":", "arrs_names", "=", "[", "(", "pa", ".", "array", "(", "[", "]", ",", "type", "=", "field", ".", "type", ")", ",", "field", ".", "name", ")", "for", "field", "in", "t", "]", "# Assign result columns by schema name if user labeled with strings", "elif", "self", ".", "_assign_cols_by_name", "and", "any", "(", "isinstance", "(", "name", ",", "basestring", ")", "for", "name", "in", "s", ".", "columns", ")", ":", "arrs_names", "=", "[", "(", "create_array", "(", "s", "[", "field", ".", "name", "]", ",", "field", ".", "type", ")", ",", "field", ".", "name", ")", "for", "field", "in", "t", "]", "# Assign result columns by position", "else", ":", "arrs_names", "=", "[", "(", "create_array", "(", "s", "[", "s", ".", "columns", "[", "i", "]", "]", ",", "field", ".", "type", ")", ",", "field", ".", "name", ")", "for", "i", ",", "field", "in", "enumerate", "(", "t", ")", "]", "struct_arrs", ",", "struct_names", "=", "zip", "(", "*", "arrs_names", ")", "arrs", ".", "append", "(", "pa", ".", "StructArray", ".", "from_arrays", "(", "struct_arrs", ",", "struct_names", ")", ")", "else", ":", "arrs", ".", "append", "(", "create_array", "(", "s", ",", "t", ")", ")", "return", "pa", ".", "RecordBatch", ".", "from_arrays", "(", "arrs", ",", "[", "\"_%d\"", "%", "i", "for", "i", "in", "xrange", "(", "len", "(", "arrs", ")", ")", "]", ")" ]
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type. :param series: A single pandas.Series, list of Series, or list of (series, arrow_type) :return: Arrow RecordBatch
[ "Create", "an", "Arrow", "record", "batch", "from", "the", "given", "pandas", ".", "Series", "or", "list", "of", "Series", "with", "optional", "type", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L274-L335
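A minimal sketch of the ``pyarrow`` call at the heart of ``create_array`` above; the column values are illustrative, and the real serializer only disables ``safe`` when the Arrow safe-type-conversion SQL config allows it:

import pandas as pd
import pyarrow as pa

s = pd.Series([1.0, None, 3.0])
# The pandas null mask is passed explicitly so missing values become Arrow nulls.
arr = pa.Array.from_pandas(s, mask=s.isnull(), type=pa.float64(), safe=True)
batch = pa.RecordBatch.from_arrays([arr], ["_0"])
print(batch.num_rows, arr.null_count)   # 3 1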
apache/spark
python/pyspark/serializers.py
ArrowStreamPandasSerializer.dump_stream
def dump_stream(self, iterator, stream): """ Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or a list of series accompanied by an optional pyarrow type to coerce the data to. """ batches = (self._create_batch(series) for series in iterator) super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
python
def dump_stream(self, iterator, stream): """ Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or a list of series accompanied by an optional pyarrow type to coerce the data to. """ batches = (self._create_batch(series) for series in iterator) super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
[ "def", "dump_stream", "(", "self", ",", "iterator", ",", "stream", ")", ":", "batches", "=", "(", "self", ".", "_create_batch", "(", "series", ")", "for", "series", "in", "iterator", ")", "super", "(", "ArrowStreamPandasSerializer", ",", "self", ")", ".", "dump_stream", "(", "batches", ",", "stream", ")" ]
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or a list of series accompanied by an optional pyarrow type to coerce the data to.
[ "Make", "ArrowRecordBatches", "from", "Pandas", "Series", "and", "serialize", ".", "Input", "is", "a", "single", "series", "or", "a", "list", "of", "series", "accompanied", "by", "an", "optional", "pyarrow", "type", "to", "coerce", "the", "data", "to", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L337-L343
apache/spark
python/pyspark/serializers.py
ArrowStreamPandasSerializer.load_stream
def load_stream(self, stream): """ Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. """ batches = super(ArrowStreamPandasSerializer, self).load_stream(stream) import pyarrow as pa for batch in batches: yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
python
def load_stream(self, stream): """ Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. """ batches = super(ArrowStreamPandasSerializer, self).load_stream(stream) import pyarrow as pa for batch in batches: yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
[ "def", "load_stream", "(", "self", ",", "stream", ")", ":", "batches", "=", "super", "(", "ArrowStreamPandasSerializer", ",", "self", ")", ".", "load_stream", "(", "stream", ")", "import", "pyarrow", "as", "pa", "for", "batch", "in", "batches", ":", "yield", "[", "self", ".", "arrow_to_pandas", "(", "c", ")", "for", "c", "in", "pa", ".", "Table", ".", "from_batches", "(", "[", "batch", "]", ")", ".", "itercolumns", "(", ")", "]" ]
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
[ "Deserialize", "ArrowRecordBatches", "to", "an", "Arrow", "table", "and", "return", "as", "a", "list", "of", "pandas", ".", "Series", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L345-L352
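A minimal sketch of the per-batch conversion done in ``load_stream`` above, using a locally built record batch instead of one read from a worker stream and plain ``to_pandas`` in place of ``arrow_to_pandas``:

import pyarrow as pa

batch = pa.RecordBatch.from_arrays(
    [pa.array([1, 2, 3]), pa.array(["a", "b", "c"])], ["id", "letter"])
series = [c.to_pandas() for c in pa.Table.from_batches([batch]).itercolumns()]
print(series[0].tolist(), series[1].tolist())   # [1, 2, 3] ['a', 'b', 'c']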
apache/spark
python/pyspark/serializers.py
ArrowStreamPandasUDFSerializer.dump_stream
def dump_stream(self, iterator, stream): """ Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent. This should be sent after creating the first record batch so in case of an error, it can be sent back to the JVM before the Arrow stream starts. """ def init_stream_yield_batches(): should_write_start_length = True for series in iterator: batch = self._create_batch(series) if should_write_start_length: write_int(SpecialLengths.START_ARROW_STREAM, stream) should_write_start_length = False yield batch return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
python
def dump_stream(self, iterator, stream): """ Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent. This should be sent after creating the first record batch so in case of an error, it can be sent back to the JVM before the Arrow stream starts. """ def init_stream_yield_batches(): should_write_start_length = True for series in iterator: batch = self._create_batch(series) if should_write_start_length: write_int(SpecialLengths.START_ARROW_STREAM, stream) should_write_start_length = False yield batch return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
[ "def", "dump_stream", "(", "self", ",", "iterator", ",", "stream", ")", ":", "def", "init_stream_yield_batches", "(", ")", ":", "should_write_start_length", "=", "True", "for", "series", "in", "iterator", ":", "batch", "=", "self", ".", "_create_batch", "(", "series", ")", "if", "should_write_start_length", ":", "write_int", "(", "SpecialLengths", ".", "START_ARROW_STREAM", ",", "stream", ")", "should_write_start_length", "=", "False", "yield", "batch", "return", "ArrowStreamSerializer", ".", "dump_stream", "(", "self", ",", "init_stream_yield_batches", "(", ")", ",", "stream", ")" ]
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent. This should be sent after creating the first record batch so in case of an error, it can be sent back to the JVM before the Arrow stream starts.
[ "Override", "because", "Pandas", "UDFs", "require", "a", "START_ARROW_STREAM", "before", "the", "Arrow", "stream", "is", "sent", ".", "This", "should", "be", "sent", "after", "creating", "the", "first", "record", "batch", "so", "in", "case", "of", "an", "error", "it", "can", "be", "sent", "back", "to", "the", "JVM", "before", "the", "Arrow", "stream", "starts", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L381-L397
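A minimal sketch of the framing used above; ``write_int``, ``read_int`` and ``SpecialLengths`` are pyspark-internal helpers and are used here only to show the start-of-stream marker:

import io
from pyspark.serializers import SpecialLengths, read_int, write_int

buf = io.BytesIO()
write_int(SpecialLengths.START_ARROW_STREAM, buf)   # written once, before the first batch
buf.seek(0)
print(read_int(buf) == SpecialLengths.START_ARROW_STREAM)   # True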
apache/spark
python/pyspark/sql/streaming.py
StreamingQuery.awaitTermination
def awaitTermination(self, timeout=None): """Waits for the termination of `this` query, either by :func:`query.stop()` or by an exception. If the query has terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If the query has terminated, then all subsequent calls to this method will either return immediately (if the query was terminated by :func:`stop()`), or throw the exception immediately (if the query has terminated with exception). throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsq.awaitTermination(int(timeout * 1000)) else: return self._jsq.awaitTermination()
python
def awaitTermination(self, timeout=None): """Waits for the termination of `this` query, either by :func:`query.stop()` or by an exception. If the query has terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If the query has terminated, then all subsequent calls to this method will either return immediately (if the query was terminated by :func:`stop()`), or throw the exception immediately (if the query has terminated with exception). throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsq.awaitTermination(int(timeout * 1000)) else: return self._jsq.awaitTermination()
[ "def", "awaitTermination", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "if", "not", "isinstance", "(", "timeout", ",", "(", "int", ",", "float", ")", ")", "or", "timeout", "<", "0", ":", "raise", "ValueError", "(", "\"timeout must be a positive integer or float. Got %s\"", "%", "timeout", ")", "return", "self", ".", "_jsq", ".", "awaitTermination", "(", "int", "(", "timeout", "*", "1000", ")", ")", "else", ":", "return", "self", ".", "_jsq", ".", "awaitTermination", "(", ")" ]
Waits for the termination of `this` query, either by :func:`query.stop()` or by an exception. If the query has terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If the query has terminated, then all subsequent calls to this method will either return immediately (if the query was terminated by :func:`stop()`), or throw the exception immediately (if the query has terminated with exception). throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
[ "Waits", "for", "the", "termination", "of", "this", "query", "either", "by", ":", "func", ":", "query", ".", "stop", "()", "or", "by", "an", "exception", ".", "If", "the", "query", "has", "terminated", "with", "an", "exception", "then", "the", "exception", "will", "be", "thrown", ".", "If", "timeout", "is", "set", "it", "returns", "whether", "the", "query", "has", "terminated", "or", "not", "within", "the", "timeout", "seconds", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L86-L103
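A minimal sketch of ``StreamingQuery.awaitTermination`` with a timeout, assuming an active ``SparkSession`` named ``spark``; the ``rate`` source and ``console`` sink are used only so the query has something to do:

query = (spark.readStream.format("rate").load()
         .writeStream.format("console").start())
finished = query.awaitTermination(10)    # wait at most 10 seconds
print("terminated:", finished, "exception:", query.exception())
query.stop()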
apache/spark
python/pyspark/sql/streaming.py
StreamingQuery.recentProgress
def recentProgress(self): """Returns an array of the most recent [[StreamingQueryProgress]] updates for this query. The number of progress updates retained for each stream is configured by Spark session configuration `spark.sql.streaming.numRecentProgressUpdates`. """ return [json.loads(p.json()) for p in self._jsq.recentProgress()]
python
def recentProgress(self): """Returns an array of the most recent [[StreamingQueryProgress]] updates for this query. The number of progress updates retained for each stream is configured by Spark session configuration `spark.sql.streaming.numRecentProgressUpdates`. """ return [json.loads(p.json()) for p in self._jsq.recentProgress()]
[ "def", "recentProgress", "(", "self", ")", ":", "return", "[", "json", ".", "loads", "(", "p", ".", "json", "(", ")", ")", "for", "p", "in", "self", ".", "_jsq", ".", "recentProgress", "(", ")", "]" ]
Returns an array of the most recent [[StreamingQueryProgress]] updates for this query. The number of progress updates retained for each stream is configured by Spark session configuration `spark.sql.streaming.numRecentProgressUpdates`.
[ "Returns", "an", "array", "of", "the", "most", "recent", "[[", "StreamingQueryProgress", "]]", "updates", "for", "this", "query", ".", "The", "number", "of", "progress", "updates", "retained", "for", "each", "stream", "is", "configured", "by", "Spark", "session", "configuration", "spark", ".", "sql", ".", "streaming", ".", "numRecentProgressUpdates", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L115-L120
apache/spark
python/pyspark/sql/streaming.py
StreamingQuery.lastProgress
def lastProgress(self): """ Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or None if there were no progress updates :return: a map """ lastProgress = self._jsq.lastProgress() if lastProgress: return json.loads(lastProgress.json()) else: return None
python
def lastProgress(self): """ Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or None if there were no progress updates :return: a map """ lastProgress = self._jsq.lastProgress() if lastProgress: return json.loads(lastProgress.json()) else: return None
[ "def", "lastProgress", "(", "self", ")", ":", "lastProgress", "=", "self", ".", "_jsq", ".", "lastProgress", "(", ")", "if", "lastProgress", ":", "return", "json", ".", "loads", "(", "lastProgress", ".", "json", "(", ")", ")", "else", ":", "return", "None" ]
Returns the most recent :class:`StreamingQueryProgress` update of this streaming query or None if there were no progress updates :return: a map
[ "Returns", "the", "most", "recent", ":", "class", ":", "StreamingQueryProgress", "update", "of", "this", "streaming", "query", "or", "None", "if", "there", "were", "no", "progress", "updates", ":", "return", ":", "a", "map" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L124-L134
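A minimal sketch of reading ``lastProgress`` and ``recentProgress``, assuming an active ``SparkSession`` named ``spark``; the sleep only gives the query time to complete a batch:

import time

query = (spark.readStream.format("rate").load()
         .writeStream.format("console").start())
time.sleep(5)
progress = query.lastProgress            # a dict parsed from the JSON report, or None
if progress is not None:
    print(progress["batchId"], progress["numInputRows"])
print(len(query.recentProgress))         # bounded by spark.sql.streaming.numRecentProgressUpdates
query.stop()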
apache/spark
python/pyspark/sql/streaming.py
StreamingQuery.exception
def exception(self): """ :return: the StreamingQueryException if the query was terminated by an exception, or None. """ if self._jsq.exception().isDefined(): je = self._jsq.exception().get() msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace())) return StreamingQueryException(msg, stackTrace, je.getCause()) else: return None
python
def exception(self): """ :return: the StreamingQueryException if the query was terminated by an exception, or None. """ if self._jsq.exception().isDefined(): je = self._jsq.exception().get() msg = je.toString().split(': ', 1)[1] # Drop the Java StreamingQueryException type info stackTrace = '\n\t at '.join(map(lambda x: x.toString(), je.getStackTrace())) return StreamingQueryException(msg, stackTrace, je.getCause()) else: return None
[ "def", "exception", "(", "self", ")", ":", "if", "self", ".", "_jsq", ".", "exception", "(", ")", ".", "isDefined", "(", ")", ":", "je", "=", "self", ".", "_jsq", ".", "exception", "(", ")", ".", "get", "(", ")", "msg", "=", "je", ".", "toString", "(", ")", ".", "split", "(", "': '", ",", "1", ")", "[", "1", "]", "# Drop the Java StreamingQueryException type info", "stackTrace", "=", "'\\n\\t at '", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", ".", "toString", "(", ")", ",", "je", ".", "getStackTrace", "(", ")", ")", ")", "return", "StreamingQueryException", "(", "msg", ",", "stackTrace", ",", "je", ".", "getCause", "(", ")", ")", "else", ":", "return", "None" ]
:return: the StreamingQueryException if the query was terminated by an exception, or None.
[ ":", "return", ":", "the", "StreamingQueryException", "if", "the", "query", "was", "terminated", "by", "an", "exception", "or", "None", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L181-L191
apache/spark
python/pyspark/sql/streaming.py
StreamingQueryManager.awaitAnyTermination
def awaitAnyTermination(self, timeout=None): """Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTermination()` was called, if any query has terminated with exception, then :func:`awaitAnyTermination()` will throw any of the exception. For correctly documenting exceptions across multiple queries, users need to stop all of them after any of them terminates with exception, and then check the `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsqm.awaitAnyTermination(int(timeout * 1000)) else: return self._jsqm.awaitAnyTermination()
python
def awaitAnyTermination(self, timeout=None): """Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTermination()` was called, if any query has terminated with exception, then :func:`awaitAnyTermination()` will throw any of the exception. For correctly documenting exceptions across multiple queries, users need to stop all of them after any of them terminates with exception, and then check the `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception """ if timeout is not None: if not isinstance(timeout, (int, float)) or timeout < 0: raise ValueError("timeout must be a positive integer or float. Got %s" % timeout) return self._jsqm.awaitAnyTermination(int(timeout * 1000)) else: return self._jsqm.awaitAnyTermination()
[ "def", "awaitAnyTermination", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "not", "None", ":", "if", "not", "isinstance", "(", "timeout", ",", "(", "int", ",", "float", ")", ")", "or", "timeout", "<", "0", ":", "raise", "ValueError", "(", "\"timeout must be a positive integer or float. Got %s\"", "%", "timeout", ")", "return", "self", ".", "_jsqm", ".", "awaitAnyTermination", "(", "int", "(", "timeout", "*", "1000", ")", ")", "else", ":", "return", "self", ".", "_jsqm", ".", "awaitAnyTermination", "(", ")" ]
Wait until any of the queries on the associated SQLContext has terminated since the creation of the context, or since :func:`resetTerminated()` was called. If any query was terminated with an exception, then the exception will be thrown. If `timeout` is set, it returns whether the query has terminated or not within the `timeout` seconds. If a query has terminated, then subsequent calls to :func:`awaitAnyTermination()` will either return immediately (if the query was terminated by :func:`query.stop()`), or throw the exception immediately (if the query was terminated with an exception). Use :func:`resetTerminated()` to clear past terminations and wait for new terminations. In the case where multiple queries have terminated since :func:`resetTerminated()` was called, if any query has terminated with an exception, then :func:`awaitAnyTermination()` will throw any of the exceptions. To correctly document exceptions across multiple queries, users need to stop all of them after any of them terminates with an exception, and then check `query.exception()` for each query. throws :class:`StreamingQueryException`, if `this` query has terminated with an exception
[ "Wait", "until", "any", "of", "the", "queries", "on", "the", "associated", "SQLContext", "has", "terminated", "since", "the", "creation", "of", "the", "context", "or", "since", ":", "func", ":", "resetTerminated", "()", "was", "called", ".", "If", "any", "query", "was", "terminated", "with", "an", "exception", "then", "the", "exception", "will", "be", "thrown", ".", "If", "timeout", "is", "set", "it", "returns", "whether", "the", "query", "has", "terminated", "or", "not", "within", "the", "timeout", "seconds", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L240-L265
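A minimal sketch of ``StreamingQueryManager.awaitAnyTermination``, assuming an active ``SparkSession`` named ``spark`` with one or more streaming queries already started elsewhere:

spark.streams.resetTerminated()                        # forget past terminations
any_finished = spark.streams.awaitAnyTermination(10)   # wait at most 10 seconds
print("some query terminated:", any_finished)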
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.load
def load(self, path=None, format=None, schema=None, **options): """Loads a data stream from a data source and returns it as a :class`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if path is not None: if type(path) != str or len(path.strip()) == 0: raise ValueError("If the path is provided for stream, it needs to be a " + "non-empty string. List of paths are not supported.") return self._df(self._jreader.load(path)) else: return self._df(self._jreader.load())
python
def load(self, path=None, format=None, schema=None, **options): """Loads a data stream from a data source and returns it as a :class`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if path is not None: if type(path) != str or len(path.strip()) == 0: raise ValueError("If the path is provided for stream, it needs to be a " + "non-empty string. List of paths are not supported.") return self._df(self._jreader.load(path)) else: return self._df(self._jreader.load())
[ "def", "load", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "schema", "=", "None", ",", "*", "*", "options", ")", ":", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "schema", "is", "not", "None", ":", "self", ".", "schema", "(", "schema", ")", "self", ".", "options", "(", "*", "*", "options", ")", "if", "path", "is", "not", "None", ":", "if", "type", "(", "path", ")", "!=", "str", "or", "len", "(", "path", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "\"If the path is provided for stream, it needs to be a \"", "+", "\"non-empty string. List of paths are not supported.\"", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", "path", ")", ")", "else", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", ")", ")" ]
Loads a data stream from a data source and returns it as a :class:`DataFrame`. .. note:: Evolving. :param path: optional string for file-system backed data sources. :param format: optional string for format of the data source. Defaults to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (for example, ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> json_sdf = spark.readStream.format("json") \\ ... .schema(sdf_schema) \\ ... .load(tempfile.mkdtemp()) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True
[ "Loads", "a", "data", "stream", "from", "a", "data", "source", "and", "returns", "it", "as", "a", ":", "class", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L370-L400
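A minimal sketch of ``DataStreamReader.load`` with an explicit schema, assuming an active ``SparkSession`` named ``spark``; ``/tmp/stream-input`` is an illustrative directory that JSON files are dropped into:

from pyspark.sql.types import IntegerType, StringType, StructField, StructType

schema = StructType([
    StructField("name", StringType()),
    StructField("age", IntegerType()),
])
sdf = spark.readStream.format("json").schema(schema).load("/tmp/stream-input")
print(sdf.isStreaming)   # True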
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.json
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None, dropFieldIfAllNull=None, encoding=None): """ Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding) if isinstance(path, basestring): return self._df(self._jreader.json(path)) else: raise TypeError("path can be only a single string")
python
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, locale=None, dropFieldIfAllNull=None, encoding=None): """ Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, locale=locale, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding) if isinstance(path, basestring): return self._df(self._jreader.json(path)) else: raise TypeError("path can be only a single string")
[ "def", "json", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "primitivesAsString", "=", "None", ",", "prefersDecimal", "=", "None", ",", "allowComments", "=", "None", ",", "allowUnquotedFieldNames", "=", "None", ",", "allowSingleQuotes", "=", "None", ",", "allowNumericLeadingZero", "=", "None", ",", "allowBackslashEscapingAnyCharacter", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "multiLine", "=", "None", ",", "allowUnquotedControlChars", "=", "None", ",", "lineSep", "=", "None", ",", "locale", "=", "None", ",", "dropFieldIfAllNull", "=", "None", ",", "encoding", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "primitivesAsString", "=", "primitivesAsString", ",", "prefersDecimal", "=", "prefersDecimal", ",", "allowComments", "=", "allowComments", ",", "allowUnquotedFieldNames", "=", "allowUnquotedFieldNames", ",", "allowSingleQuotes", "=", "allowSingleQuotes", ",", "allowNumericLeadingZero", "=", "allowNumericLeadingZero", ",", "allowBackslashEscapingAnyCharacter", "=", "allowBackslashEscapingAnyCharacter", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "multiLine", "=", "multiLine", ",", "allowUnquotedControlChars", "=", "allowUnquotedControlChars", ",", "lineSep", "=", "lineSep", ",", "locale", "=", "locale", ",", "dropFieldIfAllNull", "=", "dropFieldIfAllNull", ",", "encoding", "=", "encoding", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "json", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
Loads a JSON file stream and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. .. note:: Evolving. :param path: string represents path to the JSON dataset, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param lineSep: defines the line separator that should be used for parsing. 
If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. >>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema) >>> json_sdf.isStreaming True >>> json_sdf.schema == sdf_schema True
[ "Loads", "a", "JSON", "file", "stream", "and", "returns", "the", "results", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L403-L503
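A minimal usage sketch for spark.readStream.json as documented in the record above; the SparkSession name, the schema fields, and the input directory are illustrative assumptions rather than values taken from the source.

# Hedged sketch: stream a directory of newline-delimited JSON files.
# `spark`, the schema fields, and the path below are assumed for illustration.
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, LongType

spark = SparkSession.builder.appName("json_stream_sketch").getOrCreate()

# Supplying an explicit schema avoids the extra pass over the input that
# schema inference would otherwise require.
schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", LongType(), True),
])

json_sdf = (spark.readStream
            .schema(schema)
            .json("/tmp/json-input",   # hypothetical input directory
                  multiLine=False,
                  mode="PERMISSIVE"))

assert json_sdf.isStreaming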
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.orc
def orc(self, path): """Loads an ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.orc(path)) else: raise TypeError("path can be only a single string")
python
def orc(self, path): """Loads an ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.orc(path)) else: raise TypeError("path can be only a single string")
[ "def", "orc", "(", "self", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "orc", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
Loads an ORC file stream, returning the result as a :class:`DataFrame`. .. note:: Evolving. >>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp()) >>> orc_sdf.isStreaming True >>> orc_sdf.schema == sdf_schema True
[ "Loads", "a", "ORC", "file", "stream", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L506-L520
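A short hedged sketch of the ORC stream reader shown above, mirroring the doctest pattern in the record; `spark` is assumed to be an existing SparkSession and the input path is hypothetical. File-based stream sources generally require an explicit schema unless streaming schema inference is enabled.

from pyspark.sql.types import StructType, StructField, StringType

# Hypothetical single-column schema and input directory.
orc_schema = StructType([StructField("data", StringType(), True)])
orc_sdf = spark.readStream.schema(orc_schema).orc("/tmp/orc-input")
assert orc_sdf.isStreaming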
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.parquet
def parquet(self, path): """Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.parquet(path)) else: raise TypeError("path can be only a single string")
python
def parquet(self, path): """Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True """ if isinstance(path, basestring): return self._df(self._jreader.parquet(path)) else: raise TypeError("path can be only a single string")
[ "def", "parquet", "(", "self", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "parquet", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
Loads a Parquet file stream, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. .. note:: Evolving. >>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp()) >>> parquet_sdf.isStreaming True >>> parquet_sdf.schema == sdf_schema True
[ "Loads", "a", "Parquet", "file", "stream", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L523-L542
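A hedged sketch of the Parquet stream reader with the mergeSchema option mentioned in the docstring; `spark`, the schema, and the input directory are illustrative assumptions.

from pyspark.sql.types import StructType, StructField, StringType

pq_schema = StructType([StructField("data", StringType(), True)])
parquet_sdf = (spark.readStream
               .schema(pq_schema)
               .option("mergeSchema", "true")   # overrides spark.sql.parquet.mergeSchema
               .parquet("/tmp/parquet-input"))  # hypothetical input directory
assert parquet_sdf.schema == pq_schema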
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.text
def text(self, path, wholetext=False, lineSep=None): """ Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.text(path)) else: raise TypeError("path can be only a single string")
python
def text(self, path, wholetext=False, lineSep=None): """ Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.text(path)) else: raise TypeError("path can be only a single string")
[ "def", "text", "(", "self", ",", "path", ",", "wholetext", "=", "False", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "wholetext", "=", "wholetext", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "text", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
Loads a text file stream and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. .. note:: Evolving. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> text_sdf = spark.readStream.text(tempfile.mkdtemp()) >>> text_sdf.isStreaming True >>> "value" in str(text_sdf.schema) True
[ "Loads", "a", "text", "file", "stream", "and", "returns", "a", ":", "class", ":", "DataFrame", "whose", "schema", "starts", "with", "a", "string", "column", "named", "value", "and", "followed", "by", "partitioned", "columns", "if", "there", "are", "any", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L546-L572
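A hedged sketch of the text stream reader; `spark` and the input directory are assumed. Each line becomes one row in the single "value" column because wholetext is left at its default.

# Hypothetical input directory; lineSep is optional and shown only for illustration.
text_sdf = spark.readStream.text("/tmp/text-input", wholetext=False, lineSep="\n")
assert text_sdf.isStreaming
assert "value" in str(text_sdf.schema)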
apache/spark
python/pyspark/sql/streaming.py
DataStreamReader.csv
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. 
If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) else: raise TypeError("path can be only a single string")
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. 
If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): return self._df(self._jreader.csv(path)) else: raise TypeError("path can be only a single string")
[ "def", "csv", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "sep", "=", "None", ",", "encoding", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "comment", "=", "None", ",", "header", "=", "None", ",", "inferSchema", "=", "None", ",", "ignoreLeadingWhiteSpace", "=", "None", ",", "ignoreTrailingWhiteSpace", "=", "None", ",", "nullValue", "=", "None", ",", "nanValue", "=", "None", ",", "positiveInf", "=", "None", ",", "negativeInf", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "maxColumns", "=", "None", ",", "maxCharsPerColumn", "=", "None", ",", "maxMalformedLogPerPartition", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "multiLine", "=", "None", ",", "charToEscapeQuoteEscaping", "=", "None", ",", "enforceSchema", "=", "None", ",", "emptyValue", "=", "None", ",", "locale", "=", "None", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "sep", "=", "sep", ",", "encoding", "=", "encoding", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "comment", "=", "comment", ",", "header", "=", "header", ",", "inferSchema", "=", "inferSchema", ",", "ignoreLeadingWhiteSpace", "=", "ignoreLeadingWhiteSpace", ",", "ignoreTrailingWhiteSpace", "=", "ignoreTrailingWhiteSpace", ",", "nullValue", "=", "nullValue", ",", "nanValue", "=", "nanValue", ",", "positiveInf", "=", "positiveInf", ",", "negativeInf", "=", "negativeInf", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "maxColumns", "=", "maxColumns", ",", "maxCharsPerColumn", "=", "maxCharsPerColumn", ",", "maxMalformedLogPerPartition", "=", "maxMalformedLogPerPartition", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "multiLine", "=", "multiLine", ",", "charToEscapeQuoteEscaping", "=", "charToEscapeQuoteEscaping", ",", "enforceSchema", "=", "enforceSchema", ",", "emptyValue", "=", "emptyValue", ",", "locale", "=", "locale", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "csv", "(", "path", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only a single string\"", ")" ]
r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. .. note:: Evolving. :param path: string, or list of strings, for input path(s). :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. 
Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse one record, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. >>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema) >>> csv_sdf.isStreaming True >>> csv_sdf.schema == sdf_schema True
[ "r", "Loads", "a", "CSV", "file", "stream", "and", "returns", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L575-L705
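A hedged sketch of the CSV stream reader; `spark`, the schema, and the input directory are illustrative assumptions. The explicit schema stands in for inferSchema, so no extra pass over the data is needed.

from pyspark.sql.types import StructType, StructField, StringType, DoubleType

csv_schema = StructType([
    StructField("id", StringType(), True),
    StructField("price", DoubleType(), True),
])
csv_sdf = (spark.readStream
           .csv("/tmp/csv-input",        # hypothetical input directory
                schema=csv_schema,
                sep=",",
                header=True,
                mode="DROPMALFORMED"))   # drop malformed rows instead of keeping them
assert csv_sdf.schema == csv_schema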
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.outputMode
def outputMode(self, outputMode): """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append') """ if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0: raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode) self._jwrite = self._jwrite.outputMode(outputMode) return self
python
def outputMode(self, outputMode): """Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append') """ if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0: raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode) self._jwrite = self._jwrite.outputMode(outputMode) return self
[ "def", "outputMode", "(", "self", ",", "outputMode", ")", ":", "if", "not", "outputMode", "or", "type", "(", "outputMode", ")", "!=", "str", "or", "len", "(", "outputMode", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'The output mode must be a non-empty string. Got: %s'", "%", "outputMode", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "outputMode", "(", "outputMode", ")", "return", "self" ]
Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. Options include: * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time there are some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. .. note:: Evolving. >>> writer = sdf.writeStream.outputMode('append')
[ "Specifies", "how", "data", "of", "a", "streaming", "DataFrame", "/", "Dataset", "is", "written", "to", "a", "streaming", "sink", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L729-L749
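A hedged sketch pairing outputMode with a console sink; `sdf` is assumed to be a streaming DataFrame such as the readers above produce, and the query is stopped right away only to keep the sketch short.

query = (sdf.writeStream
         .outputMode("append")   # emit only newly arriving rows
         .format("console")
         .start())
query.stop()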
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.queryName
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
python
def queryName(self, queryName): """Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query') """ if not queryName or type(queryName) != str or len(queryName.strip()) == 0: raise ValueError('The queryName must be a non-empty string. Got: %s' % queryName) self._jwrite = self._jwrite.queryName(queryName) return self
[ "def", "queryName", "(", "self", ",", "queryName", ")", ":", "if", "not", "queryName", "or", "type", "(", "queryName", ")", "!=", "str", "or", "len", "(", "queryName", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'The queryName must be a non-empty string. Got: %s'", "%", "queryName", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "queryName", "(", "queryName", ")", "return", "self" ]
Specifies the name of the :class:`StreamingQuery` that can be started with :func:`start`. This name must be unique among all the currently active queries in the associated SparkSession. .. note:: Evolving. :param queryName: unique name for the query >>> writer = sdf.writeStream.queryName('streaming_query')
[ "Specifies", "the", "name", "of", "the", ":", "class", ":", "StreamingQuery", "that", "can", "be", "started", "with", ":", "func", ":", "start", ".", "This", "name", "must", "be", "unique", "among", "all", "the", "currently", "active", "queries", "in", "the", "associated", "SparkSession", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L811-L825
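A hedged sketch of queryName combined with the in-memory sink, so the running query's results can be read back with SQL; `sdf`, `spark`, and the query name are assumptions.

query = (sdf.writeStream
         .queryName("streaming_query_sketch")  # also names the in-memory table
         .outputMode("append")
         .format("memory")
         .start())
spark.sql("SELECT * FROM streaming_query_sketch").show()
query.stop()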
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.trigger
def trigger(self, processingTime=None, once=None, continuous=None): """Set the trigger for the stream query. If this is not set it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just once batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(continuous='5 seconds') """ params = [processingTime, once, continuous] if params.count(None) == 3: raise ValueError('No trigger provided') elif params.count(None) < 2: raise ValueError('Multiple triggers not allowed.') jTrigger = None if processingTime is not None: if type(processingTime) != str or len(processingTime.strip()) == 0: raise ValueError('Value for processingTime must be a non empty string. Got: %s' % processingTime) interval = processingTime.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime( interval) elif once is not None: if once is not True: raise ValueError('Value for once must be True. Got: %s' % once) jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once() else: if type(continuous) != str or len(continuous.strip()) == 0: raise ValueError('Value for continuous must be a non empty string. Got: %s' % continuous) interval = continuous.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous( interval) self._jwrite = self._jwrite.trigger(jTrigger) return self
python
def trigger(self, processingTime=None, once=None, continuous=None): """Set the trigger for the stream query. If this is not set it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just once batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(continuous='5 seconds') """ params = [processingTime, once, continuous] if params.count(None) == 3: raise ValueError('No trigger provided') elif params.count(None) < 2: raise ValueError('Multiple triggers not allowed.') jTrigger = None if processingTime is not None: if type(processingTime) != str or len(processingTime.strip()) == 0: raise ValueError('Value for processingTime must be a non empty string. Got: %s' % processingTime) interval = processingTime.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime( interval) elif once is not None: if once is not True: raise ValueError('Value for once must be True. Got: %s' % once) jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once() else: if type(continuous) != str or len(continuous.strip()) == 0: raise ValueError('Value for continuous must be a non empty string. Got: %s' % continuous) interval = continuous.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous( interval) self._jwrite = self._jwrite.trigger(jTrigger) return self
[ "def", "trigger", "(", "self", ",", "processingTime", "=", "None", ",", "once", "=", "None", ",", "continuous", "=", "None", ")", ":", "params", "=", "[", "processingTime", ",", "once", ",", "continuous", "]", "if", "params", ".", "count", "(", "None", ")", "==", "3", ":", "raise", "ValueError", "(", "'No trigger provided'", ")", "elif", "params", ".", "count", "(", "None", ")", "<", "2", ":", "raise", "ValueError", "(", "'Multiple triggers not allowed.'", ")", "jTrigger", "=", "None", "if", "processingTime", "is", "not", "None", ":", "if", "type", "(", "processingTime", ")", "!=", "str", "or", "len", "(", "processingTime", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'Value for processingTime must be a non empty string. Got: %s'", "%", "processingTime", ")", "interval", "=", "processingTime", ".", "strip", "(", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "ProcessingTime", "(", "interval", ")", "elif", "once", "is", "not", "None", ":", "if", "once", "is", "not", "True", ":", "raise", "ValueError", "(", "'Value for once must be True. Got: %s'", "%", "once", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "Once", "(", ")", "else", ":", "if", "type", "(", "continuous", ")", "!=", "str", "or", "len", "(", "continuous", ".", "strip", "(", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'Value for continuous must be a non empty string. Got: %s'", "%", "continuous", ")", "interval", "=", "continuous", ".", "strip", "(", ")", "jTrigger", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "streaming", ".", "Trigger", ".", "Continuous", "(", "interval", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "trigger", "(", "jTrigger", ")", "return", "self" ]
Set the trigger for the stream query. If this is not set it will run the query as fast as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``. .. note:: Evolving. :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'. Set a trigger that runs a query periodically based on the processing time. Only one trigger can be set. :param once: if set to True, set a trigger that processes only one batch of data in a streaming query then terminates the query. Only one trigger can be set. >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(processingTime='5 seconds') >>> # trigger the query for just once batch of data >>> writer = sdf.writeStream.trigger(once=True) >>> # trigger the query for execution every 5 seconds >>> writer = sdf.writeStream.trigger(continuous='5 seconds')
[ "Set", "the", "trigger", "for", "the", "stream", "query", ".", "If", "this", "is", "not", "set", "it", "will", "run", "the", "query", "as", "fast", "as", "possible", "which", "is", "equivalent", "to", "setting", "the", "trigger", "to", "processingTime", "=", "0", "seconds", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L829-L878
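A hedged sketch of the three mutually exclusive trigger styles accepted above; `sdf` is an assumed streaming DataFrame, and only one trigger may be set on a given writer.

w1 = sdf.writeStream.trigger(processingTime="5 seconds")  # periodic micro-batches
w2 = sdf.writeStream.trigger(once=True)                   # process one batch, then stop
w3 = sdf.writeStream.trigger(continuous="1 second")       # continuous mode checkpoint interval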
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.foreach
def foreach(self, f): """ Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... 
print("Closed with error: %s" % str(error)) ... >>> writer = sdf.writeStream.foreach(RowPrinter()) """ from pyspark.rdd import _wrap_function from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.taskcontext import TaskContext if callable(f): # The provided object is a callable function that is supposed to be called on each row. # Construct a function that takes an iterator and calls the provided function on each # row. def func_without_process(_, iterator): for x in iterator: f(x) return iter([]) func = func_without_process else: # The provided object is not a callable function. Then it is expected to have a # 'process(row)' method, and optional 'open(partition_id, epoch_id)' and # 'close(error)' methods. if not hasattr(f, 'process'): raise Exception("Provided object does not have a 'process' method") if not callable(getattr(f, 'process')): raise Exception("Attribute 'process' in provided object is not callable") def doesMethodExist(method_name): exists = hasattr(f, method_name) if exists and not callable(getattr(f, method_name)): raise Exception( "Attribute '%s' in provided object is not callable" % method_name) return exists open_exists = doesMethodExist('open') close_exists = doesMethodExist('close') def func_with_open_process_close(partition_id, iterator): epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId') if epoch_id: epoch_id = int(epoch_id) else: raise Exception("Could not get batch id from TaskContext") # Check if the data should be processed should_process = True if open_exists: should_process = f.open(partition_id, epoch_id) error = None try: if should_process: for x in iterator: f.process(x) except Exception as ex: error = ex finally: if close_exists: f.close(error) if error: raise error return iter([]) func = func_with_open_process_close serializer = AutoBatchedSerializer(PickleSerializer()) wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer) jForeachWriter = \ self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter( wrapped_func, self._df._jdf.schema()) self._jwrite.foreach(jForeachWriter) return self
python
def foreach(self, f): """ Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... 
print("Closed with error: %s" % str(error)) ... >>> writer = sdf.writeStream.foreach(RowPrinter()) """ from pyspark.rdd import _wrap_function from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.taskcontext import TaskContext if callable(f): # The provided object is a callable function that is supposed to be called on each row. # Construct a function that takes an iterator and calls the provided function on each # row. def func_without_process(_, iterator): for x in iterator: f(x) return iter([]) func = func_without_process else: # The provided object is not a callable function. Then it is expected to have a # 'process(row)' method, and optional 'open(partition_id, epoch_id)' and # 'close(error)' methods. if not hasattr(f, 'process'): raise Exception("Provided object does not have a 'process' method") if not callable(getattr(f, 'process')): raise Exception("Attribute 'process' in provided object is not callable") def doesMethodExist(method_name): exists = hasattr(f, method_name) if exists and not callable(getattr(f, method_name)): raise Exception( "Attribute '%s' in provided object is not callable" % method_name) return exists open_exists = doesMethodExist('open') close_exists = doesMethodExist('close') def func_with_open_process_close(partition_id, iterator): epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId') if epoch_id: epoch_id = int(epoch_id) else: raise Exception("Could not get batch id from TaskContext") # Check if the data should be processed should_process = True if open_exists: should_process = f.open(partition_id, epoch_id) error = None try: if should_process: for x in iterator: f.process(x) except Exception as ex: error = ex finally: if close_exists: f.close(error) if error: raise error return iter([]) func = func_with_open_process_close serializer = AutoBatchedSerializer(PickleSerializer()) wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer) jForeachWriter = \ self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter( wrapped_func, self._df._jdf.schema()) self._jwrite.foreach(jForeachWriter) return self
[ "def", "foreach", "(", "self", ",", "f", ")", ":", "from", "pyspark", ".", "rdd", "import", "_wrap_function", "from", "pyspark", ".", "serializers", "import", "PickleSerializer", ",", "AutoBatchedSerializer", "from", "pyspark", ".", "taskcontext", "import", "TaskContext", "if", "callable", "(", "f", ")", ":", "# The provided object is a callable function that is supposed to be called on each row.", "# Construct a function that takes an iterator and calls the provided function on each", "# row.", "def", "func_without_process", "(", "_", ",", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "f", "(", "x", ")", "return", "iter", "(", "[", "]", ")", "func", "=", "func_without_process", "else", ":", "# The provided object is not a callable function. Then it is expected to have a", "# 'process(row)' method, and optional 'open(partition_id, epoch_id)' and", "# 'close(error)' methods.", "if", "not", "hasattr", "(", "f", ",", "'process'", ")", ":", "raise", "Exception", "(", "\"Provided object does not have a 'process' method\"", ")", "if", "not", "callable", "(", "getattr", "(", "f", ",", "'process'", ")", ")", ":", "raise", "Exception", "(", "\"Attribute 'process' in provided object is not callable\"", ")", "def", "doesMethodExist", "(", "method_name", ")", ":", "exists", "=", "hasattr", "(", "f", ",", "method_name", ")", "if", "exists", "and", "not", "callable", "(", "getattr", "(", "f", ",", "method_name", ")", ")", ":", "raise", "Exception", "(", "\"Attribute '%s' in provided object is not callable\"", "%", "method_name", ")", "return", "exists", "open_exists", "=", "doesMethodExist", "(", "'open'", ")", "close_exists", "=", "doesMethodExist", "(", "'close'", ")", "def", "func_with_open_process_close", "(", "partition_id", ",", "iterator", ")", ":", "epoch_id", "=", "TaskContext", ".", "get", "(", ")", ".", "getLocalProperty", "(", "'streaming.sql.batchId'", ")", "if", "epoch_id", ":", "epoch_id", "=", "int", "(", "epoch_id", ")", "else", ":", "raise", "Exception", "(", "\"Could not get batch id from TaskContext\"", ")", "# Check if the data should be processed", "should_process", "=", "True", "if", "open_exists", ":", "should_process", "=", "f", ".", "open", "(", "partition_id", ",", "epoch_id", ")", "error", "=", "None", "try", ":", "if", "should_process", ":", "for", "x", "in", "iterator", ":", "f", ".", "process", "(", "x", ")", "except", "Exception", "as", "ex", ":", "error", "=", "ex", "finally", ":", "if", "close_exists", ":", "f", ".", "close", "(", "error", ")", "if", "error", ":", "raise", "error", "return", "iter", "(", "[", "]", ")", "func", "=", "func_with_open_process_close", "serializer", "=", "AutoBatchedSerializer", "(", "PickleSerializer", "(", ")", ")", "wrapped_func", "=", "_wrap_function", "(", "self", ".", "_spark", ".", "_sc", ",", "func", ",", "serializer", ",", "serializer", ")", "jForeachWriter", "=", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "sql", ".", "execution", ".", "python", ".", "PythonForeachWriter", "(", "wrapped_func", ",", "self", ".", "_df", ".", "_jdf", ".", "schema", "(", ")", ")", "self", ".", "_jwrite", ".", "foreach", "(", "jForeachWriter", ")", "return", "self" ]
Sets the output of the streaming query to be processed using the provided writer ``f``. This is often used to write the output of a streaming query to arbitrary storage systems. The processing logic can be specified in two ways. #. A **function** that takes a row as input. This is a simple way to express your processing logic. Note that this does not allow you to deduplicate generated data when failures cause reprocessing of some input data. That would require you to specify the processing logic in the next way. #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods. The object can have the following methods. * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing (for example, open a connection, start a transaction, etc). Additionally, you can use the `partition_id` and `epoch_id` to deduplicate regenerated data (discussed later). * ``process(row)``: *Non-optional* method that processes each :class:`Row`. * ``close(error)``: *Optional* method that finalizes and cleans up (for example, close connection, commit transaction, etc.) after all rows have been processed. The object will be used by Spark in the following way. * A single copy of this object is responsible of all the data generated by a single task in a query. In other words, one instance is responsible for processing one partition of the data generated in a distributed manner. * This object must be serializable because each task will get a fresh serialized-deserialized copy of the provided object. Hence, it is strongly recommended that any initialization for writing data (e.g. opening a connection or starting a transaction) is done after the `open(...)` method has been called, which signifies that the task is ready to generate data. * The lifecycle of the methods are as follows. For each partition with ``partition_id``: ... For each batch/epoch of streaming data with ``epoch_id``: ....... Method ``open(partitionId, epochId)`` is called. ....... If ``open(...)`` returns true, for each row in the partition and batch/epoch, method ``process(row)`` is called. ....... Method ``close(errorOrNull)`` is called with error (if any) seen while processing rows. Important points to note: * The `partitionId` and `epochId` can be used to deduplicate generated data when failures cause reprocessing of some input data. This depends on the execution mode of the query. If the streaming query is being executed in the micro-batch mode, then every partition represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally commit data and achieve exactly-once guarantees. However, if the streaming query is being executed in the continuous mode, then this guarantee does not hold and therefore should not be used for deduplication. * The ``close()`` method (if exists) will be called if `open()` method exists and returns successfully (irrespective of the return value), except if the Python crashes in the middle. .. note:: Evolving. >>> # Print every row using a function >>> def print_row(row): ... print(row) ... >>> writer = sdf.writeStream.foreach(print_row) >>> # Print every row using a object with process() method >>> class RowPrinter: ... def open(self, partition_id, epoch_id): ... print("Opened %d, %d" % (partition_id, epoch_id)) ... return True ... def process(self, row): ... print(row) ... def close(self, error): ... print("Closed with error: %s" % str(error)) ... 
>>> writer = sdf.writeStream.foreach(RowPrinter())
[ "Sets", "the", "output", "of", "the", "streaming", "query", "to", "be", "processed", "using", "the", "provided", "writer", "f", ".", "This", "is", "often", "used", "to", "write", "the", "output", "of", "a", "streaming", "query", "to", "arbitrary", "storage", "systems", ".", "The", "processing", "logic", "can", "be", "specified", "in", "two", "ways", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L881-L1040
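A minimal usage sketch of the object-based foreach sink described in the record above. It assumes a local SparkSession and the built-in "rate" source; the class name, the short awaitTermination timeout, and printing to stdout are illustrative choices, not part of the record.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("foreach-sketch").getOrCreate()
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()

class StdoutWriter(object):
    def open(self, partition_id, epoch_id):
        # Returning True tells Spark to process this (partition_id, epoch_id);
        # the pair can be used to skip data already written by a failed attempt.
        return True

    def process(self, row):
        print(row)

    def close(self, error):
        if error:
            print("partition failed: %s" % error)

query = sdf.writeStream.foreach(StdoutWriter()).start()
query.awaitTermination(10)  # run briefly for the sketch
query.stop()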
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.foreachBatch
def foreachBatch(self, func):
    """
    Sets the output of the streaming query to be processed using the provided
    function. This is supported only in the micro-batch execution mode (that
    is, when the trigger is not continuous). In every micro-batch, the provided
    function will be called with
    (i) the output rows as a DataFrame and
    (ii) the batch identifier.
    The batchId can be used to deduplicate and transactionally write the output
    (that is, the provided Dataset) to external systems. The output DataFrame is
    guaranteed to be exactly the same for the same batchId (assuming all operations
    are deterministic in the query).

    .. note:: Evolving.

    >>> def func(batch_df, batch_id):
    ...     batch_df.collect()
    ...
    >>> writer = sdf.writeStream.foreachBatch(func)
    """

    from pyspark.java_gateway import ensure_callback_server_started
    gw = self._spark._sc._gateway
    java_import(gw.jvm, "org.apache.spark.sql.execution.streaming.sources.*")

    wrapped_func = ForeachBatchFunction(self._spark, func)
    gw.jvm.PythonForeachBatchHelper.callForeachBatch(self._jwrite, wrapped_func)
    ensure_callback_server_started(gw)
    return self
python
def foreachBatch(self, func):
    """
    Sets the output of the streaming query to be processed using the provided
    function. This is supported only in the micro-batch execution mode (that
    is, when the trigger is not continuous). In every micro-batch, the provided
    function will be called with
    (i) the output rows as a DataFrame and
    (ii) the batch identifier.
    The batchId can be used to deduplicate and transactionally write the output
    (that is, the provided Dataset) to external systems. The output DataFrame is
    guaranteed to be exactly the same for the same batchId (assuming all operations
    are deterministic in the query).

    .. note:: Evolving.

    >>> def func(batch_df, batch_id):
    ...     batch_df.collect()
    ...
    >>> writer = sdf.writeStream.foreachBatch(func)
    """

    from pyspark.java_gateway import ensure_callback_server_started
    gw = self._spark._sc._gateway
    java_import(gw.jvm, "org.apache.spark.sql.execution.streaming.sources.*")

    wrapped_func = ForeachBatchFunction(self._spark, func)
    gw.jvm.PythonForeachBatchHelper.callForeachBatch(self._jwrite, wrapped_func)
    ensure_callback_server_started(gw)
    return self
[ "def", "foreachBatch", "(", "self", ",", "func", ")", ":", "from", "pyspark", ".", "java_gateway", "import", "ensure_callback_server_started", "gw", "=", "self", ".", "_spark", ".", "_sc", ".", "_gateway", "java_import", "(", "gw", ".", "jvm", ",", "\"org.apache.spark.sql.execution.streaming.sources.*\"", ")", "wrapped_func", "=", "ForeachBatchFunction", "(", "self", ".", "_spark", ",", "func", ")", "gw", ".", "jvm", ".", "PythonForeachBatchHelper", ".", "callForeachBatch", "(", "self", ".", "_jwrite", ",", "wrapped_func", ")", "ensure_callback_server_started", "(", "gw", ")", "return", "self" ]
Sets the output of the streaming query to be processed using the provided function.
This is supported only in the micro-batch execution mode (that is, when the trigger is
not continuous). In every micro-batch, the provided function will be called with
(i) the output rows as a DataFrame and (ii) the batch identifier. The batchId can be
used to deduplicate and transactionally write the output (that is, the provided
Dataset) to external systems. The output DataFrame is guaranteed to be exactly the
same for the same batchId (assuming all operations are deterministic in the query).

.. note:: Evolving.

>>> def func(batch_df, batch_id):
...     batch_df.collect()
...
>>> writer = sdf.writeStream.foreachBatch(func)
[ "Sets", "the", "output", "of", "the", "streaming", "query", "to", "be", "processed", "using", "the", "provided", "function", ".", "This", "is", "supported", "only", "the", "in", "the", "micro", "-", "batch", "execution", "modes", "(", "that", "is", "when", "the", "trigger", "is", "not", "continuous", ")", ".", "In", "every", "micro", "-", "batch", "the", "provided", "function", "will", "be", "called", "in", "every", "micro", "-", "batch", "with", "(", "i", ")", "the", "output", "rows", "as", "a", "DataFrame", "and", "(", "ii", ")", "the", "batch", "identifier", ".", "The", "batchId", "can", "be", "used", "deduplicate", "and", "transactionally", "write", "the", "output", "(", "that", "is", "the", "provided", "Dataset", ")", "to", "external", "systems", ".", "The", "output", "DataFrame", "is", "guaranteed", "to", "exactly", "same", "for", "the", "same", "batchId", "(", "assuming", "all", "operations", "are", "deterministic", "in", "the", "query", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L1043-L1069
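A minimal sketch of foreachBatch, assuming an active SparkSession, the "rate" source, and placeholder local paths; the batch function simply appends each micro-batch to Parquet.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("foreachBatch-sketch").getOrCreate()
sdf = spark.readStream.format("rate").option("rowsPerSecond", 1).load()

def write_batch(batch_df, batch_id):
    # batch_df is a plain (non-streaming) DataFrame, so any batch sink works;
    # batch_id can be recorded to make retried writes idempotent.
    batch_df.write.mode("append").parquet("/tmp/foreach_batch_sketch/data")

query = (sdf.writeStream
         .foreachBatch(write_batch)
         .option("checkpointLocation", "/tmp/foreach_batch_sketch/checkpoint")
         .start())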
apache/spark
python/pyspark/sql/streaming.py
DataStreamWriter.start
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None, **options): """Streams the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. .. note:: Evolving. :param path: the path in a Hadoop supported file system :param format: the format used to save :param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. :param partitionBy: names of partitioning columns :param queryName: unique name for the query :param options: All other string options. You may want to provide a `checkpointLocation` for most streams, however it is not required for a `memory` stream. >>> sq = sdf.writeStream.format('memory').queryName('this_query').start() >>> sq.isActive True >>> sq.name u'this_query' >>> sq.stop() >>> sq.isActive False >>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start( ... queryName='that_query', outputMode="append", format='memory') >>> sq.name u'that_query' >>> sq.isActive True >>> sq.stop() """ self.options(**options) if outputMode is not None: self.outputMode(outputMode) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if queryName is not None: self.queryName(queryName) if path is None: return self._sq(self._jwrite.start()) else: return self._sq(self._jwrite.start(path))
python
def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None, **options): """Streams the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. .. note:: Evolving. :param path: the path in a Hadoop supported file system :param format: the format used to save :param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a streaming sink. * `append`:Only the new rows in the streaming DataFrame/Dataset will be written to the sink * `complete`:All the rows in the streaming DataFrame/Dataset will be written to the sink every time these is some updates * `update`:only the rows that were updated in the streaming DataFrame/Dataset will be written to the sink every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to `append` mode. :param partitionBy: names of partitioning columns :param queryName: unique name for the query :param options: All other string options. You may want to provide a `checkpointLocation` for most streams, however it is not required for a `memory` stream. >>> sq = sdf.writeStream.format('memory').queryName('this_query').start() >>> sq.isActive True >>> sq.name u'this_query' >>> sq.stop() >>> sq.isActive False >>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start( ... queryName='that_query', outputMode="append", format='memory') >>> sq.name u'that_query' >>> sq.isActive True >>> sq.stop() """ self.options(**options) if outputMode is not None: self.outputMode(outputMode) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if queryName is not None: self.queryName(queryName) if path is None: return self._sq(self._jwrite.start()) else: return self._sq(self._jwrite.start(path))
[ "def", "start", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "outputMode", "=", "None", ",", "partitionBy", "=", "None", ",", "queryName", "=", "None", ",", "*", "*", "options", ")", ":", "self", ".", "options", "(", "*", "*", "options", ")", "if", "outputMode", "is", "not", "None", ":", "self", ".", "outputMode", "(", "outputMode", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "queryName", "is", "not", "None", ":", "self", ".", "queryName", "(", "queryName", ")", "if", "path", "is", "None", ":", "return", "self", ".", "_sq", "(", "self", ".", "_jwrite", ".", "start", "(", ")", ")", "else", ":", "return", "self", ".", "_sq", "(", "self", ".", "_jwrite", ".", "start", "(", "path", ")", ")" ]
Streams the contents of the :class:`DataFrame` to a data source. The data source is
specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the
default data source configured by ``spark.sql.sources.default`` will be used.

.. note:: Evolving.

:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param outputMode: specifies how data of a streaming DataFrame/Dataset is written to a
    streaming sink.

    * `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
      the sink
    * `complete`: All the rows in the streaming DataFrame/Dataset will be written to the
      sink every time there are some updates
    * `update`: Only the rows that were updated in the streaming DataFrame/Dataset will
      be written to the sink every time there are some updates. If the query doesn't
      contain aggregations, it will be equivalent to `append` mode.
:param partitionBy: names of partitioning columns
:param queryName: unique name for the query
:param options: All other string options. You may want to provide a `checkpointLocation`
    for most streams; however, it is not required for a `memory` stream.

>>> sq = sdf.writeStream.format('memory').queryName('this_query').start()
>>> sq.isActive
True
>>> sq.name
u'this_query'
>>> sq.stop()
>>> sq.isActive
False
>>> sq = sdf.writeStream.trigger(processingTime='5 seconds').start(
...     queryName='that_query', outputMode="append", format='memory')
>>> sq.name
u'that_query'
>>> sq.isActive
True
>>> sq.stop()
[ "Streams", "the", "contents", "of", "the", ":", "class", ":", "DataFrame", "to", "a", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L1073-L1128
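A short sketch of start() with explicit options, assuming the "rate" source and placeholder local paths; the query name and checkpoint location are illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("start-sketch").getOrCreate()
sdf = spark.readStream.format("rate").load()

query = (sdf.writeStream
         .format("parquet")
         .outputMode("append")
         .queryName("rate_to_parquet")
         .option("checkpointLocation", "/tmp/start_sketch/checkpoint")
         .start(path="/tmp/start_sketch/data"))

print(query.name, query.isActive)
query.stop()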
apache/spark
python/pyspark/cloudpickle.py
_make_cell_set_template_code
def _make_cell_set_template_code(): """Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below """ def inner(value): lambda: cell # make ``cell`` a closure so that we get a STORE_DEREF cell = value co = inner.__code__ # NOTE: we are marking the cell variable as a free variable intentionally # so that we simulate an inner function instead of the outer function. This # is what gives us the ``nonlocal`` behavior in a Python 2 compatible way. if not PY3: # pragma: no branch return types.CodeType( co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), ) else: return types.CodeType( co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), )
python
def _make_cell_set_template_code(): """Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below """ def inner(value): lambda: cell # make ``cell`` a closure so that we get a STORE_DEREF cell = value co = inner.__code__ # NOTE: we are marking the cell variable as a free variable intentionally # so that we simulate an inner function instead of the outer function. This # is what gives us the ``nonlocal`` behavior in a Python 2 compatible way. if not PY3: # pragma: no branch return types.CodeType( co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), ) else: return types.CodeType( co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code, co.co_consts, co.co_names, co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_cellvars, # this is the trickery (), )
[ "def", "_make_cell_set_template_code", "(", ")", ":", "def", "inner", "(", "value", ")", ":", "lambda", ":", "cell", "# make ``cell`` a closure so that we get a STORE_DEREF", "cell", "=", "value", "co", "=", "inner", ".", "__code__", "# NOTE: we are marking the cell variable as a free variable intentionally", "# so that we simulate an inner function instead of the outer function. This", "# is what gives us the ``nonlocal`` behavior in a Python 2 compatible way.", "if", "not", "PY3", ":", "# pragma: no branch", "return", "types", ".", "CodeType", "(", "co", ".", "co_argcount", ",", "co", ".", "co_nlocals", ",", "co", ".", "co_stacksize", ",", "co", ".", "co_flags", ",", "co", ".", "co_code", ",", "co", ".", "co_consts", ",", "co", ".", "co_names", ",", "co", ".", "co_varnames", ",", "co", ".", "co_filename", ",", "co", ".", "co_name", ",", "co", ".", "co_firstlineno", ",", "co", ".", "co_lnotab", ",", "co", ".", "co_cellvars", ",", "# this is the trickery", "(", ")", ",", ")", "else", ":", "return", "types", ".", "CodeType", "(", "co", ".", "co_argcount", ",", "co", ".", "co_kwonlyargcount", ",", "co", ".", "co_nlocals", ",", "co", ".", "co_stacksize", ",", "co", ".", "co_flags", ",", "co", ".", "co_code", ",", "co", ".", "co_consts", ",", "co", ".", "co_names", ",", "co", ".", "co_varnames", ",", "co", ".", "co_filename", ",", "co", ".", "co_name", ",", "co", ".", "co_firstlineno", ",", "co", ".", "co_lnotab", ",", "co", ".", "co_cellvars", ",", "# this is the trickery", "(", ")", ",", ")" ]
Get the Python compiler to emit LOAD_FAST(arg); STORE_DEREF Notes ----- In Python 3, we could use an easier function: .. code-block:: python def f(): cell = None def _stub(value): nonlocal cell cell = value return _stub _cell_set_template_code = f().__code__ This function is _only_ a LOAD_FAST(arg); STORE_DEREF, but that is invalid syntax on Python 2. If we use this function we also don't need to do the weird freevars/cellvars swap below
[ "Get", "the", "Python", "compiler", "to", "emit", "LOAD_FAST", "(", "arg", ")", ";", "STORE_DEREF" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L82-L149
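For readers on Python 3 only, the docstring's "easier function" amounts to the sketch below: `nonlocal` turns the stub's assignment into a STORE_DEREF on the enclosing cell, which is exactly the bytecode the template code manufactures for Python 2. This is a standalone illustration, not code from the record.

def make_cell():
    value = None
    def get():
        return value
    def set_(new_value):
        nonlocal value          # write into the closure cell shared with get()
        value = new_value
    return get, set_

get, set_ = make_cell()
set_(42)
assert get() == 42  # the closure cell now holds 42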
apache/spark
python/pyspark/cloudpickle.py
is_tornado_coroutine
def is_tornado_coroutine(func):
    """
    Return whether *func* is a Tornado coroutine function.
    Running coroutines are not supported.
    """
    if 'tornado.gen' not in sys.modules:
        return False
    gen = sys.modules['tornado.gen']
    if not hasattr(gen, "is_coroutine_function"):
        # Tornado version is too old
        return False
    return gen.is_coroutine_function(func)
python
def is_tornado_coroutine(func):
    """
    Return whether *func* is a Tornado coroutine function.
    Running coroutines are not supported.
    """
    if 'tornado.gen' not in sys.modules:
        return False
    gen = sys.modules['tornado.gen']
    if not hasattr(gen, "is_coroutine_function"):
        # Tornado version is too old
        return False
    return gen.is_coroutine_function(func)
[ "def", "is_tornado_coroutine", "(", "func", ")", ":", "if", "'tornado.gen'", "not", "in", "sys", ".", "modules", ":", "return", "False", "gen", "=", "sys", ".", "modules", "[", "'tornado.gen'", "]", "if", "not", "hasattr", "(", "gen", ",", "\"is_coroutine_function\"", ")", ":", "# Tornado version is too old", "return", "False", "return", "gen", ".", "is_coroutine_function", "(", "func", ")" ]
Return whether *func* is a Tornado coroutine function. Running coroutines are not supported.
[ "Return", "whether", "*", "func", "*", "is", "a", "Tornado", "coroutine", "function", ".", "Running", "coroutines", "are", "not", "supported", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L905-L916
apache/spark
python/pyspark/cloudpickle.py
dump
def dump(obj, file, protocol=None):
    """Serialize obj as bytes streamed into file

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    CloudPickler(file, protocol=protocol).dump(obj)
python
def dump(obj, file, protocol=None):
    """Serialize obj as bytes streamed into file

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    CloudPickler(file, protocol=protocol).dump(obj)
[ "def", "dump", "(", "obj", ",", "file", ",", "protocol", "=", "None", ")", ":", "CloudPickler", "(", "file", ",", "protocol", "=", "protocol", ")", ".", "dump", "(", "obj", ")" ]
Serialize obj as bytes streamed into file protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
[ "Serialize", "obj", "as", "bytes", "streamed", "into", "file" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L926-L936
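A small round-trip sketch for dump(): stream a closure into a file-like object and read it back with the standard pickle module. It assumes pyspark's vendored cloudpickle is importable at load time; the helper function is illustrative.

import io
import pickle
from pyspark.cloudpickle import dump

def make_adder(n):
    return lambda x: x + n

buf = io.BytesIO()
dump(make_adder(10), buf)   # protocol defaults to HIGHEST_PROTOCOL
buf.seek(0)
add10 = pickle.load(buf)    # plain pickle can read the cloudpickle stream
assert add10(5) == 15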
apache/spark
python/pyspark/cloudpickle.py
dumps
def dumps(obj, protocol=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    file = StringIO()
    try:
        cp = CloudPickler(file, protocol=protocol)
        cp.dump(obj)
        return file.getvalue()
    finally:
        file.close()
python
def dumps(obj, protocol=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
    between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python.
    """
    file = StringIO()
    try:
        cp = CloudPickler(file, protocol=protocol)
        cp.dump(obj)
        return file.getvalue()
    finally:
        file.close()
[ "def", "dumps", "(", "obj", ",", "protocol", "=", "None", ")", ":", "file", "=", "StringIO", "(", ")", "try", ":", "cp", "=", "CloudPickler", "(", "file", ",", "protocol", "=", "protocol", ")", "cp", ".", "dump", "(", "obj", ")", "return", "file", ".", "getvalue", "(", ")", "finally", ":", "file", ".", "close", "(", ")" ]
Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python.
[ "Serialize", "obj", "as", "a", "string", "of", "bytes", "allocated", "in", "memory" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L939-L955
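A quick dumps()/pickle.loads round trip for an object the standard pickler rejects, such as a lambda defined at top level; the variable names are illustrative.

import pickle
from pyspark.cloudpickle import dumps

square = lambda x: x * x
payload = dumps(square, protocol=pickle.HIGHEST_PROTOCOL)
restored = pickle.loads(payload)
assert restored(7) == 49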
apache/spark
python/pyspark/cloudpickle.py
_fill_function
def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """ if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
python
def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """ if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
[ "def", "_fill_function", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "2", ":", "func", "=", "args", "[", "0", "]", "state", "=", "args", "[", "1", "]", "elif", "len", "(", "args", ")", "==", "5", ":", "# Backwards compat for cloudpickle v0.4.0, after which the `module`", "# argument was introduced", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "elif", "len", "(", "args", ")", "==", "6", ":", "# Backwards compat for cloudpickle v0.4.1, after which the function", "# state was passed as a dict to the _fill_function it-self.", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'module'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unexpected _fill_value arguments: %r'", "%", "(", "args", ",", ")", ")", "# - At pickling time, any dynamic global variable used by func is", "# serialized by value (in state['globals']).", "# - At unpickling time, func's __globals__ attribute is initialized by", "# first retrieving an empty isolated namespace that will be shared", "# with other functions pickled from the same original module", "# by the same CloudPickler instance and then updated with the", "# content of state['globals'] to populate the shared isolated", "# namespace with all the global variables that are specifically", "# referenced for this function.", "func", ".", "__globals__", ".", "update", "(", "state", "[", "'globals'", "]", ")", "func", ".", "__defaults__", "=", "state", "[", "'defaults'", "]", "func", ".", "__dict__", "=", "state", "[", "'dict'", "]", "if", "'annotations'", "in", "state", ":", "func", ".", "__annotations__", "=", "state", "[", "'annotations'", "]", "if", "'doc'", "in", "state", ":", "func", ".", "__doc__", "=", "state", "[", "'doc'", "]", "if", "'name'", "in", "state", ":", "func", ".", "__name__", "=", "state", "[", "'name'", "]", "if", "'module'", "in", "state", ":", "func", ".", "__module__", "=", "state", "[", "'module'", "]", "if", "'qualname'", "in", "state", ":", "func", ".", "__qualname__", "=", "state", "[", "'qualname'", "]", "cells", "=", "func", ".", "__closure__", "if", "cells", "is", "not", "None", ":", "for", "cell", ",", "value", "in", "zip", "(", "cells", ",", "state", "[", "'closure_values'", "]", ")", ":", "if", "value", "is", "not", "_empty_cell_value", ":", "cell_set", "(", "cell", ",", "value", ")", "return", "func" ]
Fills in the rest of the function data into the skeleton function object. The skeleton itself is created by _make_skel_func().
[ "Fills", "in", "the", "rest", "of", "function", "data", "into", "the", "skeleton", "function", "object" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1060-L1113
apache/spark
python/pyspark/cloudpickle.py
_rehydrate_skeleton_class
def _rehydrate_skeleton_class(skeleton_class, class_dict):
    """Put attributes from `class_dict` back on `skeleton_class`.

    See CloudPickler.save_dynamic_class for more info.
    """
    registry = None
    for attrname, attr in class_dict.items():
        if attrname == "_abc_impl":
            registry = attr
        else:
            setattr(skeleton_class, attrname, attr)
    if registry is not None:
        for subclass in registry:
            skeleton_class.register(subclass)

    return skeleton_class
python
def _rehydrate_skeleton_class(skeleton_class, class_dict):
    """Put attributes from `class_dict` back on `skeleton_class`.

    See CloudPickler.save_dynamic_class for more info.
    """
    registry = None
    for attrname, attr in class_dict.items():
        if attrname == "_abc_impl":
            registry = attr
        else:
            setattr(skeleton_class, attrname, attr)
    if registry is not None:
        for subclass in registry:
            skeleton_class.register(subclass)

    return skeleton_class
[ "def", "_rehydrate_skeleton_class", "(", "skeleton_class", ",", "class_dict", ")", ":", "registry", "=", "None", "for", "attrname", ",", "attr", "in", "class_dict", ".", "items", "(", ")", ":", "if", "attrname", "==", "\"_abc_impl\"", ":", "registry", "=", "attr", "else", ":", "setattr", "(", "skeleton_class", ",", "attrname", ",", "attr", ")", "if", "registry", "is", "not", "None", ":", "for", "subclass", "in", "registry", ":", "skeleton_class", ".", "register", "(", "subclass", ")", "return", "skeleton_class" ]
Put attributes from `class_dict` back on `skeleton_class`. See CloudPickler.save_dynamic_class for more info.
[ "Put", "attributes", "from", "class_dict", "back", "on", "skeleton_class", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1146-L1161
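A toy analogue of the skeleton-then-attributes idea: build an empty class first, then attach its members afterwards, which is what _rehydrate_skeleton_class does for unpickled dynamic classes. This does not call the private helper itself; the class and attribute names are made up for illustration.

Skeleton = type("Point", (object,), {"__doc__": "2-D point"})

class_dict = {
    "__init__": lambda self, x, y: setattr(self, "xy", (x, y)),
    "norm2": lambda self: self.xy[0] ** 2 + self.xy[1] ** 2,
}
for name, attr in class_dict.items():
    setattr(Skeleton, name, attr)   # "rehydrate" the empty skeleton

p = Skeleton(3, 4)
assert p.norm2() == 25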
apache/spark
python/pyspark/cloudpickle.py
_is_dynamic
def _is_dynamic(module):
    """
    Return True if the module is a special module that cannot be imported by its
    name.
    """
    # Quick check: modules that have a __file__ attribute are not dynamic modules.
    if hasattr(module, '__file__'):
        return False

    if hasattr(module, '__spec__'):
        return module.__spec__ is None
    else:
        # Backward compat for Python 2
        import imp
        try:
            path = None
            for part in module.__name__.split('.'):
                if path is not None:
                    path = [path]
                f, path, description = imp.find_module(part, path)
                if f is not None:
                    f.close()
        except ImportError:
            return True
        return False
python
def _is_dynamic(module):
    """
    Return True if the module is a special module that cannot be imported by its
    name.
    """
    # Quick check: modules that have a __file__ attribute are not dynamic modules.
    if hasattr(module, '__file__'):
        return False

    if hasattr(module, '__spec__'):
        return module.__spec__ is None
    else:
        # Backward compat for Python 2
        import imp
        try:
            path = None
            for part in module.__name__.split('.'):
                if path is not None:
                    path = [path]
                f, path, description = imp.find_module(part, path)
                if f is not None:
                    f.close()
        except ImportError:
            return True
        return False
[ "def", "_is_dynamic", "(", "module", ")", ":", "# Quick check: module that have __file__ attribute are not dynamic modules.", "if", "hasattr", "(", "module", ",", "'__file__'", ")", ":", "return", "False", "if", "hasattr", "(", "module", ",", "'__spec__'", ")", ":", "return", "module", ".", "__spec__", "is", "None", "else", ":", "# Backward compat for Python 2", "import", "imp", "try", ":", "path", "=", "None", "for", "part", "in", "module", ".", "__name__", ".", "split", "(", "'.'", ")", ":", "if", "path", "is", "not", "None", ":", "path", "=", "[", "path", "]", "f", ",", "path", ",", "description", "=", "imp", ".", "find_module", "(", "part", ",", "path", ")", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "except", "ImportError", ":", "return", "True", "return", "False" ]
Return True if the module is a special module that cannot be imported by its name.
[ "Return", "True", "if", "the", "module", "is", "special", "module", "that", "cannot", "be", "imported", "by", "its", "name", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1164-L1188
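A sketch of what "dynamic" means here, poking at the private helper only to illustrate the record above: a module created purely in memory has no __file__ and no usable __spec__, while an ordinary imported module does. The module name is made up.

import types
import pyspark.cloudpickle as cp

mem_mod = types.ModuleType("made_up_module")   # no __file__; not importable by name
print(cp._is_dynamic(mem_mod))                  # expected: True

import json
print(cp._is_dynamic(json))                     # expected: False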
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_codeobject
def save_codeobject(self, obj): """ Save a code object """ if PY3: # pragma: no branch args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) self.save_reduce(types.CodeType, args, obj=obj)
python
def save_codeobject(self, obj): """ Save a code object """ if PY3: # pragma: no branch args = ( obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) else: args = ( obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name, obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars ) self.save_reduce(types.CodeType, args, obj=obj)
[ "def", "save_codeobject", "(", "self", ",", "obj", ")", ":", "if", "PY3", ":", "# pragma: no branch", "args", "=", "(", "obj", ".", "co_argcount", ",", "obj", ".", "co_kwonlyargcount", ",", "obj", ".", "co_nlocals", ",", "obj", ".", "co_stacksize", ",", "obj", ".", "co_flags", ",", "obj", ".", "co_code", ",", "obj", ".", "co_consts", ",", "obj", ".", "co_names", ",", "obj", ".", "co_varnames", ",", "obj", ".", "co_filename", ",", "obj", ".", "co_name", ",", "obj", ".", "co_firstlineno", ",", "obj", ".", "co_lnotab", ",", "obj", ".", "co_freevars", ",", "obj", ".", "co_cellvars", ")", "else", ":", "args", "=", "(", "obj", ".", "co_argcount", ",", "obj", ".", "co_nlocals", ",", "obj", ".", "co_stacksize", ",", "obj", ".", "co_flags", ",", "obj", ".", "co_code", ",", "obj", ".", "co_consts", ",", "obj", ".", "co_names", ",", "obj", ".", "co_varnames", ",", "obj", ".", "co_filename", ",", "obj", ".", "co_name", ",", "obj", ".", "co_firstlineno", ",", "obj", ".", "co_lnotab", ",", "obj", ".", "co_freevars", ",", "obj", ".", "co_cellvars", ")", "self", ".", "save_reduce", "(", "types", ".", "CodeType", ",", "args", ",", "obj", "=", "obj", ")" ]
Save a code object
[ "Save", "a", "code", "object" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L298-L315
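A sketch that exercises the code-object path from the outside: pickling a bare __code__ with cloudpickle and rebuilding a function from it. This goes through public dumps() rather than calling save_codeobject directly, and the function names are illustrative; it assumes the code object uses no globals.

import pickle
import types
from pyspark.cloudpickle import dumps

def f(a, b):
    return a * b + 1

restored_code = pickle.loads(dumps(f.__code__))
g = types.FunctionType(restored_code, {})   # rebuild a callable from the code object
assert g(2, 3) == 7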
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_function
def save_function(self, obj, name=None): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """ try: should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS except TypeError: # Methods of builtin types aren't hashable in python 2. should_special_case = False if should_special_case: # We keep a special-cased cache of built-in type constructors at # global scope, because these functions are structured very # differently in different python versions and implementations (for # example, they're instances of types.BuiltinFunctionType in # CPython, but they're ordinary types.FunctionType instances in # PyPy). # # If the function we've received is in that cache, we just # serialize it as a lookup into the cache. return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj) write = self.write if name is None: name = obj.__name__ try: # whichmodule() could fail, see # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling modname = pickle.whichmodule(obj, name) except Exception: modname = None # print('which gives %s %s %s' % (modname, obj, name)) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None try: lookedup_by_name = getattr(themodule, name, None) except Exception: lookedup_by_name = None if themodule: if lookedup_by_name is obj: return self.save_global(obj, name) # a builtin_function_or_method which comes in as an attribute of some # object (e.g., itertools.chain.from_iterable) will end # up with modname "__main__" and so end up here. But these functions # have no __code__ attribute in CPython, so the handling for # user-defined functions below will fail. # So we pickle them here using save_reduce; have to do it differently # for different python versions. if not hasattr(obj, '__code__'): if PY3: # pragma: no branch rv = obj.__reduce_ex__(self.proto) else: if hasattr(obj, '__self__'): rv = (getattr, (obj.__self__, name)) else: raise pickle.PicklingError("Can't pickle %r" % obj) return self.save_reduce(obj=obj, *rv) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. if (islambda(obj) or getattr(obj.__code__, 'co_filename', None) == '<stdin>' or themodule is None): self.save_function_tuple(obj) return else: # func is nested if lookedup_by_name is None or lookedup_by_name is not obj: self.save_function_tuple(obj) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj)
python
def save_function(self, obj, name=None): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """ try: should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS except TypeError: # Methods of builtin types aren't hashable in python 2. should_special_case = False if should_special_case: # We keep a special-cased cache of built-in type constructors at # global scope, because these functions are structured very # differently in different python versions and implementations (for # example, they're instances of types.BuiltinFunctionType in # CPython, but they're ordinary types.FunctionType instances in # PyPy). # # If the function we've received is in that cache, we just # serialize it as a lookup into the cache. return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj) write = self.write if name is None: name = obj.__name__ try: # whichmodule() could fail, see # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling modname = pickle.whichmodule(obj, name) except Exception: modname = None # print('which gives %s %s %s' % (modname, obj, name)) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None try: lookedup_by_name = getattr(themodule, name, None) except Exception: lookedup_by_name = None if themodule: if lookedup_by_name is obj: return self.save_global(obj, name) # a builtin_function_or_method which comes in as an attribute of some # object (e.g., itertools.chain.from_iterable) will end # up with modname "__main__" and so end up here. But these functions # have no __code__ attribute in CPython, so the handling for # user-defined functions below will fail. # So we pickle them here using save_reduce; have to do it differently # for different python versions. if not hasattr(obj, '__code__'): if PY3: # pragma: no branch rv = obj.__reduce_ex__(self.proto) else: if hasattr(obj, '__self__'): rv = (getattr, (obj.__self__, name)) else: raise pickle.PicklingError("Can't pickle %r" % obj) return self.save_reduce(obj=obj, *rv) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. if (islambda(obj) or getattr(obj.__code__, 'co_filename', None) == '<stdin>' or themodule is None): self.save_function_tuple(obj) return else: # func is nested if lookedup_by_name is None or lookedup_by_name is not obj: self.save_function_tuple(obj) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj)
[ "def", "save_function", "(", "self", ",", "obj", ",", "name", "=", "None", ")", ":", "try", ":", "should_special_case", "=", "obj", "in", "_BUILTIN_TYPE_CONSTRUCTORS", "except", "TypeError", ":", "# Methods of builtin types aren't hashable in python 2.", "should_special_case", "=", "False", "if", "should_special_case", ":", "# We keep a special-cased cache of built-in type constructors at", "# global scope, because these functions are structured very", "# differently in different python versions and implementations (for", "# example, they're instances of types.BuiltinFunctionType in", "# CPython, but they're ordinary types.FunctionType instances in", "# PyPy).", "#", "# If the function we've received is in that cache, we just", "# serialize it as a lookup into the cache.", "return", "self", ".", "save_reduce", "(", "_BUILTIN_TYPE_CONSTRUCTORS", "[", "obj", "]", ",", "(", ")", ",", "obj", "=", "obj", ")", "write", "=", "self", ".", "write", "if", "name", "is", "None", ":", "name", "=", "obj", ".", "__name__", "try", ":", "# whichmodule() could fail, see", "# https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling", "modname", "=", "pickle", ".", "whichmodule", "(", "obj", ",", "name", ")", "except", "Exception", ":", "modname", "=", "None", "# print('which gives %s %s %s' % (modname, obj, name))", "try", ":", "themodule", "=", "sys", ".", "modules", "[", "modname", "]", "except", "KeyError", ":", "# eval'd items such as namedtuple give invalid items for their function __module__", "modname", "=", "'__main__'", "if", "modname", "==", "'__main__'", ":", "themodule", "=", "None", "try", ":", "lookedup_by_name", "=", "getattr", "(", "themodule", ",", "name", ",", "None", ")", "except", "Exception", ":", "lookedup_by_name", "=", "None", "if", "themodule", ":", "if", "lookedup_by_name", "is", "obj", ":", "return", "self", ".", "save_global", "(", "obj", ",", "name", ")", "# a builtin_function_or_method which comes in as an attribute of some", "# object (e.g., itertools.chain.from_iterable) will end", "# up with modname \"__main__\" and so end up here. 
But these functions", "# have no __code__ attribute in CPython, so the handling for", "# user-defined functions below will fail.", "# So we pickle them here using save_reduce; have to do it differently", "# for different python versions.", "if", "not", "hasattr", "(", "obj", ",", "'__code__'", ")", ":", "if", "PY3", ":", "# pragma: no branch", "rv", "=", "obj", ".", "__reduce_ex__", "(", "self", ".", "proto", ")", "else", ":", "if", "hasattr", "(", "obj", ",", "'__self__'", ")", ":", "rv", "=", "(", "getattr", ",", "(", "obj", ".", "__self__", ",", "name", ")", ")", "else", ":", "raise", "pickle", ".", "PicklingError", "(", "\"Can't pickle %r\"", "%", "obj", ")", "return", "self", ".", "save_reduce", "(", "obj", "=", "obj", ",", "*", "rv", ")", "# if func is lambda, def'ed at prompt, is in main, or is nested, then", "# we'll pickle the actual function object rather than simply saving a", "# reference (as is done in default pickler), via save_function_tuple.", "if", "(", "islambda", "(", "obj", ")", "or", "getattr", "(", "obj", ".", "__code__", ",", "'co_filename'", ",", "None", ")", "==", "'<stdin>'", "or", "themodule", "is", "None", ")", ":", "self", ".", "save_function_tuple", "(", "obj", ")", "return", "else", ":", "# func is nested", "if", "lookedup_by_name", "is", "None", "or", "lookedup_by_name", "is", "not", "obj", ":", "self", ".", "save_function_tuple", "(", "obj", ")", "return", "if", "obj", ".", "__dict__", ":", "# essentially save_reduce, but workaround needed to avoid recursion", "self", ".", "save", "(", "_restore_attr", ")", "write", "(", "pickle", ".", "MARK", "+", "pickle", ".", "GLOBAL", "+", "modname", "+", "'\\n'", "+", "name", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")", "self", ".", "save", "(", "obj", ".", "__dict__", ")", "write", "(", "pickle", ".", "TUPLE", "+", "pickle", ".", "REDUCE", ")", "else", ":", "write", "(", "pickle", ".", "GLOBAL", "+", "modname", "+", "'\\n'", "+", "name", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")" ]
Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately.
[ "Registered", "with", "the", "dispatch", "to", "handle", "all", "function", "types", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L319-L412
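A minimal, hypothetical sketch of the two paths this dispatcher takes (it assumes the module-level dumps() helper that this vendored cloudpickle module exposes, as upstream cloudpickle does): a function importable by module and name is saved as a plain global reference, while a lambda must be serialized by value through save_function_tuple.

    import os.path
    import pickle
    from pyspark import cloudpickle   # assumed import path for the vendored module

    square = lambda x: x * x          # defined in __main__, so no importable name

    by_ref = cloudpickle.dumps(os.path.join)   # importable -> saved via save_global
    by_value = cloudpickle.dumps(square)       # lambda -> saved via save_function_tuple

    assert pickle.loads(by_ref) is os.path.join   # plain pickle can read both payloads
    assert pickle.loads(by_value)(4) == 16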
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_dynamic_class
def save_dynamic_class(self, obj): """ Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules. """ clsdict = dict(obj.__dict__) # copy dict proxy to a dict clsdict.pop('__weakref__', None) # For ABCMeta in python3.7+, remove _abc_impl as it is not picklable. # This is a fix which breaks the cache but this only makes the first # calls to issubclass slower. if "_abc_impl" in clsdict: import abc (registry, _, _, _) = abc._get_dump(obj) clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] # On PyPy, __doc__ is a readonly attribute, so we need to include it in # the initial skeleton class. This is safe because we know that the # doc can't participate in a cycle with the original class. type_kwargs = {'__doc__': clsdict.pop('__doc__', None)} if hasattr(obj, "__slots__"): type_kwargs['__slots__'] = obj.__slots__ # pickle string length optimization: member descriptors of obj are # created automatically from obj's __slots__ attribute, no need to # save them in obj's state if isinstance(obj.__slots__, string_types): clsdict.pop(obj.__slots__) else: for k in obj.__slots__: clsdict.pop(k, None) # If type overrides __dict__ as a property, include it in the type kwargs. # In Python 2, we can't set this attribute after construction. __dict__ = clsdict.pop('__dict__', None) if isinstance(__dict__, property): type_kwargs['__dict__'] = __dict__ save = self.save write = self.write # We write pickle instructions explicitly here to handle the # possibility that the type object participates in a cycle with its own # __dict__. We first write an empty "skeleton" version of the class and # memoize it before writing the class' __dict__ itself. We then write # instructions to "rehydrate" the skeleton class by restoring the # attributes from the __dict__. # # A type can appear in a cycle with its __dict__ if an instance of the # type appears in the type's __dict__ (which happens for the stdlib # Enum class), or if the type defines methods that close over the name # of the type, (which is common for Python 2-style super() calls). # Push the rehydration function. save(_rehydrate_skeleton_class) # Mark the start of the args tuple for the rehydration function. write(pickle.MARK) # Create and memoize an skeleton class with obj's name and bases. tp = type(obj) self.save_reduce(tp, (obj.__name__, obj.__bases__, type_kwargs), obj=obj) # Now save the rest of obj's __dict__. Any references to obj # encountered while saving will point to the skeleton class. save(clsdict) # Write a tuple of (skeleton_class, clsdict). write(pickle.TUPLE) # Call _rehydrate_skeleton_class(skeleton_class, clsdict) write(pickle.REDUCE)
python
def save_dynamic_class(self, obj): """ Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules. """ clsdict = dict(obj.__dict__) # copy dict proxy to a dict clsdict.pop('__weakref__', None) # For ABCMeta in python3.7+, remove _abc_impl as it is not picklable. # This is a fix which breaks the cache but this only makes the first # calls to issubclass slower. if "_abc_impl" in clsdict: import abc (registry, _, _, _) = abc._get_dump(obj) clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] # On PyPy, __doc__ is a readonly attribute, so we need to include it in # the initial skeleton class. This is safe because we know that the # doc can't participate in a cycle with the original class. type_kwargs = {'__doc__': clsdict.pop('__doc__', None)} if hasattr(obj, "__slots__"): type_kwargs['__slots__'] = obj.__slots__ # pickle string length optimization: member descriptors of obj are # created automatically from obj's __slots__ attribute, no need to # save them in obj's state if isinstance(obj.__slots__, string_types): clsdict.pop(obj.__slots__) else: for k in obj.__slots__: clsdict.pop(k, None) # If type overrides __dict__ as a property, include it in the type kwargs. # In Python 2, we can't set this attribute after construction. __dict__ = clsdict.pop('__dict__', None) if isinstance(__dict__, property): type_kwargs['__dict__'] = __dict__ save = self.save write = self.write # We write pickle instructions explicitly here to handle the # possibility that the type object participates in a cycle with its own # __dict__. We first write an empty "skeleton" version of the class and # memoize it before writing the class' __dict__ itself. We then write # instructions to "rehydrate" the skeleton class by restoring the # attributes from the __dict__. # # A type can appear in a cycle with its __dict__ if an instance of the # type appears in the type's __dict__ (which happens for the stdlib # Enum class), or if the type defines methods that close over the name # of the type, (which is common for Python 2-style super() calls). # Push the rehydration function. save(_rehydrate_skeleton_class) # Mark the start of the args tuple for the rehydration function. write(pickle.MARK) # Create and memoize an skeleton class with obj's name and bases. tp = type(obj) self.save_reduce(tp, (obj.__name__, obj.__bases__, type_kwargs), obj=obj) # Now save the rest of obj's __dict__. Any references to obj # encountered while saving will point to the skeleton class. save(clsdict) # Write a tuple of (skeleton_class, clsdict). write(pickle.TUPLE) # Call _rehydrate_skeleton_class(skeleton_class, clsdict) write(pickle.REDUCE)
[ "def", "save_dynamic_class", "(", "self", ",", "obj", ")", ":", "clsdict", "=", "dict", "(", "obj", ".", "__dict__", ")", "# copy dict proxy to a dict", "clsdict", ".", "pop", "(", "'__weakref__'", ",", "None", ")", "# For ABCMeta in python3.7+, remove _abc_impl as it is not picklable.", "# This is a fix which breaks the cache but this only makes the first", "# calls to issubclass slower.", "if", "\"_abc_impl\"", "in", "clsdict", ":", "import", "abc", "(", "registry", ",", "_", ",", "_", ",", "_", ")", "=", "abc", ".", "_get_dump", "(", "obj", ")", "clsdict", "[", "\"_abc_impl\"", "]", "=", "[", "subclass_weakref", "(", ")", "for", "subclass_weakref", "in", "registry", "]", "# On PyPy, __doc__ is a readonly attribute, so we need to include it in", "# the initial skeleton class. This is safe because we know that the", "# doc can't participate in a cycle with the original class.", "type_kwargs", "=", "{", "'__doc__'", ":", "clsdict", ".", "pop", "(", "'__doc__'", ",", "None", ")", "}", "if", "hasattr", "(", "obj", ",", "\"__slots__\"", ")", ":", "type_kwargs", "[", "'__slots__'", "]", "=", "obj", ".", "__slots__", "# pickle string length optimization: member descriptors of obj are", "# created automatically from obj's __slots__ attribute, no need to", "# save them in obj's state", "if", "isinstance", "(", "obj", ".", "__slots__", ",", "string_types", ")", ":", "clsdict", ".", "pop", "(", "obj", ".", "__slots__", ")", "else", ":", "for", "k", "in", "obj", ".", "__slots__", ":", "clsdict", ".", "pop", "(", "k", ",", "None", ")", "# If type overrides __dict__ as a property, include it in the type kwargs.", "# In Python 2, we can't set this attribute after construction.", "__dict__", "=", "clsdict", ".", "pop", "(", "'__dict__'", ",", "None", ")", "if", "isinstance", "(", "__dict__", ",", "property", ")", ":", "type_kwargs", "[", "'__dict__'", "]", "=", "__dict__", "save", "=", "self", ".", "save", "write", "=", "self", ".", "write", "# We write pickle instructions explicitly here to handle the", "# possibility that the type object participates in a cycle with its own", "# __dict__. We first write an empty \"skeleton\" version of the class and", "# memoize it before writing the class' __dict__ itself. We then write", "# instructions to \"rehydrate\" the skeleton class by restoring the", "# attributes from the __dict__.", "#", "# A type can appear in a cycle with its __dict__ if an instance of the", "# type appears in the type's __dict__ (which happens for the stdlib", "# Enum class), or if the type defines methods that close over the name", "# of the type, (which is common for Python 2-style super() calls).", "# Push the rehydration function.", "save", "(", "_rehydrate_skeleton_class", ")", "# Mark the start of the args tuple for the rehydration function.", "write", "(", "pickle", ".", "MARK", ")", "# Create and memoize an skeleton class with obj's name and bases.", "tp", "=", "type", "(", "obj", ")", "self", ".", "save_reduce", "(", "tp", ",", "(", "obj", ".", "__name__", ",", "obj", ".", "__bases__", ",", "type_kwargs", ")", ",", "obj", "=", "obj", ")", "# Now save the rest of obj's __dict__. Any references to obj", "# encountered while saving will point to the skeleton class.", "save", "(", "clsdict", ")", "# Write a tuple of (skeleton_class, clsdict).", "write", "(", "pickle", ".", "TUPLE", ")", "# Call _rehydrate_skeleton_class(skeleton_class, clsdict)", "write", "(", "pickle", ".", "REDUCE", ")" ]
Save a class that can't be stored as module global. This method is used to serialize classes that are defined inside functions, or that otherwise can't be serialized as attribute lookups from global modules.
[ "Save", "a", "class", "that", "can", "t", "be", "stored", "as", "module", "global", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L463-L538
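A hedged illustration (make_counter_class is an invented name, and the same dumps() helper is assumed): a class created at runtime inside a function has no stable import path, so it is rebuilt from a pickled skeleton plus its __dict__ instead of being looked up by name.

    import pickle
    from pyspark import cloudpickle

    def make_counter_class():
        class Counter(object):               # only exists inside this call
            def __init__(self):
                self.n = 0
            def bump(self):
                self.n += 1
                return self.n
        return Counter

    Counter = make_counter_class()
    Restored = pickle.loads(cloudpickle.dumps(Counter))   # rehydrated skeleton class
    c = Restored()
    assert c.bump() == 1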
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_function_tuple
def save_function_tuple(self, func): """ Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later. """ if is_tornado_coroutine(func): self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,), obj=func) return save = self.save write = self.write code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func) save(_fill_function) # skeleton function updater write(pickle.MARK) # beginning of tuple that _fill_function expects self._save_subimports( code, itertools.chain(f_globals.values(), closure_values or ()), ) # create a skeleton function object and memoize it save(_make_skel_func) save(( code, len(closure_values) if closure_values is not None else -1, base_globals, )) write(pickle.REDUCE) self.memoize(func) # save the rest of the func data needed by _fill_function state = { 'globals': f_globals, 'defaults': defaults, 'dict': dct, 'closure_values': closure_values, 'module': func.__module__, 'name': func.__name__, 'doc': func.__doc__, } if hasattr(func, '__annotations__') and sys.version_info >= (3, 7): state['annotations'] = func.__annotations__ if hasattr(func, '__qualname__'): state['qualname'] = func.__qualname__ save(state) write(pickle.TUPLE) write(pickle.REDUCE)
python
def save_function_tuple(self, func): """ Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later. """ if is_tornado_coroutine(func): self.save_reduce(_rebuild_tornado_coroutine, (func.__wrapped__,), obj=func) return save = self.save write = self.write code, f_globals, defaults, closure_values, dct, base_globals = self.extract_func_data(func) save(_fill_function) # skeleton function updater write(pickle.MARK) # beginning of tuple that _fill_function expects self._save_subimports( code, itertools.chain(f_globals.values(), closure_values or ()), ) # create a skeleton function object and memoize it save(_make_skel_func) save(( code, len(closure_values) if closure_values is not None else -1, base_globals, )) write(pickle.REDUCE) self.memoize(func) # save the rest of the func data needed by _fill_function state = { 'globals': f_globals, 'defaults': defaults, 'dict': dct, 'closure_values': closure_values, 'module': func.__module__, 'name': func.__name__, 'doc': func.__doc__, } if hasattr(func, '__annotations__') and sys.version_info >= (3, 7): state['annotations'] = func.__annotations__ if hasattr(func, '__qualname__'): state['qualname'] = func.__qualname__ save(state) write(pickle.TUPLE) write(pickle.REDUCE)
[ "def", "save_function_tuple", "(", "self", ",", "func", ")", ":", "if", "is_tornado_coroutine", "(", "func", ")", ":", "self", ".", "save_reduce", "(", "_rebuild_tornado_coroutine", ",", "(", "func", ".", "__wrapped__", ",", ")", ",", "obj", "=", "func", ")", "return", "save", "=", "self", ".", "save", "write", "=", "self", ".", "write", "code", ",", "f_globals", ",", "defaults", ",", "closure_values", ",", "dct", ",", "base_globals", "=", "self", ".", "extract_func_data", "(", "func", ")", "save", "(", "_fill_function", ")", "# skeleton function updater", "write", "(", "pickle", ".", "MARK", ")", "# beginning of tuple that _fill_function expects", "self", ".", "_save_subimports", "(", "code", ",", "itertools", ".", "chain", "(", "f_globals", ".", "values", "(", ")", ",", "closure_values", "or", "(", ")", ")", ",", ")", "# create a skeleton function object and memoize it", "save", "(", "_make_skel_func", ")", "save", "(", "(", "code", ",", "len", "(", "closure_values", ")", "if", "closure_values", "is", "not", "None", "else", "-", "1", ",", "base_globals", ",", ")", ")", "write", "(", "pickle", ".", "REDUCE", ")", "self", ".", "memoize", "(", "func", ")", "# save the rest of the func data needed by _fill_function", "state", "=", "{", "'globals'", ":", "f_globals", ",", "'defaults'", ":", "defaults", ",", "'dict'", ":", "dct", ",", "'closure_values'", ":", "closure_values", ",", "'module'", ":", "func", ".", "__module__", ",", "'name'", ":", "func", ".", "__name__", ",", "'doc'", ":", "func", ".", "__doc__", ",", "}", "if", "hasattr", "(", "func", ",", "'__annotations__'", ")", "and", "sys", ".", "version_info", ">=", "(", "3", ",", "7", ")", ":", "state", "[", "'annotations'", "]", "=", "func", ".", "__annotations__", "if", "hasattr", "(", "func", ",", "'__qualname__'", ")", ":", "state", "[", "'qualname'", "]", "=", "func", ".", "__qualname__", "save", "(", "state", ")", "write", "(", "pickle", ".", "TUPLE", ")", "write", "(", "pickle", ".", "REDUCE", ")" ]
Pickles an actual func object. A func comprises: code, globals, defaults, closure, and dict. We extract and save these, injecting reducing functions at certain points to recreate the func object. Keep in mind that some of these pieces can contain a ref to the func itself. Thus, a naive save on these pieces could trigger an infinite loop of save's. To get around that, we first create a skeleton func object using just the code (this is safe, since this won't contain a ref to the func), and memoize it as soon as it's created. The other stuff can then be filled in later.
[ "Pickles", "an", "actual", "func", "object", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L540-L596
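A small sketch (make_adder and OFFSET are invented names; the dumps() helper is assumed as above) of the pieces this routine has to carry for a closure: the code object, the enclosing cell values, and the globals the code actually references.

    import pickle
    from pyspark import cloudpickle

    OFFSET = 10                      # module-level global referenced by the closure

    def make_adder(n):
        def add(x):
            return x + n + OFFSET    # closes over n, reads OFFSET from globals
        return add

    restored = pickle.loads(cloudpickle.dumps(make_adder(5)))
    assert restored(1) == 16         # 1 + 5 (cell value) + 10 (captured global)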
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_global
def save_global(self, obj, name=None, pack=struct.pack): """ Save a "global". The name of this method is somewhat misleading: all types get dispatched here. """ if obj is type(None): return self.save_reduce(type, (None,), obj=obj) elif obj is type(Ellipsis): return self.save_reduce(type, (Ellipsis,), obj=obj) elif obj is type(NotImplemented): return self.save_reduce(type, (NotImplemented,), obj=obj) if obj.__module__ == "__main__": return self.save_dynamic_class(obj) try: return Pickler.save_global(self, obj, name=name) except Exception: if obj.__module__ == "__builtin__" or obj.__module__ == "builtins": if obj in _BUILTIN_TYPE_NAMES: return self.save_reduce( _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj) typ = type(obj) if typ is not obj and isinstance(obj, (type, types.ClassType)): return self.save_dynamic_class(obj) raise
python
def save_global(self, obj, name=None, pack=struct.pack): """ Save a "global". The name of this method is somewhat misleading: all types get dispatched here. """ if obj is type(None): return self.save_reduce(type, (None,), obj=obj) elif obj is type(Ellipsis): return self.save_reduce(type, (Ellipsis,), obj=obj) elif obj is type(NotImplemented): return self.save_reduce(type, (NotImplemented,), obj=obj) if obj.__module__ == "__main__": return self.save_dynamic_class(obj) try: return Pickler.save_global(self, obj, name=name) except Exception: if obj.__module__ == "__builtin__" or obj.__module__ == "builtins": if obj in _BUILTIN_TYPE_NAMES: return self.save_reduce( _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj) typ = type(obj) if typ is not obj and isinstance(obj, (type, types.ClassType)): return self.save_dynamic_class(obj) raise
[ "def", "save_global", "(", "self", ",", "obj", ",", "name", "=", "None", ",", "pack", "=", "struct", ".", "pack", ")", ":", "if", "obj", "is", "type", "(", "None", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "None", ",", ")", ",", "obj", "=", "obj", ")", "elif", "obj", "is", "type", "(", "Ellipsis", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "Ellipsis", ",", ")", ",", "obj", "=", "obj", ")", "elif", "obj", "is", "type", "(", "NotImplemented", ")", ":", "return", "self", ".", "save_reduce", "(", "type", ",", "(", "NotImplemented", ",", ")", ",", "obj", "=", "obj", ")", "if", "obj", ".", "__module__", "==", "\"__main__\"", ":", "return", "self", ".", "save_dynamic_class", "(", "obj", ")", "try", ":", "return", "Pickler", ".", "save_global", "(", "self", ",", "obj", ",", "name", "=", "name", ")", "except", "Exception", ":", "if", "obj", ".", "__module__", "==", "\"__builtin__\"", "or", "obj", ".", "__module__", "==", "\"builtins\"", ":", "if", "obj", "in", "_BUILTIN_TYPE_NAMES", ":", "return", "self", ".", "save_reduce", "(", "_builtin_type", ",", "(", "_BUILTIN_TYPE_NAMES", "[", "obj", "]", ",", ")", ",", "obj", "=", "obj", ")", "typ", "=", "type", "(", "obj", ")", "if", "typ", "is", "not", "obj", "and", "isinstance", "(", "obj", ",", "(", "type", ",", "types", ".", "ClassType", ")", ")", ":", "return", "self", ".", "save_dynamic_class", "(", "obj", ")", "raise" ]
Save a "global". The name of this method is somewhat misleading: all types get dispatched here.
[ "Save", "a", "global", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L678-L707
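A short sketch of the singleton special cases at the top of the method (again assuming the dumps() helper): types such as type(None) have no importable name, so they are reduced to a call like type(None) rather than a global lookup.

    import pickle
    from pyspark import cloudpickle

    NoneType = type(None)
    assert pickle.loads(cloudpickle.dumps(NoneType)) is NoneType
    assert pickle.loads(cloudpickle.dumps(type(Ellipsis))) is type(Ellipsis)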
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_inst
def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst""" cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
python
def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst""" cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
[ "def", "save_inst", "(", "self", ",", "obj", ")", ":", "cls", "=", "obj", ".", "__class__", "# Try the dispatch table (pickle module doesn't do it)", "f", "=", "self", ".", "dispatch", ".", "get", "(", "cls", ")", "if", "f", ":", "f", "(", "self", ",", "obj", ")", "# Call unbound method with explicit self", "return", "memo", "=", "self", ".", "memo", "write", "=", "self", ".", "write", "save", "=", "self", ".", "save", "if", "hasattr", "(", "obj", ",", "'__getinitargs__'", ")", ":", "args", "=", "obj", ".", "__getinitargs__", "(", ")", "len", "(", "args", ")", "# XXX Assert it's a sequence", "pickle", ".", "_keep_alive", "(", "args", ",", "memo", ")", "else", ":", "args", "=", "(", ")", "write", "(", "pickle", ".", "MARK", ")", "if", "self", ".", "bin", ":", "save", "(", "cls", ")", "for", "arg", "in", "args", ":", "save", "(", "arg", ")", "write", "(", "pickle", ".", "OBJ", ")", "else", ":", "for", "arg", "in", "args", ":", "save", "(", "arg", ")", "write", "(", "pickle", ".", "INST", "+", "cls", ".", "__module__", "+", "'\\n'", "+", "cls", ".", "__name__", "+", "'\\n'", ")", "self", ".", "memoize", "(", "obj", ")", "try", ":", "getstate", "=", "obj", ".", "__getstate__", "except", "AttributeError", ":", "stuff", "=", "obj", ".", "__dict__", "else", ":", "stuff", "=", "getstate", "(", ")", "pickle", ".", "_keep_alive", "(", "stuff", ",", "memo", ")", "save", "(", "stuff", ")", "write", "(", "pickle", ".", "BUILD", ")" ]
Inner logic to save instance. Based off pickle.save_inst
[ "Inner", "logic", "to", "save", "instance", ".", "Based", "off", "pickle", ".", "save_inst" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L725-L768
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_itemgetter
def save_itemgetter(self, obj): """itemgetter serializer (needed for namedtuple support)""" class Dummy: def __getitem__(self, item): return item items = obj(Dummy()) if not isinstance(items, tuple): items = (items,) return self.save_reduce(operator.itemgetter, items)
python
def save_itemgetter(self, obj): """itemgetter serializer (needed for namedtuple support)""" class Dummy: def __getitem__(self, item): return item items = obj(Dummy()) if not isinstance(items, tuple): items = (items,) return self.save_reduce(operator.itemgetter, items)
[ "def", "save_itemgetter", "(", "self", ",", "obj", ")", ":", "class", "Dummy", ":", "def", "__getitem__", "(", "self", ",", "item", ")", ":", "return", "item", "items", "=", "obj", "(", "Dummy", "(", ")", ")", "if", "not", "isinstance", "(", "items", ",", "tuple", ")", ":", "items", "=", "(", "items", ",", ")", "return", "self", ".", "save_reduce", "(", "operator", ".", "itemgetter", ",", "items", ")" ]
itemgetter serializer (needed for namedtuple support)
[ "itemgetter", "serializer", "(", "needed", "for", "namedtuple", "support", ")" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L786-L794
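A standalone sketch of the probing trick used above (_Probe is an invented stand-in for the local Dummy class): calling the getter on an object whose __getitem__ echoes its key recovers the indices, which is all that needs to be pickled.

    import operator

    class _Probe(object):
        def __getitem__(self, item):
            return item               # echo the key back

    getter = operator.itemgetter(2, 0)
    recovered = getter(_Probe())      # -> (2, 0)
    rebuilt = operator.itemgetter(*recovered)
    assert rebuilt(['a', 'b', 'c']) == ('c', 'a')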
apache/spark
python/pyspark/cloudpickle.py
CloudPickler.save_attrgetter
def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs))
python
def save_attrgetter(self, obj): """attrgetter serializer""" class Dummy(object): def __init__(self, attrs, index=None): self.attrs = attrs self.index = index def __getattribute__(self, item): attrs = object.__getattribute__(self, "attrs") index = object.__getattribute__(self, "index") if index is None: index = len(attrs) attrs.append(item) else: attrs[index] = ".".join([attrs[index], item]) return type(self)(attrs, index) attrs = [] obj(Dummy(attrs)) return self.save_reduce(operator.attrgetter, tuple(attrs))
[ "def", "save_attrgetter", "(", "self", ",", "obj", ")", ":", "class", "Dummy", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "attrs", ",", "index", "=", "None", ")", ":", "self", ".", "attrs", "=", "attrs", "self", ".", "index", "=", "index", "def", "__getattribute__", "(", "self", ",", "item", ")", ":", "attrs", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"attrs\"", ")", "index", "=", "object", ".", "__getattribute__", "(", "self", ",", "\"index\"", ")", "if", "index", "is", "None", ":", "index", "=", "len", "(", "attrs", ")", "attrs", ".", "append", "(", "item", ")", "else", ":", "attrs", "[", "index", "]", "=", "\".\"", ".", "join", "(", "[", "attrs", "[", "index", "]", ",", "item", "]", ")", "return", "type", "(", "self", ")", "(", "attrs", ",", "index", ")", "attrs", "=", "[", "]", "obj", "(", "Dummy", "(", "attrs", ")", ")", "return", "self", ".", "save_reduce", "(", "operator", ".", "attrgetter", ",", "tuple", "(", "attrs", ")", ")" ]
attrgetter serializer
[ "attrgetter", "serializer" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L799-L816
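An analogous, simplified probe for attrgetter (_AttrProbe is invented; it handles a single, possibly dotted path, whereas the serializer above also tracks an index so several comma-separated paths can be recorded in one pass): attribute accesses are replayed to recover the names to re-pickle.

    import operator

    class _AttrProbe(object):
        def __init__(self, names):
            self._names = names
        def __getattr__(self, item):      # only called for attributes that don't exist
            self._names.append(item)
            return self                   # keep chaining so dotted paths keep recording

    names = []
    operator.attrgetter('numerator')(_AttrProbe(names))
    rebuilt = operator.attrgetter('.'.join(names))
    assert rebuilt(7) == 7                # int.numerator is the int itself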
apache/spark
python/pyspark/ml/param/__init__.py
Param._copy_new_parent
def _copy_new_parent(self, parent): """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
python
def _copy_new_parent(self, parent): """Copy the current param to a new parent, must be a dummy param.""" if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
[ "def", "_copy_new_parent", "(", "self", ",", "parent", ")", ":", "if", "self", ".", "parent", "==", "\"undefined\"", ":", "param", "=", "copy", ".", "copy", "(", "self", ")", "param", ".", "parent", "=", "parent", ".", "uid", "return", "param", "else", ":", "raise", "ValueError", "(", "\"Cannot copy from non-dummy parent %s.\"", "%", "parent", ")" ]
Copy the current param to a new parent, must be a dummy param.
[ "Copy", "the", "current", "param", "to", "a", "new", "parent", "must", "be", "a", "dummy", "param", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L52-L59
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toList
def toList(value): """ Convert a value to a list, if possible. """ if type(value) == list: return value elif type(value) in [np.ndarray, tuple, xrange, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value)
python
def toList(value): """ Convert a value to a list, if possible. """ if type(value) == list: return value elif type(value) in [np.ndarray, tuple, xrange, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value)
[ "def", "toList", "(", "value", ")", ":", "if", "type", "(", "value", ")", "==", "list", ":", "return", "value", "elif", "type", "(", "value", ")", "in", "[", "np", ".", "ndarray", ",", "tuple", ",", "xrange", ",", "array", ".", "array", "]", ":", "return", "list", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "Vector", ")", ":", "return", "list", "(", "value", ".", "toArray", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Could not convert %s to list\"", "%", "value", ")" ]
Convert a value to a list, if possible.
[ "Convert", "a", "value", "to", "a", "list", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L113-L124
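A hedged usage sketch of the converter above (the imports are the usual public locations for these classes): lists pass through, tuples and NumPy arrays are copied into lists, and MLlib vectors are expanded via toArray().

    import numpy as np
    from pyspark.ml.linalg import DenseVector
    from pyspark.ml.param import TypeConverters

    assert TypeConverters.toList((1, 2, 3)) == [1, 2, 3]
    assert TypeConverters.toList(np.array([1.0, 2.0])) == [1.0, 2.0]
    assert TypeConverters.toList(DenseVector([0.5, 1.5])) == [0.5, 1.5]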
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toListFloat
def toListFloat(value): """ Convert a value to list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value)
python
def toListFloat(value): """ Convert a value to list of floats, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value)
[ "def", "toListFloat", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_numeric", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "float", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of floats\"", "%", "value", ")" ]
Convert a value to list of floats, if possible.
[ "Convert", "a", "value", "to", "list", "of", "floats", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L127-L135
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toListInt
def toListInt(value): """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
python
def toListInt(value): """ Convert a value to list of ints, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
[ "def", "toListInt", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_integer", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "int", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of ints\"", "%", "value", ")" ]
Convert a value to list of ints, if possible.
[ "Convert", "a", "value", "to", "list", "of", "ints", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L138-L146
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toListString
def toListString(value): """ Convert a value to list of strings, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value)
python
def toListString(value): """ Convert a value to list of strings, if possible. """ if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value)
[ "def", "toListString", "(", "value", ")", ":", "if", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_can_convert_to_string", "(", "v", ")", ",", "value", ")", ")", ":", "return", "[", "TypeConverters", ".", "toString", "(", "v", ")", "for", "v", "in", "value", "]", "raise", "TypeError", "(", "\"Could not convert %s to list of strings\"", "%", "value", ")" ]
Convert a value to list of strings, if possible.
[ "Convert", "a", "value", "to", "list", "of", "strings", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L149-L157
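One combined sketch for the three typed list converters above: each first funnels the input through toList, then checks every element before casting, and raises TypeError otherwise.

    from pyspark.ml.param import TypeConverters

    assert TypeConverters.toListFloat([1, 2]) == [1.0, 2.0]      # ints are numeric -> floats
    assert TypeConverters.toListInt((3, 4)) == [3, 4]            # tuple accepted via toList
    assert TypeConverters.toListString(['a', 'b']) == ['a', 'b']
    try:
        TypeConverters.toListInt(['x'])
    except TypeError:
        pass                                                     # non-integers are rejected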
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toVector
def toVector(value): """ Convert a value to a MLlib Vector, if possible. """ if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value)
python
def toVector(value): """ Convert a value to a MLlib Vector, if possible. """ if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value)
[ "def", "toVector", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "Vector", ")", ":", "return", "value", "elif", "TypeConverters", ".", "_can_convert_to_list", "(", "value", ")", ":", "value", "=", "TypeConverters", ".", "toList", "(", "value", ")", "if", "all", "(", "map", "(", "lambda", "v", ":", "TypeConverters", ".", "_is_numeric", "(", "v", ")", ",", "value", ")", ")", ":", "return", "DenseVector", "(", "value", ")", "raise", "TypeError", "(", "\"Could not convert %s to vector\"", "%", "value", ")" ]
Convert a value to a MLlib Vector, if possible.
[ "Convert", "a", "value", "to", "a", "MLlib", "Vector", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L160-L170
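A short sketch of the converter above: vectors pass through untouched, while plain numeric sequences are promoted to a DenseVector.

    from pyspark.ml.linalg import DenseVector
    from pyspark.ml.param import TypeConverters

    v = TypeConverters.toVector([0.0, 1.0, 2.0])
    assert isinstance(v, DenseVector)
    assert TypeConverters.toVector(v) is v      # already a Vector: returned as-is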
apache/spark
python/pyspark/ml/param/__init__.py
TypeConverters.toString
def toString(value): """ Convert a value to a string, if possible. """ if isinstance(value, basestring): return value elif type(value) in [np.string_, np.str_]: return str(value) elif type(value) == np.unicode_: return unicode(value) else: raise TypeError("Could not convert %s to string type" % type(value))
python
def toString(value): """ Convert a value to a string, if possible. """ if isinstance(value, basestring): return value elif type(value) in [np.string_, np.str_]: return str(value) elif type(value) == np.unicode_: return unicode(value) else: raise TypeError("Could not convert %s to string type" % type(value))
[ "def", "toString", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "return", "value", "elif", "type", "(", "value", ")", "in", "[", "np", ".", "string_", ",", "np", ".", "str_", "]", ":", "return", "str", "(", "value", ")", "elif", "type", "(", "value", ")", "==", "np", ".", "unicode_", ":", "return", "unicode", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Could not convert %s to string type\"", "%", "type", "(", "value", ")", ")" ]
Convert a value to a string, if possible.
[ "Convert", "a", "value", "to", "a", "string", "if", "possible", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L202-L213
apache/spark
python/pyspark/ml/param/__init__.py
Params._copy_params
def _copy_params(self): """ Copy all params defined on the class to current object. """ cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self))
python
def _copy_params(self): """ Copy all params defined on the class to current object. """ cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self))
[ "def", "_copy_params", "(", "self", ")", ":", "cls", "=", "type", "(", "self", ")", "src_name_attrs", "=", "[", "(", "x", ",", "getattr", "(", "cls", ",", "x", ")", ")", "for", "x", "in", "dir", "(", "cls", ")", "]", "src_params", "=", "list", "(", "filter", "(", "lambda", "nameAttr", ":", "isinstance", "(", "nameAttr", "[", "1", "]", ",", "Param", ")", ",", "src_name_attrs", ")", ")", "for", "name", ",", "param", "in", "src_params", ":", "setattr", "(", "self", ",", "name", ",", "param", ".", "_copy_new_parent", "(", "self", ")", ")" ]
Copy all params defined on the class to current object.
[ "Copy", "all", "params", "defined", "on", "the", "class", "to", "current", "object", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L250-L258
apache/spark
python/pyspark/ml/param/__init__.py
Params.params
def params(self): """ Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`. """ if self._params is None: self._params = list(filter(lambda attr: isinstance(attr, Param), [getattr(self, x) for x in dir(self) if x != "params" and not isinstance(getattr(type(self), x, None), property)])) return self._params
python
def params(self): """ Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`. """ if self._params is None: self._params = list(filter(lambda attr: isinstance(attr, Param), [getattr(self, x) for x in dir(self) if x != "params" and not isinstance(getattr(type(self), x, None), property)])) return self._params
[ "def", "params", "(", "self", ")", ":", "if", "self", ".", "_params", "is", "None", ":", "self", ".", "_params", "=", "list", "(", "filter", "(", "lambda", "attr", ":", "isinstance", "(", "attr", ",", "Param", ")", ",", "[", "getattr", "(", "self", ",", "x", ")", "for", "x", "in", "dir", "(", "self", ")", "if", "x", "!=", "\"params\"", "and", "not", "isinstance", "(", "getattr", "(", "type", "(", "self", ")", ",", "x", ",", "None", ")", ",", "property", ")", "]", ")", ")", "return", "self", ".", "_params" ]
Returns all params ordered by name. The default implementation uses :py:func:`dir` to get all attributes of type :py:class:`Param`.
[ "Returns", "all", "params", "ordered", "by", "name", ".", "The", "default", "implementation", "uses", ":", "py", ":", "func", ":", "dir", "to", "get", "all", "attributes", "of", "type", ":", "py", ":", "class", ":", "Param", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L261-L271
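A minimal, hypothetical Params subclass (HasThreshold is invented for illustration) showing how a class-level Param declared against Params._dummy() is copied onto each instance by _copy_params and then surfaced through params, explainParam, set and getOrDefault.

    from pyspark.ml.param import Param, Params, TypeConverters

    class HasThreshold(Params):
        threshold = Param(Params._dummy(), "threshold",
                          "decision threshold in [0, 1]",
                          typeConverter=TypeConverters.toFloat)

        def __init__(self):
            super(HasThreshold, self).__init__()
            self._setDefault(threshold=0.5)

    t = HasThreshold()
    assert [p.name for p in t.params] == ['threshold']
    print(t.explainParam('threshold'))      # threshold: decision threshold in [0, 1] (default: 0.5)
    t.set(t.threshold, 0.7)
    assert t.getOrDefault(t.threshold) == 0.7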
apache/spark
python/pyspark/ml/param/__init__.py
Params.explainParam
def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
python
def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
[ "def", "explainParam", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "values", "=", "[", "]", "if", "self", ".", "isDefined", "(", "param", ")", ":", "if", "param", "in", "self", ".", "_defaultParamMap", ":", "values", ".", "append", "(", "\"default: %s\"", "%", "self", ".", "_defaultParamMap", "[", "param", "]", ")", "if", "param", "in", "self", ".", "_paramMap", ":", "values", ".", "append", "(", "\"current: %s\"", "%", "self", ".", "_paramMap", "[", "param", "]", ")", "else", ":", "values", ".", "append", "(", "\"undefined\"", ")", "valueStr", "=", "\"(\"", "+", "\", \"", ".", "join", "(", "values", ")", "+", "\")\"", "return", "\"%s: %s %s\"", "%", "(", "param", ".", "name", ",", "param", ".", "doc", ",", "valueStr", ")" ]
Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string.
[ "Explains", "a", "single", "param", "and", "returns", "its", "name", "doc", "and", "optional", "default", "value", "and", "user", "-", "supplied", "value", "in", "a", "string", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L273-L288
apache/spark
python/pyspark/ml/param/__init__.py
Params.getParam
def getParam(self, paramName): """ Gets a param by its name. """ param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName)
python
def getParam(self, paramName): """ Gets a param by its name. """ param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName)
[ "def", "getParam", "(", "self", ",", "paramName", ")", ":", "param", "=", "getattr", "(", "self", ",", "paramName", ")", "if", "isinstance", "(", "param", ",", "Param", ")", ":", "return", "param", "else", ":", "raise", "ValueError", "(", "\"Cannot find param with name %s.\"", "%", "paramName", ")" ]
Gets a param by its name.
[ "Gets", "a", "param", "by", "its", "name", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L297-L305
apache/spark
python/pyspark/ml/param/__init__.py
Params.isSet
def isSet(self, param): """ Checks whether a param is explicitly set by user. """ param = self._resolveParam(param) return param in self._paramMap
python
def isSet(self, param): """ Checks whether a param is explicitly set by user. """ param = self._resolveParam(param) return param in self._paramMap
[ "def", "isSet", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "return", "param", "in", "self", ".", "_paramMap" ]
Checks whether a param is explicitly set by user.
[ "Checks", "whether", "a", "param", "is", "explicitly", "set", "by", "user", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L307-L312
apache/spark
python/pyspark/ml/param/__init__.py
Params.hasDefault
def hasDefault(self, param): """ Checks whether a param has a default value. """ param = self._resolveParam(param) return param in self._defaultParamMap
python
def hasDefault(self, param): """ Checks whether a param has a default value. """ param = self._resolveParam(param) return param in self._defaultParamMap
[ "def", "hasDefault", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "return", "param", "in", "self", ".", "_defaultParamMap" ]
Checks whether a param has a default value.
[ "Checks", "whether", "a", "param", "has", "a", "default", "value", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L314-L319
apache/spark
python/pyspark/ml/param/__init__.py
Params.hasParam
def hasParam(self, paramName): """ Tests whether this instance contains a param with a given (string) name. """ if isinstance(paramName, basestring): p = getattr(self, paramName, None) return isinstance(p, Param) else: raise TypeError("hasParam(): paramName must be a string")
python
def hasParam(self, paramName): """ Tests whether this instance contains a param with a given (string) name. """ if isinstance(paramName, basestring): p = getattr(self, paramName, None) return isinstance(p, Param) else: raise TypeError("hasParam(): paramName must be a string")
[ "def", "hasParam", "(", "self", ",", "paramName", ")", ":", "if", "isinstance", "(", "paramName", ",", "basestring", ")", ":", "p", "=", "getattr", "(", "self", ",", "paramName", ",", "None", ")", "return", "isinstance", "(", "p", ",", "Param", ")", "else", ":", "raise", "TypeError", "(", "\"hasParam(): paramName must be a string\"", ")" ]
Tests whether this instance contains a param with a given (string) name.
[ "Tests", "whether", "this", "instance", "contains", "a", "param", "with", "a", "given", "(", "string", ")", "name", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L328-L337
apache/spark
python/pyspark/ml/param/__init__.py
Params.getOrDefault
def getOrDefault(self, param): """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """ param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param]
python
def getOrDefault(self, param): """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """ param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param]
[ "def", "getOrDefault", "(", "self", ",", "param", ")", ":", "param", "=", "self", ".", "_resolveParam", "(", "param", ")", "if", "param", "in", "self", ".", "_paramMap", ":", "return", "self", ".", "_paramMap", "[", "param", "]", "else", ":", "return", "self", ".", "_defaultParamMap", "[", "param", "]" ]
Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set.
[ "Gets", "the", "value", "of", "a", "param", "in", "the", "user", "-", "supplied", "param", "map", "or", "its", "default", "value", ".", "Raises", "an", "error", "if", "neither", "is", "set", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L339-L348
apache/spark
python/pyspark/ml/param/__init__.py
Params.extractParamMap
def extractParamMap(self, extra=None): """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap
python
def extractParamMap(self, extra=None): """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap
[ "def", "extractParamMap", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "paramMap", "=", "self", ".", "_defaultParamMap", ".", "copy", "(", ")", "paramMap", ".", "update", "(", "self", ".", "_paramMap", ")", "paramMap", ".", "update", "(", "extra", ")", "return", "paramMap" ]
Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map
[ "Extracts", "the", "embedded", "default", "param", "values", "and", "user", "-", "supplied", "values", "and", "then", "merges", "them", "with", "extra", "values", "from", "input", "into", "a", "flat", "param", "map", "where", "the", "latter", "value", "is", "used", "if", "there", "exist", "conflicts", "i", ".", "e", ".", "with", "ordering", ":", "default", "param", "values", "<", "user", "-", "supplied", "values", "<", "extra", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L350-L366
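Reusing the hypothetical HasThreshold class sketched earlier, the merge ordering described above (default < user-supplied < extra) can be seen directly:

    t = HasThreshold()
    assert t.extractParamMap()[t.threshold] == 0.5        # default only
    t.set(t.threshold, 0.7)
    merged = t.extractParamMap({t.threshold: 0.9})
    assert merged[t.threshold] == 0.9                     # extra overrides user-supplied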
apache/spark
python/pyspark/ml/param/__init__.py
Params.copy
def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra)
python
def copy(self, extra=None): """ Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """ if extra is None: extra = dict() that = copy.copy(self) that._paramMap = {} that._defaultParamMap = {} return self._copyValues(that, extra)
[ "def", "copy", "(", "self", ",", "extra", "=", "None", ")", ":", "if", "extra", "is", "None", ":", "extra", "=", "dict", "(", ")", "that", "=", "copy", ".", "copy", "(", "self", ")", "that", ".", "_paramMap", "=", "{", "}", "that", ".", "_defaultParamMap", "=", "{", "}", "return", "self", ".", "_copyValues", "(", "that", ",", "extra", ")" ]
Creates a copy of this instance with the same uid and some extra params. The default implementation creates a shallow copy using :py:func:`copy.copy`, and then copies the embedded and extra parameters over and returns the copy. Subclasses should override this method if the default approach is not sufficient. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance
[ "Creates", "a", "copy", "of", "this", "instance", "with", "the", "same", "uid", "and", "some", "extra", "params", ".", "The", "default", "implementation", "creates", "a", "shallow", "copy", "using", ":", "py", ":", "func", ":", "copy", ".", "copy", "and", "then", "copies", "the", "embedded", "and", "extra", "parameters", "over", "and", "returns", "the", "copy", ".", "Subclasses", "should", "override", "this", "method", "if", "the", "default", "approach", "is", "not", "sufficient", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L368-L385
apache/spark
python/pyspark/ml/param/__init__.py
Params.set
def set(self, param, value): """ Sets a parameter in the embedded param map. """ self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value
python
def set(self, param, value): """ Sets a parameter in the embedded param map. """ self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value
[ "def", "set", "(", "self", ",", "param", ",", "value", ")", ":", "self", ".", "_shouldOwn", "(", "param", ")", "try", ":", "value", "=", "param", ".", "typeConverter", "(", "value", ")", "except", "ValueError", "as", "e", ":", "raise", "ValueError", "(", "'Invalid param value given for param \"%s\". %s'", "%", "(", "param", ".", "name", ",", "e", ")", ")", "self", ".", "_paramMap", "[", "param", "]", "=", "value" ]
Sets a parameter in the embedded param map.
[ "Sets", "a", "parameter", "in", "the", "embedded", "param", "map", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L387-L396
apache/spark
python/pyspark/ml/param/__init__.py
Params._shouldOwn
def _shouldOwn(self, param): """ Validates that the input param belongs to this Params instance. """ if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self))
python
def _shouldOwn(self, param): """ Validates that the input param belongs to this Params instance. """ if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self))
[ "def", "_shouldOwn", "(", "self", ",", "param", ")", ":", "if", "not", "(", "self", ".", "uid", "==", "param", ".", "parent", "and", "self", ".", "hasParam", "(", "param", ".", "name", ")", ")", ":", "raise", "ValueError", "(", "\"Param %r does not belong to %r.\"", "%", "(", "param", ",", "self", ")", ")" ]
Validates that the input param belongs to this Params instance.
[ "Validates", "that", "the", "input", "param", "belongs", "to", "this", "Params", "instance", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L398-L403
apache/spark
python/pyspark/ml/param/__init__.py
Params._resolveParam
def _resolveParam(self, param): """ Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance """ if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, basestring): return self.getParam(param) else: raise ValueError("Cannot resolve %r as a param." % param)
python
def _resolveParam(self, param): """ Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance """ if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, basestring): return self.getParam(param) else: raise ValueError("Cannot resolve %r as a param." % param)
[ "def", "_resolveParam", "(", "self", ",", "param", ")", ":", "if", "isinstance", "(", "param", ",", "Param", ")", ":", "self", ".", "_shouldOwn", "(", "param", ")", "return", "param", "elif", "isinstance", "(", "param", ",", "basestring", ")", ":", "return", "self", ".", "getParam", "(", "param", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot resolve %r as a param.\"", "%", "param", ")" ]
Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance
[ "Resolves", "a", "param", "and", "validates", "the", "ownership", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L405-L419
apache/spark
python/pyspark/ml/param/__init__.py
Params._set
def _set(self, **kwargs): """ Sets user-supplied params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None: try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e)) self._paramMap[p] = value return self
python
def _set(self, **kwargs): """ Sets user-supplied params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None: try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e)) self._paramMap[p] = value return self
[ "def", "_set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "param", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "p", "=", "getattr", "(", "self", ",", "param", ")", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "p", ".", "typeConverter", "(", "value", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "'Invalid param value given for param \"%s\". %s'", "%", "(", "p", ".", "name", ",", "e", ")", ")", "self", ".", "_paramMap", "[", "p", "]", "=", "value", "return", "self" ]
Sets user-supplied params.
[ "Sets", "user", "-", "supplied", "params", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L431-L443
apache/spark
python/pyspark/ml/param/__init__.py
Params._setDefault
def _setDefault(self, **kwargs): """ Sets default params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None and not isinstance(value, JavaObject): try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid default param value given for param "%s". %s' % (p.name, e)) self._defaultParamMap[p] = value return self
python
def _setDefault(self, **kwargs): """ Sets default params. """ for param, value in kwargs.items(): p = getattr(self, param) if value is not None and not isinstance(value, JavaObject): try: value = p.typeConverter(value) except TypeError as e: raise TypeError('Invalid default param value given for param "%s". %s' % (p.name, e)) self._defaultParamMap[p] = value return self
[ "def", "_setDefault", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "param", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "p", "=", "getattr", "(", "self", ",", "param", ")", "if", "value", "is", "not", "None", "and", "not", "isinstance", "(", "value", ",", "JavaObject", ")", ":", "try", ":", "value", "=", "p", ".", "typeConverter", "(", "value", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "'Invalid default param value given for param \"%s\". %s'", "%", "(", "p", ".", "name", ",", "e", ")", ")", "self", ".", "_defaultParamMap", "[", "p", "]", "=", "value", "return", "self" ]
Sets default params.
[ "Sets", "default", "params", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L452-L465
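The _set/_setDefault pair above is the pattern custom Params subclasses typically follow: defaults go into the default map at construction time, user values go through _set in setters. A small, self-contained sketch (the HasThreshold class and its param are hypothetical names, not taken from these records); it needs only pyspark on the path, no running SparkContext:

from pyspark.ml.param import Param, Params, TypeConverters

class HasThreshold(Params):
    # class-level Param template; Params.__init__ copies it per instance
    threshold = Param(Params._dummy(), "threshold", "decision threshold",
                      typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(HasThreshold, self).__init__()
        self._setDefault(threshold=0.5)     # default, kept apart from user-set values

    def setThreshold(self, value):
        return self._set(threshold=value)   # user value, runs the type converter

    def getThreshold(self):
        return self.getOrDefault(self.threshold)

h = HasThreshold()
print(h.getThreshold())   # 0.5 (default)
h.setThreshold(0.8)
print(h.getThreshold())   # 0.8 (explicitly set)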
apache/spark
python/pyspark/ml/param/__init__.py
Params._copyValues
def _copyValues(self, to, extra=None): """ Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied """ paramMap = self._paramMap.copy() if extra is not None: paramMap.update(extra) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to
python
def _copyValues(self, to, extra=None): """ Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied """ paramMap = self._paramMap.copy() if extra is not None: paramMap.update(extra) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to
[ "def", "_copyValues", "(", "self", ",", "to", ",", "extra", "=", "None", ")", ":", "paramMap", "=", "self", ".", "_paramMap", ".", "copy", "(", ")", "if", "extra", "is", "not", "None", ":", "paramMap", ".", "update", "(", "extra", ")", "for", "param", "in", "self", ".", "params", ":", "# copy default params", "if", "param", "in", "self", ".", "_defaultParamMap", "and", "to", ".", "hasParam", "(", "param", ".", "name", ")", ":", "to", ".", "_defaultParamMap", "[", "to", ".", "getParam", "(", "param", ".", "name", ")", "]", "=", "self", ".", "_defaultParamMap", "[", "param", "]", "# copy explicitly set params", "if", "param", "in", "paramMap", "and", "to", ".", "hasParam", "(", "param", ".", "name", ")", ":", "to", ".", "_set", "(", "*", "*", "{", "param", ".", "name", ":", "paramMap", "[", "param", "]", "}", ")", "return", "to" ]
Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied
[ "Copies", "param", "values", "from", "this", "instance", "to", "another", "instance", "for", "params", "shared", "by", "them", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L467-L486
apache/spark
python/pyspark/ml/param/__init__.py
Params._resetUid
def _resetUid(self, newUid): """ Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps """ newUid = unicode(newUid) self.uid = newUid newDefaultParamMap = dict() newParamMap = dict() for param in self.params: newParam = copy.copy(param) newParam.parent = newUid if param in self._defaultParamMap: newDefaultParamMap[newParam] = self._defaultParamMap[param] if param in self._paramMap: newParamMap[newParam] = self._paramMap[param] param.parent = newUid self._defaultParamMap = newDefaultParamMap self._paramMap = newParamMap return self
python
def _resetUid(self, newUid): """ Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps """ newUid = unicode(newUid) self.uid = newUid newDefaultParamMap = dict() newParamMap = dict() for param in self.params: newParam = copy.copy(param) newParam.parent = newUid if param in self._defaultParamMap: newDefaultParamMap[newParam] = self._defaultParamMap[param] if param in self._paramMap: newParamMap[newParam] = self._paramMap[param] param.parent = newUid self._defaultParamMap = newDefaultParamMap self._paramMap = newParamMap return self
[ "def", "_resetUid", "(", "self", ",", "newUid", ")", ":", "newUid", "=", "unicode", "(", "newUid", ")", "self", ".", "uid", "=", "newUid", "newDefaultParamMap", "=", "dict", "(", ")", "newParamMap", "=", "dict", "(", ")", "for", "param", "in", "self", ".", "params", ":", "newParam", "=", "copy", ".", "copy", "(", "param", ")", "newParam", ".", "parent", "=", "newUid", "if", "param", "in", "self", ".", "_defaultParamMap", ":", "newDefaultParamMap", "[", "newParam", "]", "=", "self", ".", "_defaultParamMap", "[", "param", "]", "if", "param", "in", "self", ".", "_paramMap", ":", "newParamMap", "[", "newParam", "]", "=", "self", ".", "_paramMap", "[", "param", "]", "param", ".", "parent", "=", "newUid", "self", ".", "_defaultParamMap", "=", "newDefaultParamMap", "self", ".", "_paramMap", "=", "newParamMap", "return", "self" ]
Changes the uid of this instance. This updates both the stored uid and the parent uid of params and param maps. This is used by persistence (loading). :param newUid: new uid to use, which is converted to unicode :return: same instance, but with the uid and Param.parent values updated, including within param maps
[ "Changes", "the", "uid", "of", "this", "instance", ".", "This", "updates", "both", "the", "stored", "uid", "and", "the", "parent", "uid", "of", "params", "and", "param", "maps", ".", "This", "is", "used", "by", "persistence", "(", "loading", ")", ".", ":", "param", "newUid", ":", "new", "uid", "to", "use", "which", "is", "converted", "to", "unicode", ":", "return", ":", "same", "instance", "but", "with", "the", "uid", "and", "Param", ".", "parent", "values", "updated", "including", "within", "param", "maps" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L488-L511
apache/spark
python/pyspark/ml/common.py
_to_java_object_rdd
def _to_java_object_rdd(rdd): """ Return an JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
python
def _to_java_object_rdd(rdd): """ Return an JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. """ rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer())) return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
[ "def", "_to_java_object_rdd", "(", "rdd", ")", ":", "rdd", "=", "rdd", ".", "_reserialize", "(", "AutoBatchedSerializer", "(", "PickleSerializer", "(", ")", ")", ")", "return", "rdd", ".", "ctx", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "python", ".", "MLSerDe", ".", "pythonToJava", "(", "rdd", ".", "_jrdd", ",", "True", ")" ]
Return an JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not.
[ "Return", "an", "JavaRDD", "of", "Object", "by", "unpickling" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/common.py#L60-L67
apache/spark
python/pyspark/broadcast.py
Broadcast.value
def value(self): """ Return the broadcasted value """ if not hasattr(self, "_value") and self._path is not None: # we only need to decrypt it here when encryption is enabled and # if its on the driver, since executor decryption is handled already if self._sc is not None and self._sc._encryption_enabled: port, auth_secret = self._python_broadcast.setupDecryptionServer() (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret) self._python_broadcast.waitTillBroadcastDataSent() return self.load(decrypted_sock_file) else: self._value = self.load_from_path(self._path) return self._value
python
def value(self): """ Return the broadcasted value """ if not hasattr(self, "_value") and self._path is not None: # we only need to decrypt it here when encryption is enabled and # if its on the driver, since executor decryption is handled already if self._sc is not None and self._sc._encryption_enabled: port, auth_secret = self._python_broadcast.setupDecryptionServer() (decrypted_sock_file, _) = local_connect_and_auth(port, auth_secret) self._python_broadcast.waitTillBroadcastDataSent() return self.load(decrypted_sock_file) else: self._value = self.load_from_path(self._path) return self._value
[ "def", "value", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_value\"", ")", "and", "self", ".", "_path", "is", "not", "None", ":", "# we only need to decrypt it here when encryption is enabled and", "# if its on the driver, since executor decryption is handled already", "if", "self", ".", "_sc", "is", "not", "None", "and", "self", ".", "_sc", ".", "_encryption_enabled", ":", "port", ",", "auth_secret", "=", "self", ".", "_python_broadcast", ".", "setupDecryptionServer", "(", ")", "(", "decrypted_sock_file", ",", "_", ")", "=", "local_connect_and_auth", "(", "port", ",", "auth_secret", ")", "self", ".", "_python_broadcast", ".", "waitTillBroadcastDataSent", "(", ")", "return", "self", ".", "load", "(", "decrypted_sock_file", ")", "else", ":", "self", ".", "_value", "=", "self", ".", "load_from_path", "(", "self", ".", "_path", ")", "return", "self", ".", "_value" ]
Return the broadcasted value
[ "Return", "the", "broadcasted", "value" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L135-L148
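A brief sketch of reading a broadcast's value from the driver and from executor-side closures, assuming a local pyspark installation:

from pyspark import SparkContext

sc = SparkContext.getOrCreate()
lookup = sc.broadcast({"a": 1, "b": 2})

# .value lazily loads the pickled data on first access (and, with I/O encryption
# enabled, decrypts it on the driver as described in the record above).
rdd = sc.parallelize(["a", "b", "a"])
print(rdd.map(lambda k: lookup.value[k]).sum())   # 4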
apache/spark
python/pyspark/broadcast.py
Broadcast.unpersist
def unpersist(self, blocking=False): """ Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed """ if self._jbroadcast is None: raise Exception("Broadcast can only be unpersisted in driver") self._jbroadcast.unpersist(blocking)
python
def unpersist(self, blocking=False): """ Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed """ if self._jbroadcast is None: raise Exception("Broadcast can only be unpersisted in driver") self._jbroadcast.unpersist(blocking)
[ "def", "unpersist", "(", "self", ",", "blocking", "=", "False", ")", ":", "if", "self", ".", "_jbroadcast", "is", "None", ":", "raise", "Exception", "(", "\"Broadcast can only be unpersisted in driver\"", ")", "self", ".", "_jbroadcast", ".", "unpersist", "(", "blocking", ")" ]
Delete cached copies of this broadcast on the executors. If the broadcast is used after this is called, it will need to be re-sent to each executor. :param blocking: Whether to block until unpersisting has completed
[ "Delete", "cached", "copies", "of", "this", "broadcast", "on", "the", "executors", ".", "If", "the", "broadcast", "is", "used", "after", "this", "is", "called", "it", "will", "need", "to", "be", "re", "-", "sent", "to", "each", "executor", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L150-L160
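A sketch of unpersist(): cached executor copies are dropped, but the variable remains usable and is simply re-sent on the next use. Assumes a local pyspark setup:

from pyspark import SparkContext

sc = SparkContext.getOrCreate()
b = sc.broadcast(list(range(1000)))
_ = sc.parallelize([0]).map(lambda i: len(b.value)).collect()

b.unpersist(blocking=True)    # drop cached copies on the executors

# Still valid: the data is re-broadcast as needed on the next job.
_ = sc.parallelize([0]).map(lambda i: len(b.value)).collect()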
apache/spark
python/pyspark/broadcast.py
Broadcast.destroy
def destroy(self, blocking=False): """ Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """ if self._jbroadcast is None: raise Exception("Broadcast can only be destroyed in driver") self._jbroadcast.destroy(blocking) os.unlink(self._path)
python
def destroy(self, blocking=False): """ Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted. """ if self._jbroadcast is None: raise Exception("Broadcast can only be destroyed in driver") self._jbroadcast.destroy(blocking) os.unlink(self._path)
[ "def", "destroy", "(", "self", ",", "blocking", "=", "False", ")", ":", "if", "self", ".", "_jbroadcast", "is", "None", ":", "raise", "Exception", "(", "\"Broadcast can only be destroyed in driver\"", ")", "self", ".", "_jbroadcast", ".", "destroy", "(", "blocking", ")", "os", ".", "unlink", "(", "self", ".", "_path", ")" ]
Destroy all data and metadata related to this broadcast variable. Use this with caution; once a broadcast variable has been destroyed, it cannot be used again. .. versionchanged:: 3.0.0 Added optional argument `blocking` to specify whether to block until all blocks are deleted.
[ "Destroy", "all", "data", "and", "metadata", "related", "to", "this", "broadcast", "variable", ".", "Use", "this", "with", "caution", ";", "once", "a", "broadcast", "variable", "has", "been", "destroyed", "it", "cannot", "be", "used", "again", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/broadcast.py#L162-L175
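A sketch contrasting destroy() with unpersist(): after destroy() all data and metadata are released (including the local spill file) and the broadcast cannot be used again. Assumes a local pyspark setup:

from pyspark import SparkContext

sc = SparkContext.getOrCreate()
b = sc.broadcast([1, 2, 3])
print(b.value)    # [1, 2, 3]

# Unlike unpersist(), this is irreversible; any later use of `b` in a job will fail.
b.destroy()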
apache/spark
python/pyspark/sql/udf.py
UserDefinedFunction._wrapped
def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """ # It is possible for a callable instance without __name__ attribute or/and # __module__ attribute to be wrapped here. For example, functools.partial. In this case, # we should avoid wrapping the attributes from the wrapped function to the wrapper # function. So, we take out these attribute names from the default names to set and # then manually assign it after being wrapped. assignments = tuple( a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__') @functools.wraps(self.func, assigned=assignments) def wrapper(*args): return self(*args) wrapper.__name__ = self._name wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__') else self.func.__class__.__module__) wrapper.func = self.func wrapper.returnType = self.returnType wrapper.evalType = self.evalType wrapper.deterministic = self.deterministic wrapper.asNondeterministic = functools.wraps( self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped()) return wrapper
python
def _wrapped(self): """ Wrap this udf with a function and attach docstring from func """ # It is possible for a callable instance without __name__ attribute or/and # __module__ attribute to be wrapped here. For example, functools.partial. In this case, # we should avoid wrapping the attributes from the wrapped function to the wrapper # function. So, we take out these attribute names from the default names to set and # then manually assign it after being wrapped. assignments = tuple( a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__') @functools.wraps(self.func, assigned=assignments) def wrapper(*args): return self(*args) wrapper.__name__ = self._name wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__') else self.func.__class__.__module__) wrapper.func = self.func wrapper.returnType = self.returnType wrapper.evalType = self.evalType wrapper.deterministic = self.deterministic wrapper.asNondeterministic = functools.wraps( self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped()) return wrapper
[ "def", "_wrapped", "(", "self", ")", ":", "# It is possible for a callable instance without __name__ attribute or/and", "# __module__ attribute to be wrapped here. For example, functools.partial. In this case,", "# we should avoid wrapping the attributes from the wrapped function to the wrapper", "# function. So, we take out these attribute names from the default names to set and", "# then manually assign it after being wrapped.", "assignments", "=", "tuple", "(", "a", "for", "a", "in", "functools", ".", "WRAPPER_ASSIGNMENTS", "if", "a", "!=", "'__name__'", "and", "a", "!=", "'__module__'", ")", "@", "functools", ".", "wraps", "(", "self", ".", "func", ",", "assigned", "=", "assignments", ")", "def", "wrapper", "(", "*", "args", ")", ":", "return", "self", "(", "*", "args", ")", "wrapper", ".", "__name__", "=", "self", ".", "_name", "wrapper", ".", "__module__", "=", "(", "self", ".", "func", ".", "__module__", "if", "hasattr", "(", "self", ".", "func", ",", "'__module__'", ")", "else", "self", ".", "func", ".", "__class__", ".", "__module__", ")", "wrapper", ".", "func", "=", "self", ".", "func", "wrapper", ".", "returnType", "=", "self", ".", "returnType", "wrapper", ".", "evalType", "=", "self", ".", "evalType", "wrapper", ".", "deterministic", "=", "self", ".", "deterministic", "wrapper", ".", "asNondeterministic", "=", "functools", ".", "wraps", "(", "self", ".", "asNondeterministic", ")", "(", "lambda", ":", "self", ".", "asNondeterministic", "(", ")", ".", "_wrapped", "(", ")", ")", "return", "wrapper" ]
Wrap this udf with a function and attach docstring from func
[ "Wrap", "this", "udf", "with", "a", "function", "and", "attach", "docstring", "from", "func" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L177-L204
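The wrapper produced by _wrapped() is what udf() hands back to users: a plain function carrying the original callable plus its metadata. A quick sketch of the attributes it exposes (assumes pyspark; exact reprs may differ between versions, and no Spark job runs until the udf is actually applied to a DataFrame):

from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

slen = udf(lambda s: len(s), IntegerType())   # returns the wrapper from _wrapped()

print(slen.func)             # the original lambda
print(slen.returnType)       # IntegerType
print(slen.deterministic)    # True by default
slen_nd = slen.asNondeterministic()           # re-wrapped, now marked nondeterministic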
apache/spark
python/pyspark/sql/udf.py
UDFRegistration.register
def register(self, name, f, returnType=None): """Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0. 
""" # This is to check whether the input function is from a user-defined function or # Python function. if hasattr(f, 'asNondeterministic'): if returnType is not None: raise TypeError( "Invalid returnType: data type can not be specified when f is" "a user-defined function, but got %s." % returnType) if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF, PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError( "Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or " "SQL_GROUPED_AGG_PANDAS_UDF") register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name, evalType=f.evalType, deterministic=f.deterministic) return_udf = f else: if returnType is None: returnType = StringType() register_udf = UserDefinedFunction(f, returnType=returnType, name=name, evalType=PythonEvalType.SQL_BATCHED_UDF) return_udf = register_udf._wrapped() self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf) return return_udf
python
def register(self, name, f, returnType=None): """Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0. 
""" # This is to check whether the input function is from a user-defined function or # Python function. if hasattr(f, 'asNondeterministic'): if returnType is not None: raise TypeError( "Invalid returnType: data type can not be specified when f is" "a user-defined function, but got %s." % returnType) if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF, PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError( "Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or " "SQL_GROUPED_AGG_PANDAS_UDF") register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name, evalType=f.evalType, deterministic=f.deterministic) return_udf = f else: if returnType is None: returnType = StringType() register_udf = UserDefinedFunction(f, returnType=returnType, name=name, evalType=PythonEvalType.SQL_BATCHED_UDF) return_udf = register_udf._wrapped() self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf) return return_udf
[ "def", "register", "(", "self", ",", "name", ",", "f", ",", "returnType", "=", "None", ")", ":", "# This is to check whether the input function is from a user-defined function or", "# Python function.", "if", "hasattr", "(", "f", ",", "'asNondeterministic'", ")", ":", "if", "returnType", "is", "not", "None", ":", "raise", "TypeError", "(", "\"Invalid returnType: data type can not be specified when f is\"", "\"a user-defined function, but got %s.\"", "%", "returnType", ")", "if", "f", ".", "evalType", "not", "in", "[", "PythonEvalType", ".", "SQL_BATCHED_UDF", ",", "PythonEvalType", ".", "SQL_SCALAR_PANDAS_UDF", ",", "PythonEvalType", ".", "SQL_GROUPED_AGG_PANDAS_UDF", "]", ":", "raise", "ValueError", "(", "\"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF or \"", "\"SQL_GROUPED_AGG_PANDAS_UDF\"", ")", "register_udf", "=", "UserDefinedFunction", "(", "f", ".", "func", ",", "returnType", "=", "f", ".", "returnType", ",", "name", "=", "name", ",", "evalType", "=", "f", ".", "evalType", ",", "deterministic", "=", "f", ".", "deterministic", ")", "return_udf", "=", "f", "else", ":", "if", "returnType", "is", "None", ":", "returnType", "=", "StringType", "(", ")", "register_udf", "=", "UserDefinedFunction", "(", "f", ",", "returnType", "=", "returnType", ",", "name", "=", "name", ",", "evalType", "=", "PythonEvalType", ".", "SQL_BATCHED_UDF", ")", "return_udf", "=", "register_udf", ".", "_wrapped", "(", ")", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerPython", "(", "name", ",", "register_udf", ".", "_judf", ")", "return", "return_udf" ]
Register a Python function (including lambda function) or a user-defined function as a SQL function. :param name: name of the user-defined function in SQL statements. :param f: a Python function, or a user-defined function. The user-defined function can be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and :meth:`pyspark.sql.functions.pandas_udf`. :param returnType: the return type of the registered user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :return: a user-defined function. To register a nondeterministic Python function, users need to first build a nondeterministic user-defined function for the Python function and then register it as a SQL function. `returnType` can be optionally specified when `f` is a Python function but not when `f` is a user-defined function. Please see below. 1. When `f` is a Python function: `returnType` defaults to string type and can be optionally specified. The produced object must match the specified type. In this case, this API works as if `register(name, f, returnType=StringType())`. >>> strlen = spark.udf.register("stringLengthString", lambda x: len(x)) >>> spark.sql("SELECT stringLengthString('test')").collect() [Row(stringLengthString(test)=u'4')] >>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect() [Row(stringLengthString(text)=u'3')] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] >>> from pyspark.sql.types import IntegerType >>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType()) >>> spark.sql("SELECT stringLengthInt('test')").collect() [Row(stringLengthInt(test)=4)] 2. When `f` is a user-defined function: Spark uses the return type of the given user-defined function as the return type of the registered user-defined function. `returnType` should not be specified. In this case, this API works as if `register(name, f)`. >>> from pyspark.sql.types import IntegerType >>> from pyspark.sql.functions import udf >>> slen = udf(lambda s: len(s), IntegerType()) >>> _ = spark.udf.register("slen", slen) >>> spark.sql("SELECT slen('test')").collect() [Row(slen(test)=4)] >>> import random >>> from pyspark.sql.functions import udf >>> from pyspark.sql.types import IntegerType >>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic() >>> new_random_udf = spark.udf.register("random_udf", random_udf) >>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP [Row(random_udf()=82)] >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP >>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP [Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)] >>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def sum_udf(v): ... return v.sum() ... >>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP >>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2" >>> spark.sql(q).collect() # doctest: +SKIP [Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)] .. note:: Registration for a user-defined function (case 2.) was added from Spark 2.3.0.
[ "Register", "a", "Python", "function", "(", "including", "lambda", "function", ")", "or", "a", "user", "-", "defined", "function", "as", "a", "SQL", "function", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L232-L341
apache/spark
python/pyspark/sql/udf.py
UDFRegistration.registerJavaFunction
def registerJavaFunction(self, name, javaClassName, returnType=None): """Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)] """ jdt = None if returnType is not None: if not isinstance(returnType, DataType): returnType = _parse_datatype_string(returnType) jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json()) self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
python
def registerJavaFunction(self, name, javaClassName, returnType=None): """Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)] """ jdt = None if returnType is not None: if not isinstance(returnType, DataType): returnType = _parse_datatype_string(returnType) jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json()) self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
[ "def", "registerJavaFunction", "(", "self", ",", "name", ",", "javaClassName", ",", "returnType", "=", "None", ")", ":", "jdt", "=", "None", "if", "returnType", "is", "not", "None", ":", "if", "not", "isinstance", "(", "returnType", ",", "DataType", ")", ":", "returnType", "=", "_parse_datatype_string", "(", "returnType", ")", "jdt", "=", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "parseDataType", "(", "returnType", ".", "json", "(", ")", ")", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerJava", "(", "name", ",", "javaClassName", ",", "jdt", ")" ]
Register a Java user-defined function as a SQL function. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not specified we would infer it via reflection. :param name: name of the user-defined function :param javaClassName: fully qualified name of java class :param returnType: the return type of the registered Java function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> spark.udf.registerJavaFunction( ... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType()) >>> spark.sql("SELECT javaStringLength('test')").collect() [Row(UDF:javaStringLength(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength") >>> spark.sql("SELECT javaStringLength2('test')").collect() [Row(UDF:javaStringLength2(test)=4)] >>> spark.udf.registerJavaFunction( ... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer") >>> spark.sql("SELECT javaStringLength3('test')").collect() [Row(UDF:javaStringLength3(test)=4)]
[ "Register", "a", "Java", "user", "-", "defined", "function", "as", "a", "SQL", "function", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L345-L378
apache/spark
python/pyspark/sql/udf.py
UDFRegistration.registerJavaUDAF
def registerJavaUDAF(self, name, javaClassName): """Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)] """ self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
python
def registerJavaUDAF(self, name, javaClassName): """Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)] """ self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
[ "def", "registerJavaUDAF", "(", "self", ",", "name", ",", "javaClassName", ")", ":", "self", ".", "sparkSession", ".", "_jsparkSession", ".", "udf", "(", ")", ".", "registerJavaUDAF", "(", "name", ",", "javaClassName", ")" ]
Register a Java user-defined aggregate function as a SQL function. :param name: name of the user-defined aggregate function :param javaClassName: fully qualified name of java class >>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg") >>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"]) >>> df.createOrReplaceTempView("df") >>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect() [Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
[ "Register", "a", "Java", "user", "-", "defined", "aggregate", "function", "as", "a", "SQL", "function", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L382-L395
apache/spark
python/pyspark/streaming/context.py
StreamingContext.getOrCreate
def getOrCreate(cls, checkpointPath, setupFunc): """ Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams """ cls._ensure_initialized() gw = SparkContext._gateway # Check whether valid checkpoint information exists in the given path ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath) if ssc_option.isEmpty(): ssc = setupFunc() ssc.checkpoint(checkpointPath) return ssc jssc = gw.jvm.JavaStreamingContext(ssc_option.get()) # If there is already an active instance of Python SparkContext use it, or create a new one if not SparkContext._active_spark_context: jsc = jssc.sparkContext() conf = SparkConf(_jconf=jsc.getConf()) SparkContext(conf=conf, gateway=gw, jsc=jsc) sc = SparkContext._active_spark_context # update ctx in serializer cls._transformerSerializer.ctx = sc return StreamingContext(sc, None, jssc)
python
def getOrCreate(cls, checkpointPath, setupFunc): """ Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams """ cls._ensure_initialized() gw = SparkContext._gateway # Check whether valid checkpoint information exists in the given path ssc_option = gw.jvm.StreamingContextPythonHelper().tryRecoverFromCheckpoint(checkpointPath) if ssc_option.isEmpty(): ssc = setupFunc() ssc.checkpoint(checkpointPath) return ssc jssc = gw.jvm.JavaStreamingContext(ssc_option.get()) # If there is already an active instance of Python SparkContext use it, or create a new one if not SparkContext._active_spark_context: jsc = jssc.sparkContext() conf = SparkConf(_jconf=jsc.getConf()) SparkContext(conf=conf, gateway=gw, jsc=jsc) sc = SparkContext._active_spark_context # update ctx in serializer cls._transformerSerializer.ctx = sc return StreamingContext(sc, None, jssc)
[ "def", "getOrCreate", "(", "cls", ",", "checkpointPath", ",", "setupFunc", ")", ":", "cls", ".", "_ensure_initialized", "(", ")", "gw", "=", "SparkContext", ".", "_gateway", "# Check whether valid checkpoint information exists in the given path", "ssc_option", "=", "gw", ".", "jvm", ".", "StreamingContextPythonHelper", "(", ")", ".", "tryRecoverFromCheckpoint", "(", "checkpointPath", ")", "if", "ssc_option", ".", "isEmpty", "(", ")", ":", "ssc", "=", "setupFunc", "(", ")", "ssc", ".", "checkpoint", "(", "checkpointPath", ")", "return", "ssc", "jssc", "=", "gw", ".", "jvm", ".", "JavaStreamingContext", "(", "ssc_option", ".", "get", "(", ")", ")", "# If there is already an active instance of Python SparkContext use it, or create a new one", "if", "not", "SparkContext", ".", "_active_spark_context", ":", "jsc", "=", "jssc", ".", "sparkContext", "(", ")", "conf", "=", "SparkConf", "(", "_jconf", "=", "jsc", ".", "getConf", "(", ")", ")", "SparkContext", "(", "conf", "=", "conf", ",", "gateway", "=", "gw", ",", "jsc", "=", "jsc", ")", "sc", "=", "SparkContext", ".", "_active_spark_context", "# update ctx in serializer", "cls", ".", "_transformerSerializer", ".", "ctx", "=", "sc", "return", "StreamingContext", "(", "sc", ",", "None", ",", "jssc", ")" ]
Either recreate a StreamingContext from checkpoint data or create a new StreamingContext. If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be recreated from the checkpoint data. If the data does not exist, then the provided setupFunc will be used to create a new context. @param checkpointPath: Checkpoint directory used in an earlier streaming program @param setupFunc: Function to create a new context and setup DStreams
[ "Either", "recreate", "a", "StreamingContext", "from", "checkpoint", "data", "or", "create", "a", "new", "StreamingContext", ".", "If", "checkpoint", "data", "exists", "in", "the", "provided", "checkpointPath", "then", "StreamingContext", "will", "be", "recreated", "from", "the", "checkpoint", "data", ".", "If", "the", "data", "does", "not", "exist", "then", "the", "provided", "setupFunc", "will", "be", "used", "to", "create", "a", "new", "context", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L88-L120
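A sketch of the usual checkpoint-recovery pattern built on getOrCreate(); the checkpoint directory, host, and port below are placeholders, and a local pyspark installation is assumed:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

CHECKPOINT_DIR = "/tmp/streaming-checkpoint"   # illustrative path

def create_context():
    # Only called when no valid checkpoint exists; getOrCreate sets the
    # checkpoint directory on the new context itself.
    sc = SparkContext.getOrCreate()
    ssc = StreamingContext(sc, 5)                      # 5-second batches
    lines = ssc.socketTextStream("localhost", 9999)    # illustrative source
    lines.count().pprint()
    return ssc

ssc = StreamingContext.getOrCreate(CHECKPOINT_DIR, create_context)
ssc.start()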
apache/spark
python/pyspark/streaming/context.py
StreamingContext.getActive
def getActive(cls): """ Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None. """ activePythonContext = cls._activeContext if activePythonContext is not None: # Verify that the current running Java StreamingContext is active and is the same one # backing the supposedly active Python context activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode() activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive() if activeJvmContextOption.isEmpty(): cls._activeContext = None elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId: cls._activeContext = None raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext " "backing the action Python StreamingContext. This is unexpected.") return cls._activeContext
python
def getActive(cls): """ Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None. """ activePythonContext = cls._activeContext if activePythonContext is not None: # Verify that the current running Java StreamingContext is active and is the same one # backing the supposedly active Python context activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode() activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive() if activeJvmContextOption.isEmpty(): cls._activeContext = None elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId: cls._activeContext = None raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext " "backing the action Python StreamingContext. This is unexpected.") return cls._activeContext
[ "def", "getActive", "(", "cls", ")", ":", "activePythonContext", "=", "cls", ".", "_activeContext", "if", "activePythonContext", "is", "not", "None", ":", "# Verify that the current running Java StreamingContext is active and is the same one", "# backing the supposedly active Python context", "activePythonContextJavaId", "=", "activePythonContext", ".", "_jssc", ".", "ssc", "(", ")", ".", "hashCode", "(", ")", "activeJvmContextOption", "=", "activePythonContext", ".", "_jvm", ".", "StreamingContext", ".", "getActive", "(", ")", "if", "activeJvmContextOption", ".", "isEmpty", "(", ")", ":", "cls", ".", "_activeContext", "=", "None", "elif", "activeJvmContextOption", ".", "get", "(", ")", ".", "hashCode", "(", ")", "!=", "activePythonContextJavaId", ":", "cls", ".", "_activeContext", "=", "None", "raise", "Exception", "(", "\"JVM's active JavaStreamingContext is not the JavaStreamingContext \"", "\"backing the action Python StreamingContext. This is unexpected.\"", ")", "return", "cls", ".", "_activeContext" ]
Return either the currently active StreamingContext (i.e., if there is a context started but not stopped) or None.
[ "Return", "either", "the", "currently", "active", "StreamingContext", "(", "i", ".", "e", ".", "if", "there", "is", "a", "context", "started", "but", "not", "stopped", ")", "or", "None", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L123-L141
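A small sketch of getActive() before and after starting a context, assuming local pyspark; queueStream is used only so the context has an output operation and can start:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

print(StreamingContext.getActive())   # None -- nothing started yet

sc = SparkContext.getOrCreate()
ssc = StreamingContext(sc, 1)
ssc.queueStream([sc.parallelize([1, 2, 3])]).pprint()
ssc.start()

print(StreamingContext.getActive() is ssc)   # True while the context is running
ssc.stop(stopSparkContext=False)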
apache/spark
python/pyspark/streaming/context.py
StreamingContext.getActiveOrCreate
def getActiveOrCreate(cls, checkpointPath, setupFunc): """ Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams """ if setupFunc is None: raise Exception("setupFunc cannot be None") activeContext = cls.getActive() if activeContext is not None: return activeContext elif checkpointPath is not None: return cls.getOrCreate(checkpointPath, setupFunc) else: return setupFunc()
python
def getActiveOrCreate(cls, checkpointPath, setupFunc): """ Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams """ if setupFunc is None: raise Exception("setupFunc cannot be None") activeContext = cls.getActive() if activeContext is not None: return activeContext elif checkpointPath is not None: return cls.getOrCreate(checkpointPath, setupFunc) else: return setupFunc()
[ "def", "getActiveOrCreate", "(", "cls", ",", "checkpointPath", ",", "setupFunc", ")", ":", "if", "setupFunc", "is", "None", ":", "raise", "Exception", "(", "\"setupFunc cannot be None\"", ")", "activeContext", "=", "cls", ".", "getActive", "(", ")", "if", "activeContext", "is", "not", "None", ":", "return", "activeContext", "elif", "checkpointPath", "is", "not", "None", ":", "return", "cls", ".", "getOrCreate", "(", "checkpointPath", ",", "setupFunc", ")", "else", ":", "return", "setupFunc", "(", ")" ]
Either return the active StreamingContext (i.e. currently started but not stopped), or recreate a StreamingContext from checkpoint data or create a new StreamingContext using the provided setupFunc function. If the checkpointPath is None or does not contain valid checkpoint data, then setupFunc will be called to create a new context and setup DStreams. @param checkpointPath: Checkpoint directory used in an earlier streaming program. Can be None if the intention is to always create a new context when there is no active context. @param setupFunc: Function to create a new JavaStreamingContext and setup DStreams
[ "Either", "return", "the", "active", "StreamingContext", "(", "i", ".", "e", ".", "currently", "started", "but", "not", "stopped", ")", "or", "recreate", "a", "StreamingContext", "from", "checkpoint", "data", "or", "create", "a", "new", "StreamingContext", "using", "the", "provided", "setupFunc", "function", ".", "If", "the", "checkpointPath", "is", "None", "or", "does", "not", "contain", "valid", "checkpoint", "data", "then", "setupFunc", "will", "be", "called", "to", "create", "a", "new", "context", "and", "setup", "DStreams", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L144-L166
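A minimal usage sketch for this classmethod, assuming PySpark is installed; the checkpoint directory, port, and batch interval below are illustrative placeholders, not values taken from the record:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

def create_context():
    # Called only when there is no active context and no usable checkpoint data.
    sc = SparkContext(appName="ActiveOrCreateExample")
    ssc = StreamingContext(sc, 5)  # 5-second batches
    ssc.checkpoint("/tmp/streaming-checkpoint")  # hypothetical checkpoint path
    ssc.socketTextStream("localhost", 9999).count().pprint()
    return ssc

# Returns the active context if one is running, otherwise recovers from the
# checkpoint directory, otherwise calls create_context() to build a fresh one.
ssc = StreamingContext.getActiveOrCreate("/tmp/streaming-checkpoint", create_context)
ssc.start()
ssc.awaitTermination()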
apache/spark
python/pyspark/streaming/context.py
StreamingContext.awaitTermination
def awaitTermination(self, timeout=None):
    """
    Wait for the execution to stop.

    @param timeout: time to wait in seconds
    """
    if timeout is None:
        self._jssc.awaitTermination()
    else:
        self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
python
def awaitTermination(self, timeout=None):
    """
    Wait for the execution to stop.

    @param timeout: time to wait in seconds
    """
    if timeout is None:
        self._jssc.awaitTermination()
    else:
        self._jssc.awaitTerminationOrTimeout(int(timeout * 1000))
[ "def", "awaitTermination", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "self", ".", "_jssc", ".", "awaitTermination", "(", ")", "else", ":", "self", ".", "_jssc", ".", "awaitTerminationOrTimeout", "(", "int", "(", "timeout", "*", "1000", ")", ")" ]
Wait for the execution to stop. @param timeout: time to wait in seconds
[ "Wait", "for", "the", "execution", "to", "stop", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L182-L191
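A short sketch of how the timeout variant is typically used; the queue-backed stream here is only a stand-in source:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="AwaitTerminationExample")
ssc = StreamingContext(sc, 1)
ssc.queueStream([sc.parallelize([1, 2, 3])]).pprint()
ssc.start()
# Block the driver for at most 10 seconds, then fall through and shut down.
ssc.awaitTermination(timeout=10)
ssc.stop()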
apache/spark
python/pyspark/streaming/context.py
StreamingContext.stop
def stop(self, stopSparkContext=True, stopGraceFully=False):
    """
    Stop the execution of the streams, with option of ensuring all
    received data has been processed.

    @param stopSparkContext: Stop the associated SparkContext or not
    @param stopGracefully: Stop gracefully by waiting for the processing
                           of all received data to be completed
    """
    self._jssc.stop(stopSparkContext, stopGraceFully)
    StreamingContext._activeContext = None
    if stopSparkContext:
        self._sc.stop()
python
def stop(self, stopSparkContext=True, stopGraceFully=False):
    """
    Stop the execution of the streams, with option of ensuring all
    received data has been processed.

    @param stopSparkContext: Stop the associated SparkContext or not
    @param stopGracefully: Stop gracefully by waiting for the processing
                           of all received data to be completed
    """
    self._jssc.stop(stopSparkContext, stopGraceFully)
    StreamingContext._activeContext = None
    if stopSparkContext:
        self._sc.stop()
[ "def", "stop", "(", "self", ",", "stopSparkContext", "=", "True", ",", "stopGraceFully", "=", "False", ")", ":", "self", ".", "_jssc", ".", "stop", "(", "stopSparkContext", ",", "stopGraceFully", ")", "StreamingContext", ".", "_activeContext", "=", "None", "if", "stopSparkContext", ":", "self", ".", "_sc", ".", "stop", "(", ")" ]
Stop the execution of the streams, with option of ensuring all
received data has been processed.

@param stopSparkContext: Stop the associated SparkContext or not
@param stopGracefully: Stop gracefully by waiting for the processing
                       of all received data to be completed
[ "Stop", "the", "execution", "of", "the", "streams", "with", "option", "of", "ensuring", "all", "received", "data", "has", "been", "processed", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L203-L215
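A hedged sketch showing the common pattern of stopping the streams gracefully while keeping the SparkContext alive for follow-up batch work:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="StopExample")
ssc = StreamingContext(sc, 1)
ssc.queueStream([sc.parallelize(range(5))]).pprint()
ssc.start()
ssc.awaitTermination(timeout=5)
# Drain in-flight data first and leave the SparkContext running.
ssc.stop(stopSparkContext=False, stopGraceFully=True)
sc.stop()  # stop the SparkContext explicitly once batch work is done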
apache/spark
python/pyspark/streaming/context.py
StreamingContext.socketTextStream
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2):
    """
    Create an input from TCP source hostname:port. Data is received using
    a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n`` delimited
    lines.

    @param hostname:      Hostname to connect to for receiving data
    @param port:          Port to connect to for receiving data
    @param storageLevel:  Storage level to use for storing the received objects
    """
    jlevel = self._sc._getJavaStorageLevel(storageLevel)
    return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
                   UTF8Deserializer())
python
def socketTextStream(self, hostname, port, storageLevel=StorageLevel.MEMORY_AND_DISK_2):
    """
    Create an input from TCP source hostname:port. Data is received using
    a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n`` delimited
    lines.

    @param hostname:      Hostname to connect to for receiving data
    @param port:          Port to connect to for receiving data
    @param storageLevel:  Storage level to use for storing the received objects
    """
    jlevel = self._sc._getJavaStorageLevel(storageLevel)
    return DStream(self._jssc.socketTextStream(hostname, port, jlevel), self,
                   UTF8Deserializer())
[ "def", "socketTextStream", "(", "self", ",", "hostname", ",", "port", ",", "storageLevel", "=", "StorageLevel", ".", "MEMORY_AND_DISK_2", ")", ":", "jlevel", "=", "self", ".", "_sc", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "return", "DStream", "(", "self", ".", "_jssc", ".", "socketTextStream", "(", "hostname", ",", "port", ",", "jlevel", ")", ",", "self", ",", "UTF8Deserializer", "(", ")", ")" ]
Create an input from TCP source hostname:port. Data is received using
a TCP socket and receive byte is interpreted as UTF8 encoded ``\\n`` delimited lines.

@param hostname:      Hostname to connect to for receiving data
@param port:          Port to connect to for receiving data
@param storageLevel:  Storage level to use for storing the received objects
[ "Create", "an", "input", "from", "TCP", "source", "hostname", ":", "port", ".", "Data", "is", "received", "using", "a", "TCP", "socket", "and", "receive", "byte", "is", "interpreted", "as", "UTF8", "encoded", "\\\\", "n", "delimited", "lines", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L241-L253
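A word-count sketch over this source; the hostname and port are placeholders (for example, feed the socket locally with `nc -lk 9999`):

from pyspark import SparkContext, StorageLevel
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="SocketWordCount")
ssc = StreamingContext(sc, 2)
lines = ssc.socketTextStream("localhost", 9999,
                             storageLevel=StorageLevel.MEMORY_AND_DISK_2)
counts = (lines.flatMap(lambda line: line.split(" "))
               .map(lambda word: (word, 1))
               .reduceByKey(lambda a, b: a + b))
counts.pprint()
ssc.start()
ssc.awaitTermination()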
apache/spark
python/pyspark/streaming/context.py
StreamingContext.textFileStream
def textFileStream(self, directory):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as text files. Files must be written to the
    monitored directory by "moving" them from another location within the same
    file system. File names starting with . are ignored.
    The text files must be encoded as UTF-8.
    """
    return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
python
def textFileStream(self, directory):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as text files. Files must be written to the
    monitored directory by "moving" them from another location within the same
    file system. File names starting with . are ignored.
    The text files must be encoded as UTF-8.
    """
    return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
[ "def", "textFileStream", "(", "self", ",", "directory", ")", ":", "return", "DStream", "(", "self", ".", "_jssc", ".", "textFileStream", "(", "directory", ")", ",", "self", ",", "UTF8Deserializer", "(", ")", ")" ]
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8.
[ "Create", "an", "input", "stream", "that", "monitors", "a", "Hadoop", "-", "compatible", "file", "system", "for", "new", "files", "and", "reads", "them", "as", "text", "files", ".", "Files", "must", "be", "wrriten", "to", "the", "monitored", "directory", "by", "moving", "them", "from", "another", "location", "within", "the", "same", "file", "system", ".", "File", "names", "starting", "with", ".", "are", "ignored", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L255-L263
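A small sketch; the monitored directory is a placeholder, and new files must be moved into it atomically for the stream to pick them up:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="TextFileStreamExample")
ssc = StreamingContext(sc, 10)
# Each batch reads only files that appeared in the directory since the last batch.
lines = ssc.textFileStream("hdfs:///data/incoming")
lines.count().pprint()
ssc.start()
ssc.awaitTermination()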
apache/spark
python/pyspark/streaming/context.py
StreamingContext.binaryRecordsStream
def binaryRecordsStream(self, directory, recordLength):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as flat binary files with records of
    fixed length. Files must be written to the monitored directory by "moving"
    them from another location within the same file system.
    File names starting with . are ignored.

    @param directory:       Directory to load data from
    @param recordLength:    Length of each record in bytes
    """
    return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
                   NoOpSerializer())
python
def binaryRecordsStream(self, directory, recordLength):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as flat binary files with records of
    fixed length. Files must be written to the monitored directory by "moving"
    them from another location within the same file system.
    File names starting with . are ignored.

    @param directory:       Directory to load data from
    @param recordLength:    Length of each record in bytes
    """
    return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
                   NoOpSerializer())
[ "def", "binaryRecordsStream", "(", "self", ",", "directory", ",", "recordLength", ")", ":", "return", "DStream", "(", "self", ".", "_jssc", ".", "binaryRecordsStream", "(", "directory", ",", "recordLength", ")", ",", "self", ",", "NoOpSerializer", "(", ")", ")" ]
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.

@param directory:       Directory to load data from
@param recordLength:    Length of each record in bytes
[ "Create", "an", "input", "stream", "that", "monitors", "a", "Hadoop", "-", "compatible", "file", "system", "for", "new", "files", "and", "reads", "them", "as", "flat", "binary", "files", "with", "records", "of", "fixed", "length", ".", "Files", "must", "be", "written", "to", "the", "monitored", "directory", "by", "moving", "them", "from", "another", "location", "within", "the", "same", "file", "system", ".", "File", "names", "starting", "with", ".", "are", "ignored", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L265-L277
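A sketch under the assumption that each fixed-length record is an 8-byte little-endian double; the directory and record layout are hypothetical:

import struct

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="BinaryRecordsExample")
ssc = StreamingContext(sc, 10)
records = ssc.binaryRecordsStream("/data/binary-incoming", recordLength=8)
# Each element of the DStream is one raw 8-byte record.
values = records.map(lambda rec: struct.unpack("<d", rec)[0])
values.pprint()
ssc.start()
ssc.awaitTermination()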
apache/spark
python/pyspark/streaming/context.py
StreamingContext.queueStream
def queueStream(self, rdds, oneAtATime=True, default=None):
    """
    Create an input stream from a queue of RDDs or list. In each batch,
    it will process either one or all of the RDDs returned by the queue.

    .. note:: Changes to the queue after the stream is created will not be recognized.

    @param rdds:       Queue of RDDs
    @param oneAtATime: pick one rdd each time or pick all of them once.
    @param default:    The default rdd if no more in rdds
    """
    if default and not isinstance(default, RDD):
        default = self._sc.parallelize(default)

    if not rdds and default:
        rdds = [rdds]

    if rdds and not isinstance(rdds[0], RDD):
        rdds = [self._sc.parallelize(input) for input in rdds]
    self._check_serializers(rdds)

    queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
    if default:
        default = default._reserialize(rdds[0]._jrdd_deserializer)
        jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
    else:
        jdstream = self._jssc.queueStream(queue, oneAtATime)
    return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
python
def queueStream(self, rdds, oneAtATime=True, default=None):
    """
    Create an input stream from a queue of RDDs or list. In each batch,
    it will process either one or all of the RDDs returned by the queue.

    .. note:: Changes to the queue after the stream is created will not be recognized.

    @param rdds:       Queue of RDDs
    @param oneAtATime: pick one rdd each time or pick all of them once.
    @param default:    The default rdd if no more in rdds
    """
    if default and not isinstance(default, RDD):
        default = self._sc.parallelize(default)

    if not rdds and default:
        rdds = [rdds]

    if rdds and not isinstance(rdds[0], RDD):
        rdds = [self._sc.parallelize(input) for input in rdds]
    self._check_serializers(rdds)

    queue = self._jvm.PythonDStream.toRDDQueue([r._jrdd for r in rdds])
    if default:
        default = default._reserialize(rdds[0]._jrdd_deserializer)
        jdstream = self._jssc.queueStream(queue, oneAtATime, default._jrdd)
    else:
        jdstream = self._jssc.queueStream(queue, oneAtATime)
    return DStream(jdstream, self, rdds[0]._jrdd_deserializer)
[ "def", "queueStream", "(", "self", ",", "rdds", ",", "oneAtATime", "=", "True", ",", "default", "=", "None", ")", ":", "if", "default", "and", "not", "isinstance", "(", "default", ",", "RDD", ")", ":", "default", "=", "self", ".", "_sc", ".", "parallelize", "(", "default", ")", "if", "not", "rdds", "and", "default", ":", "rdds", "=", "[", "rdds", "]", "if", "rdds", "and", "not", "isinstance", "(", "rdds", "[", "0", "]", ",", "RDD", ")", ":", "rdds", "=", "[", "self", ".", "_sc", ".", "parallelize", "(", "input", ")", "for", "input", "in", "rdds", "]", "self", ".", "_check_serializers", "(", "rdds", ")", "queue", "=", "self", ".", "_jvm", ".", "PythonDStream", ".", "toRDDQueue", "(", "[", "r", ".", "_jrdd", "for", "r", "in", "rdds", "]", ")", "if", "default", ":", "default", "=", "default", ".", "_reserialize", "(", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ",", "default", ".", "_jrdd", ")", "else", ":", "jdstream", "=", "self", ".", "_jssc", ".", "queueStream", "(", "queue", ",", "oneAtATime", ")", "return", "DStream", "(", "jdstream", ",", "self", ",", "rdds", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Create an input stream from a queue of RDDs or list. In each batch,
it will process either one or all of the RDDs returned by the queue.

.. note:: Changes to the queue after the stream is created will not be recognized.

@param rdds:       Queue of RDDs
@param oneAtATime: pick one rdd each time or pick all of them once.
@param default:    The default rdd if no more in rdds
[ "Create", "an", "input", "stream", "from", "a", "queue", "of", "RDDs", "or", "list", ".", "In", "each", "batch", "it", "will", "process", "either", "one", "or", "all", "of", "the", "RDDs", "returned", "by", "the", "queue", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L286-L313
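A self-contained sketch that feeds the stream from pre-built RDDs, which is mainly useful for testing:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="QueueStreamExample")
ssc = StreamingContext(sc, 1)
# Three RDDs; with oneAtATime=True each batch consumes exactly one of them,
# and the default RDD is used once the queue is exhausted.
rdd_queue = [sc.parallelize(range(i * 10, (i + 1) * 10)) for i in range(3)]
stream = ssc.queueStream(rdd_queue, oneAtATime=True,
                         default=sc.parallelize([-1]))
stream.pprint()
ssc.start()
ssc.awaitTermination(timeout=10)
ssc.stop()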
apache/spark
python/pyspark/streaming/context.py
StreamingContext.transform
def transform(self, dstreams, transformFunc):
    """
    Create a new DStream in which each RDD is generated by applying
    a function on RDDs of the DStreams. The order of the JavaRDDs in the
    transform function parameter will be the same as the order of
    corresponding DStreams in the list.
    """
    jdstreams = [d._jdstream for d in dstreams]
    # change the final serializer to sc.serializer
    func = TransformFunction(self._sc,
                             lambda t, *rdds: transformFunc(rdds),
                             *[d._jrdd_deserializer for d in dstreams])
    jfunc = self._jvm.TransformFunction(func)
    jdstream = self._jssc.transform(jdstreams, jfunc)
    return DStream(jdstream, self, self._sc.serializer)
python
def transform(self, dstreams, transformFunc):
    """
    Create a new DStream in which each RDD is generated by applying
    a function on RDDs of the DStreams. The order of the JavaRDDs in the
    transform function parameter will be the same as the order of
    corresponding DStreams in the list.
    """
    jdstreams = [d._jdstream for d in dstreams]
    # change the final serializer to sc.serializer
    func = TransformFunction(self._sc,
                             lambda t, *rdds: transformFunc(rdds),
                             *[d._jrdd_deserializer for d in dstreams])
    jfunc = self._jvm.TransformFunction(func)
    jdstream = self._jssc.transform(jdstreams, jfunc)
    return DStream(jdstream, self, self._sc.serializer)
[ "def", "transform", "(", "self", ",", "dstreams", ",", "transformFunc", ")", ":", "jdstreams", "=", "[", "d", ".", "_jdstream", "for", "d", "in", "dstreams", "]", "# change the final serializer to sc.serializer", "func", "=", "TransformFunction", "(", "self", ".", "_sc", ",", "lambda", "t", ",", "*", "rdds", ":", "transformFunc", "(", "rdds", ")", ",", "*", "[", "d", ".", "_jrdd_deserializer", "for", "d", "in", "dstreams", "]", ")", "jfunc", "=", "self", ".", "_jvm", ".", "TransformFunction", "(", "func", ")", "jdstream", "=", "self", ".", "_jssc", ".", "transform", "(", "jdstreams", ",", "jfunc", ")", "return", "DStream", "(", "jdstream", ",", "self", ",", "self", ".", "_sc", ".", "serializer", ")" ]
Create a new DStream in which each RDD is generated by applying a function on RDDs of the DStreams. The order of the JavaRDDs in the transform function parameter will be the same as the order of corresponding DStreams in the list.
[ "Create", "a", "new", "DStream", "in", "which", "each", "RDD", "is", "generated", "by", "applying", "a", "function", "on", "RDDs", "of", "the", "DStreams", ".", "The", "order", "of", "the", "JavaRDDs", "in", "the", "transform", "function", "parameter", "will", "be", "the", "same", "as", "the", "order", "of", "corresponding", "DStreams", "in", "the", "list", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L315-L329
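A sketch that intersects the per-batch RDDs of two DStreams; the queue-backed streams are stand-in sources:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="TransformExample")
ssc = StreamingContext(sc, 1)
s1 = ssc.queueStream([sc.parallelize([1, 2, 3])])
s2 = ssc.queueStream([sc.parallelize([3, 4, 5])])

def intersect(rdds):
    # rdds arrive in the same order as the DStreams passed to transform().
    first, second = rdds
    return first.intersection(second)

ssc.transform([s1, s2], intersect).pprint()
ssc.start()
ssc.awaitTermination(timeout=5)
ssc.stop()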
apache/spark
python/pyspark/streaming/context.py
StreamingContext.union
def union(self, *dstreams):
    """
    Create a unified DStream from multiple DStreams of the same
    type and same slide duration.
    """
    if not dstreams:
        raise ValueError("should have at least one DStream to union")
    if len(dstreams) == 1:
        return dstreams[0]
    if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
        raise ValueError("All DStreams should have same serializer")
    if len(set(s._slideDuration for s in dstreams)) > 1:
        raise ValueError("All DStreams should have same slide duration")
    cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
    jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
    for i in range(0, len(dstreams)):
        jdstreams[i] = dstreams[i]._jdstream
    return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
python
def union(self, *dstreams):
    """
    Create a unified DStream from multiple DStreams of the same
    type and same slide duration.
    """
    if not dstreams:
        raise ValueError("should have at least one DStream to union")
    if len(dstreams) == 1:
        return dstreams[0]
    if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
        raise ValueError("All DStreams should have same serializer")
    if len(set(s._slideDuration for s in dstreams)) > 1:
        raise ValueError("All DStreams should have same slide duration")
    cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
    jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
    for i in range(0, len(dstreams)):
        jdstreams[i] = dstreams[i]._jdstream
    return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Create a unified DStream from multiple DStreams of the same type and same slide duration.
[ "Create", "a", "unified", "DStream", "from", "multiple", "DStreams", "of", "the", "same", "type", "and", "same", "slide", "duration", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L331-L348
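A brief sketch; both inputs share the same serializer and slide duration, as the method requires:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext(appName="UnionExample")
ssc = StreamingContext(sc, 1)
s1 = ssc.queueStream([sc.parallelize([1, 2])])
s2 = ssc.queueStream([sc.parallelize([3, 4])])
# Elements from both streams end up in a single DStream per batch.
ssc.union(s1, s2).pprint()
ssc.start()
ssc.awaitTermination(timeout=5)
ssc.stop()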
apache/spark
python/pyspark/streaming/context.py
StreamingContext.addStreamingListener
def addStreamingListener(self, streamingListener):
    """
    Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
    receiving system events related to streaming.
    """
    self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper(
        self._jvm.PythonStreamingListenerWrapper(streamingListener)))
python
def addStreamingListener(self, streamingListener):
    """
    Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
    receiving system events related to streaming.
    """
    self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper(
        self._jvm.PythonStreamingListenerWrapper(streamingListener)))
[ "def", "addStreamingListener", "(", "self", ",", "streamingListener", ")", ":", "self", ".", "_jssc", ".", "addStreamingListener", "(", "self", ".", "_jvm", ".", "JavaStreamingListenerWrapper", "(", "self", ".", "_jvm", ".", "PythonStreamingListenerWrapper", "(", "streamingListener", ")", ")", ")" ]
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for receiving system events related to streaming.
[ "Add", "a", "[[", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "scheduler", ".", "StreamingListener", "]]", "object", "for", "receiving", "system", "events", "related", "to", "streaming", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L350-L356
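A sketch of registering a listener; the subclass and its logging are illustrative, and only the callback name comes from pyspark.streaming.listener.StreamingListener:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.listener import StreamingListener

class BatchLogger(StreamingListener):
    def onBatchCompleted(self, batchCompleted):
        # batchCompleted wraps the JVM-side event; here we only note that it fired.
        print("a streaming batch finished")

sc = SparkContext(appName="ListenerExample")
ssc = StreamingContext(sc, 1)
ssc.addStreamingListener(BatchLogger())
ssc.queueStream([sc.parallelize([1, 2, 3])]).pprint()
ssc.start()
ssc.awaitTermination(timeout=5)
ssc.stop()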
huggingface/pytorch-pretrained-BERT
pytorch_pretrained_bert/modeling_gpt2.py
load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
python
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
    """ Load tf checkpoints in a pytorch model
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
[ "def", "load_tf_weights_in_gpt2", "(", "model", ",", "gpt2_checkpoint_path", ")", ":", "try", ":", "import", "re", "import", "numpy", "as", "np", "import", "tensorflow", "as", "tf", "except", "ImportError", ":", "print", "(", "\"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see \"", "\"https://www.tensorflow.org/install/ for installation instructions.\"", ")", "raise", "tf_path", "=", "os", ".", "path", ".", "abspath", "(", "gpt2_checkpoint_path", ")", "print", "(", "\"Converting TensorFlow checkpoint from {}\"", ".", "format", "(", "tf_path", ")", ")", "# Load weights from TF model", "init_vars", "=", "tf", ".", "train", ".", "list_variables", "(", "tf_path", ")", "names", "=", "[", "]", "arrays", "=", "[", "]", "for", "name", ",", "shape", "in", "init_vars", ":", "print", "(", "\"Loading TF weight {} with shape {}\"", ".", "format", "(", "name", ",", "shape", ")", ")", "array", "=", "tf", ".", "train", ".", "load_variable", "(", "tf_path", ",", "name", ")", "names", ".", "append", "(", "name", ")", "arrays", ".", "append", "(", "array", ".", "squeeze", "(", ")", ")", "for", "name", ",", "array", "in", "zip", "(", "names", ",", "arrays", ")", ":", "name", "=", "name", "[", "6", ":", "]", "# skip \"model/\"", "name", "=", "name", ".", "split", "(", "'/'", ")", "pointer", "=", "model", "for", "m_name", "in", "name", ":", "if", "re", ".", "fullmatch", "(", "r'[A-Za-z]+\\d+'", ",", "m_name", ")", ":", "l", "=", "re", ".", "split", "(", "r'(\\d+)'", ",", "m_name", ")", "else", ":", "l", "=", "[", "m_name", "]", "if", "l", "[", "0", "]", "==", "'w'", "or", "l", "[", "0", "]", "==", "'g'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "elif", "l", "[", "0", "]", "==", "'b'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "'bias'", ")", "elif", "l", "[", "0", "]", "==", "'wpe'", "or", "l", "[", "0", "]", "==", "'wte'", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "pointer", "=", "getattr", "(", "pointer", ",", "'weight'", ")", "else", ":", "pointer", "=", "getattr", "(", "pointer", ",", "l", "[", "0", "]", ")", "if", "len", "(", "l", ")", ">=", "2", ":", "num", "=", "int", "(", "l", "[", "1", "]", ")", "pointer", "=", "pointer", "[", "num", "]", "try", ":", "assert", "pointer", ".", "shape", "==", "array", ".", "shape", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "(", "pointer", ".", "shape", ",", "array", ".", "shape", ")", "raise", "print", "(", "\"Initialize PyTorch weight {}\"", ".", "format", "(", "name", ")", ")", "pointer", ".", "data", "=", "torch", ".", "from_numpy", "(", "array", ")", "return", "model" ]
Load tf checkpoints in a pytorch model
[ "Load", "tf", "checkpoints", "in", "a", "pytorch", "model" ]
train
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L45-L96
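A conversion sketch built on this function; the checkpoint path is a placeholder for a downloaded OpenAI GPT-2 (117M) TensorFlow checkpoint, and the default GPT2Config is assumed to match it:

from pytorch_pretrained_bert.modeling_gpt2 import (GPT2Config, GPT2Model,
                                                   load_tf_weights_in_gpt2)

config = GPT2Config()  # default 117M hyper-parameters (assumed to match the checkpoint)
model = GPT2Model(config)
# Copies each TF variable into the matching PyTorch parameter, asserting shapes agree.
model = load_tf_weights_in_gpt2(model, "/path/to/gpt2/model.ckpt")
model.eval()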
huggingface/pytorch-pretrained-BERT
pytorch_pretrained_bert/modeling_gpt2.py
GPT2Config.from_json_file
def from_json_file(cls, json_file):
    """Constructs a `GPT2Config` from a json file of parameters."""
    with open(json_file, "r", encoding="utf-8") as reader:
        text = reader.read()
    return cls.from_dict(json.loads(text))
python
def from_json_file(cls, json_file):
    """Constructs a `GPT2Config` from a json file of parameters."""
    with open(json_file, "r", encoding="utf-8") as reader:
        text = reader.read()
    return cls.from_dict(json.loads(text))
[ "def", "from_json_file", "(", "cls", ",", "json_file", ")", ":", "with", "open", "(", "json_file", ",", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "reader", ":", "text", "=", "reader", ".", "read", "(", ")", "return", "cls", ".", "from_dict", "(", "json", ".", "loads", "(", "text", ")", ")" ]
Constructs a `GPT2Config` from a json file of parameters.
[ "Constructs", "a", "GPT2Config", "from", "a", "json", "file", "of", "parameters", "." ]
train
https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_gpt2.py#L162-L166
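A short sketch of loading a config this way; the path is a hypothetical placeholder, and the printed attributes are the ones GPT2Config defines:

from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config

config = GPT2Config.from_json_file("/path/to/gpt2_config.json")  # hypothetical path
# The JSON keys become attributes on the config object.
print(config.n_embd, config.n_layer, config.n_head)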