Dataset columns:

repository_name: string (length 5 to 67)
func_path_in_repository: string (length 4 to 234)
func_name: string (length 0 to 153)
whole_func_string: string (length 52 to 3.87M)
language: string (6 distinct values)
func_code_string: string (length 52 to 3.87M)
func_code_tokens: sequence of strings
func_documentation_string: string (length 1 to 46.9k)
func_documentation_tokens: sequence of strings
split_name: string (1 distinct value)
func_code_url: string (length 85 to 339)
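The sample records that follow pair each column above with a concrete value. As a rough sketch of how such records might be consumed, the snippet below iterates over a CodeSearchNet-style dataset through the Hugging Face datasets library; the dataset identifier "code_search_net" and the "python" configuration are assumptions for illustration, not taken from this dump.

# Sketch: loading a CodeSearchNet-style dataset and reading the columns listed above.
# The dataset name and configuration are assumed, not confirmed by this dump.
from datasets import load_dataset

ds = load_dataset("code_search_net", "python", split="train")  # hypothetical identifier
row = ds[0]
print(row["repository_name"], row["func_path_in_repository"], row["func_name"])
print(row["func_documentation_string"][:120])  # first 120 characters of the docstring
print(row["func_code_url"])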
apache/spark
python/pyspark/sql/functions.py
size
def size(col):
    """
    Collection function: returns the length of the array or map stored in the column.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
    >>> df.select(size(df.data)).collect()
    [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.size(_to_java_column(col)))
python
def size(col): """ Collection function: returns the length of the array or map stored in the column. :param col: name of column or expression >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data']) >>> df.select(size(df.data)).collect() [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.size(_to_java_column(col)))
[ "def", "size", "(", "col", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "size", "(", "_to_java_column", "(", "col", ")", ")", ")" ]
Collection function: returns the length of the array or map stored in the column. :param col: name of column or expression >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data']) >>> df.select(size(df.data)).collect() [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
[ "Collection", "function", ":", "returns", "the", "length", "of", "the", "array", "or", "map", "stored", "in", "the", "column", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2472-L2483
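For readers who want to try the size record above outside a doctest, here is a minimal, self-contained sketch; the local SparkSession setup and column names are illustrative additions, not part of the record.

# Minimal sketch of using pyspark.sql.functions.size on an array column.
from pyspark.sql import SparkSession
from pyspark.sql.functions import size

spark = SparkSession.builder.master("local[*]").appName("size-demo").getOrCreate()
df = spark.createDataFrame([([1, 2, 3],), ([1],), ([],)], ["data"])
df.select(size("data").alias("n")).show()
# Expected: n = 3, 1, 0 for the three rows.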
apache/spark
python/pyspark/sql/functions.py
sort_array
def sort_array(col, asc=True):
    """
    Collection function: sorts the input array in ascending or descending order according
    to the natural ordering of the array elements. Null elements will be placed at the beginning
    of the returned array in ascending order or at the end of the returned array in descending
    order.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(sort_array(df.data).alias('r')).collect()
    [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
    >>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
    [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
python
def sort_array(col, asc=True): """ Collection function: sorts the input array in ascending or descending order according to the natural ordering of the array elements. Null elements will be placed at the beginning of the returned array in ascending order or at the end of the returned array in descending order. :param col: name of column or expression >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data']) >>> df.select(sort_array(df.data).alias('r')).collect() [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])] >>> df.select(sort_array(df.data, asc=False).alias('r')).collect() [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
[ "def", "sort_array", "(", "col", ",", "asc", "=", "True", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "sort_array", "(", "_to_java_column", "(", "col", ")", ",", "asc", ")", ")" ]
Collection function: sorts the input array in ascending or descending order according to the natural ordering of the array elements. Null elements will be placed at the beginning of the returned array in ascending order or at the end of the returned array in descending order. :param col: name of column or expression >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data']) >>> df.select(sort_array(df.data).alias('r')).collect() [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])] >>> df.select(sort_array(df.data, asc=False).alias('r')).collect() [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
[ "Collection", "function", ":", "sorts", "the", "input", "array", "in", "ascending", "or", "descending", "order", "according", "to", "the", "natural", "ordering", "of", "the", "array", "elements", ".", "Null", "elements", "will", "be", "placed", "at", "the", "beginning", "of", "the", "returned", "array", "in", "ascending", "order", "or", "at", "the", "end", "of", "the", "returned", "array", "in", "descending", "order", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2517-L2533
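A short usage sketch for sort_array, assuming an existing SparkSession named spark (for example the one created in the size sketch above); the column name is illustrative.

# Sort an array column in both directions; nulls go first ascending, last descending.
from pyspark.sql.functions import sort_array

df = spark.createDataFrame([([2, 1, None, 3],), ([1],)], ["data"])
df.select(
    sort_array("data").alias("asc"),
    sort_array("data", asc=False).alias("desc"),
).show(truncate=False)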
apache/spark
python/pyspark/sql/functions.py
array_repeat
def array_repeat(col, count):
    """
    Collection function: creates an array containing a column repeated count times.

    >>> df = spark.createDataFrame([('ab',)], ['data'])
    >>> df.select(array_repeat(df.data, 3).alias('r')).collect()
    [Row(r=[u'ab', u'ab', u'ab'])]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
python
def array_repeat(col, count): """ Collection function: creates an array containing a column repeated count times. >>> df = spark.createDataFrame([('ab',)], ['data']) >>> df.select(array_repeat(df.data, 3).alias('r')).collect() [Row(r=[u'ab', u'ab', u'ab'])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
[ "def", "array_repeat", "(", "col", ",", "count", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "array_repeat", "(", "_to_java_column", "(", "col", ")", ",", "count", ")", ")" ]
Collection function: creates an array containing a column repeated count times. >>> df = spark.createDataFrame([('ab',)], ['data']) >>> df.select(array_repeat(df.data, 3).alias('r')).collect() [Row(r=[u'ab', u'ab', u'ab'])]
[ "Collection", "function", ":", "creates", "an", "array", "containing", "a", "column", "repeated", "count", "times", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2687-L2696
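A minimal sketch for array_repeat, again assuming a running SparkSession named spark; note that count here is a plain Python integer, matching the record's doctest.

# Repeat each value of a column three times inside an array.
from pyspark.sql.functions import array_repeat

df = spark.createDataFrame([("ab",), ("cd",)], ["data"])
df.select(array_repeat("data", 3).alias("r")).show(truncate=False)
# Each row becomes an array such as ['ab', 'ab', 'ab'].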
apache/spark
python/pyspark/sql/functions.py
map_concat
def map_concat(*cols):
    """Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
    """
    sc = SparkContext._active_spark_context
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
    return Column(jc)
python
def map_concat(*cols): """Returns the union of all the given maps. :param cols: list of column names (string) or list of :class:`Column` expressions >>> from pyspark.sql.functions import map_concat >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2") >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False) +------------------------+ |map3 | +------------------------+ |[1 -> d, 2 -> b, 3 -> c]| +------------------------+ """ sc = SparkContext._active_spark_context if len(cols) == 1 and isinstance(cols[0], (list, set)): cols = cols[0] jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column)) return Column(jc)
[ "def", "map_concat", "(", "*", "cols", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "(", "list", ",", "set", ")", ")", ":", "cols", "=", "cols", "[", "0", "]", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "map_concat", "(", "_to_seq", "(", "sc", ",", "cols", ",", "_to_java_column", ")", ")", "return", "Column", "(", "jc", ")" ]
Returns the union of all the given maps. :param cols: list of column names (string) or list of :class:`Column` expressions >>> from pyspark.sql.functions import map_concat >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2") >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False) +------------------------+ |map3 | +------------------------+ |[1 -> d, 2 -> b, 3 -> c]| +------------------------+
[ "Returns", "the", "union", "of", "all", "the", "given", "maps", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2717-L2735
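A sketch of map_concat with two map columns built in SQL, assuming a SparkSession named spark. The keys are kept distinct here because handling of duplicate keys has changed across Spark releases.

# Merge two map columns into one.
from pyspark.sql.functions import map_concat

df = spark.sql("SELECT map(1, 'a', 2, 'b') AS map1, map(3, 'c') AS map2")
df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
# map3 holds the union of the entries: {1 -> a, 2 -> b, 3 -> c}.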
apache/spark
python/pyspark/sql/functions.py
sequence
def sequence(start, stop, step=None):
    """
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
    otherwise -1.

    >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
    >>> df1.select(sequence('C1', 'C2').alias('r')).collect()
    [Row(r=[-2, -1, 0, 1, 2])]
    >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
    >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
    [Row(r=[4, 2, 0, -2, -4])]
    """
    sc = SparkContext._active_spark_context
    if step is None:
        return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
    else:
        return Column(sc._jvm.functions.sequence(
            _to_java_column(start), _to_java_column(stop), _to_java_column(step)))
python
def sequence(start, stop, step=None): """ Generate a sequence of integers from `start` to `stop`, incrementing by `step`. If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`, otherwise -1. >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2')) >>> df1.select(sequence('C1', 'C2').alias('r')).collect() [Row(r=[-2, -1, 0, 1, 2])] >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3')) >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect() [Row(r=[4, 2, 0, -2, -4])] """ sc = SparkContext._active_spark_context if step is None: return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop))) else: return Column(sc._jvm.functions.sequence( _to_java_column(start), _to_java_column(stop), _to_java_column(step)))
[ "def", "sequence", "(", "start", ",", "stop", ",", "step", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "step", "is", "None", ":", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "sequence", "(", "_to_java_column", "(", "start", ")", ",", "_to_java_column", "(", "stop", ")", ")", ")", "else", ":", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "sequence", "(", "_to_java_column", "(", "start", ")", ",", "_to_java_column", "(", "stop", ")", ",", "_to_java_column", "(", "step", ")", ")", ")" ]
Generate a sequence of integers from `start` to `stop`, incrementing by `step`. If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`, otherwise -1. >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2')) >>> df1.select(sequence('C1', 'C2').alias('r')).collect() [Row(r=[-2, -1, 0, 1, 2])] >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3')) >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect() [Row(r=[4, 2, 0, -2, -4])]
[ "Generate", "a", "sequence", "of", "integers", "from", "start", "to", "stop", "incrementing", "by", "step", ".", "If", "step", "is", "not", "set", "incrementing", "by", "1", "if", "start", "is", "less", "than", "or", "equal", "to", "stop", "otherwise", "-", "1", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2739-L2757
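A sketch for sequence, assuming a SparkSession named spark; both the default step and an explicit step from the record's doctests are exercised, with illustrative column names.

# Generate integer ranges from column values.
from pyspark.sql.functions import sequence

df = spark.createDataFrame([(-2, 2, 2)], ["start", "stop", "step"])
df.select(
    sequence("start", "stop").alias("default_step"),      # [-2, -1, 0, 1, 2]
    sequence("start", "stop", "step").alias("step_of_2"),  # [-2, 0, 2]
).show(truncate=False)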
apache/spark
python/pyspark/sql/functions.py
from_csv
def from_csv(col, schema, options={}):
    """
    Parses a column containing a CSV string to a row with the specified schema.
    Returns `null`, in the case of an unparseable string.

    :param col: string column in CSV format
    :param schema: a string with schema in DDL format to use when parsing the CSV column.
    :param options: options to control parsing. accepts the same options as the CSV datasource

    >>> data = [("1,2,3",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
    [Row(csv=Row(a=1, b=2, c=3))]
    >>> value = data[0][0]
    >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
    [Row(csv=Row(_c0=1, _c1=2, _c2=3))]
    """
    sc = SparkContext._active_spark_context
    if isinstance(schema, basestring):
        schema = _create_column_from_literal(schema)
    elif isinstance(schema, Column):
        schema = _to_java_column(schema)
    else:
        raise TypeError("schema argument should be a column or string")

    jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
    return Column(jc)
python
def from_csv(col, schema, options={}): """ Parses a column containing a CSV string to a row with the specified schema. Returns `null`, in the case of an unparseable string. :param col: string column in CSV format :param schema: a string with schema in DDL format to use when parsing the CSV column. :param options: options to control parsing. accepts the same options as the CSV datasource >>> data = [("1,2,3",)] >>> df = spark.createDataFrame(data, ("value",)) >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect() [Row(csv=Row(a=1, b=2, c=3))] >>> value = data[0][0] >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect() [Row(csv=Row(_c0=1, _c1=2, _c2=3))] """ sc = SparkContext._active_spark_context if isinstance(schema, basestring): schema = _create_column_from_literal(schema) elif isinstance(schema, Column): schema = _to_java_column(schema) else: raise TypeError("schema argument should be a column or string") jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options) return Column(jc)
[ "def", "from_csv", "(", "col", ",", "schema", ",", "options", "=", "{", "}", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "if", "isinstance", "(", "schema", ",", "basestring", ")", ":", "schema", "=", "_create_column_from_literal", "(", "schema", ")", "elif", "isinstance", "(", "schema", ",", "Column", ")", ":", "schema", "=", "_to_java_column", "(", "schema", ")", "else", ":", "raise", "TypeError", "(", "\"schema argument should be a column or string\"", ")", "jc", "=", "sc", ".", "_jvm", ".", "functions", ".", "from_csv", "(", "_to_java_column", "(", "col", ")", ",", "schema", ",", "options", ")", "return", "Column", "(", "jc", ")" ]
Parses a column containing a CSV string to a row with the specified schema. Returns `null`, in the case of an unparseable string. :param col: string column in CSV format :param schema: a string with schema in DDL format to use when parsing the CSV column. :param options: options to control parsing. accepts the same options as the CSV datasource >>> data = [("1,2,3",)] >>> df = spark.createDataFrame(data, ("value",)) >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect() [Row(csv=Row(a=1, b=2, c=3))] >>> value = data[0][0] >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect() [Row(csv=Row(_c0=1, _c1=2, _c2=3))]
[ "Parses", "a", "column", "containing", "a", "CSV", "string", "to", "a", "row", "with", "the", "specified", "schema", ".", "Returns", "null", "in", "the", "case", "of", "an", "unparseable", "string", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2762-L2789
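A sketch for from_csv, assuming a SparkSession named spark and a PySpark build whose Python API exposes from_csv (the record above is taken from the development branch; the Python wrapper is documented as new in the 3.0 line).

# Parse a CSV-formatted string column into a struct using a DDL schema.
from pyspark.sql.functions import from_csv

df = spark.createDataFrame([("1,2,3",)], ["value"])
parsed = df.select(from_csv("value", "a INT, b INT, c INT").alias("csv"))
parsed.select("csv.a", "csv.b", "csv.c").show()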
apache/spark
python/pyspark/sql/functions.py
udf
def udf(f=None, returnType=StringType()): """Creates a user defined function (UDF). .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> from pyspark.sql.types import IntegerType >>> import random >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic() .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. :param f: python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> slen = udf(lambda s: len(s), IntegerType()) >>> @udf ... def to_upper(s): ... if s is not None: ... return s.upper() ... >>> @udf(returnType=IntegerType()) ... def add_one(x): ... if x is not None: ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show() +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ """ # The following table shows most of Python data and SQL type conversions in normal UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-25666's PR to see the codes in order to generate the table below. 
# # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa # | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa # | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa # | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa # | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa # | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa # | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: Python 2 is used to generate this table since it is used to check the backward # compatibility often in practice. # Note: 'X' means it throws an exception during the conversion. 
# decorator @udf, @udf(), @udf(dataType()) if f is None or isinstance(f, (str, DataType)): # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType return functools.partial(_create_udf, returnType=return_type, evalType=PythonEvalType.SQL_BATCHED_UDF) else: return _create_udf(f=f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF)
python
def udf(f=None, returnType=StringType()): """Creates a user defined function (UDF). .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> from pyspark.sql.types import IntegerType >>> import random >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic() .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. :param f: python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> slen = udf(lambda s: len(s), IntegerType()) >>> @udf ... def to_upper(s): ... if s is not None: ... return s.upper() ... >>> @udf(returnType=IntegerType()) ... def add_one(x): ... if x is not None: ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show() +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ """ # The following table shows most of Python data and SQL type conversions in normal UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-25666's PR to see the codes in order to generate the table below. 
# # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa # | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa # | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa # | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa # | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa # | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa # | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa # | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa # | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa # +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa # # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: Python 2 is used to generate this table since it is used to check the backward # compatibility often in practice. # Note: 'X' means it throws an exception during the conversion. 
# decorator @udf, @udf(), @udf(dataType()) if f is None or isinstance(f, (str, DataType)): # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType return functools.partial(_create_udf, returnType=return_type, evalType=PythonEvalType.SQL_BATCHED_UDF) else: return _create_udf(f=f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF)
[ "def", "udf", "(", "f", "=", "None", ",", "returnType", "=", "StringType", "(", ")", ")", ":", "# The following table shows most of Python data and SQL type conversions in normal UDFs that", "# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near", "# future. The table might have to be eventually documented externally.", "# Please see SPARK-25666's PR to see the codes in order to generate the table below.", "#", "# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa", "# |SQL Type \\ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa", "# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa", "# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa", "# | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa", "# | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa", "# | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa", "# | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa", "# | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa", "# | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa", "# | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa", "# | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa", "# | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa", "# | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa", "# | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa", "# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa", "# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa", "# | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa", "# 
+-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa", "#", "# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be", "# used in `returnType`.", "# Note: The values inside of the table are generated by `repr`.", "# Note: Python 2 is used to generate this table since it is used to check the backward", "# compatibility often in practice.", "# Note: 'X' means it throws an exception during the conversion.", "# decorator @udf, @udf(), @udf(dataType())", "if", "f", "is", "None", "or", "isinstance", "(", "f", ",", "(", "str", ",", "DataType", ")", ")", ":", "# If DataType has been passed as a positional argument", "# for decorator use it as a returnType", "return_type", "=", "f", "or", "returnType", "return", "functools", ".", "partial", "(", "_create_udf", ",", "returnType", "=", "return_type", ",", "evalType", "=", "PythonEvalType", ".", "SQL_BATCHED_UDF", ")", "else", ":", "return", "_create_udf", "(", "f", "=", "f", ",", "returnType", "=", "returnType", ",", "evalType", "=", "PythonEvalType", ".", "SQL_BATCHED_UDF", ")" ]
Creates a user defined function (UDF). .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> from pyspark.sql.types import IntegerType >>> import random >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic() .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. :param f: python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. >>> from pyspark.sql.types import IntegerType >>> slen = udf(lambda s: len(s), IntegerType()) >>> @udf ... def to_upper(s): ... if s is not None: ... return s.upper() ... >>> @udf(returnType=IntegerType()) ... def add_one(x): ... if x is not None: ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age")) >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show() +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+
[ "Creates", "a", "user", "defined", "function", "(", "UDF", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2805-L2889
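A compact sketch of the two udf invocation styles documented above (direct call and decorator), assuming a SparkSession named spark; the column and function names are illustrative.

# Register plain-Python UDFs and apply them to DataFrame columns.
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType

slen = udf(lambda s: len(s) if s is not None else None, IntegerType())

@udf(returnType=IntegerType())
def add_one(x):
    # Guard against NULL inputs, as the docstring's examples do.
    return x + 1 if x is not None else None

df = spark.createDataFrame([(1, "John Doe", 21)], ["id", "name", "age"])
df.select(slen("name").alias("slen"), add_one("age").alias("age_plus_1")).show()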
apache/spark
python/pyspark/sql/functions.py
pandas_udf
def pandas_udf(f=None, returnType=None, functionType=None): """ Creates a vectorized user defined function (UDF). :param f: user-defined function. A python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`. Default: SCALAR. .. note:: Experimental The function type of the UDF can be one of the following: 1. SCALAR A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`. The length of the returned `pandas.Series` must be of the same as the input `pandas.Series`. If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`. :class:`MapType`, nested :class:`StructType` are currently not supported as output types. Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and :meth:`pyspark.sql.DataFrame.select`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql.types import IntegerType, StringType >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP >>> @pandas_udf(StringType()) # doctest: +SKIP ... def to_upper(s): ... return s.str.upper() ... >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ... ("id", "name", "age")) # doctest: +SKIP >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\ ... .show() # doctest: +SKIP +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ >>> @pandas_udf("first string, last string") # doctest: +SKIP ... def split_expand(n): ... return n.str.split(expand=True) >>> df.select(split_expand("name")).show() # doctest: +SKIP +------------------+ |split_expand(name)| +------------------+ | [John, Doe]| +------------------+ .. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input column, but is the length of an internal batch used for each call to the function. Therefore, this can be used, for example, to ensure the length of each returned `pandas.Series`, and can not be used as the column length. 2. GROUPED_MAP A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame` The returnType should be a :class:`StructType` describing the schema of the returned `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match the field names in the defined returnType schema if specified as strings, or match the field data types by position if not strings, e.g. integer indices. The length of the returned `pandas.DataFrame` can be arbitrary. Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def normalize(pdf): ... v = pdf.v ... 
return pdf.assign(v=(v - v.mean()) / v.std()) >>> df.groupby("id").apply(normalize).show() # doctest: +SKIP +---+-------------------+ | id| v| +---+-------------------+ | 1|-0.7071067811865475| | 1| 0.7071067811865475| | 2|-0.8320502943378437| | 2|-0.2773500981126146| | 2| 1.1094003924504583| +---+-------------------+ Alternatively, the user can define a function that takes two arguments. In this case, the grouping key(s) will be passed as the first argument and the data will be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as a `pandas.DataFrame` containing all columns from the original Spark DataFrame. This is useful when the user does not want to hardcode grouping key(s) in the function. >>> import pandas as pd # doctest: +SKIP >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def mean_udf(key, pdf): ... # key is a tuple of one numpy.int64, which is the value ... # of 'id' for the current group ... return pd.DataFrame([key + (pdf.v.mean(),)]) >>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP +---+---+ | id| v| +---+---+ | 1|1.5| | 2|6.0| +---+---+ >>> @pandas_udf( ... "id long, `ceil(v / 2)` long, v double", ... PandasUDFType.GROUPED_MAP) # doctest: +SKIP >>> def sum_udf(key, pdf): ... # key is a tuple of two numpy.int64s, which is the values ... # of 'id' and 'ceil(df.v / 2)' for the current group ... return pd.DataFrame([key + (pdf.v.sum(),)]) >>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP +---+-----------+----+ | id|ceil(v / 2)| v| +---+-----------+----+ | 2| 5|10.0| | 1| 1| 3.0| | 2| 3| 5.0| | 2| 2| 3.0| +---+-----------+----+ .. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is recommended to explicitly index the columns by name to ensure the positions are correct, or alternatively use an `OrderedDict`. For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or `pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`. .. seealso:: :meth:`pyspark.sql.GroupedData.apply` 3. GROUPED_AGG A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar The `returnType` should be a primitive data type, e.g., :class:`DoubleType`. The returned scalar can be either a python primitive type, e.g., `int` or `float` or a numpy data type, e.g., `numpy.int64` or `numpy.float64`. :class:`MapType` and :class:`StructType` are currently not supported as output types. Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` This example shows using grouped aggregated UDFs with groupby: >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP +---+-----------+ | id|mean_udf(v)| +---+-----------+ | 1| 1.5| | 2| 6.0| +---+-----------+ This example shows using grouped aggregated UDFs as window functions. 
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql import Window >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> w = (Window.partitionBy('id') ... .orderBy('v') ... .rowsBetween(-1, 0)) >>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP +---+----+------+ | id| v|mean_v| +---+----+------+ | 1| 1.0| 1.0| | 1| 2.0| 1.5| | 2| 3.0| 3.0| | 2| 5.0| 4.0| | 2|10.0| 7.5| +---+----+------+ .. note:: For performance reasons, the input series to window functions are not copied. Therefore, mutating the input series is not allowed and will cause incorrect results. For the same reason, users should also not rely on the index of the input series. .. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP ... def random(v): ... import numpy as np ... import pandas as pd ... return pd.Series(np.random.randn(len(v)) >>> random = random.asNondeterministic() # doctest: +SKIP .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. .. note:: The data type of returned `pandas.Series` from the user-defined functions should be matched with defined returnType (see :meth:`types.to_arrow_type` and :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do conversion on returned data. The conversion is not guaranteed to be correct and results should be checked for accuracy by users. """ # The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-25798's PR to see the codes in order to generate the table below. 
# # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # |SQL Type \ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa # | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa # | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa # | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa # | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa # | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa # | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa # | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa # | string| None| u''|u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa # | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa # | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa # | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | binary| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # # 
Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: Python 2 is used to generate this table since it is used to check the backward # compatibility often in practice. # Note: Pandas 0.19.2 and PyArrow 0.9.0 are used. # Note: Timezone is Singapore timezone. # Note: 'X' means it throws an exception during the conversion. # Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555). # decorator @pandas_udf(returnType, functionType) is_decorator = f is None or isinstance(f, (str, DataType)) if is_decorator: # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType if functionType is not None: # @pandas_udf(dataType, functionType=functionType) # @pandas_udf(returnType=dataType, functionType=functionType) eval_type = functionType elif returnType is not None and isinstance(returnType, int): # @pandas_udf(dataType, functionType) eval_type = returnType else: # @pandas_udf(dataType) or @pandas_udf(returnType=dataType) eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF else: return_type = returnType if functionType is not None: eval_type = functionType else: eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF if return_type is None: raise ValueError("Invalid returnType: returnType can not be None") if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError("Invalid functionType: " "functionType must be one the values from PandasUDFType") if is_decorator: return functools.partial(_create_udf, returnType=return_type, evalType=eval_type) else: return _create_udf(f=f, returnType=return_type, evalType=eval_type)
python
def pandas_udf(f=None, returnType=None, functionType=None): """ Creates a vectorized user defined function (UDF). :param f: user-defined function. A python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`. Default: SCALAR. .. note:: Experimental The function type of the UDF can be one of the following: 1. SCALAR A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`. The length of the returned `pandas.Series` must be of the same as the input `pandas.Series`. If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`. :class:`MapType`, nested :class:`StructType` are currently not supported as output types. Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and :meth:`pyspark.sql.DataFrame.select`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql.types import IntegerType, StringType >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP >>> @pandas_udf(StringType()) # doctest: +SKIP ... def to_upper(s): ... return s.str.upper() ... >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ... ("id", "name", "age")) # doctest: +SKIP >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\ ... .show() # doctest: +SKIP +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ >>> @pandas_udf("first string, last string") # doctest: +SKIP ... def split_expand(n): ... return n.str.split(expand=True) >>> df.select(split_expand("name")).show() # doctest: +SKIP +------------------+ |split_expand(name)| +------------------+ | [John, Doe]| +------------------+ .. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input column, but is the length of an internal batch used for each call to the function. Therefore, this can be used, for example, to ensure the length of each returned `pandas.Series`, and can not be used as the column length. 2. GROUPED_MAP A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame` The returnType should be a :class:`StructType` describing the schema of the returned `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match the field names in the defined returnType schema if specified as strings, or match the field data types by position if not strings, e.g. integer indices. The length of the returned `pandas.DataFrame` can be arbitrary. Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def normalize(pdf): ... v = pdf.v ... 
return pdf.assign(v=(v - v.mean()) / v.std()) >>> df.groupby("id").apply(normalize).show() # doctest: +SKIP +---+-------------------+ | id| v| +---+-------------------+ | 1|-0.7071067811865475| | 1| 0.7071067811865475| | 2|-0.8320502943378437| | 2|-0.2773500981126146| | 2| 1.1094003924504583| +---+-------------------+ Alternatively, the user can define a function that takes two arguments. In this case, the grouping key(s) will be passed as the first argument and the data will be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as a `pandas.DataFrame` containing all columns from the original Spark DataFrame. This is useful when the user does not want to hardcode grouping key(s) in the function. >>> import pandas as pd # doctest: +SKIP >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def mean_udf(key, pdf): ... # key is a tuple of one numpy.int64, which is the value ... # of 'id' for the current group ... return pd.DataFrame([key + (pdf.v.mean(),)]) >>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP +---+---+ | id| v| +---+---+ | 1|1.5| | 2|6.0| +---+---+ >>> @pandas_udf( ... "id long, `ceil(v / 2)` long, v double", ... PandasUDFType.GROUPED_MAP) # doctest: +SKIP >>> def sum_udf(key, pdf): ... # key is a tuple of two numpy.int64s, which is the values ... # of 'id' and 'ceil(df.v / 2)' for the current group ... return pd.DataFrame([key + (pdf.v.sum(),)]) >>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP +---+-----------+----+ | id|ceil(v / 2)| v| +---+-----------+----+ | 2| 5|10.0| | 1| 1| 3.0| | 2| 3| 5.0| | 2| 2| 3.0| +---+-----------+----+ .. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is recommended to explicitly index the columns by name to ensure the positions are correct, or alternatively use an `OrderedDict`. For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or `pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`. .. seealso:: :meth:`pyspark.sql.GroupedData.apply` 3. GROUPED_AGG A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar The `returnType` should be a primitive data type, e.g., :class:`DoubleType`. The returned scalar can be either a python primitive type, e.g., `int` or `float` or a numpy data type, e.g., `numpy.int64` or `numpy.float64`. :class:`MapType` and :class:`StructType` are currently not supported as output types. Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` This example shows using grouped aggregated UDFs with groupby: >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP +---+-----------+ | id|mean_udf(v)| +---+-----------+ | 1| 1.5| | 2| 6.0| +---+-----------+ This example shows using grouped aggregated UDFs as window functions. 
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql import Window >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> w = (Window.partitionBy('id') ... .orderBy('v') ... .rowsBetween(-1, 0)) >>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP +---+----+------+ | id| v|mean_v| +---+----+------+ | 1| 1.0| 1.0| | 1| 2.0| 1.5| | 2| 3.0| 3.0| | 2| 5.0| 4.0| | 2|10.0| 7.5| +---+----+------+ .. note:: For performance reasons, the input series to window functions are not copied. Therefore, mutating the input series is not allowed and will cause incorrect results. For the same reason, users should also not rely on the index of the input series. .. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP ... def random(v): ... import numpy as np ... import pandas as pd ... return pd.Series(np.random.randn(len(v)) >>> random = random.asNondeterministic() # doctest: +SKIP .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. .. note:: The data type of returned `pandas.Series` from the user-defined functions should be matched with defined returnType (see :meth:`types.to_arrow_type` and :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do conversion on returned data. The conversion is not guaranteed to be correct and results should be checked for accuracy by users. """ # The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near # future. The table might have to be eventually documented externally. # Please see SPARK-25798's PR to see the codes in order to generate the table below. 
# # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # |SQL Type \ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa # | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa # | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa # | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa # | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa # | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa # | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa # | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa # | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa # | string| None| u''|u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa # | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa # | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa # | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # | binary| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa # +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa # # 
Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be # used in `returnType`. # Note: The values inside of the table are generated by `repr`. # Note: Python 2 is used to generate this table since it is used to check the backward # compatibility often in practice. # Note: Pandas 0.19.2 and PyArrow 0.9.0 are used. # Note: Timezone is Singapore timezone. # Note: 'X' means it throws an exception during the conversion. # Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555). # decorator @pandas_udf(returnType, functionType) is_decorator = f is None or isinstance(f, (str, DataType)) if is_decorator: # If DataType has been passed as a positional argument # for decorator use it as a returnType return_type = f or returnType if functionType is not None: # @pandas_udf(dataType, functionType=functionType) # @pandas_udf(returnType=dataType, functionType=functionType) eval_type = functionType elif returnType is not None and isinstance(returnType, int): # @pandas_udf(dataType, functionType) eval_type = returnType else: # @pandas_udf(dataType) or @pandas_udf(returnType=dataType) eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF else: return_type = returnType if functionType is not None: eval_type = functionType else: eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF if return_type is None: raise ValueError("Invalid returnType: returnType can not be None") if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF, PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]: raise ValueError("Invalid functionType: " "functionType must be one the values from PandasUDFType") if is_decorator: return functools.partial(_create_udf, returnType=return_type, evalType=eval_type) else: return _create_udf(f=f, returnType=return_type, evalType=eval_type)
[ "def", "pandas_udf", "(", "f", "=", "None", ",", "returnType", "=", "None", ",", "functionType", "=", "None", ")", ":", "# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that", "# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near", "# future. The table might have to be eventually documented externally.", "# Please see SPARK-25798's PR to see the codes in order to generate the table below.", "#", "# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa", "# |SQL Type \\ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa", "# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa", "# | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa", "# | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa", "# | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa", "# | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa", "# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa", "# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa", "# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa", "# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa", "# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa", "# | string| None| u''|u'\\x01'| u'\\x01'| u'\\x01'| u'\\x01'| u'\\x01'| u'\\x01'| u'\\x01'| u'\\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa", "# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa", "# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa", "# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa", "# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa", "# | binary| X| 
X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa", "# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa", "#", "# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be", "# used in `returnType`.", "# Note: The values inside of the table are generated by `repr`.", "# Note: Python 2 is used to generate this table since it is used to check the backward", "# compatibility often in practice.", "# Note: Pandas 0.19.2 and PyArrow 0.9.0 are used.", "# Note: Timezone is Singapore timezone.", "# Note: 'X' means it throws an exception during the conversion.", "# Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555).", "# decorator @pandas_udf(returnType, functionType)", "is_decorator", "=", "f", "is", "None", "or", "isinstance", "(", "f", ",", "(", "str", ",", "DataType", ")", ")", "if", "is_decorator", ":", "# If DataType has been passed as a positional argument", "# for decorator use it as a returnType", "return_type", "=", "f", "or", "returnType", "if", "functionType", "is", "not", "None", ":", "# @pandas_udf(dataType, functionType=functionType)", "# @pandas_udf(returnType=dataType, functionType=functionType)", "eval_type", "=", "functionType", "elif", "returnType", "is", "not", "None", "and", "isinstance", "(", "returnType", ",", "int", ")", ":", "# @pandas_udf(dataType, functionType)", "eval_type", "=", "returnType", "else", ":", "# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)", "eval_type", "=", "PythonEvalType", ".", "SQL_SCALAR_PANDAS_UDF", "else", ":", "return_type", "=", "returnType", "if", "functionType", "is", "not", "None", ":", "eval_type", "=", "functionType", "else", ":", "eval_type", "=", "PythonEvalType", ".", "SQL_SCALAR_PANDAS_UDF", "if", "return_type", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid returnType: returnType can not be None\"", ")", "if", "eval_type", "not", "in", "[", "PythonEvalType", ".", "SQL_SCALAR_PANDAS_UDF", ",", "PythonEvalType", ".", "SQL_GROUPED_MAP_PANDAS_UDF", ",", "PythonEvalType", ".", "SQL_GROUPED_AGG_PANDAS_UDF", "]", ":", "raise", "ValueError", "(", "\"Invalid functionType: \"", "\"functionType must be one the values from PandasUDFType\"", ")", "if", "is_decorator", ":", "return", "functools", ".", "partial", "(", "_create_udf", ",", "returnType", "=", "return_type", ",", "evalType", "=", "eval_type", ")", "else", ":", "return", "_create_udf", "(", "f", "=", "f", ",", "returnType", "=", "return_type", ",", "evalType", "=", "eval_type", ")" ]
Creates a vectorized user defined function (UDF). :param f: user-defined function. A python function if used as a standalone function :param returnType: the return type of the user-defined function. The value can be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string. :param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`. Default: SCALAR. .. note:: Experimental The function type of the UDF can be one of the following: 1. SCALAR A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`. The length of the returned `pandas.Series` must be of the same as the input `pandas.Series`. If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`. :class:`MapType`, nested :class:`StructType` are currently not supported as output types. Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and :meth:`pyspark.sql.DataFrame.select`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql.types import IntegerType, StringType >>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP >>> @pandas_udf(StringType()) # doctest: +SKIP ... def to_upper(s): ... return s.str.upper() ... >>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP ... def add_one(x): ... return x + 1 ... >>> df = spark.createDataFrame([(1, "John Doe", 21)], ... ("id", "name", "age")) # doctest: +SKIP >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\ ... .show() # doctest: +SKIP +----------+--------------+------------+ |slen(name)|to_upper(name)|add_one(age)| +----------+--------------+------------+ | 8| JOHN DOE| 22| +----------+--------------+------------+ >>> @pandas_udf("first string, last string") # doctest: +SKIP ... def split_expand(n): ... return n.str.split(expand=True) >>> df.select(split_expand("name")).show() # doctest: +SKIP +------------------+ |split_expand(name)| +------------------+ | [John, Doe]| +------------------+ .. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input column, but is the length of an internal batch used for each call to the function. Therefore, this can be used, for example, to ensure the length of each returned `pandas.Series`, and can not be used as the column length. 2. GROUPED_MAP A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame` The returnType should be a :class:`StructType` describing the schema of the returned `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match the field names in the defined returnType schema if specified as strings, or match the field data types by position if not strings, e.g. integer indices. The length of the returned `pandas.DataFrame` can be arbitrary. Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`. >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def normalize(pdf): ... v = pdf.v ... 
return pdf.assign(v=(v - v.mean()) / v.std()) >>> df.groupby("id").apply(normalize).show() # doctest: +SKIP +---+-------------------+ | id| v| +---+-------------------+ | 1|-0.7071067811865475| | 1| 0.7071067811865475| | 2|-0.8320502943378437| | 2|-0.2773500981126146| | 2| 1.1094003924504583| +---+-------------------+ Alternatively, the user can define a function that takes two arguments. In this case, the grouping key(s) will be passed as the first argument and the data will be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as a `pandas.DataFrame` containing all columns from the original Spark DataFrame. This is useful when the user does not want to hardcode grouping key(s) in the function. >>> import pandas as pd # doctest: +SKIP >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) # doctest: +SKIP >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP ... def mean_udf(key, pdf): ... # key is a tuple of one numpy.int64, which is the value ... # of 'id' for the current group ... return pd.DataFrame([key + (pdf.v.mean(),)]) >>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP +---+---+ | id| v| +---+---+ | 1|1.5| | 2|6.0| +---+---+ >>> @pandas_udf( ... "id long, `ceil(v / 2)` long, v double", ... PandasUDFType.GROUPED_MAP) # doctest: +SKIP >>> def sum_udf(key, pdf): ... # key is a tuple of two numpy.int64s, which is the values ... # of 'id' and 'ceil(df.v / 2)' for the current group ... return pd.DataFrame([key + (pdf.v.sum(),)]) >>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP +---+-----------+----+ | id|ceil(v / 2)| v| +---+-----------+----+ | 2| 5|10.0| | 1| 1| 3.0| | 2| 3| 5.0| | 2| 2| 3.0| +---+-----------+----+ .. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is recommended to explicitly index the columns by name to ensure the positions are correct, or alternatively use an `OrderedDict`. For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or `pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`. .. seealso:: :meth:`pyspark.sql.GroupedData.apply` 3. GROUPED_AGG A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar The `returnType` should be a primitive data type, e.g., :class:`DoubleType`. The returned scalar can be either a python primitive type, e.g., `int` or `float` or a numpy data type, e.g., `numpy.int64` or `numpy.float64`. :class:`MapType` and :class:`StructType` are currently not supported as output types. Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` This example shows using grouped aggregated UDFs with groupby: >>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP +---+-----------+ | id|mean_udf(v)| +---+-----------+ | 1| 1.5| | 2| 6.0| +---+-----------+ This example shows using grouped aggregated UDFs as window functions. 
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType >>> from pyspark.sql import Window >>> df = spark.createDataFrame( ... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ... ("id", "v")) >>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP ... def mean_udf(v): ... return v.mean() >>> w = (Window.partitionBy('id') ... .orderBy('v') ... .rowsBetween(-1, 0)) >>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP +---+----+------+ | id| v|mean_v| +---+----+------+ | 1| 1.0| 1.0| | 1| 2.0| 1.5| | 2| 3.0| 3.0| | 2| 5.0| 4.0| | 2|10.0| 7.5| +---+----+------+ .. note:: For performance reasons, the input series to window functions are not copied. Therefore, mutating the input series is not allowed and will cause incorrect results. For the same reason, users should also not rely on the index of the input series. .. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window` .. note:: The user-defined functions are considered deterministic by default. Due to optimization, duplicate invocations may be eliminated or the function may even be invoked more times than it is present in the query. If your function is not deterministic, call `asNondeterministic` on the user defined function. E.g.: >>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP ... def random(v): ... import numpy as np ... import pandas as pd ... return pd.Series(np.random.randn(len(v)) >>> random = random.asNondeterministic() # doctest: +SKIP .. note:: The user-defined functions do not support conditional expressions or short circuiting in boolean expressions and it ends up with being executed all internally. If the functions can fail on special rows, the workaround is to incorporate the condition into the functions. .. note:: The user-defined functions do not take keyword arguments on the calling side. .. note:: The data type of returned `pandas.Series` from the user-defined functions should be matched with defined returnType (see :meth:`types.to_arrow_type` and :meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do conversion on returned data. The conversion is not guaranteed to be correct and results should be checked for accuracy by users.
[ "Creates", "a", "vectorized", "user", "defined", "function", "(", "UDF", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2893-L3189
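A minimal sketch, assuming pyspark with pyarrow is installed and an active SparkSession named `spark` exists (as in the doctests), of the two invocation forms the dispatch logic above handles: a direct call and a decorator.

# Sketch only. Assumes pyspark (with pyarrow) is installed and `spark` is an
# active SparkSession, as in the doctests above.
from pyspark.sql.functions import pandas_udf, PandasUDFType

# Direct call: the Python function is passed as `f`, the return type positionally.
slen = pandas_udf(lambda s: s.str.len(), "integer")

# Decorator call: `f` is None here (only the type and functionType are given),
# so pandas_udf returns a functools.partial of _create_udf that wraps mean_udf.
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def mean_udf(v):
    return v.mean()

df = spark.createDataFrame([(1, "John Doe", 1.0), (2, "Jane Doe", 2.0)],
                           ("id", "name", "v"))
df.select(slen("name")).show()
df.groupby("id").agg(mean_udf(df["v"])).show()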
apache/spark
python/pyspark/sql/readwriter.py
to_str
def to_str(value): """ A wrapper over str(), but converts bool values to lower case strings. If None is given, just returns None, instead of converting it to string "None". """ if isinstance(value, bool): return str(value).lower() elif value is None: return value else: return str(value)
python
def to_str(value): """ A wrapper over str(), but converts bool values to lower case strings. If None is given, just returns None, instead of converting it to string "None". """ if isinstance(value, bool): return str(value).lower() elif value is None: return value else: return str(value)
[ "def", "to_str", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "str", "(", "value", ")", ".", "lower", "(", ")", "elif", "value", "is", "None", ":", "return", "value", "else", ":", "return", "str", "(", "value", ")" ]
A wrapper over str(), but converts bool values to lower case strings. If None is given, just returns None, instead of converting it to string "None".
[ "A", "wrapper", "over", "str", "()", "but", "converts", "bool", "values", "to", "lower", "case", "strings", ".", "If", "None", "is", "given", "just", "returns", "None", "instead", "of", "converting", "it", "to", "string", "None", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L34-L44
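To make the documented behaviour concrete (bools lower-cased, None passed through, everything else stringified), here is a self-contained check; the helper is re-defined locally only so the snippet runs without a Spark checkout.

# Illustration of to_str's documented behaviour; redefined locally so this
# runs stand-alone.
def to_str(value):
    if isinstance(value, bool):
        return str(value).lower()
    elif value is None:
        return value
    else:
        return str(value)

assert to_str(True) == "true"   # bools become lower-case strings
assert to_str(None) is None     # None is passed through, not "None"
assert to_str(42) == "42"       # everything else goes through str()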
apache/spark
python/pyspark/sql/readwriter.py
OptionUtils._set_opts
def _set_opts(self, schema=None, **options): """ Set named options (filter out those whose value is None) """ if schema is not None: self.schema(schema) for k, v in options.items(): if v is not None: self.option(k, v)
python
def _set_opts(self, schema=None, **options): """ Set named options (filter out those whose value is None) """ if schema is not None: self.schema(schema) for k, v in options.items(): if v is not None: self.option(k, v)
[ "def", "_set_opts", "(", "self", ",", "schema", "=", "None", ",", "*", "*", "options", ")", ":", "if", "schema", "is", "not", "None", ":", "self", ".", "schema", "(", "schema", ")", "for", "k", ",", "v", "in", "options", ".", "items", "(", ")", ":", "if", "v", "is", "not", "None", ":", "self", ".", "option", "(", "k", ",", "v", ")" ]
Set named options (filter out those whose value is None)
[ "Set", "named", "options", "(", "filter", "out", "those", "the", "value", "is", "None", ")" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L49-L57
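A hedged sketch of the filtering behaviour: keyword arguments whose value is None never reach option(), so they cannot override a source's defaults. It assumes an active SparkSession named `spark`; the option names are only illustrative.

# Assumes `spark` is an active SparkSession. DataFrameReader inherits
# _set_opts from OptionUtils.
reader = spark.read.format("json")
reader._set_opts(schema=None, multiLine=True, lineSep=None)
# Only multiLine was forwarded to option(); schema and lineSep were skipped
# because their values were None.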
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.format
def format(self, source): """Specifies the input data source format. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df = spark.read.format('json').load('python/test_support/sql/people.json') >>> df.dtypes [('age', 'bigint'), ('name', 'string')] """ self._jreader = self._jreader.format(source) return self
python
def format(self, source): """Specifies the input data source format. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df = spark.read.format('json').load('python/test_support/sql/people.json') >>> df.dtypes [('age', 'bigint'), ('name', 'string')] """ self._jreader = self._jreader.format(source) return self
[ "def", "format", "(", "self", ",", "source", ")", ":", "self", ".", "_jreader", "=", "self", ".", "_jreader", ".", "format", "(", "source", ")", "return", "self" ]
Specifies the input data source format. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df = spark.read.format('json').load('python/test_support/sql/people.json') >>> df.dtypes [('age', 'bigint'), ('name', 'string')]
[ "Specifies", "the", "input", "data", "source", "format", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L78-L89
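A short usage sketch, assuming an active SparkSession named `spark` and the sample file referenced in the doctest:

# Assumes `spark` exists and the doctest's sample file is available.
df = (spark.read
      .format("json")
      .load("python/test_support/sql/people.json"))
print(df.dtypes)   # [('age', 'bigint'), ('name', 'string')]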
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.schema
def schema(self, schema): """Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema automatically from data. By specifying the schema here, the underlying data source can skip the schema inference step, and thus speed up data loading. :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). >>> s = spark.read.schema("col0 INT, col1 DOUBLE") """ from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() if isinstance(schema, StructType): jschema = spark._jsparkSession.parseDataType(schema.json()) self._jreader = self._jreader.schema(jschema) elif isinstance(schema, basestring): self._jreader = self._jreader.schema(schema) else: raise TypeError("schema should be StructType or string") return self
python
def schema(self, schema): """Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema automatically from data. By specifying the schema here, the underlying data source can skip the schema inference step, and thus speed up data loading. :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). >>> s = spark.read.schema("col0 INT, col1 DOUBLE") """ from pyspark.sql import SparkSession spark = SparkSession.builder.getOrCreate() if isinstance(schema, StructType): jschema = spark._jsparkSession.parseDataType(schema.json()) self._jreader = self._jreader.schema(jschema) elif isinstance(schema, basestring): self._jreader = self._jreader.schema(schema) else: raise TypeError("schema should be StructType or string") return self
[ "def", "schema", "(", "self", ",", "schema", ")", ":", "from", "pyspark", ".", "sql", "import", "SparkSession", "spark", "=", "SparkSession", ".", "builder", ".", "getOrCreate", "(", ")", "if", "isinstance", "(", "schema", ",", "StructType", ")", ":", "jschema", "=", "spark", ".", "_jsparkSession", ".", "parseDataType", "(", "schema", ".", "json", "(", ")", ")", "self", ".", "_jreader", "=", "self", ".", "_jreader", ".", "schema", "(", "jschema", ")", "elif", "isinstance", "(", "schema", ",", "basestring", ")", ":", "self", ".", "_jreader", "=", "self", ".", "_jreader", ".", "schema", "(", "schema", ")", "else", ":", "raise", "TypeError", "(", "\"schema should be StructType or string\"", ")", "return", "self" ]
Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema automatically from data. By specifying the schema here, the underlying data source can skip the schema inference step, and thus speed up data loading. :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). >>> s = spark.read.schema("col0 INT, col1 DOUBLE")
[ "Specifies", "the", "input", "schema", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L92-L113
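Both accepted forms of schema(), sketched under the assumption of an active SparkSession named `spark`:

# A DDL-formatted string and a StructType are both accepted by schema().
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType

ddl_reader = spark.read.schema("col0 INT, col1 DOUBLE")

struct_reader = spark.read.schema(StructType([
    StructField("col0", IntegerType()),
    StructField("col1", DoubleType()),
]))
# Any other type raises TypeError("schema should be StructType or string").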
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.option
def option(self, key, value): """Adds an input option for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ self._jreader = self._jreader.option(key, to_str(value)) return self
python
def option(self, key, value): """Adds an input option for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ self._jreader = self._jreader.option(key, to_str(value)) return self
[ "def", "option", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_jreader", "=", "self", ".", "_jreader", ".", "option", "(", "key", ",", "to_str", "(", "value", ")", ")", "return", "self" ]
Adds an input option for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone.
[ "Adds", "an", "input", "option", "for", "the", "underlying", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L116-L125
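A hedged sketch of option() in use, assuming an active SparkSession named `spark`; note that values pass through to_str(), so Python booleans arrive at the JVM side as "true"/"false".

# Assumes `spark` is an active SparkSession.
reader = (spark.read
          .option("timeZone", "UTC")    # documented option for JSON/CSV sources
          .option("multiLine", True))   # converted to the string "true" by to_str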
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.options
def options(self, **options): """Adds input options for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ for k in options: self._jreader = self._jreader.option(k, to_str(options[k])) return self
python
def options(self, **options): """Adds input options for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone. """ for k in options: self._jreader = self._jreader.option(k, to_str(options[k])) return self
[ "def", "options", "(", "self", ",", "*", "*", "options", ")", ":", "for", "k", "in", "options", ":", "self", ".", "_jreader", "=", "self", ".", "_jreader", ".", "option", "(", "k", ",", "to_str", "(", "options", "[", "k", "]", ")", ")", "return", "self" ]
Adds input options for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone.
[ "Adds", "input", "options", "for", "the", "underlying", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L128-L138
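options() is the keyword-argument counterpart of chained option() calls; the two readers below end up configured identically (sketch, assuming an active SparkSession named `spark`).

# Assumes `spark` is an active SparkSession.
reader_chained = spark.read.option("timeZone", "UTC").option("multiLine", True)
reader_bulk = spark.read.options(timeZone="UTC", multiLine=True)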
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.load
def load(self, path=None, format=None, schema=None, **options): """Loads data from a data source and returns it as a :class`DataFrame`. :param path: optional string or a list of string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned', ... opt1=True, opt2=1, opt3='str') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] >>> df = spark.read.format('json').load(['python/test_support/sql/people.json', ... 'python/test_support/sql/people1.json']) >>> df.dtypes [('age', 'bigint'), ('aka', 'string'), ('name', 'string')] """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if isinstance(path, basestring): return self._df(self._jreader.load(path)) elif path is not None: if type(path) != list: path = [path] return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path))) else: return self._df(self._jreader.load())
python
def load(self, path=None, format=None, schema=None, **options): """Loads data from a data source and returns it as a :class`DataFrame`. :param path: optional string or a list of string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned', ... opt1=True, opt2=1, opt3='str') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] >>> df = spark.read.format('json').load(['python/test_support/sql/people.json', ... 'python/test_support/sql/people1.json']) >>> df.dtypes [('age', 'bigint'), ('aka', 'string'), ('name', 'string')] """ if format is not None: self.format(format) if schema is not None: self.schema(schema) self.options(**options) if isinstance(path, basestring): return self._df(self._jreader.load(path)) elif path is not None: if type(path) != list: path = [path] return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path))) else: return self._df(self._jreader.load())
[ "def", "load", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "schema", "=", "None", ",", "*", "*", "options", ")", ":", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "schema", "is", "not", "None", ":", "self", ".", "schema", "(", "schema", ")", "self", ".", "options", "(", "*", "*", "options", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", "path", ")", ")", "elif", "path", "is", "not", "None", ":", "if", "type", "(", "path", ")", "!=", "list", ":", "path", "=", "[", "path", "]", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "PythonUtils", ".", "toSeq", "(", "path", ")", ")", ")", "else", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "load", "(", ")", ")" ]
Loads data from a data source and returns it as a :class`DataFrame`. :param path: optional string or a list of string for file-system backed data sources. :param format: optional string for format of the data source. Default to 'parquet'. :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param options: all other string options >>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned', ... opt1=True, opt2=1, opt3='str') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] >>> df = spark.read.format('json').load(['python/test_support/sql/people.json', ... 'python/test_support/sql/people1.json']) >>> df.dtypes [('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
[ "Loads", "data", "from", "a", "data", "source", "and", "returns", "it", "as", "a", ":", "class", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L141-L172
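A sketch of the path shapes load() accepts, a single string or a list of strings, assuming an active SparkSession named `spark` and the sample files from the doctests:

# Assumes `spark` exists and the doctest sample files are present.
df_one = spark.read.load("python/test_support/sql/parquet_partitioned",
                         format="parquet")
df_many = spark.read.load(["python/test_support/sql/people.json",
                           "python/test_support/sql/people1.json"],
                          format="json")
# With path=None, load() is invoked with no path at all, for sources that are
# configured entirely through options.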
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.json
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None, dropFieldIfAllNull=None, encoding=None, locale=None): """ Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')] """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding, locale=locale) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) return self._df(self._jreader.json(jrdd)) else: raise TypeError("path can be only string, list or RDD")
python
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None, dropFieldIfAllNull=None, encoding=None, locale=None): """ Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. 
If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')] """ self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding, locale=locale) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) return self._df(self._jreader.json(jrdd)) else: raise TypeError("path can be only string, list or RDD")
[ "def", "json", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "primitivesAsString", "=", "None", ",", "prefersDecimal", "=", "None", ",", "allowComments", "=", "None", ",", "allowUnquotedFieldNames", "=", "None", ",", "allowSingleQuotes", "=", "None", ",", "allowNumericLeadingZero", "=", "None", ",", "allowBackslashEscapingAnyCharacter", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "multiLine", "=", "None", ",", "allowUnquotedControlChars", "=", "None", ",", "lineSep", "=", "None", ",", "samplingRatio", "=", "None", ",", "dropFieldIfAllNull", "=", "None", ",", "encoding", "=", "None", ",", "locale", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "primitivesAsString", "=", "primitivesAsString", ",", "prefersDecimal", "=", "prefersDecimal", ",", "allowComments", "=", "allowComments", ",", "allowUnquotedFieldNames", "=", "allowUnquotedFieldNames", ",", "allowSingleQuotes", "=", "allowSingleQuotes", ",", "allowNumericLeadingZero", "=", "allowNumericLeadingZero", ",", "allowBackslashEscapingAnyCharacter", "=", "allowBackslashEscapingAnyCharacter", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "multiLine", "=", "multiLine", ",", "allowUnquotedControlChars", "=", "allowUnquotedControlChars", ",", "lineSep", "=", "lineSep", ",", "samplingRatio", "=", "samplingRatio", ",", "dropFieldIfAllNull", "=", "dropFieldIfAllNull", ",", "encoding", "=", "encoding", ",", "locale", "=", "locale", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "path", "=", "[", "path", "]", "if", "type", "(", "path", ")", "==", "list", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "json", "(", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "PythonUtils", ".", "toSeq", "(", "path", ")", ")", ")", "elif", "isinstance", "(", "path", ",", "RDD", ")", ":", "def", "func", "(", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "if", "not", "isinstance", "(", "x", ",", "basestring", ")", ":", "x", "=", "unicode", "(", "x", ")", "if", "isinstance", "(", "x", ",", "unicode", ")", ":", "x", "=", "x", ".", "encode", "(", "\"utf-8\"", ")", "yield", "x", "keyed", "=", "path", ".", "mapPartitions", "(", "func", ")", "keyed", ".", "_bypass_serializer", "=", "True", "jrdd", "=", "keyed", ".", "_jrdd", ".", "map", "(", "self", ".", "_spark", ".", "_jvm", ".", "BytesToString", "(", ")", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "json", "(", "jrdd", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only string, list or RDD\"", ")" ]
Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. 
:param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. :param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')]
[ "Loads", "JSON", "files", "and", "returns", "the", "results", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L175-L293
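Two of the documented input shapes for json(), sketched under the assumption that `spark` (SparkSession) and `sc` (SparkContext) exist, as in the doctests:

# Assumes `spark` and `sc` exist and the sample file is available.
df_from_path = spark.read.json("python/test_support/sql/people.json",
                               multiLine=False)
rdd = sc.textFile("python/test_support/sql/people.json")
df_from_rdd = spark.read.json(rdd)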
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.parquet
def parquet(self, *paths): """Loads Parquet files, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] """ return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
python
def parquet(self, *paths):
    """Loads Parquet files, returning the result as a :class:`DataFrame`.

    You can set the following Parquet-specific option(s) for reading Parquet files:
        * ``mergeSchema``: sets whether we should merge schemas collected from all \
            Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
            The default value is specified in ``spark.sql.parquet.mergeSchema``.

    >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
    >>> df.dtypes
    [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
    """
    return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
[ "def", "parquet", "(", "self", ",", "*", "paths", ")", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "parquet", "(", "_to_seq", "(", "self", ".", "_spark", ".", "_sc", ",", "paths", ")", ")", ")" ]
Loads Parquet files, returning the result as a :class:`DataFrame`. You can set the following Parquet-specific option(s) for reading Parquet files: * ``mergeSchema``: sets whether we should merge schemas collected from all \ Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \ The default value is specified in ``spark.sql.parquet.mergeSchema``. >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
[ "Loads", "Parquet", "files", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L309-L321
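A minimal sketch of the documented ``mergeSchema`` option, assuming an active SparkSession; the path is an illustrative placeholder.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# mergeSchema reconciles schemas that differ across Parquet part-files.
df = spark.read.option("mergeSchema", "true").parquet("/tmp/events_parquet")
df.printSchema()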
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.text
def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
python
def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
[ "def", "text", "(", "self", ",", "paths", ",", "wholetext", "=", "False", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "wholetext", "=", "wholetext", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "paths", ",", "basestring", ")", ":", "paths", "=", "[", "paths", "]", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "text", "(", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "PythonUtils", ".", "toSeq", "(", "paths", ")", ")", ")" ]
Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')]
[ "Loads", "text", "files", "and", "returns", "a", ":", "class", ":", "DataFrame", "whose", "schema", "starts", "with", "a", "string", "column", "named", "value", "and", "followed", "by", "partitioned", "columns", "if", "there", "are", "any", ".", "The", "text", "files", "must", "be", "encoded", "as", "UTF", "-", "8", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L325-L349
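A minimal sketch contrasting the default line-per-row behaviour with ``wholetext``, assuming an active SparkSession and an illustrative path.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# Default: one row per line in the file.
lines = spark.read.text("/tmp/notes.txt")
# wholetext=True: one row per file, newlines preserved inside the value.
whole = spark.read.text("/tmp/notes.txt", wholetext=True)
lines.show()
whole.show(truncate=False)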
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.csv
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, samplingRatio=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. :param path: string, or list of strings, for input path(s), or RDD of Strings storing CSV rows. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. 
:param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse records, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise. :param samplingRatio: defines fraction of rows used for schema inferring. If None is set, it uses the default value, ``1.0``. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] >>> rdd = sc.textFile('python/test_support/sql/ages.csv') >>> df2 = spark.read.csv(rdd) >>> df2.dtypes [('_c0', 'string'), ('_c1', 'string')] """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) # see SPARK-22112 # There aren't any jvm api for creating a dataframe from rdd storing csv. # We can do it through creating a jvm dataset firstly and using the jvm api # for creating a dataframe from dataset storing csv. jdataset = self._spark._ssql_ctx.createDataset( jrdd.rdd(), self._spark._jvm.Encoders.STRING()) return self._df(self._jreader.csv(jdataset)) else: raise TypeError("path can be only string, list or RDD")
python
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None, comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None, negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None, maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None, columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None, samplingRatio=None, enforceSchema=None, emptyValue=None, locale=None, lineSep=None): r"""Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. :param path: string, or list of strings, for input path(s), or RDD of Strings storing CSV rows. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. 
:param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse records, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise. :param samplingRatio: defines fraction of rows used for schema inferring. If None is set, it uses the default value, ``1.0``. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. 
>>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] >>> rdd = sc.textFile('python/test_support/sql/ages.csv') >>> df2 = spark.read.csv(rdd) >>> df2.dtypes [('_c0', 'string'), ('_c1', 'string')] """ self._set_opts( schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment, header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue, nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf, dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns, maxCharsPerColumn=maxCharsPerColumn, maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio, enforceSchema=enforceSchema, emptyValue=emptyValue, locale=locale, lineSep=lineSep) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) # see SPARK-22112 # There aren't any jvm api for creating a dataframe from rdd storing csv. # We can do it through creating a jvm dataset firstly and using the jvm api # for creating a dataframe from dataset storing csv. jdataset = self._spark._ssql_ctx.createDataset( jrdd.rdd(), self._spark._jvm.Encoders.STRING()) return self._df(self._jreader.csv(jdataset)) else: raise TypeError("path can be only string, list or RDD")
[ "def", "csv", "(", "self", ",", "path", ",", "schema", "=", "None", ",", "sep", "=", "None", ",", "encoding", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "comment", "=", "None", ",", "header", "=", "None", ",", "inferSchema", "=", "None", ",", "ignoreLeadingWhiteSpace", "=", "None", ",", "ignoreTrailingWhiteSpace", "=", "None", ",", "nullValue", "=", "None", ",", "nanValue", "=", "None", ",", "positiveInf", "=", "None", ",", "negativeInf", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "maxColumns", "=", "None", ",", "maxCharsPerColumn", "=", "None", ",", "maxMalformedLogPerPartition", "=", "None", ",", "mode", "=", "None", ",", "columnNameOfCorruptRecord", "=", "None", ",", "multiLine", "=", "None", ",", "charToEscapeQuoteEscaping", "=", "None", ",", "samplingRatio", "=", "None", ",", "enforceSchema", "=", "None", ",", "emptyValue", "=", "None", ",", "locale", "=", "None", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "schema", "=", "schema", ",", "sep", "=", "sep", ",", "encoding", "=", "encoding", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "comment", "=", "comment", ",", "header", "=", "header", ",", "inferSchema", "=", "inferSchema", ",", "ignoreLeadingWhiteSpace", "=", "ignoreLeadingWhiteSpace", ",", "ignoreTrailingWhiteSpace", "=", "ignoreTrailingWhiteSpace", ",", "nullValue", "=", "nullValue", ",", "nanValue", "=", "nanValue", ",", "positiveInf", "=", "positiveInf", ",", "negativeInf", "=", "negativeInf", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "maxColumns", "=", "maxColumns", ",", "maxCharsPerColumn", "=", "maxCharsPerColumn", ",", "maxMalformedLogPerPartition", "=", "maxMalformedLogPerPartition", ",", "mode", "=", "mode", ",", "columnNameOfCorruptRecord", "=", "columnNameOfCorruptRecord", ",", "multiLine", "=", "multiLine", ",", "charToEscapeQuoteEscaping", "=", "charToEscapeQuoteEscaping", ",", "samplingRatio", "=", "samplingRatio", ",", "enforceSchema", "=", "enforceSchema", ",", "emptyValue", "=", "emptyValue", ",", "locale", "=", "locale", ",", "lineSep", "=", "lineSep", ")", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "path", "=", "[", "path", "]", "if", "type", "(", "path", ")", "==", "list", ":", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "csv", "(", "self", ".", "_spark", ".", "_sc", ".", "_jvm", ".", "PythonUtils", ".", "toSeq", "(", "path", ")", ")", ")", "elif", "isinstance", "(", "path", ",", "RDD", ")", ":", "def", "func", "(", "iterator", ")", ":", "for", "x", "in", "iterator", ":", "if", "not", "isinstance", "(", "x", ",", "basestring", ")", ":", "x", "=", "unicode", "(", "x", ")", "if", "isinstance", "(", "x", ",", "unicode", ")", ":", "x", "=", "x", ".", "encode", "(", "\"utf-8\"", ")", "yield", "x", "keyed", "=", "path", ".", "mapPartitions", "(", "func", ")", "keyed", ".", "_bypass_serializer", "=", "True", "jrdd", "=", "keyed", ".", "_jrdd", ".", "map", "(", "self", ".", "_spark", ".", "_jvm", ".", "BytesToString", "(", ")", ")", "# see SPARK-22112", "# There aren't any jvm api for creating a dataframe from rdd storing csv.", "# We can do it through creating a jvm dataset firstly and using the jvm api", "# for creating a dataframe from dataset storing csv.", "jdataset", "=", "self", ".", "_spark", ".", "_ssql_ctx", ".", "createDataset", "(", "jrdd", ".", "rdd", "(", ")", ",", "self", ".", "_spark", ".", "_jvm", ".", "Encoders", ".", "STRING", "(", ")", ")", 
"return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "csv", "(", "jdataset", ")", ")", "else", ":", "raise", "TypeError", "(", "\"path can be only string, list or RDD\"", ")" ]
r"""Loads a CSV file and returns the result as a :class:`DataFrame`. This function will go through the input once to determine the input schema if ``inferSchema`` is enabled. To avoid going through the entire data once, disable ``inferSchema`` option or specify the schema explicitly using ``schema``. :param path: string, or list of strings, for input path(s), or RDD of Strings storing CSV rows. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param encoding: decodes the CSV files by the given encoding type. If None is set, it uses the default value, ``UTF-8``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If you would like to turn off quotations, you need to set an empty string. :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\``. :param comment: sets a single character used for skipping lines beginning with this character. By default (None), it is disabled. :param header: uses the first line as names of columns. If None is set, it uses the default value, ``false``. :param inferSchema: infers the input schema automatically from data. It requires one extra pass over the data. If None is set, it uses the default value, ``false``. :param enforceSchema: If it is set to ``true``, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to ``false``, the schema will be validated against all headers in CSV files or the first header in RDD if the ``header`` option is set to ``true``. Field names in the schema and column names in CSV headers are checked by their positions taking into account ``spark.sql.caseSensitive``. If None is set, ``true`` is used by default. Though the default value is ``true``, it is recommended to disable the ``enforceSchema`` option to avoid incorrect results. :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from values being read should be skipped. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. Since 2.0.1, this ``nullValue`` param applies to all supported types including the string type. :param nanValue: sets the string representation of a non-number value. If None is set, it uses the default value, ``NaN``. :param positiveInf: sets the string representation of a positive infinity value. If None is set, it uses the default value, ``Inf``. :param negativeInf: sets the string representation of a negative infinity value. If None is set, it uses the default value, ``Inf``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. 
Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param maxColumns: defines a hard limit of how many columns a record can have. If None is set, it uses the default value, ``20480``. :param maxCharsPerColumn: defines the maximum number of characters allowed for any given value being read. If None is set, it uses the default value, ``-1`` meaning unlimited length. :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0. If specified, it is ignored. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ A record with less/more tokens than schema is not a corrupted record to CSV. \ When it meets a record having fewer tokens than the length of the schema, \ sets ``null`` to extra fields. When the record has more tokens than the \ length of the schema, it drops extra tokens. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param multiLine: parse records, which may span multiple lines. If None is set, it uses the default value, ``false``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise. :param samplingRatio: defines fraction of rows used for schema inferring. If None is set, it uses the default value, ``1.0``. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, empty string. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. Maximum length is 1 character. >>> df = spark.read.csv('python/test_support/sql/ages.csv') >>> df.dtypes [('_c0', 'string'), ('_c1', 'string')] >>> rdd = sc.textFile('python/test_support/sql/ages.csv') >>> df2 = spark.read.csv(rdd) >>> df2.dtypes [('_c0', 'string'), ('_c1', 'string')]
[ "r", "Loads", "a", "CSV", "file", "and", "returns", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L352-L506
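A hedged sketch of a typical CSV read using a handful of the options documented above; the path, separator and null marker are illustrative assumptions.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# header/inferSchema cover the common case; an explicit DDL schema string would
# avoid the extra inference pass on large inputs.
df = spark.read.csv("/tmp/ages.csv", header=True, inferSchema=True, sep=",",
                    nullValue="NA", mode="DROPMALFORMED")
df.printSchema()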
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.orc
def orc(self, path):
    """Loads ORC files, returning the result as a :class:`DataFrame`.

    >>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
    >>> df.dtypes
    [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
    """
    if isinstance(path, basestring):
        path = [path]
    return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
python
def orc(self, path):
    """Loads ORC files, returning the result as a :class:`DataFrame`.

    >>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
    >>> df.dtypes
    [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
    """
    if isinstance(path, basestring):
        path = [path]
    return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
[ "def", "orc", "(", "self", ",", "path", ")", ":", "if", "isinstance", "(", "path", ",", "basestring", ")", ":", "path", "=", "[", "path", "]", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "orc", "(", "_to_seq", "(", "self", ".", "_spark", ".", "_sc", ",", "path", ")", ")", ")" ]
Loads ORC files, returning the result as a :class:`DataFrame`. >>> df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> df.dtypes [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
[ "Loads", "ORC", "files", "returning", "the", "result", "as", "a", ":", "class", ":", "DataFrame", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L509-L518
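A minimal sketch showing that ``orc`` accepts either a single path or a list of paths; both paths are illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# A list of paths is wrapped into a Seq on the JVM side, just like a single path.
df = spark.read.orc(["/tmp/orc/year=2024", "/tmp/orc/year=2025"])
print(df.dtypes)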
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.jdbc
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None, predicates=None, properties=None): """ Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) if column is not None: assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified" assert upperBound is not None, "upperBound can not be None when ``column`` is specified" assert numPartitions is not None, \ "numPartitions can not be None when ``column`` is specified" return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound), int(numPartitions), jprop)) if predicates is not None: gateway = self._spark._sc._gateway jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates) return self._df(self._jreader.jdbc(url, table, jpredicates, jprop)) return self._df(self._jreader.jdbc(url, table, jprop))
python
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None, predicates=None, properties=None): """ Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) if column is not None: assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified" assert upperBound is not None, "upperBound can not be None when ``column`` is specified" assert numPartitions is not None, \ "numPartitions can not be None when ``column`` is specified" return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound), int(numPartitions), jprop)) if predicates is not None: gateway = self._spark._sc._gateway jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates) return self._df(self._jreader.jdbc(url, table, jpredicates, jprop)) return self._df(self._jreader.jdbc(url, table, jprop))
[ "def", "jdbc", "(", "self", ",", "url", ",", "table", ",", "column", "=", "None", ",", "lowerBound", "=", "None", ",", "upperBound", "=", "None", ",", "numPartitions", "=", "None", ",", "predicates", "=", "None", ",", "properties", "=", "None", ")", ":", "if", "properties", "is", "None", ":", "properties", "=", "dict", "(", ")", "jprop", "=", "JavaClass", "(", "\"java.util.Properties\"", ",", "self", ".", "_spark", ".", "_sc", ".", "_gateway", ".", "_gateway_client", ")", "(", ")", "for", "k", "in", "properties", ":", "jprop", ".", "setProperty", "(", "k", ",", "properties", "[", "k", "]", ")", "if", "column", "is", "not", "None", ":", "assert", "lowerBound", "is", "not", "None", ",", "\"lowerBound can not be None when ``column`` is specified\"", "assert", "upperBound", "is", "not", "None", ",", "\"upperBound can not be None when ``column`` is specified\"", "assert", "numPartitions", "is", "not", "None", ",", "\"numPartitions can not be None when ``column`` is specified\"", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "column", ",", "int", "(", "lowerBound", ")", ",", "int", "(", "upperBound", ")", ",", "int", "(", "numPartitions", ")", ",", "jprop", ")", ")", "if", "predicates", "is", "not", "None", ":", "gateway", "=", "self", ".", "_spark", ".", "_sc", ".", "_gateway", "jpredicates", "=", "utils", ".", "toJArray", "(", "gateway", ",", "gateway", ".", "jvm", ".", "java", ".", "lang", ".", "String", ",", "predicates", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jpredicates", ",", "jprop", ")", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jprop", ")", ")" ]
Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame
[ "Construct", "a", ":", "class", ":", "DataFrame", "representing", "the", "database", "table", "named", "table", "accessible", "via", "JDBC", "URL", "url", "and", "connection", "properties", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L521-L569
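A hedged sketch of a partitioned JDBC read. The URL, table, credentials and bounds are placeholders; a matching JDBC driver must be on the Spark classpath for this to run.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

# column, lowerBound, upperBound and numPartitions must be supplied together;
# Spark turns them into WHERE-clause strides, one per partition.
df = spark.read.jdbc(
    url="jdbc:postgresql://db-host:5432/shop",   # placeholder endpoint
    table="orders",                              # placeholder table
    column="order_id", lowerBound=1, upperBound=1000000, numPartitions=8,
    properties={"user": "reporting", "password": "secret",
                "driver": "org.postgresql.Driver"})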
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.mode
def mode(self, saveMode):
    """Specifies the behavior when data or table already exists.

    Options include:

    * `append`: Append contents of this :class:`DataFrame` to existing data.
    * `overwrite`: Overwrite existing data.
    * `error` or `errorifexists`: Throw an exception if data already exists.
    * `ignore`: Silently ignore this operation if data already exists.

    >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    # At the JVM side, the default value of mode is already set to "error".
    # So, if the given saveMode is None, we will not call JVM-side's mode method.
    if saveMode is not None:
        self._jwrite = self._jwrite.mode(saveMode)
    return self
python
def mode(self, saveMode):
    """Specifies the behavior when data or table already exists.

    Options include:

    * `append`: Append contents of this :class:`DataFrame` to existing data.
    * `overwrite`: Overwrite existing data.
    * `error` or `errorifexists`: Throw an exception if data already exists.
    * `ignore`: Silently ignore this operation if data already exists.

    >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    # At the JVM side, the default value of mode is already set to "error".
    # So, if the given saveMode is None, we will not call JVM-side's mode method.
    if saveMode is not None:
        self._jwrite = self._jwrite.mode(saveMode)
    return self
[ "def", "mode", "(", "self", ",", "saveMode", ")", ":", "# At the JVM side, the default value of mode is already set to \"error\".", "# So, if the given saveMode is None, we will not call JVM-side's mode method.", "if", "saveMode", "is", "not", "None", ":", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "mode", "(", "saveMode", ")", "return", "self" ]
Specifies the behavior when data or table already exists. Options include: * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Specifies", "the", "behavior", "when", "data", "or", "table", "already", "exists", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L590-L606
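A minimal sketch of the save modes, assuming an active SparkSession; the output path is illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.range(10)  # stand-in DataFrame

# 'error'/'errorifexists' is the default; 'overwrite' replaces any existing output.
df.write.mode("overwrite").parquet("/tmp/range_parquet")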
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.format
def format(self, source):
    """Specifies the underlying output data source.

    :param source: string, name of the data source, e.g. 'json', 'parquet'.

    >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    self._jwrite = self._jwrite.format(source)
    return self
python
def format(self, source):
    """Specifies the underlying output data source.

    :param source: string, name of the data source, e.g. 'json', 'parquet'.

    >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    self._jwrite = self._jwrite.format(source)
    return self
[ "def", "format", "(", "self", ",", "source", ")", ":", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "format", "(", "source", ")", "return", "self" ]
Specifies the underlying output data source. :param source: string, name of the data source, e.g. 'json', 'parquet'. >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Specifies", "the", "underlying", "output", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L609-L617
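A minimal sketch of selecting an output source by name; the path is illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.range(5)

# The same call shape works for 'parquet', 'csv', 'orc' and other built-in sources.
df.write.format("json").mode("overwrite").save("/tmp/range_json")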
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.option
def option(self, key, value):
    """Adds an output option for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be used to format
            timestamps in the JSON/CSV datasources or partition values.
            If it isn't set, it uses the default value, session local timezone.
    """
    self._jwrite = self._jwrite.option(key, to_str(value))
    return self
python
def option(self, key, value):
    """Adds an output option for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be used to format
            timestamps in the JSON/CSV datasources or partition values.
            If it isn't set, it uses the default value, session local timezone.
    """
    self._jwrite = self._jwrite.option(key, to_str(value))
    return self
[ "def", "option", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "option", "(", "key", ",", "to_str", "(", "value", ")", ")", "return", "self" ]
Adds an output option for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone.
[ "Adds", "an", "output", "option", "for", "the", "underlying", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L620-L629
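A hedged sketch of the documented ``timeZone`` write option; the path is illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.sql("SELECT current_timestamp() AS ts")

# timeZone controls how the timestamp column is formatted in the CSV output.
df.write.option("timeZone", "UTC").mode("overwrite").csv("/tmp/ts_csv")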
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.options
def options(self, **options):
    """Adds output options for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be used to format
            timestamps in the JSON/CSV datasources or partition values.
            If it isn't set, it uses the default value, session local timezone.
    """
    for k in options:
        self._jwrite = self._jwrite.option(k, to_str(options[k]))
    return self
python
def options(self, **options):
    """Adds output options for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be used to format
            timestamps in the JSON/CSV datasources or partition values.
            If it isn't set, it uses the default value, session local timezone.
    """
    for k in options:
        self._jwrite = self._jwrite.option(k, to_str(options[k]))
    return self
[ "def", "options", "(", "self", ",", "*", "*", "options", ")", ":", "for", "k", "in", "options", ":", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "option", "(", "k", ",", "to_str", "(", "options", "[", "k", "]", ")", ")", "return", "self" ]
Adds output options for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone.
[ "Adds", "output", "options", "for", "the", "underlying", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L632-L642
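A hedged sketch of setting several write options at once via keyword arguments; the path and column names are illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "label"])

# Each keyword is forwarded to option(); non-string values go through to_str().
df.write.options(header=True, timeZone="UTC").mode("overwrite").csv("/tmp/labels_csv")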
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.partitionBy
def partitionBy(self, *cols):
    """Partitions the output by the given columns on the file system.

    If specified, the output is laid out on the file system similar
    to Hive's partitioning scheme.

    :param cols: name of columns

    >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
        cols = cols[0]
    self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
    return self
python
def partitionBy(self, *cols):
    """Partitions the output by the given columns on the file system.

    If specified, the output is laid out on the file system similar
    to Hive's partitioning scheme.

    :param cols: name of columns

    >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
        cols = cols[0]
    self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
    return self
[ "def", "partitionBy", "(", "self", ",", "*", "cols", ")", ":", "if", "len", "(", "cols", ")", "==", "1", "and", "isinstance", "(", "cols", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "cols", "=", "cols", "[", "0", "]", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "partitionBy", "(", "_to_seq", "(", "self", ".", "_spark", ".", "_sc", ",", "cols", ")", ")", "return", "self" ]
Partitions the output by the given columns on the file system. If specified, the output is laid out on the file system similar to Hive's partitioning scheme. :param cols: name of columns >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Partitions", "the", "output", "by", "the", "given", "columns", "on", "the", "file", "system", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L645-L658
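A minimal sketch of Hive-style partitioned output; the path and columns are illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(2024, 1, "x"), (2024, 2, "y")], ["year", "month", "value"])

# Produces .../year=2024/month=1/... directories under the target path.
df.write.partitionBy("year", "month").mode("overwrite").parquet("/tmp/by_year_month")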
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.sortBy
def sortBy(self, col, *cols): """Sorts the output in each bucket by the given columns on the file system. :param col: a name of a column, or a list of names. :param cols: additional names (optional). If `col` is a list it should be empty. >>> (df.write.format('parquet') # doctest: +SKIP ... .bucketBy(100, 'year', 'month') ... .sortBy('day') ... .mode("overwrite") ... .saveAsTable('sorted_bucketed_table')) """ if isinstance(col, (list, tuple)): if cols: raise ValueError("col is a {0} but cols are not empty".format(type(col))) col, cols = col[0], col[1:] if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)): raise TypeError("all names should be `str`") self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols)) return self
python
def sortBy(self, col, *cols): """Sorts the output in each bucket by the given columns on the file system. :param col: a name of a column, or a list of names. :param cols: additional names (optional). If `col` is a list it should be empty. >>> (df.write.format('parquet') # doctest: +SKIP ... .bucketBy(100, 'year', 'month') ... .sortBy('day') ... .mode("overwrite") ... .saveAsTable('sorted_bucketed_table')) """ if isinstance(col, (list, tuple)): if cols: raise ValueError("col is a {0} but cols are not empty".format(type(col))) col, cols = col[0], col[1:] if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)): raise TypeError("all names should be `str`") self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols)) return self
[ "def", "sortBy", "(", "self", ",", "col", ",", "*", "cols", ")", ":", "if", "isinstance", "(", "col", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "cols", ":", "raise", "ValueError", "(", "\"col is a {0} but cols are not empty\"", ".", "format", "(", "type", "(", "col", ")", ")", ")", "col", ",", "cols", "=", "col", "[", "0", "]", ",", "col", "[", "1", ":", "]", "if", "not", "all", "(", "isinstance", "(", "c", ",", "basestring", ")", "for", "c", "in", "cols", ")", "or", "not", "(", "isinstance", "(", "col", ",", "basestring", ")", ")", ":", "raise", "TypeError", "(", "\"all names should be `str`\"", ")", "self", ".", "_jwrite", "=", "self", ".", "_jwrite", ".", "sortBy", "(", "col", ",", "_to_seq", "(", "self", ".", "_spark", ".", "_sc", ",", "cols", ")", ")", "return", "self" ]
Sorts the output in each bucket by the given columns on the file system. :param col: a name of a column, or a list of names. :param cols: additional names (optional). If `col` is a list it should be empty. >>> (df.write.format('parquet') # doctest: +SKIP ... .bucketBy(100, 'year', 'month') ... .sortBy('day') ... .mode("overwrite") ... .saveAsTable('sorted_bucketed_table'))
[ "Sorts", "the", "output", "in", "each", "bucket", "by", "the", "given", "columns", "on", "the", "file", "system", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L693-L715
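A hedged sketch of a bucketed, sorted table write. sortBy only takes effect together with bucketBy and a saveAsTable() sink, and this assumes the session has a writable warehouse location; the table and column names are illustrative.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, "2024-01-01"), (2, "2024-01-02")], ["id", "day"])

# Bucketing and sorting metadata are only persisted through the table catalog.
(df.write.format("parquet")
    .bucketBy(4, "id")
    .sortBy("day")
    .mode("overwrite")
    .saveAsTable("bucketed_events"))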
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.save
def save(self, path=None, format=None, mode=None, partitionBy=None, **options): """Saves the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. :param path: the path in a Hadoop supported file system :param format: the format used to save :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param options: all other string options >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if path is None: self._jwrite.save() else: self._jwrite.save(path)
python
def save(self, path=None, format=None, mode=None, partitionBy=None, **options): """Saves the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. :param path: the path in a Hadoop supported file system :param format: the format used to save :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param options: all other string options >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) if path is None: self._jwrite.save() else: self._jwrite.save(path)
[ "def", "save", "(", "self", ",", "path", "=", "None", ",", "format", "=", "None", ",", "mode", "=", "None", ",", "partitionBy", "=", "None", ",", "*", "*", "options", ")", ":", "self", ".", "mode", "(", "mode", ")", ".", "options", "(", "*", "*", "options", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "if", "path", "is", "None", ":", "self", ".", "_jwrite", ".", "save", "(", ")", "else", ":", "self", ".", "_jwrite", ".", "save", "(", "path", ")" ]
Saves the contents of the :class:`DataFrame` to a data source. The data source is specified by the ``format`` and a set of ``options``. If ``format`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. :param path: the path in a Hadoop supported file system :param format: the format used to save :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param options: all other string options >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Saves", "the", "contents", "of", "the", ":", "class", ":", "DataFrame", "to", "a", "data", "source", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L718-L747
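A hedged sketch of the generic save() path, reusing the `df`, `os`, and `tempfile` names assumed by the doctests in this file; the partition column (`year`) and the options are illustrative and assume `df` actually has such a column.

import os, tempfile

out_dir = os.path.join(tempfile.mkdtemp(), "events")
# format, mode, partition columns and arbitrary string options in a single call.
df.write.save(path=out_dir, format="parquet", mode="overwrite",
              partitionBy="year", compression="snappy")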
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.insertInto
def insertInto(self, tableName, overwrite=False): """Inserts the content of the :class:`DataFrame` to the specified table. It requires that the schema of the :class:`DataFrame` is the same as the schema of the table. Optionally overwriting any existing data. """ self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
python
def insertInto(self, tableName, overwrite=False): """Inserts the content of the :class:`DataFrame` to the specified table. It requires that the schema of the :class:`DataFrame` is the same as the schema of the table. Optionally overwriting any existing data. """ self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
[ "def", "insertInto", "(", "self", ",", "tableName", ",", "overwrite", "=", "False", ")", ":", "self", ".", "_jwrite", ".", "mode", "(", "\"overwrite\"", "if", "overwrite", "else", "\"append\"", ")", ".", "insertInto", "(", "tableName", ")" ]
Inserts the content of the :class:`DataFrame` to the specified table. It requires that the schema of the :class:`DataFrame` is the same as the schema of the table. Optionally overwriting any existing data.
[ "Inserts", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "to", "the", "specified", "table", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L750-L758
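A small sketch of insertInto(), assuming a DataFrame `df` and an illustrative table name; the target table must already exist with a matching layout, and rows are resolved to columns by position rather than by name.

# One-time setup: create the target table from the same schema.
df.write.saveAsTable("events_target")

df.write.insertInto("events_target")                  # append
df.write.insertInto("events_target", overwrite=True)  # replace existing data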
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.saveAsTable
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options): """Saves the content of the :class:`DataFrame` as the specified table. In the case the table already exists, behavior of this function depends on the save mode, specified by the `mode` function (default to throwing an exception). When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be the same as that of the existing table. * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. :param name: the table name :param format: the format used to save :param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \ (default: error) :param partitionBy: names of partitioning columns :param options: all other string options """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) self._jwrite.saveAsTable(name)
python
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options): """Saves the content of the :class:`DataFrame` as the specified table. In the case the table already exists, behavior of this function depends on the save mode, specified by the `mode` function (default to throwing an exception). When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be the same as that of the existing table. * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. :param name: the table name :param format: the format used to save :param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \ (default: error) :param partitionBy: names of partitioning columns :param options: all other string options """ self.mode(mode).options(**options) if partitionBy is not None: self.partitionBy(partitionBy) if format is not None: self.format(format) self._jwrite.saveAsTable(name)
[ "def", "saveAsTable", "(", "self", ",", "name", ",", "format", "=", "None", ",", "mode", "=", "None", ",", "partitionBy", "=", "None", ",", "*", "*", "options", ")", ":", "self", ".", "mode", "(", "mode", ")", ".", "options", "(", "*", "*", "options", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "if", "format", "is", "not", "None", ":", "self", ".", "format", "(", "format", ")", "self", ".", "_jwrite", ".", "saveAsTable", "(", "name", ")" ]
Saves the content of the :class:`DataFrame` as the specified table. In the case the table already exists, behavior of this function depends on the save mode, specified by the `mode` function (default to throwing an exception). When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be the same as that of the existing table. * `append`: Append contents of this :class:`DataFrame` to existing data. * `overwrite`: Overwrite existing data. * `error` or `errorifexists`: Throw an exception if data already exists. * `ignore`: Silently ignore this operation if data already exists. :param name: the table name :param format: the format used to save :param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \ (default: error) :param partitionBy: names of partitioning columns :param options: all other string options
[ "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "as", "the", "specified", "table", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L761-L786
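A hedged sketch of saveAsTable(); it assumes `df` has a `year` column, and the table name, format, and partitioning are illustrative.

(df.write
   .format("parquet")
   .mode("append")
   .partitionBy("year")
   .saveAsTable("events_by_year"))

# Equivalent keyword form:
# df.write.saveAsTable("events_by_year", format="parquet", mode="append", partitionBy="year")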
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.json
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None, lineSep=None, encoding=None): """Saves the content of the :class:`DataFrame` in JSON format (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param encoding: specifies encoding (charset) of saved json files. If None is set, the default UTF-8 charset will be used. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts( compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat, lineSep=lineSep, encoding=encoding) self._jwrite.json(path)
python
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None, lineSep=None, encoding=None): """Saves the content of the :class:`DataFrame` in JSON format (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param encoding: specifies encoding (charset) of saved json files. If None is set, the default UTF-8 charset will be used. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts( compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat, lineSep=lineSep, encoding=encoding) self._jwrite.json(path)
[ "def", "json", "(", "self", ",", "path", ",", "mode", "=", "None", ",", "compression", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "lineSep", "=", "None", ",", "encoding", "=", "None", ")", ":", "self", ".", "mode", "(", "mode", ")", "self", ".", "_set_opts", "(", "compression", "=", "compression", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "lineSep", "=", "lineSep", ",", "encoding", "=", "encoding", ")", "self", ".", "_jwrite", ".", "json", "(", "path", ")" ]
Saves the content of the :class:`DataFrame` in JSON format (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param encoding: specifies encoding (charset) of saved json files. If None is set, the default UTF-8 charset will be used. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "in", "JSON", "format", "(", "JSON", "Lines", "text", "format", "or", "newline", "-", "delimited", "JSON", "<http", ":", "//", "jsonlines", ".", "org", "/", ">", "_", ")", "at", "the", "specified", "path", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L789-L826
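A minimal sketch of the JSON writer, following the `df`, `os`, and `tempfile` convention of the doctests above; the option values are illustrative.

import os, tempfile

json_dir = os.path.join(tempfile.mkdtemp(), "data_json")
# Write JSON Lines output with gzip compression and an explicit date format and charset.
df.write.json(json_dir, mode="overwrite", compression="gzip",
              dateFormat="yyyy-MM-dd", encoding="UTF-8")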
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.parquet
def parquet(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in Parquet format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, uncompressed, snappy, gzip, lzo, brotli, lz4, and zstd). This will override ``spark.sql.parquet.compression.codec``. If None is set, it uses the value specified in ``spark.sql.parquet.compression.codec``. >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.parquet(path)
python
def parquet(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in Parquet format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, uncompressed, snappy, gzip, lzo, brotli, lz4, and zstd). This will override ``spark.sql.parquet.compression.codec``. If None is set, it uses the value specified in ``spark.sql.parquet.compression.codec``. >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.parquet(path)
[ "def", "parquet", "(", "self", ",", "path", ",", "mode", "=", "None", ",", "partitionBy", "=", "None", ",", "compression", "=", "None", ")", ":", "self", ".", "mode", "(", "mode", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "self", ".", "_set_opts", "(", "compression", "=", "compression", ")", "self", ".", "_jwrite", ".", "parquet", "(", "path", ")" ]
Saves the content of the :class:`DataFrame` in Parquet format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, uncompressed, snappy, gzip, lzo, brotli, lz4, and zstd). This will override ``spark.sql.parquet.compression.codec``. If None is set, it uses the value specified in ``spark.sql.parquet.compression.codec``. >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "in", "Parquet", "format", "at", "the", "specified", "path", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L829-L853
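A minimal sketch of the Parquet writer under the same assumptions (a DataFrame `df` with a `year` column; values illustrative).

import os, tempfile

parquet_dir = os.path.join(tempfile.mkdtemp(), "data_parquet")
# Overrides spark.sql.parquet.compression.codec for this write only.
df.write.parquet(parquet_dir, mode="overwrite", partitionBy="year", compression="snappy")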
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.text
def text(self, path, compression=None, lineSep=None): """Saves the content of the DataFrame in a text file at the specified path. The text files will be encoded as UTF-8. :param path: the path in any Hadoop supported file system :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. The DataFrame must have only one column that is of string type. Each row becomes a new line in the output file. """ self._set_opts(compression=compression, lineSep=lineSep) self._jwrite.text(path)
python
def text(self, path, compression=None, lineSep=None): """Saves the content of the DataFrame in a text file at the specified path. The text files will be encoded as UTF-8. :param path: the path in any Hadoop supported file system :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. The DataFrame must have only one column that is of string type. Each row becomes a new line in the output file. """ self._set_opts(compression=compression, lineSep=lineSep) self._jwrite.text(path)
[ "def", "text", "(", "self", ",", "path", ",", "compression", "=", "None", ",", "lineSep", "=", "None", ")", ":", "self", ".", "_set_opts", "(", "compression", "=", "compression", ",", "lineSep", "=", "lineSep", ")", "self", ".", "_jwrite", ".", "text", "(", "path", ")" ]
Saves the content of the DataFrame in a text file at the specified path. The text files will be encoded as UTF-8. :param path: the path in any Hadoop supported file system :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. The DataFrame must have only one column that is of string type. Each row becomes a new line in the output file.
[ "Saves", "the", "content", "of", "the", "DataFrame", "in", "a", "text", "file", "at", "the", "specified", "path", ".", "The", "text", "files", "will", "be", "encoded", "as", "UTF", "-", "8", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L856-L871
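A sketch of the text writer; it assumes `df` has a column named `name` (illustrative). Because text() accepts exactly one string-typed column, the example projects down to a single string column before writing.

import os, tempfile

text_dir = os.path.join(tempfile.mkdtemp(), "data_text")
df.select(df.name.cast("string").alias("value")).write.text(text_dir, compression="gzip")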
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.csv
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None, header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None, timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None): r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If an empty string is set, it uses ``u0000`` (null character). :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\`` :param escapeQuotes: a flag indicating whether values containing quotes should always be enclosed in quotes. If None is set, it uses the default value ``true``, escaping all values containing a quote character. :param quoteAll: a flag indicating whether all values should always be enclosed in quotes. If None is set, it uses the default value ``false``, only escaping values containing a quote character. :param header: writes the names of columns as the first line. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param encoding: sets the encoding (charset) of saved csv files. If None is set, the default UTF-8 charset will be used. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, ``""``. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. Maximum length is 1 character. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header, nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll, dateFormat=dateFormat, timestampFormat=timestampFormat, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, encoding=encoding, emptyValue=emptyValue, lineSep=lineSep) self._jwrite.csv(path)
python
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None, header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None, timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None, charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None, lineSep=None): r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If an empty string is set, it uses ``u0000`` (null character). :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\`` :param escapeQuotes: a flag indicating whether values containing quotes should always be enclosed in quotes. If None is set, it uses the default value ``true``, escaping all values containing a quote character. :param quoteAll: a flag indicating whether all values should always be enclosed in quotes. If None is set, it uses the default value ``false``, only escaping values containing a quote character. :param header: writes the names of columns as the first line. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param encoding: sets the encoding (charset) of saved csv files. If None is set, the default UTF-8 charset will be used. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, ``""``. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. Maximum length is 1 character. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header, nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll, dateFormat=dateFormat, timestampFormat=timestampFormat, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, encoding=encoding, emptyValue=emptyValue, lineSep=lineSep) self._jwrite.csv(path)
[ "def", "csv", "(", "self", ",", "path", ",", "mode", "=", "None", ",", "compression", "=", "None", ",", "sep", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "header", "=", "None", ",", "nullValue", "=", "None", ",", "escapeQuotes", "=", "None", ",", "quoteAll", "=", "None", ",", "dateFormat", "=", "None", ",", "timestampFormat", "=", "None", ",", "ignoreLeadingWhiteSpace", "=", "None", ",", "ignoreTrailingWhiteSpace", "=", "None", ",", "charToEscapeQuoteEscaping", "=", "None", ",", "encoding", "=", "None", ",", "emptyValue", "=", "None", ",", "lineSep", "=", "None", ")", ":", "self", ".", "mode", "(", "mode", ")", "self", ".", "_set_opts", "(", "compression", "=", "compression", ",", "sep", "=", "sep", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "header", "=", "header", ",", "nullValue", "=", "nullValue", ",", "escapeQuotes", "=", "escapeQuotes", ",", "quoteAll", "=", "quoteAll", ",", "dateFormat", "=", "dateFormat", ",", "timestampFormat", "=", "timestampFormat", ",", "ignoreLeadingWhiteSpace", "=", "ignoreLeadingWhiteSpace", ",", "ignoreTrailingWhiteSpace", "=", "ignoreTrailingWhiteSpace", ",", "charToEscapeQuoteEscaping", "=", "charToEscapeQuoteEscaping", ",", "encoding", "=", "encoding", ",", "emptyValue", "=", "emptyValue", ",", "lineSep", "=", "lineSep", ")", "self", ".", "_jwrite", ".", "csv", "(", "path", ")" ]
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, bzip2, gzip, lz4, snappy and deflate). :param sep: sets a single character as a separator for each field and value. If None is set, it uses the default value, ``,``. :param quote: sets a single character used for escaping quoted values where the separator can be part of the value. If None is set, it uses the default value, ``"``. If an empty string is set, it uses ``u0000`` (null character). :param escape: sets a single character used for escaping quotes inside an already quoted value. If None is set, it uses the default value, ``\`` :param escapeQuotes: a flag indicating whether values containing quotes should always be enclosed in quotes. If None is set, it uses the default value ``true``, escaping all values containing a quote character. :param quoteAll: a flag indicating whether all values should always be enclosed in quotes. If None is set, it uses the default value ``false``, only escaping values containing a quote character. :param header: writes the names of columns as the first line. If None is set, it uses the default value, ``false``. :param nullValue: sets the string representation of a null value. If None is set, it uses the default value, empty string. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from values being written should be skipped. If None is set, it uses the default value, ``true``. :param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for the quote character. If None is set, the default value is escape character when escape and quote characters are different, ``\0`` otherwise.. :param encoding: sets the encoding (charset) of saved csv files. If None is set, the default UTF-8 charset will be used. :param emptyValue: sets the string representation of an empty value. If None is set, it uses the default value, ``""``. :param lineSep: defines the line separator that should be used for writing. If None is set, it uses the default value, ``\\n``. Maximum length is 1 character. >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
[ "r", "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "in", "CSV", "format", "at", "the", "specified", "path", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L874-L945
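A hedged sketch of the CSV writer exercising a handful of the options documented above; the path and option values are illustrative.

import os, tempfile

csv_dir = os.path.join(tempfile.mkdtemp(), "data_csv")
# Semicolon-separated, gzip-compressed output with a header row and an explicit null marker.
df.write.csv(csv_dir, mode="overwrite", header=True, sep=";",
             nullValue="NA", compression="gzip")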
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.orc
def orc(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in ORC format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, snappy, zlib, and lzo). This will override ``orc.compress`` and ``spark.sql.orc.compression.codec``. If None is set, it uses the value specified in ``spark.sql.orc.compression.codec``. >>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.orc(path)
python
def orc(self, path, mode=None, partitionBy=None, compression=None): """Saves the content of the :class:`DataFrame` in ORC format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, snappy, zlib, and lzo). This will override ``orc.compress`` and ``spark.sql.orc.compression.codec``. If None is set, it uses the value specified in ``spark.sql.orc.compression.codec``. >>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data')) """ self.mode(mode) if partitionBy is not None: self.partitionBy(partitionBy) self._set_opts(compression=compression) self._jwrite.orc(path)
[ "def", "orc", "(", "self", ",", "path", ",", "mode", "=", "None", ",", "partitionBy", "=", "None", ",", "compression", "=", "None", ")", ":", "self", ".", "mode", "(", "mode", ")", "if", "partitionBy", "is", "not", "None", ":", "self", ".", "partitionBy", "(", "partitionBy", ")", "self", ".", "_set_opts", "(", "compression", "=", "compression", ")", "self", ".", "_jwrite", ".", "orc", "(", "path", ")" ]
Saves the content of the :class:`DataFrame` in ORC format at the specified path. :param path: the path in any Hadoop supported file system :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param partitionBy: names of partitioning columns :param compression: compression codec to use when saving to file. This can be one of the known case-insensitive shorten names (none, snappy, zlib, and lzo). This will override ``orc.compress`` and ``spark.sql.orc.compression.codec``. If None is set, it uses the value specified in ``spark.sql.orc.compression.codec``. >>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned') >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
[ "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "in", "ORC", "format", "at", "the", "specified", "path", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L948-L973
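A minimal sketch of the ORC writer under the same assumptions (a DataFrame `df` with a `year` column; values illustrative).

import os, tempfile

orc_dir = os.path.join(tempfile.mkdtemp(), "data_orc")
# Overrides orc.compress / spark.sql.orc.compression.codec for this write only.
df.write.orc(orc_dir, mode="overwrite", partitionBy="year", compression="zlib")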
apache/spark
python/pyspark/sql/readwriter.py
DataFrameWriter.jdbc
def jdbc(self, url, table, mode=None, properties=None): """Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) self.mode(mode)._jwrite.jdbc(url, table, jprop)
python
def jdbc(self, url, table, mode=None, properties=None): """Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) self.mode(mode)._jwrite.jdbc(url, table, jprop)
[ "def", "jdbc", "(", "self", ",", "url", ",", "table", ",", "mode", "=", "None", ",", "properties", "=", "None", ")", ":", "if", "properties", "is", "None", ":", "properties", "=", "dict", "(", ")", "jprop", "=", "JavaClass", "(", "\"java.util.Properties\"", ",", "self", ".", "_spark", ".", "_sc", ".", "_gateway", ".", "_gateway_client", ")", "(", ")", "for", "k", "in", "properties", ":", "jprop", ".", "setProperty", "(", "k", ",", "properties", "[", "k", "]", ")", "self", ".", "mode", "(", "mode", ")", ".", "_jwrite", ".", "jdbc", "(", "url", ",", "table", ",", "jprop", ")" ]
Saves the content of the :class:`DataFrame` to an external database table via JDBC. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: Name of the table in the external database. :param mode: specifies the behavior of the save operation when data already exists. * ``append``: Append contents of this :class:`DataFrame` to existing data. * ``overwrite``: Overwrite existing data. * ``ignore``: Silently ignore this operation if data already exists. * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \ exists. :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
[ "Saves", "the", "content", "of", "the", ":", "class", ":", "DataFrame", "to", "an", "external", "database", "table", "via", "JDBC", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L976-L1000
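A hedged sketch of the JDBC writer; the URL, table name, credentials, and driver class are placeholders, and the matching JDBC driver jar is assumed to already be on the Spark classpath.

props = {"user": "SYSTEM", "password": "mypassword", "driver": "org.postgresql.Driver"}
df.write.jdbc("jdbc:postgresql://localhost:5432/analytics", "public.events",
              mode="append", properties=props)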
apache/spark
python/pyspark/streaming/kinesis.py
KinesisUtils.createStream
def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName, initialPositionInStream, checkpointInterval, storageLevel=StorageLevel.MEMORY_AND_DISK_2, awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder, stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None): """ Create an input stream that pulls messages from a Kinesis stream. This uses the Kinesis Client Library (KCL) to pull messages from Kinesis. .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing is enabled. Make sure that your checkpoint directory is secure. :param ssc: StreamingContext object :param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to update DynamoDB :param streamName: Kinesis stream name :param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com) :param regionName: Name of region used by the Kinesis Client Library (KCL) to update DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics) :param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the worker's initial starting position in the stream. The values are either the beginning of the stream per Kinesis' limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or the tip of the stream (InitialPositionInStream.LATEST). :param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis Spark Streaming documentation for more details on the different types of checkpoints. :param storageLevel: Storage level to use for storing the received objects (default is StorageLevel.MEMORY_AND_DISK_2) :param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param awsSecretKey: AWS SecretKey (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param decoder: A function used to decode value (default is utf8_decoder) :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from the Kinesis stream (default is None). :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis stream, if STS is being used (default is None). :param stsExternalId: External ID that can be used to validate against the assumed IAM role's trust policy, if STS is being used (default is None). :return: A DStream object """ jlevel = ssc._sc._getJavaStorageLevel(storageLevel) jduration = ssc._jduration(checkpointInterval) try: # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper() except TypeError as e: if str(e) == "'JavaPackage' object is not callable": _print_missing_jar( "Streaming's Kinesis", "streaming-kinesis-asl", "streaming-kinesis-asl-assembly", ssc.sparkContext.version) raise jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl, regionName, initialPositionInStream, jduration, jlevel, awsAccessKeyId, awsSecretKey, stsAssumeRoleArn, stsSessionName, stsExternalId) stream = DStream(jstream, ssc, NoOpSerializer()) return stream.map(lambda v: decoder(v))
python
def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName, initialPositionInStream, checkpointInterval, storageLevel=StorageLevel.MEMORY_AND_DISK_2, awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder, stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None): """ Create an input stream that pulls messages from a Kinesis stream. This uses the Kinesis Client Library (KCL) to pull messages from Kinesis. .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing is enabled. Make sure that your checkpoint directory is secure. :param ssc: StreamingContext object :param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to update DynamoDB :param streamName: Kinesis stream name :param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com) :param regionName: Name of region used by the Kinesis Client Library (KCL) to update DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics) :param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the worker's initial starting position in the stream. The values are either the beginning of the stream per Kinesis' limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or the tip of the stream (InitialPositionInStream.LATEST). :param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis Spark Streaming documentation for more details on the different types of checkpoints. :param storageLevel: Storage level to use for storing the received objects (default is StorageLevel.MEMORY_AND_DISK_2) :param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param awsSecretKey: AWS SecretKey (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param decoder: A function used to decode value (default is utf8_decoder) :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from the Kinesis stream (default is None). :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis stream, if STS is being used (default is None). :param stsExternalId: External ID that can be used to validate against the assumed IAM role's trust policy, if STS is being used (default is None). :return: A DStream object """ jlevel = ssc._sc._getJavaStorageLevel(storageLevel) jduration = ssc._jduration(checkpointInterval) try: # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper() except TypeError as e: if str(e) == "'JavaPackage' object is not callable": _print_missing_jar( "Streaming's Kinesis", "streaming-kinesis-asl", "streaming-kinesis-asl-assembly", ssc.sparkContext.version) raise jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl, regionName, initialPositionInStream, jduration, jlevel, awsAccessKeyId, awsSecretKey, stsAssumeRoleArn, stsSessionName, stsExternalId) stream = DStream(jstream, ssc, NoOpSerializer()) return stream.map(lambda v: decoder(v))
[ "def", "createStream", "(", "ssc", ",", "kinesisAppName", ",", "streamName", ",", "endpointUrl", ",", "regionName", ",", "initialPositionInStream", ",", "checkpointInterval", ",", "storageLevel", "=", "StorageLevel", ".", "MEMORY_AND_DISK_2", ",", "awsAccessKeyId", "=", "None", ",", "awsSecretKey", "=", "None", ",", "decoder", "=", "utf8_decoder", ",", "stsAssumeRoleArn", "=", "None", ",", "stsSessionName", "=", "None", ",", "stsExternalId", "=", "None", ")", ":", "jlevel", "=", "ssc", ".", "_sc", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "jduration", "=", "ssc", ".", "_jduration", "(", "checkpointInterval", ")", "try", ":", "# Use KinesisUtilsPythonHelper to access Scala's KinesisUtils", "helper", "=", "ssc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "kinesis", ".", "KinesisUtilsPythonHelper", "(", ")", "except", "TypeError", "as", "e", ":", "if", "str", "(", "e", ")", "==", "\"'JavaPackage' object is not callable\"", ":", "_print_missing_jar", "(", "\"Streaming's Kinesis\"", ",", "\"streaming-kinesis-asl\"", ",", "\"streaming-kinesis-asl-assembly\"", ",", "ssc", ".", "sparkContext", ".", "version", ")", "raise", "jstream", "=", "helper", ".", "createStream", "(", "ssc", ".", "_jssc", ",", "kinesisAppName", ",", "streamName", ",", "endpointUrl", ",", "regionName", ",", "initialPositionInStream", ",", "jduration", ",", "jlevel", ",", "awsAccessKeyId", ",", "awsSecretKey", ",", "stsAssumeRoleArn", ",", "stsSessionName", ",", "stsExternalId", ")", "stream", "=", "DStream", "(", "jstream", ",", "ssc", ",", "NoOpSerializer", "(", ")", ")", "return", "stream", ".", "map", "(", "lambda", "v", ":", "decoder", "(", "v", ")", ")" ]
Create an input stream that pulls messages from a Kinesis stream. This uses the Kinesis Client Library (KCL) to pull messages from Kinesis. .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing is enabled. Make sure that your checkpoint directory is secure. :param ssc: StreamingContext object :param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to update DynamoDB :param streamName: Kinesis stream name :param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com) :param regionName: Name of region used by the Kinesis Client Library (KCL) to update DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics) :param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the worker's initial starting position in the stream. The values are either the beginning of the stream per Kinesis' limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or the tip of the stream (InitialPositionInStream.LATEST). :param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis Spark Streaming documentation for more details on the different types of checkpoints. :param storageLevel: Storage level to use for storing the received objects (default is StorageLevel.MEMORY_AND_DISK_2) :param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param awsSecretKey: AWS SecretKey (default is None. If None, will use DefaultAWSCredentialsProviderChain) :param decoder: A function used to decode value (default is utf8_decoder) :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from the Kinesis stream (default is None). :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis stream, if STS is being used (default is None). :param stsExternalId: External ID that can be used to validate against the assumed IAM role's trust policy, if STS is being used (default is None). :return: A DStream object
[ "Create", "an", "input", "stream", "that", "pulls", "messages", "from", "a", "Kinesis", "stream", ".", "This", "uses", "the", "Kinesis", "Client", "Library", "(", "KCL", ")", "to", "pull", "messages", "from", "Kinesis", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/kinesis.py#L37-L98
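A hedged end-to-end sketch of createStream(). It assumes the spark-streaming-kinesis-asl assembly jar is on the classpath and that AWS credentials resolve through the default provider chain; the application name, stream name, endpoint, and region are placeholders.

from pyspark import SparkContext, StorageLevel
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream

sc = SparkContext(appName="kinesis-sketch")
ssc = StreamingContext(sc, 10)  # 10-second batches

lines = KinesisUtils.createStream(
    ssc, "kinesis-sketch-app", "demo-stream",
    "https://kinesis.us-east-1.amazonaws.com", "us-east-1",
    InitialPositionInStream.LATEST, checkpointInterval=10,
    storageLevel=StorageLevel.MEMORY_AND_DISK_2)

lines.pprint()
ssc.start()
ssc.awaitTermination()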
apache/spark
dev/merge_spark_pr.py
choose_jira_assignee
def choose_jira_assignee(issue, asf_jira): """ Prompt the user to choose who to assign the issue to in jira, given a list of candidates, including the original reporter and all commentors """ while True: try: reporter = issue.fields.reporter commentors = map(lambda x: x.author, issue.fields.comment.comments) candidates = set(commentors) candidates.add(reporter) candidates = list(candidates) print("JIRA is unassigned, choose assignee") for idx, author in enumerate(candidates): if author.key == "apachespark": continue annotations = ["Reporter"] if author == reporter else [] if author in commentors: annotations.append("Commentor") print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations))) raw_assignee = input( "Enter number of user, or userid, to assign to (blank to leave unassigned):") if raw_assignee == "": return None else: try: id = int(raw_assignee) assignee = candidates[id] except: # assume it's a user id, and try to assign (might fail, we just prompt again) assignee = asf_jira.user(raw_assignee) asf_jira.assign_issue(issue.key, assignee.key) return assignee except KeyboardInterrupt: raise except: traceback.print_exc() print("Error assigning JIRA, try again (or leave blank and fix manually)")
python
def choose_jira_assignee(issue, asf_jira): """ Prompt the user to choose who to assign the issue to in jira, given a list of candidates, including the original reporter and all commentors """ while True: try: reporter = issue.fields.reporter commentors = map(lambda x: x.author, issue.fields.comment.comments) candidates = set(commentors) candidates.add(reporter) candidates = list(candidates) print("JIRA is unassigned, choose assignee") for idx, author in enumerate(candidates): if author.key == "apachespark": continue annotations = ["Reporter"] if author == reporter else [] if author in commentors: annotations.append("Commentor") print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations))) raw_assignee = input( "Enter number of user, or userid, to assign to (blank to leave unassigned):") if raw_assignee == "": return None else: try: id = int(raw_assignee) assignee = candidates[id] except: # assume it's a user id, and try to assign (might fail, we just prompt again) assignee = asf_jira.user(raw_assignee) asf_jira.assign_issue(issue.key, assignee.key) return assignee except KeyboardInterrupt: raise except: traceback.print_exc() print("Error assigning JIRA, try again (or leave blank and fix manually)")
[ "def", "choose_jira_assignee", "(", "issue", ",", "asf_jira", ")", ":", "while", "True", ":", "try", ":", "reporter", "=", "issue", ".", "fields", ".", "reporter", "commentors", "=", "map", "(", "lambda", "x", ":", "x", ".", "author", ",", "issue", ".", "fields", ".", "comment", ".", "comments", ")", "candidates", "=", "set", "(", "commentors", ")", "candidates", ".", "add", "(", "reporter", ")", "candidates", "=", "list", "(", "candidates", ")", "print", "(", "\"JIRA is unassigned, choose assignee\"", ")", "for", "idx", ",", "author", "in", "enumerate", "(", "candidates", ")", ":", "if", "author", ".", "key", "==", "\"apachespark\"", ":", "continue", "annotations", "=", "[", "\"Reporter\"", "]", "if", "author", "==", "reporter", "else", "[", "]", "if", "author", "in", "commentors", ":", "annotations", ".", "append", "(", "\"Commentor\"", ")", "print", "(", "\"[%d] %s (%s)\"", "%", "(", "idx", ",", "author", ".", "displayName", ",", "\",\"", ".", "join", "(", "annotations", ")", ")", ")", "raw_assignee", "=", "input", "(", "\"Enter number of user, or userid, to assign to (blank to leave unassigned):\"", ")", "if", "raw_assignee", "==", "\"\"", ":", "return", "None", "else", ":", "try", ":", "id", "=", "int", "(", "raw_assignee", ")", "assignee", "=", "candidates", "[", "id", "]", "except", ":", "# assume it's a user id, and try to assign (might fail, we just prompt again)", "assignee", "=", "asf_jira", ".", "user", "(", "raw_assignee", ")", "asf_jira", ".", "assign_issue", "(", "issue", ".", "key", ",", "assignee", ".", "key", ")", "return", "assignee", "except", "KeyboardInterrupt", ":", "raise", "except", ":", "traceback", ".", "print_exc", "(", ")", "print", "(", "\"Error assigning JIRA, try again (or leave blank and fix manually)\"", ")" ]
Prompt the user to choose who to assign the issue to in jira, given a list of candidates, including the original reporter and all commentors
[ "Prompt", "the", "user", "to", "choose", "who", "to", "assign", "the", "issue", "to", "in", "jira", "given", "a", "list", "of", "candidates", "including", "the", "original", "reporter", "and", "all", "commentors" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L325-L362
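A hedged, self-contained sketch of the candidate-list logic inside choose_jira_assignee, shown without any network calls. The Author namedtuple and the sample reporter/commenter values are hypothetical stand-ins for the objects the jira library returns; only the dedup-and-menu behaviour is illustrated, not the actual assignment call.

from collections import namedtuple

# Hypothetical stand-in for the author objects returned by the jira client.
Author = namedtuple("Author", ["key", "displayName"])

reporter = Author("alice", "Alice Example")
commenters = [Author("bob", "Bob Example"),
              Author("alice", "Alice Example"),
              Author("apachespark", "Apache Spark QA")]

# Same dedup the script performs: all commenters plus the reporter, as a set.
candidates = list(set(commenters) | {reporter})
for idx, author in enumerate(candidates):
    if author.key == "apachespark":          # the bot account is skipped in the menu
        continue
    annotations = ["Reporter"] if author == reporter else []
    if author in commenters:
        annotations.append("Commentor")
    print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))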
apache/spark
dev/merge_spark_pr.py
standardize_jira_ref
def standardize_jira_ref(text): """ Standardize the [SPARK-XXXXX] [MODULE] prefix Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX][MLLIB] Issue" >>> standardize_jira_ref( ... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful") '[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful' >>> standardize_jira_ref( ... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests") '[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests' >>> standardize_jira_ref("[MLlib] Spark 5954: Top by key") '[SPARK-5954][MLLIB] Top by key' >>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl") '[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl' >>> standardize_jira_ref( ... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.") '[SPARK-1094] Support MiMa for reporting binary compatibility across versions.' >>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark") '[SPARK-1146][WIP] Vagrant support for Spark' >>> standardize_jira_ref( ... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...") '[SPARK-1032] If Yarn app fails before registering, app master stays aroun...' >>> standardize_jira_ref( ... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.") '[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.' >>> standardize_jira_ref("Additional information for users building from source code") 'Additional information for users building from source code' """ jira_refs = [] components = [] # If the string is compliant, no need to process any further if (re.search(r'^\[SPARK-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text)): return text # Extract JIRA ref(s): pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE) for ref in pattern.findall(text): # Add brackets, replace spaces with a dash, & convert to uppercase jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']') text = text.replace(ref, '') # Extract spark component(s): # Look for alphanumeric chars, spaces, dashes, periods, and/or commas pattern = re.compile(r'(\[[\w\s,.-]+\])', re.IGNORECASE) for component in pattern.findall(text): components.append(component.upper()) text = text.replace(component, '') # Cleanup any remaining symbols: pattern = re.compile(r'^\W+(.*)', re.IGNORECASE) if (pattern.search(text) is not None): text = pattern.search(text).groups()[0] # Assemble full text (JIRA ref(s), module(s), remaining text) clean_text = ''.join(jira_refs).strip() + ''.join(components).strip() + " " + text.strip() # Replace multiple spaces with a single space, e.g. if no jira refs and/or components were # included clean_text = re.sub(r'\s+', ' ', clean_text.strip()) return clean_text
python
def standardize_jira_ref(text): """ Standardize the [SPARK-XXXXX] [MODULE] prefix Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX][MLLIB] Issue" >>> standardize_jira_ref( ... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful") '[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful' >>> standardize_jira_ref( ... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests") '[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests' >>> standardize_jira_ref("[MLlib] Spark 5954: Top by key") '[SPARK-5954][MLLIB] Top by key' >>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl") '[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl' >>> standardize_jira_ref( ... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.") '[SPARK-1094] Support MiMa for reporting binary compatibility across versions.' >>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark") '[SPARK-1146][WIP] Vagrant support for Spark' >>> standardize_jira_ref( ... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...") '[SPARK-1032] If Yarn app fails before registering, app master stays aroun...' >>> standardize_jira_ref( ... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.") '[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.' >>> standardize_jira_ref("Additional information for users building from source code") 'Additional information for users building from source code' """ jira_refs = [] components = [] # If the string is compliant, no need to process any further if (re.search(r'^\[SPARK-[0-9]{3,6}\](\[[A-Z0-9_\s,]+\] )+\S+', text)): return text # Extract JIRA ref(s): pattern = re.compile(r'(SPARK[-\s]*[0-9]{3,6})+', re.IGNORECASE) for ref in pattern.findall(text): # Add brackets, replace spaces with a dash, & convert to uppercase jira_refs.append('[' + re.sub(r'\s+', '-', ref.upper()) + ']') text = text.replace(ref, '') # Extract spark component(s): # Look for alphanumeric chars, spaces, dashes, periods, and/or commas pattern = re.compile(r'(\[[\w\s,.-]+\])', re.IGNORECASE) for component in pattern.findall(text): components.append(component.upper()) text = text.replace(component, '') # Cleanup any remaining symbols: pattern = re.compile(r'^\W+(.*)', re.IGNORECASE) if (pattern.search(text) is not None): text = pattern.search(text).groups()[0] # Assemble full text (JIRA ref(s), module(s), remaining text) clean_text = ''.join(jira_refs).strip() + ''.join(components).strip() + " " + text.strip() # Replace multiple spaces with a single space, e.g. if no jira refs and/or components were # included clean_text = re.sub(r'\s+', ' ', clean_text.strip()) return clean_text
[ "def", "standardize_jira_ref", "(", "text", ")", ":", "jira_refs", "=", "[", "]", "components", "=", "[", "]", "# If the string is compliant, no need to process any further", "if", "(", "re", ".", "search", "(", "r'^\\[SPARK-[0-9]{3,6}\\](\\[[A-Z0-9_\\s,]+\\] )+\\S+'", ",", "text", ")", ")", ":", "return", "text", "# Extract JIRA ref(s):", "pattern", "=", "re", ".", "compile", "(", "r'(SPARK[-\\s]*[0-9]{3,6})+'", ",", "re", ".", "IGNORECASE", ")", "for", "ref", "in", "pattern", ".", "findall", "(", "text", ")", ":", "# Add brackets, replace spaces with a dash, & convert to uppercase", "jira_refs", ".", "append", "(", "'['", "+", "re", ".", "sub", "(", "r'\\s+'", ",", "'-'", ",", "ref", ".", "upper", "(", ")", ")", "+", "']'", ")", "text", "=", "text", ".", "replace", "(", "ref", ",", "''", ")", "# Extract spark component(s):", "# Look for alphanumeric chars, spaces, dashes, periods, and/or commas", "pattern", "=", "re", ".", "compile", "(", "r'(\\[[\\w\\s,.-]+\\])'", ",", "re", ".", "IGNORECASE", ")", "for", "component", "in", "pattern", ".", "findall", "(", "text", ")", ":", "components", ".", "append", "(", "component", ".", "upper", "(", ")", ")", "text", "=", "text", ".", "replace", "(", "component", ",", "''", ")", "# Cleanup any remaining symbols:", "pattern", "=", "re", ".", "compile", "(", "r'^\\W+(.*)'", ",", "re", ".", "IGNORECASE", ")", "if", "(", "pattern", ".", "search", "(", "text", ")", "is", "not", "None", ")", ":", "text", "=", "pattern", ".", "search", "(", "text", ")", ".", "groups", "(", ")", "[", "0", "]", "# Assemble full text (JIRA ref(s), module(s), remaining text)", "clean_text", "=", "''", ".", "join", "(", "jira_refs", ")", ".", "strip", "(", ")", "+", "''", ".", "join", "(", "components", ")", ".", "strip", "(", ")", "+", "\" \"", "+", "text", ".", "strip", "(", ")", "# Replace multiple spaces with a single space, e.g. if no jira refs and/or components were", "# included", "clean_text", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "clean_text", ".", "strip", "(", ")", ")", "return", "clean_text" ]
Standardize the [SPARK-XXXXX] [MODULE] prefix Converts "[SPARK-XXX][mllib] Issue", "[MLLib] SPARK-XXX. Issue" or "SPARK XXX [MLLIB]: Issue" to "[SPARK-XXX][MLLIB] Issue" >>> standardize_jira_ref( ... "[SPARK-5821] [SQL] ParquetRelation2 CTAS should check if delete is successful") '[SPARK-5821][SQL] ParquetRelation2 CTAS should check if delete is successful' >>> standardize_jira_ref( ... "[SPARK-4123][Project Infra][WIP]: Show new dependencies added in pull requests") '[SPARK-4123][PROJECT INFRA][WIP] Show new dependencies added in pull requests' >>> standardize_jira_ref("[MLlib] Spark 5954: Top by key") '[SPARK-5954][MLLIB] Top by key' >>> standardize_jira_ref("[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl") '[SPARK-979] a LRU scheduler for load balancing in TaskSchedulerImpl' >>> standardize_jira_ref( ... "SPARK-1094 Support MiMa for reporting binary compatibility across versions.") '[SPARK-1094] Support MiMa for reporting binary compatibility across versions.' >>> standardize_jira_ref("[WIP] [SPARK-1146] Vagrant support for Spark") '[SPARK-1146][WIP] Vagrant support for Spark' >>> standardize_jira_ref( ... "SPARK-1032. If Yarn app fails before registering, app master stays aroun...") '[SPARK-1032] If Yarn app fails before registering, app master stays aroun...' >>> standardize_jira_ref( ... "[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.") '[SPARK-6250][SPARK-6146][SPARK-5911][SQL] Types are now reserved words in DDL parser.' >>> standardize_jira_ref("Additional information for users building from source code") 'Additional information for users building from source code'
[ "Standardize", "the", "[", "SPARK", "-", "XXXXX", "]", "[", "MODULE", "]", "prefix", "Converts", "[", "SPARK", "-", "XXX", "]", "[", "mllib", "]", "Issue", "[", "MLLib", "]", "SPARK", "-", "XXX", ".", "Issue", "or", "SPARK", "XXX", "[", "MLLIB", "]", ":", "Issue", "to", "[", "SPARK", "-", "XXX", "]", "[", "MLLIB", "]", "Issue" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L374-L437
apache/spark
python/pyspark/mllib/util.py
MLUtils._parse_libsvm_line
def _parse_libsvm_line(line): """ Parses a line in LIBSVM format into (label, indices, values). """ items = line.split(None) label = float(items[0]) nnz = len(items) - 1 indices = np.zeros(nnz, dtype=np.int32) values = np.zeros(nnz) for i in xrange(nnz): index, value = items[1 + i].split(":") indices[i] = int(index) - 1 values[i] = float(value) return label, indices, values
python
def _parse_libsvm_line(line): """ Parses a line in LIBSVM format into (label, indices, values). """ items = line.split(None) label = float(items[0]) nnz = len(items) - 1 indices = np.zeros(nnz, dtype=np.int32) values = np.zeros(nnz) for i in xrange(nnz): index, value = items[1 + i].split(":") indices[i] = int(index) - 1 values[i] = float(value) return label, indices, values
[ "def", "_parse_libsvm_line", "(", "line", ")", ":", "items", "=", "line", ".", "split", "(", "None", ")", "label", "=", "float", "(", "items", "[", "0", "]", ")", "nnz", "=", "len", "(", "items", ")", "-", "1", "indices", "=", "np", ".", "zeros", "(", "nnz", ",", "dtype", "=", "np", ".", "int32", ")", "values", "=", "np", ".", "zeros", "(", "nnz", ")", "for", "i", "in", "xrange", "(", "nnz", ")", ":", "index", ",", "value", "=", "items", "[", "1", "+", "i", "]", ".", "split", "(", "\":\"", ")", "indices", "[", "i", "]", "=", "int", "(", "index", ")", "-", "1", "values", "[", "i", "]", "=", "float", "(", "value", ")", "return", "label", ",", "indices", ",", "values" ]
Parses a line in LIBSVM format into (label, indices, values).
[ "Parses", "a", "line", "in", "LIBSVM", "format", "into", "(", "label", "indices", "values", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L40-L53
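For readers without a Spark installation, the parsing rule is easy to see in a standalone re-implementation. This is a hedged illustration only, not the pyspark method itself; note how the one-based LIBSVM indices become zero-based.

import numpy as np

def parse_libsvm_line(line):
    # Mirrors the logic above: "label idx:value idx:value ..." with 1-based indices.
    items = line.split()
    label = float(items[0])
    indices = np.array([int(p.split(":")[0]) - 1 for p in items[1:]], dtype=np.int32)
    values = np.array([float(p.split(":")[1]) for p in items[1:]])
    return label, indices, values

print(parse_libsvm_line("1.0 1:2.5 3:-0.5 7:4.0"))
# expected: (1.0, array([0, 2, 6], dtype=int32), array([ 2.5, -0.5,  4. ]))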
apache/spark
python/pyspark/mllib/util.py
MLUtils._convert_labeled_point_to_libsvm
def _convert_labeled_point_to_libsvm(p): """Converts a LabeledPoint to a string in LIBSVM format.""" from pyspark.mllib.regression import LabeledPoint assert isinstance(p, LabeledPoint) items = [str(p.label)] v = _convert_to_vector(p.features) if isinstance(v, SparseVector): nnz = len(v.indices) for i in xrange(nnz): items.append(str(v.indices[i] + 1) + ":" + str(v.values[i])) else: for i in xrange(len(v)): items.append(str(i + 1) + ":" + str(v[i])) return " ".join(items)
python
def _convert_labeled_point_to_libsvm(p): """Converts a LabeledPoint to a string in LIBSVM format.""" from pyspark.mllib.regression import LabeledPoint assert isinstance(p, LabeledPoint) items = [str(p.label)] v = _convert_to_vector(p.features) if isinstance(v, SparseVector): nnz = len(v.indices) for i in xrange(nnz): items.append(str(v.indices[i] + 1) + ":" + str(v.values[i])) else: for i in xrange(len(v)): items.append(str(i + 1) + ":" + str(v[i])) return " ".join(items)
[ "def", "_convert_labeled_point_to_libsvm", "(", "p", ")", ":", "from", "pyspark", ".", "mllib", ".", "regression", "import", "LabeledPoint", "assert", "isinstance", "(", "p", ",", "LabeledPoint", ")", "items", "=", "[", "str", "(", "p", ".", "label", ")", "]", "v", "=", "_convert_to_vector", "(", "p", ".", "features", ")", "if", "isinstance", "(", "v", ",", "SparseVector", ")", ":", "nnz", "=", "len", "(", "v", ".", "indices", ")", "for", "i", "in", "xrange", "(", "nnz", ")", ":", "items", ".", "append", "(", "str", "(", "v", ".", "indices", "[", "i", "]", "+", "1", ")", "+", "\":\"", "+", "str", "(", "v", ".", "values", "[", "i", "]", ")", ")", "else", ":", "for", "i", "in", "xrange", "(", "len", "(", "v", ")", ")", ":", "items", ".", "append", "(", "str", "(", "i", "+", "1", ")", "+", "\":\"", "+", "str", "(", "v", "[", "i", "]", ")", ")", "return", "\" \"", ".", "join", "(", "items", ")" ]
Converts a LabeledPoint to a string in LIBSVM format.
[ "Converts", "a", "LabeledPoint", "to", "a", "string", "in", "LIBSVM", "format", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L56-L69
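A hedged usage sketch of the conversion in the other direction. It calls the private helper directly, which needs a local pyspark installation but no SparkContext; the strings in the comments are what the formatting rules above should produce, roughly.

from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import MLUtils

dense = LabeledPoint(1.0, Vectors.dense([0.0, 2.5, 3.0]))
sparse = LabeledPoint(0.0, Vectors.sparse(4, [1, 3], [9.0, 1.5]))

# Dense features are written positionally, sparse ones by their stored indices,
# both shifted to the one-based LIBSVM convention.
print(MLUtils._convert_labeled_point_to_libsvm(dense))   # roughly '1.0 1:0.0 2:2.5 3:3.0'
print(MLUtils._convert_labeled_point_to_libsvm(sparse))  # roughly '0.0 2:9.0 4:1.5'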
apache/spark
python/pyspark/mllib/util.py
MLUtils.loadLibSVMFile
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None): """ Loads labeled data in the LIBSVM format into an RDD of LabeledPoint. The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR. Each line represents a labeled sparse feature vector using the following format: label index1:value1 index2:value2 ... where the indices are one-based and in ascending order. This method parses each line into a LabeledPoint, where the feature indices are converted to zero-based. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param numFeatures: number of features, which will be determined from the input data if a nonpositive value is given. This is useful when the dataset is already split into multiple files and you want to load them separately, because some features may not present in certain files, which leads to inconsistent feature dimensions. :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> tempFile = NamedTemporaryFile(delete=True) >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0") >>> tempFile.flush() >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect() >>> tempFile.close() >>> examples[0] LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0])) >>> examples[1] LabeledPoint(-1.0, (6,[],[])) >>> examples[2] LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0])) """ from pyspark.mllib.regression import LabeledPoint lines = sc.textFile(path, minPartitions) parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l)) if numFeatures <= 0: parsed.cache() numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1 return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
python
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None): """ Loads labeled data in the LIBSVM format into an RDD of LabeledPoint. The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR. Each line represents a labeled sparse feature vector using the following format: label index1:value1 index2:value2 ... where the indices are one-based and in ascending order. This method parses each line into a LabeledPoint, where the feature indices are converted to zero-based. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param numFeatures: number of features, which will be determined from the input data if a nonpositive value is given. This is useful when the dataset is already split into multiple files and you want to load them separately, because some features may not present in certain files, which leads to inconsistent feature dimensions. :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> tempFile = NamedTemporaryFile(delete=True) >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0") >>> tempFile.flush() >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect() >>> tempFile.close() >>> examples[0] LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0])) >>> examples[1] LabeledPoint(-1.0, (6,[],[])) >>> examples[2] LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0])) """ from pyspark.mllib.regression import LabeledPoint lines = sc.textFile(path, minPartitions) parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l)) if numFeatures <= 0: parsed.cache() numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1 return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
[ "def", "loadLibSVMFile", "(", "sc", ",", "path", ",", "numFeatures", "=", "-", "1", ",", "minPartitions", "=", "None", ")", ":", "from", "pyspark", ".", "mllib", ".", "regression", "import", "LabeledPoint", "lines", "=", "sc", ".", "textFile", "(", "path", ",", "minPartitions", ")", "parsed", "=", "lines", ".", "map", "(", "lambda", "l", ":", "MLUtils", ".", "_parse_libsvm_line", "(", "l", ")", ")", "if", "numFeatures", "<=", "0", ":", "parsed", ".", "cache", "(", ")", "numFeatures", "=", "parsed", ".", "map", "(", "lambda", "x", ":", "-", "1", "if", "x", "[", "1", "]", ".", "size", "==", "0", "else", "x", "[", "1", "]", "[", "-", "1", "]", ")", ".", "reduce", "(", "max", ")", "+", "1", "return", "parsed", ".", "map", "(", "lambda", "x", ":", "LabeledPoint", "(", "x", "[", "0", "]", ",", "Vectors", ".", "sparse", "(", "numFeatures", ",", "x", "[", "1", "]", ",", "x", "[", "2", "]", ")", ")", ")" ]
Loads labeled data in the LIBSVM format into an RDD of LabeledPoint. The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR. Each line represents a labeled sparse feature vector using the following format: label index1:value1 index2:value2 ... where the indices are one-based and in ascending order. This method parses each line into a LabeledPoint, where the feature indices are converted to zero-based. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param numFeatures: number of features, which will be determined from the input data if a nonpositive value is given. This is useful when the dataset is already split into multiple files and you want to load them separately, because some features may not present in certain files, which leads to inconsistent feature dimensions. :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> tempFile = NamedTemporaryFile(delete=True) >>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0") >>> tempFile.flush() >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect() >>> tempFile.close() >>> examples[0] LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0])) >>> examples[1] LabeledPoint(-1.0, (6,[],[])) >>> examples[2] LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
[ "Loads", "labeled", "data", "in", "the", "LIBSVM", "format", "into", "an", "RDD", "of", "LabeledPoint", ".", "The", "LIBSVM", "format", "is", "a", "text", "-", "based", "format", "used", "by", "LIBSVM", "and", "LIBLINEAR", ".", "Each", "line", "represents", "a", "labeled", "sparse", "feature", "vector", "using", "the", "following", "format", ":" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L73-L122
apache/spark
python/pyspark/mllib/util.py
MLUtils.saveAsLibSVMFile
def saveAsLibSVMFile(data, dir): """ Save labeled data in LIBSVM format. :param data: an RDD of LabeledPoint to be saved :param dir: directory to save the data >>> from tempfile import NamedTemporaryFile >>> from fileinput import input >>> from pyspark.mllib.regression import LabeledPoint >>> from glob import glob >>> from pyspark.mllib.util import MLUtils >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name) >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n' """ lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p)) lines.saveAsTextFile(dir)
python
def saveAsLibSVMFile(data, dir): """ Save labeled data in LIBSVM format. :param data: an RDD of LabeledPoint to be saved :param dir: directory to save the data >>> from tempfile import NamedTemporaryFile >>> from fileinput import input >>> from pyspark.mllib.regression import LabeledPoint >>> from glob import glob >>> from pyspark.mllib.util import MLUtils >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name) >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n' """ lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p)) lines.saveAsTextFile(dir)
[ "def", "saveAsLibSVMFile", "(", "data", ",", "dir", ")", ":", "lines", "=", "data", ".", "map", "(", "lambda", "p", ":", "MLUtils", ".", "_convert_labeled_point_to_libsvm", "(", "p", ")", ")", "lines", ".", "saveAsTextFile", "(", "dir", ")" ]
Save labeled data in LIBSVM format. :param data: an RDD of LabeledPoint to be saved :param dir: directory to save the data >>> from tempfile import NamedTemporaryFile >>> from fileinput import input >>> from pyspark.mllib.regression import LabeledPoint >>> from glob import glob >>> from pyspark.mllib.util import MLUtils >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name) >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*")))) '0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
[ "Save", "labeled", "data", "in", "LIBSVM", "format", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L126-L147
apache/spark
python/pyspark/mllib/util.py
MLUtils.loadLabeledPoints
def loadLabeledPoints(sc, path, minPartitions=None): """ Load labeled points saved using RDD.saveAsTextFile. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name) >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect() [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])] """ minPartitions = minPartitions or min(sc.defaultParallelism, 2) return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
python
def loadLabeledPoints(sc, path, minPartitions=None): """ Load labeled points saved using RDD.saveAsTextFile. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name) >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect() [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])] """ minPartitions = minPartitions or min(sc.defaultParallelism, 2) return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
[ "def", "loadLabeledPoints", "(", "sc", ",", "path", ",", "minPartitions", "=", "None", ")", ":", "minPartitions", "=", "minPartitions", "or", "min", "(", "sc", ".", "defaultParallelism", ",", "2", ")", "return", "callMLlibFunc", "(", "\"loadLabeledPoints\"", ",", "sc", ",", "path", ",", "minPartitions", ")" ]
Load labeled points saved using RDD.saveAsTextFile. :param sc: Spark context :param path: file or directory path in any Hadoop-supported file system URI :param minPartitions: min number of partitions @return: labeled data stored as an RDD of LabeledPoint >>> from tempfile import NamedTemporaryFile >>> from pyspark.mllib.util import MLUtils >>> from pyspark.mllib.regression import LabeledPoint >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])), ... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))] >>> tempFile = NamedTemporaryFile(delete=True) >>> tempFile.close() >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name) >>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect() [LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
[ "Load", "labeled", "points", "saved", "using", "RDD", ".", "saveAsTextFile", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L151-L173
apache/spark
python/pyspark/mllib/util.py
MLUtils.appendBias
def appendBias(data): """ Returns a new vector with `1.0` (bias) appended to the end of the input vector. """ vec = _convert_to_vector(data) if isinstance(vec, SparseVector): newIndices = np.append(vec.indices, len(vec)) newValues = np.append(vec.values, 1.0) return SparseVector(len(vec) + 1, newIndices, newValues) else: return _convert_to_vector(np.append(vec.toArray(), 1.0))
python
def appendBias(data): """ Returns a new vector with `1.0` (bias) appended to the end of the input vector. """ vec = _convert_to_vector(data) if isinstance(vec, SparseVector): newIndices = np.append(vec.indices, len(vec)) newValues = np.append(vec.values, 1.0) return SparseVector(len(vec) + 1, newIndices, newValues) else: return _convert_to_vector(np.append(vec.toArray(), 1.0))
[ "def", "appendBias", "(", "data", ")", ":", "vec", "=", "_convert_to_vector", "(", "data", ")", "if", "isinstance", "(", "vec", ",", "SparseVector", ")", ":", "newIndices", "=", "np", ".", "append", "(", "vec", ".", "indices", ",", "len", "(", "vec", ")", ")", "newValues", "=", "np", ".", "append", "(", "vec", ".", "values", ",", "1.0", ")", "return", "SparseVector", "(", "len", "(", "vec", ")", "+", "1", ",", "newIndices", ",", "newValues", ")", "else", ":", "return", "_convert_to_vector", "(", "np", ".", "append", "(", "vec", ".", "toArray", "(", ")", ",", "1.0", ")", ")" ]
Returns a new vector with `1.0` (bias) appended to the end of the input vector.
[ "Returns", "a", "new", "vector", "with", "1", ".", "0", "(", "bias", ")", "appended", "to", "the", "end", "of", "the", "input", "vector", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L177-L188
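A hedged sketch of appendBias on both vector kinds; it needs pyspark installed but no SparkContext, since the helper only manipulates local vectors.

from pyspark.mllib.linalg import Vectors
from pyspark.mllib.util import MLUtils

# Dense input: a trailing 1.0 is appended.
print(MLUtils.appendBias(Vectors.dense([1.0, 2.0])))      # roughly [1.0,2.0,1.0]
# Sparse input: size grows by one and the new last index carries the 1.0 bias.
print(MLUtils.appendBias(Vectors.sparse(3, [0], [4.0])))  # roughly (4,[0,3],[4.0,1.0])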
apache/spark
python/pyspark/mllib/util.py
MLUtils.convertVectorColumnsToML
def convertVectorColumnsToML(dataset, *cols): """ Converts vector columns in an input DataFrame from the :py:class:`pyspark.mllib.linalg.Vector` type to the new :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml` package. :param dataset: input dataset :param cols: a list of vector columns to be converted. New vector columns will be ignored. If unspecified, all old vector columns will be converted excepted nested ones. :return: the input dataset with old vector columns converted to the new vector type >>> import pyspark >>> from pyspark.mllib.linalg import Vectors >>> from pyspark.mllib.util import MLUtils >>> df = spark.createDataFrame( ... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))], ... ["id", "x", "y"]) >>> r1 = MLUtils.convertVectorColumnsToML(df).first() >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector) True >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first() >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector) True """ if not isinstance(dataset, DataFrame): raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset))) return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
python
def convertVectorColumnsToML(dataset, *cols): """ Converts vector columns in an input DataFrame from the :py:class:`pyspark.mllib.linalg.Vector` type to the new :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml` package. :param dataset: input dataset :param cols: a list of vector columns to be converted. New vector columns will be ignored. If unspecified, all old vector columns will be converted excepted nested ones. :return: the input dataset with old vector columns converted to the new vector type >>> import pyspark >>> from pyspark.mllib.linalg import Vectors >>> from pyspark.mllib.util import MLUtils >>> df = spark.createDataFrame( ... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))], ... ["id", "x", "y"]) >>> r1 = MLUtils.convertVectorColumnsToML(df).first() >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector) True >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first() >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector) True """ if not isinstance(dataset, DataFrame): raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset))) return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
[ "def", "convertVectorColumnsToML", "(", "dataset", ",", "*", "cols", ")", ":", "if", "not", "isinstance", "(", "dataset", ",", "DataFrame", ")", ":", "raise", "TypeError", "(", "\"Input dataset must be a DataFrame but got {}.\"", ".", "format", "(", "type", "(", "dataset", ")", ")", ")", "return", "callMLlibFunc", "(", "\"convertVectorColumnsToML\"", ",", "dataset", ",", "list", "(", "cols", ")", ")" ]
Converts vector columns in an input DataFrame from the :py:class:`pyspark.mllib.linalg.Vector` type to the new :py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml` package. :param dataset: input dataset :param cols: a list of vector columns to be converted. New vector columns will be ignored. If unspecified, all old vector columns will be converted excepted nested ones. :return: the input dataset with old vector columns converted to the new vector type >>> import pyspark >>> from pyspark.mllib.linalg import Vectors >>> from pyspark.mllib.util import MLUtils >>> df = spark.createDataFrame( ... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))], ... ["id", "x", "y"]) >>> r1 = MLUtils.convertVectorColumnsToML(df).first() >>> isinstance(r1.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r1.y, pyspark.ml.linalg.DenseVector) True >>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first() >>> isinstance(r2.x, pyspark.ml.linalg.SparseVector) True >>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector) True
[ "Converts", "vector", "columns", "in", "an", "input", "DataFrame", "from", "the", ":", "py", ":", "class", ":", "pyspark", ".", "mllib", ".", "linalg", ".", "Vector", "type", "to", "the", "new", ":", "py", ":", "class", ":", "pyspark", ".", "ml", ".", "linalg", ".", "Vector", "type", "under", "the", "spark", ".", "ml", "package", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L201-L237
apache/spark
python/pyspark/mllib/util.py
LinearDataGenerator.generateLinearInput
def generateLinearInput(intercept, weights, xMean, xVariance, nPoints, seed, eps): """ :param: intercept bias factor, the term c in X'w + c :param: weights feature vector, the term w in X'w + c :param: xMean Point around which the data X is centered. :param: xVariance Variance of the given data :param: nPoints Number of points to be generated :param: seed Random Seed :param: eps Used to scale the noise. If eps is set high, the amount of gaussian noise added is more. Returns a list of LabeledPoints of length nPoints """ weights = [float(weight) for weight in weights] xMean = [float(mean) for mean in xMean] xVariance = [float(var) for var in xVariance] return list(callMLlibFunc( "generateLinearInputWrapper", float(intercept), weights, xMean, xVariance, int(nPoints), int(seed), float(eps)))
python
def generateLinearInput(intercept, weights, xMean, xVariance, nPoints, seed, eps): """ :param: intercept bias factor, the term c in X'w + c :param: weights feature vector, the term w in X'w + c :param: xMean Point around which the data X is centered. :param: xVariance Variance of the given data :param: nPoints Number of points to be generated :param: seed Random Seed :param: eps Used to scale the noise. If eps is set high, the amount of gaussian noise added is more. Returns a list of LabeledPoints of length nPoints """ weights = [float(weight) for weight in weights] xMean = [float(mean) for mean in xMean] xVariance = [float(var) for var in xVariance] return list(callMLlibFunc( "generateLinearInputWrapper", float(intercept), weights, xMean, xVariance, int(nPoints), int(seed), float(eps)))
[ "def", "generateLinearInput", "(", "intercept", ",", "weights", ",", "xMean", ",", "xVariance", ",", "nPoints", ",", "seed", ",", "eps", ")", ":", "weights", "=", "[", "float", "(", "weight", ")", "for", "weight", "in", "weights", "]", "xMean", "=", "[", "float", "(", "mean", ")", "for", "mean", "in", "xMean", "]", "xVariance", "=", "[", "float", "(", "var", ")", "for", "var", "in", "xVariance", "]", "return", "list", "(", "callMLlibFunc", "(", "\"generateLinearInputWrapper\"", ",", "float", "(", "intercept", ")", ",", "weights", ",", "xMean", ",", "xVariance", ",", "int", "(", "nPoints", ")", ",", "int", "(", "seed", ")", ",", "float", "(", "eps", ")", ")", ")" ]
:param: intercept bias factor, the term c in X'w + c :param: weights feature vector, the term w in X'w + c :param: xMean Point around which the data X is centered. :param: xVariance Variance of the given data :param: nPoints Number of points to be generated :param: seed Random Seed :param: eps Used to scale the noise. If eps is set high, the amount of gaussian noise added is more. Returns a list of LabeledPoints of length nPoints
[ ":", "param", ":", "intercept", "bias", "factor", "the", "term", "c", "in", "X", "w", "+", "c", ":", "param", ":", "weights", "feature", "vector", "the", "term", "w", "in", "X", "w", "+", "c", ":", "param", ":", "xMean", "Point", "around", "which", "the", "data", "X", "is", "centered", ".", ":", "param", ":", "xVariance", "Variance", "of", "the", "given", "data", ":", "param", ":", "nPoints", "Number", "of", "points", "to", "be", "generated", ":", "param", ":", "seed", "Random", "Seed", ":", "param", ":", "eps", "Used", "to", "scale", "the", "noise", ".", "If", "eps", "is", "set", "high", "the", "amount", "of", "gaussian", "noise", "added", "is", "more", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L471-L490
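A hedged usage sketch. It assumes an active SparkContext named sc, since the generator is implemented on the JVM side and reached through callMLlibFunc; the parameter values are arbitrary.

from pyspark.mllib.util import LinearDataGenerator

points = LinearDataGenerator.generateLinearInput(
    intercept=0.0, weights=[1.0, 2.0], xMean=[0.0, 0.0], xVariance=[1.0, 1.0],
    nPoints=5, seed=42, eps=0.1)

for lp in points:
    # Each element is a LabeledPoint whose label is roughly x . [1.0, 2.0] plus noise.
    print(lp.label, lp.features)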
apache/spark
python/pyspark/mllib/util.py
LinearDataGenerator.generateLinearRDD
def generateLinearRDD(sc, nexamples, nfeatures, eps, nParts=2, intercept=0.0): """ Generate an RDD of LabeledPoints. """ return callMLlibFunc( "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures), float(eps), int(nParts), float(intercept))
python
def generateLinearRDD(sc, nexamples, nfeatures, eps, nParts=2, intercept=0.0): """ Generate an RDD of LabeledPoints. """ return callMLlibFunc( "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures), float(eps), int(nParts), float(intercept))
[ "def", "generateLinearRDD", "(", "sc", ",", "nexamples", ",", "nfeatures", ",", "eps", ",", "nParts", "=", "2", ",", "intercept", "=", "0.0", ")", ":", "return", "callMLlibFunc", "(", "\"generateLinearRDDWrapper\"", ",", "sc", ",", "int", "(", "nexamples", ")", ",", "int", "(", "nfeatures", ")", ",", "float", "(", "eps", ")", ",", "int", "(", "nParts", ")", ",", "float", "(", "intercept", ")", ")" ]
Generate an RDD of LabeledPoints.
[ "Generate", "an", "RDD", "of", "LabeledPoints", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L494-L501
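A hedged sketch for the RDD variant, again assuming an active SparkContext sc; the sizes below are arbitrary.

from pyspark.mllib.util import LinearDataGenerator

rdd = LinearDataGenerator.generateLinearRDD(sc, nexamples=100, nfeatures=3,
                                            eps=0.5, nParts=2, intercept=1.0)
print(rdd.count())   # 100 generated LabeledPoints
print(rdd.first())   # one LabeledPoint with a 3-element feature vector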
apache/spark
python/pyspark/mllib/regression.py
LinearRegressionWithSGD.train
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0, initialWeights=None, regParam=0.0, regType=None, intercept=False, validateData=True, convergenceTol=0.001): """ Train a linear regression model using Stochastic Gradient Descent (SGD). This solves the least squares regression formulation f(weights) = 1/(2n) ||A weights - y||^2 which is the mean squared error. Here the data matrix has n rows, and the input RDD holds the set of rows of A, each with its corresponding right hand side label y. See also the documentation for the precise formulation. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param step: The step parameter used in SGD. (default: 1.0) :param miniBatchFraction: Fraction of data to be used for each SGD iteration. (default: 1.0) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization - None for no regularization (default) :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param convergenceTol: A condition which decides iteration termination. (default: 0.001) """ warnings.warn( "Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning) def train(rdd, i): return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations), float(step), float(miniBatchFraction), i, float(regParam), regType, bool(intercept), bool(validateData), float(convergenceTol)) return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights)
python
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0, initialWeights=None, regParam=0.0, regType=None, intercept=False, validateData=True, convergenceTol=0.001): """ Train a linear regression model using Stochastic Gradient Descent (SGD). This solves the least squares regression formulation f(weights) = 1/(2n) ||A weights - y||^2 which is the mean squared error. Here the data matrix has n rows, and the input RDD holds the set of rows of A, each with its corresponding right hand side label y. See also the documentation for the precise formulation. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param step: The step parameter used in SGD. (default: 1.0) :param miniBatchFraction: Fraction of data to be used for each SGD iteration. (default: 1.0) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization - None for no regularization (default) :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param convergenceTol: A condition which decides iteration termination. (default: 0.001) """ warnings.warn( "Deprecated in 2.0.0. Use ml.regression.LinearRegression.", DeprecationWarning) def train(rdd, i): return callMLlibFunc("trainLinearRegressionModelWithSGD", rdd, int(iterations), float(step), float(miniBatchFraction), i, float(regParam), regType, bool(intercept), bool(validateData), float(convergenceTol)) return _regression_train_wrapper(train, LinearRegressionModel, data, initialWeights)
[ "def", "train", "(", "cls", ",", "data", ",", "iterations", "=", "100", ",", "step", "=", "1.0", ",", "miniBatchFraction", "=", "1.0", ",", "initialWeights", "=", "None", ",", "regParam", "=", "0.0", ",", "regType", "=", "None", ",", "intercept", "=", "False", ",", "validateData", "=", "True", ",", "convergenceTol", "=", "0.001", ")", ":", "warnings", ".", "warn", "(", "\"Deprecated in 2.0.0. Use ml.regression.LinearRegression.\"", ",", "DeprecationWarning", ")", "def", "train", "(", "rdd", ",", "i", ")", ":", "return", "callMLlibFunc", "(", "\"trainLinearRegressionModelWithSGD\"", ",", "rdd", ",", "int", "(", "iterations", ")", ",", "float", "(", "step", ")", ",", "float", "(", "miniBatchFraction", ")", ",", "i", ",", "float", "(", "regParam", ")", ",", "regType", ",", "bool", "(", "intercept", ")", ",", "bool", "(", "validateData", ")", ",", "float", "(", "convergenceTol", ")", ")", "return", "_regression_train_wrapper", "(", "train", ",", "LinearRegressionModel", ",", "data", ",", "initialWeights", ")" ]
Train a linear regression model using Stochastic Gradient Descent (SGD). This solves the least squares regression formulation f(weights) = 1/(2n) ||A weights - y||^2 which is the mean squared error. Here the data matrix has n rows, and the input RDD holds the set of rows of A, each with its corresponding right hand side label y. See also the documentation for the precise formulation. :param data: The training data, an RDD of LabeledPoint. :param iterations: The number of iterations. (default: 100) :param step: The step parameter used in SGD. (default: 1.0) :param miniBatchFraction: Fraction of data to be used for each SGD iteration. (default: 1.0) :param initialWeights: The initial weights. (default: None) :param regParam: The regularizer parameter. (default: 0.0) :param regType: The type of regularizer used for training our model. Supported values: - "l1" for using L1 regularization - "l2" for using L2 regularization - None for no regularization (default) :param intercept: Boolean parameter which indicates the use or not of the augmented representation for training data (i.e., whether bias features are activated or not). (default: False) :param validateData: Boolean parameter which indicates if the algorithm should validate data before training. (default: True) :param convergenceTol: A condition which decides iteration termination. (default: 0.001)
[ "Train", "a", "linear", "regression", "model", "using", "Stochastic", "Gradient", "Descent", "(", "SGD", ")", ".", "This", "solves", "the", "least", "squares", "regression", "formulation" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L230-L291
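A hedged end-to-end sketch of the (deprecated) SGD trainer on a toy y = 3x dataset. It assumes an active SparkContext sc, and the step-size choice is arbitrary, so the fitted weight is only approximately 3.

from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD

data = sc.parallelize([LabeledPoint(3.0 * x, [float(x)]) for x in range(10)])
model = LinearRegressionWithSGD.train(data, iterations=100, step=0.01)

print(model.weights, model.intercept)   # weights should trend toward [3.0]
print(model.predict([2.0]))             # roughly 6.0 on this toy data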
apache/spark
python/pyspark/mllib/regression.py
IsotonicRegressionModel.predict
def predict(self, x): """ Predict labels for provided features. Using a piecewise linear function. 1) If x exactly matches a boundary then associated prediction is returned. In case there are multiple predictions with the same boundary then one of them is returned. Which one is undefined (same as java.util.Arrays.binarySearch). 2) If x is lower or higher than all boundaries then first or last prediction is returned respectively. In case there are multiple predictions with the same boundary then the lowest or highest is returned respectively. 3) If x falls between two values in boundary array then prediction is treated as piecewise linear function and interpolated value is returned. In case there are multiple values with the same boundary then the same rules as in 2) are used. :param x: Feature or RDD of Features to be labeled. """ if isinstance(x, RDD): return x.map(lambda v: self.predict(v)) return np.interp(x, self.boundaries, self.predictions)
python
def predict(self, x): """ Predict labels for provided features. Using a piecewise linear function. 1) If x exactly matches a boundary then associated prediction is returned. In case there are multiple predictions with the same boundary then one of them is returned. Which one is undefined (same as java.util.Arrays.binarySearch). 2) If x is lower or higher than all boundaries then first or last prediction is returned respectively. In case there are multiple predictions with the same boundary then the lowest or highest is returned respectively. 3) If x falls between two values in boundary array then prediction is treated as piecewise linear function and interpolated value is returned. In case there are multiple values with the same boundary then the same rules as in 2) are used. :param x: Feature or RDD of Features to be labeled. """ if isinstance(x, RDD): return x.map(lambda v: self.predict(v)) return np.interp(x, self.boundaries, self.predictions)
[ "def", "predict", "(", "self", ",", "x", ")", ":", "if", "isinstance", "(", "x", ",", "RDD", ")", ":", "return", "x", ".", "map", "(", "lambda", "v", ":", "self", ".", "predict", "(", "v", ")", ")", "return", "np", ".", "interp", "(", "x", ",", "self", ".", "boundaries", ",", "self", ".", "predictions", ")" ]
Predict labels for provided features. Using a piecewise linear function. 1) If x exactly matches a boundary then associated prediction is returned. In case there are multiple predictions with the same boundary then one of them is returned. Which one is undefined (same as java.util.Arrays.binarySearch). 2) If x is lower or higher than all boundaries then first or last prediction is returned respectively. In case there are multiple predictions with the same boundary then the lowest or highest is returned respectively. 3) If x falls between two values in boundary array then prediction is treated as piecewise linear function and interpolated value is returned. In case there are multiple values with the same boundary then the same rules as in 2) are used. :param x: Feature or RDD of Features to be labeled.
[ "Predict", "labels", "for", "provided", "features", ".", "Using", "a", "piecewise", "linear", "function", ".", "1", ")", "If", "x", "exactly", "matches", "a", "boundary", "then", "associated", "prediction", "is", "returned", ".", "In", "case", "there", "are", "multiple", "predictions", "with", "the", "same", "boundary", "then", "one", "of", "them", "is", "returned", ".", "Which", "one", "is", "undefined", "(", "same", "as", "java", ".", "util", ".", "Arrays", ".", "binarySearch", ")", ".", "2", ")", "If", "x", "is", "lower", "or", "higher", "than", "all", "boundaries", "then", "first", "or", "last", "prediction", "is", "returned", "respectively", ".", "In", "case", "there", "are", "multiple", "predictions", "with", "the", "same", "boundary", "then", "the", "lowest", "or", "highest", "is", "returned", "respectively", ".", "3", ")", "If", "x", "falls", "between", "two", "values", "in", "boundary", "array", "then", "prediction", "is", "treated", "as", "piecewise", "linear", "function", "and", "interpolated", "value", "is", "returned", ".", "In", "case", "there", "are", "multiple", "values", "with", "the", "same", "boundary", "then", "the", "same", "rules", "as", "in", "2", ")", "are", "used", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L628-L651
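Because predict delegates to numpy.interp for numeric input, the three rules above can be illustrated without Spark at all. A hedged, standalone illustration with made-up boundaries and predictions:

import numpy as np

boundaries = np.array([1.0, 2.0, 4.0])
predictions = np.array([1.0, 3.0, 5.0])

print(np.interp(2.0, boundaries, predictions))  # 3.0 -> exact boundary match (rule 1)
print(np.interp(0.5, boundaries, predictions))  # 1.0 -> below all boundaries, first prediction (rule 2)
print(np.interp(9.0, boundaries, predictions))  # 5.0 -> above all boundaries, last prediction (rule 2)
print(np.interp(3.0, boundaries, predictions))  # 4.0 -> linear interpolation between 2.0 and 4.0 (rule 3)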
apache/spark
python/pyspark/mllib/regression.py
IsotonicRegressionModel.save
def save(self, sc, path): """Save an IsotonicRegressionModel.""" java_boundaries = _py2java(sc, self.boundaries.tolist()) java_predictions = _py2java(sc, self.predictions.tolist()) java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel( java_boundaries, java_predictions, self.isotonic) java_model.save(sc._jsc.sc(), path)
python
def save(self, sc, path): """Save an IsotonicRegressionModel.""" java_boundaries = _py2java(sc, self.boundaries.tolist()) java_predictions = _py2java(sc, self.predictions.tolist()) java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel( java_boundaries, java_predictions, self.isotonic) java_model.save(sc._jsc.sc(), path)
[ "def", "save", "(", "self", ",", "sc", ",", "path", ")", ":", "java_boundaries", "=", "_py2java", "(", "sc", ",", "self", ".", "boundaries", ".", "tolist", "(", ")", ")", "java_predictions", "=", "_py2java", "(", "sc", ",", "self", ".", "predictions", ".", "tolist", "(", ")", ")", "java_model", "=", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "mllib", ".", "regression", ".", "IsotonicRegressionModel", "(", "java_boundaries", ",", "java_predictions", ",", "self", ".", "isotonic", ")", "java_model", ".", "save", "(", "sc", ".", "_jsc", ".", "sc", "(", ")", ",", "path", ")" ]
Save an IsotonicRegressionModel.
[ "Save", "an", "IsotonicRegressionModel", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L654-L660
apache/spark
python/pyspark/mllib/regression.py
IsotonicRegressionModel.load
def load(cls, sc, path): """Load an IsotonicRegressionModel.""" java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load( sc._jsc.sc(), path) py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray() py_predictions = _java2py(sc, java_model.predictionVector()).toArray() return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
python
def load(cls, sc, path): """Load an IsotonicRegressionModel.""" java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load( sc._jsc.sc(), path) py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray() py_predictions = _java2py(sc, java_model.predictionVector()).toArray() return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
[ "def", "load", "(", "cls", ",", "sc", ",", "path", ")", ":", "java_model", "=", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "mllib", ".", "regression", ".", "IsotonicRegressionModel", ".", "load", "(", "sc", ".", "_jsc", ".", "sc", "(", ")", ",", "path", ")", "py_boundaries", "=", "_java2py", "(", "sc", ",", "java_model", ".", "boundaryVector", "(", ")", ")", ".", "toArray", "(", ")", "py_predictions", "=", "_java2py", "(", "sc", ",", "java_model", ".", "predictionVector", "(", ")", ")", ".", "toArray", "(", ")", "return", "IsotonicRegressionModel", "(", "py_boundaries", ",", "py_predictions", ",", "java_model", ".", "isotonic", ")" ]
Load an IsotonicRegressionModel.
[ "Load", "an", "IsotonicRegressionModel", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L664-L670
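A hedged save/load round-trip sketch. It assumes an active SparkContext sc and a scratch path that does not already exist; the boundary and prediction values are made up for illustration.

import numpy as np
from pyspark.mllib.regression import IsotonicRegressionModel

model = IsotonicRegressionModel(np.array([0.0, 1.0, 2.0]),
                                np.array([0.5, 1.5, 2.5]), True)
path = "/tmp/isotonic-model-demo"   # hypothetical scratch location
model.save(sc, path)

reloaded = IsotonicRegressionModel.load(sc, path)
print(reloaded.predict(1.5))        # 2.0, matching the original model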
apache/spark
python/pyspark/mllib/regression.py
IsotonicRegression.train
def train(cls, data, isotonic=True): """ Train an isotonic regression model on the given data. :param data: RDD of (label, feature, weight) tuples. :param isotonic: Whether this is isotonic (which is default) or antitonic. (default: True) """ boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel", data.map(_convert_to_vector), bool(isotonic)) return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic)
python
def train(cls, data, isotonic=True): """ Train an isotonic regression model on the given data. :param data: RDD of (label, feature, weight) tuples. :param isotonic: Whether this is isotonic (which is default) or antitonic. (default: True) """ boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel", data.map(_convert_to_vector), bool(isotonic)) return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic)
[ "def", "train", "(", "cls", ",", "data", ",", "isotonic", "=", "True", ")", ":", "boundaries", ",", "predictions", "=", "callMLlibFunc", "(", "\"trainIsotonicRegressionModel\"", ",", "data", ".", "map", "(", "_convert_to_vector", ")", ",", "bool", "(", "isotonic", ")", ")", "return", "IsotonicRegressionModel", "(", "boundaries", ".", "toArray", "(", ")", ",", "predictions", ".", "toArray", "(", ")", ",", "isotonic", ")" ]
Train an isotonic regression model on the given data. :param data: RDD of (label, feature, weight) tuples. :param isotonic: Whether this is isotonic (which is default) or antitonic. (default: True)
[ "Train", "an", "isotonic", "regression", "model", "on", "the", "given", "data", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L699-L711
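A hedged training sketch, assuming an active SparkContext sc. Each input record is a (label, feature, weight) tuple, as the docstring above describes; the values are arbitrary.

from pyspark.mllib.regression import IsotonicRegression

data = sc.parallelize([(1.0, 1.0, 1.0), (2.0, 2.0, 1.0),
                       (1.5, 3.0, 1.0), (3.0, 4.0, 1.0)])
model = IsotonicRegression.train(data, isotonic=True)

print(model.boundaries, model.predictions)  # the fitted (boundary, prediction) arrays
print(model.predict(2.5))                   # interpolated between the fitted values around 2.5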
apache/spark
python/pyspark/mllib/linalg/distributed.py
RowMatrix.columnSimilarities
def columnSimilarities(self, threshold=0.0): """ Compute similarities between columns of this matrix. The threshold parameter is a trade-off knob between estimate quality and computational cost. The default threshold setting of 0 guarantees deterministically correct results, but uses the brute-force approach of computing normalized dot products. Setting the threshold to positive values uses a sampling approach and incurs strictly less computational cost than the brute-force approach. However the similarities computed will be estimates. The sampling guarantees relative-error correctness for those pairs of columns that have similarity greater than the given similarity threshold. To describe the guarantee, we set some notation: * Let A be the smallest in magnitude non-zero element of this matrix. * Let B be the largest in magnitude non-zero element of this matrix. * Let L be the maximum number of non-zeros per row. For example, for {0,1} matrices: A=B=1. Another example, for the Netflix matrix: A=1, B=5 For those column pairs that are above the threshold, the computed similarity is correct to within 20% relative error with probability at least 1 - (0.981)^10/B^ The shuffle size is bounded by the *smaller* of the following two expressions: * O(n log(n) L / (threshold * A)) * O(m L^2^) The latter is the cost of the brute-force approach, so for non-zero thresholds, the cost is always cheaper than the brute-force approach. :param: threshold: Set to 0 for deterministic guaranteed correctness. Similarities above this threshold are estimated with the cost vs estimate quality trade-off described above. :return: An n x n sparse upper-triangular CoordinateMatrix of cosine similarities between columns of this matrix. >>> rows = sc.parallelize([[1, 2], [1, 5]]) >>> mat = RowMatrix(rows) >>> sims = mat.columnSimilarities() >>> sims.entries.first().value 0.91914503... """ java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold)) return CoordinateMatrix(java_sims_mat)
python
def columnSimilarities(self, threshold=0.0): """ Compute similarities between columns of this matrix. The threshold parameter is a trade-off knob between estimate quality and computational cost. The default threshold setting of 0 guarantees deterministically correct results, but uses the brute-force approach of computing normalized dot products. Setting the threshold to positive values uses a sampling approach and incurs strictly less computational cost than the brute-force approach. However the similarities computed will be estimates. The sampling guarantees relative-error correctness for those pairs of columns that have similarity greater than the given similarity threshold. To describe the guarantee, we set some notation: * Let A be the smallest in magnitude non-zero element of this matrix. * Let B be the largest in magnitude non-zero element of this matrix. * Let L be the maximum number of non-zeros per row. For example, for {0,1} matrices: A=B=1. Another example, for the Netflix matrix: A=1, B=5 For those column pairs that are above the threshold, the computed similarity is correct to within 20% relative error with probability at least 1 - (0.981)^10/B^ The shuffle size is bounded by the *smaller* of the following two expressions: * O(n log(n) L / (threshold * A)) * O(m L^2^) The latter is the cost of the brute-force approach, so for non-zero thresholds, the cost is always cheaper than the brute-force approach. :param: threshold: Set to 0 for deterministic guaranteed correctness. Similarities above this threshold are estimated with the cost vs estimate quality trade-off described above. :return: An n x n sparse upper-triangular CoordinateMatrix of cosine similarities between columns of this matrix. >>> rows = sc.parallelize([[1, 2], [1, 5]]) >>> mat = RowMatrix(rows) >>> sims = mat.columnSimilarities() >>> sims.entries.first().value 0.91914503... """ java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold)) return CoordinateMatrix(java_sims_mat)
[ "def", "columnSimilarities", "(", "self", ",", "threshold", "=", "0.0", ")", ":", "java_sims_mat", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"columnSimilarities\"", ",", "float", "(", "threshold", ")", ")", "return", "CoordinateMatrix", "(", "java_sims_mat", ")" ]
Compute similarities between columns of this matrix. The threshold parameter is a trade-off knob between estimate quality and computational cost. The default threshold setting of 0 guarantees deterministically correct results, but uses the brute-force approach of computing normalized dot products. Setting the threshold to positive values uses a sampling approach and incurs strictly less computational cost than the brute-force approach. However the similarities computed will be estimates. The sampling guarantees relative-error correctness for those pairs of columns that have similarity greater than the given similarity threshold. To describe the guarantee, we set some notation: * Let A be the smallest in magnitude non-zero element of this matrix. * Let B be the largest in magnitude non-zero element of this matrix. * Let L be the maximum number of non-zeros per row. For example, for {0,1} matrices: A=B=1. Another example, for the Netflix matrix: A=1, B=5 For those column pairs that are above the threshold, the computed similarity is correct to within 20% relative error with probability at least 1 - (0.981)^10/B^ The shuffle size is bounded by the *smaller* of the following two expressions: * O(n log(n) L / (threshold * A)) * O(m L^2^) The latter is the cost of the brute-force approach, so for non-zero thresholds, the cost is always cheaper than the brute-force approach. :param: threshold: Set to 0 for deterministic guaranteed correctness. Similarities above this threshold are estimated with the cost vs estimate quality trade-off described above. :return: An n x n sparse upper-triangular CoordinateMatrix of cosine similarities between columns of this matrix. >>> rows = sc.parallelize([[1, 2], [1, 5]]) >>> mat = RowMatrix(rows) >>> sims = mat.columnSimilarities() >>> sims.entries.first().value 0.91914503...
[ "Compute", "similarities", "between", "columns", "of", "this", "matrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L201-L260
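Since the default threshold of 0.0 reduces to exact normalized dot products, the doctest value above can be cross-checked locally; the NumPy snippet below is illustrative only and does not use Spark.

```python
import numpy as np

# Columns of the 2 x 2 doctest matrix [[1, 2], [1, 5]].
A = np.array([[1.0, 2.0],
              [1.0, 5.0]])
col0, col1 = A[:, 0], A[:, 1]
cosine = col0.dot(col1) / (np.linalg.norm(col0) * np.linalg.norm(col1))
print(cosine)  # ~0.9191450..., matching sims.entries.first().value
```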
apache/spark
python/pyspark/mllib/linalg/distributed.py
RowMatrix.tallSkinnyQR
def tallSkinnyQR(self, computeQ=False): """ Compute the QR decomposition of this RowMatrix. The implementation is designed to optimize the QR decomposition (factorization) for the RowMatrix of a tall and skinny shape. Reference: Paul G. Constantine, David F. Gleich. "Tall and skinny QR factorizations in MapReduce architectures" ([[https://doi.org/10.1145/1996092.1996103]]) :param: computeQ: whether to computeQ :return: QRDecomposition(Q: RowMatrix, R: Matrix), where Q = None if computeQ = false. >>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]]) >>> mat = RowMatrix(rows) >>> decomp = mat.tallSkinnyQR(True) >>> Q = decomp.Q >>> R = decomp.R >>> # Test with absolute values >>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist()) >>> absQRows.collect() [[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]] >>> # Test with absolute values >>> abs(R.toArray()).tolist() [[5.0, 10.0], [0.0, 1.0]] """ decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ)) if computeQ: java_Q = decomp.call("Q") Q = RowMatrix(java_Q) else: Q = None R = decomp.call("R") return QRDecomposition(Q, R)
python
def tallSkinnyQR(self, computeQ=False): """ Compute the QR decomposition of this RowMatrix. The implementation is designed to optimize the QR decomposition (factorization) for the RowMatrix of a tall and skinny shape. Reference: Paul G. Constantine, David F. Gleich. "Tall and skinny QR factorizations in MapReduce architectures" ([[https://doi.org/10.1145/1996092.1996103]]) :param: computeQ: whether to computeQ :return: QRDecomposition(Q: RowMatrix, R: Matrix), where Q = None if computeQ = false. >>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]]) >>> mat = RowMatrix(rows) >>> decomp = mat.tallSkinnyQR(True) >>> Q = decomp.Q >>> R = decomp.R >>> # Test with absolute values >>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist()) >>> absQRows.collect() [[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]] >>> # Test with absolute values >>> abs(R.toArray()).tolist() [[5.0, 10.0], [0.0, 1.0]] """ decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ)) if computeQ: java_Q = decomp.call("Q") Q = RowMatrix(java_Q) else: Q = None R = decomp.call("R") return QRDecomposition(Q, R)
[ "def", "tallSkinnyQR", "(", "self", ",", "computeQ", "=", "False", ")", ":", "decomp", "=", "JavaModelWrapper", "(", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"tallSkinnyQR\"", ",", "computeQ", ")", ")", "if", "computeQ", ":", "java_Q", "=", "decomp", ".", "call", "(", "\"Q\"", ")", "Q", "=", "RowMatrix", "(", "java_Q", ")", "else", ":", "Q", "=", "None", "R", "=", "decomp", ".", "call", "(", "\"R\"", ")", "return", "QRDecomposition", "(", "Q", ",", "R", ")" ]
Compute the QR decomposition of this RowMatrix. The implementation is designed to optimize the QR decomposition (factorization) for the RowMatrix of a tall and skinny shape. Reference: Paul G. Constantine, David F. Gleich. "Tall and skinny QR factorizations in MapReduce architectures" ([[https://doi.org/10.1145/1996092.1996103]]) :param: computeQ: whether to computeQ :return: QRDecomposition(Q: RowMatrix, R: Matrix), where Q = None if computeQ = false. >>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]]) >>> mat = RowMatrix(rows) >>> decomp = mat.tallSkinnyQR(True) >>> Q = decomp.Q >>> R = decomp.R >>> # Test with absolute values >>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist()) >>> absQRows.collect() [[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]] >>> # Test with absolute values >>> abs(R.toArray()).tolist() [[5.0, 10.0], [0.0, 1.0]]
[ "Compute", "the", "QR", "decomposition", "of", "this", "RowMatrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L263-L301
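As a sanity check on the doctest output, the same tall-and-skinny factorization can be reproduced with NumPy's local QR; signs of individual columns may differ between implementations, hence the absolute values (mirroring the doctest).

```python
import numpy as np

A = np.array([[3.0, -6.0],
              [4.0, -8.0],
              [0.0,  1.0]])
Q, R = np.linalg.qr(A)
print(np.abs(R))  # [[ 5. 10.] [ 0.  1.]]  -- matches abs(R.toArray())
print(np.abs(Q))  # [[0.6 0. ] [0.8 0. ] [0.  1. ]]
```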
apache/spark
python/pyspark/mllib/linalg/distributed.py
RowMatrix.computeSVD
def computeSVD(self, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the Scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. :returns: :py:class:`SingularValueDecomposition` >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]]) >>> rm = RowMatrix(rows) >>> svd_model = rm.computeSVD(2, True) >>> svd_model.U.rows.collect() [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) """ j_model = self._java_matrix_wrapper.call( "computeSVD", int(k), bool(computeU), float(rCond)) return SingularValueDecomposition(j_model)
python
def computeSVD(self, k, computeU=False, rCond=1e-9): """ Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the Scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. :returns: :py:class:`SingularValueDecomposition` >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]]) >>> rm = RowMatrix(rows) >>> svd_model = rm.computeSVD(2, True) >>> svd_model.U.rows.collect() [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0) """ j_model = self._java_matrix_wrapper.call( "computeSVD", int(k), bool(computeU), float(rCond)) return SingularValueDecomposition(j_model)
[ "def", "computeSVD", "(", "self", ",", "k", ",", "computeU", "=", "False", ",", "rCond", "=", "1e-9", ")", ":", "j_model", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"computeSVD\"", ",", "int", "(", "k", ")", ",", "bool", "(", "computeU", ")", ",", "float", "(", "rCond", ")", ")", "return", "SingularValueDecomposition", "(", "j_model", ")" ]
Computes the singular value decomposition of the RowMatrix. The given row matrix A of dimension (m X n) is decomposed into U * s * V'T where * U: (m X k) (left singular vectors) is a RowMatrix whose columns are the eigenvectors of (A X A') * s: DenseVector consisting of square root of the eigenvalues (singular values) in descending order. * v: (n X k) (right singular vectors) is a Matrix whose columns are the eigenvectors of (A' X A) For more specific details on implementation, please refer the Scala documentation. :param k: Number of leading singular values to keep (`0 < k <= n`). It might return less than k if there are numerically zero singular values or there are not enough Ritz values converged before the maximum number of Arnoldi update iterations is reached (in case that matrix A is ill-conditioned). :param computeU: Whether or not to compute U. If set to be True, then U is computed by A * V * s^-1 :param rCond: Reciprocal condition number. All singular values smaller than rCond * s[0] are treated as zero where s[0] is the largest singular value. :returns: :py:class:`SingularValueDecomposition` >>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]]) >>> rm = RowMatrix(rows) >>> svd_model = rm.computeSVD(2, True) >>> svd_model.U.rows.collect() [DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])] >>> svd_model.s DenseVector([3.4641, 3.1623]) >>> svd_model.V DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
[ "Computes", "the", "singular", "value", "decomposition", "of", "the", "RowMatrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L304-L345
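The singular values in the doctest can likewise be reproduced with a local NumPy SVD; sign conventions for U and V may differ from MLlib's output, so only s is compared here.

```python
import numpy as np

A = np.array([[3.0, 1.0, 1.0],
              [-1.0, 3.0, 1.0]])
U, s, Vt = np.linalg.svd(A, full_matrices=False)
print(s)  # [3.4641... 3.1622...], matching svd_model.s
```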
apache/spark
python/pyspark/mllib/linalg/distributed.py
RowMatrix.multiply
def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`RowMatrix` >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]])) >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") j_model = self._java_matrix_wrapper.call("multiply", matrix) return RowMatrix(j_model)
python
def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`RowMatrix` >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]])) >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") j_model = self._java_matrix_wrapper.call("multiply", matrix) return RowMatrix(j_model)
[ "def", "multiply", "(", "self", ",", "matrix", ")", ":", "if", "not", "isinstance", "(", "matrix", ",", "DenseMatrix", ")", ":", "raise", "ValueError", "(", "\"Only multiplication with DenseMatrix \"", "\"is supported.\"", ")", "j_model", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"multiply\"", ",", "matrix", ")", "return", "RowMatrix", "(", "j_model", ")" ]
Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`RowMatrix` >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]])) >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
[ "Multiply", "this", "matrix", "by", "a", "local", "dense", "matrix", "on", "the", "right", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L373-L389
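A local cross-check of the multiply doctest: MLlib's DenseMatrix(2, 2, [0, 2, 1, 3]) is stored column-major, so the equivalent NumPy reshape uses Fortran order.

```python
import numpy as np

rows = np.array([[0.0, 1.0],
                 [2.0, 3.0]])
local = np.array([0.0, 2.0, 1.0, 3.0]).reshape(2, 2, order='F')  # DenseMatrix(2, 2, [0, 2, 1, 3])
print(rows.dot(local))  # [[ 2.  3.] [ 6. 11.]] -- matches the collected DenseVectors
```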
apache/spark
python/pyspark/mllib/linalg/distributed.py
SingularValueDecomposition.U
def U(self): """ Returns a distributed matrix whose columns are the left singular vectors of the SingularValueDecomposition if computeU was set to be True. """ u = self.call("U") if u is not None: mat_name = u.getClass().getSimpleName() if mat_name == "RowMatrix": return RowMatrix(u) elif mat_name == "IndexedRowMatrix": return IndexedRowMatrix(u) else: raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name)
python
def U(self): """ Returns a distributed matrix whose columns are the left singular vectors of the SingularValueDecomposition if computeU was set to be True. """ u = self.call("U") if u is not None: mat_name = u.getClass().getSimpleName() if mat_name == "RowMatrix": return RowMatrix(u) elif mat_name == "IndexedRowMatrix": return IndexedRowMatrix(u) else: raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name)
[ "def", "U", "(", "self", ")", ":", "u", "=", "self", ".", "call", "(", "\"U\"", ")", "if", "u", "is", "not", "None", ":", "mat_name", "=", "u", ".", "getClass", "(", ")", ".", "getSimpleName", "(", ")", "if", "mat_name", "==", "\"RowMatrix\"", ":", "return", "RowMatrix", "(", "u", ")", "elif", "mat_name", "==", "\"IndexedRowMatrix\"", ":", "return", "IndexedRowMatrix", "(", "u", ")", "else", ":", "raise", "TypeError", "(", "\"Expected RowMatrix/IndexedRowMatrix got %s\"", "%", "mat_name", ")" ]
Returns a distributed matrix whose columns are the left singular vectors of the SingularValueDecomposition if computeU was set to be True.
[ "Returns", "a", "distributed", "matrix", "whose", "columns", "are", "the", "left", "singular", "vectors", "of", "the", "SingularValueDecomposition", "if", "computeU", "was", "set", "to", "be", "True", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L401-L414
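A hedged usage sketch of this property: when computeU is left at False, the Java side should return no U matrix and the property is expected to yield None, while s and V remain available. The behaviour of the computeU=False branch is an assumption here, not shown in the record.

```python
from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import RowMatrix

sc = SparkContext.getOrCreate()
rm = RowMatrix(sc.parallelize([[3.0, 1.0, 1.0], [-1.0, 3.0, 1.0]]))
svd = rm.computeSVD(2, computeU=False)
print(svd.U)  # expected: None, since the left singular vectors were not computed
print(svd.s)  # singular values are returned either way
```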
apache/spark
python/pyspark/mllib/linalg/distributed.py
IndexedRowMatrix.rows
def rows(self): """ Rows of the IndexedRowMatrix stored as an RDD of IndexedRows. >>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])])) >>> rows = mat.rows >>> rows.first() IndexedRow(0, [1.0,2.0,3.0]) """ # We use DataFrames for serialization of IndexedRows from # Java, so we first convert the RDD of rows to a DataFrame # on the Scala/Java side. Then we map each Row in the # DataFrame back to an IndexedRow on this side. rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model) rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1])) return rows
python
def rows(self): """ Rows of the IndexedRowMatrix stored as an RDD of IndexedRows. >>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])])) >>> rows = mat.rows >>> rows.first() IndexedRow(0, [1.0,2.0,3.0]) """ # We use DataFrames for serialization of IndexedRows from # Java, so we first convert the RDD of rows to a DataFrame # on the Scala/Java side. Then we map each Row in the # DataFrame back to an IndexedRow on this side. rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model) rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1])) return rows
[ "def", "rows", "(", "self", ")", ":", "# We use DataFrames for serialization of IndexedRows from", "# Java, so we first convert the RDD of rows to a DataFrame", "# on the Scala/Java side. Then we map each Row in the", "# DataFrame back to an IndexedRow on this side.", "rows_df", "=", "callMLlibFunc", "(", "\"getIndexedRows\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "rows", "=", "rows_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "IndexedRow", "(", "row", "[", "0", "]", ",", "row", "[", "1", "]", ")", ")", "return", "rows" ]
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows. >>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(1, [4, 5, 6])])) >>> rows = mat.rows >>> rows.first() IndexedRow(0, [1.0,2.0,3.0])
[ "Rows", "of", "the", "IndexedRowMatrix", "stored", "as", "an", "RDD", "of", "IndexedRows", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L519-L535
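A small sketch assuming that, as in the IndexedRowMatrix.multiply doctest further below, rows may also be supplied as plain (index, vector) tuples rather than IndexedRow objects.

```python
from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import IndexedRowMatrix

sc = SparkContext.getOrCreate()
mat = IndexedRowMatrix(sc.parallelize([(0, [1.0, 2.0, 3.0]),
                                       (1, [4.0, 5.0, 6.0])]))
print(mat.rows.first())              # IndexedRow(0, [1.0,2.0,3.0])
print(mat.numRows(), mat.numCols())  # 2 3
```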
apache/spark
python/pyspark/mllib/linalg/distributed.py
IndexedRowMatrix.toBlockMatrix
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024): """ Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toBlockMatrix() >>> # This IndexedRowMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> print(mat.numCols()) 3 """ java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix", rowsPerBlock, colsPerBlock) return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock)
python
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024): """ Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toBlockMatrix() >>> # This IndexedRowMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> print(mat.numCols()) 3 """ java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix", rowsPerBlock, colsPerBlock) return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock)
[ "def", "toBlockMatrix", "(", "self", ",", "rowsPerBlock", "=", "1024", ",", "colsPerBlock", "=", "1024", ")", ":", "java_block_matrix", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"toBlockMatrix\"", ",", "rowsPerBlock", ",", "colsPerBlock", ")", "return", "BlockMatrix", "(", "java_block_matrix", ",", "rowsPerBlock", ",", "colsPerBlock", ")" ]
Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each block. The blocks forming the final columns are not required to have the given number of columns. >>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), ... IndexedRow(6, [4, 5, 6])]) >>> mat = IndexedRowMatrix(rows).toBlockMatrix() >>> # This IndexedRowMatrix will have 7 effective rows, due to >>> # the highest row index being 6, and the ensuing >>> # BlockMatrix will have 7 rows as well. >>> print(mat.numRows()) 7 >>> print(mat.numCols()) 3
[ "Convert", "this", "matrix", "to", "a", "BlockMatrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L631-L658
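A tentative sketch of how block sizes shape the result: with rowsPerBlock=4 the 7-row matrix from the doctest should split into two row-blocks, each holding whichever indexed rows fall into it. The exact number of materialized blocks is an assumption about the implementation (blocks containing no rows are presumed not to be created).

```python
from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import IndexedRow, IndexedRowMatrix

sc = SparkContext.getOrCreate()
rows = sc.parallelize([IndexedRow(0, [1, 2, 3]), IndexedRow(6, [4, 5, 6])])
bm = IndexedRowMatrix(rows).toBlockMatrix(rowsPerBlock=4, colsPerBlock=3)
print(bm.numRows(), bm.numCols())  # 7 3, as in the doctest
print(bm.blocks.count())           # presumably 2: row-blocks 0 and 1, one column-block
```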
apache/spark
python/pyspark/mllib/linalg/distributed.py
IndexedRowMatrix.multiply
def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`IndexedRowMatrix` >>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))])) >>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix))
python
def multiply(self, matrix): """ Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`IndexedRowMatrix` >>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))])) >>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])] """ if not isinstance(matrix, DenseMatrix): raise ValueError("Only multiplication with DenseMatrix " "is supported.") return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix))
[ "def", "multiply", "(", "self", ",", "matrix", ")", ":", "if", "not", "isinstance", "(", "matrix", ",", "DenseMatrix", ")", ":", "raise", "ValueError", "(", "\"Only multiplication with DenseMatrix \"", "\"is supported.\"", ")", "return", "IndexedRowMatrix", "(", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"multiply\"", ",", "matrix", ")", ")" ]
Multiply this matrix by a local dense matrix on the right. :param matrix: a local dense matrix whose number of rows must match the number of columns of this matrix :returns: :py:class:`IndexedRowMatrix` >>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))])) >>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect() [IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])]
[ "Multiply", "this", "matrix", "by", "a", "local", "dense", "matrix", "on", "the", "right", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L705-L720
apache/spark
python/pyspark/mllib/linalg/distributed.py
CoordinateMatrix.entries
def entries(self): """ Entries of the CoordinateMatrix stored as an RDD of MatrixEntries. >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)])) >>> entries = mat.entries >>> entries.first() MatrixEntry(0, 0, 1.2) """ # We use DataFrames for serialization of MatrixEntry entries # from Java, so we first convert the RDD of entries to a # DataFrame on the Scala/Java side. Then we map each Row in # the DataFrame back to a MatrixEntry on this side. entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model) entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2])) return entries
python
def entries(self): """ Entries of the CoordinateMatrix stored as an RDD of MatrixEntries. >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)])) >>> entries = mat.entries >>> entries.first() MatrixEntry(0, 0, 1.2) """ # We use DataFrames for serialization of MatrixEntry entries # from Java, so we first convert the RDD of entries to a # DataFrame on the Scala/Java side. Then we map each Row in # the DataFrame back to a MatrixEntry on this side. entries_df = callMLlibFunc("getMatrixEntries", self._java_matrix_wrapper._java_model) entries = entries_df.rdd.map(lambda row: MatrixEntry(row[0], row[1], row[2])) return entries
[ "def", "entries", "(", "self", ")", ":", "# We use DataFrames for serialization of MatrixEntry entries", "# from Java, so we first convert the RDD of entries to a", "# DataFrame on the Scala/Java side. Then we map each Row in", "# the DataFrame back to a MatrixEntry on this side.", "entries_df", "=", "callMLlibFunc", "(", "\"getMatrixEntries\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "entries", "=", "entries_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "MatrixEntry", "(", "row", "[", "0", "]", ",", "row", "[", "1", "]", ",", "row", "[", "2", "]", ")", ")", "return", "entries" ]
Entries of the CoordinateMatrix stored as an RDD of MatrixEntries. >>> mat = CoordinateMatrix(sc.parallelize([MatrixEntry(0, 0, 1.2), ... MatrixEntry(6, 4, 2.1)])) >>> entries = mat.entries >>> entries.first() MatrixEntry(0, 0, 1.2)
[ "Entries", "of", "the", "CoordinateMatrix", "stored", "as", "an", "RDD", "of", "MatrixEntries", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L811-L828
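A sketch assuming the constructor also accepts plain (i, j, value) tuples; once built, the entries RDD behaves like any other RDD and can, for instance, be filtered before collecting.

```python
from pyspark import SparkContext
from pyspark.mllib.linalg.distributed import CoordinateMatrix

sc = SparkContext.getOrCreate()
cm = CoordinateMatrix(sc.parallelize([(0, 0, 1.2), (6, 4, 2.1)]))
large = cm.entries.filter(lambda e: e.value > 2.0).collect()
print(large)  # [MatrixEntry(6, 4, 2.1)]
```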
apache/spark
python/pyspark/mllib/linalg/distributed.py
BlockMatrix.blocks
def blocks(self): """ The RDD of sub-matrix blocks ((blockRowIndex, blockColIndex), sub-matrix) that form this distributed matrix. >>> mat = BlockMatrix( ... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2) >>> blocks = mat.blocks >>> blocks.first() ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0)) """ # We use DataFrames for serialization of sub-matrix blocks # from Java, so we first convert the RDD of blocks to a # DataFrame on the Scala/Java side. Then we map each Row in # the DataFrame back to a sub-matrix block on this side. blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model) blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1])) return blocks
python
def blocks(self): """ The RDD of sub-matrix blocks ((blockRowIndex, blockColIndex), sub-matrix) that form this distributed matrix. >>> mat = BlockMatrix( ... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2) >>> blocks = mat.blocks >>> blocks.first() ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0)) """ # We use DataFrames for serialization of sub-matrix blocks # from Java, so we first convert the RDD of blocks to a # DataFrame on the Scala/Java side. Then we map each Row in # the DataFrame back to a sub-matrix block on this side. blocks_df = callMLlibFunc("getMatrixBlocks", self._java_matrix_wrapper._java_model) blocks = blocks_df.rdd.map(lambda row: ((row[0][0], row[0][1]), row[1])) return blocks
[ "def", "blocks", "(", "self", ")", ":", "# We use DataFrames for serialization of sub-matrix blocks", "# from Java, so we first convert the RDD of blocks to a", "# DataFrame on the Scala/Java side. Then we map each Row in", "# the DataFrame back to a sub-matrix block on this side.", "blocks_df", "=", "callMLlibFunc", "(", "\"getMatrixBlocks\"", ",", "self", ".", "_java_matrix_wrapper", ".", "_java_model", ")", "blocks", "=", "blocks_df", ".", "rdd", ".", "map", "(", "lambda", "row", ":", "(", "(", "row", "[", "0", "]", "[", "0", "]", ",", "row", "[", "0", "]", "[", "1", "]", ")", ",", "row", "[", "1", "]", ")", ")", "return", "blocks" ]
The RDD of sub-matrix blocks ((blockRowIndex, blockColIndex), sub-matrix) that form this distributed matrix. >>> mat = BlockMatrix( ... sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]), 3, 2) >>> blocks = mat.blocks >>> blocks.first() ((0, 0), DenseMatrix(3, 2, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 0))
[ "The", "RDD", "of", "sub", "-", "matrix", "blocks", "((", "blockRowIndex", "blockColIndex", ")", "sub", "-", "matrix", ")", "that", "form", "this", "distributed", "matrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1051-L1071
apache/spark
python/pyspark/mllib/linalg/distributed.py
BlockMatrix.persist
def persist(self, storageLevel): """ Persists the underlying RDD with the specified storage level. """ if not isinstance(storageLevel, StorageLevel): raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel)) javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel) self._java_matrix_wrapper.call("persist", javaStorageLevel) return self
python
def persist(self, storageLevel): """ Persists the underlying RDD with the specified storage level. """ if not isinstance(storageLevel, StorageLevel): raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel)) javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel) self._java_matrix_wrapper.call("persist", javaStorageLevel) return self
[ "def", "persist", "(", "self", ",", "storageLevel", ")", ":", "if", "not", "isinstance", "(", "storageLevel", ",", "StorageLevel", ")", ":", "raise", "TypeError", "(", "\"`storageLevel` should be a StorageLevel, got %s\"", "%", "type", "(", "storageLevel", ")", ")", "javaStorageLevel", "=", "self", ".", "_java_matrix_wrapper", ".", "_sc", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"persist\"", ",", "javaStorageLevel", ")", "return", "self" ]
Persists the underlying RDD with the specified storage level.
[ "Persists", "the", "underlying", "RDD", "with", "the", "specified", "storage", "level", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1168-L1176
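A minimal sketch of persisting a BlockMatrix before repeated use, reusing the block data from the surrounding doctests; StorageLevel is imported from pyspark, and passing anything else would trigger the TypeError shown above.

```python
from pyspark import SparkContext, StorageLevel
from pyspark.mllib.linalg import Matrices
from pyspark.mllib.linalg.distributed import BlockMatrix

sc = SparkContext.getOrCreate()
blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
                         ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
mat = BlockMatrix(blocks, 3, 2)
mat.persist(StorageLevel.MEMORY_AND_DISK)  # returns the same BlockMatrix
```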
apache/spark
python/pyspark/mllib/linalg/distributed.py
BlockMatrix.add
def add(self, other): """ Adds two block matrices together. The matrices must have the same size and matching `rowsPerBlock` and `colsPerBlock` values. If one of the sub matrix blocks that are being added is a SparseMatrix, the resulting sub matrix block will also be a SparseMatrix, even if it is being added to a DenseMatrix. If two dense sub matrix blocks are added, the output block will also be a DenseMatrix. >>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6]) >>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]) >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12]) >>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)]) >>> mat1 = BlockMatrix(blocks1, 3, 2) >>> mat2 = BlockMatrix(blocks2, 3, 2) >>> mat3 = BlockMatrix(blocks3, 3, 2) >>> mat1.add(mat2).toLocalMatrix() DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0) >>> mat1.add(mat3).toLocalMatrix() DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0) """ if not isinstance(other, BlockMatrix): raise TypeError("Other should be a BlockMatrix, got %s" % type(other)) other_java_block_matrix = other._java_matrix_wrapper._java_model java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix) return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)
python
def add(self, other): """ Adds two block matrices together. The matrices must have the same size and matching `rowsPerBlock` and `colsPerBlock` values. If one of the sub matrix blocks that are being added is a SparseMatrix, the resulting sub matrix block will also be a SparseMatrix, even if it is being added to a DenseMatrix. If two dense sub matrix blocks are added, the output block will also be a DenseMatrix. >>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6]) >>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]) >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12]) >>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)]) >>> mat1 = BlockMatrix(blocks1, 3, 2) >>> mat2 = BlockMatrix(blocks2, 3, 2) >>> mat3 = BlockMatrix(blocks3, 3, 2) >>> mat1.add(mat2).toLocalMatrix() DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0) >>> mat1.add(mat3).toLocalMatrix() DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0) """ if not isinstance(other, BlockMatrix): raise TypeError("Other should be a BlockMatrix, got %s" % type(other)) other_java_block_matrix = other._java_matrix_wrapper._java_model java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix) return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock)
[ "def", "add", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "BlockMatrix", ")", ":", "raise", "TypeError", "(", "\"Other should be a BlockMatrix, got %s\"", "%", "type", "(", "other", ")", ")", "other_java_block_matrix", "=", "other", ".", "_java_matrix_wrapper", ".", "_java_model", "java_block_matrix", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"add\"", ",", "other_java_block_matrix", ")", "return", "BlockMatrix", "(", "java_block_matrix", ",", "self", ".", "rowsPerBlock", ",", "self", ".", "colsPerBlock", ")" ]
Adds two block matrices together. The matrices must have the same size and matching `rowsPerBlock` and `colsPerBlock` values. If one of the sub matrix blocks that are being added is a SparseMatrix, the resulting sub matrix block will also be a SparseMatrix, even if it is being added to a DenseMatrix. If two dense sub matrix blocks are added, the output block will also be a DenseMatrix. >>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6]) >>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]) >>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12]) >>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)]) >>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)]) >>> mat1 = BlockMatrix(blocks1, 3, 2) >>> mat2 = BlockMatrix(blocks2, 3, 2) >>> mat3 = BlockMatrix(blocks3, 3, 2) >>> mat1.add(mat2).toLocalMatrix() DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0) >>> mat1.add(mat3).toLocalMatrix() DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
[ "Adds", "two", "block", "matrices", "together", ".", "The", "matrices", "must", "have", "the", "same", "size", "and", "matching", "rowsPerBlock", "and", "colsPerBlock", "values", ".", "If", "one", "of", "the", "sub", "matrix", "blocks", "that", "are", "being", "added", "is", "a", "SparseMatrix", "the", "resulting", "sub", "matrix", "block", "will", "also", "be", "a", "SparseMatrix", "even", "if", "it", "is", "being", "added", "to", "a", "DenseMatrix", ".", "If", "two", "dense", "sub", "matrix", "blocks", "are", "added", "the", "output", "block", "will", "also", "be", "a", "DenseMatrix", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1186-L1217
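A local NumPy check of the first add() doctest: the two 3 x 2 column-major blocks stack into a 6 x 2 matrix, and adding it to itself doubles every entry, giving exactly the flattened values shown above.

```python
import numpy as np

dm1 = np.array([1, 2, 3, 4, 5, 6], dtype=float).reshape(3, 2, order='F')
dm2 = np.array([7, 8, 9, 10, 11, 12], dtype=float).reshape(3, 2, order='F')
local = np.vstack([dm1, dm2])          # the 6 x 2 matrix behind mat1
print((local + local).ravel(order='F').tolist())
# [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0]
```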
apache/spark
python/pyspark/mllib/linalg/distributed.py
BlockMatrix.transpose
def transpose(self): """ Transpose this BlockMatrix. Returns a new BlockMatrix instance sharing the same underlying data. Is a lazy operation. >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]) >>> mat = BlockMatrix(blocks, 3, 2) >>> mat_transposed = mat.transpose() >>> mat_transposed.toLocalMatrix() DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0) """ java_transposed_matrix = self._java_matrix_wrapper.call("transpose") return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock)
python
def transpose(self): """ Transpose this BlockMatrix. Returns a new BlockMatrix instance sharing the same underlying data. Is a lazy operation. >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]) >>> mat = BlockMatrix(blocks, 3, 2) >>> mat_transposed = mat.transpose() >>> mat_transposed.toLocalMatrix() DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0) """ java_transposed_matrix = self._java_matrix_wrapper.call("transpose") return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock)
[ "def", "transpose", "(", "self", ")", ":", "java_transposed_matrix", "=", "self", ".", "_java_matrix_wrapper", ".", "call", "(", "\"transpose\"", ")", "return", "BlockMatrix", "(", "java_transposed_matrix", ",", "self", ".", "colsPerBlock", ",", "self", ".", "rowsPerBlock", ")" ]
Transpose this BlockMatrix. Returns a new BlockMatrix instance sharing the same underlying data. Is a lazy operation. >>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])), ... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))]) >>> mat = BlockMatrix(blocks, 3, 2) >>> mat_transposed = mat.transpose() >>> mat_transposed.toLocalMatrix() DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
[ "Transpose", "this", "BlockMatrix", ".", "Returns", "a", "new", "BlockMatrix", "instance", "sharing", "the", "same", "underlying", "data", ".", "Is", "a", "lazy", "operation", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1290-L1304
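The same local reconstruction verifies the transpose() doctest: transposing the stacked 6 x 2 matrix and flattening column-major reproduces the DenseMatrix(2, 6, ...) values above.

```python
import numpy as np

local = np.vstack([np.array([1, 2, 3, 4, 5, 6], dtype=float).reshape(3, 2, order='F'),
                   np.array([7, 8, 9, 10, 11, 12], dtype=float).reshape(3, 2, order='F')])
print(local.T.ravel(order='F').tolist())
# [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0]
```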
apache/spark
python/pyspark/mllib/linalg/__init__.py
_vector_size
def _vector_size(v): """ Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector """ if isinstance(v, Vector): return len(v) elif type(v) in (array.array, list, tuple, xrange): return len(v) elif type(v) == np.ndarray: if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1): return len(v) else: raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape)) elif _have_scipy and scipy.sparse.issparse(v): assert v.shape[1] == 1, "Expected column vector" return v.shape[0] else: raise TypeError("Cannot treat type %s as a vector" % type(v))
python
def _vector_size(v): """ Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector """ if isinstance(v, Vector): return len(v) elif type(v) in (array.array, list, tuple, xrange): return len(v) elif type(v) == np.ndarray: if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1): return len(v) else: raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape)) elif _have_scipy and scipy.sparse.issparse(v): assert v.shape[1] == 1, "Expected column vector" return v.shape[0] else: raise TypeError("Cannot treat type %s as a vector" % type(v))
[ "def", "_vector_size", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "Vector", ")", ":", "return", "len", "(", "v", ")", "elif", "type", "(", "v", ")", "in", "(", "array", ".", "array", ",", "list", ",", "tuple", ",", "xrange", ")", ":", "return", "len", "(", "v", ")", "elif", "type", "(", "v", ")", "==", "np", ".", "ndarray", ":", "if", "v", ".", "ndim", "==", "1", "or", "(", "v", ".", "ndim", "==", "2", "and", "v", ".", "shape", "[", "1", "]", "==", "1", ")", ":", "return", "len", "(", "v", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot treat an ndarray of shape %s as a vector\"", "%", "str", "(", "v", ".", "shape", ")", ")", "elif", "_have_scipy", "and", "scipy", ".", "sparse", ".", "issparse", "(", "v", ")", ":", "assert", "v", ".", "shape", "[", "1", "]", "==", "1", ",", "\"Expected column vector\"", "return", "v", ".", "shape", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"Cannot treat type %s as a vector\"", "%", "type", "(", "v", ")", ")" ]
Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
[ "Returns", "the", "size", "of", "the", "vector", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L86-L118
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseVector.parse
def parse(s): """ Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0]) """ start = s.find('[') if start == -1: raise ValueError("Array should start with '['.") end = s.find(']') if end == -1: raise ValueError("Array should end with ']'.") s = s[start + 1: end] try: values = [float(val) for val in s.split(',') if val] except ValueError: raise ValueError("Unable to parse values from %s" % s) return DenseVector(values)
python
def parse(s): """ Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0]) """ start = s.find('[') if start == -1: raise ValueError("Array should start with '['.") end = s.find(']') if end == -1: raise ValueError("Array should end with ']'.") s = s[start + 1: end] try: values = [float(val) for val in s.split(',') if val] except ValueError: raise ValueError("Unable to parse values from %s" % s) return DenseVector(values)
[ "def", "parse", "(", "s", ")", ":", "start", "=", "s", ".", "find", "(", "'['", ")", "if", "start", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Array should start with '['.\"", ")", "end", "=", "s", ".", "find", "(", "']'", ")", "if", "end", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Array should end with ']'.\"", ")", "s", "=", "s", "[", "start", "+", "1", ":", "end", "]", "try", ":", "values", "=", "[", "float", "(", "val", ")", "for", "val", "in", "s", ".", "split", "(", "','", ")", "if", "val", "]", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse values from %s\"", "%", "s", ")", "return", "DenseVector", "(", "values", ")" ]
Parse string representation back into the DenseVector. >>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]') DenseVector([0.0, 1.0, 2.0, 3.0])
[ "Parse", "string", "representation", "back", "into", "the", "DenseVector", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L297-L316
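A small round-trip sketch, assuming DenseVector.__str__ produces the bracketed comma-separated form that parse() accepts (as the doctest's input format suggests).

```python
from pyspark.mllib.linalg import DenseVector

v = DenseVector([0.0, 1.0, 2.0, 3.0])
s = str(v)                        # e.g. '[0.0,1.0,2.0,3.0]' (assumed format)
assert DenseVector.parse(s) == v  # values survive the text round trip
print(s)
```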
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseVector.dot
def dot(self, other): """ Compute the dot product of two Vectors. We support (Numpy array, list, SparseVector, or SciPy sparse) and a target NumPy array that is either 1- or 2-dimensional. Equivalent to calling numpy.dot of the two vectors. >>> dense = DenseVector(array.array('d', [1., 2.])) >>> dense.dot(dense) 5.0 >>> dense.dot(SparseVector(2, [0, 1], [2., 1.])) 4.0 >>> dense.dot(range(1, 3)) 5.0 >>> dense.dot(np.array(range(1, 3))) 5.0 >>> dense.dot([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F')) array([ 5., 11.]) >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F')) Traceback (most recent call last): ... AssertionError: dimension mismatch """ if type(other) == np.ndarray: if other.ndim > 1: assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.array, other) elif _have_scipy and scipy.sparse.issparse(other): assert len(self) == other.shape[0], "dimension mismatch" return other.transpose().dot(self.toArray()) else: assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.dot(self) elif isinstance(other, Vector): return np.dot(self.toArray(), other.toArray()) else: return np.dot(self.toArray(), other)
python
def dot(self, other): """ Compute the dot product of two Vectors. We support (Numpy array, list, SparseVector, or SciPy sparse) and a target NumPy array that is either 1- or 2-dimensional. Equivalent to calling numpy.dot of the two vectors. >>> dense = DenseVector(array.array('d', [1., 2.])) >>> dense.dot(dense) 5.0 >>> dense.dot(SparseVector(2, [0, 1], [2., 1.])) 4.0 >>> dense.dot(range(1, 3)) 5.0 >>> dense.dot(np.array(range(1, 3))) 5.0 >>> dense.dot([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F')) array([ 5., 11.]) >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F')) Traceback (most recent call last): ... AssertionError: dimension mismatch """ if type(other) == np.ndarray: if other.ndim > 1: assert len(self) == other.shape[0], "dimension mismatch" return np.dot(self.array, other) elif _have_scipy and scipy.sparse.issparse(other): assert len(self) == other.shape[0], "dimension mismatch" return other.transpose().dot(self.toArray()) else: assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.dot(self) elif isinstance(other, Vector): return np.dot(self.toArray(), other.toArray()) else: return np.dot(self.toArray(), other)
[ "def", "dot", "(", "self", ",", "other", ")", ":", "if", "type", "(", "other", ")", "==", "np", ".", "ndarray", ":", "if", "other", ".", "ndim", ">", "1", ":", "assert", "len", "(", "self", ")", "==", "other", ".", "shape", "[", "0", "]", ",", "\"dimension mismatch\"", "return", "np", ".", "dot", "(", "self", ".", "array", ",", "other", ")", "elif", "_have_scipy", "and", "scipy", ".", "sparse", ".", "issparse", "(", "other", ")", ":", "assert", "len", "(", "self", ")", "==", "other", ".", "shape", "[", "0", "]", ",", "\"dimension mismatch\"", "return", "other", ".", "transpose", "(", ")", ".", "dot", "(", "self", ".", "toArray", "(", ")", ")", "else", ":", "assert", "len", "(", "self", ")", "==", "_vector_size", "(", "other", ")", ",", "\"dimension mismatch\"", "if", "isinstance", "(", "other", ",", "SparseVector", ")", ":", "return", "other", ".", "dot", "(", "self", ")", "elif", "isinstance", "(", "other", ",", "Vector", ")", ":", "return", "np", ".", "dot", "(", "self", ".", "toArray", "(", ")", ",", "other", ".", "toArray", "(", ")", ")", "else", ":", "return", "np", ".", "dot", "(", "self", ".", "toArray", "(", ")", ",", "other", ")" ]
Compute the dot product of two Vectors. We support (Numpy array, list, SparseVector, or SciPy sparse) and a target NumPy array that is either 1- or 2-dimensional. Equivalent to calling numpy.dot of the two vectors. >>> dense = DenseVector(array.array('d', [1., 2.])) >>> dense.dot(dense) 5.0 >>> dense.dot(SparseVector(2, [0, 1], [2., 1.])) 4.0 >>> dense.dot(range(1, 3)) 5.0 >>> dense.dot(np.array(range(1, 3))) 5.0 >>> dense.dot([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F')) array([ 5., 11.]) >>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F')) Traceback (most recent call last): ... AssertionError: dimension mismatch
[ "Compute", "the", "dot", "product", "of", "two", "Vectors", ".", "We", "support", "(", "Numpy", "array", "list", "SparseVector", "or", "SciPy", "sparse", ")", "and", "a", "target", "NumPy", "array", "that", "is", "either", "1", "-", "or", "2", "-", "dimensional", ".", "Equivalent", "to", "calling", "numpy", ".", "dot", "of", "the", "two", "vectors", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L339-L380
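A local equivalence check for the mixed dense/sparse case in the doctest: the result agrees with NumPy's dot on the densified operands. No SparkContext is needed, since these vector types are local.

```python
import numpy as np
from pyspark.mllib.linalg import DenseVector, SparseVector

dense = DenseVector([1.0, 2.0])
sparse = SparseVector(2, [0, 1], [2.0, 1.0])
assert dense.dot(sparse) == np.dot(dense.toArray(), sparse.toArray()) == 4.0
```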
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseVector.squared_distance
def squared_distance(self, other): """ Squared distance of two Vectors. >>> dense1 = DenseVector(array.array('d', [1., 2.])) >>> dense1.squared_distance(dense1) 0.0 >>> dense2 = np.array([2., 1.]) >>> dense1.squared_distance(dense2) 2.0 >>> dense3 = [2., 1.] >>> dense1.squared_distance(dense3) 2.0 >>> sparse1 = SparseVector(2, [0, 1], [2., 1.]) >>> dense1.squared_distance(sparse1) 2.0 >>> dense1.squared_distance([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense1.squared_distance(SparseVector(1, [0,], [1.,])) Traceback (most recent call last): ... AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.squared_distance(self) elif _have_scipy and scipy.sparse.issparse(other): return _convert_to_vector(other).squared_distance(self) if isinstance(other, Vector): other = other.toArray() elif not isinstance(other, np.ndarray): other = np.array(other) diff = self.toArray() - other return np.dot(diff, diff)
python
def squared_distance(self, other): """ Squared distance of two Vectors. >>> dense1 = DenseVector(array.array('d', [1., 2.])) >>> dense1.squared_distance(dense1) 0.0 >>> dense2 = np.array([2., 1.]) >>> dense1.squared_distance(dense2) 2.0 >>> dense3 = [2., 1.] >>> dense1.squared_distance(dense3) 2.0 >>> sparse1 = SparseVector(2, [0, 1], [2., 1.]) >>> dense1.squared_distance(sparse1) 2.0 >>> dense1.squared_distance([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense1.squared_distance(SparseVector(1, [0,], [1.,])) Traceback (most recent call last): ... AssertionError: dimension mismatch """ assert len(self) == _vector_size(other), "dimension mismatch" if isinstance(other, SparseVector): return other.squared_distance(self) elif _have_scipy and scipy.sparse.issparse(other): return _convert_to_vector(other).squared_distance(self) if isinstance(other, Vector): other = other.toArray() elif not isinstance(other, np.ndarray): other = np.array(other) diff = self.toArray() - other return np.dot(diff, diff)
[ "def", "squared_distance", "(", "self", ",", "other", ")", ":", "assert", "len", "(", "self", ")", "==", "_vector_size", "(", "other", ")", ",", "\"dimension mismatch\"", "if", "isinstance", "(", "other", ",", "SparseVector", ")", ":", "return", "other", ".", "squared_distance", "(", "self", ")", "elif", "_have_scipy", "and", "scipy", ".", "sparse", ".", "issparse", "(", "other", ")", ":", "return", "_convert_to_vector", "(", "other", ")", ".", "squared_distance", "(", "self", ")", "if", "isinstance", "(", "other", ",", "Vector", ")", ":", "other", "=", "other", ".", "toArray", "(", ")", "elif", "not", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", ":", "other", "=", "np", ".", "array", "(", "other", ")", "diff", "=", "self", ".", "toArray", "(", ")", "-", "other", "return", "np", ".", "dot", "(", "diff", ",", "diff", ")" ]
Squared distance of two Vectors. >>> dense1 = DenseVector(array.array('d', [1., 2.])) >>> dense1.squared_distance(dense1) 0.0 >>> dense2 = np.array([2., 1.]) >>> dense1.squared_distance(dense2) 2.0 >>> dense3 = [2., 1.] >>> dense1.squared_distance(dense3) 2.0 >>> sparse1 = SparseVector(2, [0, 1], [2., 1.]) >>> dense1.squared_distance(sparse1) 2.0 >>> dense1.squared_distance([1.,]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> dense1.squared_distance(SparseVector(1, [0,], [1.,])) Traceback (most recent call last): ... AssertionError: dimension mismatch
[ "Squared", "distance", "of", "two", "Vectors", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L382-L418
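The doctest value 2.0 is just the summed elementwise squared difference; a one-line NumPy check:

```python
import numpy as np

a, b = np.array([1.0, 2.0]), np.array([2.0, 1.0])
print(np.sum((a - b) ** 2))  # 2.0, matching dense1.squared_distance(dense2)
```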
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseVector.parse
def parse(s): """ Parse string representation back into the SparseVector. >>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )') SparseVector(4, {0: 4.0, 1: 5.0}) """ start = s.find('(') if start == -1: raise ValueError("Tuple should start with '('") end = s.find(')') if end == -1: raise ValueError("Tuple should end with ')'") s = s[start + 1: end].strip() size = s[: s.find(',')] try: size = int(size) except ValueError: raise ValueError("Cannot parse size %s." % size) ind_start = s.find('[') if ind_start == -1: raise ValueError("Indices array should start with '['.") ind_end = s.find(']') if ind_end == -1: raise ValueError("Indices array should end with ']'") new_s = s[ind_start + 1: ind_end] ind_list = new_s.split(',') try: indices = [int(ind) for ind in ind_list if ind] except ValueError: raise ValueError("Unable to parse indices from %s." % new_s) s = s[ind_end + 1:].strip() val_start = s.find('[') if val_start == -1: raise ValueError("Values array should start with '['.") val_end = s.find(']') if val_end == -1: raise ValueError("Values array should end with ']'.") val_list = s[val_start + 1: val_end].split(',') try: values = [float(val) for val in val_list if val] except ValueError: raise ValueError("Unable to parse values from %s." % s) return SparseVector(size, indices, values)
python
def parse(s): """ Parse string representation back into the SparseVector. >>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )') SparseVector(4, {0: 4.0, 1: 5.0}) """ start = s.find('(') if start == -1: raise ValueError("Tuple should start with '('") end = s.find(')') if end == -1: raise ValueError("Tuple should end with ')'") s = s[start + 1: end].strip() size = s[: s.find(',')] try: size = int(size) except ValueError: raise ValueError("Cannot parse size %s." % size) ind_start = s.find('[') if ind_start == -1: raise ValueError("Indices array should start with '['.") ind_end = s.find(']') if ind_end == -1: raise ValueError("Indices array should end with ']'") new_s = s[ind_start + 1: ind_end] ind_list = new_s.split(',') try: indices = [int(ind) for ind in ind_list if ind] except ValueError: raise ValueError("Unable to parse indices from %s." % new_s) s = s[ind_end + 1:].strip() val_start = s.find('[') if val_start == -1: raise ValueError("Values array should start with '['.") val_end = s.find(']') if val_end == -1: raise ValueError("Values array should end with ']'.") val_list = s[val_start + 1: val_end].split(',') try: values = [float(val) for val in val_list if val] except ValueError: raise ValueError("Unable to parse values from %s." % s) return SparseVector(size, indices, values)
[ "def", "parse", "(", "s", ")", ":", "start", "=", "s", ".", "find", "(", "'('", ")", "if", "start", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Tuple should start with '('\"", ")", "end", "=", "s", ".", "find", "(", "')'", ")", "if", "end", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Tuple should end with ')'\"", ")", "s", "=", "s", "[", "start", "+", "1", ":", "end", "]", ".", "strip", "(", ")", "size", "=", "s", "[", ":", "s", ".", "find", "(", "','", ")", "]", "try", ":", "size", "=", "int", "(", "size", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Cannot parse size %s.\"", "%", "size", ")", "ind_start", "=", "s", ".", "find", "(", "'['", ")", "if", "ind_start", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Indices array should start with '['.\"", ")", "ind_end", "=", "s", ".", "find", "(", "']'", ")", "if", "ind_end", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Indices array should end with ']'\"", ")", "new_s", "=", "s", "[", "ind_start", "+", "1", ":", "ind_end", "]", "ind_list", "=", "new_s", ".", "split", "(", "','", ")", "try", ":", "indices", "=", "[", "int", "(", "ind", ")", "for", "ind", "in", "ind_list", "if", "ind", "]", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse indices from %s.\"", "%", "new_s", ")", "s", "=", "s", "[", "ind_end", "+", "1", ":", "]", ".", "strip", "(", ")", "val_start", "=", "s", ".", "find", "(", "'['", ")", "if", "val_start", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Values array should start with '['.\"", ")", "val_end", "=", "s", ".", "find", "(", "']'", ")", "if", "val_end", "==", "-", "1", ":", "raise", "ValueError", "(", "\"Values array should end with ']'.\"", ")", "val_list", "=", "s", "[", "val_start", "+", "1", ":", "val_end", "]", ".", "split", "(", "','", ")", "try", ":", "values", "=", "[", "float", "(", "val", ")", "for", "val", "in", "val_list", "if", "val", "]", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Unable to parse values from %s.\"", "%", "s", ")", "return", "SparseVector", "(", "size", ",", "indices", ",", "values", ")" ]
Parse string representation back into the SparseVector.

>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
[ "Parse", "string", "representation", "back", "into", "the", "SparseVector", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L589-L635
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseVector.dot
def dot(self, other):
    """
    Dot product with a SparseVector or 1- or 2-dimensional Numpy array.

    >>> a = SparseVector(4, [1, 3], [3.0, 4.0])
    >>> a.dot(a)
    25.0
    >>> a.dot(array.array('d', [1., 2., 3., 4.]))
    22.0
    >>> b = SparseVector(4, [2], [1.0])
    >>> a.dot(b)
    0.0
    >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
    array([ 22., 22.])
    >>> a.dot([1., 2., 3.])
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(np.array([1., 2.]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(DenseVector([1., 2.]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(np.zeros((3, 2)))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    """
    if isinstance(other, np.ndarray):
        if other.ndim not in [2, 1]:
            raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
        assert len(self) == other.shape[0], "dimension mismatch"
        return np.dot(self.values, other[self.indices])

    assert len(self) == _vector_size(other), "dimension mismatch"

    if isinstance(other, DenseVector):
        return np.dot(other.array[self.indices], self.values)

    elif isinstance(other, SparseVector):
        # Find out common indices.
        self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
        self_values = self.values[self_cmind]
        if self_values.size == 0:
            return 0.0
        else:
            other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
            return np.dot(self_values, other.values[other_cmind])

    else:
        return self.dot(_convert_to_vector(other))
python
def dot(self, other):
    """
    Dot product with a SparseVector or 1- or 2-dimensional Numpy array.

    >>> a = SparseVector(4, [1, 3], [3.0, 4.0])
    >>> a.dot(a)
    25.0
    >>> a.dot(array.array('d', [1., 2., 3., 4.]))
    22.0
    >>> b = SparseVector(4, [2], [1.0])
    >>> a.dot(b)
    0.0
    >>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
    array([ 22., 22.])
    >>> a.dot([1., 2., 3.])
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(np.array([1., 2.]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(DenseVector([1., 2.]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> a.dot(np.zeros((3, 2)))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    """
    if isinstance(other, np.ndarray):
        if other.ndim not in [2, 1]:
            raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
        assert len(self) == other.shape[0], "dimension mismatch"
        return np.dot(self.values, other[self.indices])

    assert len(self) == _vector_size(other), "dimension mismatch"

    if isinstance(other, DenseVector):
        return np.dot(other.array[self.indices], self.values)

    elif isinstance(other, SparseVector):
        # Find out common indices.
        self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
        self_values = self.values[self_cmind]
        if self_values.size == 0:
            return 0.0
        else:
            other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
            return np.dot(self_values, other.values[other_cmind])

    else:
        return self.dot(_convert_to_vector(other))
[ "def", "dot", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", ":", "if", "other", ".", "ndim", "not", "in", "[", "2", ",", "1", "]", ":", "raise", "ValueError", "(", "\"Cannot call dot with %d-dimensional array\"", "%", "other", ".", "ndim", ")", "assert", "len", "(", "self", ")", "==", "other", ".", "shape", "[", "0", "]", ",", "\"dimension mismatch\"", "return", "np", ".", "dot", "(", "self", ".", "values", ",", "other", "[", "self", ".", "indices", "]", ")", "assert", "len", "(", "self", ")", "==", "_vector_size", "(", "other", ")", ",", "\"dimension mismatch\"", "if", "isinstance", "(", "other", ",", "DenseVector", ")", ":", "return", "np", ".", "dot", "(", "other", ".", "array", "[", "self", ".", "indices", "]", ",", "self", ".", "values", ")", "elif", "isinstance", "(", "other", ",", "SparseVector", ")", ":", "# Find out common indices.", "self_cmind", "=", "np", ".", "in1d", "(", "self", ".", "indices", ",", "other", ".", "indices", ",", "assume_unique", "=", "True", ")", "self_values", "=", "self", ".", "values", "[", "self_cmind", "]", "if", "self_values", ".", "size", "==", "0", ":", "return", "0.0", "else", ":", "other_cmind", "=", "np", ".", "in1d", "(", "other", ".", "indices", ",", "self", ".", "indices", ",", "assume_unique", "=", "True", ")", "return", "np", ".", "dot", "(", "self_values", ",", "other", ".", "values", "[", "other_cmind", "]", ")", "else", ":", "return", "self", ".", "dot", "(", "_convert_to_vector", "(", "other", ")", ")" ]
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.

>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
[ "Dot", "product", "with", "a", "SparseVector", "or", "1", "-", "or", "2", "-", "dimensional", "Numpy", "array", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L637-L691
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseVector.squared_distance
def squared_distance(self, other):
    """
    Squared distance from a SparseVector or 1-dimensional NumPy array.

    >>> a = SparseVector(4, [1, 3], [3.0, 4.0])
    >>> a.squared_distance(a)
    0.0
    >>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
    11.0
    >>> a.squared_distance(np.array([1., 2., 3., 4.]))
    11.0
    >>> b = SparseVector(4, [2], [1.0])
    >>> a.squared_distance(b)
    26.0
    >>> b.squared_distance(a)
    26.0
    >>> b.squared_distance([1., 2.])
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    """
    assert len(self) == _vector_size(other), "dimension mismatch"

    if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
        if isinstance(other, np.ndarray) and other.ndim != 1:
            raise Exception("Cannot call squared_distance with %d-dimensional array" % other.ndim)
        if isinstance(other, DenseVector):
            other = other.array
        sparse_ind = np.zeros(other.size, dtype=bool)
        sparse_ind[self.indices] = True
        dist = other[sparse_ind] - self.values
        result = np.dot(dist, dist)

        other_ind = other[~sparse_ind]
        result += np.dot(other_ind, other_ind)
        return result

    elif isinstance(other, SparseVector):
        result = 0.0
        i, j = 0, 0
        while i < len(self.indices) and j < len(other.indices):
            if self.indices[i] == other.indices[j]:
                diff = self.values[i] - other.values[j]
                result += diff * diff
                i += 1
                j += 1
            elif self.indices[i] < other.indices[j]:
                result += self.values[i] * self.values[i]
                i += 1
            else:
                result += other.values[j] * other.values[j]
                j += 1
        while i < len(self.indices):
            result += self.values[i] * self.values[i]
            i += 1
        while j < len(other.indices):
            result += other.values[j] * other.values[j]
            j += 1
        return result
    else:
        return self.squared_distance(_convert_to_vector(other))
python
def squared_distance(self, other):
    """
    Squared distance from a SparseVector or 1-dimensional NumPy array.

    >>> a = SparseVector(4, [1, 3], [3.0, 4.0])
    >>> a.squared_distance(a)
    0.0
    >>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
    11.0
    >>> a.squared_distance(np.array([1., 2., 3., 4.]))
    11.0
    >>> b = SparseVector(4, [2], [1.0])
    >>> a.squared_distance(b)
    26.0
    >>> b.squared_distance(a)
    26.0
    >>> b.squared_distance([1., 2.])
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    >>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
    Traceback (most recent call last):
        ...
    AssertionError: dimension mismatch
    """
    assert len(self) == _vector_size(other), "dimension mismatch"

    if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
        if isinstance(other, np.ndarray) and other.ndim != 1:
            raise Exception("Cannot call squared_distance with %d-dimensional array" % other.ndim)
        if isinstance(other, DenseVector):
            other = other.array
        sparse_ind = np.zeros(other.size, dtype=bool)
        sparse_ind[self.indices] = True
        dist = other[sparse_ind] - self.values
        result = np.dot(dist, dist)

        other_ind = other[~sparse_ind]
        result += np.dot(other_ind, other_ind)
        return result

    elif isinstance(other, SparseVector):
        result = 0.0
        i, j = 0, 0
        while i < len(self.indices) and j < len(other.indices):
            if self.indices[i] == other.indices[j]:
                diff = self.values[i] - other.values[j]
                result += diff * diff
                i += 1
                j += 1
            elif self.indices[i] < other.indices[j]:
                result += self.values[i] * self.values[i]
                i += 1
            else:
                result += other.values[j] * other.values[j]
                j += 1
        while i < len(self.indices):
            result += self.values[i] * self.values[i]
            i += 1
        while j < len(other.indices):
            result += other.values[j] * other.values[j]
            j += 1
        return result
    else:
        return self.squared_distance(_convert_to_vector(other))
[ "def", "squared_distance", "(", "self", ",", "other", ")", ":", "assert", "len", "(", "self", ")", "==", "_vector_size", "(", "other", ")", ",", "\"dimension mismatch\"", "if", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", "or", "isinstance", "(", "other", ",", "DenseVector", ")", ":", "if", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", "and", "other", ".", "ndim", "!=", "1", ":", "raise", "Exception", "(", "\"Cannot call squared_distance with %d-dimensional array\"", "%", "other", ".", "ndim", ")", "if", "isinstance", "(", "other", ",", "DenseVector", ")", ":", "other", "=", "other", ".", "array", "sparse_ind", "=", "np", ".", "zeros", "(", "other", ".", "size", ",", "dtype", "=", "bool", ")", "sparse_ind", "[", "self", ".", "indices", "]", "=", "True", "dist", "=", "other", "[", "sparse_ind", "]", "-", "self", ".", "values", "result", "=", "np", ".", "dot", "(", "dist", ",", "dist", ")", "other_ind", "=", "other", "[", "~", "sparse_ind", "]", "result", "+=", "np", ".", "dot", "(", "other_ind", ",", "other_ind", ")", "return", "result", "elif", "isinstance", "(", "other", ",", "SparseVector", ")", ":", "result", "=", "0.0", "i", ",", "j", "=", "0", ",", "0", "while", "i", "<", "len", "(", "self", ".", "indices", ")", "and", "j", "<", "len", "(", "other", ".", "indices", ")", ":", "if", "self", ".", "indices", "[", "i", "]", "==", "other", ".", "indices", "[", "j", "]", ":", "diff", "=", "self", ".", "values", "[", "i", "]", "-", "other", ".", "values", "[", "j", "]", "result", "+=", "diff", "*", "diff", "i", "+=", "1", "j", "+=", "1", "elif", "self", ".", "indices", "[", "i", "]", "<", "other", ".", "indices", "[", "j", "]", ":", "result", "+=", "self", ".", "values", "[", "i", "]", "*", "self", ".", "values", "[", "i", "]", "i", "+=", "1", "else", ":", "result", "+=", "other", ".", "values", "[", "j", "]", "*", "other", ".", "values", "[", "j", "]", "j", "+=", "1", "while", "i", "<", "len", "(", "self", ".", "indices", ")", ":", "result", "+=", "self", ".", "values", "[", "i", "]", "*", "self", ".", "values", "[", "i", "]", "i", "+=", "1", "while", "j", "<", "len", "(", "other", ".", "indices", ")", ":", "result", "+=", "other", ".", "values", "[", "j", "]", "*", "other", ".", "values", "[", "j", "]", "j", "+=", "1", "return", "result", "else", ":", "return", "self", ".", "squared_distance", "(", "_convert_to_vector", "(", "other", ")", ")" ]
Squared distance from a SparseVector or 1-dimensional NumPy array.

>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
    ...
AssertionError: dimension mismatch
[ "Squared", "distance", "from", "a", "SparseVector", "or", "1", "-", "dimensional", "NumPy", "array", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L693-L758
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseVector.toArray
def toArray(self):
    """
    Returns a copy of this SparseVector as a 1-dimensional NumPy array.
    """
    arr = np.zeros((self.size,), dtype=np.float64)
    arr[self.indices] = self.values
    return arr
python
def toArray(self):
    """
    Returns a copy of this SparseVector as a 1-dimensional NumPy array.
    """
    arr = np.zeros((self.size,), dtype=np.float64)
    arr[self.indices] = self.values
    return arr
[ "def", "toArray", "(", "self", ")", ":", "arr", "=", "np", ".", "zeros", "(", "(", "self", ".", "size", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "arr", "[", "self", ".", "indices", "]", "=", "self", ".", "values", "return", "arr" ]
Returns a copy of this SparseVector as a 1-dimensional NumPy array.
[ "Returns", "a", "copy", "of", "this", "SparseVector", "as", "a", "1", "-", "dimensional", "NumPy", "array", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L760-L766
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseVector.asML
def asML(self):
    """
    Convert this vector to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseVector`

    .. versionadded:: 2.0.0
    """
    return newlinalg.SparseVector(self.size, self.indices, self.values)
python
def asML(self):
    """
    Convert this vector to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseVector`

    .. versionadded:: 2.0.0
    """
    return newlinalg.SparseVector(self.size, self.indices, self.values)
[ "def", "asML", "(", "self", ")", ":", "return", "newlinalg", ".", "SparseVector", "(", "self", ".", "size", ",", "self", ".", "indices", ",", "self", ".", "values", ")" ]
Convert this vector to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.SparseVector` .. versionadded:: 2.0.0
[ "Convert", "this", "vector", "to", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L768-L777
apache/spark
python/pyspark/mllib/linalg/__init__.py
Vectors.dense
def dense(*elements):
    """
    Create a dense vector of 64-bit floats from a Python list or numbers.

    >>> Vectors.dense([1, 2, 3])
    DenseVector([1.0, 2.0, 3.0])
    >>> Vectors.dense(1.0, 2.0)
    DenseVector([1.0, 2.0])
    """
    if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
        # it's list, numpy.array or other iterable object.
        elements = elements[0]
    return DenseVector(elements)
python
def dense(*elements):
    """
    Create a dense vector of 64-bit floats from a Python list or numbers.

    >>> Vectors.dense([1, 2, 3])
    DenseVector([1.0, 2.0, 3.0])
    >>> Vectors.dense(1.0, 2.0)
    DenseVector([1.0, 2.0])
    """
    if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
        # it's list, numpy.array or other iterable object.
        elements = elements[0]
    return DenseVector(elements)
[ "def", "dense", "(", "*", "elements", ")", ":", "if", "len", "(", "elements", ")", "==", "1", "and", "not", "isinstance", "(", "elements", "[", "0", "]", ",", "(", "float", ",", "int", ",", "long", ")", ")", ":", "# it's list, numpy.array or other iterable object.", "elements", "=", "elements", "[", "0", "]", "return", "DenseVector", "(", "elements", ")" ]
Create a dense vector of 64-bit floats from a Python list or numbers.

>>> Vectors.dense([1, 2, 3])
DenseVector([1.0, 2.0, 3.0])
>>> Vectors.dense(1.0, 2.0)
DenseVector([1.0, 2.0])
[ "Create", "a", "dense", "vector", "of", "64", "-", "bit", "floats", "from", "a", "Python", "list", "or", "numbers", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L874-L886
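A short usage sketch tying together the `Vectors` factory methods from the records above. This is illustrative only and not part of the dataset; the example vectors are made up, and it assumes a local Python session with pyspark installed (no SparkContext is needed for these local linear-algebra objects).

# Hypothetical example, not from the dataset.
from pyspark.mllib.linalg import Vectors

# Build one dense and one sparse vector of the same dimension.
dense = Vectors.dense([1.0, 0.0, 3.0])           # DenseVector([1.0, 0.0, 3.0])
sparse = Vectors.sparse(3, [0, 2], [1.0, 4.0])   # SparseVector(3, {0: 1.0, 2: 4.0})

# Mixed dense/sparse operations work as shown in the doctests above.
print(dense.dot(sparse))                          # 13.0
print(Vectors.squared_distance(dense, sparse))    # 1.0
print(Vectors.parse('[1.0, 0.0, 3.0]'))           # DenseVector([1.0, 0.0, 3.0])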
apache/spark
python/pyspark/mllib/linalg/__init__.py
Vectors.fromML
def fromML(vec):
    """
    Convert a vector from the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :param vec: a :py:class:`pyspark.ml.linalg.Vector`
    :return: a :py:class:`pyspark.mllib.linalg.Vector`

    .. versionadded:: 2.0.0
    """
    if isinstance(vec, newlinalg.DenseVector):
        return DenseVector(vec.array)
    elif isinstance(vec, newlinalg.SparseVector):
        return SparseVector(vec.size, vec.indices, vec.values)
    else:
        raise TypeError("Unsupported vector type %s" % type(vec))
python
def fromML(vec):
    """
    Convert a vector from the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :param vec: a :py:class:`pyspark.ml.linalg.Vector`
    :return: a :py:class:`pyspark.mllib.linalg.Vector`

    .. versionadded:: 2.0.0
    """
    if isinstance(vec, newlinalg.DenseVector):
        return DenseVector(vec.array)
    elif isinstance(vec, newlinalg.SparseVector):
        return SparseVector(vec.size, vec.indices, vec.values)
    else:
        raise TypeError("Unsupported vector type %s" % type(vec))
[ "def", "fromML", "(", "vec", ")", ":", "if", "isinstance", "(", "vec", ",", "newlinalg", ".", "DenseVector", ")", ":", "return", "DenseVector", "(", "vec", ".", "array", ")", "elif", "isinstance", "(", "vec", ",", "newlinalg", ".", "SparseVector", ")", ":", "return", "SparseVector", "(", "vec", ".", "size", ",", "vec", ".", "indices", ",", "vec", ".", "values", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported vector type %s\"", "%", "type", "(", "vec", ")", ")" ]
Convert a vector from the new mllib-local representation. This does NOT copy the data; it copies references. :param vec: a :py:class:`pyspark.ml.linalg.Vector` :return: a :py:class:`pyspark.mllib.linalg.Vector` .. versionadded:: 2.0.0
[ "Convert", "a", "vector", "from", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L889-L904
apache/spark
python/pyspark/mllib/linalg/__init__.py
Vectors.squared_distance
def squared_distance(v1, v2):
    """
    Squared distance between two vectors.
    a and b can be of type SparseVector, DenseVector, np.ndarray
    or array.array.

    >>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
    >>> b = Vectors.dense([2, 5, 4, 1])
    >>> a.squared_distance(b)
    51.0
    """
    v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
    return v1.squared_distance(v2)
python
def squared_distance(v1, v2):
    """
    Squared distance between two vectors.
    a and b can be of type SparseVector, DenseVector, np.ndarray
    or array.array.

    >>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
    >>> b = Vectors.dense([2, 5, 4, 1])
    >>> a.squared_distance(b)
    51.0
    """
    v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
    return v1.squared_distance(v2)
[ "def", "squared_distance", "(", "v1", ",", "v2", ")", ":", "v1", ",", "v2", "=", "_convert_to_vector", "(", "v1", ")", ",", "_convert_to_vector", "(", "v2", ")", "return", "v1", ".", "squared_distance", "(", "v2", ")" ]
Squared distance between two vectors.
a and b can be of type SparseVector, DenseVector, np.ndarray
or array.array.

>>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
>>> b = Vectors.dense([2, 5, 4, 1])
>>> a.squared_distance(b)
51.0
[ "Squared", "distance", "between", "two", "vectors", ".", "a", "and", "b", "can", "be", "of", "type", "SparseVector", "DenseVector", "np", ".", "ndarray", "or", "array", ".", "array", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L920-L932
apache/spark
python/pyspark/mllib/linalg/__init__.py
Vectors.parse
def parse(s):
    """Parse a string representation back into the Vector.

    >>> Vectors.parse('[2,1,2 ]')
    DenseVector([2.0, 1.0, 2.0])
    >>> Vectors.parse(' ( 100, [0], [2])')
    SparseVector(100, {0: 2.0})
    """
    if s.find('(') == -1 and s.find('[') != -1:
        return DenseVector.parse(s)
    elif s.find('(') != -1:
        return SparseVector.parse(s)
    else:
        raise ValueError(
            "Cannot find tokens '[' or '(' from the input string.")
python
def parse(s):
    """Parse a string representation back into the Vector.

    >>> Vectors.parse('[2,1,2 ]')
    DenseVector([2.0, 1.0, 2.0])
    >>> Vectors.parse(' ( 100, [0], [2])')
    SparseVector(100, {0: 2.0})
    """
    if s.find('(') == -1 and s.find('[') != -1:
        return DenseVector.parse(s)
    elif s.find('(') != -1:
        return SparseVector.parse(s)
    else:
        raise ValueError(
            "Cannot find tokens '[' or '(' from the input string.")
[ "def", "parse", "(", "s", ")", ":", "if", "s", ".", "find", "(", "'('", ")", "==", "-", "1", "and", "s", ".", "find", "(", "'['", ")", "!=", "-", "1", ":", "return", "DenseVector", ".", "parse", "(", "s", ")", "elif", "s", ".", "find", "(", "'('", ")", "!=", "-", "1", ":", "return", "SparseVector", ".", "parse", "(", "s", ")", "else", ":", "raise", "ValueError", "(", "\"Cannot find tokens '[' or '(' from the input string.\"", ")" ]
Parse a string representation back into the Vector.

>>> Vectors.parse('[2,1,2 ]')
DenseVector([2.0, 1.0, 2.0])
>>> Vectors.parse(' ( 100, [0], [2])')
SparseVector(100, {0: 2.0})
[ "Parse", "a", "string", "representation", "back", "into", "the", "Vector", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L942-L956
apache/spark
python/pyspark/mllib/linalg/__init__.py
Vectors._equals
def _equals(v1_indices, v1_values, v2_indices, v2_values):
    """
    Check equality between sparse/dense vectors,
    v1_indices and v2_indices assume to be strictly increasing.
    """
    v1_size = len(v1_values)
    v2_size = len(v2_values)
    k1 = 0
    k2 = 0
    all_equal = True
    while all_equal:
        while k1 < v1_size and v1_values[k1] == 0:
            k1 += 1
        while k2 < v2_size and v2_values[k2] == 0:
            k2 += 1

        if k1 >= v1_size or k2 >= v2_size:
            return k1 >= v1_size and k2 >= v2_size

        all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
        k1 += 1
        k2 += 1
    return all_equal
python
def _equals(v1_indices, v1_values, v2_indices, v2_values):
    """
    Check equality between sparse/dense vectors,
    v1_indices and v2_indices assume to be strictly increasing.
    """
    v1_size = len(v1_values)
    v2_size = len(v2_values)
    k1 = 0
    k2 = 0
    all_equal = True
    while all_equal:
        while k1 < v1_size and v1_values[k1] == 0:
            k1 += 1
        while k2 < v2_size and v2_values[k2] == 0:
            k2 += 1

        if k1 >= v1_size or k2 >= v2_size:
            return k1 >= v1_size and k2 >= v2_size

        all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
        k1 += 1
        k2 += 1
    return all_equal
[ "def", "_equals", "(", "v1_indices", ",", "v1_values", ",", "v2_indices", ",", "v2_values", ")", ":", "v1_size", "=", "len", "(", "v1_values", ")", "v2_size", "=", "len", "(", "v2_values", ")", "k1", "=", "0", "k2", "=", "0", "all_equal", "=", "True", "while", "all_equal", ":", "while", "k1", "<", "v1_size", "and", "v1_values", "[", "k1", "]", "==", "0", ":", "k1", "+=", "1", "while", "k2", "<", "v2_size", "and", "v2_values", "[", "k2", "]", "==", "0", ":", "k2", "+=", "1", "if", "k1", ">=", "v1_size", "or", "k2", ">=", "v2_size", ":", "return", "k1", ">=", "v1_size", "and", "k2", ">=", "v2_size", "all_equal", "=", "v1_indices", "[", "k1", "]", "==", "v2_indices", "[", "k2", "]", "and", "v1_values", "[", "k1", "]", "==", "v2_values", "[", "k2", "]", "k1", "+=", "1", "k2", "+=", "1", "return", "all_equal" ]
Check equality between sparse/dense vectors, v1_indices and v2_indices assume to be strictly increasing.
[ "Check", "equality", "between", "sparse", "/", "dense", "vectors", "v1_indices", "and", "v2_indices", "assume", "to", "be", "strictly", "increasing", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L963-L985
apache/spark
python/pyspark/mllib/linalg/__init__.py
Matrix._convert_to_array
def _convert_to_array(array_like, dtype):
    """
    Convert Matrix attributes which are array-like or buffer to array.
    """
    if isinstance(array_like, bytes):
        return np.frombuffer(array_like, dtype=dtype)
    return np.asarray(array_like, dtype=dtype)
python
def _convert_to_array(array_like, dtype):
    """
    Convert Matrix attributes which are array-like or buffer to array.
    """
    if isinstance(array_like, bytes):
        return np.frombuffer(array_like, dtype=dtype)
    return np.asarray(array_like, dtype=dtype)
[ "def", "_convert_to_array", "(", "array_like", ",", "dtype", ")", ":", "if", "isinstance", "(", "array_like", ",", "bytes", ")", ":", "return", "np", ".", "frombuffer", "(", "array_like", ",", "dtype", "=", "dtype", ")", "return", "np", ".", "asarray", "(", "array_like", ",", "dtype", "=", "dtype", ")" ]
Convert Matrix attributes which are array-like or buffer to array.
[ "Convert", "Matrix", "attributes", "which", "are", "array", "-", "like", "or", "buffer", "to", "array", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1014-L1020
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseMatrix.toArray
def toArray(self):
    """
    Return an numpy.ndarray

    >>> m = DenseMatrix(2, 2, range(4))
    >>> m.toArray()
    array([[ 0., 2.],
           [ 1., 3.]])
    """
    if self.isTransposed:
        return np.asfortranarray(
            self.values.reshape((self.numRows, self.numCols)))
    else:
        return self.values.reshape((self.numRows, self.numCols), order='F')
python
def toArray(self):
    """
    Return an numpy.ndarray

    >>> m = DenseMatrix(2, 2, range(4))
    >>> m.toArray()
    array([[ 0., 2.],
           [ 1., 3.]])
    """
    if self.isTransposed:
        return np.asfortranarray(
            self.values.reshape((self.numRows, self.numCols)))
    else:
        return self.values.reshape((self.numRows, self.numCols), order='F')
[ "def", "toArray", "(", "self", ")", ":", "if", "self", ".", "isTransposed", ":", "return", "np", ".", "asfortranarray", "(", "self", ".", "values", ".", "reshape", "(", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ")", ")", ")", "else", ":", "return", "self", ".", "values", ".", "reshape", "(", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ")", ",", "order", "=", "'F'", ")" ]
Return an numpy.ndarray

>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
       [ 1., 3.]])
[ "Return", "an", "numpy", ".", "ndarray" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1082-L1095
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseMatrix.toSparse
def toSparse(self):
    """Convert to SparseMatrix"""
    if self.isTransposed:
        values = np.ravel(self.toArray(), order='F')
    else:
        values = self.values
    indices = np.nonzero(values)[0]
    colCounts = np.bincount(indices // self.numRows)
    colPtrs = np.cumsum(np.hstack(
        (0, colCounts, np.zeros(self.numCols - colCounts.size))))
    values = values[indices]
    rowIndices = indices % self.numRows

    return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)
python
def toSparse(self):
    """Convert to SparseMatrix"""
    if self.isTransposed:
        values = np.ravel(self.toArray(), order='F')
    else:
        values = self.values
    indices = np.nonzero(values)[0]
    colCounts = np.bincount(indices // self.numRows)
    colPtrs = np.cumsum(np.hstack(
        (0, colCounts, np.zeros(self.numCols - colCounts.size))))
    values = values[indices]
    rowIndices = indices % self.numRows

    return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)
[ "def", "toSparse", "(", "self", ")", ":", "if", "self", ".", "isTransposed", ":", "values", "=", "np", ".", "ravel", "(", "self", ".", "toArray", "(", ")", ",", "order", "=", "'F'", ")", "else", ":", "values", "=", "self", ".", "values", "indices", "=", "np", ".", "nonzero", "(", "values", ")", "[", "0", "]", "colCounts", "=", "np", ".", "bincount", "(", "indices", "//", "self", ".", "numRows", ")", "colPtrs", "=", "np", ".", "cumsum", "(", "np", ".", "hstack", "(", "(", "0", ",", "colCounts", ",", "np", ".", "zeros", "(", "self", ".", "numCols", "-", "colCounts", ".", "size", ")", ")", ")", ")", "values", "=", "values", "[", "indices", "]", "rowIndices", "=", "indices", "%", "self", ".", "numRows", "return", "SparseMatrix", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ",", "colPtrs", ",", "rowIndices", ",", "values", ")" ]
Convert to SparseMatrix
[ "Convert", "to", "SparseMatrix" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1097-L1110
apache/spark
python/pyspark/mllib/linalg/__init__.py
DenseMatrix.asML
def asML(self):
    """
    Convert this matrix to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.DenseMatrix`

    .. versionadded:: 2.0.0
    """
    return newlinalg.DenseMatrix(self.numRows, self.numCols, self.values, self.isTransposed)
python
def asML(self):
    """
    Convert this matrix to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.DenseMatrix`

    .. versionadded:: 2.0.0
    """
    return newlinalg.DenseMatrix(self.numRows, self.numCols, self.values, self.isTransposed)
[ "def", "asML", "(", "self", ")", ":", "return", "newlinalg", ".", "DenseMatrix", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ",", "self", ".", "values", ",", "self", ".", "isTransposed", ")" ]
Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseMatrix` .. versionadded:: 2.0.0
[ "Convert", "this", "matrix", "to", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1112-L1121
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseMatrix.toArray
def toArray(self):
    """
    Return an numpy.ndarray
    """
    A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
    for k in xrange(self.colPtrs.size - 1):
        startptr = self.colPtrs[k]
        endptr = self.colPtrs[k + 1]
        if self.isTransposed:
            A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
        else:
            A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
    return A
python
def toArray(self):
    """
    Return an numpy.ndarray
    """
    A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
    for k in xrange(self.colPtrs.size - 1):
        startptr = self.colPtrs[k]
        endptr = self.colPtrs[k + 1]
        if self.isTransposed:
            A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
        else:
            A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
    return A
[ "def", "toArray", "(", "self", ")", ":", "A", "=", "np", ".", "zeros", "(", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ")", ",", "dtype", "=", "np", ".", "float64", ",", "order", "=", "'F'", ")", "for", "k", "in", "xrange", "(", "self", ".", "colPtrs", ".", "size", "-", "1", ")", ":", "startptr", "=", "self", ".", "colPtrs", "[", "k", "]", "endptr", "=", "self", ".", "colPtrs", "[", "k", "+", "1", "]", "if", "self", ".", "isTransposed", ":", "A", "[", "k", ",", "self", ".", "rowIndices", "[", "startptr", ":", "endptr", "]", "]", "=", "self", ".", "values", "[", "startptr", ":", "endptr", "]", "else", ":", "A", "[", "self", ".", "rowIndices", "[", "startptr", ":", "endptr", "]", ",", "k", "]", "=", "self", ".", "values", "[", "startptr", ":", "endptr", "]", "return", "A" ]
Return an numpy.ndarray
[ "Return", "an", "numpy", ".", "ndarray" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1277-L1289
apache/spark
python/pyspark/mllib/linalg/__init__.py
SparseMatrix.asML
def asML(self):
    """
    Convert this matrix to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseMatrix`

    .. versionadded:: 2.0.0
    """
    return newlinalg.SparseMatrix(self.numRows, self.numCols, self.colPtrs, self.rowIndices,
                                  self.values, self.isTransposed)
python
def asML(self):
    """
    Convert this matrix to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseMatrix`

    .. versionadded:: 2.0.0
    """
    return newlinalg.SparseMatrix(self.numRows, self.numCols, self.colPtrs, self.rowIndices,
                                  self.values, self.isTransposed)
[ "def", "asML", "(", "self", ")", ":", "return", "newlinalg", ".", "SparseMatrix", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ",", "self", ".", "colPtrs", ",", "self", ".", "rowIndices", ",", "self", ".", "values", ",", "self", ".", "isTransposed", ")" ]
Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.SparseMatrix` .. versionadded:: 2.0.0
[ "Convert", "this", "matrix", "to", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1295-L1305
apache/spark
python/pyspark/mllib/linalg/__init__.py
Matrices.sparse
def sparse(numRows, numCols, colPtrs, rowIndices, values):
    """
    Create a SparseMatrix
    """
    return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
python
def sparse(numRows, numCols, colPtrs, rowIndices, values):
    """
    Create a SparseMatrix
    """
    return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
[ "def", "sparse", "(", "numRows", ",", "numCols", ",", "colPtrs", ",", "rowIndices", ",", "values", ")", ":", "return", "SparseMatrix", "(", "numRows", ",", "numCols", ",", "colPtrs", ",", "rowIndices", ",", "values", ")" ]
Create a SparseMatrix
[ "Create", "a", "SparseMatrix" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1321-L1325
apache/spark
python/pyspark/mllib/linalg/__init__.py
Matrices.fromML
def fromML(mat):
    """
    Convert a matrix from the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :param mat: a :py:class:`pyspark.ml.linalg.Matrix`
    :return: a :py:class:`pyspark.mllib.linalg.Matrix`

    .. versionadded:: 2.0.0
    """
    if isinstance(mat, newlinalg.DenseMatrix):
        return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
    elif isinstance(mat, newlinalg.SparseMatrix):
        return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
                            mat.values, mat.isTransposed)
    else:
        raise TypeError("Unsupported matrix type %s" % type(mat))
python
def fromML(mat):
    """
    Convert a matrix from the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :param mat: a :py:class:`pyspark.ml.linalg.Matrix`
    :return: a :py:class:`pyspark.mllib.linalg.Matrix`

    .. versionadded:: 2.0.0
    """
    if isinstance(mat, newlinalg.DenseMatrix):
        return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
    elif isinstance(mat, newlinalg.SparseMatrix):
        return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
                            mat.values, mat.isTransposed)
    else:
        raise TypeError("Unsupported matrix type %s" % type(mat))
[ "def", "fromML", "(", "mat", ")", ":", "if", "isinstance", "(", "mat", ",", "newlinalg", ".", "DenseMatrix", ")", ":", "return", "DenseMatrix", "(", "mat", ".", "numRows", ",", "mat", ".", "numCols", ",", "mat", ".", "values", ",", "mat", ".", "isTransposed", ")", "elif", "isinstance", "(", "mat", ",", "newlinalg", ".", "SparseMatrix", ")", ":", "return", "SparseMatrix", "(", "mat", ".", "numRows", ",", "mat", ".", "numCols", ",", "mat", ".", "colPtrs", ",", "mat", ".", "rowIndices", ",", "mat", ".", "values", ",", "mat", ".", "isTransposed", ")", "else", ":", "raise", "TypeError", "(", "\"Unsupported matrix type %s\"", "%", "type", "(", "mat", ")", ")" ]
Convert a matrix from the new mllib-local representation. This does NOT copy the data; it copies references. :param mat: a :py:class:`pyspark.ml.linalg.Matrix` :return: a :py:class:`pyspark.mllib.linalg.Matrix` .. versionadded:: 2.0.0
[ "Convert", "a", "matrix", "from", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1328-L1344
apache/spark
python/pyspark/ml/feature.py
LSHModel.approxNearestNeighbors
def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
    """
    Given a large dataset and an item, approximately find at most k items which have the
    closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
    transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
    caching of the transformed data when necessary.

    .. note:: This method is experimental and will likely change behavior in the next release.

    :param dataset: The dataset to search for nearest neighbors of the key.
    :param key: Feature vector representing the item to search for.
    :param numNearestNeighbors: The maximum number of nearest neighbors.
    :param distCol: Output column for storing the distance between each result row and the key.
                    Use "distCol" as default value if it's not specified.
    :return: A dataset containing at most k items closest to the key. A column "distCol" is
             added to show the distance between each row and the key.
    """
    return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
                           distCol)
python
def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
    """
    Given a large dataset and an item, approximately find at most k items which have the
    closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
    transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
    caching of the transformed data when necessary.

    .. note:: This method is experimental and will likely change behavior in the next release.

    :param dataset: The dataset to search for nearest neighbors of the key.
    :param key: Feature vector representing the item to search for.
    :param numNearestNeighbors: The maximum number of nearest neighbors.
    :param distCol: Output column for storing the distance between each result row and the key.
                    Use "distCol" as default value if it's not specified.
    :return: A dataset containing at most k items closest to the key. A column "distCol" is
             added to show the distance between each row and the key.
    """
    return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
                           distCol)
[ "def", "approxNearestNeighbors", "(", "self", ",", "dataset", ",", "key", ",", "numNearestNeighbors", ",", "distCol", "=", "\"distCol\"", ")", ":", "return", "self", ".", "_call_java", "(", "\"approxNearestNeighbors\"", ",", "dataset", ",", "key", ",", "numNearestNeighbors", ",", "distCol", ")" ]
Given a large dataset and an item, approximately find at most k items which have the closest distance to the item. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. .. note:: This method is experimental and will likely change behavior in the next release. :param dataset: The dataset to search for nearest neighbors of the key. :param key: Feature vector representing the item to search for. :param numNearestNeighbors: The maximum number of nearest neighbors. :param distCol: Output column for storing the distance between each result row and the key. Use "distCol" as default value if it's not specified. :return: A dataset containing at most k items closest to the key. A column "distCol" is added to show the distance between each row and the key.
[ "Given", "a", "large", "dataset", "and", "an", "item", "approximately", "find", "at", "most", "k", "items", "which", "have", "the", "closest", "distance", "to", "the", "item", ".", "If", "the", ":", "py", ":", "attr", ":", "outputCol", "is", "missing", "the", "method", "will", "transform", "the", "data", ";", "if", "the", ":", "py", ":", "attr", ":", "outputCol", "exists", "it", "will", "use", "that", ".", "This", "allows", "caching", "of", "the", "transformed", "data", "when", "necessary", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L162-L180
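A minimal sketch of how `approxNearestNeighbors` is typically driven from a concrete LSH estimator. It is not part of the dataset; it assumes an active SparkSession bound to the name `spark`, uses `BucketedRandomProjectionLSH` as one available LSH implementation, and the toy data and parameter values are made up for illustration.

# Hypothetical example, not from the dataset.
from pyspark.ml.feature import BucketedRandomProjectionLSH
from pyspark.ml.linalg import Vectors

df = spark.createDataFrame([(0, Vectors.dense([1.0, 1.0])),
                            (1, Vectors.dense([1.0, -1.0])),
                            (2, Vectors.dense([-1.0, -1.0]))], ["id", "features"])

# Fit an LSH model; bucketLength and numHashTables are illustrative choices.
brp = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes",
                                  bucketLength=1.0, numHashTables=3)
model = brp.fit(df)

# Approximate 2-nearest-neighbor search around a query key.
key = Vectors.dense([1.0, 0.0])
model.approxNearestNeighbors(df, key, 2).show()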
apache/spark
python/pyspark/ml/feature.py
LSHModel.approxSimilarityJoin
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
    """
    Join two datasets to approximately find all pairs of rows whose distance are smaller than
    the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
    if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
    transformed data when necessary.

    :param datasetA: One of the datasets to join.
    :param datasetB: Another dataset to join.
    :param threshold: The threshold for the distance of row pairs.
    :param distCol: Output column for storing the distance between each pair of rows. Use
                    "distCol" as default value if it's not specified.
    :return: A joined dataset containing pairs of rows. The original rows are in columns
             "datasetA" and "datasetB", and a column "distCol" is added to show the distance
             between each pair.
    """
    threshold = TypeConverters.toFloat(threshold)
    return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
python
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
    """
    Join two datasets to approximately find all pairs of rows whose distance are smaller than
    the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
    if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
    transformed data when necessary.

    :param datasetA: One of the datasets to join.
    :param datasetB: Another dataset to join.
    :param threshold: The threshold for the distance of row pairs.
    :param distCol: Output column for storing the distance between each pair of rows. Use
                    "distCol" as default value if it's not specified.
    :return: A joined dataset containing pairs of rows. The original rows are in columns
             "datasetA" and "datasetB", and a column "distCol" is added to show the distance
             between each pair.
    """
    threshold = TypeConverters.toFloat(threshold)
    return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
[ "def", "approxSimilarityJoin", "(", "self", ",", "datasetA", ",", "datasetB", ",", "threshold", ",", "distCol", "=", "\"distCol\"", ")", ":", "threshold", "=", "TypeConverters", ".", "toFloat", "(", "threshold", ")", "return", "self", ".", "_call_java", "(", "\"approxSimilarityJoin\"", ",", "datasetA", ",", "datasetB", ",", "threshold", ",", "distCol", ")" ]
Join two datasets to approximately find all pairs of rows whose distance are smaller than the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows caching of the transformed data when necessary. :param datasetA: One of the datasets to join. :param datasetB: Another dataset to join. :param threshold: The threshold for the distance of row pairs. :param distCol: Output column for storing the distance between each pair of rows. Use "distCol" as default value if it's not specified. :return: A joined dataset containing pairs of rows. The original rows are in columns "datasetA" and "datasetB", and a column "distCol" is added to show the distance between each pair.
[ "Join", "two", "datasets", "to", "approximately", "find", "all", "pairs", "of", "rows", "whose", "distance", "are", "smaller", "than", "the", "threshold", ".", "If", "the", ":", "py", ":", "attr", ":", "outputCol", "is", "missing", "the", "method", "will", "transform", "the", "data", ";", "if", "the", ":", "py", ":", "attr", ":", "outputCol", "exists", "it", "will", "use", "that", ".", "This", "allows", "caching", "of", "the", "transformed", "data", "when", "necessary", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L182-L199
apache/spark
python/pyspark/ml/feature.py
StringIndexerModel.from_labels
def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
    """
    Construct the model directly from an array of label strings,
    requires an active SparkContext.
    """
    sc = SparkContext._active_spark_context
    java_class = sc._gateway.jvm.java.lang.String
    jlabels = StringIndexerModel._new_java_array(labels, java_class)
    model = StringIndexerModel._create_from_java_class(
        "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
    model.setInputCol(inputCol)
    if outputCol is not None:
        model.setOutputCol(outputCol)
    if handleInvalid is not None:
        model.setHandleInvalid(handleInvalid)
    return model
python
def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
    """
    Construct the model directly from an array of label strings,
    requires an active SparkContext.
    """
    sc = SparkContext._active_spark_context
    java_class = sc._gateway.jvm.java.lang.String
    jlabels = StringIndexerModel._new_java_array(labels, java_class)
    model = StringIndexerModel._create_from_java_class(
        "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
    model.setInputCol(inputCol)
    if outputCol is not None:
        model.setOutputCol(outputCol)
    if handleInvalid is not None:
        model.setHandleInvalid(handleInvalid)
    return model
[ "def", "from_labels", "(", "cls", ",", "labels", ",", "inputCol", ",", "outputCol", "=", "None", ",", "handleInvalid", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "java_class", "=", "sc", ".", "_gateway", ".", "jvm", ".", "java", ".", "lang", ".", "String", "jlabels", "=", "StringIndexerModel", ".", "_new_java_array", "(", "labels", ",", "java_class", ")", "model", "=", "StringIndexerModel", ".", "_create_from_java_class", "(", "\"org.apache.spark.ml.feature.StringIndexerModel\"", ",", "jlabels", ")", "model", ".", "setInputCol", "(", "inputCol", ")", "if", "outputCol", "is", "not", "None", ":", "model", ".", "setOutputCol", "(", "outputCol", ")", "if", "handleInvalid", "is", "not", "None", ":", "model", ".", "setHandleInvalid", "(", "handleInvalid", ")", "return", "model" ]
Construct the model directly from an array of label strings, requires an active SparkContext.
[ "Construct", "the", "model", "directly", "from", "an", "array", "of", "label", "strings", "requires", "an", "active", "SparkContext", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2503-L2518
apache/spark
python/pyspark/ml/feature.py
StringIndexerModel.from_arrays_of_labels
def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None, handleInvalid=None):
    """
    Construct the model directly from an array of array of label strings,
    requires an active SparkContext.
    """
    sc = SparkContext._active_spark_context
    java_class = sc._gateway.jvm.java.lang.String
    jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
    model = StringIndexerModel._create_from_java_class(
        "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
    model.setInputCols(inputCols)
    if outputCols is not None:
        model.setOutputCols(outputCols)
    if handleInvalid is not None:
        model.setHandleInvalid(handleInvalid)
    return model
python
def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None, handleInvalid=None):
    """
    Construct the model directly from an array of array of label strings,
    requires an active SparkContext.
    """
    sc = SparkContext._active_spark_context
    java_class = sc._gateway.jvm.java.lang.String
    jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
    model = StringIndexerModel._create_from_java_class(
        "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
    model.setInputCols(inputCols)
    if outputCols is not None:
        model.setOutputCols(outputCols)
    if handleInvalid is not None:
        model.setHandleInvalid(handleInvalid)
    return model
[ "def", "from_arrays_of_labels", "(", "cls", ",", "arrayOfLabels", ",", "inputCols", ",", "outputCols", "=", "None", ",", "handleInvalid", "=", "None", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "java_class", "=", "sc", ".", "_gateway", ".", "jvm", ".", "java", ".", "lang", ".", "String", "jlabels", "=", "StringIndexerModel", ".", "_new_java_array", "(", "arrayOfLabels", ",", "java_class", ")", "model", "=", "StringIndexerModel", ".", "_create_from_java_class", "(", "\"org.apache.spark.ml.feature.StringIndexerModel\"", ",", "jlabels", ")", "model", ".", "setInputCols", "(", "inputCols", ")", "if", "outputCols", "is", "not", "None", ":", "model", ".", "setOutputCols", "(", "outputCols", ")", "if", "handleInvalid", "is", "not", "None", ":", "model", ".", "setHandleInvalid", "(", "handleInvalid", ")", "return", "model" ]
Construct the model directly from an array of array of label strings, requires an active SparkContext.
[ "Construct", "the", "model", "directly", "from", "an", "array", "of", "array", "of", "label", "strings", "requires", "an", "active", "SparkContext", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2522-L2538
apache/spark
python/pyspark/ml/feature.py
StopWordsRemover.setParams
def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, locale=None):
    """
    setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=false, \
              locale=None)
    Sets params for this StopWordRemover.
    """
    kwargs = self._input_kwargs
    return self._set(**kwargs)
python
def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, locale=None):
    """
    setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=false, \
              locale=None)
    Sets params for this StopWordRemover.
    """
    kwargs = self._input_kwargs
    return self._set(**kwargs)
[ "def", "setParams", "(", "self", ",", "inputCol", "=", "None", ",", "outputCol", "=", "None", ",", "stopWords", "=", "None", ",", "caseSensitive", "=", "False", ",", "locale", "=", "None", ")", ":", "kwargs", "=", "self", ".", "_input_kwargs", "return", "self", ".", "_set", "(", "*", "*", "kwargs", ")" ]
setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=false, \ locale=None) Sets params for this StopWordRemover.
[ "setParams", "(", "self", "inputCol", "=", "None", "outputCol", "=", "None", "stopWords", "=", "None", "caseSensitive", "=", "false", "\\", "locale", "=", "None", ")", "Sets", "params", "for", "this", "StopWordRemover", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2654-L2662
apache/spark
python/pyspark/ml/feature.py
StopWordsRemover.loadDefaultStopWords
def loadDefaultStopWords(language):
    """
    Loads the default stop words for the given language.
    Supported languages: danish, dutch, english, finnish, french, german, hungarian,
    italian, norwegian, portuguese, russian, spanish, swedish, turkish
    """
    stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
    return list(stopWordsObj.loadDefaultStopWords(language))
python
def loadDefaultStopWords(language):
    """
    Loads the default stop words for the given language.
    Supported languages: danish, dutch, english, finnish, french, german, hungarian,
    italian, norwegian, portuguese, russian, spanish, swedish, turkish
    """
    stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
    return list(stopWordsObj.loadDefaultStopWords(language))
[ "def", "loadDefaultStopWords", "(", "language", ")", ":", "stopWordsObj", "=", "_jvm", "(", ")", ".", "org", ".", "apache", ".", "spark", ".", "ml", ".", "feature", ".", "StopWordsRemover", "return", "list", "(", "stopWordsObj", ".", "loadDefaultStopWords", "(", "language", ")", ")" ]
Loads the default stop words for the given language. Supported languages: danish, dutch, english, finnish, french, german, hungarian, italian, norwegian, portuguese, russian, spanish, swedish, turkish
[ "Loads", "the", "default", "stop", "words", "for", "the", "given", "language", ".", "Supported", "languages", ":", "danish", "dutch", "english", "finnish", "french", "german", "hungarian", "italian", "norwegian", "portuguese", "russian", "spanish", "swedish", "turkish" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2708-L2715
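A brief usage sketch for the StopWordsRemover records above, showing `loadDefaultStopWords` feeding the `stopWords` param. It is illustrative only and not part of the dataset; it assumes an active SparkSession bound to the name `spark` (the static loader itself also needs a running JVM/SparkContext), and the input rows are made up.

# Hypothetical example, not from the dataset.
from pyspark.ml.feature import StopWordsRemover

english = StopWordsRemover.loadDefaultStopWords("english")
remover = StopWordsRemover(inputCol="raw", outputCol="filtered", stopWords=english)

df = spark.createDataFrame([(["a", "b", "the", "spark"],)], ["raw"])
remover.transform(df).show(truncate=False)   # "the" and "a" are filtered out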
apache/spark
python/pyspark/ml/feature.py
Word2VecModel.findSynonyms
def findSynonyms(self, word, num):
    """
    Find "num" number of words closest in similarity to "word".
    word can be a string or vector representation.
    Returns a dataframe with two fields word and similarity (which
    gives the cosine similarity).
    """
    if not isinstance(word, basestring):
        word = _convert_to_vector(word)
    return self._call_java("findSynonyms", word, num)
python
def findSynonyms(self, word, num):
    """
    Find "num" number of words closest in similarity to "word".
    word can be a string or vector representation.
    Returns a dataframe with two fields word and similarity (which
    gives the cosine similarity).
    """
    if not isinstance(word, basestring):
        word = _convert_to_vector(word)
    return self._call_java("findSynonyms", word, num)
[ "def", "findSynonyms", "(", "self", ",", "word", ",", "num", ")", ":", "if", "not", "isinstance", "(", "word", ",", "basestring", ")", ":", "word", "=", "_convert_to_vector", "(", "word", ")", "return", "self", ".", "_call_java", "(", "\"findSynonyms\"", ",", "word", ",", "num", ")" ]
Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns a dataframe with two fields word and similarity (which gives the cosine similarity).
[ "Find", "num", "number", "of", "words", "closest", "in", "similarity", "to", "word", ".", "word", "can", "be", "a", "string", "or", "vector", "representation", ".", "Returns", "a", "dataframe", "with", "two", "fields", "word", "and", "similarity", "(", "which", "gives", "the", "cosine", "similarity", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3293-L3302
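A sketch of querying a fitted Word2VecModel with findSynonyms; the tiny corpus, vector size, and column names are made up for illustration.

from pyspark.sql import SparkSession
from pyspark.ml.feature import Word2Vec

spark = SparkSession.builder.master("local[1]").getOrCreate()
doc = spark.createDataFrame([("a b c".split(" "),), ("a c d".split(" "),)], ["text"])

# Fit a small model, then ask for the two words most similar to "a".
model = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="vec").fit(doc)
model.findSynonyms("a", 2).show()  # DataFrame with columns word and similarity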
apache/spark
python/pyspark/ml/feature.py
Word2VecModel.findSynonymsArray
def findSynonymsArray(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns an array with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, basestring): word = _convert_to_vector(word) tuples = self._java_obj.findSynonymsArray(word, num) return list(map(lambda st: (st._1(), st._2()), list(tuples)))
python
def findSynonymsArray(self, word, num): """ Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns an array with two fields word and similarity (which gives the cosine similarity). """ if not isinstance(word, basestring): word = _convert_to_vector(word) tuples = self._java_obj.findSynonymsArray(word, num) return list(map(lambda st: (st._1(), st._2()), list(tuples)))
[ "def", "findSynonymsArray", "(", "self", ",", "word", ",", "num", ")", ":", "if", "not", "isinstance", "(", "word", ",", "basestring", ")", ":", "word", "=", "_convert_to_vector", "(", "word", ")", "tuples", "=", "self", ".", "_java_obj", ".", "findSynonymsArray", "(", "word", ",", "num", ")", "return", "list", "(", "map", "(", "lambda", "st", ":", "(", "st", ".", "_1", "(", ")", ",", "st", ".", "_2", "(", ")", ")", ",", "list", "(", "tuples", ")", ")", ")" ]
Find "num" number of words closest in similarity to "word". word can be a string or vector representation. Returns an array with two fields word and similarity (which gives the cosine similarity).
[ "Find", "num", "number", "of", "words", "closest", "in", "similarity", "to", "word", ".", "word", "can", "be", "a", "string", "or", "vector", "representation", ".", "Returns", "an", "array", "with", "two", "fields", "word", "and", "similarity", "(", "which", "gives", "the", "cosine", "similarity", ")", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3305-L3315
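findSynonymsArray differs only in returning local (word, similarity) tuples instead of a DataFrame; a self-contained sketch under the same illustrative assumptions as the previous example.

from pyspark.sql import SparkSession
from pyspark.ml.feature import Word2Vec

spark = SparkSession.builder.master("local[1]").getOrCreate()
doc = spark.createDataFrame([("a b c".split(" "),), ("a c d".split(" "),)], ["text"])
model = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="vec").fit(doc)

# A Python list of (word, cosine similarity) pairs, usable without collecting a DataFrame.
for word, similarity in model.findSynonymsArray("a", 2):
    print(word, similarity)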
apache/spark
python/pyspark/sql/utils.py
install_exception_handler
def install_exception_handler(): """ Hook an exception handler into Py4j, which could capture some SQL exceptions in Java. When calling Java API, it will call `get_return_value` to parse the returned object. If any exception happened in JVM, the result will be Java exception object, it raise py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that could capture the Java exception and throw a Python one (with the same error message). It's idempotent, could be called multiple times. """ original = py4j.protocol.get_return_value # The original `get_return_value` is not patched, it's idempotent. patched = capture_sql_exception(original) # only patch the one used in py4j.java_gateway (call Java API) py4j.java_gateway.get_return_value = patched
python
def install_exception_handler(): """ Hook an exception handler into Py4j, which could capture some SQL exceptions in Java. When calling Java API, it will call `get_return_value` to parse the returned object. If any exception happened in JVM, the result will be Java exception object, it raise py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that could capture the Java exception and throw a Python one (with the same error message). It's idempotent, could be called multiple times. """ original = py4j.protocol.get_return_value # The original `get_return_value` is not patched, it's idempotent. patched = capture_sql_exception(original) # only patch the one used in py4j.java_gateway (call Java API) py4j.java_gateway.get_return_value = patched
[ "def", "install_exception_handler", "(", ")", ":", "original", "=", "py4j", ".", "protocol", ".", "get_return_value", "# The original `get_return_value` is not patched, it's idempotent.", "patched", "=", "capture_sql_exception", "(", "original", ")", "# only patch the one used in py4j.java_gateway (call Java API)", "py4j", ".", "java_gateway", ".", "get_return_value", "=", "patched" ]
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java. When calling Java API, it will call `get_return_value` to parse the returned object. If any exception happened in JVM, the result will be Java exception object, it raise py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that could capture the Java exception and throw a Python one (with the same error message). It's idempotent, could be called multiple times.
[ "Hook", "an", "exception", "handler", "into", "Py4j", "which", "could", "capture", "some", "SQL", "exceptions", "in", "Java", "." ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L99-L114
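A hedged illustration of the patching pattern install_exception_handler uses, not of the py4j internals themselves: wrap a module-level callable and reassign the attribute so later callers see the wrapped version. The namespace and exception types here are hypothetical stand-ins.

import functools
import types

# Stand-in for the module whose get_return_value gets patched.
fake_protocol = types.SimpleNamespace(get_return_value=lambda answer: answer)

def capture_exception(original):
    # Wrap the original callable so JVM-side errors could be translated before propagating.
    @functools.wraps(original)
    def patched(*args, **kwargs):
        try:
            return original(*args, **kwargs)
        except RuntimeError as e:  # stand-in for py4j.protocol.Py4JJavaError
            raise ValueError("translated: %s" % e)
    return patched

# Reassign the attribute so callers pick up the wrapped version, as the real function
# does for py4j.java_gateway.get_return_value; re-running this only adds a no-op layer.
fake_protocol.get_return_value = capture_exception(fake_protocol.get_return_value)
print(fake_protocol.get_return_value("ok"))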
apache/spark
python/pyspark/sql/utils.py
toJArray
def toJArray(gateway, jtype, arr): """ Convert python list to java type array :param gateway: Py4j Gateway :param jtype: java type of element in array :param arr: python type list """ jarr = gateway.new_array(jtype, len(arr)) for i in range(0, len(arr)): jarr[i] = arr[i] return jarr
python
def toJArray(gateway, jtype, arr): """ Convert python list to java type array :param gateway: Py4j Gateway :param jtype: java type of element in array :param arr: python type list """ jarr = gateway.new_array(jtype, len(arr)) for i in range(0, len(arr)): jarr[i] = arr[i] return jarr
[ "def", "toJArray", "(", "gateway", ",", "jtype", ",", "arr", ")", ":", "jarr", "=", "gateway", ".", "new_array", "(", "jtype", ",", "len", "(", "arr", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "arr", ")", ")", ":", "jarr", "[", "i", "]", "=", "arr", "[", "i", "]", "return", "jarr" ]
Convert python list to java type array :param gateway: Py4j Gateway :param jtype: java type of element in array :param arr: python type list
[ "Convert", "python", "list", "to", "java", "type", "array", ":", "param", "gateway", ":", "Py4j", "Gateway", ":", "param", "jtype", ":", "java", "type", "of", "element", "in", "array", ":", "param", "arr", ":", "python", "type", "list" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L117-L127
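toJArray needs a live Py4J gateway, so this sketch assumes an active SparkSession and reaches the gateway through the internal sparkContext._gateway attribute; the primitive int element type is just an example.

from pyspark.sql import SparkSession
from pyspark.sql.utils import toJArray

spark = SparkSession.builder.master("local[1]").getOrCreate()
gateway = spark.sparkContext._gateway

# Build a Java int[] from a Python list; gateway.jvm.int names the primitive element type.
jarr = toJArray(gateway, gateway.jvm.int, [1, 2, 3])
print(len(jarr), jarr[0])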
apache/spark
python/pyspark/sql/utils.py
require_minimum_pandas_version
def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pandas_version = "0.19.2" from distutils.version import LooseVersion try: import pandas have_pandas = True except ImportError: have_pandas = False if not have_pandas: raise ImportError("Pandas >= %s must be installed; however, " "it was not found." % minimum_pandas_version) if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version): raise ImportError("Pandas >= %s must be installed; however, " "your version was %s." % (minimum_pandas_version, pandas.__version__))
python
def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pandas_version = "0.19.2" from distutils.version import LooseVersion try: import pandas have_pandas = True except ImportError: have_pandas = False if not have_pandas: raise ImportError("Pandas >= %s must be installed; however, " "it was not found." % minimum_pandas_version) if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version): raise ImportError("Pandas >= %s must be installed; however, " "your version was %s." % (minimum_pandas_version, pandas.__version__))
[ "def", "require_minimum_pandas_version", "(", ")", ":", "# TODO(HyukjinKwon): Relocate and deduplicate the version specification.", "minimum_pandas_version", "=", "\"0.19.2\"", "from", "distutils", ".", "version", "import", "LooseVersion", "try", ":", "import", "pandas", "have_pandas", "=", "True", "except", "ImportError", ":", "have_pandas", "=", "False", "if", "not", "have_pandas", ":", "raise", "ImportError", "(", "\"Pandas >= %s must be installed; however, \"", "\"it was not found.\"", "%", "minimum_pandas_version", ")", "if", "LooseVersion", "(", "pandas", ".", "__version__", ")", "<", "LooseVersion", "(", "minimum_pandas_version", ")", ":", "raise", "ImportError", "(", "\"Pandas >= %s must be installed; however, \"", "\"your version was %s.\"", "%", "(", "minimum_pandas_version", ",", "pandas", ".", "__version__", ")", ")" ]
Raise ImportError if minimum version of Pandas is not installed
[ "Raise", "ImportError", "if", "minimum", "version", "of", "Pandas", "is", "not", "installed" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L130-L147
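The version guard is typically called just before a pandas-dependent code path; a minimal sketch of that pattern, with the fallback branch being an assumption rather than Spark's actual behaviour.

from pyspark.sql.utils import require_minimum_pandas_version

try:
    require_minimum_pandas_version()
    pandas_available = True
except ImportError as e:
    # Raised when pandas is missing or older than the supported minimum.
    pandas_available = False
    print("falling back to a non-pandas path: %s" % e)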
apache/spark
python/pyspark/sql/utils.py
require_minimum_pyarrow_version
def require_minimum_pyarrow_version(): """ Raise ImportError if minimum version of pyarrow is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pyarrow_version = "0.12.1" from distutils.version import LooseVersion try: import pyarrow have_arrow = True except ImportError: have_arrow = False if not have_arrow: raise ImportError("PyArrow >= %s must be installed; however, " "it was not found." % minimum_pyarrow_version) if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version): raise ImportError("PyArrow >= %s must be installed; however, " "your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
python
def require_minimum_pyarrow_version(): """ Raise ImportError if minimum version of pyarrow is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pyarrow_version = "0.12.1" from distutils.version import LooseVersion try: import pyarrow have_arrow = True except ImportError: have_arrow = False if not have_arrow: raise ImportError("PyArrow >= %s must be installed; however, " "it was not found." % minimum_pyarrow_version) if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version): raise ImportError("PyArrow >= %s must be installed; however, " "your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
[ "def", "require_minimum_pyarrow_version", "(", ")", ":", "# TODO(HyukjinKwon): Relocate and deduplicate the version specification.", "minimum_pyarrow_version", "=", "\"0.12.1\"", "from", "distutils", ".", "version", "import", "LooseVersion", "try", ":", "import", "pyarrow", "have_arrow", "=", "True", "except", "ImportError", ":", "have_arrow", "=", "False", "if", "not", "have_arrow", ":", "raise", "ImportError", "(", "\"PyArrow >= %s must be installed; however, \"", "\"it was not found.\"", "%", "minimum_pyarrow_version", ")", "if", "LooseVersion", "(", "pyarrow", ".", "__version__", ")", "<", "LooseVersion", "(", "minimum_pyarrow_version", ")", ":", "raise", "ImportError", "(", "\"PyArrow >= %s must be installed; however, \"", "\"your version was %s.\"", "%", "(", "minimum_pyarrow_version", ",", "pyarrow", ".", "__version__", ")", ")" ]
Raise ImportError if minimum version of pyarrow is not installed
[ "Raise", "ImportError", "if", "minimum", "version", "of", "pyarrow", "is", "not", "installed" ]
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L150-L167
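The pyarrow guard has the same shape; a brief hedged example that combines both checks before an Arrow-based conversion, where the helper name is made up for illustration.

from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version

def arrow_conversion_possible():
    # Both guards raise ImportError with a descriptive message when unsatisfied.
    try:
        require_minimum_pandas_version()
        require_minimum_pyarrow_version()
        return True
    except ImportError:
        return False

print(arrow_conversion_possible())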