{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, BatchNormalization, Conv2DTranspose, Concatenate, Reshape, LeakyReLU, Input, UpSampling2D, Flatten\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.models import Model"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "source": [
    "def gen(latent, n_classes=10):\n",
    "    \"\"\"Build a two-input (conditional) generator producing 28x28x1 images.\n",
    "\n",
    "    Args:\n",
    "        latent: size of the latent noise vector (first model input).\n",
    "        n_classes: size of the conditioning vector (second model input).\n",
    "            Defaults to 10, preserving the original behavior.\n",
    "\n",
    "    Returns:\n",
    "        A Keras Model mapping [noise (latent,), condition (n_classes,)]\n",
    "        to a (28, 28, 1) tensor with values in [0, 1].\n",
    "    \"\"\"\n",
    "    # Conditioning branch: project the condition vector onto a 7x7\n",
    "    # single-channel map so it can be concatenated with the noise branch.\n",
    "    # NOTE: shape must be a tuple — shape=(10) is just the int 10.\n",
    "    in2 = Input(shape=(n_classes,))\n",
    "    x2 = Dense(49)(in2)\n",
    "    x2 = LeakyReLU(alpha=0.2)(x2)\n",
    "    x2 = Reshape((7, 7, 1))(x2)\n",
    "\n",
    "    # Noise branch: project the latent vector onto a 7x7x128 feature map.\n",
    "    in1 = Input(shape=(latent,))\n",
    "    x1 = Dense(7 * 7 * 128)(in1)\n",
    "    x1 = LeakyReLU(alpha=0.2)(x1)\n",
    "    x1 = Reshape((7, 7, 128))(x1)\n",
    "\n",
    "    # Merge branches (channel axis: 128 + 1 = 129) and upsample\n",
    "    # 7x7 -> 14x14 -> 28x28.\n",
    "    x = Concatenate()([x1, x2])\n",
    "    x = UpSampling2D()(x)\n",
    "    x = Conv2DTranspose(128, (3, 3), padding='same')(x)\n",
    "    x = LeakyReLU(alpha=0.2)(x)\n",
    "\n",
    "    x = UpSampling2D()(x)\n",
    "    x = Conv2DTranspose(128, (3, 3), padding='same')(x)\n",
    "    x = LeakyReLU(alpha=0.2)(x)\n",
    "    # Sigmoid keeps output pixel values in [0, 1].\n",
    "    x = Conv2DTranspose(1, (3, 3), padding='same', activation='sigmoid')(x)\n",
    "\n",
    "    return Model([in1, in2], x)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "# Instantiate the generator with a 100-dim latent space.\n",
    "g = gen(100)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "source": [
    "# Two symbolic inputs: latent noise (None, 100) and condition (None, 10).\n",
    "g.input"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[<KerasTensor: shape=(None, 100) dtype=float32 (created by layer 'input_2')>,\n",
       " <KerasTensor: shape=(None, 10) dtype=float32 (created by layer 'input_1')>]"
      ]
     },
     "metadata": {},
     "execution_count": 4
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "source": [
    "# Single symbolic output: a (None, 28, 28, 1) image tensor.\n",
    "g.output"
   ],
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "<KerasTensor: shape=(None, 28, 28, 1) dtype=float32 (created by layer 'conv2d_transpose_2')>"
      ]
     },
     "metadata": {},
     "execution_count": 5
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "# Layer-by-layer architecture, output shapes, and parameter counts.\n",
    "g.summary()"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Model: \"model\"\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_2 (InputLayer)            [(None, 100)]        0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_1 (InputLayer)            [(None, 10)]         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 6272)         633472      input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense (Dense)                   (None, 49)           539         input_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "leaky_re_lu_1 (LeakyReLU)       (None, 6272)         0           dense_1[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "leaky_re_lu (LeakyReLU)         (None, 49)           0           dense[0][0]                      \n",
      "__________________________________________________________________________________________________\n",
      "reshape_1 (Reshape)             (None, 7, 7, 128)    0           leaky_re_lu_1[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "reshape (Reshape)               (None, 7, 7, 1)      0           leaky_re_lu[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "concatenate (Concatenate)       (None, 7, 7, 129)    0           reshape_1[0][0]                  \n",
      "                                                                 reshape[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d (UpSampling2D)    (None, 14, 14, 129)  0           concatenate[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose (Conv2DTranspo (None, 14, 14, 128)  148736      up_sampling2d[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "leaky_re_lu_2 (LeakyReLU)       (None, 14, 14, 128)  0           conv2d_transpose[0][0]           \n",
      "__________________________________________________________________________________________________\n",
      "up_sampling2d_1 (UpSampling2D)  (None, 28, 28, 128)  0           leaky_re_lu_2[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_1 (Conv2DTrans (None, 28, 28, 128)  147584      up_sampling2d_1[0][0]            \n",
      "__________________________________________________________________________________________________\n",
      "leaky_re_lu_3 (LeakyReLU)       (None, 28, 28, 128)  0           conv2d_transpose_1[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "conv2d_transpose_2 (Conv2DTrans (None, 28, 28, 1)    1153        leaky_re_lu_3[0][0]              \n",
      "==================================================================================================\n",
      "Total params: 931,484\n",
      "Trainable params: 931,484\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "# Same architecture as gen(100), rebuilt inline at top level so the\n",
    "# intermediate KerasTensors (x1, x2, x) can be inspected in later cells.\n",
    "\n",
    "# Conditioning branch: 10-dim vector -> 7x7 single-channel map.\n",
    "in2 = Input(shape=(10))\n",
    "x2 = Dense(49)(in2)\n",
    "x2 = LeakyReLU(alpha=0.2)(x2)\n",
    "x2 = Reshape((7,7,1))(x2)\n",
    "\n",
    "\n",
    "# Noise branch: 100-dim latent vector -> 7x7x128 feature map.\n",
    "in1 = Input(shape=(100))\n",
    "x1 = Dense(7*7*128)(in1)\n",
    "x1 = LeakyReLU(alpha=0.2)(x1)\n",
    "x1 = Reshape((7,7,128))(x1)\n",
    "\n",
    "# Merge branches and upsample 7x7 -> 14x14 -> 28x28.\n",
    "x = Concatenate()([x1,x2])\n",
    "x = UpSampling2D()(x)\n",
    "x = Conv2DTranspose(128,(3,3),padding='same')(x)\n",
    "x = LeakyReLU(alpha=0.2)(x)\n",
    "\n",
    "x = UpSampling2D()(x)\n",
    "x = Conv2DTranspose(128,(3,3),padding='same')(x)\n",
    "x = LeakyReLU(alpha=0.2)(x)\n",
    "# Sigmoid keeps generated pixel values in [0, 1].\n",
    "x = Conv2DTranspose(1,(3,3),padding='same',activation='sigmoid')(x)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "source": [
    "# Inspect the KerasTensor API of a symbolic layer output.\n",
    "help(x)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Help on KerasTensor in module tensorflow.python.keras.engine.keras_tensor object:\n",
      "\n",
      "class KerasTensor(builtins.object)\n",
      " |  KerasTensor(type_spec, inferred_value=None, name=None)\n",
      " |  \n",
      " |  A representation of a Keras in/output during Functional API construction.\n",
      " |  \n",
      " |  `KerasTensor`s are tensor-like objects that represent the symbolic inputs\n",
      " |  and outputs of Keras layers during Functional model construction. They are\n",
      " |  comprised of the `tf.TypeSpec` of the (Composite)Tensor that will be\n",
      " |  consumed/produced in the corresponding location of the Functional model.\n",
      " |  \n",
      " |  KerasTensors are intended as a private API, so users should never need to\n",
      " |  directly instantiate `KerasTensor`s.\n",
      " |  \n",
      " |  **Building Functional Models with KerasTensors**\n",
      " |  `tf.keras.Input` produces `KerasTensor`s that represent the symbolic inputs\n",
      " |  to your model.\n",
      " |  \n",
      " |  Passing a `KerasTensor` to a `tf.keras.Layer` `__call__` lets the layer know\n",
      " |  that you are building a Functional model. The layer __call__ will\n",
      " |  infer the output signature and return `KerasTensor`s with `tf.TypeSpec`s\n",
      " |  corresponding to the symbolic outputs of that layer call. These output\n",
      " |  `KerasTensor`s will have all of the internal KerasHistory metadata attached\n",
      " |  to them that Keras needs to construct a Functional Model.\n",
      " |  \n",
      " |  Currently, layers infer the output signature by:\n",
      " |    * creating a scratch `FuncGraph`\n",
      " |    * making placeholders in the scratch graph that match the input typespecs\n",
      " |    * Calling `layer.call` on these placeholders\n",
      " |    * extracting the signatures of the outputs before clearing the scratch graph\n",
      " |  \n",
      " |  (Note: names assigned to KerasTensors by this process are not guaranteed to\n",
      " |  be unique, and are subject to implementation details).\n",
      " |  \n",
      " |  `tf.nest` methods are used to insure all of the inputs/output data\n",
      " |  structures get maintained, with elements swapped between KerasTensors and\n",
      " |  placeholders.\n",
      " |  \n",
      " |  In rare cases (such as when directly manipulating shapes using Keras layers),\n",
      " |  the layer may be able to partially infer the value of the output in addition\n",
      " |  to just inferring the signature.\n",
      " |  When this happens, the returned KerasTensor will also contain the inferred\n",
      " |  value information. Follow-on layers can use this information.\n",
      " |  during their own output signature inference.\n",
      " |  E.g. if one layer produces a symbolic `KerasTensor` that the next layer uses\n",
      " |  as the shape of its outputs, partially knowing the value helps infer the\n",
      " |  output shape.\n",
      " |  \n",
      " |  **Automatically converting TF APIs to layers**:\n",
      " |  If you passing a `KerasTensor` to a TF API that supports dispatching,\n",
      " |  Keras will automatically turn that API call into a lambda\n",
      " |  layer in the Functional model, and return KerasTensors representing the\n",
      " |  symbolic outputs.\n",
      " |  \n",
      " |  Most TF APIs that take only tensors as input and produce output tensors\n",
      " |  will support dispatching.\n",
      " |  \n",
      " |  Calling a `tf.function` does not support dispatching, so you cannot pass\n",
      " |  `KerasTensor`s as inputs to a `tf.function`.\n",
      " |  \n",
      " |  Higher-order APIs that take methods which produce tensors (e.g. `tf.while`,\n",
      " |  `tf.map_fn`, `tf.cond`) also do not currently support dispatching. So, you\n",
      " |  cannot directly pass KerasTensors as inputs to these APIs either. If you\n",
      " |  want to use these APIs inside of a Functional model, you must put them inside\n",
      " |  of a custom layer.\n",
      " |  \n",
      " |  Args:\n",
      " |    type_spec: The `tf.TypeSpec` for the symbolic input created by\n",
      " |      `tf.keras.Input`, or symbolically inferred for the output\n",
      " |      during a symbolic layer `__call__`.\n",
      " |    inferred_value: (Optional) a non-symbolic static value, possibly partially\n",
      " |      specified, that could be symbolically inferred for the outputs during\n",
      " |      a symbolic layer `__call__`. This will generally only happen when\n",
      " |      grabbing and manipulating `tf.int32` shapes directly as tensors.\n",
      " |      Statically inferring values in this way and storing them in the\n",
      " |      KerasTensor allows follow-on layers to infer output signatures\n",
      " |      more effectively. (e.g. when using a symbolic shape tensor to later\n",
      " |      construct a tensor with that shape).\n",
      " |    name: (optional) string name for this KerasTensor. Names automatically\n",
      " |      generated by symbolic layer `__call__`s are not guaranteed to be unique,\n",
      " |      and are subject to implementation details.\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  __abs__ = abs(x, name=None)\n",
      " |      Computes the absolute value of a tensor.\n",
      " |      \n",
      " |      Given a tensor of integer or floating-point values, this operation returns a\n",
      " |      tensor of the same type, where each element contains the absolute value of the\n",
      " |      corresponding element in the input.\n",
      " |      \n",
      " |      Given a tensor `x` of complex numbers, this operation returns a tensor of type\n",
      " |      `float32` or `float64` that is the absolute value of each element in `x`. For\n",
      " |      a complex number \\\\(a + bj\\\\), its absolute value is computed as\n",
      " |      \\\\(\\sqrt{a^2 + b^2}\\\\).\n",
      " |      \n",
      " |      For example:\n",
      " |      \n",
      " |      >>> # real number\n",
      " |      >>> x = tf.constant([-2.25, 3.25])\n",
      " |      >>> tf.abs(x)\n",
      " |      <tf.Tensor: shape=(2,), dtype=float32,\n",
      " |      numpy=array([2.25, 3.25], dtype=float32)>\n",
      " |      \n",
      " |      >>> # complex number\n",
      " |      >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n",
      " |      >>> tf.abs(x)\n",
      " |      <tf.Tensor: shape=(2, 1), dtype=float64, numpy=\n",
      " |      array([[5.25594901],\n",
      " |             [6.60492241]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n",
      " |          `int32`, `int64`, `complex64` or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,\n",
      " |          with absolute values. Note, for `complex64` or `complex128` input, the\n",
      " |          returned `Tensor` will be of type `float32` or `float64`, respectively.\n",
      " |      \n",
      " |        If `x` is a `SparseTensor`, returns\n",
      " |        `SparseTensor(x.indices, tf.math.abs(x.values, ...), x.dense_shape)`\n",
      " |  \n",
      " |  __add__ = binary_op_wrapper(x, y)\n",
      " |      The operation invoked by the `Tensor.__add__` operator.\n",
      " |      \n",
      " |        Purpose in the API:\n",
      " |      \n",
      " |          This method is exposed in TensorFlow's API so that library developers\n",
      " |          can register dispatching for `Tensor.__add__` to allow it to handle\n",
      " |          custom composite tensors & other custom objects.\n",
      " |      \n",
      " |          The API symbol is not intended to be called by users directly and does\n",
      " |          appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: The left-hand side of the `+` operator.\n",
      " |        y: The right-hand side of the `+` operator.\n",
      " |        name: an optional name for the operation.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `+` operation.\n",
      " |  \n",
      " |  __and__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  __array__(self)\n",
      " |  \n",
      " |  __div__ = binary_op_wrapper(x, y)\n",
      " |      Divides x / y elementwise (using Python 2 division operator semantics). (deprecated)\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Deprecated in favor of operator or tf.math.divide.\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n",
      " |      3 division operator semantics.\n",
      " |      \n",
      " |      This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\n",
      " |      and `y` are both integers then the result will be an integer. This is in\n",
      " |      contrast to Python 3, where division with `/` is always a float while division\n",
      " |      with `//` is always an integer.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` returns the quotient of x and y.\n",
      " |  \n",
      " |  __eq__ = tensor_equals(self, other)\n",
      " |      The operation invoked by the `Tensor.__eq__` operator.\n",
      " |      \n",
      " |      Compares two tensors element-wise for equality if they are\n",
      " |      broadcast-compatible; or returns False if they are not broadcast-compatible.\n",
      " |      (Note that this behavior differs from `tf.math.equal`, which raises an\n",
      " |      exception if the two tensors are not broadcast-compatible.)\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__eq__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        self: The left-hand side of the `==` operator.\n",
      " |        other: The right-hand side of the `==` operator.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `==` operation, or `False` if the arguments\n",
      " |        are not broadcast-compatible.\n",
      " |  \n",
      " |  __floordiv__ = binary_op_wrapper(x, y)\n",
      " |      Divides `x / y` elementwise, rounding toward the most negative integer.\n",
      " |      \n",
      " |      The same as `tf.compat.v1.div(x,y)` for integers, but uses\n",
      " |      `tf.floor(tf.compat.v1.div(x,y))` for\n",
      " |      floating point arguments so that the result is always an integer (though\n",
      " |      possibly an integer represented as floating point).  This op is generated by\n",
      " |      `x // y` floor division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.\n",
      " |      \n",
      " |      `x` and `y` must have the same type, and the result will have the same type\n",
      " |      as well.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` rounded down.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If the inputs are complex.\n",
      " |  \n",
      " |  __ge__ = greater_equal(x, y, name=None)\n",
      " |      Returns the truth value of (x >= y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6, 7])\n",
      " |      y = tf.constant([5, 2, 5, 10])\n",
      " |      tf.math.greater_equal(x, y) ==> [True, True, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6, 7])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.greater_equal(x, y) ==> [True, False, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __getitem__ = _slice_helper(tensor, slice_spec, var=None)\n",
      " |      Overload for Tensor.__getitem__.\n",
      " |      \n",
      " |      This operation extracts the specified region from the tensor.\n",
      " |      The notation is similar to NumPy with the restriction that\n",
      " |      currently only support basic indexing. That means that\n",
      " |      using a non-scalar tensor as input is not currently allowed.\n",
      " |      \n",
      " |      Some useful examples:\n",
      " |      \n",
      " |      ```python\n",
      " |      # Strip leading and trailing 2 elements\n",
      " |      foo = tf.constant([1,2,3,4,5,6])\n",
      " |      print(foo[2:-2].eval())  # => [3,4]\n",
      " |      \n",
      " |      # Skip every other row and reverse the order of the columns\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]\n",
      " |      \n",
      " |      # Use scalar tensors as indices on both dimensions\n",
      " |      print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3\n",
      " |      \n",
      " |      # Insert another dimension\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]\n",
      " |      print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],\n",
      " |      [[7],[8],[9]]]\n",
      " |      \n",
      " |      # Ellipses (3 equivalent operations)\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      \n",
      " |      # Masks\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]\n",
      " |      ```\n",
      " |      \n",
      " |      Notes:\n",
      " |        - `tf.newaxis` is `None` as in NumPy.\n",
      " |        - An implicit ellipsis is placed at the end of the `slice_spec`\n",
      " |        - NumPy advanced indexing is currently not supported.\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__getitem__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        tensor: An ops.Tensor object.\n",
      " |        slice_spec: The arguments to Tensor.__getitem__.\n",
      " |        var: In the case of variable slice assignment, the Variable object to slice\n",
      " |          (i.e. tensor is the read-only view of this variable).\n",
      " |      \n",
      " |      Returns:\n",
      " |        The appropriate slice of \"tensor\", based on \"slice_spec\".\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If a slice range is negative size.\n",
      " |        TypeError: If the slice indices aren't int, slice, ellipsis,\n",
      " |          tf.newaxis or scalar int32/int64 tensors.\n",
      " |  \n",
      " |  __gt__ = greater(x, y, name=None)\n",
      " |      Returns the truth value of (x > y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.greater` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 2, 5])\n",
      " |      tf.math.greater(x, y) ==> [False, True, True]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.greater(x, y) ==> [False, False, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __hash__(self)\n",
      " |      Return hash(self).\n",
      " |  \n",
      " |  __init__(self, type_spec, inferred_value=None, name=None)\n",
      " |      Constructs a KerasTensor.\n",
      " |  \n",
      " |  __invert__ = invert_(x, name=None)\n",
      " |  \n",
      " |  __iter__(self)\n",
      " |  \n",
      " |  __le__ = less_equal(x, y, name=None)\n",
      " |      Returns the truth value of (x <= y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.less_equal` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.less_equal(x, y) ==> [True, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 6, 6])\n",
      " |      tf.math.less_equal(x, y) ==> [True, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __len__(self)\n",
      " |  \n",
      " |  __lt__ = less(x, y, name=None)\n",
      " |      Returns the truth value of (x < y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.less` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.less(x, y) ==> [False, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 6, 7])\n",
      " |      tf.math.less(x, y) ==> [False, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __matmul__ = binary_op_wrapper(x, y)\n",
      " |      Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n",
      " |      \n",
      " |      The inputs must, following any transpositions, be tensors of rank >= 2\n",
      " |      where the inner 2 dimensions specify valid matrix multiplication dimensions,\n",
      " |      and any further outer dimensions specify matching batch size.\n",
      " |      \n",
      " |      Both matrices must be of the same type. The supported types are:\n",
      " |      `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n",
      " |      \n",
      " |      Either matrix can be transposed or adjointed (conjugated and transposed) on\n",
      " |      the fly by setting one of the corresponding flag to `True`. These are `False`\n",
      " |      by default.\n",
      " |      \n",
      " |      If one or both of the matrices contain a lot of zeros, a more efficient\n",
      " |      multiplication algorithm can be used by setting the corresponding\n",
      " |      `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n",
      " |      This optimization is only available for plain matrices (rank-2 tensors) with\n",
      " |      datatypes `bfloat16` or `float32`.\n",
      " |      \n",
      " |      A simple 2-D tensor matrix multiplication:\n",
      " |      \n",
      " |      >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n",
      " |      >>> a  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
      " |      array([[1, 2, 3],\n",
      " |             [4, 5, 6]], dtype=int32)>\n",
      " |      >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n",
      " |      >>> b  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n",
      " |      array([[ 7,  8],\n",
      " |             [ 9, 10],\n",
      " |             [11, 12]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
      " |      array([[ 58,  64],\n",
      " |             [139, 154]], dtype=int32)>\n",
      " |      \n",
      " |      A batch matrix multiplication with batch shape [2]:\n",
      " |      \n",
      " |      >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n",
      " |      >>> a  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=\n",
      " |      array([[[ 1,  2,  3],\n",
      " |              [ 4,  5,  6]],\n",
      " |             [[ 7,  8,  9],\n",
      " |              [10, 11, 12]]], dtype=int32)>\n",
      " |      >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n",
      " |      >>> b  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n",
      " |      array([[[13, 14],\n",
      " |              [15, 16],\n",
      " |              [17, 18]],\n",
      " |             [[19, 20],\n",
      " |              [21, 22],\n",
      " |              [23, 24]]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n",
      " |      array([[[ 94, 100],\n",
      " |              [229, 244]],\n",
      " |             [[508, 532],\n",
      " |              [697, 730]]], dtype=int32)>\n",
      " |      \n",
      " |      Since python >= 3.5 the @ operator is supported\n",
      " |      (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,\n",
      " |      it simply calls the `tf.matmul()` function, so the following lines are\n",
      " |      equivalent:\n",
      " |      \n",
      " |      >>> d = a @ b @ [[10], [11]]\n",
      " |      >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n",
      " |      \n",
      " |      Args:\n",
      " |        a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n",
      " |          `complex64`, `complex128` and rank > 1.\n",
      " |        b: `tf.Tensor` with same type and rank as `a`.\n",
      " |        transpose_a: If `True`, `a` is transposed before multiplication.\n",
      " |        transpose_b: If `True`, `b` is transposed before multiplication.\n",
      " |        adjoint_a: If `True`, `a` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        adjoint_b: If `True`, `b` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        name: Name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n",
      " |        is the product of the corresponding matrices in `a` and `b`, e.g. if all\n",
      " |        transpose or adjoint attributes are `False`:\n",
      " |      \n",
      " |        `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n",
      " |        for all indices `i`, `j`.\n",
      " |      \n",
      " |        Note: This is matrix product, not element-wise product.\n",
      " |      \n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n",
      " |          `adjoint_b` are both set to `True`.\n",
      " |  \n",
      " |  __mod__ = binary_op_wrapper(x, y)\n",
      " |      Returns element-wise remainder of division. When `x < 0` xor `y < 0` is\n",
      " |      true, this follows Python semantics in that the result here is consistent\n",
      " |      with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n",
      " |      \n",
      " |      *NOTE*: `math.floormod` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `uint64`, `bfloat16`, `half`, `float32`, `float64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __mul__ = binary_op_wrapper(x, y)\n",
      " |      Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\n",
      " |  \n",
      " |  __ne__ = tensor_not_equals(self, other)\n",
      " |      The operation invoked by the `Tensor.__ne__` operator.\n",
      " |      \n",
      " |      Compares two tensors element-wise for inequality if they are\n",
      " |      broadcast-compatible; or returns True if they are not broadcast-compatible.\n",
      " |      (Note that this behavior differs from `tf.math.not_equal`, which raises an\n",
      " |      exception if the two tensors are not broadcast-compatible.)\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__ne__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        not appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        self: The left-hand side of the `!=` operator.\n",
      " |        other: The right-hand side of the `!=` operator.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `!=` operation, or `True` if the arguments\n",
      " |        are not broadcast-compatible.\n",
      " |  \n",
      " |  __neg__ = neg(x, name=None)\n",
      " |      Computes numerical negative value element-wise.\n",
      " |      \n",
      " |      I.e., \\\\(y = -x\\\\).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |      \n",
      " |        If `x` is a `SparseTensor`, returns\n",
      " |        `SparseTensor(x.indices, tf.math.negative(x.values, ...), x.dense_shape)`\n",
      " |  \n",
      " |  __or__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  __pow__ = binary_op_wrapper(x, y)\n",
      " |      Computes the power of one value to another.\n",
      " |      \n",
      " |      Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n",
      " |      corresponding elements in `x` and `y`. For example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([[2, 2], [3, 3]])\n",
      " |      y = tf.constant([[8, 16], [2, 3]])\n",
      " |      tf.pow(x, y)  # [[256, 65536], [9, 27]]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`.\n",
      " |  \n",
      " |  __radd__ = r_binary_op_wrapper(y, x)\n",
      " |      The operation invoked by the `Tensor.__add__` operator.\n",
      " |      \n",
      " |        Purpose in the API:\n",
      " |      \n",
      " |          This method is exposed in TensorFlow's API so that library developers\n",
      " |          can register dispatching for `Tensor.__add__` to allow it to handle\n",
      " |          custom composite tensors & other custom objects.\n",
      " |      \n",
      " |          The API symbol is not intended to be called by users directly and does\n",
      " |          not appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: The left-hand side of the `+` operator.\n",
      " |        y: The right-hand side of the `+` operator.\n",
      " |        name: an optional name for the operation.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `+` operation.\n",
      " |  \n",
      " |  __rand__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __rdiv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides x / y elementwise (using Python 2 division operator semantics). (deprecated)\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Deprecated in favor of operator or tf.math.divide.\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n",
      " |      3 division operator semantics.\n",
      " |      \n",
      " |      This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\n",
      " |      and `y` are both integers then the result will be an integer. This is in\n",
      " |      contrast to Python 3, where division with `/` is always a float while division\n",
      " |      with `//` is always an integer.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` returns the quotient of x and y.\n",
      " |  \n",
      " |  __repr__(self)\n",
      " |      Return repr(self).\n",
      " |  \n",
      " |  __rfloordiv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides `x / y` elementwise, rounding toward the most negative integer.\n",
      " |      \n",
      " |      The same as `tf.compat.v1.div(x,y)` for integers, but uses\n",
      " |      `tf.floor(tf.compat.v1.div(x,y))` for\n",
      " |      floating point arguments so that the result is always an integer (though\n",
      " |      possibly an integer represented as floating point).  This op is generated by\n",
      " |      `x // y` floor division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.\n",
      " |      \n",
      " |      `x` and `y` must have the same type, and the result will have the same type\n",
      " |      as well.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` rounded down.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If the inputs are complex.\n",
      " |  \n",
      " |  __rmatmul__ = r_binary_op_wrapper(y, x)\n",
      " |      Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n",
      " |      \n",
      " |      The inputs must, following any transpositions, be tensors of rank >= 2\n",
      " |      where the inner 2 dimensions specify valid matrix multiplication dimensions,\n",
      " |      and any further outer dimensions specify matching batch size.\n",
      " |      \n",
      " |      Both matrices must be of the same type. The supported types are:\n",
      " |      `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n",
      " |      \n",
      " |      Either matrix can be transposed or adjointed (conjugated and transposed) on\n",
      " |      the fly by setting one of the corresponding flag to `True`. These are `False`\n",
      " |      by default.\n",
      " |      \n",
      " |      If one or both of the matrices contain a lot of zeros, a more efficient\n",
      " |      multiplication algorithm can be used by setting the corresponding\n",
      " |      `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n",
      " |      This optimization is only available for plain matrices (rank-2 tensors) with\n",
      " |      datatypes `bfloat16` or `float32`.\n",
      " |      \n",
      " |      A simple 2-D tensor matrix multiplication:\n",
      " |      \n",
      " |      >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n",
      " |      >>> a  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
      " |      array([[1, 2, 3],\n",
      " |             [4, 5, 6]], dtype=int32)>\n",
      " |      >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n",
      " |      >>> b  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n",
      " |      array([[ 7,  8],\n",
      " |             [ 9, 10],\n",
      " |             [11, 12]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
      " |      array([[ 58,  64],\n",
      " |             [139, 154]], dtype=int32)>\n",
      " |      \n",
      " |      A batch matrix multiplication with batch shape [2]:\n",
      " |      \n",
      " |      >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n",
      " |      >>> a  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=\n",
      " |      array([[[ 1,  2,  3],\n",
      " |              [ 4,  5,  6]],\n",
      " |             [[ 7,  8,  9],\n",
      " |              [10, 11, 12]]], dtype=int32)>\n",
      " |      >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n",
      " |      >>> b  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n",
      " |      array([[[13, 14],\n",
      " |              [15, 16],\n",
      " |              [17, 18]],\n",
      " |             [[19, 20],\n",
      " |              [21, 22],\n",
      " |              [23, 24]]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n",
      " |      array([[[ 94, 100],\n",
      " |              [229, 244]],\n",
      " |             [[508, 532],\n",
      " |              [697, 730]]], dtype=int32)>\n",
      " |      \n",
      " |      Since python >= 3.5 the @ operator is supported\n",
      " |      (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,\n",
      " |      it simply calls the `tf.matmul()` function, so the following lines are\n",
      " |      equivalent:\n",
      " |      \n",
      " |      >>> d = a @ b @ [[10], [11]]\n",
      " |      >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n",
      " |      \n",
      " |      Args:\n",
      " |        a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n",
      " |          `complex64`, `complex128` and rank > 1.\n",
      " |        b: `tf.Tensor` with same type and rank as `a`.\n",
      " |        transpose_a: If `True`, `a` is transposed before multiplication.\n",
      " |        transpose_b: If `True`, `b` is transposed before multiplication.\n",
      " |        adjoint_a: If `True`, `a` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        adjoint_b: If `True`, `b` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        name: Name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n",
      " |        is the product of the corresponding matrices in `a` and `b`, e.g. if all\n",
      " |        transpose or adjoint attributes are `False`:\n",
      " |      \n",
      " |        `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n",
      " |        for all indices `i`, `j`.\n",
      " |      \n",
      " |        Note: This is matrix product, not element-wise product.\n",
      " |      \n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n",
      " |          `adjoint_b` are both set to `True`.\n",
      " |  \n",
      " |  __rmod__ = r_binary_op_wrapper(y, x)\n",
      " |      Returns element-wise remainder of division. When `x < 0` xor `y < 0` is\n",
      " |      true, this follows Python semantics in that the result here is consistent\n",
      " |      with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n",
      " |      \n",
      " |      *NOTE*: `math.floormod` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `uint64`, `bfloat16`, `half`, `float32`, `float64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __rmul__ = r_binary_op_wrapper(y, x)\n",
      " |      Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\n",
      " |  \n",
      " |  __ror__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __rpow__ = r_binary_op_wrapper(y, x)\n",
      " |      Computes the power of one value to another.\n",
      " |      \n",
      " |      Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n",
      " |      corresponding elements in `x` and `y`. For example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([[2, 2], [3, 3]])\n",
      " |      y = tf.constant([[8, 16], [2, 3]])\n",
      " |      tf.pow(x, y)  # [[256, 65536], [9, 27]]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`.\n",
      " |  \n",
      " |  __rsub__ = r_binary_op_wrapper(y, x)\n",
      " |      Returns x - y element-wise.\n",
      " |      \n",
      " |      *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Both input and output have a range `(-inf, inf)`.\n",
      " |      \n",
      " |      Example usages below.\n",
      " |      \n",
      " |      Subtract operation between an array and a scalar:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = 1\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 0, -1, -2, -3, -4], dtype=int32)>\n",
      " |      \n",
      " |      Note that binary `-` operator can be used instead:\n",
      " |      \n",
      " |      >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n",
      " |      >>> y = tf.convert_to_tensor(1)\n",
      " |      >>> x - y\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      \n",
      " |      Subtract operation between an array and a tensor of same shape:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = tf.constant([5, 4, 3, 2, 1])\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 4,  2,  0, -2, -4], dtype=int32)>\n",
      " |      \n",
      " |      **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n",
      " |      non-tensor, the non-tensor input will adopt (or get casted to) the data type\n",
      " |      of the tensor input. This can potentially cause unwanted overflow or underflow\n",
      " |      conversion.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = tf.constant([1, 2], dtype=tf.int8)\n",
      " |      >>> y = [2**8 + 1, 2**8 + 2]\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2,), dtype=int8, numpy=array([0, 0], dtype=int8)>\n",
      " |      \n",
      " |      When subtracting two input values of different shapes, `tf.subtract` follows the\n",
      " |      [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules)\n",
      " |      . The two input array shapes are compared element-wise. Starting with the\n",
      " |      trailing dimensions, the two dimensions either have to be equal or one of them\n",
      " |      needs to be `1`.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(2, 1, 3)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 3), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]],\n",
      " |             [[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]]])>\n",
      " |      \n",
      " |      Example with inputs of different dimensions:\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(1, 6)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 6), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]],\n",
      " |             [[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __rtruediv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides x / y elementwise (using Python 3 division operator semantics).\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n",
      " |      division operator semantics.\n",
      " |      \n",
      " |      This function forces Python 3 division operator semantics where all integer\n",
      " |      arguments are cast to floating types first.   This op is generated by normal\n",
      " |      `x / y` division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.  If you want integer division that rounds\n",
      " |      down, use `x // y` or `tf.math.floordiv`.\n",
      " |      \n",
      " |      `x` and `y` must have the same numeric type.  If the inputs are floating\n",
      " |      point, the output will have the same type.  If the inputs are integral, the\n",
      " |      inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n",
      " |      and `int64` (matching the behavior of Numpy).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of numeric type.\n",
      " |        y: `Tensor` denominator of numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` evaluated in floating point.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If `x` and `y` have different dtypes.\n",
      " |  \n",
      " |  __rxor__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __str__(self)\n",
      " |      Return str(self).\n",
      " |  \n",
      " |  __sub__ = binary_op_wrapper(x, y)\n",
      " |      Returns x - y element-wise.\n",
      " |      \n",
      " |      *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Both input and output have a range `(-inf, inf)`.\n",
      " |      \n",
      " |      Example usages below.\n",
      " |      \n",
      " |      Subtract operation between an array and a scalar:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = 1\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 0, -1, -2, -3, -4], dtype=int32)>\n",
      " |      \n",
      " |      Note that binary `-` operator can be used instead:\n",
      " |      \n",
      " |      >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n",
      " |      >>> y = tf.convert_to_tensor(1)\n",
      " |      >>> x - y\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      \n",
      " |      Subtract operation between an array and a tensor of same shape:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = tf.constant([5, 4, 3, 2, 1])\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 4,  2,  0, -2, -4], dtype=int32)>\n",
      " |      \n",
      " |      **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n",
      " |      non-tensor, the non-tensor input will adopt (or get casted to) the data type\n",
      " |      of the tensor input. This can potentially cause unwanted overflow or underflow\n",
      " |      conversion.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = tf.constant([1, 2], dtype=tf.int8)\n",
      " |      >>> y = [2**8 + 1, 2**8 + 2]\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2,), dtype=int8, numpy=array([0, 0], dtype=int8)>\n",
      " |      \n",
      " |      When subtracting two input values of different shapes, `tf.subtract` follows the\n",
      " |      [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules)\n",
      " |      . The two input array shapes are compared element-wise. Starting with the\n",
      " |      trailing dimensions, the two dimensions either have to be equal or one of them\n",
      " |      needs to be `1`.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(2, 1, 3)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 3), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]],\n",
      " |             [[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]]])>\n",
      " |      \n",
      " |      Example with inputs of different dimensions:\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(1, 6)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 6), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]],\n",
      " |             [[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __truediv__ = binary_op_wrapper(x, y)\n",
      " |      Divides x / y elementwise (using Python 3 division operator semantics).\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n",
      " |      division operator semantics.\n",
      " |      \n",
      " |      This function forces Python 3 division operator semantics where all integer\n",
      " |      arguments are cast to floating types first.   This op is generated by normal\n",
      " |      `x / y` division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.  If you want integer division that rounds\n",
      " |      down, use `x // y` or `tf.math.floordiv`.\n",
      " |      \n",
      " |      `x` and `y` must have the same numeric type.  If the inputs are floating\n",
      " |      point, the output will have the same type.  If the inputs are integral, the\n",
      " |      inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n",
      " |      and `int64` (matching the behavior of Numpy).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of numeric type.\n",
      " |        y: `Tensor` denominator of numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` evaluated in floating point.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If `x` and `y` have different dtypes.\n",
      " |  \n",
      " |  __xor__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  experimental_ref(self)\n",
      " |      DEPRECATED FUNCTION\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Use ref() instead.\n",
      " |  \n",
      " |  get_shape(self)\n",
      " |  \n",
      " |  ref(self)\n",
      " |      Returns a hashable reference object to this KerasTensor.\n",
      " |      \n",
      " |      The primary use case for this API is to put KerasTensors in a\n",
      " |      set/dictionary. We can't put tensors in a set/dictionary as\n",
      " |      `tensor.__hash__()` is not available and tensor equality (`==`) is supposed\n",
      " |      to produce a tensor representing if the two inputs are equal.\n",
      " |      \n",
      " |      See the documentation of `tf.Tensor.ref()` for more info.\n",
      " |  \n",
      " |  set_shape(self, shape)\n",
      " |      Updates the shape of this KerasTensor. Mimics `tf.Tensor.set_shape()`.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Class methods defined here:\n",
      " |  \n",
      " |  from_tensor(tensor) from builtins.type\n",
      " |      Convert a traced (composite)tensor to a representative KerasTensor.\n",
      " |  \n",
      " |  from_type_spec(type_spec, name=None) from builtins.type\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties defined here:\n",
      " |  \n",
      " |  dtype\n",
      " |      Returns the `dtype` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  is_tensor_like\n",
      " |  \n",
      " |  name\n",
      " |      Returns the (non-unique, optional) name of this symbolic Keras value.\n",
      " |  \n",
      " |  op\n",
      " |  \n",
      " |  shape\n",
      " |      Returns the `TensorShape` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  type_spec\n",
      " |      Returns the `tf.TypeSpec` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors defined here:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes defined here:\n",
      " |  \n",
      " |  __array_priority__ = 100\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "source": [
    "help(x)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Help on KerasTensor in module tensorflow.python.keras.engine.keras_tensor object:\n",
      "\n",
      "class KerasTensor(builtins.object)\n",
      " |  KerasTensor(type_spec, inferred_value=None, name=None)\n",
      " |  \n",
      " |  A representation of a Keras in/output during Functional API construction.\n",
      " |  \n",
      " |  `KerasTensor`s are tensor-like objects that represent the symbolic inputs\n",
      " |  and outputs of Keras layers during Functional model construction. They are\n",
      " |  comprised of the `tf.TypeSpec` of the (Composite)Tensor that will be\n",
      " |  consumed/produced in the corresponding location of the Functional model.\n",
      " |  \n",
      " |  KerasTensors are intended as a private API, so users should never need to\n",
      " |  directly instantiate `KerasTensor`s.\n",
      " |  \n",
      " |  **Building Functional Models with KerasTensors**\n",
      " |  `tf.keras.Input` produces `KerasTensor`s that represent the symbolic inputs\n",
      " |  to your model.\n",
      " |  \n",
      " |  Passing a `KerasTensor` to a `tf.keras.Layer` `__call__` lets the layer know\n",
      " |  that you are building a Functional model. The layer __call__ will\n",
      " |  infer the output signature and return `KerasTensor`s with `tf.TypeSpec`s\n",
      " |  corresponding to the symbolic outputs of that layer call. These output\n",
      " |  `KerasTensor`s will have all of the internal KerasHistory metadata attached\n",
      " |  to them that Keras needs to construct a Functional Model.\n",
      " |  \n",
      " |  Currently, layers infer the output signature by:\n",
      " |    * creating a scratch `FuncGraph`\n",
      " |    * making placeholders in the scratch graph that match the input typespecs\n",
      " |    * Calling `layer.call` on these placeholders\n",
      " |    * extracting the signatures of the outputs before clearing the scratch graph\n",
      " |  \n",
      " |  (Note: names assigned to KerasTensors by this process are not guaranteed to\n",
      " |  be unique, and are subject to implementation details).\n",
      " |  \n",
      " |  `tf.nest` methods are used to insure all of the inputs/output data\n",
      " |  structures get maintained, with elements swapped between KerasTensors and\n",
      " |  placeholders.\n",
      " |  \n",
      " |  In rare cases (such as when directly manipulating shapes using Keras layers),\n",
      " |  the layer may be able to partially infer the value of the output in addition\n",
      " |  to just inferring the signature.\n",
      " |  When this happens, the returned KerasTensor will also contain the inferred\n",
      " |  value information. Follow-on layers can use this information.\n",
      " |  during their own output signature inference.\n",
      " |  E.g. if one layer produces a symbolic `KerasTensor` that the next layer uses\n",
      " |  as the shape of its outputs, partially knowing the value helps infer the\n",
      " |  output shape.\n",
      " |  \n",
      " |  **Automatically converting TF APIs to layers**:\n",
      " |  If you passing a `KerasTensor` to a TF API that supports dispatching,\n",
      " |  Keras will automatically turn that API call into a lambda\n",
      " |  layer in the Functional model, and return KerasTensors representing the\n",
      " |  symbolic outputs.\n",
      " |  \n",
      " |  Most TF APIs that take only tensors as input and produce output tensors\n",
      " |  will support dispatching.\n",
      " |  \n",
      " |  Calling a `tf.function` does not support dispatching, so you cannot pass\n",
      " |  `KerasTensor`s as inputs to a `tf.function`.\n",
      " |  \n",
      " |  Higher-order APIs that take methods which produce tensors (e.g. `tf.while`,\n",
      " |  `tf.map_fn`, `tf.cond`) also do not currently support dispatching. So, you\n",
      " |  cannot directly pass KerasTensors as inputs to these APIs either. If you\n",
      " |  want to use these APIs inside of a Functional model, you must put them inside\n",
      " |  of a custom layer.\n",
      " |  \n",
      " |  Args:\n",
      " |    type_spec: The `tf.TypeSpec` for the symbolic input created by\n",
      " |      `tf.keras.Input`, or symbolically inferred for the output\n",
      " |      during a symbolic layer `__call__`.\n",
      " |    inferred_value: (Optional) a non-symbolic static value, possibly partially\n",
      " |      specified, that could be symbolically inferred for the outputs during\n",
      " |      a symbolic layer `__call__`. This will generally only happen when\n",
      " |      grabbing and manipulating `tf.int32` shapes directly as tensors.\n",
      " |      Statically inferring values in this way and storing them in the\n",
      " |      KerasTensor allows follow-on layers to infer output signatures\n",
      " |      more effectively. (e.g. when using a symbolic shape tensor to later\n",
      " |      construct a tensor with that shape).\n",
      " |    name: (optional) string name for this KerasTensor. Names automatically\n",
      " |      generated by symbolic layer `__call__`s are not guaranteed to be unique,\n",
      " |      and are subject to implementation details.\n",
      " |  \n",
      " |  Methods defined here:\n",
      " |  \n",
      " |  __abs__ = abs(x, name=None)\n",
      " |      Computes the absolute value of a tensor.\n",
      " |      \n",
      " |      Given a tensor of integer or floating-point values, this operation returns a\n",
      " |      tensor of the same type, where each element contains the absolute value of the\n",
      " |      corresponding element in the input.\n",
      " |      \n",
      " |      Given a tensor `x` of complex numbers, this operation returns a tensor of type\n",
      " |      `float32` or `float64` that is the absolute value of each element in `x`. For\n",
      " |      a complex number \\\\(a + bj\\\\), its absolute value is computed as\n",
      " |      \\\\(\\sqrt{a^2 + b^2}\\\\).\n",
      " |      \n",
      " |      For example:\n",
      " |      \n",
      " |      >>> # real number\n",
      " |      >>> x = tf.constant([-2.25, 3.25])\n",
      " |      >>> tf.abs(x)\n",
      " |      <tf.Tensor: shape=(2,), dtype=float32,\n",
      " |      numpy=array([2.25, 3.25], dtype=float32)>\n",
      " |      \n",
      " |      >>> # complex number\n",
      " |      >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n",
      " |      >>> tf.abs(x)\n",
      " |      <tf.Tensor: shape=(2, 1), dtype=float64, numpy=\n",
      " |      array([[5.25594901],\n",
      " |             [6.60492241]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n",
      " |          `int32`, `int64`, `complex64` or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,\n",
      " |          with absolute values. Note, for `complex64` or `complex128` input, the\n",
      " |          returned `Tensor` will be of type `float32` or `float64`, respectively.\n",
      " |      \n",
      " |        If `x` is a `SparseTensor`, returns\n",
      " |        `SparseTensor(x.indices, tf.math.abs(x.values, ...), x.dense_shape)`\n",
      " |  \n",
      " |  __add__ = binary_op_wrapper(x, y)\n",
      " |      The operation invoked by the `Tensor.__add__` operator.\n",
      " |      \n",
      " |        Purpose in the API:\n",
      " |      \n",
      " |          This method is exposed in TensorFlow's API so that library developers\n",
      " |          can register dispatching for `Tensor.__add__` to allow it to handle\n",
      " |          custom composite tensors & other custom objects.\n",
      " |      \n",
      " |          The API symbol is not intended to be called by users directly and does\n",
      " |          appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: The left-hand side of the `+` operator.\n",
      " |        y: The right-hand side of the `+` operator.\n",
      " |        name: an optional name for the operation.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `+` operation.\n",
      " |  \n",
      " |  __and__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  __array__(self)\n",
      " |  \n",
      " |  __div__ = binary_op_wrapper(x, y)\n",
      " |      Divides x / y elementwise (using Python 2 division operator semantics). (deprecated)\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Deprecated in favor of operator or tf.math.divide.\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n",
      " |      3 division operator semantics.\n",
      " |      \n",
      " |      This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\n",
      " |      and `y` are both integers then the result will be an integer. This is in\n",
      " |      contrast to Python 3, where division with `/` is always a float while division\n",
      " |      with `//` is always an integer.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` returns the quotient of x and y.\n",
      " |  \n",
      " |  __eq__ = tensor_equals(self, other)\n",
      " |      The operation invoked by the `Tensor.__eq__` operator.\n",
      " |      \n",
      " |      Compares two tensors element-wise for equality if they are\n",
      " |      broadcast-compatible; or returns False if they are not broadcast-compatible.\n",
      " |      (Note that this behavior differs from `tf.math.equal`, which raises an\n",
      " |      exception if the two tensors are not broadcast-compatible.)\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__eq__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        self: The left-hand side of the `==` operator.\n",
      " |        other: The right-hand side of the `==` operator.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `==` operation, or `False` if the arguments\n",
      " |        are not broadcast-compatible.\n",
      " |  \n",
      " |  __floordiv__ = binary_op_wrapper(x, y)\n",
      " |      Divides `x / y` elementwise, rounding toward the most negative integer.\n",
      " |      \n",
      " |      The same as `tf.compat.v1.div(x,y)` for integers, but uses\n",
      " |      `tf.floor(tf.compat.v1.div(x,y))` for\n",
      " |      floating point arguments so that the result is always an integer (though\n",
      " |      possibly an integer represented as floating point).  This op is generated by\n",
      " |      `x // y` floor division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.\n",
      " |      \n",
      " |      `x` and `y` must have the same type, and the result will have the same type\n",
      " |      as well.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` rounded down.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If the inputs are complex.\n",
      " |  \n",
      " |  __ge__ = greater_equal(x, y, name=None)\n",
      " |      Returns the truth value of (x >= y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6, 7])\n",
      " |      y = tf.constant([5, 2, 5, 10])\n",
      " |      tf.math.greater_equal(x, y) ==> [True, True, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6, 7])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.greater_equal(x, y) ==> [True, False, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __getitem__ = _slice_helper(tensor, slice_spec, var=None)\n",
      " |      Overload for Tensor.__getitem__.\n",
      " |      \n",
      " |      This operation extracts the specified region from the tensor.\n",
      " |      The notation is similar to NumPy with the restriction that\n",
      " |      currently only support basic indexing. That means that\n",
      " |      using a non-scalar tensor as input is not currently allowed.\n",
      " |      \n",
      " |      Some useful examples:\n",
      " |      \n",
      " |      ```python\n",
      " |      # Strip leading and trailing 2 elements\n",
      " |      foo = tf.constant([1,2,3,4,5,6])\n",
      " |      print(foo[2:-2].eval())  # => [3,4]\n",
      " |      \n",
      " |      # Skip every other row and reverse the order of the columns\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]\n",
      " |      \n",
      " |      # Use scalar tensors as indices on both dimensions\n",
      " |      print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3\n",
      " |      \n",
      " |      # Insert another dimension\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]\n",
      " |      print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],\n",
      " |      [[7],[8],[9]]]\n",
      " |      \n",
      " |      # Ellipses (3 equivalent operations)\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]\n",
      " |      \n",
      " |      # Masks\n",
      " |      foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n",
      " |      print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]\n",
      " |      ```\n",
      " |      \n",
      " |      Notes:\n",
      " |        - `tf.newaxis` is `None` as in NumPy.\n",
      " |        - An implicit ellipsis is placed at the end of the `slice_spec`\n",
      " |        - NumPy advanced indexing is currently not supported.\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__getitem__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        tensor: An ops.Tensor object.\n",
      " |        slice_spec: The arguments to Tensor.__getitem__.\n",
      " |        var: In the case of variable slice assignment, the Variable object to slice\n",
      " |          (i.e. tensor is the read-only view of this variable).\n",
      " |      \n",
      " |      Returns:\n",
      " |        The appropriate slice of \"tensor\", based on \"slice_spec\".\n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If a slice range is negative size.\n",
      " |        TypeError: If the slice indices aren't int, slice, ellipsis,\n",
      " |          tf.newaxis or scalar int32/int64 tensors.\n",
      " |  \n",
      " |  __gt__ = greater(x, y, name=None)\n",
      " |      Returns the truth value of (x > y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.greater` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 2, 5])\n",
      " |      tf.math.greater(x, y) ==> [False, True, True]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.greater(x, y) ==> [False, False, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __hash__(self)\n",
      " |      Return hash(self).\n",
      " |  \n",
      " |  __init__(self, type_spec, inferred_value=None, name=None)\n",
      " |      Constructs a KerasTensor.\n",
      " |  \n",
      " |  __invert__ = invert_(x, name=None)\n",
      " |  \n",
      " |  __iter__(self)\n",
      " |  \n",
      " |  __le__ = less_equal(x, y, name=None)\n",
      " |      Returns the truth value of (x <= y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.less_equal` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.less_equal(x, y) ==> [True, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 6, 6])\n",
      " |      tf.math.less_equal(x, y) ==> [True, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __len__(self)\n",
      " |  \n",
      " |  __lt__ = less(x, y, name=None)\n",
      " |      Returns the truth value of (x < y) element-wise.\n",
      " |      \n",
      " |      *NOTE*: `math.less` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5])\n",
      " |      tf.math.less(x, y) ==> [False, True, False]\n",
      " |      \n",
      " |      x = tf.constant([5, 4, 6])\n",
      " |      y = tf.constant([5, 6, 7])\n",
      " |      tf.math.less(x, y) ==> [False, True, True]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor` of type `bool`.\n",
      " |  \n",
      " |  __matmul__ = binary_op_wrapper(x, y)\n",
      " |      Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n",
      " |      \n",
      " |      The inputs must, following any transpositions, be tensors of rank >= 2\n",
      " |      where the inner 2 dimensions specify valid matrix multiplication dimensions,\n",
      " |      and any further outer dimensions specify matching batch size.\n",
      " |      \n",
      " |      Both matrices must be of the same type. The supported types are:\n",
      " |      `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n",
      " |      \n",
      " |      Either matrix can be transposed or adjointed (conjugated and transposed) on\n",
      " |      the fly by setting one of the corresponding flag to `True`. These are `False`\n",
      " |      by default.\n",
      " |      \n",
      " |      If one or both of the matrices contain a lot of zeros, a more efficient\n",
      " |      multiplication algorithm can be used by setting the corresponding\n",
      " |      `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n",
      " |      This optimization is only available for plain matrices (rank-2 tensors) with\n",
      " |      datatypes `bfloat16` or `float32`.\n",
      " |      \n",
      " |      A simple 2-D tensor matrix multiplication:\n",
      " |      \n",
      " |      >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n",
      " |      >>> a  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
      " |      array([[1, 2, 3],\n",
      " |             [4, 5, 6]], dtype=int32)>\n",
      " |      >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n",
      " |      >>> b  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n",
      " |      array([[ 7,  8],\n",
      " |             [ 9, 10],\n",
      " |             [11, 12]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
      " |      array([[ 58,  64],\n",
      " |             [139, 154]], dtype=int32)>\n",
      " |      \n",
      " |      A batch matrix multiplication with batch shape [2]:\n",
      " |      \n",
      " |      >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n",
      " |      >>> a  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=\n",
      " |      array([[[ 1,  2,  3],\n",
      " |              [ 4,  5,  6]],\n",
      " |             [[ 7,  8,  9],\n",
      " |              [10, 11, 12]]], dtype=int32)>\n",
      " |      >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n",
      " |      >>> b  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n",
      " |      array([[[13, 14],\n",
      " |              [15, 16],\n",
      " |              [17, 18]],\n",
      " |             [[19, 20],\n",
      " |              [21, 22],\n",
      " |              [23, 24]]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n",
      " |      array([[[ 94, 100],\n",
      " |              [229, 244]],\n",
      " |             [[508, 532],\n",
      " |              [697, 730]]], dtype=int32)>\n",
      " |      \n",
      " |      Since python >= 3.5 the @ operator is supported\n",
      " |      (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,\n",
      " |      it simply calls the `tf.matmul()` function, so the following lines are\n",
      " |      equivalent:\n",
      " |      \n",
      " |      >>> d = a @ b @ [[10], [11]]\n",
      " |      >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n",
      " |      \n",
      " |      Args:\n",
      " |        a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n",
      " |          `complex64`, `complex128` and rank > 1.\n",
      " |        b: `tf.Tensor` with same type and rank as `a`.\n",
      " |        transpose_a: If `True`, `a` is transposed before multiplication.\n",
      " |        transpose_b: If `True`, `b` is transposed before multiplication.\n",
      " |        adjoint_a: If `True`, `a` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        adjoint_b: If `True`, `b` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        name: Name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n",
      " |        is the product of the corresponding matrices in `a` and `b`, e.g. if all\n",
      " |        transpose or adjoint attributes are `False`:\n",
      " |      \n",
      " |        `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n",
      " |        for all indices `i`, `j`.\n",
      " |      \n",
      " |        Note: This is matrix product, not element-wise product.\n",
      " |      \n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n",
      " |          `adjoint_b` are both set to `True`.\n",
      " |  \n",
      " |  __mod__ = binary_op_wrapper(x, y)\n",
      " |      Returns element-wise remainder of division. When `x < 0` xor `y < 0` is\n",
      " |      \n",
      " |      true, this follows Python semantics in that the result here is consistent\n",
      " |      with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n",
      " |      \n",
      " |      *NOTE*: `math.floormod` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `uint64`, `bfloat16`, `half`, `float32`, `float64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __mul__ = binary_op_wrapper(x, y)\n",
      " |      Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\n",
      " |  \n",
      " |  __ne__ = tensor_not_equals(self, other)\n",
      " |      The operation invoked by the `Tensor.__ne__` operator.\n",
      " |      \n",
      " |      Compares two tensors element-wise for inequality if they are\n",
      " |      broadcast-compatible; or returns True if they are not broadcast-compatible.\n",
      " |      (Note that this behavior differs from `tf.math.not_equal`, which raises an\n",
      " |      exception if the two tensors are not broadcast-compatible.)\n",
      " |      \n",
      " |      Purpose in the API:\n",
      " |      \n",
      " |        This method is exposed in TensorFlow's API so that library developers\n",
      " |        can register dispatching for `Tensor.__ne__` to allow it to handle\n",
      " |        custom composite tensors & other custom objects.\n",
      " |      \n",
      " |        The API symbol is not intended to be called by users directly and does\n",
      " |        appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        self: The left-hand side of the `!=` operator.\n",
      " |        other: The right-hand side of the `!=` operator.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `!=` operation, or `True` if the arguments\n",
      " |        are not broadcast-compatible.\n",
      " |  \n",
      " |  __neg__ = neg(x, name=None)\n",
      " |      Computes numerical negative value element-wise.\n",
      " |      \n",
      " |      I.e., \\\\(y = -x\\\\).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |      \n",
      " |        If `x` is a `SparseTensor`, returns\n",
      " |        `SparseTensor(x.indices, tf.math.negative(x.values, ...), x.dense_shape)`\n",
      " |  \n",
      " |  __or__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  __pow__ = binary_op_wrapper(x, y)\n",
      " |      Computes the power of one value to another.\n",
      " |      \n",
      " |      Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n",
      " |      corresponding elements in `x` and `y`. For example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([[2, 2], [3, 3]])\n",
      " |      y = tf.constant([[8, 16], [2, 3]])\n",
      " |      tf.pow(x, y)  # [[256, 65536], [9, 27]]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`.\n",
      " |  \n",
      " |  __radd__ = r_binary_op_wrapper(y, x)\n",
      " |      The operation invoked by the `Tensor.__add__` operator.\n",
      " |      \n",
      " |        Purpose in the API:\n",
      " |      \n",
      " |          This method is exposed in TensorFlow's API so that library developers\n",
      " |          can register dispatching for `Tensor.__add__` to allow it to handle\n",
      " |          custom composite tensors & other custom objects.\n",
      " |      \n",
      " |          The API symbol is not intended to be called by users directly and does\n",
      " |          appear in TensorFlow's generated documentation.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: The left-hand side of the `+` operator.\n",
      " |        y: The right-hand side of the `+` operator.\n",
      " |        name: an optional name for the operation.\n",
      " |      \n",
      " |      Returns:\n",
      " |        The result of the elementwise `+` operation.\n",
      " |  \n",
      " |  __rand__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __rdiv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides x / y elementwise (using Python 2 division operator semantics). (deprecated)\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Deprecated in favor of operator or tf.math.divide.\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor division operator or tf.divide which obey Python\n",
      " |      3 division operator semantics.\n",
      " |      \n",
      " |      This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`\n",
      " |      and `y` are both integers then the result will be an integer. This is in\n",
      " |      contrast to Python 3, where division with `/` is always a float while division\n",
      " |      with `//` is always an integer.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` returns the quotient of x and y.\n",
      " |  \n",
      " |  __repr__(self)\n",
      " |      Return repr(self).\n",
      " |  \n",
      " |  __rfloordiv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides `x / y` elementwise, rounding toward the most negative integer.\n",
      " |      \n",
      " |      The same as `tf.compat.v1.div(x,y)` for integers, but uses\n",
      " |      `tf.floor(tf.compat.v1.div(x,y))` for\n",
      " |      floating point arguments so that the result is always an integer (though\n",
      " |      possibly an integer represented as floating point).  This op is generated by\n",
      " |      `x // y` floor division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.\n",
      " |      \n",
      " |      `x` and `y` must have the same type, and the result will have the same type\n",
      " |      as well.\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of real numeric type.\n",
      " |        y: `Tensor` denominator of real numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` rounded down.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If the inputs are complex.\n",
      " |  \n",
      " |  __rmatmul__ = r_binary_op_wrapper(y, x)\n",
      " |      Multiplies matrix `a` by matrix `b`, producing `a` * `b`.\n",
      " |      \n",
      " |      The inputs must, following any transpositions, be tensors of rank >= 2\n",
      " |      where the inner 2 dimensions specify valid matrix multiplication dimensions,\n",
      " |      and any further outer dimensions specify matching batch size.\n",
      " |      \n",
      " |      Both matrices must be of the same type. The supported types are:\n",
      " |      `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.\n",
      " |      \n",
      " |      Either matrix can be transposed or adjointed (conjugated and transposed) on\n",
      " |      the fly by setting one of the corresponding flag to `True`. These are `False`\n",
      " |      by default.\n",
      " |      \n",
      " |      If one or both of the matrices contain a lot of zeros, a more efficient\n",
      " |      multiplication algorithm can be used by setting the corresponding\n",
      " |      `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.\n",
      " |      This optimization is only available for plain matrices (rank-2 tensors) with\n",
      " |      datatypes `bfloat16` or `float32`.\n",
      " |      \n",
      " |      A simple 2-D tensor matrix multiplication:\n",
      " |      \n",
      " |      >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n",
      " |      >>> a  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
      " |      array([[1, 2, 3],\n",
      " |             [4, 5, 6]], dtype=int32)>\n",
      " |      >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])\n",
      " |      >>> b  # 2-D tensor\n",
      " |      <tf.Tensor: shape=(3, 2), dtype=int32, numpy=\n",
      " |      array([[ 7,  8],\n",
      " |             [ 9, 10],\n",
      " |             [11, 12]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
      " |      array([[ 58,  64],\n",
      " |             [139, 154]], dtype=int32)>\n",
      " |      \n",
      " |      A batch matrix multiplication with batch shape [2]:\n",
      " |      \n",
      " |      >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])\n",
      " |      >>> a  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=\n",
      " |      array([[[ 1,  2,  3],\n",
      " |              [ 4,  5,  6]],\n",
      " |             [[ 7,  8,  9],\n",
      " |              [10, 11, 12]]], dtype=int32)>\n",
      " |      >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])\n",
      " |      >>> b  # 3-D tensor\n",
      " |      <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=\n",
      " |      array([[[13, 14],\n",
      " |              [15, 16],\n",
      " |              [17, 18]],\n",
      " |             [[19, 20],\n",
      " |              [21, 22],\n",
      " |              [23, 24]]], dtype=int32)>\n",
      " |      >>> c = tf.matmul(a, b)\n",
      " |      >>> c  # `a` * `b`\n",
      " |      <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n",
      " |      array([[[ 94, 100],\n",
      " |              [229, 244]],\n",
      " |             [[508, 532],\n",
      " |              [697, 730]]], dtype=int32)>\n",
      " |      \n",
      " |      Since python >= 3.5 the @ operator is supported\n",
      " |      (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,\n",
      " |      it simply calls the `tf.matmul()` function, so the following lines are\n",
      " |      equivalent:\n",
      " |      \n",
      " |      >>> d = a @ b @ [[10], [11]]\n",
      " |      >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])\n",
      " |      \n",
      " |      Args:\n",
      " |        a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,\n",
      " |          `complex64`, `complex128` and rank > 1.\n",
      " |        b: `tf.Tensor` with same type and rank as `a`.\n",
      " |        transpose_a: If `True`, `a` is transposed before multiplication.\n",
      " |        transpose_b: If `True`, `b` is transposed before multiplication.\n",
      " |        adjoint_a: If `True`, `a` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        adjoint_b: If `True`, `b` is conjugated and transposed before\n",
      " |          multiplication.\n",
      " |        a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this\n",
      " |          **does not support `tf.sparse.SparseTensor`**, it just makes optimizations\n",
      " |          that assume most values in `a` are zero.\n",
      " |          See `tf.sparse.sparse_dense_matmul`\n",
      " |          for some support for `tf.sparse.SparseTensor` multiplication.\n",
      " |        name: Name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix\n",
      " |        is the product of the corresponding matrices in `a` and `b`, e.g. if all\n",
      " |        transpose or adjoint attributes are `False`:\n",
      " |      \n",
      " |        `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,\n",
      " |        for all indices `i`, `j`.\n",
      " |      \n",
      " |        Note: This is matrix product, not element-wise product.\n",
      " |      \n",
      " |      \n",
      " |      Raises:\n",
      " |        ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and\n",
      " |          `adjoint_b` are both set to `True`.\n",
      " |  \n",
      " |  __rmod__ = r_binary_op_wrapper(y, x)\n",
      " |      Returns element-wise remainder of division. When `x < 0` xor `y < 0` is\n",
      " |      \n",
      " |      true, this follows Python semantics in that the result here is consistent\n",
      " |      with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n",
      " |      \n",
      " |      *NOTE*: `math.floormod` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `uint64`, `bfloat16`, `half`, `float32`, `float64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __rmul__ = r_binary_op_wrapper(y, x)\n",
      " |      Dispatches cwise mul for \"Dense*Dense\" and \"Dense*Sparse\".\n",
      " |  \n",
      " |  __ror__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __rpow__ = r_binary_op_wrapper(y, x)\n",
      " |      Computes the power of one value to another.\n",
      " |      \n",
      " |      Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\n",
      " |      corresponding elements in `x` and `y`. For example:\n",
      " |      \n",
      " |      ```python\n",
      " |      x = tf.constant([[2, 2], [3, 3]])\n",
      " |      y = tf.constant([[8, 16], [2, 3]])\n",
      " |      tf.pow(x, y)  # [[256, 65536], [9, 27]]\n",
      " |      ```\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,\n",
      " |          `complex64`, or `complex128`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`.\n",
      " |  \n",
      " |  __rsub__ = r_binary_op_wrapper(y, x)\n",
      " |      Returns x - y element-wise.\n",
      " |      \n",
      " |      *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Both input and output have a range `(-inf, inf)`.\n",
      " |      \n",
      " |      Example usages below.\n",
      " |      \n",
      " |      Subtract operation between an array and a scalar:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = 1\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 0, -1, -2, -3, -4], dtype=int32)>\n",
      " |      \n",
      " |      Note that binary `-` operator can be used instead:\n",
      " |      \n",
      " |      >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n",
      " |      >>> y = tf.convert_to_tensor(1)\n",
      " |      >>> x - y\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      \n",
      " |      Subtract operation between an array and a tensor of same shape:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = tf.constant([5, 4, 3, 2, 1])\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 4,  2,  0, -2, -4], dtype=int32)>\n",
      " |      \n",
      " |      **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n",
      " |      non-tensor, the non-tensor input will adopt (or get casted to) the data type\n",
      " |      of the tensor input. This can potentially cause unwanted overflow or underflow\n",
      " |      conversion.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = tf.constant([1, 2], dtype=tf.int8)\n",
      " |      >>> y = [2**8 + 1, 2**8 + 2]\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2,), dtype=int8, numpy=array([0, 0], dtype=int8)>\n",
      " |      \n",
      " |      When subtracting two input values of different shapes, `tf.subtract` follows the\n",
      " |      [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules)\n",
      " |      . The two input array shapes are compared element-wise. Starting with the\n",
      " |      trailing dimensions, the two dimensions either have to be equal or one of them\n",
      " |      needs to be `1`.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(2, 1, 3)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 3), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]],\n",
      " |             [[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]]])>\n",
      " |      \n",
      " |      Example with inputs of different dimensions:\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(1, 6)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 6), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]],\n",
      " |             [[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __rtruediv__ = r_binary_op_wrapper(y, x)\n",
      " |      Divides x / y elementwise (using Python 3 division operator semantics).\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n",
      " |      division operator semantics.\n",
      " |      \n",
      " |      This function forces Python 3 division operator semantics where all integer\n",
      " |      arguments are cast to floating types first.   This op is generated by normal\n",
      " |      `x / y` division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.  If you want integer division that rounds\n",
      " |      down, use `x // y` or `tf.math.floordiv`.\n",
      " |      \n",
      " |      `x` and `y` must have the same numeric type.  If the inputs are floating\n",
      " |      point, the output will have the same type.  If the inputs are integral, the\n",
      " |      inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n",
      " |      and `int64` (matching the behavior of Numpy).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of numeric type.\n",
      " |        y: `Tensor` denominator of numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` evaluated in floating point.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If `x` and `y` have different dtypes.\n",
      " |  \n",
      " |  __rxor__ = r_binary_op_wrapper(y, x)\n",
      " |  \n",
      " |  __str__(self)\n",
      " |      Return str(self).\n",
      " |  \n",
      " |  __sub__ = binary_op_wrapper(x, y)\n",
      " |      Returns x - y element-wise.\n",
      " |      \n",
      " |      *NOTE*: `tf.subtract` supports broadcasting. More about broadcasting\n",
      " |      [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n",
      " |      \n",
      " |      Both input and output have a range `(-inf, inf)`.\n",
      " |      \n",
      " |      Example usages below.\n",
      " |      \n",
      " |      Subtract operation between an array and a scalar:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = 1\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 0, -1, -2, -3, -4], dtype=int32)>\n",
      " |      \n",
      " |      Note that binary `-` operator can be used instead:\n",
      " |      \n",
      " |      >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])\n",
      " |      >>> y = tf.convert_to_tensor(1)\n",
      " |      >>> x - y\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32, numpy=array([0, 1, 2, 3, 4], dtype=int32)>\n",
      " |      \n",
      " |      Subtract operation between an array and a tensor of same shape:\n",
      " |      \n",
      " |      >>> x = [1, 2, 3, 4, 5]\n",
      " |      >>> y = tf.constant([5, 4, 3, 2, 1])\n",
      " |      >>> tf.subtract(y, x)\n",
      " |      <tf.Tensor: shape=(5,), dtype=int32,\n",
      " |      numpy=array([ 4,  2,  0, -2, -4], dtype=int32)>\n",
      " |      \n",
      " |      **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a\n",
      " |      non-tensor, the non-tensor input will adopt (or get casted to) the data type\n",
      " |      of the tensor input. This can potentially cause unwanted overflow or underflow\n",
      " |      conversion.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = tf.constant([1, 2], dtype=tf.int8)\n",
      " |      >>> y = [2**8 + 1, 2**8 + 2]\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2,), dtype=int8, numpy=array([0, 0], dtype=int8)>\n",
      " |      \n",
      " |      When subtracting two input values of different shapes, `tf.subtract` follows the\n",
      " |      [general broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules)\n",
      " |      . The two input array shapes are compared element-wise. Starting with the\n",
      " |      trailing dimensions, the two dimensions either have to be equal or one of them\n",
      " |      needs to be `1`.\n",
      " |      \n",
      " |      For example,\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(2, 1, 3)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 3), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]],\n",
      " |             [[0., 0., 0.],\n",
      " |              [0., 0., 0.],\n",
      " |              [0., 0., 0.]]])>\n",
      " |      \n",
      " |      Example with inputs of different dimensions:\n",
      " |      \n",
      " |      >>> x = np.ones(6).reshape(2, 3, 1)\n",
      " |      >>> y = np.ones(6).reshape(1, 6)\n",
      " |      >>> tf.subtract(x, y)\n",
      " |      <tf.Tensor: shape=(2, 3, 6), dtype=float64, numpy=\n",
      " |      array([[[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]],\n",
      " |             [[0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.],\n",
      " |              [0., 0., 0., 0., 0., 0.]]])>\n",
      " |      \n",
      " |      Args:\n",
      " |        x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`, `uint64`.\n",
      " |        y: A `Tensor`. Must have the same type as `x`.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        A `Tensor`. Has the same type as `x`.\n",
      " |  \n",
      " |  __truediv__ = binary_op_wrapper(x, y)\n",
      " |      Divides x / y elementwise (using Python 3 division operator semantics).\n",
      " |      \n",
      " |      NOTE: Prefer using the Tensor operator or tf.divide which obey Python\n",
      " |      division operator semantics.\n",
      " |      \n",
      " |      This function forces Python 3 division operator semantics where all integer\n",
      " |      arguments are cast to floating types first.   This op is generated by normal\n",
      " |      `x / y` division in Python 3 and in Python 2.7 with\n",
      " |      `from __future__ import division`.  If you want integer division that rounds\n",
      " |      down, use `x // y` or `tf.math.floordiv`.\n",
      " |      \n",
      " |      `x` and `y` must have the same numeric type.  If the inputs are floating\n",
      " |      point, the output will have the same type.  If the inputs are integral, the\n",
      " |      inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`\n",
      " |      and `int64` (matching the behavior of Numpy).\n",
      " |      \n",
      " |      Args:\n",
      " |        x: `Tensor` numerator of numeric type.\n",
      " |        y: `Tensor` denominator of numeric type.\n",
      " |        name: A name for the operation (optional).\n",
      " |      \n",
      " |      Returns:\n",
      " |        `x / y` evaluated in floating point.\n",
      " |      \n",
      " |      Raises:\n",
      " |        TypeError: If `x` and `y` have different dtypes.\n",
      " |  \n",
      " |  __xor__ = binary_op_wrapper(x, y)\n",
      " |  \n",
      " |  experimental_ref(self)\n",
      " |      DEPRECATED FUNCTION\n",
      " |      \n",
      " |      Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.\n",
      " |      Instructions for updating:\n",
      " |      Use ref() instead.\n",
      " |  \n",
      " |  get_shape(self)\n",
      " |  \n",
      " |  ref(self)\n",
      " |      Returns a hashable reference object to this KerasTensor.\n",
      " |      \n",
      " |      The primary use case for this API is to put KerasTensors in a\n",
      " |      set/dictionary. We can't put tensors in a set/dictionary as\n",
      " |      `tensor.__hash__()` is not available and tensor equality (`==`) is supposed\n",
      " |      to produce a tensor representing if the two inputs are equal.\n",
      " |      \n",
      " |      See the documentation of `tf.Tensor.ref()` for more info.\n",
      " |  \n",
      " |  set_shape(self, shape)\n",
      " |      Updates the shape of this KerasTensor. Mimics `tf.Tensor.set_shape()`.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Class methods defined here:\n",
      " |  \n",
      " |  from_tensor(tensor) from builtins.type\n",
      " |      Convert a traced (composite)tensor to a representative KerasTensor.\n",
      " |  \n",
      " |  from_type_spec(type_spec, name=None) from builtins.type\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Readonly properties defined here:\n",
      " |  \n",
      " |  dtype\n",
      " |      Returns the `dtype` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  is_tensor_like\n",
      " |  \n",
      " |  name\n",
      " |      Returns the (non-unique, optional) name of this symbolic Keras value.\n",
      " |  \n",
      " |  op\n",
      " |  \n",
      " |  shape\n",
      " |      Returns the `TensorShape` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  type_spec\n",
      " |      Returns the `tf.TypeSpec` symbolically inferred for this Keras output.\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data descriptors defined here:\n",
      " |  \n",
      " |  __dict__\n",
      " |      dictionary for instance variables (if defined)\n",
      " |  \n",
      " |  __weakref__\n",
      " |      list of weak references to the object (if defined)\n",
      " |  \n",
      " |  ----------------------------------------------------------------------\n",
      " |  Data and other attributes defined here:\n",
      " |  \n",
      " |  __array_priority__ = 100\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.8.10",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.10 64-bit ('TF_env': venv)"
  },
  "interpreter": {
   "hash": "0329f0a5e6cabec3ab8e58c7c31f3faa9932aff3ca3e5d00a3e765bf08184e67"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}