"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6,  8, 10, 12],
#       [-1, -2, -3, -4]]
```

API docstring: tensorflow.math
    N)compat)context)constant_op)dtypes)indexed_slices)ops)override_binary_operator)sparse_tensortensor)tensor_conversion_registry)tensor_shape)tensor_util)	array_ops)array_ops_stack)gen_array_ops)gen_bitwise_ops)gen_data_flow_ops)gen_logging_ops)gen_math_ops)
gen_nn_ops)gen_sparse_ops)tensor_math_operator_overrides)*)
tf_logging)_pywrap_utils)deprecation)dispatch)nest)collections_abc)	tf_exportlinspace	lin_space)v1c                    t        j                  |d| |g      5  t        j                  | d      } t        j                  |d| j                        }t	        j
                  |d      }t        || j                        }t	        j                  t	        j                  |       t	        j                  |            }t	        j                  | |      } t	        j                  ||      }t	        j                  | |      }t	        j                  ||      }t	        j                  |      }	t	        j                  |	      d	   }
t	        j                  |d	k\  ||
|z         }t        j                  |d
z
  d	      }t        j                  |dz
  d      }||z
  t        ||j                        z  }t        ||j                        }t        ||j                        }t	        j                  |d	k\  |d      }t        t        d|t        j                         |j                        }t        j"                  |t        |
            }t	        j                  ||d      }t	        j$                  ||      }|||z  z   }|||f}t	        j&                  ||      }t	        j(                  |	      }t	        j&                  |	d	| t	        j$                  |dg      |	|dz   d fd	      }t	        j*                  |||      cddd       S # 1 sw Y   yxY w)a  Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values are generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num <= 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour
  except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  `Start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `Axis` is where the values will be generated (the dimension in the
  returned tensor which corresponds to the axis will be equal to `num`)

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>



  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start.
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    num = cast(num_int, dtype=start.dtype)

    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)

    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)

    # The purpose is to avoid having negative values when repeating.
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # To avoid having negative values in the range or zero division, the
    # result is sliced at the end, so a correct result is returned for
    # num == 1 and num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors as delta.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, an exception is thrown in the range op; otherwise the same
    # div is used for delta.
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, it is limited (e.g. it does
    # not support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1, 1, ..., num_fill, ..., 1, 1], where the index
    # of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)


linspace = linspace_nd

arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))


def _set_doc(doc):

  def _decorator(func):
    func.__doc__ = doc
    return func

  return _decorator


@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,  # pylint: disable=redefined-builtin
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)


@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):  # pylint: disable=redefined-builtin
  """Returns the index with the largest value across axes of a tensor.
  In case of identity returns the smallest index.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C) # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Default to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  r   r(   rm   )r   rW   rl   r/   rm   r(   s       rU   rj   rj     s&    @ 
\D			eT+	NNr`   zmath.argminargminc                 N    t        j                  d|d|      }t        | |||      S rh   )r   ri   	argmin_v2rk   s        rU   rr   rr   -  rn   r`   c                 <    |d}t        j                  | |||      S )a  Returns the index with the smallest value across axes of a tensor.

  Returns the smallest index in case of ties.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,
      `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,
      `uint64`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `-rank(input), rank(input))`.
      Describes which axis of the input Tensor to reduce across. For vectors,
      use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  r   rp   )r   rX   rq   s       rU   rt   rt   >  s&    B 
\D			eT+	NNr`   zmath.absabsc                 P   t        j                  |d| g      5 }t        j                  | d      } | j                  j                  r5t        j                  | | j                  j                  |      cddd       S t        j                  | |      cddd       S # 1 sw Y   yxY w)a  Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of the
  corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of type
  `float32` or `float64` that is the absolute value of each element in `x`. For
  a complex number \\(a + bj\\), its absolute value is computed as
  \\(\sqrt{a^2 + b^2}\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
      with absolute values. Note, for `complex64` or `complex128` input, the
      returned `Tensor` will be of type `float32` or `float64`, respectively.
  Absxr'   Toutr(   N)	r   r3   r4   r+   
is_complexr   complex_abs
real_dtype_absry   r(   s     rU   rv   rv   i  s    P ~~dEA3' +4ac*Aww%%aagg.@.@tL+ + QT*	+ + +s   AB<BB%c                 2    t        j                  | ||      S )Nrl   
boundariesr(   )r   	bucketizer   s      rU   
_bucketizer     s    			e
	NNr`   c                   (    e Zd ZdZd Zd Zd Zd Zy)DivideDelegateWithNamezHUse Python2/Python3 division delegation to implement divide for tensors.c                      || _         || _        y)zConstruct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    Nr   )selfry   r(   s      rU   __init__zDivideDelegateWithName.__init__  s     DFDIr`   c                 D    t        | j                  || j                        S r[   )_truediv_python3ry   r(   r   ys     rU   __truediv__z"DivideDelegateWithName.__truediv__  s    DFFAtyy11r`   c                 D    t        | j                  || j                        S r[   )floordivry   r(   r   s     rU   __floordiv__z#DivideDelegateWithName.__floordiv__  s    DFFAtyy))r`   c                 D    t        | j                  || j                        S r[   )_div_python2ry   r(   r   s     rU   __div__zDivideDelegateWithName.__div__  s    499--r`   N)__name__
__module____qualname__r\   r   r   r   r   ra   r`   rU   r   r     s    P2*.r`   r   zmath.dividedividec                     |t        | |      |z  S t        j                  |       sDt        j                  |      r|j                  j                  nd}t        j                  | |      } | |z  S )at  Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`
    y: A `Tensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with same shape as input
  Nr-   )r   r   
is_tf_typer+   
base_dtyper   r4   )ry   r   r(   r+   s       rU   r   r     sg    . 
 "!T*Q.. !!!$$/$:$:1$=agg  4e



/aq5Lr`   zmath.multiplymultiplyc                 0    t        j                  | ||      S )a5  Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
  pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7,6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape. (More about broadcasting
  [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)

  For example:

  >>> x = tf.ones([1, 2]);
  >>> y = tf.ones([2, 1]);
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
       [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:

  A `Tensor`.  Has the same type as `x`.

  Raises:

   * InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  r   mulry   r   r(   s      rU   r   r     s    b 
		!Q	%%r`   z
2016-12-30zE`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`c                 0    t        j                  | ||      S r[   r   r   s      rU   _mulr          
		!Q	%%r`    zmath.subtractsubtractc                 0    t        j                  | ||      S r[   r   subr   s      rU   r   r     r   r`   zG`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`c                 0    t        j                  | ||      S r[   r   r   s      rU   _subr   (  r   r`   z>`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`c                     t        | |      S )ao  Computes numerical negative value element-wise.

  I.e., \(y = -x\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  )negativer   s     rU   _negr   6  s      
!T	r`   zmath.scalar_mul
scalar_mulc                    t        j                  |j                        j                  }t	        j
                  | |d      } | j                         }|j                  dk(  r{t        |t        j                        rJt        j                  t        j                  | |j                  |      |j                  |j                        S t        j                  | ||      S t!        d| d      )a  Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this is operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  scalarr+   r(   r   z5The input scalar must be a 0-D value. Received shape .)r   as_dtyper+   r   r   r4   	get_shaperH   
isinstancer   IndexedSlicesr   r   valuesindicesdense_shape
ValueError)r   ry   r(   r   r8   s        rU   r   r   L  s    6 qww'22*  JX/&



%
[[A!^112))


6188T
2AIIq}}N N fa..

?waHJ Jr`   zmath.softplusznn.softplusc                 .    t        j                  | |      S )a  Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.

  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
  takes on positive values.

  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">

  Example:

  >>> import tensorflow as tf
  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
  array([0.6931472, 1.3132616], dtype=float32)

  Args:
    features: `Tensor`
    name: Optional: name to associate with this operation.
  Returns:
    `Tensor`
  )r   softplus)featuresr(   s     rU   r   r   v  s    . 
		Xt	,,r`   c                 x    t        j                  |d|g      5 }t        | ||      cd d d        S # 1 sw Y   y xY w)Nr   )r   r3   r   )r   ry   r(   s      rU   scalar_mul_v2r     s9    
 ~~dL1#. '$fa&' ' 's   09zmath.powpowc                     t        j                  |d| g      5 }t        j                  | ||      cddd       S # 1 sw Y   yxY w)an  Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  Powr'   N)r   r3   r   _powr   s      rU   r   r     s?    2 ~~dEA3' .4Q-. . .s	   ;Azdtypes.complexcomplexc           
         t        j                  | d      } t        j                  |d      }t        j                  |d| |g      5 }| j                  |j                  f}|t        j
                  t        j
                  fk(  rt        j                  }n|t        j                  t        j                  fk(  rt        j                  }nZt        d| j                  j                   d|j                  j                   dt        j                  t        j
                  g       t        j                  | |||      cd	d	d	       S # 1 sw Y   y	xY w)
a  Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: Real and imag must be correct types
  realr'   imagComplexz7The `real` and `imag` components have incorrect types:  z&. They must be consistent, and one of rz   N)r   r4   r3   r+   r   float64
complex128float32	complex64	TypeErrorr(   r   _complex)r   r   r(   input_typesr{   s        rU   r   r     s
   @ 
		t&	1$			t&	1$
~~dId|4 C::tzz*Kv~~v~~66d	8	8dCZZ__Qtzz/ 0^^V^^4578 8   t$TBC C Cs   C3EEz	math.signsignc           
         t        j                  |       } | j                  j                  r}t	        j
                  | t        t	        j                  | | j                  t        j                  k(  rt        j                  nt        j                        | j                        |      S t	        j                  | |      S )ad  Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> # real number
  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([ 0.,  1., -1.], dtype=float32)>

  >>> # complex number
  >>> tf.math.sign([1 + 1j, 0 + 0j])
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>

  Args:
   x: A Tensor. Must be one of the following types: bfloat16, half, float32,
     float64, int32, int64, complex64, complex128.
   name: A name for the operation (optional).

  Returns:
   A Tensor. Has the same type as x.

   If x is a SparseTensor, returns SparseTensor(x.indices,
     tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if x.dtype.is_complex:
    return gen_math_ops.div_no_nan(
        x,
        cast(
            gen_math_ops.complex_abs(
                x,
                Tout=dtypes.float32
                if x.dtype == dtypes.complex64 else dtypes.float64),
            dtype=x.dtype),
        name=name)
  return gen_math_ops.sign(x, name=name)


@tf_export("math.real", v1=["math.real", "real"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):  # pylint: disable=redefined-builtin
  """Returns the real part of a complex (or real) tensor.
  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  Realrl   r'   rz   Nz<input must be a numeric tensor, but got tensor with dtype {})r   r3   r4   r+   r|   r~   r   r   
is_numericr   format)rl   r(   r~   s      rU   r   r     s    4 ~~dFUG, !!%g6E{{;;))ju:DA	 
 
		  
H
O
Okk  s   AC>C$CCz	math.imagr   c                 L   t        j                  |d| g      5 }t        j                  | d      } | j                  j                  r5t        j                  | | j                  j                  |      cddd       S t        j                  |       cddd       S # 1 sw Y   yxY w)ap  Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  Imagrl   r'   rz   N)
r   r3   r4   r+   r|   r   r   r~   r   rB   rl   r(   s     rU   r   r   ?  s    4 ~~dFUG, )!!%g6E{{u5;;+A+AM) )
 !!%() ) )s   AB<BB#z
math.angleanglec                    t        j                  |d| g      5 }t        j                  | d      } | j                  j                  r5t        j                  | | j                  j                  |      cddd       S t        j                  | dk  t        j                  t        j                  |       z  t        j                  |             cddd       S # 1 sw Y   yxY w)a  Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, a tensor of all zeros is returned.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  Anglerl   r'   rz   Nr   )r   r3   r4   r+   r|   r   r   r~   r   wherenppi	ones_likerB   r   s     rU   r   r   a  s    B ~~dGeW- :!!%g6E{{EKK,B,BN: :
 __UQY	0C0CE0J(J&11%8:: : :s   AC<ACCz
math.roundroundc                     t        j                  | d      } | j                  j                  r| S t	        j
                  | |      S )a  Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even.  Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  ry   r'   )r   r4   r+   
is_integerr   r   r   s     rU   r   r     s;    , 
AC(!WWHad++r`   r6   zdtypes.castc                    t        j                  |      j                  }t        | t        j
                        st        j                  |       r|| j                  k(  r| S t        j                  |d| g      5 }t        | t        j                        rDt        | j                  ||      }t        j                  | j                  || j                         } nt        | t"        j$                        rDt        | j                  ||      }t#        j$                  || j                  | j                         } nt        j&                  | d      } | j                  j(                  rF|j*                  r:t-        j.                  d| j                  j0                   d|j0                   d       | j                  |k7  rt3        j                  | ||      } | cddd       S # 1 sw Y   yxY w)a  Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Note casting nan and inf values to integral types has undefined behavior.

  Note this operation can lead to a loss of precision when converting native
  Python `float` and `complex` variables to `tf.float64` or `tf.complex128`
  tensors, since the input is first converted to the `float32` data type and
  then widened. It is recommended to use `tf.convert_to_tensor` instead of
  `tf.cast` for any non-tensor inputs.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.

  Castr'   ry   z!You are casting an input of type z to an incompatible dtype zI.  This will discard the imaginary part and may not be what you intended.N)r   r   r   r   
tensor_libTensorr   IsResourceVariabler+   r   r3   r
   SparseTensorr6   r   r   r   r   r   r4   r|   is_floatingloggingwarnr(   r   )ry   r+   r(   	base_typevalues_casts        rU   r6   r6     sn   n ooe$//)J%%&-*J*J1*MQWWH
~~dFQC( D!]//0948k

$
$QYYQ]]
Ka	A~33	4948k

&
&{AIIq}}
Ma 


,a	
			 5 5/~ >""+..!1 2	
 
I	a6-  s   8EGG#zdtypes.saturate_castsaturate_castc                    t        j                  |d| g      5 }t        j                  | d      } t        j                  |      j
                  }| j                  }|j                  r,|j                  r|j                  }|j                  }|j                  |j                  k  s|j                  |j                  kD  rt        j                  | t        j                  t        j                  |j                  |j                        |      t        j                  t        j                  |j                  |j                        |      d      } t        | ||      cddd       S t!        |       } t#        j$                  d       |j                  }|j                  }t'        j(                  dd	d
      s3|j                  |j                  k  s|j                  |j                  kD  rh|j*                  }	 t-        j.                  ||j*                        } |t-        j4                  |j                  |j                              }	t-        j6                  |	|j                  g|      }
|
d   |
d
   k  rt-        j8                  |	 |d      |      }	 |t-        j:                  |j                  |j                              }t-        j6                  ||j                  g|      }
|
d   |
d
   kD  rt-        j8                  | |d      |      }t        j                  | t        j                  |	|      t        j                  ||      d      } t        | ||      cddd       S # t0        $ r
 t2        }Y dw xY w# 1 sw Y   yxY w)a  Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without overflow.  If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.  See `tf.cast` for more
  details.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  r   valuer'   r-   clampNz0Casting complex to real discards imaginary part.i     r1   r   )r   r3   r4   r   r   r   r+   r|   r~   minmaxr   _clip_by_valuebuiltinsr   r6   r   r   r   forward_compatforward_compatibleas_numpy_dtyper   promote_typesr   floatr<   array	nextafterminimum)r   r+   r(   in_dtypereal_in_dtypereal_out_dtypeout_real_dtypenp_dtypepromoted_type	min_limitpromoted	max_limits               rU   r   r     s   * ~~dOeW5 O)!!%g6EOOE"--E{{H			 ++)) 2 22  >#5#55--##"">#5#5~7I7IJ " ##"">#5#5~7I7IJ " % E5t,/O) O)4 UGH&& %%N
 	))$A6<<.,,,<<.,,, ((h
((n33
 2::hllN4F4FGHi9n&8&89Oh	!x{	"LLHQKxH	2::hllN4F4FGHi9n&8&89Oh	!x{	"LLHQKxH	))



	
:


	
:	e u4(_O) O)r   	sO) O)s8   EN )BN 8 M*EN *M=9N <M==N  N	to_floatzUse `tf.cast` instead.)dateinstructionsc                 :    t        | t        j                  |      S )a  Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  @end_compatibility

  r'   )r6   r   r   r   s     rU   r  r  h      J 
ad	++r`   	to_doublec                 :    t        | t        j                  |      S )a  Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  @end_compatibility

  r'   )r6   r   r   r   s     rU   r  r    r  r`   to_int32c                 :    t        | t        j                  |      S )a  Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  @end_compatibility

  r'   )r6   r   int32r   s     rU   r  r        J 
aD	))r`   to_int64c                 :    t        | t        j                  |      S )a  Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution or
  tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  @end_compatibility

  r'   )r6   r   r>   r   s     rU   r  r    r  r`   to_bfloat16c                 :    t        | t        j                  |      S )a)  Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  @end_compatibility

  r'   )r6   r   bfloat16r   s     rU   r  r    s    J 
at	,,r`   to_complex64c                 :    t        | t        j                  |      S )aC  Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  @end_compatibility

  r'   )r6   r   r   r   s     rU   r  r  0  s    J 
a!!	--r`   to_complex128c                 :    t        | t        j                  |      S )aI  Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex128, name=name)


def _truediv_python3(x, y, name=None):
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype_hint=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one of "
          f"{{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    # Integer inputs are promoted to a floating dtype before the division.
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype_hint=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)


@tf_export("math.truediv", "truediv")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first. If you want integer
  division that rounds down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Example:

  >>> # Division with integer tensors (result is floating point)
  >>> x1 = tf.constant([10, 20, 30], dtype=tf.int32)
  >>> y1 = tf.constant([2, 4, 5], dtype=tf.int32)
  >>> tf.math.truediv(x1, y1)
  <tf.Tensor: shape=(3,), dtype=float64, numpy=array([5., 5., 6.])>

  >>> # Division with different shaped tensors (broadcasting)
  >>> x2 = tf.constant([[10, 20], [30, 40]], dtype=tf.float64)
  >>> y2 = tf.constant([2, 5], dtype=tf.float64)
  >>> tf.math.truediv(x2, y2)
  <tf.Tensor: shape=(2, 2), dtype=float64, numpy=
  array([[ 5.,  4.],
         [15.,  8.]])>

  >>> # Division by zero (returns inf for floating-point inputs)
  >>> x3 = tf.constant(5, dtype=tf.float32)
  >>> y3 = tf.constant(0, dtype=tf.float32)
  >>> tf.math.truediv(x3, y3)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
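
  >>> # Integer dtypes promote per the rule above (illustrative):
  >>> # int16 operands divide in float32.
  >>> tf.math.truediv(tf.constant(7, tf.int16), tf.constant(2, tf.int16)).dtype
  tf.float32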

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)


@tf_export(v1=["div"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  @compatibility(TF2)
  This function is deprecated in TF2. Prefer using the Tensor division operator,
  `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
  semantics.
  @end_compatibility


  This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
  and `y` are both integers then the result will be an integer. This is in
  contrast to Python 3, where division with `/` is always a float while division
  with `//` is always an integer.
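
  For example (illustrative of the two semantics):

  >>> tf.compat.v1.div(tf.constant(7), tf.constant(2)).numpy()
  3
  >>> (tf.constant(7) / tf.constant(2)).numpy()  # Python 3 semantics
  3.5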

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)


@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if `y` (denominator) is zero.
  For example:

  >>> tf.constant(3.0) / 0.0
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
  >>> tf.math.divide_no_nan(3.0, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Note that 0 is returned if `y` is 0 even if `x` is nonfinite:

  >>> tf.math.divide_no_nan(np.nan, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Args:
    x: A `Tensor` of a floating or integer dtype.
    y: A `Tensor` with the same dtype as `x` and a compatible shape.
    name: A name for the operation (optional).

  Returns:
    The element-wise quotient as in `tf.math.divide(x, y)`,
    except that division by zero produces `0.0`, not `nan`.
  r   r   r'   ry   r   r"  r$  r%  r   r&  z, in tf.math.divide_no_nan. Expected one of {r'  r(  N)r   r3   r   r   r4   r+   r   r   r)  r*  r+  r,  r-  r6   r   r   )ry   r   r(   r/  r0  r+   es          rU   r   r     s   : ~~dL1a&1 4T!!!$)?)?)B



,a


););#
Fa



,a


agg.@.@s
Kagg  Ggg  G' $KtG;a9 : :W%e 
q%.a
q%.a""1ad3/4 4  7+ &))n.A.A.CDT!WDDEFcK 4 4s<   C5F6	E1F6	F3*F.FF..F33F66F?zmath.multiply_no_nanc                    t        j                  |d| |g      5 }t        j                  | d      } t        j                  |d| j                  j                        }| j                  j                  }|j                  j                  }||k7  rt        d|d|      t        j                  | ||      cddd       S # 1 sw Y   yxY w)	a  Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.

  Note this is noncommutative: if y is NaN or infinite and x is 0, the result
  will be NaN.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x times y.
  multiply_no_nanry   r'   r   r*   r$  r%  N)r   r3   r4   r+   r   r   r   
mul_no_nanr4  s        rU   r:  r:  R  s    $ ~~d-1v6 4$ac*Aac1C1CDAgg  Ggg  G' $KtG;8 9 9""1ad34 4 4s   BCC
c                     t        j                  |d| |g      5 }t        j                  | ||      cddd       S # 1 sw Y   yxY w)a  Returns element-wise remainder of division.

  This follows Python semantics in that the
  result here is consistent with a flooring divide. E.g.
  `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.
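
  For example (illustrative):

  >>> tf.math.floormod(tf.constant(7), tf.constant(3)).numpy()
  1
  >>> tf.math.floormod(tf.constant(-7), tf.constant(3)).numpy()
  2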

  *NOTE*: `math.floormod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`,
      `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `bfloat16`, `half`,
      `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "mod", [x, y]) as name:
    return gen_math_ops.floor_mod(x, y, name=name)


@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Mathematically, this is equivalent to floor(x / y). For example:
    floor(8.4 / 4.0) = floor(2.1) = 2.0
    floor(-8.4 / 4.0) = floor(-2.1) = -3.0
  This is equivalent to the '//' operator in Python 3.0 and above.
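
  For example (illustrative):

  >>> tf.math.floordiv(tf.constant(8.4), tf.constant(4.0)).numpy()
  2.0
  >>> tf.math.floordiv(tf.constant(-8.4), tf.constant(4.0)).numpy()
  -3.0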

  Note: `x` and `y` must have the same type, and the result will have the same
  type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded toward -infinity.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops.floor_div(x, y, name=name)


@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
  """The operation invoked by the `Tensor.__add__` operator.
                        sFt        |t        j                        s,t        j                  || j                  j                  d      }| j                  t        j                  k(  rt        j                  | ||      S t        j                  | ||      S )al  The operation invoked by the `Tensor.__add__` operator.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__add__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    x: The left-hand side of the `+` operator.
    y: The right-hand side of the `+` operator.
    name: an optional name for the operation.

  Returns:
    The result of the elementwise `+` operation.
  r'   r   r"  )r    is_auto_dtype_conversion_enabledaddr   r   r   r
   r   r4   r+   r   r   stringr   add_v2r   s      rU   _add_dispatchrF    s    , 	))+q!$	Az((	)*	#	#3%aAGG,>,>SIAWWAqt,,q!$//r`   c                    t        |t        j                        rbt        j                  |j
                  |j                  |j                  | |      }t        j                  |j
                  ||j                        S t        | ||      S )z:Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse".r'   )	r   r
   r   r   sparse_dense_cwise_mulr   r   r   r   )ry   r   r(   new_valss       rU   _mul_dispatchrJ    sf    =--.44QYY56]]AtMH%%aii1==IIAqt$$r`   zmath.logical_xorlogical_xorc           	          t        j                  t        j                  | |      t        j                  t        j                  | |            |      S )a~  Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  Requires that `x` and `y` have the same shape or have
  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  shapes. For example, `x` and `y` can be:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  Args:
      x: A `tf.Tensor` type bool.
      y: A `tf.Tensor` of type bool.
      name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  r'   )r   logical_and
logical_orlogical_notr   s      rU   rK  rK    sF    \ 
	!	!a#|771=>
 r`   c                     | j                   t        j                  k(  rt        j                  | ||      S t        j                  | |      S r[   )r+   r   boolr   rM  r   bitwise_andr   s      rU   and_rS    s:    WW##Aq$//		$	$Q	**r`   c                     | j                   t        j                  k(  rt        j                  | ||      S t        j                  | |      S r[   )r+   r   rQ  r   rN  r   
bitwise_orr   s      rU   or_rV    s:    WW""1a..		#	#Aq	))r`   c                     | j                   t        j                  k(  rt        | ||      S t	        j
                  | |      S r[   )r+   r   rQ  rK  r   bitwise_xorr   s      rU   xor_rY    s4    WWq!T""		$	$Q	**r`   c                     | j                   t        j                  k(  rt        j                  | |      S t        j                  | |      S Nr'   )r+   r   rQ  r   rO  r   invertr   s     rU   invert_r]    s8    WW##AD11				--r`   z
math.equalr?   c                 2    t        j                  | ||      S )aJ  Returns the truth value of (x == y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise equality comparison, returning a Tensor of
  boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  r'   )r   r?   r   s      rU   r?   r?   #  s    D 
		Aqt	,,r`   zmath.not_equal	not_equalc                 2    t        j                  | ||      S )aT  Returns the truth value of (x != y) element-wise.

  Performs a [broadcast](
  https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
  arguments and then an element-wise inequality comparison, returning a Tensor
  of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  False])>

  Args:
    x: A `tf.Tensor`.
    y: A `tf.Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  r'   )r   r_  r   s      rU   r_  r_  H  s    D 
		14	00r`   z__operators__.eqc                    |yt        | dd      }t        j                  j                  rSt	        j
                         r?||j                  r1t        j                  | |      \  } }t        j                  | |d      S | |u S )a  The operation invoked by the `Tensor.__eq__` operator.

  Compares two tensors element-wise for equality if they are
  broadcast-compatible; or returns False if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)
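
  For example (illustrative; assumes the `==` operator dispatches here):

  >>> tf.constant([1, 2]) == tf.constant([1, 3])
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
  >>> tf.constant([1, 2]) == tf.constant([1, 2, 3])  # not broadcast-compatible
  False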

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__eq__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `==` operator.
    other: The right-hand side of the `==` operator.

  Returns:
    The result of the elementwise `==` operation, or `False` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  if (tensor_lib.Tensor._USE_EQUALITY and
      ops.executing_eagerly_outside_functions() and
      (g is None or g.building_function)):
    self, other = override_binary_operator.maybe_promote_tensors(self, other)
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object identity.
    return self is other


@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
  """The operation invoked by the `Tensor.__ne__` operator.

  Compares two tensors element-wise for inequality if they are
  broadcast-compatible; or returns True if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.not_equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)
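
  For example (illustrative, mirroring `__eq__` above):

  >>> tf.constant([1, 2]) != tf.constant([1, 3])
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>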

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__ne__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `!=` operator.
    other: The right-hand side of the `!=` operator.

  Returns:
    The result of the elementwise `!=` operation, or `True` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return True
  if (tensor_lib.Tensor._USE_EQUALITY and
      ops.executing_eagerly_outside_functions()):
    self, other = override_binary_operator.maybe_promote_tensors(self, other)
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor inequality is object non-identity.
    return self is not other


@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
      is not None; otherwise, acts as range limit and first entry defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
      defaults to the value of `start` while the first entry of the range
      defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
      1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    start, limit = 0, start

  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, tensor_lib.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, tensor_lib.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, tensor_lib.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")

    # Infer dtype if not explicitly provided.
    if dtype is None:
      dtype_hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float16, dtypes.bfloat16,
          dtypes.float32, dtypes.float64
      ]
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    else:
      inferred_dtype = dtype
    # Always try to perform a cast, even when start/limit/delta are already
    # tensors: their original dtype may differ from the requested dtype.
    start = cast(start, inferred_dtype)
    limit = cast(limit, inferred_dtype)
    delta = cast(delta, inferred_dtype)

    return gen_math_ops._range(start, limit, delta, name=name)


def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  del as_ref
  return range(value.start, value.stop, value.step, dtype=dtype, name=name)


def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None."""
  if axis is not None:
    return axis
  else:
    try:
      x_rank = x.shape.rank
    except AttributeError:
      x_rank = None
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if x_rank:
      return constant_op.constant(np.arange(x_rank, dtype=np.int32))
    else:
      # Otherwise, rely on Range and Rank to do the right thing at run time.
      return range(0, array_ops.rank(x))


def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()


def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  if not _has_fully_defined_shape(output) and (not keepdims) and (
      axis is None):
    output.set_shape(())
  return output


@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.
  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> # x has a shape of (2, 3) (two rows and three columns):
    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
    >>> x.numpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    >>> # sum all the elements
    >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
    >>> tf.reduce_sum(x).numpy().item()
    6
    >>> # reduce along the first dimension
    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> tf.reduce_sum(x, 0).numpy()
    array([2, 2, 2], dtype=int32)
    >>> # reduce along the second dimension
    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
    >>> tf.reduce_sum(x, 1).numpy()
    array([3, 3], dtype=int32)
    >>> # keep the original dimensions
    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
    array([[3],
           [3]], dtype=int32)
    >>> # reduce along both dimensions
    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> # or, equivalently, reduce along rows, then reduce the resultant array
    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> # 2 + 2 + 2 = 6
    >>> tf.reduce_sum(x, [0, 1]).numpy().item()
    6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  r/   reduction_indicesr  r  )r   ri   r  input_tensorr/   r  r(   r  r  s         rU   reduce_sum_v1r  B  sM    P 
	/	/0C0A
C$ 33J4?L(	L$$	77r`   c           
      4    t        | |||t        | |            S )a  Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> # x has a shape of (2, 3) (two rows and three columns):
    >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
    >>> x.numpy()
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int32)
    >>> # sum all the elements
    >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> tf.reduce_sum(x).numpy().item()
    6
    >>> # reduce along the first dimension
    >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> tf.reduce_sum(x, 0).numpy()
    array([2, 2, 2], dtype=int32)
    >>> # reduce along the second dimension
    >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
    >>> tf.reduce_sum(x, 1).numpy()
    array([3, 3], dtype=int32)
    >>> # keep the original dimensions
    >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
    array([[3],
           [3]], dtype=int32)
    >>> # reduce along both dimensions
    >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
    >>> # or, equivalently, reduce along rows, then reduce the resultant array
    >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
    >>> # 2 + 2 + 2 = 6
    >>> tf.reduce_sum(x, [0, 1]).numpy().item()
    6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))


def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))


@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.
B Br`   zmath.reduce_euclidean_normc                 t    t        |      }t        ||t        j                  | t	        | |      ||            S )a  Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
  tf.math.reduce_euclidean_norm(x)  # returns 4 as dtype is tf.int32
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(y, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(y, [0, 1])  # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  r'   )rQ  r  r   euclidean_normr  r  s       rU   reduce_euclidean_normr    s?    J (^(	!!
|T:H
 r`   zmath.count_nonzerocount_nonzeroz1reduction_indices is deprecated, use axis insteadr  c                     t        j                  d|d|      }t        j                  d|d|       } t        j                  d|d|      }t        | ||||      S )a,  Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.
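
  For example (illustrative; the check is exact equality to zero):

  >>> tf.math.count_nonzero(tf.constant([0.0, 1e-12])).numpy()
  1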

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  r  r  rl   r  r/   r  )r   ri   count_nonzero_v2)r  r/   r  r+   r(   r  r  rl   s           rU   r  r  	  sl    D 33J4?L(778F8DF, 
	/	/0C0A
C$ 
,ht	DDr`   c           	         |d}t        j                  |d| g      5  t        j                  | d      } | j                  t        j
                  k(  r| }n7t        j                  g | j                        }t        j                  | |      }t        t        t        |t        j                        ||      |      cddd       S # 1 sw Y   yxY w)ac  Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input), rank(input))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  NFr  rl   r'   r-   r/   r  )r   r3   r4   r+   r   rQ  r   zerosr   r_  r6   r  r>   )rl   r/   r  r+   r(   	predicatezeros          rU   r  r  [	  s    j H
~~dOeW5 !!%g6E {{fkk!i __Ru{{3d((5iFLL)		
   s   BCCzmath.reduce_meanreduce_meanc                 ~    t        j                  d|d|      }t        j                  d|d|      }t        | |||      S )aS  Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_mean_v1r  	  sM    v 
	/	/0C0A
C$ 33J4?L(	\44	88r`   c                 |    |dn
t        |      }t        ||t        j                  | t	        | |      ||            S )a  Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis` by computing the
  mean of elements across the dimensions in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  Fr'   )rQ  r  r   meanr  r  s       rU   r  r  	  sF    h &UDN(	
|T:H
 r`   zmath.reduce_variancec                 .   |r|nd}t        j                  |      5  t        j                  |       } t        | |d      }|j                  j
                  rt        d|j                   d      | |z
  }|j                  j                  rU|j                  j                  }t        j                  t        j                  t        j                  |      |      |      }nt        j                  |      }t        |||      cddd       S # 1 sw Y   yxY w)a  Computes the variance of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_variance(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
  >>> tf.math.reduce_variance(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
  >>> tf.math.reduce_variance(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note,  for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.var

  Please note `np.var` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
  @end_compatibility
  reduce_varianceTr  z<Input must be either real or complex. Received integer type r   r   N)r   r3   r4   r  r+   r   r   r|   r~   r   r   r   conjsquare)r  r/   r  r(   meansdiffr~   squared_deviationss           rU   r  r  &
  s    X ,$
~~d I((6L4$?E{{ //4{{m1> ? ?%Dzz ::((j',,


<,,T2D
9
L (..t4)xHI I Is   C%DDzmath.reduce_stdc                     |r|nd}t        j                  |      5  t        j                  |       } t        | ||      }t	        j
                  |      cddd       S # 1 sw Y   yxY w)a  Computes the standard deviation of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_std(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
  >>> tf.math.reduce_std(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
  >>> tf.math.reduce_std(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note,  for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.std

  Please note `np.std` has a `dtype` parameter that could be used to specify the
  output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_std` has aggressive type inference from `input_tensor`.
  @end_compatibility
  
reduce_stdr  N)r   r3   r4   r  r   sqrt)r  r/   r  r(   variances        rU   r  r  e
  s[    X <$
~~d '((6L|$JHX&' ' 's   8AA'zmath.reduce_prodreduce_prodc                 |    |dn
t        |      }t        ||t        j                  | t	        | |      ||            S )a,  Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keepdims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[1., 2.], [3., 4.]])
    >>> tf.math.reduce_prod(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
    >>> tf.math.reduce_prod(x, 0)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
    >>> tf.math.reduce_prod(x, 1)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
    dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  Fr'   )rQ  r  r   prodr  r  s       rU   r  r  
  sF    R &UDN(	
|T:H
 r`   c                 ~    t        j                  d|d|      }t        j                  d|d|      }t        | |||      S )a  Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[1., 2.], [3., 4.]])
    >>> tf.math.reduce_prod(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=24.>
    >>> tf.math.reduce_prod(x, 0)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
    >>> tf.math.reduce_prod(x, 1)
    <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
    dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_prod_v1r  
  sM    f 
	/	/0C0A
C$ 33J4?L(	\44	88r`   zmath.reduce_min
reduce_minc                 ~    t        j                  d|d|      }t        j                  d|d|      }t        | |||      S )a  Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

    >>> x = tf.constant([5, 1, 2, 4])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=1>
    >>> x = tf.constant([-5, -1, -2, -4])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=-5>
    >>> x = tf.constant([4, float('nan')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('nan'), float('nan')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('-inf'), float('inf')])
    >>> tf.reduce_min(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=-inf>

  See the numpy docs for `np.amin` and `np.nanmin` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_min_v1r    M    p 
	/	/0C0A
C$ 33J4?L(	L$$	77r`   c                 |    |dn
t        |      }t        ||t        j                  | t	        | |      ||            S )a   Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> a = tf.constant([
  ...   [[1, 2], [3, 4]],
  ...   [[1, 2], [3, 4]]
  ... ])
  >>> tf.reduce_min(a)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  Choosing a specific axis returns minimum element in the given axis:

  >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.reduce_min(b, axis=0)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
  >>> tf.reduce_min(b, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>

  Setting `keepdims` to `True` retains the dimension of `input_tensor`:

  >>> tf.reduce_min(a, keepdims=True)
  <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
  >>> tf.math.reduce_min(a, axis=0, keepdims=True)
  <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
  array([[[1, 2],
          [3, 4]]], dtype=int32)>

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  Fr'   )rQ  r  r   _minr  r  s       rU   r  r  D  sF    p &UDN(	
|T:H
 r`   zmath.reduce_max
reduce_maxc                 ~    t        j                  d|d|      }t        j                  d|d|      }t        | |||      S )a  Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

    >>> x = tf.constant([5, 1, 2, 4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=5>
    >>> x = tf.constant([-5, -1, -2, -4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
    >>> x = tf.constant([4, float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('nan'), float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('-inf'), float('inf')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_max_v1r    r  r`   c           
      4    t        | |||t        | |            S )a>  Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  Usage example:

    >>> x = tf.constant([5, 1, 2, 4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=5>
    >>> x = tf.constant([-5, -1, -2, -4])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=int32, numpy=-1>
    >>> x = tf.constant([4, float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('nan'), float('nan')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=nan>
    >>> x = tf.constant([float('-inf'), float('inf')])
    >>> tf.reduce_max(x)
    <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  )reduce_max_with_dimsr  r  s       rU   r  r    s&    \ 
lD(D,\4@
B Br`   c           
      h    |dn
t        |      }t        ||t        j                  | |||            S r  )rQ  r  r   _maxr  s        rU   r  r    r  r`   zmath.reduce_all
reduce_allc                 ~    t        j                  d|d|      }t        j                  d|d|      }t        | |||      S )a  Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[True,  True], [False, False]])
    >>> tf.math.reduce_all(x)
    <tf.Tensor: shape=(), dtype=bool, numpy=False>
    >>> tf.math.reduce_all(x, 0)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
    >>> tf.math.reduce_all(x, 1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
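
    The `keepdims` flag retains the reduced axis (a brief sketch):

    >>> tf.math.reduce_all(x, 1, keepdims=True)
    <tf.Tensor: shape=(2, 1), dtype=bool, numpy=
    array([[ True],
           [False]])>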

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_all_v1r    M    d 
	/	/0C0A
C$ 33J4?L(	L$$	77r`   c                 |    |dn
t        |      }t        ||t        j                  | t	        | |      ||            S )a(  Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[True,  True], [False, False]])
    >>> tf.math.reduce_all(x)
    <tf.Tensor: shape=(), dtype=bool, numpy=False>
    >>> tf.math.reduce_all(x, 0)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
    >>> tf.math.reduce_all(x, 1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._all(input_tensor, _ReductionDims(input_tensor, axis),
                        keepdims, name=name))


@tf_export(v1=["math.reduce_any", "reduce_any"])
@dispatch.add_dispatch_support
def reduce_any_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_or` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[True,  True], [False, False]])
    >>> tf.reduce_any(x)
    <tf.Tensor: shape=(), dtype=bool, numpy=True>
    >>> tf.reduce_any(x, 0)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
    >>> tf.reduce_any(x, 1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
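
    Negative axes count from the last dimension (a brief sketch):

    >>> tf.reduce_any(x, -1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>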

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_any_v1r  k  r  r`   c                 |    |dn
t        |      }t        ||t        j                  | t	        | |      ||            S )a  Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_or` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

    >>> x = tf.constant([[True,  True], [False, False]])
    >>> tf.reduce_any(x)
    <tf.Tensor: shape=(), dtype=bool, numpy=True>
    >>> tf.reduce_any(x, 0)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>
    >>> tf.reduce_any(x, 1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._any(input_tensor, _ReductionDims(input_tensor, axis),
                        keepdims, name=name))


@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@dispatch.add_dispatch_support
def reduce_logsumexp_v1(input_tensor,
                        axis=None,
                        keepdims=None,
                        name=None,
                        reduction_indices=None,
                        keep_dims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```
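
  The stability matters for large inputs, where the naive form overflows
  (a brief sketch; values assume float32):

  ```python
  x = tf.constant([1000.0, 1000.0])
  tf.math.log(tf.reduce_sum(tf.exp(x)))  # inf, since exp(1000.) overflows
  tf.reduce_logsumexp(x)                 # ~1000.693 = 1000 + log(2)
  ```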

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  r/   r  r  r  )r   ri   r  r  s         rU   reduce_logsumexp_v1r    sM    b 
	/	/0C0A
C$ 33J4?L(	,h	==r`   c                    t        j                  |d| g      5 }t        | |d      }t        j                  t        j                  t        j                  |      |d            }t        j                  t        t        t        | |            ||            }|s)t        j                  |t        j                  |            }t        |||      }t!        |||      cddd       S # 1 sw Y   yxY w)aG  Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    raw_max = reduce_max(input_tensor, axis=axis, keepdims=True)
    my_max = array_ops.stop_gradient(
        gen_math_ops.select_v2(
            gen_math_ops.is_finite(raw_max), raw_max,
            array_ops.zeros_like(raw_max)))
    result = gen_math_ops.log(
        reduce_sum(
            exp(subtract(input_tensor, my_max)), axis, keepdims=keepdims))
    if not keepdims:
      my_max = array_ops.reshape(my_max, array_ops.shape(result))
    result = add(result, my_max, name=name)
    return _may_reduce_to_scalar(keepdims, axis, result)


@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  x = tf.constant([[1, 2], [3, 4]])
  tf.linalg.trace(x)  # 5

  x = tf.constant([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
  tf.linalg.trace(x)  # 15

  x = tf.constant([[[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]],
                   [[-1, -2, -3],
                    [-4, -5, -6],
                    [-7, -8, -9]]])
  tf.linalg.trace(x)  # [15, -15]
  ```
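
  Equivalently, the trace can be written as a reduced sum over the diagonal
  (a brief sketch):

  ```python
  x = tf.constant([[1, 2], [3, 4]])
  tf.reduce_sum(tf.linalg.diag_part(x))  # 5, same as tf.linalg.trace(x)
  ```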

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return reduce_sum(array_ops.matrix_diag_part(x), [-2, -1], name=name)


@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           output_type=None,
           grad_a=False,
           grad_b=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication dimensions,
  and any further outer dimensions specify matching batch size.

  Both matrices must be of the same type. The supported types are:
  `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,
  `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  A simple 2-D tensor matrix multiplication:

  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  >>> a  # 2-D tensor
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>
  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  >>> b  # 2-D tensor
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[ 7,  8],
         [ 9, 10],
         [11, 12]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 58,  64],
         [139, 154]], dtype=int32)>

  A batch matrix multiplication with batch shape [2]:

  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> a  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
  >>> b  # 3-D tensor
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[13, 14],
          [15, 16],
          [17, 18]],
         [[19, 20],
          [21, 22],
          [23, 24]]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 94, 100],
          [229, 244]],
         [[508, 532],
          [697, 730]]], dtype=int32)>

  Since python >= 3.5 the @ operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:

  >>> d = a @ b @ [[10], [11]]
  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
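
  Transposition on the fly (a brief sketch; equivalent to multiplying by
  `tf.transpose(b)`):

  >>> a = tf.constant([[1., 2.], [3., 4.]])
  >>> b = tf.constant([[5., 6.], [7., 8.]])
  >>> tf.matmul(a, b, transpose_b=True)
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[17., 23.],
         [39., 53.]], dtype=float32)>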

  Args:
    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `tf.Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `a` are zero. See
      `tf.sparse.sparse_dense_matmul` for some support for
      `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `b` are zero. See
      `tf.sparse.sparse_dense_matmul` for some support for
      `tf.sparse.SparseTensor` multiplication.
    output_type: The output datatype if needed. Defaults to None in which case
      the output_type is the same as input type. Currently only works when input
      tensors are type (u)int8 and output_type can be int32.
    grad_a: Set it to `True` to hint that Tensor `a` is for the backward pass.
    grad_b: Set it to `True` to hint that Tensor `b` is for the backward pass.
    name: Name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
    for all indices `i`, `j`.

    Note: This is matrix product, not element-wise product.


  Raises:
    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
      `adjoint_b` are both set to `True`.
    TypeError: If output_type is specified but the types of `a`, `b` and
      `output_type` is not (u)int8, (u)int8 and int32.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    if transpose_a and adjoint_a:
      raise ValueError(
          f"Only one of `transpose_a` and `adjoint_a` can be True. "
          f"Received `transpose_a`={transpose_a}, `adjoint_a`={adjoint_a}.")
    if transpose_b and adjoint_b:
      raise ValueError(
          f"Only one of `transpose_b` and `adjoint_b` can be True. "
          f"Received `transpose_b`={transpose_b}, `adjoint_b`={adjoint_b}.")

    if context.executing_eagerly():
      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
        a = ops.convert_to_tensor(a, name="a")
      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
    else:
      a = ops.convert_to_tensor(a, name="a")
      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")

    a_shape = a._shape_tuple()  # pylint: disable=protected-access
    b_shape = b._shape_tuple()  # pylint: disable=protected-access

    output_may_have_non_empty_batch_shape = (
        (a_shape is None or len(a_shape) > 2) or
        (b_shape is None or len(b_shape) > 2))

    # batch_mat_mul_v3 supports an output type different from the input type.
    use_batch_matmul_v3 = False
    if output_type and (output_type != a.dtype or output_type != b.dtype):
      use_batch_matmul_v3 = True

    if (not a_is_sparse and
        not b_is_sparse) and output_may_have_non_empty_batch_shape:
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      if use_batch_matmul_v3:
        return gen_math_ops.batch_mat_mul_v3(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type,
            grad_x=grad_a, grad_y=grad_b, name=name)
      return gen_math_ops.batch_mat_mul_v2(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, grad_x=grad_a,
          grad_y=grad_b, name=name)

    # Neither matmul nor sparse_matmul support adjoint, so we conjugate the
    # matrix and use transpose instead. Conj() is a noop for real matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True

    use_sparse_matmul = False
    if a_is_sparse or b_is_sparse:
      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
      use_sparse_matmul = (
          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
    if (((a.dtype == dtypes.bfloat16 and
          b.dtype not in (dtypes.int8, dtypes.uint8)) or
         (b.dtype == dtypes.bfloat16 and
          a.dtype not in (dtypes.int8, dtypes.uint8))) and
        a.dtype != b.dtype):
      # matmul currently doesn't handle mixed-precision inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      ret = sparse_matmul(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b,
          a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name)
      # sparse_matmul always returns float32, even with bfloat16 inputs.
      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
        ret = cast(ret, dtypes.bfloat16)
      return ret
    if use_batch_matmul_v3:
      adjoint_a = adjoint_a or transpose_a
      adjoint_b = adjoint_b or transpose_b
      return gen_math_ops.batch_mat_mul_v3(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type,
          grad_x=grad_a, grad_y=grad_b, name=name)
    return gen_math_ops.mat_mul(
        a, b, transpose_a=transpose_a, transpose_b=transpose_b,
        grad_a=grad_a, grad_b=grad_b, name=name)
@tf_export("linalg.matvec")
@dispatch.add_dispatch_support
def matvec(a,
           b,
           transpose_a=False,
           adjoint_a=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.

  The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
  with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
  with `shape(b)[:-1]`.

  Both `a` and `b` must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the inputs contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices/vectors (rank-2/1
  tensors) with datatypes `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`
  # [[1, 2, 3],
  #  [4, 5, 6]]
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])

  # 1-D tensor `b`
  # [7, 9, 11]
  b = tf.constant([7, 9, 11], shape=[3])

  # `a` * `b`
  # [ 58,  139]
  c = tf.linalg.matvec(a, b)


  # 3-D tensor `a`
  # [[[ 1,  2,  3],
  #   [ 4,  5,  6]],
  #  [[ 7,  8,  9],
  #   [10, 11, 12]]]
  a = tf.constant(np.arange(1, 13, dtype=np.int32),
                  shape=[2, 2, 3])

  # 2-D tensor `b`
  # [[13, 14, 15],
  #  [16, 17, 18]]
  b = tf.constant(np.arange(13, 19, dtype=np.int32),
                  shape=[2, 3])

  # `a` * `b`
  # [[ 86, 212],
  #  [410, 563]]
  c = tf.linalg.matvec(a, b)
  ```
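
  A small concrete sketch of the same computation:

  ```python
  a = tf.constant([[1., 2.], [3., 4.]])
  b = tf.constant([10., 20.])
  tf.linalg.matvec(a, b)  # [1*10 + 2*20, 3*10 + 4*20] = [50., 110.]
  ```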

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type as `a` and compatible dimensions.
    transpose_a: If `True`, `a` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
    the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
    all transpose or adjoint attributes are `False`:

    `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.

    Note: This is matrix-vector product, not element-wise product.


  Raises:
    ValueError: If transpose_a and adjoint_a are both set to True.
  """
  with ops.name_scope(name, "MatVec", [a, b]) as name:
    output = matmul(
        a,
        array_ops.expand_dims(b, axis=-1),
        transpose_a=transpose_a,
        adjoint_a=adjoint_a,
        a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse)
    return array_ops.squeeze(output, axis=-1)


def matmul_wrapper(a, b, name=None):
  """Wrapper over `matmul` that dispatches on NumPy-style type promotion."""
  if ops.is_numpy_style_type_promotion():
    return a._matmul(b)  # pylint: disable=protected-access
  return matmul(a, b, name=name)
matmul_wrapper.__doc__ = matmul.__doc__


sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
    gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)


def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.
  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  zNot a Tensor or IndexedSlices: r   optimizer   )
r   r   r   r   r   r   typer   shape_internalr=   )ry   r  x_shapes      rU   _as_indexed_slicesr    s    & 
A
))>+G+GH	I
5d1gYa@
AA>//0H$$Q:'		%	%aq'!*)=w	GGr`   c           
         t        | t        t        f      st        dt	        |        d      | D cg c]  }t        ||       }}|D cg c]6  }|j                  j                  t        j                  k(  s+|j                  8 }}|rt        |      t        |      k(  r|S g }|D ]  }|j                  j                  t        j                  k(  r]|j                  t        j                  |j                  t        |j                  t        j                         |j"                               |j                  |        |S c c}w c c}w )a  Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  zExpected a list or tuple, not r   r  )r   listtupler   r  r  r   r+   r   r  r  appendr   r   r   r6   r>   r   )inputsr  ioutputsowith_int32_indexcasted_outputss          rU   _as_indexed_slices_listr%    s     
FT5M	*
4T&\N!D
EE?EF!H5F'F AIIOOv||$Caii  
S!12c'lBN. ayy&,,&

&
&qxxaii1N'(}}67 A 
 Gs   E,E
:E
zmath.addrC  c                    t        j                  |d| g      5 }t        j                  | d      } t        j                  || j                  j                  d      }| j                  t
        j                  k(  r!t        j                  | ||      cddd       S t        j                  | ||      cddd       S # 1 sw Y   yxY w)a*  Returns x + y element-wise.

  Example usages below.

  Add a scalar and a list:

  >>> x = [1, 2, 3, 4, 5]
  >>> y = 1
  >>> tf.add(x, y)
  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
  dtype=int32)>

  Note that binary `+` operator can be used instead:

  >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])
  >>> y = tf.convert_to_tensor(1)
  >>> x + y
  <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
  dtype=int32)>

  Add a tensor and a list of same shape:

  >>> x = [1, 2, 3, 4, 5]
  >>> y = tf.constant([1, 2, 3, 4, 5])
  >>> tf.add(x, y)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 2,  4,  6,  8, 10], dtype=int32)>

  **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a
  non-tensor, the non-tensor input will adopt (or get casted to) the data type
  of the tensor input. This can potentially cause unwanted overflow or underflow
  conversion.

  For example,

  >>> x = tf.constant([1, 2], dtype=tf.int8)
  >>> y = [2**7 + 1, 2**7 + 2]
  >>> tf.add(x, y)
  <tf.Tensor: shape=(2,), dtype=int8, numpy=array([-126, -124], dtype=int8)>

  When adding two input values of different shapes, `Add` follows NumPy
  broadcasting rules. The two input array shapes are compared element-wise.
  Starting with the trailing dimensions, the two dimensions either have to be
  equal or one of them needs to be `1`.

  For example,

  >>> x = np.ones(6).reshape(1, 2, 1, 3)
  >>> y = np.ones(6).reshape(2, 1, 3, 1)
  >>> tf.add(x, y).shape.as_list()
  [2, 2, 3, 3]

  Another example with two arrays of different dimension.

  >>> x = np.ones([1, 2, 1, 4])
  >>> y = np.ones([3, 4])
  >>> tf.add(x, y).shape.as_list()
  [1, 2, 3, 4]

  The reduction version of this elementwise operation is `tf.math.reduce_sum`

  Args:
    x: A `tf.Tensor`. Must be one of the following types: bfloat16, half,
      float16, float32, float64, uint8, uint16, uint32, uint64, int8, int16,
      int32, int64, complex64, complex128, string.
    y: A `tf.Tensor`. Must have the same type as x.
    name: A name for the operation (optional)
  """
  with ops.name_scope(name, "Add", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    if x.dtype == dtypes.string:
      return gen_math_ops.add(x, y, name=name)
    else:
      return gen_math_ops.add_v2(x, y, name=name)


@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support(iterable_parameters=["inputs"])
def add_n(inputs, name=None):
  """Returns the element-wise sum of a list of tensors.

  All inputs in the list must have the same shape. This op does not
  [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
  instead.

  For example:

  >>> a = tf.constant([[3, 5], [4, 8]])
  >>> b = tf.constant([[1, 6], [2, 9]])
  >>> tf.math.add_n([a, b, a]).numpy()
  array([[ 7, 16],
         [10, 25]], dtype=int32)

  See Also:

  * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
    operation, but `tf.add_n` may be more efficient because it sums the
    tensors directly. `reduce_sum` on the other hand calls
    `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
    into a single tensor before summing.

  Args:
    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
      same shape and type. `tf.IndexedSlices` objects will be converted into
      dense tensors prior to adding.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if not inputs or not isinstance(inputs, collections.abc.Iterable):
    raise ValueError("Inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape.")
  inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(
      isinstance(x, (ops.Tensor, indexed_slices.IndexedSlices))
      for x in inputs):
    raise ValueError("Inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape.")

  if len(inputs) == 1:
    if isinstance(inputs[0], indexed_slices.IndexedSlices):
      values = ops.convert_to_tensor(inputs[0])
    else:
      values = inputs[0]
    if name:
      return array_ops.identity(values, name=name)
    return values
  return gen_math_ops.add_n(inputs, name=name)


@tf_export(v1=["math.accumulate_n", "accumulate_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated(None, "Use `tf.math.add_n` Instead")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.
a  Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  For example:

  >>> a = tf.constant([[1, 2], [3, 4]])
  >>> b = tf.constant([[5, 0], [0, 6]])
  >>> tf.math.accumulate_n([a, b, a]).numpy()
  array([[ 7, 4],
         [ 6, 14]], dtype=int32)

  >>> # Explicitly pass shape and type
  >>> tf.math.accumulate_n(
  ...     [a, b, a], shape=[2, 2], tensor_dtype=tf.int32).numpy()
  array([[ 7,  4],
         [ 6, 14]], dtype=int32)

  Note: The input must be a list or tuple. This function does not handle
  `IndexedSlices`

  See Also:

  * `tf.reduce_sum(inputs, axis=0)` - This performe the same mathematical
    operation, but `tf.add_n` may be more efficient because it sums the
    tensors directly. `reduce_sum` on the other hand calls
    `tf.convert_to_tensor` on the list of tensors, unncessairly stacking them
    into a single tensor before summing.
  * `tf.add_n` - This is another python wrapper for the same Op. It has
    nearly identical functionality.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Expected shape of elements of `inputs` (optional). Also controls the
      output shape of this op, which may affect type inference in other ops. A
      value of `None` means "infer the input shape from the shapes in `inputs`".
    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
      means "infer the input dtype from `inputs[0]`".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  c                      t        d      S )NzJinputs must be a list of at least one Tensor with the same dtype and shape)r   ra   r`   rU   _input_errorz"accumulate_n.<locals>._input_error  s     - . .r`   c              3   P   K   | ]  }t        |t        j                           y wr[   )r   r   r   r,  s     rU   rv  zaccumulate_n.<locals>.<genexpr>  s     >!Z:,,->s   $&c              3   V   K   | ]   }|j                   d    j                   k(   " yw)r   Nr-   )rs  ry   r  s     rU   rv  zaccumulate_n.<locals>.<genexpr>  s"     8AQWWq	'8s   &)r   zThe `tensor_dtype` argument is z, but `input` is of type zA. These must be equal. Try casting the input to the desired type.r1   r'   )r   r  r  r   r.  ry  r   as_shapeunknown_shaper   r   
merge_withr   r+   r   r  r   r/  r(  )r  r8   tensor_dtyper(   r3  r  s   `     rU   r0  r0    sT   j. 
z&4-8
.??G&	>v>	>
.	88	8
.
!!%(E&&(E 9l,
 1 12|5578e9
 ,&)//"A

), 8q	  !	 ! !
 	[A$,!9
6{aD,fQid33	vD	!!r`   AccumulateNV2c                 4    |gt        | j                        z  S )z=Same as gradient for AddN. Copies the gradient to all inputs.)r  r  )opgrads     rU   _accumulate_n_gradr>    s     #bii.	  r`   zmath.sigmoidz
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.

  For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.

  Example Usage:

  If a positive number is large, then its sigmoid will approach to 1 since the
  formula will be `y = <large_num> / (1 + <large_num>)`

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([0.5, 0.7310586, 1.0, 1.0], dtype=float32)>

  If a negative number is large, its sigmoid will approach to 0 since the
  formula will be `y = 1 / (1 + <large_num>)`

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
        dtype=float32)>

  Args:
    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  Usage Example:

  >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
  >>> tf.sigmoid(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0. , 0.5, 1. ], dtype=float32)>

  @compatibility(scipy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.sigmoid(x, name=name)


@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.
  Specifically, `y = log(1 / (1 + exp(-x)))`.  For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  Usage Example:

  If a positive number is large, then its log_sigmoid will approach to 0 since
  the formula will be `y = log( <large_num> / (1 + <large_num>) )` which
  approximates to `log (1)` which is 0.

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
        dtype=float32)>

  If a negative number is large, its log_sigmoid will approach to the number
  itself since the formula will be `y = log( 1 / (1 + <large_num>) )` which is
  `log (1) - log ( (1 + <large_num>) )` which approximates to `- <large_num>`
  that is the number itself.

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-100.       ,  -50.       ,   -1.3132616,   -0.6931472],
        dtype=float32)>
  
LogSigmoidry   r'   N)r   r3   r4   r   negr   r   r   s     rU   rB  rB  J  sa    P ~~dL1#. @$ac*AJ//3$?@ @ @s   AA%%A.zmath.cumsumcumsumc                     t        j                  |d| g      5 }t        j                  | d      } t        j                  | ||||      cddd       S # 1 sw Y   yxY w)a	  Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  For example:

  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 2,  6, 12, 20], dtype=int32)>

  >>> # using varying `axis` values
  >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
  >>> tf.cumsum(y, axis=0)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  4,  6,  8],
         [ 3,  7, 11, 15]], dtype=int32)>
  >>> tf.cumsum(y, axis=1)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  6, 12, 20],
         [ 1,  4,  9, 16]], dtype=int32)>

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:

  >>> # tf.cumsum([a, b, c], exclusive=True)  => [0, a, a + b]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 0,  2,  6, 12], dtype=int32)>

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:

  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([20, 18, 14,  8], dtype=int32)>

  This is more efficient than using separate `tf.reverse` ops.
  The `reverse` and `exclusive` kwargs can also be combined:

  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([18, 14,  8,  0], dtype=int32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the
  first element of the input is identical to the first element of the output:

  ```python
  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed
  instead:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:

  ```python
  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.
  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.cumulative_logsumexp")
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative log-sum-exp of the tensor `x` along the `axis`.

  By default, this operation performs an inclusive cumulative log-sum-exp, which
  means that the first element of the input is identical to the first element of
  the output.

  This operation is significantly more numerically stable than the equivalent
  Tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However, note
  that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
  for a given element, as it applies the "log-sum-exp trick" in a different
  way.

  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:

  ```
  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
  ```

  it cannot be directly used here as there is no fast way of applying it
  to each prefix `x[:i]`. Instead, this function implements a prefix
  scan using pairwise log-add-exp, which is a commutative and associative
  (up to floating point precision) operator:

  ```
  log_add_exp(x, y) = log(exp(x) + exp(y))
                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
  ```

  However, reducing using the above operator leads to a different computation
  tree (logs are taken repeatedly instead of only at the end), and the maximum
  is only computed pairwise instead of over the entire prefix. In general, this
  leads to a different and slightly less precise computation.
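
  For example (a brief numeric sketch):

  ```python
  x = tf.constant([0., 0., 0.])
  tf.math.cumulative_logsumexp(x)  # [log(1), log(2), log(3)]
                                   # ~= [0., 0.693, 1.099]
  ```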

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)


@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  """Returns the complex conjugate of a complex number.
  Given a tensor `x` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `x`. The
  complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
  real part and `b` is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  If `x` is real, it is returned unchanged.

  For example:

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate.  Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  if isinstance(x, ops.Tensor):
    dt = x.dtype
    if dt.is_floating or dt.is_integer:
      return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex or x.dtype == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    elif x.dtype.is_floating or x.dtype.is_integer:
      return x
    else:
      raise TypeError(
          f"Expected numeric or variant tensor, got dtype {x.dtype!r}.")


def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
  r-   r1   )out_type)r   constant_valuer   r   r  r   r4   r   rT   r+   r8   r   dynamic_stitchr=   ones)input_shaperf   constant_input_shapeconstant_axes
input_rank
axes_shapes         rU   reduced_shaperb  h  s     %33K@%..t4M hh}BHH=mXX&:"((K,-=)!!			t	$$~~kDJJ?*

z	)$t$*		)	)Z$

..;+<+<
=
 r`   c           	         t        j                  |      }t        j                  |      }t        j                  || j
                        }t        j                  |||      }t        j                  |t        j                     t        j                  t        j                  |       t        j                  |      z
  g|j
                        gd      }t        j                  ||      }t        j                  |d      S )am  Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number of segment entries with 0-entries set to 1 to allow
  division by N.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`. The
      values must be in the range `[0, num_segments)`. The values are always
      validated to be in range on CPU, never validated on TPU/GPU.
    num_segments: An integer scalar `Tensor`. The number of distinct segment
      IDs.

  Returns:
    A `Tensor` with the number of segment entries with 0-entries set to 1.
  r-   r   r.   r1   )r   r4   r   r  r\  r+   r   unsorted_segment_sumrA   newaxisr  r@   r<   )datasegment_idsnum_segmentssegment_ids_shapeones_tensornbroadcastable_shapes          rU   _unsorted_segment_Nrm    s    " &&|4,..{;0

C+''[,O!!((I%%&~~y~~d+"45 6(..01  ./!			a	##r`   zmath.unsorted_segment_meanunsorted_segment_meanc                     t        j                  |d      5  t        j                  |       } t        j                  |      }t        | ||      }t	        j
                  | ||      }||z  cddd       S # 1 sw Y   yxY w)a  Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of
  occurrences of id \\i\\.
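
  For example (a brief sketch):

  >>> data = tf.constant([1.0, 2.0, 3.0, 4.0])
  >>> tf.math.unsorted_segment_mean(data, tf.constant([0, 0, 1, 1]), 2)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 3.5], dtype=float32)>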

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
  does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices
  result in safe but unspecified behavior, which may include ignoring
  out-of-bound indices or outputting a tensor with a 0 stored in the first
  dimension of its shape if `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be less than `num_segments`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  UnsortedSegmentMeanN)r   r3   r4   rm  r   rd  rf  rg  rh  r(   Nsummeds         rU   rn  rn    sr    ^ ~~d12   &D''4KD+|<A..t[,OFA:  s   AA44A=zmath.unsorted_segment_sqrt_nunsorted_segment_sqrt_nc                 &   t        j                  |d      5  t        j                  |       } t        j                  |      }t        | ||      }t	        j
                  | ||      }|t	        j                  |      z  cddd       S # 1 sw Y   yxY w)a  Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Additionally to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the
  number of occurrences of id \\i\\.

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Caution: On CPU, values in `segment_ids` are always validated to be less than
  `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
  does not throw an error for out-of-bound indices. On Gpu, out-of-bound indices
  result in safe but unspecified behavior, which may include ignoring
  out-of-bound indices or outputting a tensor with a 0 stored in the first
  dimension of its shape if `num_segments` is 0.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
      The values must be in the range `[0, num_segments)`.
      The values are always validated to be in range on CPU,
      never validated on GPU.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  UnsortedSegmentSqrtNN)r   r3   r4   rm  r   rd  r  rq  s         rU   rt  rt    s~    d ~~d23 )  &D''4KD+|<A..t[,OFL%%a(() ) )s   A&BBzsparse.segment_sumsparse_segment_sumc                 p    |t        j                  | |||||      S t        j                  | ||||      S )a  Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sum_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        sparse_gradient=sparse_gradient,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sum(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        sparse_gradient=sparse_gradient,
        name=name)


@tf_export("sparse.sampled_addmm", v1=[])
def sampled_addmm(indices,
                  values,
                  dense_shape,
                  mat1,
                  mat2,
                  beta=1.0,
                  alpha=1.0,
                  output_type=dtypes.float32):
  """Performs the sampled matrix multiplication of two dense matrices.

  Multiplies matrix `mat1` by matrix `mat2` at the locations defined by
  `indices`. The product is scaled and added to `values`,
  producing `alpha` * (`mat1` @ `mat2`) * spy(`indices`) + `beta` * `values`.

  The function `spy(indices)` is the sparsity pattern matrix derived from
  `indices`.

  The `mat1` and `mat2` inputs must be tensors of rank >= 2 where the inner 2
  dimensions specify valid matrix multiplication dimensions, and any further
  dimensions specify matching batch size.

  The `indices`, `values`, and `dense_shape` inputs make up the components of a
  `SparseTensor` which defines the sparsity pattern of the output. The sparsity
  pattern has values of 1 at the positions defined by the `SparseTensor`, and 0
  elsewhere.

  The `alpha` and `beta` inputs are the scaling factors.

  The supported types for `values`, `mat1`, and `mat2` are:
  `bfloat16`, `float16`, `float32`, `float64`.

  A simple 2-D tensor operation:

  >>> indices = tf.constant([0, 0, 1, 1], shape=[2, 2])
  >>> indices
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[0, 0],
         [1, 1]], dtype=int32)>
  >>> values = tf.constant([0.5, 0.3])
  >>> values
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.3], dtype=float32)>
  >>> dense_shape = tf.constant([2, 2])
  >>> dense_shape
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  >>> mat1 = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=tf.float32)
  >>> mat1
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[1., 2., 3.],
         [4., 5., 6.]], dtype=float32)>
  >>> mat2 = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=tf.float32)
  >>> mat2
  <tf.Tensor: shape=(3, 2), dtype=float32, numpy=
  array([[ 7.,  8.],
         [ 9., 10.],
         [11., 12.]], dtype=float32)>
  >>> tf.sparse.sampled_addmm(indices, values, dense_shape, mat1, mat2,
  ... alpha=0.75, beta=0.25)
  (<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[0, 0],
         [1, 1]], dtype=int32)>, <tf.Tensor: shape=(2,), dtype=float32, numpy=
  array([ 43.625, 115.575], dtype=float32)>,
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>)
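
  The output values above can be checked by hand (a worked instance of the
  formula): the sampled entry at `(0, 0)` is
  `beta * 0.5 + alpha * (mat1 row 0 . mat2 column 0)
  = 0.25 * 0.5 + 0.75 * (1*7 + 2*9 + 3*11) = 0.125 + 0.75 * 58 = 43.625`,
  and the entry at `(1, 1)` is
  `0.25 * 0.3 + 0.75 * (4*8 + 5*10 + 6*12) = 0.075 + 0.75 * 154 = 115.575`.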

  A batch operation:

  >>> indices = tf.constant([0, 1, 1, 0, 0, 0, 1, 0], shape=[2, 2, 2])
  >>> indices
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[0, 1],
          [1, 0]],
         [[0, 0],
          [1, 0]]], dtype=int32)>
  >>> values = tf.constant([3, 5, 2, 7], shape=[2, 2], dtype=tf.float32)
  >>> values
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[3., 5.],
         [2., 7.]], dtype=float32)>
  >>> dense_shape = tf.constant([2, 2])
  >>> dense_shape
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  >>> mat1 = tf.constant(np.arange(1, 13), shape=[2, 2, 3], dtype=tf.float32)
  >>> mat1
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.,  2.,  3.],
          [ 4.,  5.,  6.]],
         [[ 7.,  8.,  9.],
          [10., 11., 12.]]], dtype=float32)>
  >>> mat2 = tf.constant(np.arange(13, 25), shape=[2, 3, 2], dtype=tf.float32)
  >>> mat2
  <tf.Tensor: shape=(2, 3, 2), dtype=float32, numpy=
  array([[[13., 14.],
          [15., 16.],
          [17., 18.]],
         [[19., 20.],
          [21., 22.],
          [23., 24.]]], dtype=float32)>
  >>> tf.sparse.sampled_addmm(indices, values, dense_shape, mat1, mat2,
  ... alpha=0.75, beta=0.25)
  (<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[0, 1],
          [1, 0]],
         [[0, 0],
          [1, 0]]], dtype=int32)>, <tf.Tensor: shape=(2, 2), dtype=float32,
  numpy=array([[ 75.75, 173.  ],
         [381.5 , 524.5 ]], dtype=float32)>,
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>)

  Args:
    indices: `tf.Tensor` containing coordinates for the rows and columns to be
      multiplied. Must have rank > 1.
    values: `tf.Tensor` containing the values to be scaled and added to the
      sampled dot product.
    dense_shape: `tf.Tensor` defining the dense shape of the output.
    mat1: `tf.Tensor` to be multiplied. Must have rank > 1.
    mat2: `tf.Tensor` to be multiplied. Must have rank > 1.
    beta: Number to be multiplied with `values`. Defaults to 1.0.
    alpha: Number to be multiplied with the sampled dot product of `mat1` and
      `mat2`. Defaults to 1.0.
    output_type: The output datatype if needed. Defaults to float32.

  Returns:
    A tuple representing the `SparseTensor` components of the result of the
    operation.

  Raises:
    ValueError: If `dense_shape` does not match the shape of the product.
  """
  indices = ops.convert_to_tensor(indices)
  values = ops.convert_to_tensor(values, dtype=output_type)
  dense_shape = ops.convert_to_tensor(dense_shape, dtype=dtypes.int32)
  mat1 = ops.convert_to_tensor(mat1, dtype=output_type)
  mat2 = ops.convert_to_tensor(mat2, dtype=output_type)

  mat1_shape = tensor_util.constant_value(array_ops.shape(mat1))
  mat2_shape = tensor_util.constant_value(array_ops.shape(mat2))

  dense_rows = mat1_shape[-2]
  dense_cols = mat2_shape[-1]

  output_shape = array_ops.stack([dense_rows, dense_cols])
  condition = reduce_all(equal(dense_shape, output_shape))

  # Validate `dense_shape` against the shape implied by `mat1 @ mat2`.
  if context.executing_eagerly():
    if not condition:
      raise ValueError(
          f"Dense shape: {dense_shape} does not match output shape: "
          f"{output_shape}")
  else:
    dense_shape_static = tensor_util.constant_value(dense_shape)
    output_shape_static = tensor_util.constant_value(output_shape)
    if dense_shape_static is not None and output_shape_static is not None:
      condition_static = np.all(
          np.equal(dense_shape_static, output_shape_static))
      if not condition_static:
        raise ValueError(
            f"Dense shape: {dense_shape} does not match output shape: "
            f"{output_shape}")
    data = [
        "Dense shape: ", dense_shape, " does not match output shape: ",
        output_shape
    ]
    control_flow_assert.Assert(condition, data, None)

  # Gather the sampled rows of `mat1` and columns of `mat2`, then contract
  # the two gathered slices along their inner dimension.
  batch_indices = indices[..., :-2]
  row_indices = indices[..., :-1]
  col_indices = array_ops.concat([batch_indices, indices[..., -1:]], axis=-1)

  rank = tensor_util.constant_value(array_ops.rank(mat1))
  batch_dims = rank - 2

  mat1 = array_ops.gather_nd(mat1, row_indices, batch_dims=batch_dims)
  mat2 = array_ops.gather_nd(
      matrix_transpose(mat2), col_indices, batch_dims=batch_dims)

  dot = reduce_sum(mat1 * mat2, axis=-1)
  return (indices, dot * alpha + values * beta, dense_shape)


@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
                          indices,
                          segment_ids,
                          num_segments=None,
                          name=None,
                          sparse_gradient=False):
  """Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  return sparse_segment_sum(
      data,
      indices,
      segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient)


@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
def sparse_segment_mean(data,
                        indices,
                        segment_ids,
                        name=None,
                        num_segments=None,
                        sparse_gradient=False):
  """Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_mean_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        sparse_gradient=sparse_gradient,
        name=name)
  else:
    return gen_math_ops.sparse_segment_mean(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        sparse_gradient=sparse_gradient,
        name=name)


@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
                           indices,
                           segment_ids,
                           num_segments=None,
                           name=None,
                           sparse_gradient=False):
  """Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  return sparse_segment_mean(
      data,
      indices,
      segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient)


@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
def sparse_segment_sqrt_n(data,
                          indices,
                          segment_ids,
                          name=None,
                          num_segments=None,
                          sparse_gradient=False):
  """Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  `N` is the size of the segment being reduced.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (IndexedSlices) instead of dense
      (Tensor).

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  if num_segments is not None:
    return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        num_segments=num_segments,
        sparse_gradient=sparse_gradient,
        name=name)
  else:
    return gen_math_ops.sparse_segment_sqrt_n(
        data=data,
        indices=indices,
        segment_ids=segment_ids,
        sparse_gradient=sparse_gradient,
        name=name)


@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
                             indices,
                             segment_ids,
                             num_segments=None,
                             name=None,
                             sparse_gradient=False):
  """Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divide by `sqrt(N)` instead.
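
  For example (illustrative): with a single segment containing rows 0 and 1 of
  a matrix `c`, the result row is `(c[0] + c[1]) / sqrt(2)`.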

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
    sparse_gradient: An optional `bool`. Defaults to `False`. If `True`, the
      gradient of this function will be sparse (`IndexedSlices`) instead of
      dense (`Tensor`). The sparse gradient will contain one non-zero row for
      each unique index in `indices`.

  Returns:
    A `Tensor` with the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element of `segment_ids`.
  """
  return sparse_segment_sqrt_n(
      data,
      indices,
      segment_ids,
      name=name,
      num_segments=num_segments,
      sparse_gradient=sparse_gradient)


@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""
  Tensor contraction of a and b along specified axes and outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
  is equivalent to matrix multiplication.
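
  For instance (a small sketch of that equivalence; values chosen here only
  for illustration):

  ```python
  m1 = tf.constant([[1., 2.], [3., 4.]])
  m2 = tf.constant([[5., 6.], [7., 8.]])
  tf.tensordot(m1, m2, axes=1)  # Same result as tf.linalg.matmul(m1, m2):
                                # [[19., 22.], [43., 50.]]
  ```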

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  For example:

  ```python
  import numpy as np
  import tensorflow as tf

  a = np.arange(60).reshape(3, 4, 5)
  b = np.arange(24).reshape(4, 3, 2)
  c = tf.tensordot(a, b, axes=([1, 0], [0, 1]))
  c
  # <tf.Tensor: shape=(5, 2), dtype=int64, numpy=
  # array([[4400, 4730],
  #        [4532, 4874],
  #        [4664, 5018],
  #        [4796, 5162],
  #        [4928, 5306]])>

  # Another example
  d = tf.random.uniform((3, 4, 5))
  e = tf.random.uniform((5, 3, 2))
  f = tf.tensordot(d, e, axes=([2, 0], [0, 1]))
  f
  # <tf.Tensor: shape=(4, 2), dtype=float32, numpy=
  # array([[4.8271146, 4.493    ],
  #        [5.8537536, 5.492961 ],
  #        [5.2579894, 5.2020206],
  #        [3.5817177, 4.2104754]], dtype=float32)>
  ```

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions.
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in np.arange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in np.arange(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError(f"`axes` must be at least 0. Received: {axes}.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError("`axes` must not be larger than the number of "
                           f"dimensions of tensor {a}.  Received {axes}, vs "
                           f"tensor dimensions {a_shape.ndims}.")
        return (list(np.arange(a_shape.ndims - axes, a_shape.ndims)),
                list(np.arange(axes)))
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank, dtype=dtypes.int32),
                range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError(
            f"`axes` must be an integer or have length 2. Received {axes}.")
      a_axes = axes[0]
      b_axes = axes[1]
      if isinstance(a_axes, compat.integral_types) and isinstance(
          b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError("Different number of contraction axes `a` and `b`, "
                         f"{len(a_axes)} != {len(b_axes)}.")
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0),
          name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product


@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  """Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n-1] + coeffs[n-2] * x + ...  + coeffs[0] * x**(n-1)`

  evaluated using Horner's method, i.e.

  ```python
  p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```

  Usage Example:

  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>
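
  (Here `y = 1.0 * 5.0**2 + 2.5 * 5.0 - 4.2 = 25.0 + 12.5 - 4.2 = 33.3`, a
  worked check of the Horner evaluation above.)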

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same shape as the expression p(x), with usual
    broadcasting rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError(
        f"Argument coeffs must be list type. Received type {type(coeffs)}.")

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if len(coeffs) < 1:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold the coefficients from highest to lowest order.
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p


@tf_export("math.reciprocal_no_nan")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:
  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```
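
  This is equivalent to `tf.math.divide_no_nan(tf.ones_like(x), x)` (a
  restatement of the definition above, shown here for reference).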

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.

  Raises:
    TypeError: x must be of a valid dtype.
  """
  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    x = ops.convert_to_tensor(x, name="x")
    one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(one, x, name=scope)


@tf_export("math.xdivy")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def xdivy(x, y, name=None):
  """Computes `x / y`.

  Given `x` and `y`, computes `x / y`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xdivy(1., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
  >>> tf.math.xdivy(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
  >>> tf.math.xdivy(0., 0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
  >>> tf.math.xdivy(1., 0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  Args:
    x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    name: A name for the operation (optional).

  Returns:
    `x / y`.
  """
  with ops.name_scope(name, "xdivy", [x]):
    return gen_math_ops.xdivy(x, y)


@tf_export("math.xlog1py")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  """Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
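
  (Using `log1p(y)` rather than `log(1. + y)` preserves precision for small
  `|y|`.)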

  Args:
    x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
      `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  with ops.name_scope(name, "xlog1py", [x]):
    return gen_math_ops.xlog1py(x, y)


@tf_export("math.erfinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    Inverse error function of `x`.
  """
  with ops.name_scope(name, "erfinv", [x]):
    return gen_math_ops.erfinv(x)


@tf_export("math.ndtri")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Compute quantile of Standard Normal.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    The quantile of the standard normal distribution evaluated at `x`.
  """
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)


@tf_export("math.erfcinv")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
  """Computes the inverse of complementary error function.

  Given `x`, compute the inverse complementary error function of `x`.
  This function is the inverse of `tf.math.erfc`, and is defined on
  `[0, 2]`.
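
  (In terms of the standard normal quantile, `erfcinv(x) = -ndtri(x / 2) /
  sqrt(2)`; this identity follows from `erfc(y) = 2 * Phi(-y * sqrt(2))`.)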

  >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
  <tf.Tensor: shape=(5,), dtype=float32, numpy=
  array([       inf,  0.9061935, -0.       , -0.4769363,       -inf],
        dtype=float32)>

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    Inverse complementary error function of `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.erfcinv
  @end_compatibility
  """
  with ops.name_scope(name, "erfcinv", [x]):
    x = ops.convert_to_tensor(x, name="start")
    return -ndtri(0.5 * x) * np.sqrt(0.5)


@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `int32`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  return gen_math_ops.ceil(x, name)


@tf_export("math.sqrt", "sqrt")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sqrt(x, name=None):
  """Computes element-wise square root of the input tensor.
		1d	##r`   z	math.sqrtr  c                 .    t        j                  | |      S )a  Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[2.],
           [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[nan],
           [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
    array([[0.0+1.j],
           [4.0+0.j]])>

  Note: In order to support complex type, please provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  )r   r  r   s     rU   r  r  C  s    F 
		1d	##r`   zmath.expr  c                 .    t        j                  | |      S )a  Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise.
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  $$
  e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
  $$

  For `1+1j` the value would be computed as:
  $$
  e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
  $$
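
  (Multiplying out: `2.7182817 * 0.5403023 ~= 1.4686939` and
  `2.7182817 * 0.84147096 ~= 2.2873553`, matching the value below.)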

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  return gen_math_ops.exp(x, name)


@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has dimension
  `dim`. Skips the first `skip` samples.

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
        points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
        initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
        `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this function.

  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
  """
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)


@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.

  For example:

  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  return gen_math_ops.rsqrt(x, name)


@tf_export("math.acos", "acos")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def acos(x, name=None):
  """Computes acos of x element-wise.

  Provided an input tensor, the `tf.math.acos` operation
  returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.

  For example:

  >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
  >>> tf.math.acos(x)
  <tf.Tensor: shape=(6,), dtype=float32,
  numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
  dtype=float32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  return gen_math_ops.acos(x, name)


@tf_export("math.floor", "floor")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def floor(x, name=None):
  """Returns element-wise largest integer not greater than x.

  The input range is `(-inf, inf)` and the
  output range consists of all integer values.

  For example:

  >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
  >>> tf.floor(x).numpy()
  array([ 1., -2.,  5., -3.,  0., inf], dtype=float32)

  Args:
    x:  A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as x.
  """
  return gen_math_ops.floor(x, name)


# The remainder of the module consisted of module-level registration code that
# is not recoverable from the extracted bytecode. In outline, it defined:
#   * the legacy cast aliases backed by the ToFloat, ToDouble, ToInt32,
#     ToInt64, ToBFloat16, ToComplex64, ToComplex128 and LogicalXor op names
#     (to_float, to_double, to_int32, to_int64, to_bfloat16, to_complex64,
#     to_complex128, logical_xor);
#   * dispatch registrations for the unary and binary elementwise APIs
#     defined above;
#   * gradient registrations and the tensor-conversion registration for this
#     module's operations.