"""Operations for linear algebra.

API docstring: tensorflow.linalg
"""
import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor as tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export


def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
  r"""Computes Cholesky factorization of regularized gramian matrix.

  Below we will use the following notation for each pair of matrix and
  right-hand sides in the batch:

  `matrix`=\\(A \in \Re^{m \times n}\\),
  `output`=\\(C  \in \Re^{\min(m, n) \times \min(m,n)}\\),
  `l2_regularizer`=\\(\lambda\\).

  If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that
  \\(L L^H =  A^H A + \lambda I\\).
  If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that
  \\(L L^H =  A A^H + \lambda I\\).

  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    first_kind: bool. Controls what gramian matrix to factor.

  Returns:
    output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2
      dimensions contain the Cholesky factors \\(L\\) described above.
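
  A minimal sketch of the `first_kind=True` case as used by the fast-path
  least-squares solvers below (`a` and `b` are assumed placeholders, not part
  of this module):

  ```python
  chol = _RegularizedGramianCholesky(a, l2_regularizer=0.1, first_kind=True)
  x = cholesky_solve(chol, math_ops.matmul(a, b, adjoint_a=True))
  ```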
  """
  gramian = math_ops.matmul(
      matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)
  if isinstance(l2_regularizer, tensor_lib.Tensor) or l2_regularizer != 0:
    matrix_shape = array_ops.shape(matrix)
    batch_shape = matrix_shape[:-2]
    if first_kind:
      small_dim = matrix_shape[-1]
    else:
      small_dim = matrix_shape[-2]
    identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)
    small_dim_static = matrix.shape[-1 if first_kind else -2]
    identity.set_shape(
        matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))
    gramian += l2_regularizer * identity
  return gen_linalg_ops.cholesky(gramian)


@tf_export(
    'linalg.triangular_solve',
    v1=['linalg.triangular_solve', 'matrix_triangular_solve'])
@dispatch.add_dispatch_support
def matrix_triangular_solve(matrix, rhs, lower=True, adjoint=False, name=None):
  """Solve systems of linear equations with upper or lower triangular matrices.

  `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
  square matrices. If `lower` is `True` then the strictly upper triangular part
  of each inner-most matrix is assumed to be zero and not accessed. If `lower`
  is `False` then the strictly lower triangular part of each inner-most matrix
  is assumed to be zero and not accessed. `rhs` is a tensor of shape
  `[..., M, N]`.

  The output is a tensor of shape `[..., M, N]`. If `adjoint` is `False` then the
  innermost matrices in output satisfy matrix equations
  `sum_k matrix[..., i, k] * output[..., k, j] = rhs[..., i, j]`.
  If `adjoint` is `True` then the
  innermost matrices in output satisfy matrix equations
  `sum_k adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

  Example:

  >>> a = tf.constant([[3,  0,  0,  0],
  ...   [2,  1,  0,  0],
  ...   [1,  0,  1,  0],
  ...   [1,  1,  1,  1]], dtype=tf.float32)

  >>> b = tf.constant([[4], [2], [4], [2]], dtype=tf.float32)
  >>> x = tf.linalg.triangular_solve(a, b, lower=True)
  >>> x
  <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  array([[ 1.3333334 ],
         [-0.66666675],
         [ 2.6666665 ],
         [-1.3333331 ]], dtype=float32)>
  >>> tf.matmul(a, x)
  <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  array([[4.],
         [2.],
         [4.],
         [2.]], dtype=float32)>
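
  The `adjoint` argument solves against the blockwise adjoint of `matrix`
  without materializing it; a hedged continuation of the example above:

  >>> x_adj = tf.linalg.triangular_solve(a, b, lower=True, adjoint=True)
  >>> # x_adj satisfies tf.matmul(a, x_adj, adjoint_a=True) ~ b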

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`,
      `float32`, `half`, `complex64`, `complex128`. Shape is `[..., M, M]`.
    rhs: A `Tensor`. Must have the same type as `matrix`. Shape is `[..., M,
      N]`.
    lower: An optional `bool`. Defaults to `True`. Boolean indicating whether
      the innermost matrices in matrix are lower or upper triangular.
    adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether
      to solve with matrix or its (block-wise) adjoint.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`, and shape is `[..., M, N]`.
  """
  with ops.name_scope(name, 'triangular_solve', [matrix, rhs]):
    return gen_linalg_ops.matrix_triangular_solve(
        matrix, rhs, lower=lower, adjoint=adjoint)


@tf_export(
    'linalg.cholesky_solve', v1=['linalg.cholesky_solve', 'cholesky_solve'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
  """Solves systems of linear eqns `A X = RHS`, given Cholesky factorizations.

  Specifically, returns `X` from `A X = RHS`, where `A = L L^T`, `L` is the
  `chol` arg and `RHS` is the `rhs` arg.

  ```python
  # Solve 10 separate 2x2 linear systems:
  A = ... # shape 10 x 2 x 2
  RHS = ... # shape 10 x 2 x 1
  chol = tf.linalg.cholesky(A)  # shape 10 x 2 x 2
  X = tf.linalg.cholesky_solve(chol, RHS)  # shape 10 x 2 x 1
  # tf.matmul(A, X) ~ RHS
  X[3, :, 0]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]

  # Solve five linear systems (K = 5) for every member of the length 10 batch.
  A = ... # shape 10 x 2 x 2
  RHS = ... # shape 10 x 2 x 5
  ...
  X[3, :, 2]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
  ```

  Args:
    chol:  A `Tensor`.  Must be `float32` or `float64`, shape is `[..., M, M]`.
      Cholesky factorization of `A`, e.g. `chol = tf.linalg.cholesky(A)`.
      For that reason, only the lower triangular parts (including the diagonal)
      of the last two dimensions of `chol` are used.  The strictly upper part is
      assumed to be zero and not accessed.
    rhs:  A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
    name:  A name to give this `Op`.  Defaults to `cholesky_solve`.

  Returns:
    Solution to `A x = rhs`, shape `[..., M, K]`.
  """
  with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
    # Forward substitution with L, then backward substitution with L^H.
    y = gen_linalg_ops.matrix_triangular_solve(
        chol, rhs, adjoint=False, lower=True)
    x = gen_linalg_ops.matrix_triangular_solve(
        chol, y, adjoint=True, lower=True)
    return x


@tf_export('eye', 'linalg.eye')
@dispatch.add_dispatch_support
def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype=dtypes.float32,
        name=None):
  """Construct an identity matrix, or a batch of matrices.

  See also `tf.ones`, `tf.zeros`, `tf.fill`, `tf.one_hot`.

  ```python
  # Construct one identity matrix.
  tf.eye(2)
  ==> [[1., 0.],
       [0., 1.]]

  # Construct a batch of 3 identity matrices, each 2 x 2.
  # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
  batch_identity = tf.eye(2, batch_shape=[3])

  # Construct one 2 x 3 "identity" matrix
  tf.eye(2, num_columns=3)
  ==> [[ 1.,  0.,  0.],
       [ 0.,  1.,  0.]]
  ```

  Args:
    num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
      in each batch matrix.
    num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
      of columns in each batch matrix.  Defaults to `num_rows`.
    batch_shape:  A list or tuple of Python integers or a 1-D `int32` `Tensor`.
      If provided, the returned `Tensor` will have leading batch dimensions of
      this shape.
    dtype:  The type of an element in the resulting `Tensor`.
    name:  A name for this `Op`.  Defaults to "eye".

  Returns:
    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`.
  """
  return linalg_ops_impl.eye(num_rows,
                             num_columns=num_columns,
                             batch_shape=batch_shape,
                             dtype=dtype,
                             name=name)


@tf_export('linalg.lstsq', v1=['linalg.lstsq', 'matrix_solve_ls'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('matrix_solve_ls')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
  r"""Solves one or more linear least-squares problems.

  `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
  form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
  inner-most 2 dimensions form `M`-by-`K` matrices.  The computed output is a
  `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
  matrices that solve the equations
  `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
  sense.

  Below we will use the following notation for each pair of matrix and
  right-hand sides in the batch:

  `matrix`=\\(A \in \Re^{m \times n}\\),
  `rhs`=\\(B  \in \Re^{m \times k}\\),
  `output`=\\(X  \in \Re^{n \times k}\\),
  `l2_regularizer`=\\(\lambda\\).

  If `fast` is `True`, then the solution is computed by solving the normal
  equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
  \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
  problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
  \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
  \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
  the minimum-norm solution to the under-determined linear system, i.e.
  \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
  \\(A Z = B\\). Notice that the fast path is only numerically stable when
  \\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or \\(\lambda\\)
  is sufficiently large.

  If `fast` is `False` an algorithm based on the numerically robust complete
  orthogonal decomposition is used. This computes the minimum-norm
  least-squares solution, even when \\(A\\) is rank deficient. This path is
  typically 6-7 times slower than the fast path. If `fast` is `False` then
  `l2_regularizer` is ignored.
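
  For example, a ridge-regularized batch solve on the fast path (a sketch;
  `A` and `B` are assumed placeholders, not part of this module):

  ```python
  A = tf.random.normal([10, 6, 4])  # batch of 10 overdetermined systems
  B = tf.random.normal([10, 6, 2])
  X = tf.linalg.lstsq(A, B, l2_regularizer=0.1)  # shape [10, 4, 2]
  ```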

  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    rhs: `Tensor` of shape `[..., M, K]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    fast: bool. Defaults to `True`.
    name: string, optional name of the operation.

  Returns:
    output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
      `M`-by-`K` matrices that solve the equations
      `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
      squares sense.

  Raises:
    NotImplementedError: linalg.lstsq is currently disabled for complex128
    and l2_regularizer != 0 due to poor accuracy.
  """

  def _use_composite_impl(fast, tensor_shape):
    """Determines whether to use the composite or specialized CPU kernel.

    When the total size of the tensor is larger than the cache size and the
    batch size is large compared to the smallest matrix dimension, then the
    composite implementation is inefficient since it has to read the entire
    tensor from memory multiple times. In this case we fall back to the
    original CPU kernel, which does all the computational steps on each
    matrix separately.

    Only fast mode is supported by the composite impl, so `False` is returned
    if `fast` is `False`.
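
    For instance (illustrative numbers, not from the original comments): a
    `float64` tensor of shape `[4096, 8, 8]` holds 4096 * 64 elements * 8
    bytes ~= 2.1 MB, which exceeds the ~256 KB L2 guesstimate, and its batch
    size 4096 exceeds min(8, 8) = 8, so the specialized kernel is chosen.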

    Args:
      fast: bool indicating if fast mode in the solver was requested.
      tensor_shape: The shape of the tensor.

    Returns:
      True if the composite impl should be used. False otherwise.
    """
    if fast is False:
      return False
    batch_shape = tensor_shape[:-2]
    matrix_shape = tensor_shape[-2:]
    if not tensor_shape.is_fully_defined():
      return False
    tensor_size = tensor_shape.num_elements() * matrix.dtype.size
    is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)
    L2_CACHE_SIZE_GUESSTIMATE = 256000
    if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:
      return False
    else:
      return True

  def _overdetermined(matrix, rhs, l2_regularizer):
    """Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=True)
    return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True))

  def _underdetermined(matrix, rhs, l2_regularizer):
    """Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=False)
    return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True)

  def _composite_impl(matrix, rhs, l2_regularizer):
    """Composite implementation of matrix_solve_ls that supports GPU."""
    with ops.name_scope(name, 'matrix_solve_ls',
                        [matrix, rhs, l2_regularizer]):
      matrix_shape = matrix.get_shape()[-2:]
      if matrix_shape.is_fully_defined():
        if matrix_shape[-2] >= matrix_shape[-1]:
          return _overdetermined(matrix, rhs, l2_regularizer)
        else:
          return _underdetermined(matrix, rhs, l2_regularizer)
      else:
        # Static shape is unknown; pick the branch at runtime.
        matrix_shape = array_ops.shape(matrix)[-2:]
        return cond.cond(
            matrix_shape[-2] >= matrix_shape[-1],
            lambda: _overdetermined(matrix, rhs, l2_regularizer),
            lambda: _underdetermined(matrix, rhs, l2_regularizer))

  matrix = ops.convert_to_tensor(matrix, name='matrix')
  if matrix.dtype == dtypes.complex128 and l2_regularizer != 0:
    raise NotImplementedError('matrix_solve_ls is currently disabled for '
                              'complex128 and l2_regularizer != 0 due to '
                              'poor accuracy.')
  tensor_shape = matrix.get_shape()
  if _use_composite_impl(fast, tensor_shape):
    return _composite_impl(matrix, rhs, l2_regularizer)
  else:
    return gen_linalg_ops.matrix_solve_ls(
        matrix, rhs, l2_regularizer, fast=fast, name=name)


@tf_export('linalg.eig', 'eig', v1=[])
@dispatch.add_dispatch_support
def eig(tensor, name=None):
  """Computes the eigen decomposition of a batch of matrices.

  The eigenvalues and eigenvectors of a non-Hermitian matrix are in general
  complex. The eigenvectors are not guaranteed to be linearly independent.

  Computes the eigenvalues and right eigenvectors of the innermost
  N-by-N matrices in `tensor` such that
  `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.
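
  For example (a sketch; `matrix` is an assumed placeholder):

  ```python
  matrix = tf.constant([[0., 1.], [-1., 0.]])  # a 90-degree rotation
  e, v = tf.linalg.eig(matrix)                 # eigenvalues are +1j and -1j
  ```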

  Args:
    tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
      each inner matrix is referenced.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. The eigenvalues are not necessarily
       ordered.
    v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost
      matrices contain eigenvectors of the corresponding matrices in `tensor`.
  """
  if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:
    out_dtype = dtypes.complex64
  elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:
    out_dtype = dtypes.complex128
  e, v = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=True, name=name)
  return e, v


@tf_export('linalg.eigvals', 'eigvals', v1=[])
@dispatch.add_dispatch_support
def eigvals(tensor, name=None):
  """Computes the eigenvalues of one or more matrices.

  Note: If your program backpropagates through this function, you should replace
  it with a call to tf.linalg.eig (possibly ignoring the second output) to
  avoid computing the eigen decomposition twice. This is because the
  eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
  _SelfAdjointEigV2Grad in linalg_grad.py.
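
  For example (a sketch; `matrix` is an assumed placeholder):

  ```python
  matrix = tf.constant([[2., 0.], [0., 3.]])
  e = tf.linalg.eigvals(matrix)  # complex64 values 2+0j and 3+0j
  ```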

  Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
      eigenvalues of `tensor[..., :, :]`.
  """
  if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:
    out_dtype = dtypes.complex64
  elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:
    out_dtype = dtypes.complex128
  e, _ = gen_linalg_ops.eig(tensor, Tout=out_dtype, compute_v=False, name=name)
  return e


@tf_export('linalg.eigh', v1=['linalg.eigh', 'self_adjoint_eig'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('self_adjoint_eig')
def self_adjoint_eig(tensor, name=None):
  """Computes the eigen decomposition of a batch of self-adjoint matrices.

  Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices
  in `tensor` such that
  `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.
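
  For example (a sketch; `matrix` is an assumed placeholder):

  ```python
  matrix = tf.constant([[2., 1.], [1., 2.]])  # symmetric, hence self-adjoint
  e, v = tf.linalg.eigh(matrix)               # e is approximately [1., 3.]
  ```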

  Args:
    tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
      each inner matrix is referenced.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order.
    v: Eigenvectors. Shape is `[..., N, N]`. The columns of the innermost
      matrices contain eigenvectors of the corresponding matrices in `tensor`.
  """
  e, v = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=True, name=name)
  return e, v


@tf_export('linalg.eigvalsh', v1=['linalg.eigvalsh', 'self_adjoint_eigvals'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('self_adjoint_eigvals')
def self_adjoint_eigvals(tensor, name=None):
  """Computes the eigenvalues of one or more self-adjoint matrices.
  Note: If your program backpropagates through this function, you should replace
  it with a call to tf.linalg.eigh (possibly ignoring the second output) to
  avoid computing the eigen decomposition twice. This is because the
  eigenvectors are used to compute the gradient w.r.t. the eigenvalues. See
  _SelfAdjointEigV2Grad in linalg_grad.py.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the `N`
      eigenvalues of `tensor[..., :, :]`.
  Frp   rq   )r   r6   ri   rm   s       r*   rs   rs     s!    ( 
  """
  e, _ = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=False, name=name)
  return e


@tf_export('linalg.svd', v1=['linalg.svd', 'svd'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  r"""Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
   transpose(conj(v[..., :, :]))`

  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```

  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.

  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that
    * The order of output arguments here is `s`, `u`, `v` when `compute_uv` is
      `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
    * full_matrices is `False` by default as opposed to `True` for
       numpy.linalg.svd.
    * tf.linalg.svd uses the standard definition of the SVD
      \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
      the columns of `u`, while the right singular vectors of `a` are the
      columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
      \\(V^H\\) as the third output argument.
  ```python
  import tensorflow as tf
  import numpy as np
  s, u, v = tf.linalg.svd(a)
  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
  u, s, v_adj = np.linalg.svd(a, full_matrices=False)
  np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
  # tf_a_approx and np_a_approx should be numerically close.
  ```
  @end_compatibility
  """
  s, u, v = gen_linalg_ops.svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)


# pylint: disable=redefined-builtin
@tf_export('norm', 'linalg.norm', v1=[])
@dispatch.add_dispatch_support
def norm_v2(tensor, ord='euclidean', axis=None, keepdims=None, name=None):
  """Computes the norm of vectors, matrices, and tensors.

  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).
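
  For example (a sketch; `x` is an assumed placeholder):

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.norm(x)                            # 2-norm of all entries: sqrt(30)
  tf.norm(x, ord=1, axis=1)             # row-wise 1-norms: [3., 7.]
  tf.norm(x, ord='fro', axis=[-2, -1])  # Frobenius norm: sqrt(30)
  ```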

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are `'fro'`, `'euclidean'`,
      `1`, `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is `'euclidean'` which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply:
        a) The Frobenius norm `'fro'` is not defined for vectors,
        b) If axis is a 2-tuple (matrix norm), only `'euclidean'`, `'fro'`, `1`,
           `2`, `np.inf` are supported.
      See the description of `axis` on how to compute norms for a batch of
      vectors or matrices stored in a tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`.
      If `axis` is a Python integer, the input is considered a batch of vectors,
      and `axis` determines the axis in `tensor` over which to compute vector
      norms.
      If `axis` is a 2-tuple of Python integers it is considered a batch of
      matrices and `axis` determines the axes in `tensor` over which to compute
      a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    keepdims: If True, the axes indicated in `axis` are kept with size 1.
      Otherwise, the dimensions in `axis` are removed from the output shape.
    name: The name of the op.

  Returns:
    output: A `Tensor` of the same type as tensor, containing the vector or
      matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is None the output is a scalar,
      if `axis` is an integer, the rank of `output` is one less than the rank
      of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
      than the rank of `tensor`.

  Raises:
    ValueError: If `ord` or `axis` is invalid.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
  Other differences:
    a) If axis is `None`, treats the flattened `tensor` as a vector
     regardless of rank.
    b) Explicitly supports 'euclidean' norm as the default, including for
     higher order tensors.
  @end_compatibility
  """
  return norm(tensor=tensor, ord=ord, axis=axis, keepdims=keepdims, name=name)


# pylint: disable=redefined-builtin
@tf_export(v1=['norm', 'linalg.norm'])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(
    None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')
def norm(tensor,
         ord='euclidean',
         axis=None,
         keepdims=None,
         name=None,
         keep_dims=None):
  """Computes the norm of vectors, matrices, and tensors.
  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are 'fro', 'euclidean',
      `1`, `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply:
        a) The Frobenius norm `fro` is not defined for vectors,
        b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
           `2`, `np.inf` are supported.
      See the description of `axis` on how to compute norms for a batch of
      vectors or matrices stored in a tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`.
      If `axis` is a Python integer, the input is considered a batch of vectors,
      and `axis` determines the axis in `tensor` over which to compute vector
      norms.
      If `axis` is a 2-tuple of Python integers it is considered a batch of
      matrices and `axis` determines the axes in `tensor` over which to compute
      a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    keepdims: If True, the axes indicated in `axis` are kept with size 1.
      Otherwise, the dimensions in `axis` are removed from the output shape.
    name: The name of the op.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    output: A `Tensor` of the same type as tensor, containing the vector or
      matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is None the output is a scalar,
      if `axis` is an integer, the rank of `output` is one less than the rank
      of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
      than the rank of `tensor`.

  Raises:
    ValueError: If `ord` or `axis` is invalid.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
  Other differences:
    a) If axis is `None`, treats the flattened `tensor` as a vector
     regardless of rank.
    b) Explicitly supports 'euclidean' norm as the default, including for
     higher order tensors.
  @end_compatibility
  """
  keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
                                                    'keep_dims', keep_dims)
  if keepdims is None:
    keepdims = False

  is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and
                    len(axis) == 2)
  if is_matrix_norm:
    axis = tuple(axis)
    if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
        axis[0] == axis[1]):
      raise ValueError(f"'axis' must be None, an integer, or a tuple of 2 "
                       f"unique integers, got {axis}")
    supported_matrix_norms = ['euclidean', 'fro', 1, 2, np.inf]
    if ord not in supported_matrix_norms:
      raise ValueError(f"'ord' must be a supported matrix norm in "
                       f"{supported_matrix_norms}, got {ord}")
  else:
    if not (isinstance(axis, int) or axis is None):
      raise ValueError(f"'axis' must be None, an integer, or a tuple of 2 "
                       f"unique integers, got {axis}")

    supported_vector_norms = ['euclidean', 1, 2, np.inf]
    if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
      raise ValueError(f"'ord' must be a supported vector norm, got {ord}")
    if axis is not None:
      axis = (axis,)

  with ops.name_scope(name, 'norm', [tensor]):
    tensor = ops.convert_to_tensor(tensor)

    if ord in ['fro', 'euclidean', 2, 2.0]:
      if is_matrix_norm and ord in [2, 2.0]:
        # Matrix 2-norm: move the two reduction axes to the end, take the
        # largest singular value, then restore the original axis order.
        rank = array_ops.rank(tensor)
        positive_axis = map_fn.map_fn(
            lambda i: cond.cond(i >= 0, lambda: i, lambda: i + rank),
            ops.convert_to_tensor(axis))
        axes = math_ops.range(rank)
        perm_before = array_ops.concat([
            gen_array_ops.list_diff(axes, positive_axis, dtypes.int32)[0],
            positive_axis
        ],
                                       axis=0)
        perm_after = map_fn.map_fn(
            lambda i: math_ops.cast(
                array_ops.squeeze(
                    array_ops.where_v2(math_ops.equal(perm_before, i))),
                dtype=dtypes.int32), axes)
        permed = array_ops.transpose(tensor, perm=perm_before)
        matrix_2_norm = array_ops.expand_dims(
            math_ops.reduce_max(
                math_ops.abs(gen_linalg_ops.svd(permed, compute_uv=False)[0]),
                axis=-1,
                keepdims=True),
            axis=-1)
        result = array_ops.transpose(matrix_2_norm, perm=perm_after)
      else:
        result = math_ops.sqrt(
            math_ops.reduce_sum(
                tensor * math_ops.conj(tensor), axis, keepdims=True))
    else:
      result = math_ops.abs(tensor)
      if ord == 1:
        sum_axis = None if axis is None else axis[0]
        result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
        if is_matrix_norm:
          result = math_ops.reduce_max(result, axis[-1], keepdims=True)
      elif ord == np.inf:
        if is_matrix_norm:
          result = math_ops.reduce_sum(result, axis[1], keepdims=True)
        max_axis = None if axis is None else axis[0]
        result = math_ops.reduce_max(result, max_axis, keepdims=True)
      else:
        # General p-norms (positive p only).
        result = math_ops.pow(
            math_ops.reduce_sum(math_ops.pow(result, ord), axis,
                                keepdims=True),
            1.0 / ord)
    if not keepdims:
      result = array_ops.squeeze(result, axis)
    return result