
    AVh(                     *   d Z ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlm	Z	 ddl
mZ i Zd	d
gZd Z ej                  ddd       edg      	 dd              Z ej                  ddd      	 dd       Z edg       G d d	             Zy)z5Registration and usage mechanisms for KL-divergences.    )ops)	array_ops)control_flow_assert)math_ops)deprecation)
tf_inspect)	tf_export
RegisterKLkl_divergencec                    t        j                  |       }t        j                  |      }d}d}t        |      D ]D  \  }}t        |      D ]1  \  }}	||z   }
t        j	                  ||	fd      }|r	|s(|
|k  s.|
}|}3 F |S )z3Get the KL function registered for classes a and b.N)r   getmro	enumerate_DIVERGENCESget)type_atype_bhierarchy_ahierarchy_bdist_to_childrenkl_fnmro_to_aparent_amro_to_bparent_bcandidate_distcandidate_kl_fns               d/home/dcms/DCMS/lib/python3.12/site-packages/tensorflow/python/ops/distributions/kullback_leibler.py_registered_klr   #   s    !!&)+!!&)+
%%k2  h'4  ((*n$(((H)=tDo><L+L)   
,    
2019-01-01The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.T	warn_oncezdistributions.kl_divergence)v1Nc                    t        t        |       t        |            }|7t        dt        |       j                  dt        |      j                        t	        j
                  d      5   || ||      }|r|cddd       S t        j                  |d      }t	        j                  t        j                  t        j                  t        j                  t        j                  |                  d| j                  d|j                  d	|g      g      5  t        j                  |d
      cddd       cddd       S # 1 sw Y   nxY w	 ddd       y# 1 sw Y   yxY w)a<  Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.

  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.

  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.

  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  NzKNo KL(distribution_a || distribution_b) registered for distribution_a type z and distribution_b type KullbackLeibler)nameklzKL calculation between z and zI returned NaN values (and was called with allow_nan_stats=False). Values:
checked_kl)r   typeNotImplementedError__name__r   
name_scoper   identitycontrol_dependenciesr   Assertr   logical_not
reduce_anyis_nanr'   )distribution_adistribution_ballow_nan_statsr'   r   kl_ts         r   r   r   3   sG   X n-tN/C
D%
]
 (($~*>*G*G	IJ J
 ~~'( 9d;D9 9 d.D		!	!""  !4!4X__T5J!KL  $$n&9&9;<@O	# 
 9 <89 99 99 9 99 9 9s+   -EBEE4	EE	EE&c                     t        j                  |d      5  | j                         t        | ||      z   cddd       S # 1 sw Y   yxY w)a  Computes the (Shannon) cross entropy.

  Denote two distributions by `P` (`ref`) and `Q` (`other`). Assuming `P, Q`
  are absolutely continuous with respect to one another and permit densities
  `p(x) dr(x)` and `q(x) dr(x)`, (Shanon) cross entropy is defined as:

  ```none
  H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
  ```

  where `F` denotes the support of the random variable `X ~ P`.

  Args:
    ref: `tfd.Distribution` instance.
    other: `tfd.Distribution` instance.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` prepended to names of ops created by this function.

  Returns:
    cross_entropy: `ref.dtype` `Tensor` with shape `[B1, ..., Bn]`
      representing `n` different calculations of (Shanon) cross entropy.
  cross_entropy)r6   N)r   r-   entropyr   )refotherr6   r'   s       r   r9   r9   y   sD    F ~~dO, 5;;==UO5 55 5 5s   A  A	zdistributions.RegisterKLc                   L    e Zd ZdZ ej
                  ddd      d        Zd Zy)	r
   zDecorator to register a KL divergence implementation function.

  Usage:

  @distributions.RegisterKL(distributions.Normal, distributions.Normal)
  def _kl_normal_mvn(norm_a, norm_b):
    # Return KL(norm_a || norm_b)
  r    r!   Tr"   c                     ||f| _         y)zInitialize the KL registrar.

    Args:
      dist_cls_a: the class of the first argument of the KL divergence.
      dist_cls_b: the class of the second argument of the KL divergence.
    N)_key)self
dist_cls_a
dist_cls_bs      r   __init__zRegisterKL.__init__   s     Z(DIr   c           	      (   t        |      st        d|z        | j                  t        v rSt	        d| j                  d   j
                  d| j                  d   j
                  dt        | j                           |t        | j                  <   |S )a*  Perform the KL registration.

    Args:
      kl_fn: The function to use for the KL divergence.

    Returns:
      kl_fn

    Raises:
      TypeError: if kl_fn is not a callable.
      ValueError: if a KL divergence function has already been registered for
        the given argument classes.
    z$kl_fn must be callable, received: %szKL(r   z ||    z") has already been registered to: )callable	TypeErrorr?   r   
ValueErrorr,   )r@   r   s     r   __call__zRegisterKL.__call__   s|     E?<uDEEyyL ))A,//11F1F&tyy13 4 4 $LLr   N)r,   
__module____qualname____doc__r   
deprecatedrC   rI    r   r   r
   r
      s:     ;'
 ))r   )TN)rL   tensorflow.python.frameworkr   tensorflow.python.opsr   r   r   tensorflow.python.utilr   r    tensorflow.python.util.tf_exportr	   r   __all__r   rM   r   r9   r
   rN   r   r   <module>rT      s    < + + 5 * . - 6    %
  ,-.-1:9 /:9z %
  .255@ )*+0 0 ,0r   