
"""Testing."""

import functools

from unittest import mock

from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest

# Symbols re-exported as part of the public `tf.test` API.
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available

from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import

from tensorflow.python.util.tf_export import tf_export

# Export the standard library `mock` module as `tf.compat.v1.test.mock`.
tf_export(v1=['test.mock'])(mock)

# Benchmark and stubbing helpers re-exported from googletest.
Benchmark = _googletest.Benchmark  # pylint: disable=invalid-name

StubOutForTesting = _googletest.StubOutForTesting  # pylint: disable=invalid-name


@tf_export('test.main')
def main(argv=None):
  """Runs all unit tests."""
  _test_util.InstallStackTraceHandler()
  return _googletest.main(argv)


@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
  """Returns a temporary directory for use during tests.

  There is no need to delete the directory after the test.

  @compatibility(TF2)
  This function is removed in TF2. Please use `TestCase.get_temp_dir` instead
  in a test case.
  Outside of a unit test, obtain a temporary directory through Python's
  `tempfile` module.
  @end_compatibility

  Returns:
    The temporary directory.
  """
  return _googletest.GetTempDir()


@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
  """Creates an absolute test srcdir path given a relative path.

  Args:
    relative_path: a path relative to tensorflow root.
      e.g. "core/platform".

  Returns:
    An absolute path to the linked in runfiles.
  """
  return _googletest.test_src_dir_path(relative_path)


@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
  """Returns whether TensorFlow was built with CUDA (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with CUDA (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_cuda():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA.
  """
  return _test_util.IsGoogleCudaEnabled()


@tf_export('test.is_built_with_rocm')
def is_built_with_rocm():
  """Returns whether TensorFlow was built with ROCm (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with ROCm (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_rocm():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is NOT built with ROCm.
  """
  return _test_util.IsBuiltWithROCm()


@tf_export('test.disable_with_predicate')
def disable_with_predicate(pred, skip_message):
  """Disables the test if pred is true."""

  def decorator_disable_with_predicate(func):

    @functools.wraps(func)
    def wrapper_disable_with_predicate(self, *args, **kwargs):
      # Evaluate the predicate at test-run time; skip instead of running.
      if pred():
        self.skipTest(skip_message)
      else:
        return func(self, *args, **kwargs)

    return wrapper_disable_with_predicate

  return decorator_disable_with_predicate


@tf_export('test.is_built_with_gpu_support')
def is_built_with_gpu_support():
  """Returns whether TensorFlow was built with GPU (CUDA or ROCm) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with GPU.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_gpu_support():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA GPU support.
  """
  return is_built_with_cuda() or is_built_with_rocm()


@tf_export('test.is_built_with_xla')
def is_built_with_xla():
  """Returns whether TensorFlow was built with XLA support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with XLA.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_xla(self):
  ...     if not tf.test.is_built_with_xla():
  ...       self.skipTest("test is only applicable on XLA")

  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)

  TensorFlow official binary is built with XLA.
  """
  return _test_util.IsBuiltWithXLA()


@tf_export('test.is_cpu_target_available')
def is_cpu_target_available(target):
  """Indicates whether TensorFlow was built with support for a given CPU target.

  Args:
    target: The name of the CPU target whose support to check for.

  Returns:
    A boolean indicating whether TensorFlow was built with support for the
    given CPU target.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with a given target.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_aarch64(self):
  ...     if not tf.test.is_cpu_target_available('aarch64'):
  ...       self.skipTest("test is only applicable on AArch64")

  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)
  """
  return _test_util.IsCPUTargetAvailable(target)
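
# A minimal usage sketch for `disable_with_predicate`, kept as a comment so it
# does not execute at import time. The test class, method, and skip message
# below are hypothetical; the decorator simply evaluates `pred` when the test
# runs and calls `skipTest(skip_message)` if it returns True.
#
#   class MyGpuTest(tf.test.TestCase):
#
#     @tf.test.disable_with_predicate(
#         pred=tf.test.is_built_with_rocm,
#         skip_message='CUDA-only kernel; skipped on ROCm builds')
#     def test_cuda_only_kernel(self):
#       ...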
	(	(	00r   r'   )!__doc__r0   unittestr   tensorflow.python.frameworkr   r   tensorflow.python.platformr   r   %tensorflow.python.framework.test_utilr   r   r   TestCaser	   r
   &tensorflow.python.ops.gradient_checkerr   r    tensorflow.python.util.tf_exportr   	BenchmarkStubOutForTestingr   r   r   r    r#   r3   r5   r8   r<   r   r   r   <module>rG      sQ       @ @ I F P A B I C 7 	k] D ! !!	  11  ;    "#$" %"$ '()
6 *
6 $%* &*( $%& &&( ()* **" +,6 -6( #$% %%. )*1 +1r   