a
    /Sicz                 	   @   sB  d Z ddlZddlZddlZddlZddlZddlZddlZddlm	Z	 ddl
ZddlZddlmZ ddlmZ ddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddl m!Z! ddl m"Z" ddl m#Z# ddl$m%Z% ddl&m'Z' dZ(dZ)e* r:e)ne(Z+i a,ej-.dZ/dZ0ej1Z2ej3Z4ej5Z6ej7Z8dZ9dZ:dZ;e<ddZ=ej*Z>e?dddka@e?ddkaAdd ZBd d! ZCd"d# ZDd$d% ZEd&d' ZFd(d) ZGe'd*g d+e> G d,d- d-eHZIG d.d/ d/eHZJi ZKG d0d1 d1ejLZMeNd2g d3ZOG d4d5 d5ejLZPe'd6G d7d8 d8eNd8d9d:gZQe'd;d<G d=d> d>eNd>g d?ZRe'd@G dAdB dBeNdBd9d:gZSG dCdD dDeHZTeT ZUG dEdF dFeHZVG dGdH dHeHZWG dIdJ dJeHZXdaYeZ Z[dKdL Z\dMdN Z]dOdP Z^dQdR Z_dSdT Z`dUdV ZadWdX ZbdYdZ Zcd[d\ Zdd]d^ Zed_d` Zfdadb Zge'dcg d+dddc Zhe'dcgd+dedf Zidgdh Zjdidj Zkdkdl Zle'dmg d+dndo Zmdpdq Zndrds Ze'dtg d+dudv Zoe'dwg d+dxdy Zpe'dzg d+d{d| Zqe'd}g d+d~d Zre'ddd Zse'ddd Zte#judd Zvdd Zwe#judd Zxe#judd Zye'de#judd Zzdd Z{dd Z|dd Z}dd Z~dd Zdd Zdd ZejudddZdd Zdd Zdd Zdd Ze'de#judd Zdd Ze'ddd Zdd Zdd Zdd Zdd Zdd Zee"_dS )z%State management for eager execution.    N)logging)function_pb2)
config_pb2)coordination_config_pb2)rewriter_config_pb2)
pywrap_tfe)tf2)pywrap_tf_session)executor)
monitoring)
c_api_util)device)
tfrt_utils)compat)is_in_graph_mode)tf_contextlib)
deprecated)	tf_export    iiX  z3/tensorflow/api/python/eager_context_create_counterz7Counter for number of eager contexts created in Python.ZTF_RUN_EAGER_OP_AS_FUNCTION1ZTF_JIT_COMPILE_REWRITEc                   C   s   da t durdt _dS )ad  Execute elementary eager ops (non-function) wrapped in a call op.

  This should be functionally equivalent to running the eager op's kernel
  directly (the default) but reduces the number of codepaths for executing
  TF2 programs in the runtime, thereby improving consistency (in terms of
  optimizations and rewrites for instance) and maintainability.
  TN!_RUN_EAGER_OP_AS_FUNCTION_ENABLEDcontext_saferun_eager_op_as_function r   r   [/var/www/html/django/DPS/env/lib/python3.9/site-packages/tensorflow/python/eager/context.pyenable_run_eager_op_as_functionZ   s    	
r   c                   C   s   da t d urdt _d S NFr   r   r   r   r    disable_run_eager_op_as_functioni   s    
r   c                   C   s   t  d urt  jS tS N)r   r   r   r   r   r   r    run_eager_op_as_function_enabledp   s    
r!   c                   C   s   da t durdt _dS )zRun jit_compile functions through rewrite pass.

  This runs jit_compile functions through all of the multidevice function
  rewrite passes.
  TN_JIT_COMPILE_REWRITE_ENABLEDr   jit_compile_rewriter   r   r   r   enable_jit_compile_rewritew   s    
r%   c                   C   s   da t d urdt _d S r   r"   r   r   r   r   disable_jit_compile_rewrite   s    
r&   c                   C   s   t  d urt  jS tS r    )r   r$   r#   r   r   r   r   jit_compile_rewrite_enabled   s    
r'   z__internal__.is_tfrt_enabled)v1c                   @   s:   e Zd ZdZg dZdddZdd Zd	d
 Zdd ZdS )_EagerTensorCachezASimple cache which evicts items based on length in a FIFO manner.)_data
_max_items_max_tensor_size   '  c                 C   s   t  | _|| _|| _d S r    )collectionsOrderedDictr*   r+   r,   )selfZ	max_itemsmax_tensor_sizer   r   r   __init__   s    
z_EagerTensorCache.__init__c                 C   s>   |  | jkrd S || j|< t| j| jkr:| jjdd d S )NF)last)Z_num_elementsr,   r*   lenr+   popitemr1   keyvaluer   r   r   put   s
    
z_EagerTensorCache.putc                 C   s   | j |d S r    )r*   getr1   r8   r   r   r   r;      s    z_EagerTensorCache.getc                 C   s   | j   d S r    )r*   clearr1   r   r   r   flush   s    z_EagerTensorCache.flushN)r-   r.   )	__name__
__module____qualname____doc__	__slots__r3   r:   r;   r?   r   r   r   r   r)      s   
	r)   c                   @   sV   e Zd ZdZddgZdddZedd Zejd	d Zed
d Z	e	jdd Z	dS )FunctionCallOptionsz|Options applied at call sites of eager functions.

  Eager functions are functions decorated with tf.contrib.eager.defun.
  _config_proto_serialized_executor_typeNc                 C   s   || _ || _dS )aR  Constructor.

    Args:
      executor_type: (optional) name of the executor to be used to execute the
        eager function. If None or an empty string, the default Tensorflow
        executor will be used.
      config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized
        string of that proto. The config used by Grappler when optimizing the
        function graph. Each concrete function is optimized the first time is
        called. Changing config_proto after the first call has no effect. If
        config_proto is None, an empty RewriterConfig will be used.
    N)config_proto_serializedexecutor_type)r1   rI   config_protor   r   r   r3      s    zFunctionCallOptions.__init__c                 C   s   | j S r    rG   r>   r   r   r   rI      s    z!FunctionCallOptions.executor_typec                 C   s
   || _ d S r    rK   )r1   rI   r   r   r   rI      s    c                 C   s   | j S r    )rF   r>   r   r   r   rH      s    z+FunctionCallOptions.config_proto_serializedc                 C   s\   t |tjr|jdd| _n<t |tr.|| _n*|d u rFt  | _ntdt|d S )NT)deterministiczrthe rewriter config must be either a config_pb2.ConfigProto, or a serialized string of that proto or None. got: {})	
isinstancer   ConfigProtoSerializeToStringrF   str
ValueErrorformattyper1   configr   r   r   rH      s    


)NN)
r@   rA   rB   rC   rD   r3   propertyrI   setterrH   r   r   r   r   rE      s   



rE   c                       s@   e Zd ZdZddgZ fddZedd Zedd	 Z  Z	S )
_TensorCacheszThread local tensor caches._ones_rank_cache_zeros_cachec                    s   t t|   d | _d | _d S r    )superrX   r3   rY   rZ   r>   	__class__r   r   r3      s    z_TensorCaches.__init__c                 C   s   | j st | _ | j S r    )rY   r)   r>   r   r   r   ones_rank_cache   s    z_TensorCaches.ones_rank_cachec                 C   s   | j st | _ | j S r    )rZ   r)   r>   r   r   r   zeros_cache   s    z_TensorCaches.zeros_cache)
r@   rA   rB   rC   rD   r3   rV   r^   r_   __classcell__r   r   r\   r   rX      s   
rX   ContextSwitchis_building_functionenter_context_fndevice_stackc                       s0   e Zd ZdZ fddZdd Zdd Z  ZS )_ContextSwitchStackz)A thread-local stack of context switches.c                    s,   t t|   g | _|r(| jdtd d d S )NFrb   )r[   rf   r3   stackpush
eager_mode)r1   eagerr\   r   r   r3   
  s    z_ContextSwitchStack.__init__c                 C   s   | j t||| dS )a  Push metadata about a context switch onto the stack.

    A context switch can take any one of the two forms: installing a graph as
    the default graph, or entering the eager context. For each context switch,
    we record whether or not the entered context is building a function.

    Args:
      is_building_function: (bool.) Whether the context is building a function.
      enter_context_fn: (function.) A callable that executes the context switch.
        For example, `graph.as_default` or `eager_mode`.
      device_stack: If applicable, the device function stack for this graph.
        When breaking out of graphs in init_scope, the innermost nonempty device
        stack is used. Eager contexts put `None` here and the value is never
        used.
    N)rg   appendra   )r1   rc   rd   re   r   r   r   rh     s    
z_ContextSwitchStack.pushc                 C   s   | j   dS )zPop the stack.N)rg   popr>   r   r   r   rl   ,  s    z_ContextSwitchStack.pop)r@   rA   rB   rC   r3   rh   rl   r`   r   r   r\   r   rf     s   rf   zconfig.LogicalDevicec                   @   s   e Zd ZdZdS )LogicalDevicea  Abstraction for a logical device initialized by the runtime.

  A `tf.config.LogicalDevice` corresponds to an initialized logical device on a
  `tf.config.PhysicalDevice` or a remote device visible to the cluster. Tensors
  and operations can be placed on a specific logical device by calling
  `tf.device` with a specified `tf.config.LogicalDevice`.

  Fields:
    name: The fully qualified name of the device. Can be used for Op or function
      placement.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  Nr@   rA   rB   rC   r   r   r   r   rm   2  s   rm   namedevice_typez!config.LogicalDeviceConfigurationz.config.experimental.VirtualDeviceConfigurationc                       s"   e Zd ZdZd fdd	Z  ZS )LogicalDeviceConfigurationaV  Configuration class for a logical devices.

  The class specifies the parameters to configure a `tf.config.PhysicalDevice`
  as it is initialized to a `tf.config.LogicalDevice` during runtime
  initialization. Not all fields are valid for all device types.

  See `tf.config.get_logical_device_configuration` and
  `tf.config.set_logical_device_configuration` for usage examples.

  Fields:
    memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual
      device. Currently only supported for GPUs.
    experimental_priority: (optional) Priority to assign to a virtual device.
      Lower values have higher priorities and 0 is the default.
      Within a physical GPU, the GPU scheduler will prioritize ops on virtual
      devices with higher priority. Currently only supported for Nvidia GPUs.
    experimental_device_ordinal: (optional) Ordinal number to order the virtual
    device.
      LogicalDevice with lower ordinal number will receive a lower device id.
      Physical device id and location in the list is used to break ties.
      Currently only supported for Nvidia GPUs.
  Nr   c                    s   t t| | |||S r    )r[   rq   __new__)clsmemory_limitexperimental_priorityexperimental_device_ordinalr\   r   r   rr   a  s    z"LogicalDeviceConfiguration.__new__)NNr   )r@   rA   rB   rC   rr   r`   r   r   r\   r   rq   D  s
      rq   )rt   ru   rv   zconfig.PhysicalDevicec                   @   s   e Zd ZdZdS )PhysicalDevicear  Abstraction for a locally visible physical device.

  TensorFlow can utilize various devices such as the CPU or multiple GPUs
  for computation. Before initializing a local device for use, the user can
  customize certain properties of the device such as it's visibility or memory
  configuration.

  Once a visible `tf.config.PhysicalDevice` is initialized one or more
  `tf.config.LogicalDevice` objects are created. Use
  `tf.config.set_visible_devices` to configure the visibility of a physical
  device and `tf.config.set_logical_device_configuration` to configure multiple
  `tf.config.LogicalDevice` objects for a `tf.config.PhysicalDevice`. This is
  useful when separation between models is needed or to simulate a multi-device
  environment.

  Fields:
    name: Unique identifier for device.
    device_type: String declaring the type of device such as "CPU" or "GPU".
  Nrn   r   r   r   r   rw   j  s   rw   c                   @   s(   e Zd ZdZddgZdd Zdd ZdS )	_AtomicCounterzA simple atomic counter._value_lockc                 C   s   d| _ t | _d S Nr   )ry   	threadingLockrz   r>   r   r   r   r3     s    z_AtomicCounter.__init__c                 C   s>   | j $ |  jd7  _| jW  d    S 1 s00    Y  d S )Nr   )rz   ry   r>   r   r   r   increment_and_get  s    z _AtomicCounter.increment_and_getN)r@   rA   rB   rC   rD   r3   r~   r   r   r   r   rx     s   rx   c                   @   s&   e Zd ZdZdgZdd Zdd ZdS )_TensorCacheDeleterz*Deletes tensor caches for a given context._context_idc                 C   s
   || _ d S r    )r   )r1   
context_idr   r   r   r3     s    z_TensorCacheDeleter.__init__c                 C   s"   t d u rd S | jt v rt | j= d S r    )_tensor_caches_mapr   r>   r   r   r   __del__  s    
z_TensorCacheDeleter.__del__N)r@   rA   rB   rC   rD   r3   r   r   r   r   r   r     s   r   c                   @   s  e Zd ZdZdddZdd Zdd Zd	d
 Zdd Zdd Z	dd Z
dd ZefddZefddZdd Zdd Zdd Zdd d!Zed"d# Zd$d% Zd&d' Zd(d) Zd*d+ Zd,d- Zd.d/ Zdd2d3Zd4d5 Zd6d7 Zed8d9 Zed:d; Zd<d= Z e!j"d>d? Z#d@dA Z$dBdC Z%dDdE Z&edFdG Z'e'j(dHdG Z'edIdJ Z)edKdL Z*dMdN Z+dOdP Z,dQdR Z-dSdT Z.edUdV Z/e/j(dWdV Z/dXdY Z0edZd[ Z1e1j(d\d[ Z1ed]d^ Z2d_d` Z3edadb Z4e4j(dcdb Z4ddde Z5dfdg Z6dhdi Z7djdk Z8dldm Z9dndo Z:dpdq Z;drds Z<dtdu Z=dvdw Z>dxdy Z?edzd{ Z@ed|d} ZAeAj(d~d} ZAdddZBdd ZCdddZDdd ZEdd ZFdddZGdddZHdddZIdd ZJdd ZKdd ZLdd ZMdd ZNdd ZOdddZPdddZQeRdddddd ZSedd ZTedd ZUeTj(dd ZTeUj(dd ZUedd ZVeVj(dd ZVdd ZWdd ZXedd ZYeYj(dd ZYedd ZZeZj(dd ZZedd Z[e[j(dd Z[edd Z\e\j(dd Z\edd Z]e]j(dd Z]edd Z^e^j(dd Z^eddĄ Z_e_j(ddĄ Z_eddǄ Z`e`j(ddǄ Z`eddʄ Zaeaj(ddʄ Zaedd̈́ Zbebj(dd̈́ ZbddЄ Zcdd҄ ZdddԄ Zeddք Zfdd؄ Zgeddڄ ZhdS )Contextz.Environment in which eager operations execute.Nc                 C   st  t  | _t| j| _t t| j< || _tj	| dd t
d| _t|  | _d| _d| _d| _t | _d| _|du rzt}|| _d| _|dttfvrtd| |du rt}|tk| _t | _d| _t  | _!t" | _#|| _$d| _%d| _&d| _'d| _(d| _)d| _*t | _+d| _,d| _-d| _.g | _/d| _0i | _1d| _2d| _3d| _4d| _5d| _6d| _7d| _8i | _9t:; <d d| _=dS )a  Creates a new Context.

    Args:
      config: (Optional.) A `ConfigProto` protocol buffer with configuration
        options for the Context. Note that a lot of these options may be
        currently unimplemented or irrelevant when eager execution is enabled.
      device_policy: (Optional.) What policy to use when trying to run an
        operation on a device with inputs which are not on that device. When set
        to None, an appropriate value will be picked automatically. The value
        picked may change between TensorFlow releases.  Defaults to
        DEVICE_PLACEMENT_SILENT.
        Valid values:
        - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not
          correct.
        - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right
          device but raises a warning.
        - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might hide
          performance problems.
        - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors,
          raising errors on the other ones.
      execution_mode: (Optional.) Policy controlling how operations dispatched
        are actually executed. When set to None, an appropriate value will be
        picked automatically. The value picked may change between TensorFlow
        releases.
        Valid values:
        - SYNC: executes each operation synchronously.
        - ASYNC: executes each operation asynchronously. These operations may
          return "non-ready" handles.
      server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution
        on remote devices. GrpcServers need to be started by creating an
        identical server_def to this, and setting the appropriate task_indexes,
        so that the servers can communicate. It will then be possible to execute
        operations on remote devices.

    Raises:
     ValueError: If execution_mode is not valid.
    c                   S   s   t tkS r    )default_execution_mode
EAGER_MODEr   r   r   r   <lambda>      z"Context.__init__.<locals>.<lambda>)is_eagerdevice_specNFz0execution_mode should be None/SYNC/ASYNC. Got %sr   )>_context_id_counterr~   _idr   Z_tensor_cache_deleterrX   r   _configr   ZEagerContextThreadLocalData_starting_device_spec_thread_local_datarf   executing_eagerly_context_switches_context_handle_context_devices_seedr|   r}   _initialize_lock_initializedDEVICE_PLACEMENT_SILENT_device_policy_mirroring_policySYNCASYNCrQ   _default_is_asyncis_tfrt_enabled	_use_tfrt_use_tfrt_distributed_runtimer!   _run_eager_op_as_functionr'   _jit_compile_rewrite_server_def_collective_ops_server_def_collective_leader(_collective_scoped_allocator_enabled_ops"_collective_use_nccl_communication_collective_device_filters_coordination_service_config_device_lock_physical_devices_physical_device_to_index_pluggable_devices_visible_device_list_memory_growth_map_virtual_device_map_optimizer_jit_intra_op_parallelism_threads_inter_op_parallelism_threads_soft_device_placement_log_device_placement_operation_timeout_in_ms_enable_mlir_graph_optimization_optimizer_experimental_options$_python_eager_context_create_counterget_cellZincrease_by_is_global_context)r1   rU   device_policyexecution_mode
server_defr   r   r   r3     sl    ,



zContext.__init__c                 C   sZ   || _ zt| W n  ty2   tt|}Y n0 t|| _| j	durVt
| j	 dS )z,Set a global eager mode seed for random ops.N)r   hash	TypeErrorintnparrayrandomRandom_rngr   r   TFE_ContextClearCaches)r1   seedr   r   r   _set_global_seed  s    
zContext._set_global_seedc                 C   s   | j dtS )a2  Returns a fake operation seed.

      In eager mode, user shouldn't set or depend on operation seed.
      Here, we generate a random seed based on global seed to make
      operation's randomness different and depend on the global seed.

    Returns:
      A fake operation seed based on global seed.
    r   )r   randint	_MAXINT32r>   r   r   r   _internal_operation_seed#  s    
z Context._internal_operation_seedc              	   C   s   g }g }t | j}zd| _d\}}| jp.| j}|durF|j|j }}tt 	|D ]}t 
||}|t| tj|}	|	jdkr|	jdddd}	|t|	 |	jd t ||}
|
dkrT|	j|krT|	j|krT|  jd7  _qTW || _|| _t | n|| _|| _t | 0 dS )	zHelper to initialize devices.r   )NNN	localhost)jobreplicataskro   rp   GPUr   )r   ZTFE_ContextListDevicesr   	_num_gpusr   r   job_name
task_indexrangeZTF_DeviceListCountZTF_DeviceListNamerk   pydevcanonical_name
DeviceSpecfrom_stringr   replacerm   	to_stringrp   ZTF_DeviceListTyper   _logical_devicesr   ZTF_DeleteDeviceList)r1   Zlogical_devicesZcontext_devicesZdevice_listZcurrent_jobcurrent_taskr   iZdev_namespecZdev_typer   r   r   _initialize_logical_devices/  s:    
z#Context._initialize_logical_devicesc              
   C   s  | j r
dS | j | j r*W d   dS | jdu s8J t }z| j }t|| | jdurpt	|| j | j
durt|| j
 | jtkrt|d | jdurt|| j | jdur| jdurt|| j t|| j t|| j t|}W t| nt| 0 | jr0| jr0J d| jdurV| j }t|t| n"| jdurx| j }t|| || _|   d| _ | j rt!| j W d   n1 s0    Y  dS )z5Initialize handle and devices if not already done so.NTzzCannot enable remote execution as well as collective ops at the moment. If this is important to you, please file an issue.)"r   r   r   r   ZTFE_NewContextOptionsrU   rO   ZTFE_ContextOptionsSetConfigr   Z*TFE_ContextOptionsSetDevicePlacementPolicyr   Z$TFE_ContextOptionsSetMirroringPolicyr   r   ZTFE_ContextOptionsSetAsyncr   ZTFE_ContextOptionsSetTfrtr   Z+TFE_ContextOptionsSetTfrtDistributedRuntimeZ)TFE_ContextOptionsSetRunEagerOpAsFunctionr   Z&TFE_ContextOptionsSetJitCompileRewriter   ZTFE_NewContextZTFE_DeleteContextOptionsr   r   TFE_ContextSetServerDef_KEEP_ALIVE_SECSTFE_EnableCollectiveOpsr   r   r   TFE_Py_SetCEagerContext)r1   optsZ
config_strZcontext_handleserver_def_strr   r   r   ensure_initializedO  sh    








zContext.ensure_initializedc                 C   s   | j rt| j d| _d S NT)r   r   r   r   r   r>   r   r   r   mark_as_global_context  s    zContext.mark_as_global_contextc                 C   s$   |     |    t  d S r    )r^   r?   r_   r   TFE_ClearScalarCacher>   r   r   r   _clear_caches  s    zContext._clear_cachesc                 C   s   | j S r    )r   r>   r   r   r   get_server_def  s    zContext.get_server_defc                 C   sD   |st d|| _| jr8| }t| j|| |   |   dS )a  Allow setting a server_def on the context.

    When a server def is replaced, it effectively clears a bunch of caches
    within the context. If you attempt to use a tensor object that was pointing
    to a tensor on the remote device, it will raise an error.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up. As
        long as the client is still alive, the server state for the context will
        be kept alive. If the client is killed (or there is some failure), the
        server will clean up its context keep_alive_secs after the final RPC it
        receives.

    Raises:
      ValueError: if server_def is None.
    server_def is None.N)rQ   r   r   rO   r   r   r   r   r1   r   Zkeep_alive_secsr   r   r   r   set_server_def  s    
zContext.set_server_defc                 C   sD   |st d|| _| jr8| }t| j|| |   |   dS )a  Update a server_def on the context.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.
      keep_alive_secs: Num. seconds after which the remote end will hang up. As
        long as the client is still alive, the server state for the context will
        be kept alive. If the client is killed (or there is some failure), the
        server will clean up its context keep_alive_secs after the final RPC it
        receives.

    Raises:
      ValueError: if server_def is None.
    r   N)rQ   r   r   rO   r   ZTFE_ContextUpdateServerDefr   r   r   r   r   r   update_server_def  s    zContext.update_server_defc                 C   s    | j rt| j |S tddS )aZ  Checks whether a remote worker is alive or not.

    Args:
      worker_name: a string representing the remote worker. It must be a fully
      specified name like "/job:worker/replica:0/task:0".

    Returns:
      a boolean indicating whether the remote worker is alive or not.

    Raises:
      ValueError: if context is not initialized.
    Context is not initialized.N)r   r   ZTFE_ContextCheckAliverQ   )r1   worker_namer   r   r   check_alive  s    zContext.check_alivec                 C   s    | j rt| j  ntddS )a  Sync both local executors and the ones on remote workers.

    In async execution mode, local function calls can return before the
    corresponding remote op/function execution requests are completed. Calling
    this method creates a synchronization barrier for remote executors. It only
    returns when all remote pending nodes are finished, potentially with errors
    if any remote executors are in error state.

    Raises:
      ValueError: if context is not initialized.
    r   N)r   r   ZTFE_ContextSyncExecutorsrQ   r>   r   r   r   sync_executors  s    zContext.sync_executorsc                 C   s    | j rt| j  ntddS )a  Clear errors in both local executors and remote workers.

    After receiving errors from remote workers, additional requests on the fly
    could further taint the status on the remote workers due to the async nature
    of remote execution. Calling this method block on waiting for all pending
    nodes in remote executors to finish and clear their error statuses.

    Raises:
      ValueError: if context is not initialized.
    r   N)r   r   ZTFE_ContextClearExecutorsrQ   r>   r   r   r   clear_executor_errors  s    zContext.clear_executor_errorsr   Tr   c                 C   sx   | j rtd t }||_|r.t||_||_	||_
||_|durnt|tr`|j| ntd|f || _dS )z?Enable distributed coordination service with specified configs.zfConfiguring coordination service type may not be effective because the context is already initialized.NzC`coordinated_jobs` must be a list of job names or None, but got: %s)r   r   warningr   ZCoordinationServiceConfigservice_typer   r   service_leaderenable_health_checkcluster_register_timeout_in_msheartbeat_timeout_in_msrM   listcoordinated_jobsextendrQ   r   )r1   r   r   r   r   r   r   rU   r   r   r   configure_coordination_service  s     

z&Context.configure_coordination_servicec                 C   s   | j S r    )r   r>   r   r   r   coordination_service  s    zContext.coordination_servicec                 C   s   t   t| j|| d S r    )r   r   ZTFE_InsertConfigKeyValuer   r7   r   r   r   set_config_key_value!  s    zContext.set_config_key_valuec                 C   sR   t   t 0}t| j|| t|d}W d    n1 sD0    Y  |S Nzutf-8)	r   r   	tf_bufferr   ZTFE_GetConfigKeyValuer   r	   TF_GetBufferdecode)r1   r8   buffer_r9   r   r   r   get_config_key_value%  s
    
.zContext.get_config_key_valuec                 C   s   t   t| j| d S r    )r   r   ZTFE_DeleteConfigKeyValuer   r<   r   r   r   delete_config_key_value,  s    zContext.delete_config_key_valuec                 C   s$   | j rt| j || ntddS )zReport error to other members in a multi-client cluster.

    Args:
      error_code: a `tf.errors` error code.
      error_message: a string. The error message.
    r   N)r   r   ZTFE_ReportErrorToClusterrQ   )r1   
error_codeerror_messager   r   r   report_error_to_cluster0  s
    
zContext.report_error_to_clusterc                 C   s   | j durt| j  dS )z2Clear kernel cache and reset all stateful kernels.N)r   r   r   r>   r   r   r   clear_kernel_cache=  s    
zContext.clear_kernel_cachec                 C   s   |st d|| _| jdur~td | jB | js8J | j }t	| j| | 
  |   W d   n1 st0    Y  dS )a*  Enable distributed collective ops with an appropriate server_def.

    Args:
      server_def: A tensorflow::ServerDef proto. Enables execution on remote
        devices.

    Raises:
      ValueError: if server_def is None.
      RuntimeError: if this method is not called at program startup.
    r   NzhEnabling collective ops after program startup may cause error when accessing previously created tensors.)rQ   r   r   r   r   r   r   rO   r   r   r   r   )r1   r   r   r   r   r   enable_collective_opsB  s    



zContext.enable_collective_opsZCollectiveReduceFc                 C   sn   | j dur@| j |ks2| j|ks2| j|ks2| j|kr<tdndS | jdurRtd|| _ || _|| _|| _dS )a  Configure collective ops.

      Collective group leader is necessary for collective ops to run, other
      configurations are mainly for the purpose of performance.

    Args:
      collective_leader: a device string for collective leader, e.g.
        "/job:worker/replica:0/task:0"; empty string means local execution of
          collective ops.
      scoped_allocator_enabled_ops: a tuple or a list of op names for scoped
        allocator to run with.
      use_nccl_communication: whether to use nccl communication for collective
        ops.
      device_filters: a tuple or a list of device strings. If set, corresponding
        task can only see the devices filtered by these device filters.

    Raises:
      RuntimeError: if this method is not called at program startup.
    Nz&Collective ops are already configured.z4Collective ops must be configured at program startup)r   r   r   r   rQ   r   RuntimeError)r1   Zcollective_leaderZscoped_allocator_enabled_opsZuse_nccl_communicationdevice_filtersr   r   r   configure_collective_ops^  s$    



z Context.configure_collective_opsc                 C   s   |    t| j|| dS )a  Abort the collective ops.

    This is intended to be used when a peer failure is detected, which allows
    the user to handle the case instead of hanging. This aborts all on-going
    collectives. After all subsequent collectives error immediately, and you
    need to reset_context() to use collectives again.

    Args:
      code: a `tf.errors` error code.
      message: a string. The error message.
    N)r   r   ZTFE_AbortCollectiveOps_handle)r1   codemessager   r   r   abort_collective_ops  s    zContext.abort_collective_opsc                 C   s   |    t| j|| dS )a  Check collective peer health.

    This probes each task to see if they're still alive. Note that restarted
    tasks are considered a different one, and they're considered not healthy.

    This should only be used in multi client multi worker training.

    Args:
      task: a task string, must be in the format of /job:xxx/replica:0/task:N.
      timeout_in_ms: an integer, the timeout. If zero, there's no timeout.

    Raises:
      tf.errors.UnavailableError: when a peer is down.
      tf.errors.FailedPreconditionError: when a peer is a different one from the
        one this task has talked to, e.g. the peer has restarted.
      tf.errors.InvalidArgumentError: when the task string is invalid.
    N)r   r   Z TFE_CollectiveOpsCheckPeerHealthr  )r1   r   timeout_in_msr   r   r    check_collective_ops_peer_health  s    
z(Context.check_collective_ops_peer_healthc                 C   s   | j d u rtd| j S Nz"Context must be initialized first.)r   AssertionErrorr>   r   r   r   r    s    
zContext._handlec                 C   s   | j d u rtd| j S r  )r   r  r>   r   r   r   _devices  s    
zContext._devicesc                 C   sT   | j d u rdS | j}dt| g}t|D ]\}}|d||f  q*d|S d S )Nz:Eager TensorFlow Context. Devices currently uninitialized.z(Eager TensorFlow Context with %d devicesz   Device %d: %s
)r   r  r5   	enumeraterk   join)r1   deviceslinesr   dr   r   r   __str__  s    
zContext.__str__c                 c   sp   | j }|j}|tk|_|tkr.| jdtd z"dV  W ||_|tkrl| j  n||_|tkrj| j  0 dS )z;A context manager to allow setting the mode to EAGER/GRAPH.FN)r   r   r   context_switchesrh   ri   rl   )r1   modectxZold_is_eagerr   r   r   _mode  s    
zContext._modec                 C   s   | j jS )z;Returns True if current thread has eager executing enabled.)r   r   r>   r   r   r   r     s    zContext.executing_eagerlyc                 C   s   t | j jS zPer-device cache for scalars.)r   r   r^   r>   r   r   r   r^     s    zContext.ones_rank_cachec                 C   s   t | j jS r&  )r   r   r_   r>   r   r   r   r_     s    zContext.zeros_cachec                 C   s   | j jS )z*Returns scope name for the current thread.r   
scope_namer>   r   r   r   r(    s    zContext.scope_namec                 C   s   || j _dS )z'Sets scope name for the current thread.Nr'  )r1   sr   r   r   r(    s    c                 C   s   | j jS )z/Returns the device name for the current thread.)r   device_namer>   r   r   r   r*    s    zContext.device_namec                 C   s   | j jS )z/Returns the device spec for the current thread.)r   r   r>   r   r   r   r     s    zContext.device_specc                 C   s   || j _|| j _d S r    )r   r*  r   )r1   r*  r   r   r   r   _set_device  s    zContext._set_devicec                 C   s.   t |tr|j}nt|r$| }t| |S )ao  Context-manager to force placement of operations and Tensors on a device.

    Args:
      name: Name of the device or None to get default placement.

    Returns:
      Context manager that forces device placement.

    Raises:
      ValueError: If name is not a string or is an invalid device name.
      RuntimeError: If device scopes are not properly nested.
    )rM   rm   ro   r   Zis_device_specr   _EagerDeviceContextr1   ro   r   r   r   r     s
    

zContext.devicec                 C   s   | j S )z=List of the names of devices available to execute operations.)r  r>   r   r   r   r    s    zContext.devicesc                 C   sR   |    t .}t| j| t|d}W d    n1 sD0    Y  |S r  )	r   r   r  r   ZTFE_HostAddressSpacer   r	   r  r  )r1   r  Zaddress_spacer   r   r   host_address_space  s
    
.zContext.host_address_spacec                 C   s   |   rtS tS )z'Gets execution mode for current thread.)is_asyncr   r   r>   r   r   r   r     s    zContext.execution_modec                 C   s~   |dt tfvrtd| |du r&t }|tk}|  |krz| jdurt| j  t|}|| j_t	
| j|  n|| _dS )z'Sets execution mode for current thread.Nz0Execution mode should be None/SYNC/ASYNC. Got %s)r   r   rQ   r/  r   r
   waitnew_executorr   r   TFE_ContextSetExecutorForThreadhandler   )r1   r#  Zenable_asyncexecutor_newr   r   r   r      s     


c                 C   s   | j d ur| j S | jS d S r    )r   r
   r/  r   r>   r   r   r   r/  6  s    

zContext.is_asyncc                 C   s   |    tt| jS r    )r   r
   Executorr   ZTFE_ContextGetExecutorForThreadr   r>   r   r   r   r
   <  s    
zContext.executorc                 C   s   |    t| j|  d S r    )r   r   r2  r   r3  )r1   er   r   r   r
   B  s    c                    s     t  jdur& j jdurLjr>tjjntjj j	j
_jdur^j _jdurpj _jdurj _n
  _jdurj _jdurj _t }| j_|tjjjkrd j_jdurj j_ fdd} fdd}|d |d |d	 |d
 |d |d |d |d |d |d |d |d |d |d |d |d |d |d j !dd}|dur| j	j"_#d j$d< d j$d< j%D ]Z}|j&vrqԈj'!|}|du r j$|j(  d7  < n j$|j(  t)|7  < qԈ* } j+,| j-rVj- j_.j/r j	j"}t0j1j2|_3|j4j5dd= j/D ]}	|j4j56|	 qj7rd j_8j9r؈ j:dd= j9D ]}
 j:6|
 qĈj;r jj<j;  S )z7Return the ConfigProto with all runtime deltas applied.NTc                    s>   j | d }|d u rd S t jj| |r0tjjntjj d S r    )	r   r;   setattrgraph_optionsrewrite_optionsr   RewriterConfigONOFFoptionZtogglerU   r1   r   r   rewriter_toggleo  s    

z'Context.config.<locals>.rewriter_togglec                    s.   j | d }|d u rd S t jj| | d S r    )r   r;   r7  r8  r9  r=  r?  r   r   rewriter_boolx  s    z%Context.config.<locals>.rewriter_boollayout_optimizerconstant_foldingshape_optimization	remappingarithmetic_optimizationdependency_optimizationloop_optimizationfunction_optimizationdebug_stripperdisable_model_pruningscoped_allocator_optimizationpin_to_host_optimizationimplementation_selectorauto_mixed_precisionuse_plugin_optimizersdisable_meta_optimizer$auto_mixed_precision_onednn_bfloat16auto_mixed_precision_mklmin_graph_nodesr   CPUr   r   )=_initialize_physical_devicesr   rN   r   ZCopyFromr   OptimizerOptionsON_1r<  r8  optimizer_optionsglobal_jit_levelr   intra_op_parallelism_threadsr   inter_op_parallelism_threadsr   allow_soft_placementr   r   log_device_placementr   operation_timeout_in_msr   TF_IsMlirBridgeEnabledexperimentalZmlir_bridge_rolloutExperimentalZMLIR_BRIDGE_ROLLOUT_ENABLEDenable_mlir_bridger   enable_mlir_graph_optimizationr   r;   r9  rT  device_countr   r   r   rp   r5   _compute_gpu_optionsgpu_optionsZ	MergeFromr   Zcollective_group_leaderr   r   r:  r;  rL  Zscoped_allocator_optsZ	enable_oprk   r   Zcollective_ncclr   r  r   Zcoordination_config)r1   Zis_mlir_bridge_enabledr@  rA  nodesdevvirtual_devicesrg  r9  opfr   r?  r   rU   G  s    










	








zContext.configc                 C   s`  g }g }d}t  }| d}| j}|}|D ]}||vr*|| q*|D ]}|d7 }|| jvr^qF| j| }	||	 |t| | jrF| j	|g }
g }g }g }|
D ]2}||j
 ||j |jdur||j q|rt|t|krtd|tjjj|||d qF|s<|r<t|dkr2td| }nd}tj|d|tjj|d	d
S )zBuild the GPUOptions proto.r   r   Nz2priority must be specified for all virtual devices)Zmemory_limit_mbpriorityZdevice_ordinalz/Memory growth cannot differ between GPU devices,)rj  )allow_growthvisible_device_listra  )setlist_physical_devicesr   rk   r   r   addrP   r   r;   rv   rt   ru   r5   rQ   r   Z
GPUOptionsrb  ZVirtualDevicesrl   r  )r1   rq  rj  Z	gpu_indexZmemory_growthsZgpu_devicesZpluggable_devicesZcompatible_devicesri  ZgrowthZvdevsZdevice_ordinalsZdevice_limitsrn  Zvirt_devrp  r   r   r   rf    s`    





zContext._compute_gpu_optionsc                 C   s8   | j jdu r0| j}| jdu r"d|_t|d| j _| j jS )zReturns function call options for current thread.

    Note that the returned object is still referenced by the eager context.

    Returns: the FunctionCallOptions for current thread.
    NT)rJ   )r   function_call_optionsrU   r   r]  rE   rT   r   r   r   ru    s    

zContext.function_call_optionsc                 C   s   || j _dS )z1Returns function call options for current thread.N)r   ru  r1   optionsr   r   r   ru    s    c                 C   s   |    | jS )z3The number of GPUs available to execute operations.)r   r   r>   r   r   r   num_gpus  s    zContext.num_gpusc                 C   s   |    t| j| dS )zAdd a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
    N)r   r   ZTFE_ContextAddFunctionr  )r1   fnr   r   r   add_function  s    	zContext.add_functionc                 C   s(   |    | }t| j|t| dS )zAdd a function definition to the context.

    Once added, the function (identified by its name) can be executed like any
    other operation.

    Args:
      fdef: A FunctionDef protocol buffer message.
    N)r   rO   r   ZTFE_ContextAddFunctionDefr  r5   )r1   fdefZfdef_stringr   r   r   add_function_def!  s
    	
zContext.add_function_defc                 C   sX   t  *}t| j|| t|}W d   n1 s80    Y  t }|	| |S )zGet a function definition from the context.

    Args:
      name: function signature name.

    Returns:
      The requested FunctionDef.

    Raises:
      tf.errors.NotFoundError: if name is not the name of a registered function.
    N)
r   r  r   ZTFE_ContextGetFunctionDefr  r	   r  r   FunctionDefParseFromString)r1   ro   r  
proto_dataZfunction_defr   r   r   get_function_def/  s    
(
zContext.get_function_defc                 C   s   |    t| j||| dS )z<Calls TFE_RegisterCustomDevice. See the non-member function.N)r   r   ZTFE_Py_RegisterCustomDevicer  )r1   device_capsuler*  device_info_capsuler   r   r   register_custom_deviceC  s    
zContext.register_custom_devicec                 C   s   |    t| j|S )zPack multiple `EagerTensor`s of the same dtype and shape.

    Args:
      tensors: a list of EagerTensors to pack.

    Returns:
      A packed EagerTensor.
    )r   r   ZTFE_Py_PackEagerTensorsr  )r1   tensorsr   r   r   pack_eager_tensorsJ  s    	zContext.pack_eager_tensorsc                 C   s   |    tt| jS )zGet a list of names of registered functions.

    Returns:
      A set of names of all registered functions for the context.
    )r   rr  r   ZTFE_ContextListFunctionNamesr  r>   r   r   r   list_function_namesV  s    zContext.list_function_namesc                 C   s   |    t| j| dS )zRemove a function from the context.

    Once removed, the function cannot be executed anymore.

    Args:
      name: function signature name.
    N)r   r   ZTFE_ContextRemoveFunctionr  r-  r   r   r   remove_function_  s    zContext.remove_functionc                 C   s   |    tt| j|S )z)Check if a function `name` is registered.)r   boolr   ZTFE_ContextHasFunctionr  r-  r   r   r   has_functionj  s    zContext.has_functionc                 C   s   || j jvr| j j| dS )al  Add a post-op callback to the context.

    A post-op callback is invoked immediately after an eager operation or
    function has finished execution or after a op has been added to a graph,
    providing access to the op's type, name input and output tensors. Multiple
    op callbacks can be added, in which case the callbacks will be invoked in
    the order in which they are added.

    Args:
      callback: a callable of the signature `f(op_type, inputs, attrs, outputs,
        op_name=None, graph=None)`. See doc strings in `op_callbacks.py` for
        details on the function signature and its semantics.
    N)r   op_callbacksrk   r1   callbackr   r   r   add_op_callbacko  s    zContext.add_op_callbackc                 C   s,   || j jvrtd| j j| j j|= dS )zRemove an already-registered op callback.

    Args:
      callback: The op callback to be removed.

    Raises:
      KeyError: If `callback` is not already registered.
    zOThe specified op callback has not been registered, and hence cannot be removed.N)r   r  KeyErrorindexr  r   r   r   remove_op_callback  s
    	zContext.remove_op_callbackc                 C   s   | j jS r    )r   r  r>   r   r   r   r    s    zContext.op_callbacksc                 C   s   | j jS r    r   invoking_op_callbacksr>   r   r   r   r    s    zContext.invoking_op_callbacksc                 C   s   || j _d S r    r  )r1   r9   r   r   r   r    s    c                    s    j  |s& jdur&W d   dS t }dd |D  _dd t jD  _t }dd |D  _t j _	 fdd jD  _
W d   n1 s0    Y     dS )zGets local devices visible to the system.

    Args:
      reinitialize: If True, reinitializes self._physical_devices  so that
        dynamic registered devices will also be visible to the python front-end.
    Nc                 S   s*   g | ]"}t | | d d dqS :r   r   rw   r  split.0r   r   r   r   
<listcomp>  s   z8Context._initialize_physical_devices.<locals>.<listcomp>c                 S   s   i | ]\}}||qS r   r   )r  r   pr   r   r   
<dictcomp>  s   z8Context._initialize_physical_devices.<locals>.<dictcomp>c                 S   s*   g | ]"}t | | d d dqS r  r  r  r   r   r   r    s   c                    s&   i | ]}|j d ks| jv r|dqS )r   N)rp   r   r  r>   r   r   r    s   )r   r   r   ZTF_ListPhysicalDevicesr  r   ZTF_ListPluggablePhysicalDevicesr   r   r   r   _import_config)r1   ZreinitializeZdevsZpluggable_devsr   r>   r   rV    s&    	
&z$Context._initialize_physical_devicesc                 C   s   |  d dS )z)Gets local devices visible to the system.TN)rV  r>   r   r   r   reinitialize_physical_devices  s    z%Context.reinitialize_physical_devicesc                    s.   |     du rt| jS  fdd| jD S )ae  List local devices visible to the system.

    This API allows a client to query the devices before they have been
    initialized by the eager runtime. Additionally a user can filter by device
    type, to get only CPUs or GPUs.

    Args:
      device_type: Optional device type to limit results to

    Returns:
      List of PhysicalDevice objects.
    Nc                    s   g | ]}|j  kr|qS r   rp   r  r  r   r   r    r   z1Context.list_physical_devices.<locals>.<listcomp>)rV  r   r   r1   rp   r   r  r   rs    s    
zContext.list_physical_devicesc                 C   s   t |tstd|f | jdu s,|| jvr:td|f | j| }t|}d|v rz*|d d\}}t|t|f|d< W n" ty   td|d  Y n0 |S )zReturns details about a physical devices.

    Args:
      device: A `tf.config.PhysicalDevice` returned by
        `tf.config.list_physical_devices` or `tf.config.get_visible_devices`.

    Returns:
      A dict with string keys.
    z6device must be a tf.config.PhysicalDevice, but got: %sNzcThe PhysicalDevice must be one obtained from calling `tf.config.list_physical_devices`, but got: %sZcompute_capability.z;Device returned compute capability an in invalid format: %s)	rM   rw   rQ   r   r   ZTF_GetDeviceDetailsr  r   r  )r1   r   r  detailsmajorminorr   r   r   get_device_details  s*    




zContext.get_device_detailsc                 C   s  | j du rdS | j jdd}|dkrrdd | jD }|dkrL| g d n&|dkrr| |d dd t|D  dd | jD }|sdS | j jd	d}g }| j jj}|r|	d
D ]2}t
|t|krtd| ||t
|  qn|}|dur|d| }| |d	 dS )a%  Import config if passed in during construction.

    If Context was created with a ConfigProto such as when calling
    tf.compat.v1.enable_eager_execution(), then we need to pull out the
    various pieces we might be replacing and import then into our internal
    class representation.
    NrU  r   c                 S   s   g | ]}|j d kr|qS )rU  r  r  r   r   r   r    r   z*Context._import_config.<locals>.<listcomp>r   c                 S   s   g | ]
}t  qS r   )rq   )r  _r   r   r   r    r   c                 S   s   g | ]}|j d kr|qS )r   r  r  r   r   r   r    r   r   ro  z Invalid visible device index: %s)r   re  r;   r   set_visible_devices set_logical_device_configurationr   rg  rq  r  r   r5   rQ   rk   )r1   num_cpusZcpusZgpusZ	gpu_countZvisible_gpusZvisible_indicesr  r   r   r   r    s4    


zContext._import_configc                    s.   |     du rt| jS  fdd| jD S )zReturn logical devices.Nc                    s   g | ]}|j  kr|qS r   r  r  r  r   r   r  /  r   z0Context.list_logical_devices.<locals>.<listcomp>)r   r   r   r  r   r  r   list_logical_devices)  s    
zContext.list_logical_devicesc                    s.   |     du rt| jS  fdd| jD S )z Get the list of visible devices.Nc                    s   g | ]}|j  kr|qS r   r  r  r  r   r   r  8  s   z/Context.get_visible_devices.<locals>.<listcomp>)rV  r   r   r  r   r  r   get_visible_devices1  s    

zContext.get_visible_devicesc                    s   |    t|ts|g}|D ]@}|| jvr:tdt|  dur|j krtdt| qg } dur~ fdd| jD }||7 }| j|krdS | jdurt	d|| _dS )z Set the list of visible devices.Unrecognized device: %sNc                    s   g | ]}|j  kr|qS r   r  r  r  r   r   r  K  s   z/Context.set_visible_devices.<locals>.<listcomp>z:Visible devices cannot be modified after being initialized)
rV  rM   r   r   rQ   reprrp   r   r   r  )r1   r  rp   r   rq  r   r  r   r  <  s*    




zContext.set_visible_devicesc                 C   s   |    |   t| j|S )z-Returns a dict of memory info for the device.)rV  r   r   ZTFE_GetMemoryInfor   r1   ri  r   r   r   get_memory_infoZ  s    zContext.get_memory_infoc                 C   s"   |    |   t| j| dS )z/Resets the tracked memory stats for the device.N)rV  r   r   ZTFE_ResetMemoryStatsr   r  r   r   r   reset_memory_stats`  s    zContext.reset_memory_statsc                 C   s,   |    || jvr"tdt| | j| S )z5Get if memory growth is enabled for a PhysicalDevice.r  )rV  r   rQ   r  r   r  r   r   r   get_memory_growthf  s    
zContext.get_memory_growthc                 C   s   |    || jvr"tdt| || jv r4td|jdkrP|| jvrPtd| j||krddS | j	durvt
d|| j|< dS )z<Set if memory growth should be enabled for a PhysicalDevice.r  zBCannot set memory growth on device when virtual devices configuredr   z=Cannot set memory growth on non-GPU and non-Pluggable devicesNz;Physical devices cannot be modified after being initialized)rV  r   rQ   r  r   rp   r   r   r;   r   r  )r1   ri  enabler   r   r   set_memory_growtho  s$    


zContext.set_memory_growthc                 C   s.   |    || jvr"tdt| | j|S )z:Get the virtual device configuration for a PhysicalDevice.r  )rV  r   rQ   r  r   r;   r  r   r   r    get_logical_device_configuration  s    
z(Context.get_logical_device_configurationc                 C   s   |    || jvr"tdt| |jdkrn|D ]:}|jdurFtd|jdurXtd|jdkr0tdq0n6|jdkr|D ]}|jdu r|td	q|ntd
|j | j	||krdS | j
durtd|| j|< dS )z:Set the virtual device configuration for a PhysicalDevice.r  rU  NzFSetting memory limit on CPU virtual devices is currently not supportedzPSetting experimental_priority on CPU virtual  devices is currently not supportedr   zVSetting experimental_device_ordinal on CPU virtual  devices is currently not supportedr   z8Setting memory limit is required for GPU virtual devicesz(Virtual devices are not supported for %sz:Virtual devices cannot be modified after being initialized)rV  r   rQ   r  rp   rt   ru   rv   r   r;   r   r  )r1   ri  rj  Zvdevr   r   r   r    s6    







z(Context.set_logical_device_configurationc                    s   | j p
| j}dg |dur0 d|j|jf   fdd| dD }|   t|dkrdtdt	
| j|| |   dS )	a  Set virtual CPU devices in context.

    If virtual CPU devices are already configured at context initialization
    by tf.config.set_logical_device_configuration(), this method should not be
    called.

    Args:
      num_cpus: Number of virtual CPUs.
      prefix: Device name prefix.

    Raises:
     RuntimeError: If virtual CPUs are already configured at context
     initialization.
    z/deviceNz/job:%s/replica:0/task:%dc                    s    g | ]}|j t r|qS r   )ro   
startswithtupler  Zlocal_prefixr   r   r    s   z3Context.set_logical_cpu_devices.<locals>.<listcomp>rU  r   z.Virtual CPUs already set, cannot modify again.)r   r   rk   r   r   r  r   r5   r  r   ZTFE_SetLogicalCpuDevicesr   r   )r1   r  prefixr   Zlogical_local_devicesr   r  r   set_logical_cpu_devices  s    
zContext.set_logical_cpu_deviceshloc                 C   s   t | j||||S r    )r   ZTF_GetCompilerIrr   )r1   r*  function_nameargsstager   r   r   get_compiler_ir  s    
zContext.get_compiler_irz*XLA:CPU and XLA:GPU devices are deprecated)Z	warn_oncec                 C   s   t   dS )z1Enables XLA:CPU and XLA:GPU devices registration.N)r   ZTF_EnableXlaDevicesr>   r   r   r   enable_xla_devices  s    zContext.enable_xla_devicesc                 C   s   t  S r    )r   r`  r>   r   r   r   rc    s    zContext.enable_mlir_bridgec                 C   s   | j S r    )r   r>   r   r   r   rd    s    z&Context.enable_mlir_graph_optimizationc                 C   s   t | d | j_d S r    )r   ZTF_EnableMlirBridger   ru  r1   enabledr   r   r   rc    s    
c                 C   s   || _ d | j_d S r    )r   r   ru  r  r   r   r   rd    s    c                 C   s$   | j jjj}|tjjkp"|tjjkS r    )rU   r8  rY  rZ  r   rW  rX  ZON_2)r1   levelr   r   r   optimizer_jit  s    
zContext.optimizer_jitc                 C   s   || _ d | j_d S r    )r   r   ru  r  r   r   r   r    s    c                    s   | j jji   fdd} fdd}|d |d |d |d |d	 |d
 |d |d |d |d |d |d |d |d |d |d |d |d jdkrΈj d<  S )zhGet experimental options for the optimizer.

    Returns:
      Dictionary of current option values
    c                    s&   t | }|dkr"|tjjk | < d S r{   )getattrr   r:  r;  )r>  attrrw  r9  r   r   r@    s    
zCContext.get_optimizer_experimental_options.<locals>.rewriter_togglec                    s   t |  | < d S r    )r  )r>  r  r   r   rA    s    zAContext.get_optimizer_experimental_options.<locals>.rewriter_boolrB  rC  rD  rE  rF  rG  rH  rI  rJ  rK  rL  rM  rN  rO  rP  rQ  rR  rS  r   rT  )rU   r8  r9  rT  )r1   r@  rA  r   r  r   "get_optimizer_experimental_options  s2    


z*Context.get_optimizer_experimental_optionsc                 C   s   | j | d| j_dS )zjSet experimental options for the optimizer.

    Args:
      options: Dictionary of options to modify
    N)r   updater   ru  rv  r   r   r   "set_optimizer_experimental_options#  s    z*Context.set_optimizer_experimental_optionsc                 C   s   | j jS r    )rU   r[  r>   r   r   r   r[  -  s    z$Context.intra_op_parallelism_threadsc                 C   s*   | j |krd S | jd ur td|| _ d S )Nz=Intra op parallelism cannot be modified after initialization.)r   r   r  r1   num_threadsr   r   r   r[  1  s    

c                 C   s   | j jS r    )rU   r\  r>   r   r   r   r\  <  s    z$Context.inter_op_parallelism_threadsc                 C   s*   | j |krd S | jd ur td|| _ d S )Nz=Inter op parallelism cannot be modified after initialization.)r   r   r  r  r   r   r   r\  @  s    

c                 C   s   | j jS r    )rU   r]  r>   r   r   r   soft_device_placementK  s    zContext.soft_device_placementc                 C   s*   | j d urt| j| || _d | j_d S r    )r   r   Z!TFE_ContextSetSoftDevicePlacementr  r   r   ru  r1   r  r   r   r   r  O  s    
c                 C   s   | j jS r    )rU   r^  r>   r   r   r   r^  W  s    zContext.log_device_placementc                 C   s*   | j d urt| j| || _d | j_d S r    )r   r   Z TFE_ContextSetLogDevicePlacementr  r   r   ru  r  r   r   r   r^  [  s    
c                 C   s   | j S r    )r   r>   r   r   r   r   c  s    z Context.run_eager_op_as_functionc                 C   s"   | j d urt| j| || _d S r    )r   r   Z"TFE_ContextSetRunEagerOpAsFunctionr  r   r  r   r   r   r   g  s    
c                 C   s   | j S r    )r   r>   r   r   r   r$   m  s    zContext.jit_compile_rewritec                 C   s"   | j d urt| j| || _d S r    )r   r   ZTFE_ContextSetJitCompileRewriter  r   r  r   r   r   r$   q  s    
c                 C   s   | j d urt| jS | jS r    )r   r   Z#TFE_ContextGetDevicePlacementPolicyr  r   r>   r   r   r   r   w  s    
zContext.device_policyc                 C   s:   |d u rt }| j|kr6|| _| jd ur6t| j| j d S r    )r   r   r   r   Z.TFE_ContextSetThreadLocalDevicePlacementPolicyr  )r1   policyr   r   r   r     s    

c                 C   s   | j S r    )r   r>   r   r   r   use_tfrt  s    zContext.use_tfrtc                 C   s<   t |tstdt| | j|kr8| jr2td|| _dS )zSets whether to use TFRT.Expecting a boolean but got %s0use_tfrt should be set before being initialized.N)rM   r  rQ   rS   r   r   )r1   Ztfrtr   r   r   r    s    

c                 C   s   | j S r    )r   r>   r   r   r   use_tfrt_distributed_runtime  s    z$Context.use_tfrt_distributed_runtimec                 C   s<   t |tstdt| | j|kr8| jr2td|| _dS )a/  Sets whether to use TFRT distributed runtime.

    This is only effective when use_tfrt is also true. Note that currently TFRT
    distributed runtime is not function complete and this config is for testing
    only.
    Args:
      enable: A boolean to set whether to use TFRT distributed runtime.
    r  r  N)rM   r  rQ   rS   r   r   r  r   r   r   r    s    


c                 C   s   | j jS r    )rU   r_  r>   r   r   r   r_    s    zContext.operation_timeout_in_msc                 C   s*   | j |krd S | jd ur td|| _ d S )Nz:Operation timeout cannot be modified after initialization.)r   r   r  )r1   r  r   r   r   r_    s    

c                 C   s   |    t| j dS )zEnables tracing of op execution via RunMetadata.

    To retrieve the accumulated metadata call context.export_run_metadata()
    and to stop tracing call context.disable_run_metadata().
    N)r   r   ZTFE_ContextEnableRunMetadatar  r>   r   r   r   enable_run_metadata  s    zContext.enable_run_metadatac                 C   s   | j s
dS t| j  dS z1Disables tracing of op execution via RunMetadata.N)r   r   ZTFE_ContextDisableRunMetadatar>   r   r   r   disable_run_metadata  s    zContext.disable_run_metadatac                 C   s   |    t| j dS )zEnables graph collection of executed functions.

    To retrieve the accumulated graphs call context.export_run_metadata()
    and to stop collecting graphs call context.disable_graph_collection().
    N)r   r   Z TFE_ContextEnableGraphCollectionr  r>   r   r   r   enable_graph_collection  s    zContext.enable_graph_collectionc                 C   s   | j s
dS t| j  dS z0Disables graph collection of executed functions.N)r   r   Z!TFE_ContextDisableGraphCollectionr>   r   r   r   disable_graph_collection  s    z Context.disable_graph_collectionc                 C   sf   | j s
dS t (}t| j | t|}W d   n1 s@0    Y  t }|	t
| |S )a  Returns a RunMetadata proto with accumulated information.

    The returned protocol buffer contains information since the most recent call
    to either enable_run_metadata or export_run_metadata.

    Returns:
      A RunMetadata protocol buffer. Or None if not enabled.
    N)r   r   r  r   ZTFE_ContextExportRunMetadatar	   r  r   ZRunMetadatar~  r   as_bytes)r1   r  r  Zrun_metadatar   r   r   export_run_metadata  s    	
(zContext.export_run_metadatac                 C   s   | j S )z$Returns a stack of context switches.)r   r>   r   r   r   r"    s    zContext.context_switches)NNNN)r   Tr   r   N)r   r  FN)F)N)N)N)N)r   )r  )ir@   rA   rB   rC   r3   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rV   r   r  r  r  r  r  r  r  r  r  r  r  r!  r   contextmanagerr%  r   r^   r_   r(  rW   r*  r   r+  r   r  r.  r   r/  r
   rU   rf  ru  rx  rz  r|  r  r  r  r  r  r  r  r  r  r  rV  r  rs  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  rc  rd  r  r  r  r[  r\  r  r^  r   r$   r   r  r  r_  r  r  r  r  r  r"  r   r   r   r   r     s^      
i 4!     

    
+












v:

	



'
.


		$








)
























		r   c                   @   s0   e Zd ZdZg dZdd Zdd Zdd Zd	S )
r,  zAContext-manager forcing placement of ops and Tensors on a device._device_name_ctx_stackc                 C   s   || _ || _g | _d S r    r  )r1   r$  r*  r   r   r   r3     s    z_EagerDeviceContext.__init__c                 C   s  | j }|j}|j}| j}||f}zt| \}}W n tyV   tdt||f Y n ty   |d urt	|t
jstdt||f tj|}|rt|}n|  tj|jd }||}ntjd}| }||ft|< Y n0 ||| | j|||f d S )Nz*Expecting a string device name. Got %s(%s)r   r   )r  r*  r   r  _device_parsing_cacher   rQ   rS   r  rM   sixstring_typesr   r   r   copyr   r   Zmake_merged_specr   r+  r  rk   )r1   r$  old_device_nameold_device_specZnew_device_name	cache_keynew_device_specr   r   r   r   	__enter__  s<    


z_EagerDeviceContext.__enter__c                 G   s@   | j }| jd \}}}|j|ur(td| jd= ||| d S )Nrm  z1Exiting device scope without proper scope nesting)r  r  r   r  r+  )r1   Zex_infor$  r  r  r  r   r   r   __exit__$  s    
z_EagerDeviceContext.__exit__N)r@   rA   rB   rC   rD   r3   r  r  r   r   r   r   r,    s
   "r,  c                 C   s   t |  |   | ad S r    )r   ZTFE_Py_SetEagerContextr   _contextr$  r   r   r   _set_context_locked2  s    
r  c                 C   s0   t  t|  W d    n1 s"0    Y  d S r    )_context_lockr  r  r   r   r   _set_context9  s    r  c                  C   s>   t & td u rt } t|  W d    n1 s00    Y  d S r    )r  r  r   r  r  r   r   r   _create_context>  s    r  c                   C   sV   t   t  t$ tdur*t  daW d   n1 s>0    Y  t  i adS )zWClears and re-initializes the singleton context.

  Should only be used for testing.
  N)	gccollectr   r   r  r  r   r  r  r   r   r   r   _reset_contextE  s    
"r  c                   C   s   t   dS )z[Clears and re-initializes the TF JIT compiler flags.

  Should only be used for testing.
  N)r   ZTF_ResetJitCompilerFlagsr   r   r   r   _reset_jit_compiler_flagsY  s    r  c                   C   s   t du rt  t S )z#Returns a singleton context object.N)r  r  r   r   r   r   contexta  s    r  c                   C   s   t S )zAReturns current context (or None if one hasn't been initialized).)r  r   r   r   r   r   h  s    r   c                   C   s   t    dS )zInitialize the context.N)r  r   r   r   r   r   r   m  s    r   c                   C   s   t    dS )zInitialize the virtual devices.N)r  r   r   r   r   r   initialize_logical_devicesr  s    r  c                 C   s   t  |  dS )zSets the eager mode seed.N)r  r   )r   r   r   r   set_global_seedw  s    r  c                   C   s   t  jS )zReturns the eager mode seed.)r  r   r   r   r   r   global_seed|  s    r  c                   C   s
   t   S )z:Returns the operation seed generated based on global seed.)r  r   r   r   r   r   internal_operation_seed  s    r  r   c                  C   s   t  } | du rttkS |  S )a\  Checks whether the current thread has eager execution enabled.

  Eager execution is enabled by default and this API returns `True`
  in most of cases. However, this API might return `False` in the following use
  cases.

  *  Executing inside `tf.function`, unless under `tf.init_scope` or
     `tf.config.run_functions_eagerly(True)` is previously called.
  *  Executing inside a transformation function for `tf.dataset`.
  *  `tf.compat.v1.disable_eager_execution()` is called.

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function` after `tf.config.run_functions_eagerly(True)` is called:

  >>> tf.config.run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  N)r   r   r   r   r  r   r   r   r     s    6c                   C   s   t  S )a*  Checks whether the current thread has eager execution enabled.

  Eager execution is typically enabled via
  `tf.compat.v1.enable_eager_execution`, but may also be enabled within the
  context of a Python function via tf.contrib.eager.py_func.

  When eager execution is enabled, returns `True` in most cases. However,
  this API might return `False` in the following use cases.

  *  Executing inside `tf.function`, unless under `tf.init_scope` or
     `tf.config.run_functions_eagerly(True)` is previously called.
  *  Executing inside a transformation function for `tf.dataset`.
  *  `tf.compat.v1.disable_eager_execution()` is called.

  >>> tf.compat.v1.enable_eager_execution()

  General case:

  >>> print(tf.executing_eagerly())
  True

  Inside `tf.function`:

  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  False

  Inside `tf.function`
  after  `tf.config.run_functions_eagerly(True)` is called:

  >>> tf.config.run_functions_eagerly(True)
  >>> @tf.function
  ... def fn():
  ...   with tf.init_scope():
  ...     print(tf.executing_eagerly())
  ...   print(tf.executing_eagerly())
  >>> fn()
  True
  True
  >>> tf.config.run_functions_eagerly(False)

  Inside a transformation function for `tf.dataset`:

  >>> def data_fn(x):
  ...   print(tf.executing_eagerly())
  ...   return x
  >>> dataset = tf.data.Dataset.range(100)
  >>> dataset = dataset.map(data_fn)
  False

  Returns:
    `True` if the current thread has eager execution enabled.
  r   r   r   r   r   executing_eagerly_v1  s    <r  c                   C   s   t  S )z?Use executing_eagerly() instead. This function will be removed.r  r   r   r   r   in_eager_mode	  s    r  c                   C   s   dS )a  Returns the anonymous shared name.

  In eager mode we create anonymous resources to avoid spurious sharing issues.
  The runtime generates a unique name on our behalf when the reserved
  anonymous shared name is used as a shared name.

  Returns:
    The anonymous shared name.
  z$cd2c89b7-88b7-44c8-ad83-06c2a9158347r   r   r   r   r   anonymous_name	  s    r  c                   C   s   t  tS )zBContext-manager to disable eager execution for the current thread.)r  r%  
GRAPH_MODEr   r   r   r   
graph_mode	  s    r  z%__internal__.eager_context.eager_modec                   C   s   t  tS )zAContext-manager to enable eager execution for the current thread.)r  r%  r   r   r   r   r   ri   	  s    ri   c                   C   s   t  jS )zName of the current scope.)r  r(  r   r   r   r   r(  #	  s    r(  c                 C   s   t   t | S )a  Context-manager to force placement of operations and Tensors on a device.

  Example:
  ```python
  with tf.device('gpu:0'):
    with tf.device('cpu:0'):
      shape = tf.constant([], dtype=tf.int32)
    x = tf.random.truncated_normal(shape, tf.float32)
  ```
  will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
  operation runs on GPU 0.

  Args:
    name: Name of the device (see context().devices()), or None to perform
      automatic placement.

  Returns:
    Context manager for setting the device.
  )r   r  r   ro   r   r   r   r   (	  s    r   z%__internal__.eager_context.get_configc                   C   s   t  jS )zNGet the ConfigProto of Context.

  Returns:
    The ConfigProto of Context.
  )r  rU   r   r   r   r   
get_configA	  s    r  z*__internal__.eager_context.get_device_namec                   C   s   t  jS )zfGet the device name for the current thread.

  Returns:
    The device name for the current thread.
  )r  r*  r   r   r   r   get_device_nameK	  s    r   z4__internal__.eager_context.set_soft_device_placementc                 C   s   | t  _dS )zrSet if soft device placements should be allowed.

  Args:
    enabled: Whether to enable soft device placement.
  N)r  r  r  r   r   r   set_soft_device_placementU	  s    r  z'__internal__.eager_context.get_executorc                   C   s   t  jS )z^Get the Executor of the current thread.

  Returns:
    The Executor of the current thread.
  r  r
   r   r   r   r   get_executor_	  s    r  z"debugging.get_log_device_placementc                   C   s   t  jS )zXGet if device placements are logged.

  Returns:
    If device placements are logged.
  r  r^  r   r   r   r   get_log_device_placementi	  s    r  z"debugging.set_log_device_placementc                 C   s   | t  _dS )a  Turns logging for device placement decisions on or off.

  Operations execute on a particular device, producing and consuming tensors on
  that device. This may change the performance of the operation or require
  TensorFlow to copy data to or from an accelerator, so knowing where operations
  execute is useful for debugging performance issues.

  For more advanced profiling, use the [TensorFlow
  profiler](https://www.tensorflow.org/guide/profiler).

  Device placement for operations is typically controlled by a `tf.device`
  scope, but there are exceptions, for example operations on a `tf.Variable`
  which follow the initial placement of the variable. Turning off soft device
  placement (with `tf.config.set_soft_device_placement`) provides more explicit
  control.

  >>> tf.debugging.set_log_device_placement(True)
  >>> tf.ones([])
  >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:GPU:0
  >>> with tf.device("CPU"):
  ...  tf.ones([])
  >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:CPU:0
  >>> tf.debugging.set_log_device_placement(False)

  Turning on `tf.debugging.set_log_device_placement` also logs the placement of
  ops inside `tf.function` when the function is called.

  Args:
    enabled: Whether to enabled device placement logging.
  Nr  r  r   r   r   set_log_device_placements	  s     r  c                 c   s0   t  }|j}z| |_dV  W ||_n||_0 dS )zGContext manager for setting device placement policy for current thread.N)r  r   )r  r$  
old_policyr   r   r   r   	  s    r   c                 C   s   | t  _dS )z+Sets execution mode for the current thread.N)r  r   )r#  r   r   r   set_execution_mode	  s    r	  c                 c   sf   | du rdV  nRt  }t| tk}|j}z&|  ||_dV  W ||_|  n||_|  0 dS )z>Context manager for setting execution mode for current thread.N)r  r
   r1  r   r0  )r#  r$  r4  executor_oldr   r   r   r   	  s    
r   c                 c   s0   t  }|j}z| |_dV  W ||_n||_0 dS )a!  Context manager for changing executor for current thread.

  Args:
    e: A Executor to execute eager ops under this scope. Setting it to None will
      switch back to use the default executor for the context.

  Yields:
    Context manager for setting the executor for current thread.
  Nr  )r6  r$  r
  r   r   r   executor_scope	  s    r  z#experimental.function_executor_typec                 c   s:   t  j}t|}z| |_dV  W |t  _n
|t  _0 dS )at  Context manager for setting the executor of eager defined functions.

  Eager defined functions are functions decorated by tf.contrib.eager.defun.

  Args:
    executor_type: a string for the name of the executor to be used to execute
      functions defined by tf.contrib.eager.defun.

  Yields:
    Context manager for setting the executor of eager defined functions.
  N)r  ru  r  rI   )rI   Zcurrent_optionsZold_optionsr   r   r   function_executor_type	  s    
r  c                   C   s
   t   S )z0Returns true if current thread is in async mode.)r  r/  r   r   r   r   r/  	  s    r/  c                   C   s
   t   S )z`Get the number of available GPU devices.

  Returns:
    The number of available GPU devices.
  )r  rx  r   r   r   r   rx  	  s    rx  c                   C   s   t    dS )zEnables tracing of op execution via RunMetadata.

  To retrieve the accumulated metadata call context.export_run_metadata()
  and to stop tracing call context.disable_run_metadata().
  N)r  r  r   r   r   r   r  	  s    r  c                   C   s   t    dS r  )r  r  r   r   r   r   r  	  s    r  c                   C   s   t    dS )zEnables graph collection of executed functions.

  To retrieve the accumulated graphs call context.export_run_metadata()
  and to stop collecting graphs call context.disable_graph_collection().
  N)r  r  r   r   r   r   r  
  s    r  c                   C   s   t    dS r  )r  r  r   r   r   r   r  

  s    r  c                   C   s
   t   S )zReturns a RunMetadata proto with accumulated information.

  The returned protocol buffer contains information since the most recent call
  to either enable_run_metadata or export_run_metadata.

  Returns:
    A RunMetadata protocol buffer.
  )r  r  r   r   r   r   r  
  s    	r  Tc                 c   sf   t  }|  zg }|V  | }W |  n
|  0 |jD ]"}| rT||j q>||j q>dS )aL  Collects a flat list of pre- or post-optimization graphs.

  The collected graphs include device placements, which can be useful for
  testing.

  Usage:

  ```
  @def_function.function
  def f(x):
    return x + constant_op.constant(1.)

  with context.collect_graphs() as graphs:
    with ops.device("CPU:0"):
      f(constant_op.constant(1.))

  graph, = graphs  # `graph` contains a single GraphDef for inspection
  ```

  Args:
    optimized: whether to collect optimized graphs or non-optimized graphs

  Yields:
    A list of GraphDefs, populated when the context manager exits.
  N)r  r  r  r  Zfunction_graphsrk   Zpost_optimization_graphZpre_optimization_graph)Z	optimizedr$  graphsmetadatagraphr   r   r   collect_graphs
  s    

r  c                   C   s
   t   S r    )r  r   r   r   r   r   r   E
  s    r   c                 C   s   t  |  d S r    )r  r   r   r   r   r   r   I
  s    r   c                 C   s   t  |  d S r    )r  r   r  r   r   r   r   M
  s    r   c                 C   s   t  | S r    )r  r   )r   r   r   r   r   Q
  s    r   zexperimental.async_scopec               	   c   sr   d} t j| }z>tdt j| < dV  t   W |du rDt j| = qn|t j| < n|du rbt j| = n
|t j| < 0 dS )a  Context manager for grouping async operations.

  Ops/function calls inside the scope can return before finishing the actual
  execution. When exiting the async scope, a synchronization barrier will be
  automatically added to ensure the completion of all async op and function
  execution, potentially raising exceptions if async execution results in
  an error state.

  Users may write the following code to asynchronously invoke `train_step_fn`
  and log the `loss` metric for every `num_steps` steps in a training loop.
  `train_step_fn` internally consumes data using `iterator.get_next()`, and may
  throw OutOfRangeError when running out of data. In the case:

  ```
  try:
    with tf.experimental.async_scope():
      for _ in range(num_steps):
        # Step function updates the metric `loss` internally
        train_step_fn()
  except tf.errors.OutOfRangeError:
    tf.experimental.async_clear_error()
  logging.info('loss = %s', loss.numpy())
  ```

  Yields:
    Context manager for grouping async operations.
  Z(TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUETN)osenvironr;   rP   r  r   )Zremote_async_env_varr  r   r   r   async_scopeU
  s     

r  c                  C   s6   d} t j| tdkrdS t jdur2t   dS )a  Sync all async operations and raise any errors during execution.

  In async execution mode, an op/function call can return before finishing the
  actual execution. Calling this method creates a synchronization barrier for
  all async op and function execution. It only returns when all pending nodes
  are finished, potentially raising exceptions if async execution results in
  an error state. It is a no-op if the context is not initialized.
  Z%TF_PS_DISABLE_ASYNC_EXECUTOR_GLOBALLYTN)r  r  r;   rP   r  r   r   )Zdisable_async_executor_env_varr   r   r   
async_wait
  s
    	r  zexperimental.async_clear_errorc                   C   s   t    dS )a:  Clear pending operations and error statuses in async execution.

  In async execution mode, an error in op/function execution can lead to errors
  in subsequent ops/functions that are scheduled but not yet executed. Calling
  this method clears all pending operations and reset the async execution state.

  Example:

  ```
  while True:
    try:
      # Step function updates the metric `loss` internally
      train_step_fn()
    except tf.errors.OutOfRangeError:
      tf.experimental.async_clear_error()
      break
  logging.info('loss = %s', loss.numpy())
  ```
  N)r  r   r   r   r   r   async_clear_error
  s    r  c                 C   s   t  |  dS )z)Add a function definition to the context.N)r  rz  )r{  r   r   r   rz  
  s    rz  c                 C   s   t  |  dS )z#Remove a function from the context.N)r  r  r  r   r   r   r  
  s    r  c                 C   s   t  | S r    )r  r  r  r   r   r   r  
  s    r  c                 C   s   t  | || dS )a!  Calls TFE_RegisterCustomDevice to register a custom device with Python.

  Enables using C extensions specifying a custom device from Python. See the
  experimental eager C API in tensorflow/c/eager/c_api_experimental.h for
  details.

  Note that custom devices are not currently supported inside `tf.function`s.

  Args:
    device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice'
      containing a pointer to a TFE_CustomDevice struct. The capsule retains
      ownership of the memory.
    device_name: A string indicating the name to register the custom device
      under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may
      subsequently be passed to `with tf.device(...):`.
    device_info_capsule: A PyCapsule with the name set to
      'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific
      struct with the initial state of the custom device (the void* device_info
      argument to TFE_RegisterCustomDevice). This method takes ownership of the
      memory and clears the capsule destructor.
  N)r  r  )r  r*  r  r   r   r   r  
  s    
r  c                   C   s   t  d u rdS t  S r   )r   r   r   r   r   r   _tmp_in_graph_mode
  s    
r  )T)rC   r/   
contextlibr  r  r  r   r|   Zabslr   numpyr   r  Ztensorflow.core.frameworkr   Ztensorflow.core.protobufr   r   r   tensorflow.pythonr   r   tensorflow.python.clientr	   tensorflow.python.eagerr
   r   tensorflow.python.frameworkr   r   r   r   tensorflow.python.utilr   r   r   Z"tensorflow.python.util.deprecationr    tensorflow.python.util.tf_exportr   r  r   r  r   r  r   r   r   r   ZTFE_DEVICE_PLACEMENT_EXPLICITZDEVICE_PLACEMENT_EXPLICITZTFE_DEVICE_PLACEMENT_WARNZDEVICE_PLACEMENT_WARNZTFE_DEVICE_PLACEMENT_SILENTr   Z%TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32Z!DEVICE_PLACEMENT_SILENT_FOR_INT32r   r   r   Counterr   r   getenvr   r#   r   r   r!   r%   r&   r'   objectr)   rE   r   localrX   
namedtuplera   rf   rm   rq   rw   rx   r   r   r   r,  r  r}   r  r  r  r  r  r  r  r   r   r  r  r  r  r   r  r  r  r  ri   r(  r  r   r  r  r  r  r  r   r	  r   r  r  r/  rx  r  r  r  r  r  r  r   r   r   r   r  r  r  rz  r  r  r  r  ZIS_IN_GRAPH_MODEr   r   r   r   <module>   sV  7+

$
            \8

<

>



	

	

	

	
	
"


			).
