"""Keras backend API."""

import collections
import itertools
import json
import os
import random
import sys
import threading
import warnings
import weakref

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend_config
from keras.distribute import distribute_coordinator_utils as dc
from keras.engine import keras_tensor
from keras.utils import control_flow_util
from keras.utils import object_identity
from keras.utils import tf_contextlib
from keras.utils import tf_inspect
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager.context import get_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls

py_all = all
py_sum = sum
py_any = any

# INTERNAL UTILS

# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = threading.local()

# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = threading.local()


class SessionLocal(threading.local):
    def __init__(self):
        super().__init__()
        self.session = None


# This is the default internal TF session used by Keras.
# It can be set manually via `set_session(sess)`.
_SESSION = SessionLocal()

# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()

# A global set tracking what object names have been seen so far.
# Optionally used as an avoid-list when generating names.
OBSERVED_NAMES = set()


class _DummyEagerGraph(threading.local):
    """_DummyEagerGraph provides a thread local `key` attribute.

    We can't use threading.local directly, i.e. without subclassing, because
    gevent monkey patches threading.local and its version does not support
    weak references.
    """

    class _WeakReferencableClass:
        """This dummy class is needed for two reasons.

        - We need something that supports weak references. Basic types like
        string and ints don't.
        - We need something whose hash and equality are based on object identity
        to make sure they are treated as different keys to
        _GRAPH_LEARNING_PHASES.

        An empty Python class satisfies both of these requirements.
        """

    def __init__(self):
        super().__init__()
        # Constructors for classes subclassing threading.local run once
        # per thread that accesses an attribute, so each thread gets a
        # different key.
        self.key = _DummyEagerGraph._WeakReferencableClass()
        self.learning_phase_is_set = False

z_DummyEagerGraph.__init__)r   r   r   r   r    r   r   r   r   r   r   r   ]   s   r   Fzkeras.backend.backendc                   C   s   dS )zPublicly accessible method for determining the current backend.

    Only exists for API compatibility with multi-backend Keras.

    Returns:
        The string "tensorflow".
    
    """
    return "tensorflow"


@keras_export("keras.backend.cast_to_floatx")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast_to_floatx(x):
    """Cast a Numpy array to the default Keras float type.

    Args:
        x: Numpy array or TensorFlow tensor.

    Returns:
        The same array (Numpy array if `x` was a Numpy array, or TensorFlow
        tensor if `x` was a tensor), cast to its new type.

    Example:

    >>> tf.keras.backend.floatx()
    'float32'
    >>> arr = np.array([1.0, 2.0], dtype='float64')
    >>> arr.dtype
    dtype('float64')
    >>> new_arr = cast_to_floatx(arr)
    >>> new_arr
    array([1.,  2.], dtype=float32)
    >>> new_arr.dtype
    dtype('float32')

    """
    if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
        return tf.cast(x, dtype=floatx())
    return np.asarray(x, dtype=floatx())


@keras_export("keras.backend.get_uid")
def get_uid(prefix=""):
    """Associates a string prefix with an integer counter in a TensorFlow graph.

    Args:
      prefix: String prefix to index.

    Returns:
      Unique integer ID.

    Example:

    >>> get_uid('dense')
    1
    >>> get_uid('dense')
    2

       )	get_graphPER_GRAPH_OBJECT_NAME_UIDScollectionsdefaultdictint)prefixgraphlayer_name_uidsr   r   r   get_uid   s    r>   zkeras.backend.reset_uidsc                   C   s   t   t  dS )zResets graph identifiers.N)r7   clearOBSERVED_NAMESr   r   r   r   
reset_uids   s    rA   zkeras.backend.clear_sessionc                  C   s   dt _tjj  t  tjdur2tj	  dt_t
 } |  B dt_i at }t| | t| d t| d W d   n1 s0    Y  t rt   dS )ak  Resets all state generated by Keras.

    Keras manages a global state, which it uses to implement the Functional
    model-building API and to uniquify autogenerated layer names.

    If you are creating many models in a loop, this global state will consume
    an increasing amount of memory over time, and you may want to clear it.
    Calling `clear_session()` releases the global state: this helps avoid
    clutter from old models and layers, especially when memory is limited.

    Example 1: calling `clear_session()` when creating models in a loop

    ```python
    for _ in range(100):
      # Without `clear_session()`, each iteration of this loop will
      # slightly increase the size of the global state managed by Keras
      model = tf.keras.Sequential([
          tf.keras.layers.Dense(10) for _ in range(10)])

    for _ in range(100):
      # With `clear_session()` called at the beginning,
      # Keras starts with a blank state at each iteration
      # and memory consumption is constant over time.
      tf.keras.backend.clear_session()
      model = tf.keras.Sequential([
          tf.keras.layers.Dense(10) for _ in range(10)])
    ```

    Example 2: resetting the layer name generation counter

    >>> import tensorflow as tf
    >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
    >>> new_layer = tf.keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense_10
    >>> tf.keras.backend.set_learning_phase(1)
    >>> print(tf.keras.backend.learning_phase())
    1
    >>> tf.keras.backend.clear_session()
    >>> new_layer = tf.keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense
    """
    global _SESSION
    global _GRAPH_LEARNING_PHASES
    global _GRAPH_VARIABLES
    global _GRAPH_TF_OPTIMIZERS
    global _GRAPH
    _GRAPH.graph = None
    tf.compat.v1.reset_default_graph()
    reset_uids()
    if _SESSION.session is not None:
        _SESSION.session.close()
    _SESSION.session = None
    graph = get_graph()
    with graph.as_default():
        _DUMMY_EAGER_GRAPH.learning_phase_is_set = False
        _GRAPH_LEARNING_PHASES = {}
        # Recreate the learning phase placeholder in the fresh graph.
        phase = _default_learning_phase()
        _internal_set_learning_phase(graph, phase)
        _GRAPH_VARIABLES.pop(graph, None)
        _GRAPH_TF_OPTIMIZERS.pop(graph, None)
    if tf.executing_eagerly():
        # Clear pending nodes in eager executors, kernel caches and
        # step_containers.
        context.context().clear_kernel_cache()


@keras_export("keras.backend.manual_variable_initialization")
@doc_controls.do_not_generate_docs
def manual_variable_initialization(value):
    """Sets the manual variable initialization flag.

    This boolean flag determines whether
    variables should be initialized
    as they are instantiated (default), or if
    the user should handle the initialization
    (e.g. via `tf.compat.v1.initialize_all_variables()`).

    Args:
        value: Python boolean.
    N)_MANUAL_VAR_INITvaluer   r   r   manual_variable_initialization/  s    rX   zkeras.backend.learning_phasec                  C   s   t jj } | ttddu r$t }npt  X t	 rpt
jtvrXt }tt
j| dt
_tt
jW  d   S t }W d   n1 s0    Y  t| | |S )a)  Returns the learning phase flag.

    The learning phase flag is a bool tensor (0 = test, 1 = train)
    to be passed as input to any Keras function
    that uses a different behavior at train time and test time.

    Returns:
        Learning phase (scalar integer tensor or Python integer).
    """
    graph = tf.compat.v1.get_default_graph()
    if graph is getattr(_GRAPH, "graph", None):
        # Don't enter an init_scope for the learning phase if eager execution
        # is enabled but we're inside the Keras workspace graph.
        learning_phase = symbolic_learning_phase()
    else:
        with tf.init_scope():
            # We always check & set the learning phase inside the init_scope,
            # otherwise the wrong default_graph will be used to look up the
            # learning phase inside of functions & defuns.
            if context.executing_eagerly():
                if _DUMMY_EAGER_GRAPH.key not in _GRAPH_LEARNING_PHASES:
                    phase = _default_learning_phase()
                    _internal_set_learning_phase(
                        _DUMMY_EAGER_GRAPH.key, phase
                    )
                return _internal_get_learning_phase(_DUMMY_EAGER_GRAPH.key)
            learning_phase = symbolic_learning_phase()
    _mark_func_graph_as_unsaveable(graph, learning_phase)
    return learning_phase


def global_learning_phase_is_set():
    return _DUMMY_EAGER_GRAPH.learning_phase_is_set


def _mark_func_graph_as_unsaveable(graph, learning_phase):
    """Mark func graph as unsaveable due to use of symbolic keras learning phase.

    Functions that capture the symbolic learning phase cannot be exported to
    SavedModel. Mark the funcgraph as unsaveable, so that an error will be
    raised if it is exported.

    Args:
      graph: Graph or FuncGraph object.
      learning_phase: Learning phase placeholder or int defined in the graph.
    """
    if graph.building_function and is_placeholder(learning_phase):
        graph.mark_as_unsaveable(
            "The keras learning phase placeholder was used inside a function. "
            "Exporting placeholders is not supported when saving out a "
            "SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` "
            "in the function to set the learning phase to a constant value."
        )


def symbolic_learning_phase():
    graph = get_graph()
    with graph.as_default():
        if graph not in _GRAPH_LEARNING_PHASES:
            phase = _default_learning_phase()
            _internal_set_learning_phase(graph, phase)
        return _internal_get_learning_phase(graph)


def _internal_set_learning_phase(graph, value):
    global _GRAPH_LEARNING_PHASES

    if isinstance(value, tf.Tensor):
        # The 'value' here is a tf.Tensor with attribute 'graph'. There is a
        # circular reference between the key 'graph' and the tensor's own
        # 'graph' attribute, so we store a weakref to the tensor to avoid a
        # memory leak.
        value_ref = weakref.ref(value)
        _GRAPH_LEARNING_PHASES[graph] = value_ref
    else:
        _GRAPH_LEARNING_PHASES[graph] = value


def _internal_get_learning_phase(graph):
    phase = _GRAPH_LEARNING_PHASES.get(graph, None)
    if isinstance(phase, weakref.ref):
        return phase()
    else:
        return phase


def _default_learning_phase():
    if context.executing_eagerly():
        return 0
    else:
        with name_scope(""):
            return tf.compat.v1.placeholder_with_default(
                False, shape=(), name="keras_learning_phase"
            )


@keras_export("keras.backend.set_learning_phase")
@doc_controls.do_not_generate_docs
def set_learning_phase(value):
    """Sets the learning phase to a fixed value.

    The backend learning phase affects any code that calls
    `backend.learning_phase()`
    In particular, all Keras built-in layers use the learning phase as the
    default for the `training` arg to `Layer.__call__`.

    User-written layers and models can achieve the same behavior with code that
    looks like:

    ```python
      def call(self, inputs, training=None):
        if training is None:
          training = backend.learning_phase()
    ```

    Args:
        value: Learning phase value, either 0 or 1 (integers).
               0 = test, 1 = train

    Raises:
        ValueError: if `value` is neither `0` nor `1`.
    """
    warnings.warn(
        "`tf.keras.backend.set_learning_phase` is deprecated and "
        "will be removed after 2020-10-11. To update it, simply "
        "pass a True/False value to the `training` argument of the "
        "`__call__` method of your layer or model."
    )
    deprecated_internal_set_learning_phase(value)


def deprecated_internal_set_learning_phase(value):
    """A deprecated internal implementation of set_learning_phase.

    This method is an internal-only version of `set_learning_phase` that
    does not raise a deprecation error. It is required because
    saved_model needs to keep working with user code that uses the deprecated
    learning phase methods until those APIs are fully removed from the public
    API.

    Specifically SavedModel saving needs to make sure the learning phase is 0
    during tracing even if users overwrote it to a different value.

    But, we don't want to raise deprecation warnings for users when savedmodel
    sets learning phase just for compatibility with code that relied on
    explicitly setting the learning phase for other values.

    Args:
        value: Learning phase value, either 0 or 1 (integers).
            0 = test, 1 = train

    Raises:
        ValueError: if `value` is neither `0` nor `1`.
       r   r5   %Expected learning phase to be 0 or 1.TN)	
ValueErrorr)   r\   rP   rI   r#   rL   r"   r6   rV   r   r   r   ro     s    
ro   z"keras.backend.learning_phase_scopec                 c   sH   t jddd t|  z
dV  W n0 W d   n1 s:0    Y  dS )ai  Provides a scope within which the learning phase is equal to `value`.

    The learning phase gets restored to its original value upon exiting the
    scope.

    Args:
       value: Learning phase value, either 0 or 1 (integers).
              0 = test, 1 = train

    Yields:
      None.

    Raises:
       ValueError: if `value` is neither `0` nor `1`.
    """
    warnings.warn(
        "`tf.keras.backend.learning_phase_scope` is deprecated and "
        "will be removed after 2020-10-11. To update it, simply "
        "pass a True/False value to the `training` argument of the "
        "`__call__` method of your layer or model.",
        stacklevel=2,
    )
    with deprecated_internal_learning_phase_scope(value):
        try:
            yield
        finally:
            pass


@tf_contextlib.contextmanager
def deprecated_internal_learning_phase_scope(value):
    """An internal-only version of `learning_phase_scope`.

    Unlike the public method, this method does not raise a deprecation warning.
    This is needed because saved model saving needs to set learning phase
    to maintain compatibility
    with code that sets/gets the learning phase, but saved model
    saving itself shouldn't raise a deprecation warning.

    We can get rid of this method and its usages when the public API is
    removed.

    Args:
        value: Learning phase value, either 0 or 1 (integers).
            0 = test, 1 = train

    Yields:
        None.

    Raises:
        ValueError: if `value` is neither `0` nor `1`.
    """
    global _GRAPH_LEARNING_PHASES
    if value not in {0, 1}:
        raise ValueError("Expected learning phase to be 0 or 1.")

    with tf.init_scope():
        if tf.executing_eagerly():
            previous_eager_value = _internal_get_learning_phase(
                _DUMMY_EAGER_GRAPH.key
            )
        previous_graph_value = _internal_get_learning_phase(get_graph())

    learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set
    try:
        deprecated_internal_set_learning_phase(value)
        yield
    finally:
        # Restore learning phase to initial value.
        if not learning_phase_previously_set:
            _DUMMY_EAGER_GRAPH.learning_phase_is_set = False
        with tf.init_scope():
            if tf.executing_eagerly():
                if previous_eager_value is not None:
                    _internal_set_learning_phase(
                        _DUMMY_EAGER_GRAPH.key, previous_eager_value
                    )
                elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:
                    del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]

            graph = get_graph()
            if previous_graph_value is not None:
                _internal_set_learning_phase(graph, previous_graph_value)
            elif graph in _GRAPH_LEARNING_PHASES:
                del _GRAPH_LEARNING_PHASES[graph]
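
# Illustrative usage sketch added during reconstruction; `_example_*` helpers
# are hypothetical and not part of the original module. The internal scope
# avoids the deprecation warning raised by the public `learning_phase_scope`.
def _example_learning_phase_scope():
    with deprecated_internal_learning_phase_scope(1):
        assert learning_phase() == 1  # "train" inside the scope
    # The previous phase is restored on exit.
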

@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
    """Internal scope that sets the learning phase in eager / tf.function only.

    Args:
        value: Learning phase value, either 0 or 1 (integers).
               0 = test, 1 = train

    Yields:
      None.

    Raises:
       ValueError: if `value` is neither `0` nor `1`.
    """
    assert value in {0, 1}
    assert tf.compat.v1.executing_eagerly_outside_functions()
    global_learning_phase_was_set = global_learning_phase_is_set()
    if global_learning_phase_was_set:
        previous_value = learning_phase()
    try:
        _internal_set_learning_phase(_DUMMY_EAGER_GRAPH.key, value)
        yield
    finally:
        # Restore learning phase to initial value or unset.
        if global_learning_phase_was_set:
            _internal_set_learning_phase(
                _DUMMY_EAGER_GRAPH.key, previous_value
            )
        else:
            del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
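
# Illustrative usage sketch added during reconstruction; hypothetical helper,
# not part of the original module. This scope only affects the eager /
# tf.function learning phase.
def _example_eager_learning_phase_scope():
    with eager_learning_phase_scope(0):
        return learning_phase()  # 0 ("test") while inside this scope
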

def _as_graph_element(obj):
    """Convert `obj` to a graph element if possible, otherwise return `None`.

    Args:
      obj: Object to convert.

    Returns:
      The result of `obj._as_graph_element()` if that method is available;
          otherwise `None`.
    """
    conv_fn = getattr(obj, "_as_graph_element", None)
    if conv_fn and callable(conv_fn):
        return conv_fn()
    return None


def _assert_same_graph(original_item, item):
    """Fail if the 2 items are from different graphs.

    Args:
      original_item: Original item to check against.
      item: Item to check.

    Raises:
      ValueError: if graphs do not match.
    """
    original_graph = getattr(original_item, "graph", None)
    graph = getattr(item, "graph", None)
    if original_graph and graph and original_graph is not graph:
        raise ValueError(
            "%s must be from the same graph as %s (graphs are %s and %s)."
            % (item, original_item, graph, original_graph)
        )


def _current_graph(op_input_list, graph=None):
    """Returns the appropriate graph to use for the given inputs.

    This library method provides a consistent algorithm for choosing the graph
    in which an Operation should be constructed:

    1. If the default graph is being used to construct a function, we
       use the default graph.
    2. If the "graph" is specified explicitly, we validate that all of the
       inputs in "op_input_list" are compatible with that graph.
    3. Otherwise, we attempt to select a graph from the first Operation-
       or Tensor-valued input in "op_input_list", and validate that all other
       such inputs are in the same graph.
    4. If the graph was not specified and it could not be inferred from
       "op_input_list", we attempt to use the default graph.

    Args:
      op_input_list: A list of inputs to an operation, which may include
        `Tensor`, `Operation`, and other objects that may be converted to a
        graph element.
      graph: (Optional) The explicit graph to use.

    Raises:
      TypeError: If op_input_list is not a list or tuple, or if graph is not a
        Graph.
      ValueError: If a graph is explicitly passed and not all inputs are from
        it, or if the inputs are from multiple graphs, or we could not find a
        graph and there was no default graph.

    Returns:
      The appropriate graph to use for the given inputs.

    """
    current_default_graph = tf.compat.v1.get_default_graph()
    if current_default_graph.building_function:
        return current_default_graph

    op_input_list = tuple(op_input_list)  # Handle generators correctly.
    if graph and not isinstance(graph, tf.Graph):
        raise TypeError("Input graph needs to be a Graph: %s" % (graph,))

    # 1. We validate that all of the inputs are from the same graph. This is
    #    either the supplied graph parameter, or the first one selected from
    #    one of the graph-element-valued inputs. In the latter case, we hold
    #    onto that input in original_graph_element so we can provide a more
    #    informative error if a mismatch is found.
    original_graph_element = None
    for op_input in op_input_list:
        # Determine if this is a valid graph element.
        graph_element = None
        if isinstance(
            op_input,
            (tf.Operation, tf.Tensor, tf.__internal__.CompositeTensor),
        ) and (
            (not isinstance(op_input, tf.Tensor))
            or type(op_input) == tf.Tensor
        ):
            graph_element = op_input
        else:
            graph_element = _as_graph_element(op_input)

        if graph_element is not None:
            if not graph:
                original_graph_element = graph_element
                graph = getattr(graph_element, "graph", None)
            elif original_graph_element is not None:
                _assert_same_graph(original_graph_element, graph_element)
            elif graph_element.graph is not graph:
                raise ValueError(
                    "%s is not from the passed-in graph." % graph_element
                )

    # 2. If all else fails, we use the default graph, which is always there.
    return graph or current_default_graph


def _get_session(op_input_list=()):
    """Returns the session object for the current thread."""
    global _SESSION
    default_session = tf.compat.v1.get_default_session()
    if default_session is not None:
        session = default_session
    else:
        if tf.inside_function():
            raise RuntimeError(
                "Cannot get session inside Tensorflow graph function."
            )
        # If we don't have a session, or that session does not match the
        # current graph, create and cache a new session.
        if getattr(_SESSION, "session", None) is None or (
            _SESSION.session.graph is not _current_graph(op_input_list)
        ):
            # If we are creating the Session inside a tf.distribute.Strategy
            # scope, we ask the strategy for the right session options to use.
            if tf.distribute.has_strategy():
                configure_and_create_distributed_session(
                    tf.distribute.get_strategy()
                )
            else:
                _SESSION.session = tf.compat.v1.Session(
                    config=get_default_session_config()
                )
        session = _SESSION.session
    return session


@keras_export(v1=["keras.backend.get_session"])
def get_session(op_input_list=()):
    """Returns the TF session to be used by the backend.

    If a default TensorFlow session is available, we will return it.

    Else, we will return the global Keras session assuming it matches
    the current graph.

    If no global Keras session exists at this point:
    we will create a new global session.

    Note that you can manually set the global session
    via `K.set_session(sess)`.

    Args:
        op_input_list: An optional sequence of tensors or ops, which will be
          used to determine the current graph. Otherwise the default graph
          will be used.

    Returns:
        A TensorFlow session.
    """
    session = _get_session(op_input_list)
    if not _MANUAL_VAR_INIT:
        with session.graph.as_default():
            _initialize_variables(session)
    return session


# Inject the get_session function to keras_deps to remove the dependency
# from TFLite to Keras.
tf.__internal__.register_get_session_function(get_session)

# Inject the get_session function to tracking_util to avoid the backward
# dependency from TF to Keras.
tf.__internal__.tracking.register_session_provider(get_session)


def get_graph():
    if tf.executing_eagerly():
        global _GRAPH
        if not getattr(_GRAPH, "graph", None):
            _GRAPH.graph = tf.__internal__.FuncGraph("keras_graph")
        return _GRAPH.graph
    else:
        return tf.compat.v1.get_default_graph()


@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
    """Retrieve a shared and temporary func graph.

    The eager execution path lifts a subgraph from the keras global graph into
    a scratch graph in order to create a function. DistributionStrategies, in
    turn, constructs multiple functions as well as a final combined function.
    In order for that logic to work correctly, all of the functions need to be
    created on the same scratch FuncGraph.

    Args:
      graph: A graph to be used as the current scratch graph. If not set then
        a scratch graph will either be retrieved or created:

    Yields:
      The current scratch graph.
    """
    scratch_graph = getattr(_CURRENT_SCRATCH_GRAPH, "graph", None)
    # If scratch graph and `graph` are both configured, they must match.
    if (
        scratch_graph is not None
        and graph is not None
        and scratch_graph is not graph
    ):
        raise ValueError("Multiple scratch graphs specified.")

    if scratch_graph:
        yield scratch_graph
        return

    graph = graph or tf.__internal__.FuncGraph("keras_scratch_graph")
    try:
        _CURRENT_SCRATCH_GRAPH.graph = graph
        yield graph
    finally:
        _CURRENT_SCRATCH_GRAPH.graph = None


@keras_export(v1=["keras.backend.set_session"])
def set_session(session):
    """Sets the global TensorFlow session.

    Args:
        session: A TF Session.
    """
    global _SESSION
    _SESSION.session = session


def get_default_session_config():
    if os.environ.get("OMP_NUM_THREADS"):
        logging.warning(
            "OMP_NUM_THREADS is no longer used by the default Keras config. "
            "To configure the number of threads, use tf.config.threading "
            "APIs."
        )
    config = get_config()
    config.allow_soft_placement = True
    return config


def get_default_graph_uid_map():
    graph = tf.compat.v1.get_default_graph()
    name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
    if name_uid_map is None:
        name_uid_map = collections.defaultdict(int)
        PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
    return name_uid_map


# DEVICE MANIPULATION


class _TfDeviceCaptureOp:
    """Class for capturing the TF device scope."""

    def __init__(self):
        self.device = None

    def _set_device(self, device):
        """This method captures TF's explicit device scope setting."""
        if isinstance(device, tf.DeviceSpec):
            device = device.to_string()
        self.device = device

    def _set_device_from_string(self, device_str):
        self.device = device_str


def _get_current_tf_device():
    """Return explicit device of current context, otherwise returns `None`.

    Returns:
        If the current device scope is explicitly set, it returns a string
        with the device (`CPU` or `GPU`). If the scope is not explicitly set,
        it will return `None`.
    """
    graph = get_graph()
    op = _TfDeviceCaptureOp()
    graph._apply_device_functions(op)
    if tf.__internal__.tf2.enabled():
        return tf.DeviceSpec.from_string(op.device)
    else:
        return tf.compat.v1.DeviceSpec.from_string(op.device)


def _is_current_explicit_device(device_type):
    """Check if the current device is explicitly set on the device type specified.

    Args:
        device_type: A string containing `GPU` or `CPU` (case-insensitive).

    Returns:
        A boolean indicating if the current device scope is explicitly set on
        the device type.

    Raises:
        ValueError: If the `device_type` string indicates an unsupported
            device.
    """
    device_type = device_type.upper()
    if device_type not in ["CPU", "GPU"]:
        raise ValueError('`device_type` should be either "CPU" or "GPU".')
    device = _get_current_tf_device()
    return device is not None and device.device_type == device_type


def _get_available_gpus():
    """Get a list of available GPU devices (formatted as strings).

    Returns:
        A list of available GPU devices.
    """
    if tf.compat.v1.executing_eagerly_outside_functions():
        # Returns names of devices directly.
        return [d.name for d in tf.config.list_logical_devices("GPU")]

    global _LOCAL_DEVICES
    if _LOCAL_DEVICES is None:
        _LOCAL_DEVICES = get_session().list_devices()
    return [x.name for x in _LOCAL_DEVICES if x.device_type == "GPU"]


def _has_nchw_support():
    """Check whether the current scope supports NCHW ops.

    TensorFlow does not support NCHW on CPU. Therefore we check if we are not
    explicitly put on
    CPU, and have GPUs available. In this case there will be soft-placing on
    the GPU device.

    Returns:
        bool: if the current scope device placement would support nchw
    """
    explicitly_on_cpu = _is_current_explicit_device("CPU")
    gpus_available = bool(_get_available_gpus())
    return not explicitly_on_cpu and gpus_available


# VARIABLE MANIPULATION


def _constant_to_tensor(x, dtype):
    """Convert the input `x` to a tensor of type `dtype`.

    This is slightly faster than the _to_tensor function, at the cost of
    handling fewer cases.

    Args:
        x: An object to be converted (numpy arrays, floats, ints and lists of
          them).
        dtype: The destination type.

    Returns:
        A tensor.
    """
    return tf.constant(x, dtype=dtype)


def _to_tensor(x, dtype):
    """Convert the input `x` to a tensor of type `dtype`.

    Args:
        x: An object to be converted (numpy array, list, tensors).
        dtype: The destination type.

    Returns:
        A tensor.
    """
    return tf.convert_to_tensor(x, dtype=dtype)


@keras_export("keras.backend.is_sparse")
@doc_controls.do_not_generate_docs
def is_sparse(tensor):
    """Returns whether a tensor is a sparse tensor.

    Args:
        tensor: A tensor instance.

    Returns:
        A boolean.

    Example:


    >>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
    >>> print(tf.keras.backend.is_sparse(a))
    False
    >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
    >>> print(tf.keras.backend.is_sparse(b))
    True

    """
    spec = getattr(tensor, "_type_spec", None)
    if spec is not None:
        return isinstance(spec, tf.SparseTensorSpec)
    return isinstance(tensor, tf.SparseTensor)


@keras_export("keras.backend.to_dense")
@doc_controls.do_not_generate_docs
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor and returns it.

    Args:
        tensor: A tensor instance (potentially sparse).

    Returns:
        A dense tensor.

    Examples:


    >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
    >>> print(tf.keras.backend.is_sparse(b))
    True
    >>> c = tf.keras.backend.to_dense(b)
    >>> print(tf.keras.backend.is_sparse(c))
    False

    """
    if is_sparse(tensor):
        return tf.sparse.to_dense(tensor)
    else:
        return tensor


@keras_export("keras.backend.name_scope", v1=[])
@doc_controls.do_not_generate_docs
def name_scope(name):
    """A context manager for use when defining a Python op.

    This context manager pushes a name scope, which will make the name of all
    operations added within it have a prefix.

    For example, to define a new Python op called `my_op`:


    def my_op(a):
      with tf.name_scope("MyOp") as scope:
        a = tf.convert_to_tensor(a, name="a")
        # Define some computation that uses `a`.
        return foo_op(..., name=scope)


    When executed, the Tensor `a` will have the name `MyOp/a`.

    Args:
      name: The prefix to use on all names created within the name scope.

    Returns:
      Name scope context manager.
    """
    return tf.name_scope(name)


# Export V1 version.
_v1_name_scope = tf.compat.v1.name_scope
keras_export(v1=["keras.backend.name_scope"], allow_multiple_exports=True)(
    _v1_name_scope
)


@keras_export("keras.backend.variable")
@doc_controls.do_not_generate_docs
def variable(value, dtype=None, name=None, constraint=None):
    """Instantiates a variable and returns it.

    Args:
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.
        constraint: Optional projection function to be
            applied to the variable after an optimizer update.

    Returns:
        A variable instance (with Keras metadata included).

    Examples:

    >>> val = np.array([[1, 2], [3, 4]])
    >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
    ...                                  name='example_var')
    >>> tf.keras.backend.dtype(kvar)
    'float64'
    >>> print(kvar)
    <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=
      array([[1., 2.],
             [3., 4.]])>

    """
    if dtype is None:
        dtype = floatx()
    if hasattr(value, "tocoo"):
        sparse_coo = value.tocoo()
        indices = np.concatenate(
            (
                np.expand_dims(sparse_coo.row, 1),
                np.expand_dims(sparse_coo.col, 1),
            ),
            1,
        )
        v = tf.SparseTensor(
            indices=indices,
            values=sparse_coo.data,
            dense_shape=sparse_coo.shape,
        )
        v._keras_shape = sparse_coo.shape
        return v
    v = tf.Variable(
        value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint
    )
    if isinstance(value, np.ndarray):
        v._keras_shape = value.shape
    elif hasattr(value, "shape"):
        v._keras_shape = int_shape(value)
    track_variable(v)
    return v


def track_tf_optimizer(tf_optimizer):
    """Tracks the given TF optimizer for initialization of its variables."""
    if tf.executing_eagerly():
        return
    optimizers = _GRAPH_TF_OPTIMIZERS[None]
    optimizers.add(tf_optimizer)


@keras_export("keras.__internal__.backend.track_variable", v1=[])
def track_variable(v):
    """Tracks the given variable for initialization."""
    if tf.executing_eagerly():
        return
    graph = v.graph if hasattr(v, "graph") else get_graph()
    _GRAPH_VARIABLES[graph].add(v)


def observe_object_name(name):
    """Observe a name and make sure it won't be used by `unique_object_name`."""
    OBSERVED_NAMES.add(name)


def unique_object_name(
    name,
    name_uid_map=None,
    avoid_names=None,
    namespace="",
    zero_based=False,
    avoid_observed_names=False,
):
    """Makes a object name (or arbitrary string) unique within a TensorFlow graph.

    Args:
      name: String name to make unique.
      name_uid_map: An optional defaultdict(int) to use when creating unique
        names. If None (default), uses a per-Graph dictionary.
      avoid_names: An optional set or dict with names which should not be used.
        If None (default), don't avoid any names unless `avoid_observed_names`
        is True.
      namespace: Gets a name which is unique within the (graph, namespace).
        Layers which are not Networks use a blank namespace and so get
        graph-global names.
      zero_based: If True, name sequences start with no suffix (e.g. "dense",
        "dense_1"). If False, naming is one-based ("dense_1", "dense_2").
      avoid_observed_names: If True, avoid any names that have been observed by
        `backend.observe_object_name`.

    Returns:
      Unique string name.

    Example:


    unique_object_name('dense')  # dense_1
    unique_object_name('dense')  # dense_2

    """
    if name_uid_map is None:
        name_uid_map = get_default_graph_uid_map()
    if avoid_names is None:
        if avoid_observed_names:
            avoid_names = OBSERVED_NAMES
        else:
            avoid_names = set()
    proposed_name = None
    while proposed_name is None or proposed_name in avoid_names:
        name_key = (namespace, name)
        if zero_based:
            number = name_uid_map[name_key]
            if number:
                proposed_name = name + "_" + str(number)
            else:
                proposed_name = name
            name_uid_map[name_key] += 1
        else:
            name_uid_map[name_key] += 1
            proposed_name = name + "_" + str(name_uid_map[name_key])
    return proposed_name
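
# Illustrative usage sketch added during reconstruction; hypothetical helper,
# not part of the original module. Shows one-based numbering and the
# observed-names avoid list described above.
def _example_unique_object_name():
    first = unique_object_name("dense")  # e.g. "dense_1"
    second = unique_object_name("dense")  # e.g. "dense_2"
    observe_object_name("encoder")
    # "encoder" itself is skipped because it has been observed.
    avoided = unique_object_name("encoder", avoid_observed_names=True)
    return first, second, avoided
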

def _get_variables(graph=None):
    """Returns variables corresponding to the given graph for initialization."""
    assert not tf.executing_eagerly()
    variables = _GRAPH_VARIABLES[graph]
    for opt in _GRAPH_TF_OPTIMIZERS[graph]:
        variables.update(opt.optimizer.variables())
    return variables


@keras_export("keras.__internal__.backend.initialize_variables", v1=[])
def _initialize_variables(session):
    """Utility to initialize uninitialized variables on the fly."""
    variables = _get_variables(get_graph())
    candidate_vars = []
    for v in variables:
        if not getattr(v, "_keras_initialized", False):
            candidate_vars.append(v)
    if candidate_vars:
        # This step is expensive, so we only run it on variables not already
        # marked as initialized.
        is_initialized = session.run(
            [tf.compat.v1.is_variable_initialized(v) for v in candidate_vars]
        )
        # Variables without an initializer cannot be initialized here.
        should_be_initialized = [
            (not is_initialized[n]) and v.initializer is not None
            for n, v in enumerate(candidate_vars)
        ]
        uninitialized_vars = []
        for flag, v in zip(should_be_initialized, candidate_vars):
            if flag:
                uninitialized_vars.append(v)
            v._keras_initialized = True
        if uninitialized_vars:
            session.run(
                tf.compat.v1.variables_initializer(uninitialized_vars)
            )


@keras_export("keras.backend.constant")
@doc_controls.do_not_generate_docs
def constant(value, dtype=None, shape=None, name=None):
    """Creates a constant tensor.

    Args:
        value: A constant value (or list)
        dtype: The type of the elements of the resulting tensor.
        shape: Optional dimensions of resulting tensor.
        name: Optional name for the tensor.

    Returns:
        A Constant Tensor.
    """
    if dtype is None:
        dtype = floatx()

    return tf.constant(value, dtype=dtype, shape=shape, name=name)


@keras_export("keras.backend.is_keras_tensor")
def is_keras_tensor(x):
    """Returns whether `x` is a Keras tensor.

    A "Keras tensor" is a tensor that was returned by a Keras layer,
    (`Layer` class) or by `Input`.

    Args:
        x: A candidate tensor.

    Returns:
        A boolean: Whether the argument is a Keras tensor.

    Raises:
        ValueError: In case `x` is not a symbolic tensor.

    Examples:

    >>> np_var = np.array([1, 2])
    >>> # A numpy array is not a symbolic tensor.
    >>> tf.keras.backend.is_keras_tensor(np_var)
    Traceback (most recent call last):
    ...
    ValueError: Unexpectedly found an instance of type
    `<class 'numpy.ndarray'>`.
    Expected a symbolic tensor instance.
    >>> keras_var = tf.keras.backend.variable(np_var)
    >>> # A variable created with the keras backend is not a Keras tensor.
    >>> tf.keras.backend.is_keras_tensor(keras_var)
    False
    >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))
    >>> # A placeholder is a Keras tensor.
    >>> tf.keras.backend.is_keras_tensor(keras_placeholder)
    True
    >>> keras_input = tf.keras.layers.Input([10])
    >>> # An Input is a Keras tensor.
    >>> tf.keras.backend.is_keras_tensor(keras_input)
    True
    >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)
    >>> # Any Keras layer output is a Keras tensor.
    >>> tf.keras.backend.is_keras_tensor(keras_layer_output)
    True

    """
    if not isinstance(
        x,
        (
            tf.Tensor,
            tf.Variable,
            tf.SparseTensor,
            tf.RaggedTensor,
            keras_tensor.KerasTensor,
        ),
    ):
        raise ValueError(
            "Unexpectedly found an instance of type `"
            + str(type(x))
            + "`. Expected a symbolic tensor instance."
        )
    if tf.compat.v1.executing_eagerly_outside_functions():
        return isinstance(x, keras_tensor.KerasTensor)
    return hasattr(x, "_keras_history")


@keras_export("keras.backend.placeholder")
@doc_controls.do_not_generate_docs
def placeholder(
    shape=None, ndim=None, dtype=None, sparse=False, name=None, ragged=False
):
    """Instantiates a placeholder tensor and returns it.

    Args:
        shape: Shape of the placeholder
            (integer tuple, may include `None` entries).
        ndim: Number of axes of the tensor.
            At least one of {`shape`, `ndim`} must be specified.
            If both are specified, `shape` is used.
        dtype: Placeholder type.
        sparse: Boolean, whether the placeholder should have a sparse type.
        name: Optional name string for the placeholder.
        ragged: Boolean, whether the placeholder should have a ragged type.
            In this case, values of 'None' in the 'shape' argument represent
            ragged dimensions. For more information about RaggedTensors, see
            this [guide](https://www.tensorflow.org/guide/ragged_tensor).

    Raises:
        ValueError: If called with sparse = True and ragged = True.

    Returns:
        Tensor instance (with Keras metadata included).

    Examples:


    >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))
    >>> input_ph
    <KerasTensor: shape=(2, 4, 5) dtype=float32 (created by layer ...)>

    """
    if sparse and ragged:
        raise ValueError(
            "Cannot set both sparse and ragged to True when creating a "
            "placeholder."
        )
    if dtype is None:
        dtype = floatx()
    if not shape:
        if ndim:
            shape = (None,) * ndim
    if tf.compat.v1.executing_eagerly_outside_functions():
        if sparse:
            spec = tf.SparseTensorSpec(shape=shape, dtype=dtype)
        elif ragged:
            ragged_rank = 0
            for i in range(1, len(shape)):
                # Hidden dimensions are considered ragged if either this
                # dimension or the previous one has value `None`.
                if shape[i] is None or (
                    hasattr(shape[i], "value") and shape[i].value is None
                ):
                    ragged_rank = i
            spec = tf.RaggedTensorSpec(
                shape=shape, dtype=dtype, ragged_rank=ragged_rank
            )
        else:
            spec = tf.TensorSpec(shape=shape, dtype=dtype, name=name)
        x = keras_tensor.keras_tensor_from_type_spec(spec, name=name)
    else:
        with get_graph().as_default():
            if sparse:
                x = tf.compat.v1.sparse_placeholder(
                    dtype, shape=shape, name=name
                )
            elif ragged:
                ragged_rank = 0
                for i in range(1, len(shape)):
                    if shape[i] is None:
                        ragged_rank = i
                type_spec = tf.RaggedTensorSpec(
                    shape=shape, dtype=dtype, ragged_rank=ragged_rank
                )

                def tensor_spec_to_placeholder(tensorspec):
                    return tf.compat.v1.placeholder(
                        tensorspec.dtype, tensorspec.shape
                    )

                x = tf.nest.map_structure(
                    tensor_spec_to_placeholder,
                    type_spec,
                    expand_composites=True,
                )
            else:
                x = tf.compat.v1.placeholder(dtype, shape=shape, name=name)

    if tf.executing_eagerly():
        # Add keras_history connectivity information to the placeholder when
        # the placeholder is built in a top-level eager context (intended to
        # be used with keras.backend.function).
        from keras.engine import input_layer

        x = input_layer.Input(tensor=x)
        x._is_backend_placeholder = True

    return x


def is_placeholder(x):
    """Returns whether `x` is a placeholder.

    Args:
        x: A candidate placeholder.

    Returns:
        Boolean.
    """
    try:
        if tf.compat.v1.executing_eagerly_outside_functions():
            return hasattr(x, "_is_backend_placeholder")
        from keras.utils import tf_utils

        if tf_utils.is_extension_type(x):
            flat_components = tf.nest.flatten(x, expand_composites=True)
            return py_any(is_placeholder(c) for c in flat_components)
        else:
            return x.op.type == "Placeholder"
    except AttributeError:
        return False


@keras_export("keras.backend.shape")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def shape(x):
    """Returns the symbolic shape of a tensor or variable.

    Args:
        x: A tensor or variable.

    Returns:
        A symbolic shape (which is itself a tensor).

    Examples:

    >>> val = np.array([[1, 2], [3, 4]])
    >>> kvar = tf.keras.backend.variable(value=val)
    >>> tf.keras.backend.shape(kvar)
    <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
    >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
    >>> tf.keras.backend.shape(input)
    <KerasTensor: shape=(3,) dtype=int32 inferred_value=[2, 4, 5] ...>

    """
    return tf.shape(x)


@keras_export("keras.backend.int_shape")
@doc_controls.do_not_generate_docs
def int_shape(x):
    """Returns the shape of tensor or variable as a tuple of int or None entries.

    Args:
        x: Tensor or variable.

    Returns:
        A tuple of integers (or None entries).

    Examples:

    >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
    >>> tf.keras.backend.int_shape(input)
    (2, 4, 5)
    >>> val = np.array([[1, 2], [3, 4]])
    >>> kvar = tf.keras.backend.variable(value=val)
    >>> tf.keras.backend.int_shape(kvar)
    (2, 2)

    """
    try:
        shape = x.shape
        if not isinstance(shape, tuple):
            shape = tuple(shape.as_list())
        return shape
    except ValueError:
        return None


@keras_export("keras.backend.ndim")
@doc_controls.do_not_generate_docs
def ndim(x):
    """Returns the number of axes in a tensor, as an integer.

    Args:
        x: Tensor or variable.

    Returns:
        Integer (scalar), number of axes.

    Examples:


    >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
    >>> val = np.array([[1, 2], [3, 4]])
    >>> kvar = tf.keras.backend.variable(value=val)
    >>> tf.keras.backend.ndim(input)
    3
    >>> tf.keras.backend.ndim(kvar)
    2

    """
    return x.shape.rank


@keras_export("keras.backend.dtype")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dtype(x):
    """Returns the dtype of a Keras tensor or variable, as a string.

    Args:
        x: Tensor or variable.

    Returns:
        String, dtype of `x`.

    Examples:

    >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5)))
    'float32'
    >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
    ...                                                     dtype='float32'))
    'float32'
    >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
    ...                                                     dtype='float64'))
    'float64'
    >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))
    >>> tf.keras.backend.dtype(kvar)
    'float32'
    >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
    ...                                  dtype='float32')
    >>> tf.keras.backend.dtype(kvar)
    'float32'

    """
    return x.dtype.base_dtype.name


@doc_controls.do_not_generate_docs
def dtype_numpy(x):
    """Returns the numpy dtype of a Keras tensor or variable.

    Args:
        x: Tensor or variable.

    Returns:
        numpy.dtype, dtype of `x`.
    """
    return tf.as_dtype(x.dtype).as_numpy_dtype
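
# Illustrative usage sketch added during reconstruction; hypothetical helper,
# not part of the original module.
def _example_dtype_numpy():
    kvar = variable(np.array([[1, 2], [3, 4]]), dtype="float16")
    return dtype_numpy(kvar)  # numpy type object, e.g. np.float16
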

@keras_export("keras.backend.eval")
@doc_controls.do_not_generate_docs
def eval(x):
    """Evaluates the value of a variable.

    Args:
        x: A variable.

    Returns:
        A Numpy array.

    Examples:

    >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
    ...                                  dtype='float32')
    >>> tf.keras.backend.eval(kvar)
    array([[1.,  2.],
           [3.,  4.]], dtype=float32)

    """
    return get_value(to_dense(x))


@keras_export("keras.backend.zeros")
@doc_controls.do_not_generate_docs
def zeros(shape, dtype=None, name=None):
    """Instantiates an all-zeros variable and returns it.

    Args:
        shape: Tuple or list of integers, shape of returned Keras variable
        dtype: data type of returned Keras variable
        name: name of returned Keras variable

    Returns:
        A variable (including Keras metadata), filled with `0.0`.
        Note that if `shape` was symbolic, we cannot return a variable,
        and will return a dynamically-shaped tensor instead.

    Example:

    >>> kvar = tf.keras.backend.zeros((3,4))
    >>> tf.keras.backend.eval(kvar)
    array([[0.,  0.,  0.,  0.],
           [0.,  0.,  0.,  0.],
           [0.,  0.,  0.,  0.]], dtype=float32)
    >>> A = tf.constant([1,2,3])
    >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]
    >>> tf.keras.backend.eval(kvar2)
    array([0., 0., 0.], dtype=float32)
    >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)
    >>> tf.keras.backend.eval(kvar3)
    array([0, 0, 0], dtype=int32)
    >>> kvar4 = tf.keras.backend.zeros([2,3])
    >>> tf.keras.backend.eval(kvar4)
    array([[0., 0., 0.],
           [0., 0., 0.]], dtype=float32)

    """
    with tf.init_scope():
        if dtype is None:
            dtype = floatx()
        tf_dtype = tf.as_dtype(dtype)
        v = tf.zeros(shape=shape, dtype=tf_dtype, name=name)
        if py_all(v.shape.as_list()):
            return variable(v, dtype=dtype, name=name)
        return v


@keras_export("keras.backend.ones")
@doc_controls.do_not_generate_docs
def ones(shape, dtype=None, name=None):
    """Instantiates an all-ones variable and returns it.

    Args:
        shape: Tuple of integers, shape of returned Keras variable.
        dtype: String, data type of returned Keras variable.
        name: String, name of returned Keras variable.

    Returns:
        A Keras variable, filled with `1.0`.
        Note that if `shape` was symbolic, we cannot return a variable,
        and will return a dynamically-shaped tensor instead.

    Example:


    >>> kvar = tf.keras.backend.ones((3,4))
    >>> tf.keras.backend.eval(kvar)
    array([[1.,  1.,  1.,  1.],
           [1.,  1.,  1.,  1.],
           [1.,  1.,  1.,  1.]], dtype=float32)

    """
    with tf.init_scope():
        if dtype is None:
            dtype = floatx()
        tf_dtype = tf.as_dtype(dtype)
        v = tf.ones(shape=shape, dtype=tf_dtype, name=name)
        if py_all(v.shape.as_list()):
            return variable(v, dtype=dtype, name=name)
        return v


@keras_export("keras.backend.eye")
@doc_controls.do_not_generate_docs
def eye(size, dtype=None, name=None):
    """Instantiate an identity matrix and returns it.

    Args:
        size: Integer, number of rows/columns.
        dtype: String, data type of returned Keras variable.
        name: String, name of returned Keras variable.

    Returns:
        A Keras variable, an identity matrix.

    Example:


    >>> kvar = tf.keras.backend.eye(3)
    >>> tf.keras.backend.eval(kvar)
    array([[1.,  0.,  0.],
           [0.,  1.,  0.],
           [0.,  0.,  1.]], dtype=float32)


    """
    if dtype is None:
        dtype = floatx()
    tf_dtype = tf.as_dtype(dtype)
    return variable(tf.eye(size, dtype=tf_dtype), dtype, name)


@keras_export("keras.backend.zeros_like")
@doc_controls.do_not_generate_docs
def zeros_like(x, dtype=None, name=None):
    """Instantiates an all-zeros variable of the same shape as another tensor.

    Args:
        x: Keras variable or Keras tensor.
        dtype: dtype of returned Keras variable.
               `None` uses the dtype of `x`.
        name: name for the variable to create.

    Returns:
        A Keras variable with the shape of `x` filled with zeros.

    Example:

    ```python
    kvar = tf.keras.backend.variable(np.random.random((2,3)))
    kvar_zeros = tf.keras.backend.zeros_like(kvar)
    K.eval(kvar_zeros)
    # array([[ 0.,  0.,  0.], [ 0.,  0.,  0.]], dtype=float32)
    ```
    """
    return tf.zeros_like(x, dtype=dtype, name=name)


@keras_export("keras.backend.ones_like")
@doc_controls.do_not_generate_docs
def ones_like(x, dtype=None, name=None):
    """Instantiates an all-ones variable of the same shape as another tensor.

    Args:
        x: Keras variable or tensor.
        dtype: String, dtype of returned Keras variable.
             None uses the dtype of x.
        name: String, name for the variable to create.

    Returns:
        A Keras variable with the shape of x filled with ones.

    Example:

    >>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
    >>> kvar_ones = tf.keras.backend.ones_like(kvar)
    >>> tf.keras.backend.eval(kvar_ones)
    array([[1.,  1.,  1.],
           [1.,  1.,  1.]], dtype=float32)

    """
    return tf.ones_like(x, dtype=dtype, name=name)


def identity(x, name=None):
    """Returns a tensor with the same content as the input tensor.

    Args:
        x: The input tensor.
        name: String, name for the variable to create.

    Returns:
        A tensor of the same shape, type and content.
    """
    return tf.identity(x, name=name)
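
# Illustrative usage sketch added during reconstruction; hypothetical helper,
# not part of the original module.
def _example_identity():
    x = constant([1.0, 2.0, 3.0])
    return identity(x, name="x_copy")  # same content, new tensor
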

# Global flag that toggles `tf.random.Generator`-backed RNG for Keras.
_USE_GENERATOR_FOR_RNG = False

# The global generator used to create seeds when initializing the
# `tf.random.Generator` used by `RandomGenerator`. It is set by
# `tf.keras.utils.set_random_seed()`.
_SEED_GENERATOR = threading.local()


@keras_export(
    "keras.backend.experimental.is_tf_random_generator_enabled", v1=[]
)
def is_tf_random_generator_enabled():
    """Check whether `tf.random.Generator` is used for RNG in Keras.

    Compared to existing TF stateful random ops, `tf.random.Generator` uses
    `tf.Variable` and stateless random ops to generate random numbers,
    which leads to better reproducibility in distributed training.
    Note enabling it might introduce some breakage to existing code,
    by producing differently-seeded random number sequences
    and breaking tests that rely on specific random numbers being generated.
    To disable the
    usage of `tf.random.Generator`, please use
    `tf.keras.backend.experimental.disable_tf_random_generator`.

    We expect the `tf.random.Generator` code path to become the default, and
    will remove the legacy stateful random ops such as `tf.random.uniform` in
    the future (see the [TF RNG guide](
    https://www.tensorflow.org/guide/random_numbers)).

    This API will also be removed in a future release as well, together with
    `tf.keras.backend.experimental.enable_tf_random_generator()` and
    `tf.keras.backend.experimental.disable_tf_random_generator()`

    Returns:
      boolean: whether `tf.random.Generator` is used for random number
        generation in Keras.
    """
    return _USE_GENERATOR_FOR_RNG


@keras_export("keras.backend.experimental.enable_tf_random_generator", v1=[])
def enable_tf_random_generator():
    """Enable the `tf.random.Generator` as the RNG for Keras.

    See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more
    details.
    """
    global _USE_GENERATOR_FOR_RNG
    _USE_GENERATOR_FOR_RNG = True


@keras_export("keras.backend.experimental.disable_tf_random_generator", v1=[])
def disable_tf_random_generator():
    """Disable the `tf.random.Generator` as the RNG for Keras.

    See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more
    details.
d Z	dd Z
dd Zdd ZdddZdddZdddZd ddZdS )!RandomGeneratora1  Random generator that selects appropriate random ops.

    This class contains the logic for legacy stateful random ops, as well as the
    new stateless random ops with seeds and tf.random.Generator. Any class that
    relies on RNG (eg initializer, shuffle, dropout) should use this class to
    handle the transition from legacy RNGs to new RNGs.

    Args:
      seed: Optional int seed. When `rng_type` is "stateful", the seed is used
        to create `tf.random.Generator` to produce deterministic sequences.
        When `rng_type` is "stateless", new seed will be created if it is not
        provided by user, and it will be passed down to stateless random ops.
        When `rng_type` is "legacy_stateful", the seed will be passed down to
        stateful random ops.
      rng_type: Type of RNG to use, one of "stateful", "stateless",
        "legacy_stateful". It defaults to "stateful" if
        `enable_tf_random_generator` has been activated, or to
        "legacy_stateful" otherwise.
        - When using "stateless", the random ops outputs are constant (the same
          inputs result in the same outputs).
        - When using "stateful" or "legacy_stateful", the random ops outputs are
          non-constant, but deterministic: calling the same random op multiple
          times with the same inputs results in a deterministic sequence of
          different outputs.
        - "legacy_stateful" is backed by TF1 stateful RNG ops
          (e.g. `tf.random.uniform`), while "stateful"
          is backed by TF2 APIs (e.g. `tf.random.Generator.uniform`).
    	statelessstatefulZlegacy_statefulNc                 K   s"   || _ | j|fi | d| _d S r!   )_seed_set_rng_type_built)r   seedrng_typekwargsr   r   r   r     s    zRandomGenerator.__init__c                 K   s`   | ddr| j}|d u r4t r*| j| _q\| j| _n(|| j| j| jfvrVtd| || _d S )NZforce_generatorFzeInvalid `rng_type` received. Valid `rng_type` are ["stateless", "stateful", "legacy_stateful"]. Got: )rf   RNG_STATEFULrZ  	_rng_typeRNG_LEGACY_STATEFULRNG_STATELESSrs   )r   rd  re  r   r   r   ra    s"    

zRandomGenerator._set_rng_typec                 C   s   | j r
dS | j| jkr*tjj s*| j| _| j| jkrL| 	| j
| _
d| _nf| j| jkrddlm} || , | 	| j
}tjj|| _W d   q1 s0    Y  nd| _d| _ dS )a  Lazily init the RandomGenerator.

        The TF API executing_eagerly_outside_functions() has some side effect,
        and couldn't be used before API like tf.enable_eager_execution(). Some
        of the client side code was creating the initializer at the code load
        time, which triggers the creation of RandomGenerator. Lazy init this
        class to walkaround this issue until it is resolved on TF side.
        Nr   r7  T)rb  rg  rf  r)   rC   rD   r|   rh  ri  _create_seedr`  
_generatorr<  r8  maybe_init_scoperandom	Generator	from_seed)r   r8  rc  r   r   r   _maybe_init  s"    

0zRandomGenerator._maybe_initc                 C   sD   |    | j| jkr| jdgS | j| jkr@| j dddf S dS )aT  Generate a new seed based on the init config.

        Note that this will not return python ints which will be frozen in the
        graph and cause stateless op to return the same value. It will only
        return value when generator is used, otherwise it will return None.

        Returns:
          A tensor with shape [2,].
        r   N)rp  rg  ri  r`  rf  rk  
make_seedsr   r   r   r   make_seed_for_stateless_op  s    

z*RandomGenerator.make_seed_for_stateless_opc                 C   s&   | j dur"| j }|  j d7  _ |S dS )a  Create a new seed for the legacy stateful ops to use.

        When user didn't provide any original seed, this method will return
        None.  Otherwise it will increment the counter and return as the new
        seed.

        Note that it is important to generate different seed for stateful ops in
        the `tf.function`. The random ops will return same value when same seed
        is provided in the `tf.function`.

        Returns:
          int as new seed, or None.
        Nr5   )r`  )r   resultr   r   r   make_legacy_seed  s
    
z RandomGenerator.make_legacy_seedc                 C   s6   |d ur|S t tdd r&tjddS tddS d S )N	generatorr5       eA)rZ   _SEED_GENERATORru  randintrm  )r   Zuser_specified_seedr   r   r   rj    s
    zRandomGenerator._create_seed              ?c                 C   s   |    |pt }| j| jkr2| jj||||dS | j| jkrp|  }|rZtj	j
||}tj	j|||||dS tj	j|||||  dS )a  Produce random number based on the normal distribution.

        Args:
          shape: The shape of the random values to generate.
          mean: Floats, default to 0. Mean of the random values to generate.
          stddev: Floats, default to 1. Standard deviation of the random values
            to generate.
          dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `tf.keras.backend.floatx()` is used,
            which default to `float32` unless you configured it otherwise (via
            `tf.keras.backend.set_floatx(float_dtype)`)
          nonce: Optional integer scalar, that will be folded into the seed in
            the stateless mode.
        ri   meanstddevr'   ri   r|  r}  r'   rc  )rp  r.   rg  rf  rk  normalri  rr  r)   rm  experimentalstateless_fold_instateless_normalrt  r   ri   r|  r}  r'   noncerc  r   r   r   random_normal  s(    

zRandomGenerator.random_normalc                 C   s   |    |pt }| j| jkr2| jj||||dS | j| jkrp|  }|rZtj	j
||}tj	j|||||dS tj	j|||||  dS )a  Produce random number based on the uniform distribution.

        Args:
          shape: The shape of the random values to generate.
          minval: Floats, default to 0. Lower bound of the range of
            random values to generate (inclusive).
          minval: Floats, default to None. Upper bound of the range of
            random values to generate (exclusive).
          dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `tf.keras.backend.floatx()` is used,
            which default to `float32` unless you configured it otherwise (via
            `tf.keras.backend.set_floatx(float_dtype)`)
          nonce: Optional integer scalar, that will be folded into the seed in
            the stateless mode.
        )ri   minvalmaxvalr'   ri   r  r  r'   rc  )rp  r.   rg  rf  rk  uniformri  rr  r)   rm  r  r  stateless_uniformrt  )r   ri   r  r  r'   r  rc  r   r   r   random_uniform  s0    
zRandomGenerator.random_uniformc                 C   s   |    |pt }| j| jkr2| jj||||dS | j| jkrp|  }|rZtj	j
||}tj	j|||||dS tj	j|||||  dS )a  Produce random number based on the truncated normal distribution.

        Args:
          shape: The shape of the random values to generate.
          mean: Floats, default to 0. Mean of the random values to generate.
          stddev: Floats, default to 1. Standard deviation of the random values
            to generate.
          dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `tf.keras.backend.floatx()` is used,
            which default to `float32` unless you configured it otherwise (via
            `tf.keras.backend.set_floatx(float_dtype)`)
          nonce: Optional integer scalar, that will be folded into the seed in
            the stateless mode.
        r{  r~  )rp  r.   rg  rf  rk  truncated_normalri  rr  r)   rm  r  r  stateless_truncated_normalrt  r  r   r   r   r  C  s(    

z RandomGenerator.truncated_normalc                 C   sL   |    | j| j| jfv r4tjjj||||  dS tjj	|||| 
 dS )Nratenoise_shaperc  )rp  rg  rf  ri  r)   nnr  stateless_dropoutrr  dropoutrt  )r   inputsr  r  r   r   r   r  i  s    zRandomGenerator.dropout)NN)ry  rz  NN)ry  NNN)ry  rz  NN)N)r   r   r   r   ri  rf  rh  r   ra  rp  rr  rt  rj  r  r  r  r  r   r   r   r   r]  ]  s"   
(	 
' 
, 
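# Usage sketch for RandomGenerator (added; not part of the original source).
# Layers hold one generator each so that reruns are reproducible when a seed
# is given, while repeated calls still draw fresh values. Assumes eager TF 2.x:
#
#   gen = RandomGenerator(seed=7, rng_type="stateful")
#   mask_a = gen.dropout(tf.ones((2, 4)), rate=0.5)
#   mask_b = gen.dropout(tf.ones((2, 4)), rate=0.5)  # state advanced: differs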
@keras_export("keras.backend.random_uniform_variable")
@doc_controls.do_not_generate_docs
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
    """Instantiates a variable with values drawn from a uniform distribution.

    Args:
        shape: Tuple of integers, shape of returned Keras variable.
        low: Float, lower boundary of the output interval.
        high: Float, upper boundary of the output interval.
        dtype: String, dtype of returned Keras variable.
        name: String, name of returned Keras variable.
        seed: Integer, random seed.

    Returns:
        A Keras variable, filled with drawn samples.

    Example:

    >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
    ... low=0.0, high=1.0)
    >>> kvar
    <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
    dtype=float32)>
    Nrv  r'   rc  rJ  )
r.   r)   r   r/   rm  rx  rC   rD   random_uniform_initializerr   )ri   lowhighr'   rj   rc  rN  rW   r   r   r   random_uniform_variablez  s    
r  z$keras.backend.random_normal_variablec                 C   sT   |du rt  }t|}|du r,tjd}tjjj||||d| }t	|||dS )a  Instantiates a variable with values drawn from a normal distribution.

    Args:
        shape: Tuple of integers, shape of returned Keras variable.
        mean: Float, mean of the normal distribution.
        scale: Float, standard deviation of the normal distribution.
        dtype: String, dtype of returned Keras variable.
        name: String, name of returned Keras variable.
        seed: Integer, random seed.

    Returns:
        A Keras variable, filled with drawn samples.

    Example:

    >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
    ... mean=0.0, scale=1.0)
    >>> kvar
    <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
    dtype=float32)>
    Nrv  r  rJ  )
r.   r)   r   r/   rm  rx  rC   rD   random_normal_initializerr   )ri   r|  scaler'   rj   rc  rN  rW   r   r   r   random_normal_variable  s    
r  zkeras.backend.count_paramsc                 C   s   t | j S )a  Returns the static number of elements in a variable or tensor.

    Args:
        x: Variable or tensor.

    Returns:
        Integer, the number of scalars in `x`.

    Example:

    >>> kvar = tf.keras.backend.zeros((2,3))
    >>> tf.keras.backend.count_params(kvar)
    6
    >>> tf.keras.backend.eval(kvar)
    array([[0.,  0.,  0.],
           [0.,  0.,  0.]], dtype=float32)

    )r/   prodri   rB  r1   r   r   r   count_params  s    r  zkeras.backend.castc                 C   s   t | |S )a  Casts a tensor to a different dtype and returns it.

    You can cast a Keras variable but it still returns a Keras tensor.

    Args:
        x: Keras tensor (or variable).
        dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

    Returns:
        Keras tensor with dtype `dtype`.

    Examples:
        Cast a float32 variable to a float64 tensor

    >>> input = tf.keras.backend.ones(shape=(1,3))
    >>> print(input)
    <tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
    numpy=array([[1., 1., 1.]], dtype=float32)>
    >>> cast_input = tf.keras.backend.cast(input, dtype='float64')
    >>> print(cast_input)
    tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)

    )r)   r-   r   r   r   r   r-     s    r-   zkeras.backend.updatec                 C   s   t jj| |S r   )r)   rC   rD   assign)r2   new_xr   r   r   r    s    r  zkeras.backend.update_addc                 C   s   t jj| |S )zUpdate the value of `x` by adding `increment`.

    Args:
        x: A Variable.
        increment: A tensor of same shape as `x`.

    Returns:
        The variable `x` updated.
    )r)   rC   rD   
assign_add)r2   	incrementr   r   r   
@keras_export("keras.backend.update_sub")
@doc_controls.do_not_generate_docs
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    Args:
        x: A Variable.
        decrement: A tensor of same shape as `x`.

    Returns:
        The variable `x` updated.
    )r)   rC   rD   
assign_sub)r2   	decrementr   r   r   
@keras_export("keras.backend.moving_average_update")
@doc_controls.do_not_generate_docs
def moving_average_update(x, value, momentum):
    """Compute the exponential moving average of a value.

    The moving average 'x' is updated with 'value' following:

    ```
    x = x * momentum + value * (1 - momentum)
    ```

    For example:

    >>> x = tf.Variable(0.0)
    >>> momentum=0.9
    >>> moving_average_update(x, value = 2.0, momentum=momentum).numpy()
    >>> x.numpy()
    0.2

    The result will be biased towards the initial value of the variable.

    If the variable was initialized to zero, you can divide by
    `1 - momentum ** num_updates` to debias it (Section 3 of
    [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)):

    >>> num_updates = 1.0
    >>> x_zdb = x/(1 - momentum**num_updates)
    >>> x_zdb.numpy()
    2.0

    Args:
        x: A Variable, the moving average.
        value: A tensor with the same shape as `x`, the new value to be
          averaged in.
        momentum: The moving average momentum.

    Returns:
        The updated variable.
    r5   T)zero_debiasN)	r)   r   r   r   r-   r'   r  trainassign_moving_average)r2   rW   momentumr   r   r   moving_average_update!	  s    'r  zkeras.backend.dotc           
      C   sx  t | durNt | dks(t |dkrNg }tt| tt| D ]&\}}|durb|| qF|| qFt|}g }tt|tt|D ]&\}}|dur|| q|| qt|}tt	t |}|
dg| }t| d|d g}ttjjj||d|d dg}tt|||dd |dd  |dd  S t| rhtj| |}	nt| |}	|	S )a  Multiplies 2 tensors (and/or variables) and returns a tensor.

    This operation corresponds to `numpy.dot(a, b, out=None)`.

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A tensor, dot product of `x` and `y`.

    Examples:

    If inputs `x` and `y` are 2-D arrays, then it is equivalent to `tf.matmul`.
    >>> x = tf.keras.backend.placeholder(shape=(2, 3))
    >>> y = tf.keras.backend.placeholder(shape=(3, 4))
    >>> xy = tf.keras.backend.dot(x, y)
    >>> xy
    <KerasTensor: shape=(2, 4) dtype=float32 ...>

    >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))
    >>> y = tf.keras.backend.placeholder(shape=(3, 4))
    >>> xy = tf.keras.backend.dot(x, y)
    >>> xy
    <KerasTensor: shape=(32, 28, 4) dtype=float32 ...>

    If `x` is an N-D array and `y` is an M-D array (where M>=2), it is a sum
    product over the last axis of `x` and the second-to-last axis of `y`.
    >>> x = tf.keras.backend.random_uniform_variable(
    ... shape=(2, 3), low=0., high=1.)
    >>> y = tf.keras.backend.ones((4, 3, 5))
    >>> xy = tf.keras.backend.dot(x, y)
    >>> tf.keras.backend.int_shape(xy)
    (2, 4, 5)
    Nrt   perm)r3  r  r   r)   unstackri   r  r   listr(  rN   reshaperC   rD   	transposematmulr   r   sparse_dense_matmul)
r2   yx_shaper5  sy_shapey_permute_dimxtytoutr   r   r   dotU	  s4    '("",
r  zkeras.backend.batch_dotc                 C   s  t | }t |}t|}t|}|dk s0|dk rPtdt| d t| d |d }|d }|dur|dur||krtdt| d t| d t|tr||g}|du r|dkr|d |d g}n|d |d g}td	d
 |D rtdt| t|}|d dk r$|d  |7  < |d dk rB|d  |7  < d|v rTtd|\}	}
||	 }||
 }|dur|dur||krtdt| d t| d t| d|d |d ||f  |}|}|dkrt	| d} |	d7 }	|d7 }|dkrt	|d}|d7 }|	|d krntt
|}t
|	|d D ]}||d  ||< q>|	|d< tjj| |} |
dkrtt
|}t
|
ddD ]}||d  ||< q|
|d< tjj||}|dkrt| }|dd }t|d d|d g}t| |} d}nd}|dkrTt|}|dd }t|d |d dg}t||}d}nd}t| |}t|}d}|rt|dd ||dd gd}d}|rt|dd |gd}d}|rt||}|dkrt|d}n|dkr t|d}|S )a  Batchwise dot product.

    `batch_dot` is used to compute dot product of `x` and `y` when
    `x` and `y` are data in batch, i.e. in a shape of
    `(batch_size, :)`.
    `batch_dot` results in a tensor or variable with less dimensions
    than the input. If the number of dimensions is reduced to 1,
    we use `expand_dims` to make sure that ndim is at least 2.

    Args:
      x: Keras tensor or variable with `ndim >= 2`.
      y: Keras tensor or variable with `ndim >= 2`.
      axes: Tuple or list of integers with target dimensions, or single integer.
        The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal.

    Returns:
      A tensor with shape equal to the concatenation of `x`'s shape
      (less the dimension that was summed over) and `y`'s shape
      (less the batch dimension and the dimension that was summed over).
      If the final rank is 1, we reshape it to `(batch_size, 1)`.

    Examples:

    >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
    >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
    >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
    >>> tf.keras.backend.int_shape(xy_batch_dot)
    (32, 1, 30)

    Shape inference:
      Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
      If `axes` is (1, 2), to find the output shape of resultant tensor,
          loop through each dimension in `x`'s shape and `y`'s shape:
      * `x.shape[0]` : 100 : append to output shape
      * `x.shape[1]` : 20 : do not append to output shape,
          dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
      * `y.shape[0]` : 100 : do not append to output shape,
          always ignore first dimension of `y`
      * `y.shape[1]` : 30 : append to output shape
      * `y.shape[2]` : 20 : do not append to output shape,
          dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
      `output_shape` = `(100, 30)`
    rt   zICannot do batch_dot on inputs with rank < 2. Received inputs with shapes z and .r   NzVCannot do batch_dot on inputs with different batch sizes. Received inputs with shapes r5   c                 s   s   | ]}t |ttfV  qd S r   )r(   r  r   )r   ar   r   r   r:  	  r   zbatch_dot.<locals>.<genexpr>zYMultiple target dimensions are not supported. Expected: None, int, (int, int), Provided: zCannot perform batch_dot over axis 0. If your inputs are not batched, add a dummy batch dimension to your inputs using K.expand_dims(x, 0)z*Cannot do batch_dot on inputs with shapes z with axes=z(. x.shape[%d] != y.shape[%d] (%d != %d).r     TF)r   r)  rs   r   r(   r:   r?  r  r)   r   r(  rC   rD   r  ri   stackr  r  concatsqueeze)r2   r  axesr  r  x_ndimy_ndimx_batch_sizey_batch_sizea0a1d1d2orig_x_ndimorig_y_ndimpatternr5  
x_mid_dimsx_squashed_shape
x_squashedy_trail_dimsy_squashed_shape
y_squashedrs  output_shape
do_reshaper   r   r   	batch_dot	  s    /
	









r  zkeras.backend.transposec                 C   s   t jj| S )a  Transposes a tensor and returns it.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.

    Examples:

    >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
    >>> tf.keras.backend.eval(var)
    array([[1.,  2.,  3.],
           [4.,  5.,  6.]], dtype=float32)
    >>> var_transposed = tf.keras.backend.transpose(var)
    >>> tf.keras.backend.eval(var_transposed)
    array([[1.,  4.],
           [2.,  5.],
           [3.,  6.]], dtype=float32)
    >>> input = tf.keras.backend.placeholder((2, 3))
    >>> input
    <KerasTensor: shape=(2, 3) dtype=float32 ...>
    >>> input_transposed = tf.keras.backend.transpose(input)
    >>> input_transposed
    <KerasTensor: shape=(3, 2) dtype=float32 ...>
    r)   rC   rD   r  r1   r   r   r   r  i
  s    r  zkeras.backend.gatherc                 C   s   t jj| |S )as  Retrieves the elements of indices `indices` in the tensor `reference`.

    Args:
        reference: A tensor.
        indices: An integer tensor of indices.

    Returns:
        A tensor of same type as `reference`.

    Examples:

    >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
    >>> tf.keras.backend.eval(var)
    array([[1., 2., 3.],
           [4., 5., 6.]], dtype=float32)
    >>> var_gathered = tf.keras.backend.gather(var, [0])
    >>> tf.keras.backend.eval(var_gathered)
    array([[1., 2., 3.]], dtype=float32)
    >>> var_gathered = tf.keras.backend.gather(var, [1])
    >>> tf.keras.backend.eval(var_gathered)
    array([[4., 5., 6.]], dtype=float32)
    >>> var_gathered = tf.keras.backend.gather(var, [0,1,0])
    >>> tf.keras.backend.eval(var_gathered)
    array([[1., 2., 3.],
           [4., 5., 6.],
           [1., 2., 3.]], dtype=float32)
    )r)   rC   rD   gather)	referencer   r   r   r   r  
  s    r  zkeras.backend.maxc                 C   s   t | ||S )a  Maximum value in a tensor.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to find maximum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    Returns:
        A tensor with maximum values of `x`.
    """
    return tf.reduce_max(x, axis, keepdims)


@keras_export("keras.backend.min")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def min(x, axis=None, keepdims=False):
    """Minimum value in a tensor.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to find minimum values.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    Returns:
        A tensor with minimum values of `x`.
    )r)   
reduce_minr  r   r   r   min
  s    r  zkeras.backend.sumc                 C   s   t | ||S )a  Sum of the values in a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to sum over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    Returns:
        A tensor with sum of `x`.
    )r)   
reduce_sumr  r   r   r   sum
  s    r  zkeras.backend.prodc                 C   s   t | ||S )a  Multiplies the values in a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to compute the product.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    Returns:
        A tensor with the product of elements of `x`.
    )r)   reduce_prodr  r   r   r   r  
  s    r  zkeras.backend.cumsumc                 C   s   t j| |dS )a  Cumulative sum of the values in a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to compute the sum.

    Returns:
        A tensor of the cumulative sum of values of `x` along `axis`.
    r  )r)   cumsumr2   r  r   r   r   r  
  s    r  zkeras.backend.cumprodc                 C   s   t jj| |dS )a  Cumulative product of the values in a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to compute the product.

    Returns:
        A tensor of the cumulative product of values of `x` along `axis`.
    r  )r)   mathcumprodr  r   r   r   r    s    r  zkeras.backend.varc                 C   s.   | j jtjkrt| t } tjj| ||dS )a  Variance of a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to compute the variance.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    Returns:
        A tensor with the variance of elements of `x`.
    r  r  )r'   rE  r)   r   r-   r.   r  reduce_variancer  r   r   r   var  s    r  zkeras.backend.stdc                 C   s.   | j jtjkrt| t } tjj| ||dS )a7  Standard deviation of a tensor, alongside the specified axis.

    It is an alias to `tf.math.reduce_std`.

    Args:
        x: A tensor or variable. It should have numerical dtypes. Boolean type
          inputs will be converted to float.
        axis: An integer, the axis to compute the standard deviation. If `None`
          (the default), reduces all dimensions. Must be in the range
          `[-rank(x), rank(x))`.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is retained
            with length 1.

    Returns:
        A tensor with the standard deviation of elements of `x` with same dtype.
        Boolean type input will be converted to float.
    r  )r'   rE  r)   r   r-   r.   r  
reduce_stdr  r   r   r   std4  s    r  zkeras.backend.meanc                 C   s*   | j jtjkrt| t } t| ||S )a  Mean of a tensor, alongside the specified axis.

    Args:
        x: A tensor or variable.
        axis: A list of integer. Axes to compute the mean.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1 for each entry in `axis`. If `keepdims` is `True`,
            the reduced dimensions are retained with length 1.

    Returns:
        A tensor with the mean of elements of `x`.
    )r'   rE  r)   r   r-   r.   reduce_meanr  r   r   r   r|  P  s    r|  zkeras.backend.anyc                 C   s   t | t j} t | ||S )zBitwise reduction (logical OR).

    Args:
        x: Tensor or variable.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    Returns:
        A uint8 tensor (0s and 1s).
    )r)   r-   r   
reduce_anyr  r   r   r   anyf  s    r  zkeras.backend.allc                 C   s   t | t j} t | ||S )zBitwise reduction (logical AND).

    Args:
        x: Tensor or variable.
        axis: axis along which to perform the reduction.
        keepdims: whether to drop or broadcast the reduction axes.

    Returns:
        A uint8 tensor (0s and 1s).
    )r)   r-   r   
reduce_allr  r   r   r   allx  s    r  zkeras.backend.argmaxr  c                 C   s   t | |S )zReturns the index of the maximum value along an axis.
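# Usage sketch (added; not part of the original source): `any` / `all` cast
# their input to bool first, so they work directly on 0/1 masks:
#
#   mask = tf.constant([[0, 1], [0, 0]])
#   any(mask, axis=1)   # [True, False]
#   all(mask, axis=1)   # [False, False]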

    Args:
        x: Tensor or variable.
        axis: axis along which to perform the reduction.

    Returns:
        A tensor.
    )r)   argmaxr  r   r   r   r    s    r  zkeras.backend.argminc                 C   s   t | |S )zReturns the index of the minimum value along an axis.

    Args:
        x: Tensor or variable.
        axis: axis along which to perform the reduction.

    Returns:
        A tensor.
    )r)   argminr  r   r   r   r    s    r  zkeras.backend.squarec                 C   s
   t | S )zcElement-wise square.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   squarer1   r   r   r   r    s    r  zkeras.backend.absc                 C   s
   t | S )zkElement-wise absolute value.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   absr1   r   r   r   r    s    r  zkeras.backend.sqrtc                 C   s$   t d| jj}t| |} t| S )zElement-wise square root.

       This function clips negative tensor values to 0 before computing the
       square root.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    ry  )r   r'   rE  r)   maximumsqrt)r2   zeror   r   r   r    s    r  zkeras.backend.expc                 C   s
   t | S )zhElement-wise exponential.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   expr1   r   r   r   r    s    r  zkeras.backend.logc                 C   s   t j| S )z`Element-wise log.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   r  logr1   r   r   r   r    s    r  c                 C   s   t | ||S )a  Computes log(sum(exp(elements across dimensions of a tensor))).

    This function is more numerically stable than log(sum(exp(x))).
    It avoids overflows caused by taking the exp of large inputs and
    underflows caused by taking the log of small inputs.

    Args:
        x: A tensor or variable.
        axis: An integer, the axis to reduce over.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`, the reduced dimension is
            retained with length 1.

    Returns:
        The reduced tensor.
    )r)   reduce_logsumexpr  r   r   r   	logsumexp  s    r   zkeras.backend.roundc                 C   s
   t | S )zElement-wise rounding to the closest integer.

    In case of tie, the rounding mode used is "half to even".

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   roundr1   r   r   r   r    s    r  zkeras.backend.signc                 C   s
   t | S )zaElement-wise sign.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   signr1   r   r   r   r     s    r  zkeras.backend.powc                 C   s   t | |S )zElement-wise exponentiation.

    Args:
        x: Tensor or variable.
        a: Python integer.

    Returns:
        A tensor.
    )r)   pow)r2   r  r   r   r   r  /  s    r  zkeras.backend.clipc                 C   sT   t |ttfr(t |ttfr(||k r(|}|du r8tj }|du rFtj}t| ||S )zElement-wise value clipping.

    Args:
        x: Tensor or variable.
        min_value: Python float, integer, or tensor.
        max_value: Python float, integer, or tensor.

    Returns:
        A tensor.
    N)r(   r:   floatr/   infr)   clip_by_value)r2   	min_value	max_valuer   r   r   clip?  s    r	  zkeras.backend.equalc                 C   s   t | |S )zElement-wise equality between two tensors.

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    )r)   equalr2   r  r   r   r   r
  Y  s    r
  zkeras.backend.not_equalc                 C   s   t | |S )zElement-wise inequality between two tensors.

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    )r)   	not_equalr  r   r   r   r  i  s    r  zkeras.backend.greaterc                 C   s   t | |S )zElement-wise truth value of (x > y).

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    r)   greaterr  r   r   r   r  y  s    r  zkeras.backend.greater_equalc                 C   s   t | |S )zElement-wise truth value of (x >= y).

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    )r)   greater_equalr  r   r   r   r    s    r  zkeras.backend.lessc                 C   s   t | |S )zElement-wise truth value of (x < y).

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    r)   lessr  r   r   r   r    s    r  zkeras.backend.less_equalc                 C   s   t | |S )zElement-wise truth value of (x <= y).

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    )r)   
less_equalr  r   r   r   r    s    r  zkeras.backend.maximumc                 C   s   t | |S )a  Element-wise maximum of two tensors.

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A tensor with the element wise maximum value(s) of `x` and `y`.

    Examples:

    >>> x = tf.Variable([[1, 2], [3, 4]])
    >>> y = tf.Variable([[2, 1], [0, -1]])
    >>> m = tf.keras.backend.maximum(x, y)
    >>> m
    <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
    array([[2, 2],
           [3, 4]], dtype=int32)>
    )r)   r  r  r   r   r   r    s    r  zkeras.backend.minimumc                 C   s   t | |S )zElement-wise minimum of two tensors.

    Args:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A tensor.
    )r)   minimumr  r   r   r   r    s    r  zkeras.backend.sinc                 C   s
   t | S )znComputes sin of x element-wise.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   sinr1   r   r   r   r    s    r  zkeras.backend.cosc                 C   s
   t | S )znComputes cos of x element-wise.

    Args:
        x: Tensor or variable.

    Returns:
        A tensor.
    )r)   cosr1   r   r   r   r    s    r  MbP?c                 C   s<   t jjj| |ddd\}}t j| |||||}|||fS )a  Non-fused version of `normalize_batch_in_training`.

    Args:
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input.
        beta: Tensor with which to center the input.
        reduction_axes: iterable of integers,
            axes over which to normalize.
        epsilon: Fuzz factor.

    Returns:
        A tuple length of 3, `(normalized_tensor, mean, variance)`.
    NF)r)   rC   rD   r  momentsbatch_normalization)r2   gammabetareduction_axesepsilonr|  r  normedr   r   r   $_regular_normalize_batch_in_training  s    r  c                 C   s   t jjj| |ddd\}}g }tt| D ],}||v rD|d q,|t | |  q,t 	|}t 
||}	t 
||}
|du rd}nt 
||}|du rd}nt 
||}t j| |	|
|||}|||fS )a  Non-fused, broadcast version of `normalize_batch_in_training`.

    Args:
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input.
        beta: Tensor with which to center the input.
        reduction_axes: iterable of integers,
            axes over which to normalize.
        epsilon: Fuzz factor.

    Returns:
        A tuple length of 3, `(normalized_tensor, mean, variance)`.
    NFr5   )r)   rC   rD   r  r  r(  r3  r  ri   r  r  r  )r2   r  r  r  r  r|  r  target_shaper  broadcast_meanbroadcast_varbroadcast_gammabroadcast_betar  r   r   r   &_broadcast_normalize_batch_in_training  s0    
r$  c                 C   s   t |g dkrd}d}nd}d}|du rDtjd| j| j| gd}|du rftjd	| j| j| gd}tjjjj| ||||d
S )a  Fused version of `normalize_batch_in_training`.

    Args:
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input.
        beta: Tensor with which to center the input.
        reduction_axes: iterable of integers,
            axes over which to normalize.
        epsilon: Fuzz factor.

    Returns:
        A tuple length of 3, `(normalized_tensor, mean, variance)`.
    r   r5   rt   r  NHWCr5   NCHWNrz  )r'   ri   ry  )r  data_format)	r  r)   r   r'   ri   rC   rD   r  fused_batch_norm)r2   r  r  r  r  normalization_axistf_data_formatr   r   r   "_fused_normalize_batch_in_trainingE  s     

r,  z)keras.backend.normalize_batch_in_trainingc                 C   s   t | dkr^t|g dg dfv r^t sLt|g dkrLt| ||||dS t| ||||dS t|ttt | dd krt| ||||dS t| ||||dS dS )a  Computes mean and std for batch then apply batch_normalization on batch.

    Args:
        x: Input tensor or variable.
        gamma: Tensor by which to scale the input.
        beta: Tensor with which to center the input.
        reduction_axes: iterable of integers,
            axes over which to normalize.
        epsilon: Fuzz factor.

    Returns:
        A tuple length of 3, `(normalized_tensor, mean, variance)`.
       r%  )r   rt   r  )r  Nr  )r3  r  r   r$  r,  sortedr(  r  )r2   r  r  r  r  r   r   r   normalize_batch_in_trainingj  s    $

 

r/  z!keras.backend.batch_normalizationc           
   
   C   s   t | dkr
|dks|dkr$d}n|dks4|dkr:d}nd}|dksT|dkrt rt |dkrnt|dg}t |dkrt|dg}|du rt|}nt |dkrt|dg}|du rt|}nt |dkrt|dg}tjjjj	| ||||||d	d
\}}	}	|S tj
| |||||S )a  Applies batch normalization on x given mean, var, beta and gamma.

    I.e. returns:
    `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

    Args:
        x: Input tensor or variable.
        mean: Mean of batch.
        var: Variance of batch.
        beta: Tensor with which to center the input.
        gamma: Tensor by which to scale the input.
        axis: Integer, the axis that should be normalized.
            (typically the features axis).
        epsilon: Fuzz factor.

    Returns:
        A tensor.
    r-  r5   r'  r  r  r&  NF)r  r|  variancer(  is_training)r3  r   r)   r  rS  rU  rC   rD   r  r)  r  )
r2   r|  r  r  r  r  r  r+  r  r   r   r   r   r    sH    



r  zkeras.backend.concatenatec                 C   s   |dk r&t | d }|r"||; }nd}tdd | D rHtjj|| S tdd | D rft| |S tdd | D |S dS )aP  Concatenates a list of tensors alongside the specified axis.

    Args:
        tensors: list of tensors to concatenate.
        axis: concatenation axis.

    Returns:
        A tensor.

    Example:

        >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
        >>> tf.keras.backend.concatenate((a, b), axis=-1)
        <tf.Tensor: shape=(3, 6), dtype=int32, numpy=
        array([[ 1,  2,  3, 10, 20, 30],
               [ 4,  5,  6, 40, 50, 60],
               [ 7,  8,  9, 70, 80, 90]], dtype=int32)>

    r   c                 s   s   | ]}t |V  qd S r   )r   r   r   r   r   r:    r   zconcatenate.<locals>.<genexpr>c                 s   s   | ]}t |tjV  qd S r   r(   r)   r  r   r   r   r   r:    r   c                 S   s   g | ]}t |qS r   )r   r   r   r   r   r     r   zconcatenate.<locals>.<listcomp>N)r3  rL  r)   rC   rD   sparse_concatr  )tensorsr  rD  r   r   r   r     s    
r   zkeras.backend.reshapec                 C   s   t | |S )av  Reshapes a tensor to the specified shape.

    Args:
        x: Tensor or variable.
        shape: Target shape tuple.

    Returns:
        A tensor.

    Example:

      >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      >>> a
      <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
      array([[ 1,  2,  3],
             [ 4,  5,  6],
             [ 7,  8,  9],
             [10, 11, 12]], dtype=int32)>
      >>> tf.keras.backend.reshape(a, shape=(2, 6))
      <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
      array([[ 1,  2,  3,  4,  5,  6],
             [ 7,  8,  9, 10, 11, 12]], dtype=int32)>

    r)   r  rC  r   r   r   r    s    r  z keras.backend.permute_dimensionsc                 C   s   t jjj| |dS )a  Permutes axes in a tensor.

    Args:
        x: Tensor or variable.
        pattern: A tuple of
            dimension indices, e.g. `(0, 2, 1)`.

    Returns:
        A tensor.

    Example:

      >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      >>> a
      <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
      array([[ 1,  2,  3],
             [ 4,  5,  6],
             [ 7,  8,  9],
             [10, 11, 12]], dtype=int32)>
      >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))
      <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
      array([[ 1,  4,  7, 10],
             [ 2,  5,  8, 11],
             [ 3,  6,  9, 12]], dtype=int32)>

    r  r  )r2   r  r   r   r   permute_dimensions  s    r7  zkeras.backend.resize_imagesnearestc           
   	   C   sX  |dkrd\}}n |dkr$d\}}nt d|f | j||d  }| r`tj| dd}nt| ||d  }|ttj||gdd9 }|dkrt| g d	} tj	j
jtj	j
jtj	j
jtj	j
jtj	j
jtj	j
jtj	j
jtj	j
jd
}dd|  d }	||v r&tj	j| ||| d} nt d|	 d| d|dkrTt| g d} | S )a0  Resizes the images contained in a 4D tensor.

    Args:
        x: Tensor or variable to resize.
        height_factor: Positive integer.
        width_factor: Positive integer.
        data_format: One of `"channels_first"`, `"channels_last"`.
        interpolation: A string, one of `"area"`, `"bicubic"`, `"bilinear"`,
          `"gaussian"`, `"lanczos3"`, `"lanczos5"`, `"mitchellcubic"`,
          `"nearest"`.

    Returns:
        A tensor.

    Raises:
        ValueError: in case of incorrect value for
          `data_format` or `interpolation`.
    channels_first)rt   r  channels_last)r5   rt   z"Invalid `data_format` argument: %sr5   int32r&   r   rt   r  r5   )areabicubicbilineargaussianlanczos3lanczos5mitchellcubicr8  "z", ")methodz+`interpolation` argument should be one of: z. Received: "z".r   r  r5   rt   )rs   ri   is_fully_definedr)   r   rB  r/   arrayr7  imageResizeMethodAREABICUBICBILINEARGAUSSIANLANCZOS3LANCZOS5MITCHELLCUBICNEAREST_NEIGHBORjoinkeysresize)
r2   height_factorwidth_factorr(  interpolationrowscols	new_shapeZinterpolationsZinterploations_listr   r   r   resize_images7  sH    




r\  zkeras.backend.resize_volumesc                 C   s   |dkr6t | |dd}t ||dd}t ||dd}|S |dkrlt | |dd}t ||dd}t ||dd}|S tdt| d	S )
a  Resizes the volume contained in a 5D tensor.

    Args:
        x: Tensor or variable to resize.
        depth_factor: Positive integer.
        height_factor: Positive integer.
        width_factor: Positive integer.
        data_format: One of `"channels_first"`, `"channels_last"`.

    Returns:
        A tensor.

    Raises:
        ValueError: if `data_format` is neither
            `channels_last` or `channels_first`.
    r9  rt   r  r  r-  r:  r5   zInvalid data_format: N)repeat_elementsrs   r   )r2   depth_factorrV  rW  r(  outputr   r   r   resize_volumesy  s    r`  zkeras.backend.repeat_elementsc                    s   | j  }|| durFtj| || |d} fdd|D }t||S |d }t | }tj| |d}tt| j d } ||< t	||}t
||} ||< tj|dd}||9 }t||}| j  }|| t||_|S )	a<  Repeats the elements of a tensor along an axis, like `np.repeat`.

    If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
    will have shape `(s1, s2 * rep, s3)`.

    Args:
        x: Tensor or variable.
        rep: Python integer, number of times to repeat.
        axis: Axis along which to repeat.

    Returns:
        A tensor.

    Example:

        >>> b = tf.constant([1, 2, 3])
        >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
        <tf.Tensor: shape=(6,), dtype=int32,
            numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>

    N)rW   num_or_size_splitsr  c                    s   g | ]}t  D ]}|qqS r   r(  )r   r  r   repr   r   r     r   z#repeat_elements.<locals>.<listcomp>r5   r  r;  r&   )ri   rB  r)   splitr   r   r/   rO  r)  tiledeleter   r  	set_shaper   r   )r2   rd  r  r  splitsx_repauxiliary_axisrepsr   rc  r   r]    s(    





r]  zkeras.backend.repeatc                 C   s8   t | dksJ t| d} td|dg}t| |S )a  Repeats a 2D tensor.

    if `x` has shape (samples, dim) and `n` is `2`,
    the output will have shape `(samples, 2, dim)`.

    Args:
        x: Tensor or variable.
        n: Python integer, number of times to repeat.

    Returns:
        A tensor.

    Example:

        >>> b = tf.constant([[1, 2], [3, 4]])
        >>> b
        <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
        array([[1, 2],
               [3, 4]], dtype=int32)>
        >>> tf.keras.backend.repeat(b, n=2)
        <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
        array([[[1, 2],
                [1, 2]],
               [[3, 4],
                [3, 4]]], dtype=int32)>

    rt   r5   )r3  r)   r   r  rf  )r2   r  r  r   r   r   repeat  s    rm  zkeras.backend.aranger5   r;  c                 C   s<   |du r| dk rd} t j| ||dd}|dkr8t||}|S )a  Creates a 1D tensor containing a sequence of integers.

    The function arguments use the same convention as
    Theano's arange: if only one argument is provided,
    it is in fact the "stop" argument and "start" is 0.

    The default type of the returned tensor is `'int32'` to
    match TensorFlow's default.

    Args:
        start: Start value.
        stop: Stop value.
        step: Difference between two successive values.
        dtype: Integer dtype to use.

    Returns:
        An integer tensor.

    Example:

        >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
        <tf.Tensor: shape=(7,), dtype=float32,
            numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>



    Nr   arange)limitdeltarj   r;  )r)   r(  r-   )startstopstepr'   rs  r   r   r   rn    s     
rn  zkeras.backend.tilec                 C   s   t |tr|g}t| |S )zCreates a tensor by tiling `x` by `n`.

    Args:
        x: A tensor or variable
        n: A list of integer. The length must be the same as the number of
            dimensions in `x`.

    Returns:
        A tiled tensor.
    )r(   r:   r)   rf  )r2   r  r   r   r   rf  %  s    
rf  zkeras.backend.flattenc                 C   s   t | dgS )a  Flatten a tensor.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor, reshaped into 1-D

    Example:

        >>> b = tf.constant([[1, 2], [3, 4]])
        >>> b
        <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
        array([[1, 2],
               [3, 4]], dtype=int32)>
        >>> tf.keras.backend.flatten(b)
        <tf.Tensor: shape=(4,), dtype=int32,
            numpy=array([1, 2, 3, 4], dtype=int32)>

    r  r6  r1   r   r   r   r>  8  s    r>  zkeras.backend.batch_flattenc              
   C   s*   t | t dtt| dd g} | S )a  Turn a nD tensor into a 2D tensor with same 0th dimension.

    In other words, it flattens each data samples of a batch.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.

    Examples:
      Flattening a 3D tensor to 2D by collapsing the last dimension.

    >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
    >>> x_batch_flatten = batch_flatten(x_batch)
    >>> tf.keras.backend.int_shape(x_batch_flatten)
    (2, 60)

    r  r5   N)r)   r  r  r  ri   r1   r   r   r   batch_flattenS  s    &rt  zkeras.backend.expand_dimsc                 C   s   t | |S )zAdds a 1-sized dimension at index "axis".

    Args:
        x: A tensor or variable.
        axis: Position where to add a new axis.

    Returns:
        A tensor with expanded dimensions.
    )r)   r   r  r   r   r   r   n  s    r   zkeras.backend.squeezec                 C   s   t | |gS )zRemoves a 1-dimension from the tensor at index "axis".

    Args:
        x: A tensor or variable.
        axis: Axis to drop.

    Returns:
        A tensor with the same data as `x` but reduced dimensions.
    )r)   r  r  r   r   r   r  ~  s    r  zkeras.backend.temporal_paddingr5   r5   c                 C   s>   t |dksJ ddg|d |d gddgg}tjj| |S )zPads the middle dimension of a 3D tensor.

    Args:
        x: Tensor or variable.
        padding: Tuple of 2 integers, how many zeros to
            add at the start and end of dim 1.

    Returns:
        A padded 3D tensor.
    rt   r   r5   )r)  r)   rC   rD   pad)r2   paddingr  r   r   r   temporal_padding  s    rx  z keras.backend.spatial_2d_paddingru  ru  c                 C   s   t |dksJ t |d dks$J t |d dks8J |du rFt }|dvr^tdt| |dkrddgddgt|d t|d g}n$ddgt|d t|d ddgg}tjj| |S )al  Pads the 2nd and 3rd dimensions of a 4D tensor.

    Args:
        x: Tensor or variable.
        padding: Tuple of 2 tuples, padding pattern.
        data_format: One of `channels_last` or `channels_first`.

    Returns:
        A padded 4D tensor.

    Raises:
        ValueError: if `data_format` is neither
            `channels_last` or `channels_first`.
    rt   r   r5   N>   r9  r:  Unknown data_format: r9  )	r)  image_data_formatrs   r   r  r)   rC   rD   rv  r2   rw  r(  r  r   r   r   spatial_2d_padding  s    &$r}  z keras.backend.spatial_3d_paddingru  ru  ru  c                 C   s0  t |dksJ t |d dks$J t |d dks8J t |d dksLJ |du rZt }|dvrrtdt| |dkrddgddg|d d |d d g|d d |d d g|d d |d d gg}nRddg|d d |d d g|d d |d d g|d d |d d gddgg}tjj| |S )	a  Pads 5D tensor with zeros along the depth, height, width dimensions.

    Pads these dimensions with respectively
    "padding[0]", "padding[1]" and "padding[2]" zeros left and right.

    For 'channels_last' data_format,
    the 2nd, 3rd and 4th dimension will be padded.
    For 'channels_first' data_format,
    the 3rd, 4th and 5th dimension will be padded.

    Args:
        x: Tensor or variable.
        padding: Tuple of 3 tuples, padding pattern.
        data_format: One of `channels_last` or `channels_first`.

    Returns:
        A padded 5D tensor.

    Raises:
        ValueError: if `data_format` is neither
            `channels_last` or `channels_first`.

    r  r   rt   r5   N>   r9  r:  rz  r9  )r)  r{  rs   r   r)   rC   rD   rv  r|  r   r   r   spatial_3d_padding  s,    	r  zkeras.backend.stackc                 C   s   t j| |dS )a  Stacks a list of rank `R` tensors into a rank `R+1` tensor.

    Args:
        x: List of tensors.
        axis: Axis along which to perform stacking.

    Returns:
        A tensor.

    Example:

        >>> a = tf.constant([[1, 2],[3, 4]])
        >>> b = tf.constant([[10, 20],[30, 40]])
        >>> tf.keras.backend.stack((a, b))
        <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
        array([[[ 1,  2],
                [ 3,  4]],
               [[10, 20],
                [30, 40]]], dtype=int32)>

    r  )r)   r  r  r   r   r   r    s    r  zkeras.backend.one_hotc                 C   s   t j| |ddS )a  Computes the one-hot representation of an integer tensor.

    Args:
        indices: nD integer tensor of shape
            `(batch_size, dim1, dim2, ... dim(n-1))`
        num_classes: Integer, number of classes to consider.

    Returns:
        (n + 1)D one hot representation of the input
        with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`

    Returns:
        The one-hot tensor.
    r  )depthr  )r)   one_hot)r   num_classesr   r   r   r    s    r  zkeras.backend.reversec                 C   s   t |tr|g}t| |S )zReverse a tensor along the specified axes.

    Args:
        x: Tensor to reverse.
        axes: Integer or iterable of integers.
            Axes to reverse.

    Returns:
        A tensor.
    )r(   r:   r)   reverse)r2   r  r   r   r   r  *  s    
r  a  
  >>> K = tf.keras.backend  # Common keras convention
  >>> v = K.variable(1.)

  >>> # reassign
  >>> K.set_value(v, 2.)
  >>> print(K.get_value(v))
  2.0

  >>> # increment
  >>> K.set_value(v, K.get_value(v) + 1)
  >>> print(K.get_value(v))
  3.0

  Variable semantics in TensorFlow 2 are eager execution friendly. The above
  code is roughly equivalent to:

  >>> v = tf.Variable(1.)

  >>> v.assign(2.)
  >>> print(v.numpy())
  2.0

  >>> v.assign_add(1.)
  >>> print(v.numpy())
  3.0r  zkeras.backend.get_valuec                 C   s   t | s| S t  s$t| t jjr,|  S t| ddslt jj	  |  W  d   S 1 sb0    Y  t j
j rt   |  W  d   S 1 s0    Y  | j " | jt| fdW  d   S 1  s0    Y  dS )aW  Returns the value of a variable.

    `backend.get_value` is the complement of `backend.set_value`, and provides
    a generic interface for reading from variables while abstracting away the
    differences between TensorFlow 1.x and 2.x semantics.

    {snippet}

    Args:
        x: input variable.

    Returns:
        A Numpy array.
    """
    if not tf.is_tensor(x):
        return x
    if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor):
        return x.numpy()
    if not getattr(x, "_in_graph_mode", True):
        # This is a variable which was created in an eager context, but is
        # being evaluated from a Graph.
        with tf.__internal__.eager_context.eager_mode():
            return x.numpy()

    if tf.compat.v1.executing_eagerly_outside_functions():
        # This method of evaluating works inside the Keras FuncGraph.
        with tf.init_scope():
            return x.numpy()

    with x.graph.as_default():
        return x.eval(session=get_session((x,)))
&rH  zkeras.backend.batch_get_valuec                 C   s@   t  rdd | D S t  r&td| r8t| | S g S dS )zReturns the value of more than one tensor variable.

    Args:
        tensors: list of ops to run.

    Returns:
        A list of Numpy arrays.

    Raises:
        RuntimeError: If this method is called inside defun.
    """
    if tf.executing_eagerly():
        return [x.numpy() for x in tensors]
    elif tf.inside_function():
        raise RuntimeError("Cannot get value inside Tensorflow graph function.")
    if tensors:
        return get_session(tensors).run(tensors)
    else:
        return []
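# Usage sketch (added; not part of the original source): fetching several
# variables in one call avoids one session round-trip per variable in graph
# mode. Here `model` is a hypothetical Keras model:
#
#   weights = batch_get_value(model.weights)   # list of numpy arrays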
| jjdd }t| drf| j}| j}n:tdg|j }tjjj||d}| |}|| _|| _t j|||id W d   n1 s0    Y  dS )a  Sets the value of a variable, from a Numpy array.

    `backend.set_value` is the complement of `backend.get_value`, and provides
    a generic interface for assigning to variables while abstracting away the
    differences between TensorFlow 1.x and 2.x semantics.

    {snippet}

    Args:
        x: Variable to set to a new value.
        value: Value to set the tensor to, as a Numpy array
            (of the same shape).
    r&   r   r   _assign_placeholderNri   	feed_dict)r/   r0   rG  r)   rC   rD   r|   r  r6   rH   r   r'   rj   re  r   r  
_assign_opTensorShaper3  r"  r   r  )r2   rW   rN  assign_placeholder	assign_opplaceholder_shaper   r   r   	set_value  s     

r  zkeras.backend.batch_set_valuec           	      C   s   t  st  r8| D ] \}}|tj|t|d qnt   | rg }i }| D ]\}}tj|t|d}t 	|j
jdd }t|dr|j}|j}n:t dg|j }t jjj||d}||}||_||_|| |||< qTt j||d W d   n1 s0    Y  dS )zSets the values of many tensor variables at once.

    Args:
        tuples: a list of tuples `(tensor, value)`.
            `value` should be a Numpy array.
    r&   r   r   r  Nr  r  )r)   rP   r   r  r/   r0   rG  r6   rH   r   r'   rj   re  r   r  r  r  r3  rC   rD   r"  r  r   r  )	tuplesr2   rW   
assign_opsr  rN  r  r  r  r   r   r   batch_set_value  s.    




r  )snippetzkeras.backend.print_tensorc              	   C   s   t | tjrt| drt  h tj|| tj|d}t	|g( t
| W  d   W  d   S 1 sp0    Y  W d   q1 s0    Y  ntj|| tj|d | S dS )a  Prints `message` and the tensor value when evaluated.

    Note that `print_tensor` returns a new tensor identical to `x`
    which should be used in the following code. Otherwise the
    print operation is not taken into account during evaluation.

    Example:

    >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    >>> tf.keras.backend.print_tensor(x)
    <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
      array([[1., 2.],
             [3., 4.]], dtype=float32)>

    Args:
        x: Tensor to print.
        message: Message to print jointly with the tensor.
        summarize: The first and last `summarize` elements within each dimension
            are recursively printed per Tensor. If None, then the first 3 and
            last 3 elements of each dimension are printed for each tensor. If
            set to -1, it will print all elements of every tensor.

    Returns:
        The same tensor `x`, unchanged.
    r<   )output_stream	summarizeN)r(   r)   r*   r   r6   rH   printsysstdoutcontrol_dependenciesrW  )r2   messager  r   r   r   r   print_tensor  s    
Vr  c                   @   s:   e Zd ZdZdddZdd Zdd Zd	d
 Zdd ZdS )GraphExecutionFunctiona  Runs a computation graph.

    It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
    In particular additional operations via `fetches` argument and additional
    tensor substitutions via `feed_dict` arguments. Note that given
    substitutions are merged with substitutions from `inputs`. Even though
    `feed_dict` is passed once in the constructor (called in `model.compile()`)
    we can modify the values in the dictionary. Through this feed_dict we can
    provide additional substitutions besides Keras inputs.

    Args:
        inputs: Feed placeholders to the computation graph.
        outputs: Output tensors to fetch.
        updates: Additional update ops to be run at function call.
        name: A name to help users identify what this function does.
        session_kwargs: Arguments to `tf.Session.run()`:
                        `fetches`, `feed_dict`, `options`, `run_metadata`.
    Nc           
      K   s  |pg }t |ttfstd|| _tjj|dd| _|| _	t
tjj|dd| _t| jd g^ g }|D ]8}t |tr|\}}	|tjj||	 qn|| qntj| | _W d    n1 s0    Y  || _|dd | _|dg | _t | jts| jg| _|dd | _|dd | _d	d
 | jD | _|| _i | _|r^td| f d | _d | _d | _d | _ d | _!d | _"d S )Nz@`updates` in a Keras backend function should be a list or tuple.Tr%  r   r  fetchesoptionsrun_metadatac                 S   s   g | ]}t |qS r   rV  r   r   r   r   r   Y  r   z3GraphExecutionFunction.__init__.<locals>.<listcomp>z>Some keys in session_kwargs are not supported at this time: %s)#r(   r  r   r   _inputs_structurer)   r.  r>  r  _outputs_structurecast_variables_to_tensoroutputsr  r  rC   rD   r  group
updates_oprj   rN   r  r  run_optionsr  session_kwargsfetch_callbacksrs   rT  _callable_fn_feed_arrays_feed_symbols_symbol_vals_fetches_session)
r   r  r  updatesrj   r  updates_opsr  pnew_pr   r   r   r   0  sR    
*
zGraphExecutionFunction.__init__c                 C   s"  t  }|D ]}|j|j q| jrHt| j D ]}|j|j q4t||D ]R\}}|j	
 }	|j|jkrtj||jd}t|}
|
du r|}
|
j|	_|j|	_qR| j| j D ]}|j|j q|j| jj | jr|j| j ||}|| _|| _|| _|| _t| j| _|| _dS )a  Generates a callable that runs the graph.

        Args:
          feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
          feed_symbols: List of input tensors to be fed symbolic tensors at
            runtime.
          symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
          session: Session to use to generate the callable.

        Returns:
          Function that runs the graph according to the above options.
        """
        callable_opts = config_pb2.CallableOptions()
        # Handle external-data feed.
        for x in feed_arrays:
            callable_opts.feed.append(x.name)
        if self.feed_dict:
            for key in sorted(self.feed_dict.keys()):
                callable_opts.feed.append(key.name)
        # Handle symbolic feed.
        for x, y in zip(feed_symbols, symbol_vals):
            connection = callable_opts.tensor_connection.add()
            if x.dtype != y.dtype:
                y = tf.cast(y, dtype=x.dtype)
            from_tensor = _as_graph_element(y)
            if from_tensor is None:
                from_tensor = y
            # Data tensor.
            connection.from_tensor = from_tensor.name
            # Placeholder.
            connection.to_tensor = x.name
        # Handle fetches.
        for x in self.outputs + self.fetches:
            callable_opts.fetch.append(x.name)
        # Handle updates.
        callable_opts.target.append(self.updates_op.name)
        # Handle run_options.
        if self.run_options:
            callable_opts.run_options.CopyFrom(self.run_options)
        # Create callable.
        callable_fn = session._make_callable_from_options(callable_opts)
        # Cache parameters corresponding to the generated callable, so that
        # we can detect future mismatches and refresh the callable.
        self._callable_fn = callable_fn
        self._feed_arrays = feed_arrays
        self._feed_symbols = feed_symbols
        self._symbol_vals = symbol_vals
        self._fetches = list(self.fetches)
        self._session = session

    def _call_fetch_callbacks(self, fetches_output):
        for fetch, output in zip(self._fetches, fetches_output):
            if fetch in self.fetch_callbacks:
                self.fetch_callbacks[fetch](output)
    def _eval_if_composite(self, tensor):
        """Helper method which evaluates any CompositeTensors passed to it."""
        # We need to evaluate any composite tensor objects that have been
        # reconstructed in `pack_sequence_as`, since otherwise they would be
        # returned as part of the result without being converted to Numpy.
        from keras.utils import tf_utils

        if tf_utils.is_extension_type(tensor):
            return self._session.run(tensor)
        else:
            return tensor
    def __call__(self, inputs):
        inputs = tf.nest.flatten(inputs, expand_composites=True)

        session = get_session(inputs)
        feed_arrays = []
        array_vals = []
        feed_symbols = []
        symbol_vals = []
        for tensor, value in zip(self.inputs, inputs):
            if value is None:
                continue

            if tf.is_tensor(value):
                # Case: feeding symbolic tensor.
                feed_symbols.append(tensor)
                symbol_vals.append(value)
            else:
                # Case: feeding Numpy array.
                feed_arrays.append(tensor)
                # We need to do array conversion and type casting at this
                # level, since `callable_fn` only supports exact matches.
                tensor_type = tf.as_dtype(tensor.dtype)
                array_vals.append(
                    np.asarray(value, dtype=tensor_type.as_numpy_dtype)
                )

        if self.feed_dict:
            for key in sorted(self.feed_dict.keys()):
                array_vals.append(
                    np.asarray(
                        self.feed_dict[key], dtype=key.dtype.base_dtype.name
                    )
                )

        # Refresh callable if anything has changed.
        if (
            self._callable_fn is None
            or feed_arrays != self._feed_arrays
            or symbol_vals != self._symbol_vals
            or feed_symbols != self._feed_symbols
            or self.fetches != self._fetches
            or session != self._session
        ):
            self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)

        fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
        self._call_fetch_callbacks(fetched[-len(self._fetches) :])
        output_structure = tf.nest.pack_sequence_as(
            self._outputs_structure,
            fetched[: len(self.outputs)],
            expand_composites=True,
        )
        # Evaluate any composite tensors reconstructed by `pack_sequence_as`
        # so that only Numpy values are returned.
        return tf.nest.map_structure(self._eval_if_composite, output_structure)
>2r  zkeras.backend.functionc           	         s   t jj r|rtd|f |r0td|f ddlm} ddlm |j	| |d t
|tojt|dk fdd	}|S |r|D ]6}|tt jjjjd vr|d
vrd| }t|qt| |f||d|S )a  Instantiates a Keras function.

    Args:
        inputs: List of placeholder tensors.
        outputs: List of output tensors.
        updates: List of update ops.
        name: String, name of function.
        **kwargs: Passed to `tf.Session.run`.

    Returns:
        Output values as Numpy arrays.

    Raises:
        ValueError: if invalid kwargs are passed in or if in eager execution.
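
    Example (an illustrative sketch; assumes eager execution and NumPy):

    >>> x = tf.keras.Input(shape=(1,))
    >>> f = tf.keras.backend.function([x], [2 * x])
    >>> f([np.ones((1, 1))])
    [array([[2.]], dtype=float32)]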
    """
    if tf.compat.v1.executing_eagerly_outside_functions():
        if kwargs:
            raise ValueError(
                "Session keyword arguments are not supported during "
                "eager execution. You passed: %s" % (kwargs,)
            )
        if updates:
            raise ValueError(
                "`updates` argument is not supported during "
                "eager execution. You passed: %s" % (updates,)
            )
        from keras import models
        from keras.utils import tf_utils

        model = models.Model(inputs=inputs, outputs=outputs)

        wrap_outputs = isinstance(outputs, list) and len(outputs) == 1

        def func(model_inputs):
            outs = model(model_inputs)
            if wrap_outputs:
                outs = [outs]
            return tf_utils.sync_to_numpy_or_python_type(outs)

        return func

    if kwargs:
        for key in kwargs:
            if key not in tf_inspect.getfullargspec(tf.compat.v1.Session.run)[
                0
            ] and key not in ["inputs", "outputs", "updates", "name"]:
                msg = (
                    'Invalid argument "%s" passed to K.function with '
                    "TensorFlow backend" % key
                )
                raise ValueError(msg)
    return GraphExecutionFunction(
        inputs, outputs, updates=updates, name=name, **kwargs
    )
r  zkeras.backend.gradientsc                 C   s   t jjj| |ddS )zReturns the gradients of `loss` w.r.t. `variables`.

    Args:
        loss: Scalar tensor to minimize.
        variables: List of variables.

    Returns:
        A gradients tensor.
    T)colocate_gradients_with_ops)r)   rC   rD   	gradients)lossr	  r   r   r   r  ,  s    r  zkeras.backend.stop_gradientc                 C   s$   t | ttfrttj| S t| S )ag  Returns `variables` but with zero gradient w.r.t. every other variable.

    Args:
        variables: Tensor or list of tensors to consider constant with respect
          to any other variable.
    Returns:
        A single tensor or a list of tensors (depending on the passed argument)
        that has no gradient with respect to any other variable.
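
    Example (an illustrative sketch):

    >>> v = tf.Variable(3.0)
    >>> frozen = tf.keras.backend.stop_gradient(v * 2.0)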
    """
    if isinstance(variables, (list, tuple)):
        return map(tf.stop_gradient, variables)
    return tf.stop_gradient(variables)


@keras_export("keras.backend.rnn")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
krt |t j
}t|jdkrt|}|s||}du rg d4d	d
 |rstdt}g }g }fdd}t j	r t j|	n
|	f	fdd}|durt |}rZ|  tD ]}||}|| 
|t|t \}} 
|}|st|}n|d }t |||}t j|}t j|}t 
fdd|D }tdd t|||D }t j||}r4|| || n|g}|g}qb|d }|d }t |}rt  |d ||t|}t  ||dd|t|}nttD ]P}||}|t|t \}}r|| || n|g}|g}q|d }|d }t |}nt}tfddt|D tfddt|D t j	dd |D }|tt \} }!rndtfddtt j| D }"t jdddd}#t  s tt jj ! r du r}$n
t "}$nd}$fdd|$ddd }%|durjr4t|d}t j#t j
d!d"|fd#d$ fd%d&nZt$t j%rrt j"dd'}&t &|&d fd(d$nfd)d$d*d& nddur6td+d t j| D 	f	d,d-}'t jj j'f |'|#|"f| d.|%}(|(dd }nD	fd/d-}'t jj j'f |'|#|"f| d.|%}(|(dd }|(d }"td0d |"D }td1d |D }t j| |}t j| |}fd2d3})t j|)|}|st j||}|||fS )5a8  Iterates over the time dimension of a tensor.

    Args:
        step_function: RNN step function.
            Args:
                input: Tensor with shape `(samples, ...)` (no time dimension),
                    representing input for the batch of samples at a certain
                    time step.
                states: List of tensors.
            Returns:
                output: Tensor with shape `(samples, output_dim)`
                    (no time dimension).
                new_states: List of tensors, same length and shapes
                    as 'states'. The first state in the list must be the
                    output tensor at the previous timestep.
        inputs: Tensor of temporal data of shape `(samples, time, ...)`
            (at least 3D), or nested tensors, and each of which has shape
            `(samples, time, ...)`.
        initial_states: Tensor with shape `(samples, state_size)`
            (no time dimension), containing the initial values for the states
            used in the step function. In the case that state_size is in a
            nested shape, the shape of initial_states will also follow the
            nested structure.
        go_backwards: Boolean. If True, do the iteration over the time
            dimension in reverse order and return the reversed sequence.
        mask: Binary tensor with shape `(samples, time, 1)`,
            with a zero for every element that is masked.
        constants: List of constant values passed at each step.
        unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
        input_length: An integer or a 1-D Tensor, depending on whether
            the time dimension is fixed-length or not. In case of variable
            length input, it is used for masking in case there's no mask
            specified.
        time_major: Boolean. If true, the inputs and outputs will be in shape
            `(timesteps, batch, ...)`, whereas in the False case, it will be
            `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
            efficient because it avoids transposes at the beginning and end of
            the RNN calculation. However, most TensorFlow data is batch-major,
            so by default this function accepts input and emits output in
            batch-major form.
        zero_output_for_mask: Boolean. If True, the output for masked timestep
            will be zeros, whereas in the False case, output from previous
            timestep is returned.
        return_all_outputs: Boolean. If True, return the recurrent outputs for
            all timesteps in the sequence. If False, only return the output for
            the last timestep (which consumes less memory).

    Returns:
        A tuple, `(last_output, outputs, new_states)`.
            last_output: the latest output of the rnn, of shape `(samples, ...)`
            outputs:
                - If `return_all_outputs=True`: a tensor with shape
                  `(samples, time, ...)` where each entry `outputs[s, t]` is the
                  output of the step function at time `t` for sample `s`
                - Else, a tensor equal to `last_output` with shape
                  `(samples, 1, ...)`
            new_states: list of tensors, latest states returned by
                the step function, of shape `(samples, ...)`.

    Raises:
        ValueError: if input dimension is less than 3.
        ValueError: if `unroll` is `True` but input timestep is not a fixed
            number.
        ValueError: if `mask` is provided (not `None`) but states is not
            provided (`len(states)` == 0).
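
    Example (an illustrative sketch of the calling convention):

    >>> inputs = tf.random.normal((8, 4, 5))  # (samples, time, features)
    >>> initial_states = [tf.zeros((8, 5))]
    >>> step = lambda inp, states: (inp + states[0], [inp + states[0]])
    >>> last_output, outputs, new_states = tf.keras.backend.rnn(
    ...     step, inputs, initial_states)
    >>> outputs.shape.as_list()
    [8, 4, 5]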
    Tc                 S   s2   t tt| j}d\|d< |d< tjj| |S )N)r5   r   r   r5   )r  r(  r)  ri   r)   rC   rD   r  )input_tr  r   r   r   swap_batch_timestep  s    z rnn.<locals>.swap_batch_timestepr   r5   r  Nrt   c                 S   s   t j| rtd|  t j|r0td| t|jt| j }t|D ]}t | d} qLdg| |j |d   }t 	| |S )Nz+mask_t is expected to be tensor, but got %sz,input_t is expected to be tensor, but got %sr  r5   )
r)   r.  	is_nestedrs   r)  ri   r(  r   rB  rf  )mask_tr  	fixed_dim	rank_diffr   	multiplesr   r   r   _expand_mask  s    zrnn.<locals>._expand_maskz/Unrolling requires a fixed number of timesteps.c                    s   t | }  r|   | S r   )r)   r  r  )r  go_backwardsr   r   _process_single_input_t  s    
z$rnn.<locals>._process_single_input_tc                    s     fddD }t j|S )Nc                    s   g | ]}|  qS r   r   )r   t_timer   r   r     r   z2rnn.<locals>._get_input_tensor.<locals>.<listcomp>)r)   r.  r  )r  inp)r  processed_inputr  r   _get_input_tensor  s    zrnn.<locals>._get_input_tensorr  c                 3   s   | ]} |V  qd S r   r   )r   r  r  r  r   r   r:    s   zrnn.<locals>.<genexpr>c                 s   s"   | ]\}}}t |||V  qd S r   r)   where)r   mr  psr   r   r   r:    s   r  c                 3   s(   | ] \}}t j|j d | dV  qdS )zinput_ta_%sr'   rQ  tensor_array_nameN)r)   TensorArrayr'   )r   r5  r  time_steps_tr   r   r:  C  s   c                 3   s0   | ](\}} s| |n| t|d V  qdS )r   N)r  r  )r   tainput_r  r   r   r:  K  s   c                 S   s   g | ]}|d  qS )r   r   )r   r  r   r   r   r   V  r   zrnn.<locals>.<listcomp>c                 3   s,   | ]$\}}t j|j |jd | dV  qdS )zoutput_ta_%s)r'   rQ  element_shaper  N)r)   r  r'   ri   )r   r5  r  )output_ta_sizer   r   r:  _  s   r;  r  rJ  c                    s   |  k S r   r   )r  r   r  r   r   <lambda>{  r   zrnn.<locals>.<lambda>    )condmaximum_iterationsparallel_iterationsswap_memorymask_tar  c                    s
     | S r   readr  )r	  r   r   
masking_fn  s    zrnn.<locals>.masking_fnc                    s2   t  fdd|D }t dd t|||D S )Nc                 3   s"   | ]} |t jd V  qdS )r  N)r)  ri   r   or  r   r   r:    s   5rnn.<locals>.compute_masked_output.<locals>.<genexpr>c                 s   s"   | ]\}}}t |||V  qd S r   r  )r   r  r  fmr   r   r   r:    s   r   r  )r  flat_out	flat_masktiled_mask_t)r  r  r   compute_masked_output  s    
z"rnn.<locals>.compute_masked_outputr  c                    s   t  | S r   r  r  )rev_input_lengthr   r   r    s    c                    s   t  | S r   r  r  )input_lengthr   r   r    s    c                    s   t  fddt||D S )Nc                 3   s$   | ]\}}t jj ||V  qd S r   )r)   rC   rD   r  )r   r  zor  r   r   r:    s   r  r  )r  r  r  r   r  r   r    s    c                 s   s   | ]}t |V  qd S r   rR  r  r   r   r   r:    s   c                    s  t fddD }tj|}}	|t |t  \}}tj|}
rZn
tj|}	|||	}
tj|}tj|}t||D ] \}}t|tjr||j	 q|||}tj||}r؈nd t  fddt||
D }d |t |
ft | S )as  RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    prev_output: tuple of outputs from time - 1.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
                c                 3   s   | ]}|  V  qd S r   r
  r   r  r  r   r   r:    r   %rnn.<locals>._step.<locals>.<genexpr>r   c                 3   s   | ]\}}|  |V  qd S r   writer   r  r  ta_index_to_writer   r   r:    s   r5   
r   r)   r.  r  r>  r  r(   r*   rh  ri   )r  output_ta_tprev_outputstatescurrent_inputr  r_  
new_statesflat_outputflat_mask_outputflat_new_output
flat_stateflat_new_statestate	new_stateflat_final_state)	r  	constantsflat_zero_outputinput_tar  r  return_all_outputsstep_functionzero_output_for_maskr   r  r   _step  s@    
zrnn.<locals>._step)body	loop_varsc                    s   t fddD }tj|}|t |t  \}}tj|}tj|}t||D ] \}}	t|	tjr`|	|j	 q`tj|}
rnd t  fddt||
D }tj|}d |ft | S )a)  RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
                c                 3   s   | ]}|  V  qd S r   r
  r  r  r   r   r:    r   r  r   c                 3   s   | ]\}}|  |V  qd S r   r  r  r  r   r   r:  	  s   r5   r!  )r  r"  r$  r%  r_  r&  r*  r+  r,  r-  r'  )r/  initial_statesr1  r  r2  r3  r5  r   r6    s&    c                 s   s   | ]}|  V  qd S r   )r  r  r   r   r   r:    r   c                 s   s   | ]}|d  V  qdS )r  Nr   r  r   r   r   r:    r   c                    sB   t | tjr>| j }r$|d< nd|d<  |d< | | | S )Nr   r5   )r(   r)   r*   ri   rB  rh  )output_ri   )batchr2  
time_stepsr   r   rh  #  s    


zrnn.<locals>.set_shape)r5   )(r)   r   r   r   r.  r/  r>  ri   with_rank_at_leastr'   r   r-   r)  r   rs   r   r  r  r  r(  rS  r  r  r  r  r  r  r   rP   r   GraphOrParentsInXlaContextrC   rD   rY   r  r  r(   r*   subtract
while_loop)*r3  r  r9  r  maskr/  unrollr  
time_majorr4  r2  r  flatted_inputsr   r$  successive_statessuccessive_outputsr  r  	mask_listr5  r  r_  r&  r  r#  flat_statesflat_new_statesflat_final_stateslast_outputr  input_time_zerooutput_time_zeror   	output_tar  max_iterationswhile_loop_kwargsmax_lenr6  final_outputsrh  r   )r  r;  r  r/  r0  r  r9  r  r1  r  r  r	  r  r  r  r2  r  r3  r<  r  r4  r   rnnT  sX   Q






















5
"

rS  zkeras.backend.switchc                    s8  | j tjkrt| d} t| }|slts:fdd}n}t sT fdd}n }tjj| ||}ntrz t r   t}||krt	dt
| d t
| |dkr&|| }tjt| dg| gd	d
}	t| |	} t}
|
|	 }t|d	k|
t|
}t| |} t|  }|S )a  Switches between two operations depending on a scalar value.

    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    Args:
        condition: tensor (`int` or `bool`).
        then_expression: either a tensor, or a callable that returns a tensor.
        else_expression: either a tensor, or a callable that returns a tensor.

    Returns:
        The selected tensor.

    Raises:
        ValueError: If rank of `condition` is greater than rank of expressions.
    r   c                      s    S r   r   r   )then_expressionr   r   then_expression_fnP  s    z"switch.<locals>.then_expression_fnc                      s    S r   r   r   )else_expressionr   r   else_expression_fnW  s    z"switch.<locals>.else_expression_fnzuRank of `condition` should be less than or equal to rank of `then_expression` and `else_expression`. ndim(condition)=z, ndim(then_expression)=r5   r   r  )r'   r)   r   r-   r3  r   rC   rD   r  rs   r   r  ri   r  r  rU  rf  )	conditionrT  rV  	cond_ndimrU  rW  r2   	expr_ndim	ndim_diff
cond_shape
expr_shape
shape_diff
tile_shaper   )rV  rT  r   switch6  sP    

r`  zkeras.backend.in_train_phasec                 C   s   ddl m} |du r| j}|du r,t }t|s||dksF|du rZt| rT|  S | S n"|dksj|du r|t|rx| S |S t|| |} | S )aF  Selects `x` in train phase, and `alt` otherwise.

    Note that `alt` should have the *same shape* as `x`.

    Args:
        x: What to return in train phase
            (tensor or callable that returns a tensor).
        alt: What to return otherwise
            (tensor or callable that returns a tensor).
        training: Optional scalar tensor
            (or Python boolean, or Python integer)
            specifying the learning phase.

    Returns:
        Either `x` or `alt` based on the `training` flag.
        the `training` flag defaults to `K.learning_phase()`.
    r   )base_layer_utilsNr5   TF)	r0  ra  call_contexttrainingr_   r)   r  r   r`  )r2   altrc  ra  r   r   r   in_train_phase  s     

re  zkeras.backend.in_test_phasec                 C   s   t || |dS )a  Selects `x` in test phase, and `alt` otherwise.

    Note that `alt` should have the *same shape* as `x`.

    Args:
        x: What to return in test phase
            (tensor or callable that returns a tensor).
        alt: What to return otherwise
            (tensor or callable that returns a tensor).
        training: Optional scalar tensor
            (or Python boolean, or Python integer)
            specifying the learning phase.

    Returns:
        Either `x` or `alt` based on `K.learning_phase`.
    """
    return in_train_phase(alt, x, training=training)


@keras_export("keras.backend.relu")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def relu(x, alpha=0.0, max_value=None, threshold=0.0):
    """Rectified linear unit.

    With default values, it returns element-wise `max(x, 0)`.

    Otherwise, it follows:
    `f(x) = max_value` for `x >= max_value`,
    `f(x) = x` for `threshold <= x < max_value`,
    `f(x) = alpha * (x - threshold)` otherwise.

    Args:
        x: A tensor or variable.
        alpha: A scalar, slope of negative section (default=`0.`).
        max_value: float. Saturation threshold.
        threshold: float. Threshold value for thresholded activation.

    Returns:
        A tensor.
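
    Example (an illustrative sketch):

    >>> x = tf.constant([-10.0, -1.0, 0.0, 1.0, 10.0])
    >>> tf.keras.backend.relu(x)
    <tf.Tensor: shape=(5,), dtype=float32,
      numpy=array([ 0.,  0.,  0.,  1., 10.], dtype=float32)>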
    """
    # While x can be a tensor or variable, we also see cases where
    # numpy arrays, lists, tuples are passed as well.
    # Lists and tuples do not have a 'dtype' attribute.
    dtype = getattr(x, "dtype", floatx())
    if alpha != 0.0:
        if max_value is None and threshold == 0:
            return tf.nn.leaky_relu(x, alpha=alpha)

        if threshold != 0:
            negative_part = tf.nn.relu(-x + threshold)
        else:
            negative_part = tf.nn.relu(-x)

    clip_max = max_value is not None

    if threshold != 0:
        # Computes x for x > threshold, else 0.
        x = x * tf.cast(tf.greater(x, threshold), dtype=dtype)
    elif max_value == 6:
        # If no threshold, can use nn.relu6 native TF op for performance.
        x = tf.nn.relu6(x)
        clip_max = False
    else:
        x = tf.nn.relu(x)

    if clip_max:
        max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
        zero = _constant_to_tensor(0, x.dtype.base_dtype)
        x = tf.clip_by_value(x, zero, max_value)

    if alpha != 0.0:
        alpha = _to_tensor(alpha, x.dtype.base_dtype)
        x -= alpha * negative_part
    return x


@keras_export("keras.backend.elu")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def elu(x, alpha=1.0):
    """Exponential linear unit.

    Args:
        x: A tensor or variable to compute the activation function for.
        alpha: A scalar, slope of negative section.

    Returns:
        A tensor.
    """
    res = tf.nn.elu(x)
    if alpha == 1:
        return res
    else:
        return tf.where(x > 0, res, alpha * res)


@keras_export("keras.backend.softmax")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softmax(x, axis=-1):
    """Softmax of a tensor.

    Args:
        x: A tensor or variable.
        axis: The dimension softmax would be performed on.
            The default is -1 which indicates the last dimension.

    Returns:
        A tensor.
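
    Example (an illustrative sketch):

    >>> x = tf.constant([[1.0, 2.0, 3.0]])
    >>> tf.keras.backend.softmax(x)
    <tf.Tensor: shape=(1, 3), dtype=float32,
      numpy=array([[0.09003057, 0.24472848, 0.66524094]], dtype=float32)>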
    """
    return tf.nn.softmax(x, axis=axis)


@keras_export("keras.backend.softplus")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softplus(x):
    """Softplus of a tensor.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return tf.math.softplus(x)


@keras_export("keras.backend.softsign")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softsign(x):
    """Softsign of a tensor.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return tf.math.softsign(x)


def _get_logits(output, from_logits, op_type, fn_name):
dks`J | jj
d }d}|r|s||rtjd| d| ddd	 ||fS )
N_keras_logitsTr5   r   z"`zK` received `from_logits=True`, but the `output` argument was produced by a zB activation and thus does not represent logits. Was this intended?rt   ru   )r   rt  r(   r)   r   r  r+   r   r   r)  r  rm   rn   )r_  from_logitsop_typefn_namer:  Zfrom_logits_Zhas_keras_logitsZfrom_expected_op_typer   r   r   _get_logitsF  s.    

rx  z&keras.backend.categorical_crossentropyc                 C   s   t | } t |}| j|j t||dd\}}|rJt jj| ||dS |t ||d }tt	 |j
j}t ||d| }t | t j| | S )a  Categorical crossentropy between an output tensor and a target tensor.

    Args:
        target: A tensor of the same shape as `output`.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to data
            format `channels_last`, and `axis=1` corresponds to data format
            `channels_first`.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `axis` is neither -1 nor one of the axes of `output`.

    Example:

    >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
    >>> print(a)
    tf.Tensor(
      [[1. 0. 0.]
       [0. 1. 0.]
       [0. 0. 1.]], shape=(3, 3), dtype=float32)
    >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94],
    ...                 shape=[3, 3])
    >>> print(b)
    tf.Tensor(
      [[0.9  0.05 0.05]
       [0.05 0.89 0.06]
       [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
    >>> loss = tf.keras.backend.categorical_crossentropy(a, b)
    >>> print(np.around(loss, 5))
    [0.10536 0.11653 0.06188]
    >>> loss = tf.keras.backend.categorical_crossentropy(a, a)
    >>> print(np.around(loss, 5))
    [0. 0. 0.]

    Softmaxcategorical_crossentropy)labelslogitsr  Trz  )r)   r   ri   assert_is_compatible_withrx  r  !softmax_cross_entropy_with_logitsr  r   r  r'   rE  r  r  r  )r  r_  ru  r  epsilon_r   r   r   rz  i  s    .

rz  z-keras.backend.sparse_categorical_crossentropyc                 C   sD  t | } t |}t| d} t||dd\}}|sbtt |jj}t ||d| }t j	
|}t|jttfr~t|j}n|jj}|dur||; }||d krttt|t|d ||g}t jjj||d}n|dkrtd|t |}| jj}	|	duo|duo|	|d k}
|
rBt| } t |d|d g}|durpt | t|| j}| | } || }td	d
 | |fD rt    t jj | |d}W d   n1 s0    Y  nt jj | |d}|durt|dd d}t ||}t !t "|||}||_#|S |
r@|dkr@t ||dd }|S )a  Categorical crossentropy with integer targets.

    Args:
        target: An integer tensor.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to data
            format `channels_last`, and `axis=1` corresponds to data format
            `channels_first`.
        ignore_class: Optional integer. The ID of a class to be ignored
            during loss computation. This is useful, for example, in
            segmentation problems featuring a "void" class (commonly -1
            or 255) in segmentation maps.
            By default (`ignore_class=None`), all classes are considered.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `axis` is neither -1 nor one of the axes of `output`.
    int64ry  sparse_categorical_crossentropyr5   Nr  r  zcCannot compute sparse categorical crossentropy with `axis={}` on an output tensor with unknown rankc                 s   s   | ]}t |V  qd S r   )_is_symbolic_tensorr  r   r   r   r:    r   z2sparse_categorical_crossentropy.<locals>.<genexpr>r{  r|  r  )$r)   r   r-   rx  r   r  r'   rE  r  r  r  r(   ri   r   r  r)  ndims	itertoolschainr(  rC   rD   r  rs   formatr>  r  r  r?  r6   rH   r  (sparse_softmax_cross_entropy_with_logits
scatter_ndr  _keras_mask)r  r_  ru  r  Zignore_classr  output_rankpermutationr  target_rankupdate_shapeZ
valid_maskrp  Z	res_shaper   r   r   r    sv    






(
r  z!keras.backend.binary_crossentropyc                 C   s   t | } t |}t||dd\}}|r:t jj| |dS tt |jj}t 	||d| }| t j
|t   }|d|  t j
d| t   7 }| S )ap  Binary crossentropy between an output tensor and a target tensor.

    Args:
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor.
            By default, we consider that `output`
            encodes a probability distribution.

    Returns:
        A tensor.
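
    Example (an illustrative sketch):

    >>> target = tf.constant([1.0, 0.0])
    >>> output = tf.constant([0.9, 0.1])
    >>> bce = tf.keras.backend.binary_crossentropy(target, output)
    >>> bce.numpy().round(4)
    array([0.1054, 0.1054], dtype=float32)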
    Sigmoidbinary_crossentropyr  rz  r5   )r)   r   rx  r  !sigmoid_cross_entropy_with_logitsr   r  r'   rE  r  r  r  )r  r_  ru  r  bcer   r   r   r    s    

"r  z'keras.backend.binary_focal_crossentropy      ?       @c                    s   t jj| fdd fdd}| | d|  d|   }t d| |}t|  |d}	||	 }
|r| | d|  d|   }||
 }
|
S )a  Binary focal crossentropy between an output tensor and a target tensor.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
    effect on the binary crossentropy.

    If `apply_class_balancing == True`, this function also takes into account a
    weight balancing factor for the binary classes 0 and 1 as follows:

    `weight = alpha` for class 1 (`target == 1`)
    `weight = 1 - alpha` for class 0
    where `alpha` is a float in the range of `[0, 1]`.

    Args:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      apply_class_balancing: A bool, whether to apply weight balancing on the
        binary classes 0 and 1.
      alpha: A weight balancing factor for class 1, default is `0.25` as
        mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
      gamma: A focusing parameter, default is `2.0` as mentioned in the
        reference.
      from_logits: Whether `output` is expected to be a logits tensor. By
        default, we consider that `output` encodes a probability distribution.

    Returns:
      A tensor.
    c                      s   t  S r   )sigmoidr   r_  r   r   r  h  r   z+binary_focal_crossentropy.<locals>.<lambda>c                      s    S r   r   r   r  r   r   r  i  r   r5   rz  )r  r_  ru  )r)   r   
smart_condr  r  )r  r_  Zapply_class_balancingrg  r  ru  Z	sigmoidalp_tZfocal_factorr  Z	focal_bceweightr   r  r   binary_focal_crossentropy:  s"    ,

r  zkeras.backend.sigmoidc                 C   s
   t | S )zfElement-wise sigmoid.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return tf.sigmoid(x)


@keras_export("keras.backend.hard_sigmoid")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def hard_sigmoid(x):
    """Segment-wise linear approximation of sigmoid.

    Faster than sigmoid.
    Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
    In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
    point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
    x = tf.multiply(x, point_two)
    x = tf.add(x, point_five)
    x = tf.clip_by_value(x, 0.0, 1.0)
    return x


@keras_export("keras.backend.tanh")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tanh(x):
    """Element-wise tanh.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return tf.tanh(x)


@keras_export("keras.backend.dropout")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dropout(x, level, noise_shape=None, seed=None):
    """Sets entries in `x` to zero at random, while scaling the entire tensor.

    Args:
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.

    Returns:
        A tensor.
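
    Example (an illustrative sketch; the dropped entries are random):

    >>> x = tf.ones((2, 4))
    >>> dropped = tf.keras.backend.dropout(x, level=0.5, seed=1)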
    """
    if seed is None:
        seed = np.random.randint(10e6)
    return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed)


@keras_export("keras.backend.l2_normalize")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def l2_normalize(x, axis=None):
    """Normalizes a tensor w.r.t. the L2 norm along the specified axis.

    Args:
        x: Tensor or variable.
        axis: axis along which to perform normalization.

    Returns:
        A tensor.
    """
    return tf.linalg.l2_normalize(x, axis=axis)


@keras_export("keras.backend.in_top_k")
@doc_controls.do_not_generate_docs
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`.

    Args:
        predictions: A tensor of shape `(batch_size, classes)` and type
          `float32`.
        targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
        k: An `int`, number of top elements to consider.

    Returns:
        A 1D tensor of length `batch_size` and type `bool`.
        `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
        values of `predictions[i]`.
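
    Example (an illustrative sketch):

    >>> preds = tf.constant([[0.1, 0.9], [0.8, 0.2]])
    >>> targets = tf.constant([1, 1])
    >>> tf.keras.backend.in_top_k(preds, targets, k=1)
    <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>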
    """
    return tf.compat.v1.math.in_top_k(predictions, targets, k)


def _preprocess_conv1d_input(x, data_format):
    """Transpose and cast the input before the conv1d.

    Args:
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    Returns:
        A tensor.
    """
    tf_data_format = "NWC"  # To pass TF Conv2dNative operations.
    if data_format == "channels_first":
        if not _has_nchw_support():
            # NCW -> NWC
            x = tf.compat.v1.transpose(x, (0, 2, 1))
        else:
            tf_data_format = "NCW"
    return x, tf_data_format
r  c                 C   s4   d}|dkr,t  r|r(tjj| d} nd}| |fS )a  Transpose and cast the input before the conv2d.

    Args:
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.
        force_transpose: Boolean. If True, the input will always be transposed
            from NCHW to NHWC if `data_format` is `"channels_first"`.
            If False, the transposition only occurs on CPU (GPU ops are
            assumed to support NCHW).

    Returns:
        A tensor.
    r&  r9  r<  r'  r  )r2   r(  force_transposer+  r   r   r   _preprocess_conv2d_input  s    
r  c                 C   s0   d}|dkr(t  s$tjj| d} nd}| |fS )zTranspose and cast the input before the conv3d.

    Args:
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    Returns:
        A tensor.
    NDHWCr9  )r   rt   r  r-  r5   NCDHWr  r  r   r   r   _preprocess_conv3d_input  s    
r  c                 C   s0   | dkrd} n| dkrd} nt dt|  | S )zConvert keras' padding to TensorFlow's padding.

    Args:
        padding: string, one of 'same', 'valid'.

    Returns:
        a string, one of 'SAME', 'VALID'.

    Raises:
        ValueError: if `padding` is invalid.
    """
    if padding == "same":
        padding = "SAME"
    elif padding == "valid":
        padding = "VALID"
    else:
        raise ValueError("Invalid padding: " + str(padding))
    return padding


@keras_export("keras.backend.conv1d")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv1d(
    x, kernel, strides=1, padding="valid", data_format=None, dilation_rate=1
):
    """1D convolution.

    Args:
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first".
        dilation_rate: integer dilation rate.

    Returns:
        A tensor, result of 1D convolution.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  causalr   r5   r  inputfilterdilation_ratestridesrw  r(  r9  r  r  )r{  rs   r   ri   rB  rx  r  r  r)   rC   rD   r  convolutionr  )	r2   kernelr  rw  r(  r  kernel_shapeleft_padr+  r   r   r   conv1dB  s,    

r  zkeras.backend.conv2dc                 C   s|   |du rt  }|dvr&tdt| t| |\} }t|}tjjjj	| |||||d} |dkrx|dkrxtjj
| d} | S )a  2D convolution.

    Args:
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: `"channels_last"` or `"channels_first"`.
        dilation_rate: tuple of 2 integers.

    Returns:
        A tensor, result of 2D convolution.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r  r9  r&  rF  )r{  rs   r   r  r  r)   rC   rD   r  r  r  r2   r  r  rw  r(  r  r+  r   r   r   conv2dt  s"    
r  zkeras.backend.conv2d_transposec           	      C   sr  |du rt  }|dvr&tdt| |dkr<|dkr<d}nd}t| ||\} }|dkr||dkr||d	 |d
 |d |d f}|d	 du rt| d	 ft|dd  }t|ttfrt	t|}t
|}|dkrd| d }nd| }|dkrtjjjj| |||||d} n:|d	 |d kr0td| tjj| |||d	 |d} |dkrn|dkrntjj| d} | S )a-  2D deconvolution (i.e.

    transposed convolution).

    Args:
        x: Tensor or variable.
        kernel: kernel tensor.
        output_shape: 1D int tensor for the output shape.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: Tuple of 2 integers.

    Returns:
        A tensor, result of transposed 2D convolution.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r9  ru  TFr&  r   rt   r  r5   r5   rw  r(  zmExpected the 2 dimensions of the `dilation_rate` argument to be equal to each other. Received: dilation_rate=)r  rw  rF  )r{  rs   r   r  ri   r   r(   r  r)   r  r  rC   rD   r  conv2d_transposeatrous_conv2d_transposer  )	r2   r  r  r  rw  r(  r  r  r+  r   r   r   r    sZ     

	r  c           	   	   C   s  |du rt  }|dvr&tdt| t|tr6|f}t|trF|f}t| |\} }t|}t|tsnt|}|dkrd}d|d  d }nd}d|d  }t	| |} t	|d	}t	|d	}d| }tj
jjj| ||||||d
} t| |g} |dkr|dkrtj
j| d} | S )a(  1D convolution with separable filters.

    Args:
        x: input tensor
        depthwise_kernel: convolution kernel for the depthwise convolution.
        pointwise_kernel: kernel for the 1x1 convolution.
        strides: stride integer.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: integer dilation rate.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r  r5   r  rt   ru  r   r  rw  r  r(  r9  r  )r{  rs   r   r(   r:   r  r  r   r)   r   rC   rD   r  separable_conv2dr  r  )	r2   depthwise_kernelpointwise_kernelr  rw  r(  r  r+  spatial_start_dimr   r   r   separable_conv1d  sD    




r  zkeras.backend.separable_conv2dc              	   C   s   |du rt  }|dvr&tdt| t|dkr:tdt| |\} }t|}t|tsbt|}|dkrxd| d }nd| }tj	j
jj| ||||||d	} |d
kr|dkrtj	j
| d} | S )a  2D convolution with separable filters.

    Args:
        x: input tensor
        depthwise_kernel: convolution kernel for the depthwise convolution.
        pointwise_kernel: kernel for the 1x1 convolution.
        strides: strides tuple (length 2).
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: tuple of integers,
            dilation rates for the separable convolution.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
        ValueError: if `strides` is not a tuple of 2 integers.
    N>   r9  r:  rz  rt   (`strides` must be a tuple of 2 integers.r&  r  ru  r  r9  rF  )r{  rs   r   r)  r  r  r(   r   r)   rC   rD   r  r  r  )r2   r  r  r  rw  r(  r  r+  r   r   r   r  D  s2     

	r  zkeras.backend.depthwise_conv2dc                 C   s   |du rt  }|dvr&tdt| t| |\} }t|}|dkrRd| d }nd| }tjjjj	| |||||d} |dkr|dkrtjj
| d	} | S )
a.  Depthwise 2D convolution.

    Args:
        x: input tensor
        depthwise_kernel: convolution kernel for the depthwise convolution.
        strides: strides tuple (length 2).
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: tuple of integers,
            dilation rates for the separable convolution.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r&  r  ru  r  r9  rF  )r{  rs   r   r  r  r)   rC   rD   r  depthwise_conv2dr  )r2   r  r  rw  r(  r  r+  r   r   r   r    s(    
r  zkeras.backend.conv3dr5   r5   r5   c                 C   s|   |du rt  }|dvr&tdt| t| |\} }t|}tjjjj	| |||||d} |dkrx|dkrxtjj
| d} | S )a  3D convolution.

    Args:
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: tuple of 3 integers.

    Returns:
        A tensor, result of 3D convolution.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r  r9  r  r   r-  r5   rt   r  )r{  rs   r   r  r  r)   rC   rD   r  r  r  r  r   r   r   conv3d  s"    
r  c                 C   s"  |du rt  }|dvr&tdt| t|ttfr>t|}t| |\} }|dkr~|dkr~|d |d |d |d	 |d
 f}|d du rt	| d ft|d
d  }tt|}t
|}|dkrd| d }nd| }tjjjj| |||||d} |dkr|dkrtjj| d} | S )a  3D deconvolution (i.e.

    transposed convolution).

    Args:
        x: input tensor.
        kernel: kernel tensor.
        output_shape: 1D int tensor for the output shape.
        strides: strides tuple.
        padding: string, "same" or "valid".
        data_format: string, `"channels_last"` or `"channels_first"`.

    Returns:
        A tensor, result of transposed 3D convolution.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
        `channels_first`.
    N>   r9  r:  rz  r9  r  r   rt   r  r-  r5   r  ru  r  r  )r{  rs   r   r(   r   r  r)   r  r  ri   r  rC   rD   r  conv3d_transposer  )r2   r  r  r  rw  r(  r+  r   r   r   r    s@    
 
r  zkeras.backend.pool2dc                 C   s  |du rt  }|dvr&tdt| t|dkr:tdt|dkrNtdt| |\} }t|}|dkrd| d }d| d }nd	| }d	| }|d
krtjjj	j
| ||||d} n4|dkrtjjj	j| ||||d} ntdt| |dkr|dkrtjj| d} | S )a  2D Pooling.

    Args:
        x: Tensor or variable.
        pool_size: tuple of 2 integers.
        strides: tuple of 2 integers.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        pool_mode: string, `"max"` or `"avg"`.

    Returns:
        A tensor, result of 2D pooling.

    Raises:
        ValueError: if `data_format` is neither `"channels_last"` or
        `"channels_first"`.
        ValueError: if `pool_size` is not a tuple of 2 integers.
        ValueError: if `strides` is not a tuple of 2 integers.
        ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
    N>   r9  r:  rz  rt   z*`pool_size` must be a tuple of 2 integers.r  r&  r  ru  r  r  avgInvalid pooling mode: r9  rF  )r{  rs   r   r)  r  r  r)   rC   rD   r  max_poolavg_poolr  r2   	pool_sizer  rw  r(  	pool_moder+  r   r   r   pool2d+  s6    



r  zkeras.backend.pool3dc                 C   s   |du rt  }|dvr&tdt| t| |\} }t|}|dkr^d| d }d| d }nd| }d| }|dkrtjj| ||||d} n0|d	krtjj| ||||d} ntd
t| |dkr|dkrtj	j
| d} | S )a  3D Pooling.

    Args:
        x: Tensor or variable.
        pool_size: tuple of 3 integers.
        strides: tuple of 3 integers.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        pool_mode: string, `"max"` or `"avg"`.

    Returns:
        A tensor, result of 3D pooling.

    Raises:
        ValueError: if `data_format` is neither `"channels_last"` or
        `"channels_first"`.
        ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
    N>   r9  r:  rz  r  r  ru  r  r  r  r  r9  r  )r{  rs   r   r  r  r)   r  
max_pool3d
avg_pool3drC   rD   r  r  r   r   r   pool3dl  s.    

r  c                    sB  |du rt  }|dvr&tdt| t|}|d }|d }t|}	tt|	}
g }dd |D }tj| D ]pt	dg}|dkr|
t	d | fd	d
|
D  |dkr|
t	d |
t| | dd|f qnt|dd}t||}t||d|f }|dkr$|	|	d g|
 }n|	g|
 |	d g }t||S )a2  Apply N-D convolution with un-shared weights.

    Args:
        inputs: (N+2)-D tensor with shape
            (batch_size, channels_in, d_in1, ..., d_inN)
            if data_format='channels_first', or
            (batch_size, d_in1, ..., d_inN, channels_in)
            if data_format='channels_last'.
        kernel: the unshared weight for N-D convolution,
            with shape (output_items, feature_dim, channels_out), where
            feature_dim = np.prod(kernel_size) * channels_in,
            output_items = np.prod(output_shape).
        kernel_size: a tuple of N integers, specifying the
            spatial dimensions of the N-D convolution window.
        strides: a tuple of N integers, specifying the strides
            of the convolution along the spatial dimensions.
        output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
            dimensionality of the output.
        data_format: string, "channels_first" or "channels_last".

    Returns:
        An (N+2)-D tensor with shape:
        (batch_size, channels_out) + output_shape
        if data_format='channels_first', or:
        (batch_size,) + output_shape + (channels_out,)
        if data_format='channels_last'.

    Raises:
        ValueError: if `data_format` is neither
        `channels_last` nor `channels_first`.
    N>   r9  r:  rz  r5   r  c                 S   s   g | ]}t |qS r   rb  )r   axis_maxr   r   r   r     r   zlocal_conv.<locals>.<listcomp>r9  c                 3   s8   | ]0}t | |  | |   |  V  qd S r   )slicer   kernel_sizepositionr  r   r   r:    s
   zlocal_conv.<locals>.<genexpr>r:  r   r  )r{  rs   r   r   r)  r  r(  r  productr  r  extendr  r   r  r7  )r  r  r  r  r  r(  r  feature_dimchannels_outr  spatial_dimensionsxsoutput_axes_ticksslicesx_aggregater_  r  r   r  r   
local_conv  s8    "


r  zkeras.backend.local_conv1dc                 C   s   |j d f}t| |||||S )a  Apply 1D conv with un-shared weights.

    Args:
        inputs: 3D tensor with shape:
            (batch_size, steps, input_dim)
            if data_format is "channels_last" or
            (batch_size, input_dim, steps)
            if data_format is "channels_first".
        kernel: the unshared weight for convolution,
            with shape (output_length, feature_dim, filters).
        kernel_size: a tuple of a single integer,
            specifying the length of the 1D convolution window.
        strides: a tuple of a single integer,
            specifying the stride length of the convolution.
        data_format: the data format, channels_first or channels_last.

    Returns:
        A 3d tensor with shape:
        (batch_size, output_length, filters)
        if data_format='channels_first'
        or 3D tensor with shape:
        (batch_size, filters, output_length)
        if data_format='channels_last'.
    r   )ri   r  )r  r  r  r  r(  r  r   r   r   local_conv1d  s    r  zkeras.backend.local_conv2dc                 C   s   t | |||||S )a-  Apply 2D conv with un-shared weights.

    Args:
        inputs: 4D tensor with shape:
            (batch_size, filters, new_rows, new_cols)
            if data_format='channels_first'
            or 4D tensor with shape:
            (batch_size, new_rows, new_cols, filters)
            if data_format='channels_last'.
        kernel: the unshared weight for convolution,
            with shape (output_items, feature_dim, filters).
        kernel_size: a tuple of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: a tuple of 2 integers, specifying the strides
            of the convolution along the width and height.
        output_shape: a tuple with (output_row, output_col).
        data_format: the data format, channels_first or channels_last.

    Returns:
        A 4D tensor with shape:
        (batch_size, filters, new_rows, new_cols)
        if data_format='channels_first'
        or 4D tensor with shape:
        (batch_size, new_rows, new_cols, filters)
        if data_format='channels_last'.
    )r  )r  r  r  r  r  r(  r   r   r   local_conv2d  s     r  zkeras.backend.bias_addc                 C   s   |du rt  }|dvr&tdt| t|}t|dkrjt|t| d krjtdt|t| d f t|dkr|dkrtjj| |ddS tjj| |d	dS t| d
v r|dkrd|d f|dd  }| t	|| S | t	|d|  S tj| |S )a  Adds a bias vector to a tensor.

    Args:
        x: Tensor or variable.
        bias: Bias tensor to add.
        data_format: string, `"channels_last"` or `"channels_first"`.

    Returns:
        Output tensor.

    Raises:
        ValueError: In one of the two cases below:
                    1. invalid `data_format` argument.
                    2. invalid bias shape.
                       the bias should be either a vector or
                       a tensor with ndim(x) - 1 dimension
    N>   r9  r:  rz  r5   z>Unexpected bias dimensions %d, expect to be 1 or %d dimensionsr9  r'  )r(  r&  )r  r-     r  r  )
r{  rs   r   r   r)  r3  r)   r  bias_addr  )r2   biasr(  
bias_shapebias_reshape_axisr   r   r   r  <  s*     r  zkeras.backend.random_normalc                 C   s8   |du rt  }|du r"tjd}tjj| ||||dS )a  Returns a tensor with normal distribution of values.

    It is an alias to `tf.random.normal`.

    Args:
        shape: A tuple of integers, the shape of tensor to create.
        mean: A float, the mean value of the normal distribution to draw
          samples. Default to 0.0.
        stddev: A float, the standard deviation of the normal distribution
          to draw samples. Default to 1.0.
        dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras
          backend dtype which is float32.
        seed: Integer, random seed. Will use a random numpy integer when not
          specified.

    Returns:
        A tensor with normal distribution of values.

    Example:

    >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),
    ... mean=0.0, stddev=1.0)
    >>> random_normal_tensor
    <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
    dtype=float32)>
    Nr  )r|  r}  r'   rc  )r.   r/   rm  rx  r)   r  r~  r   r   r   r  k  s    
r  zkeras.backend.random_uniformc                 C   s8   |du rt  }|du r"tjd}tjj| ||||dS )a  Returns a tensor with uniform distribution of values.

    Args:
        shape: A tuple of integers, the shape of tensor to create.
        minval: A float, lower boundary of the uniform distribution
            to draw samples.
        maxval: A float, upper boundary of the uniform distribution
            to draw samples.
        dtype: String, dtype of returned tensor.
        seed: Integer, random seed.

    Returns:
        A tensor.

    Example:

    >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),
    ... minval=0.0, maxval=1.0)
    >>> random_uniform_tensor
    <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
    dtype=float32)>
    Nr  )r  r  r'   rc  )r.   r/   rm  rx  r)   r  r  r   r   r   r    s    
r  zkeras.backend.random_binomialc                 C   s   t jddd t| |||S )a  Returns a tensor with random binomial distribution of values.

    DEPRECATED, use `tf.keras.backend.random_bernoulli` instead.

    The binomial distribution with parameters `n` and `p` is the probability
    distribution of the number of successful Bernoulli process. Only supports
    `n` = 1 for now.

    Args:
        shape: A tuple of integers, the shape of tensor to create.
        p: A float, `0. <= p <= 1`, probability of binomial distribution.
        dtype: String, dtype of returned tensor.
        seed: Integer, random seed.

    Returns:
        A tensor.

    Example:

    >>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3),
    ... p=0.5)
    >>> random_binomial_tensor
    <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
    dtype=float32)>
    z`tf.keras.backend.random_binomial` is deprecated, and will be removed in a future version.Please use `tf.keras.backend.random_bernoulli` instead.rt   ru   )rm   rn   random_bernoulliri   r  r'   rc  r   r   r   random_binomial  s
    r  zkeras.backend.random_bernoullic                 C   sV   |du rt  }|du r"tjd}ttjj| ||d|ktj| |dtj| |dS )aQ  Returns a tensor with random bernoulli distribution of values.

    Args:
        shape: A tuple of integers, the shape of tensor to create.
        p: A float, `0. <= p <= 1`, probability of bernoulli distribution.
        dtype: String, dtype of returned tensor.
        seed: Integer, random seed.

    Returns:
        A tensor.
    Nr  r  r&   )	r.   r/   rm  rx  r)   r  r  rO  rK  r  r   r   r   r    s    r  zkeras.backend.truncated_normalc                 C   s8   |du rt  }|du r"tjd}tjj| ||||dS )aA  Returns a tensor with truncated random normal distribution of values.

    The generated values follow a normal distribution
    with specified mean and standard deviation,
    except that values whose magnitude is more than
    two standard deviations from the mean are dropped and re-picked.

    Args:
        shape: A tuple of integers, the shape of tensor to create.
        mean: Mean of the values.
        stddev: Standard deviation of the values.
        dtype: String, dtype of returned tensor.
        seed: Integer, random seed.

    Returns:
        A tensor.
    Nr  r  )r.   r/   rm  rx  r)   r  r~  r   r   r   r    s    
r  z'keras.backend.ctc_label_dense_to_sparsec                    sH  t | }t |d g}t |d g  fdd}t t d|d gdt j}t jjj|||dd}|dddddf }t 	t 
t d|d ||}t jj||}t jjt 	t 
t d|d  t|d}	t jj|	|}
t jjt 	t|
|gdddd	g}t jj| |}t t |t j|t |t jS )
zConverts CTC labels from dense to sparse.

    Args:
        labels: dense CTC labels.
        label_lengths: length of the labels.

    Returns:
        A sparse tensor representation of the labels.
    r   r5   c                    s(   t t t | d dt  |k S )Nr5   r   )r)   r   r(  ri   fill)	old_inputr%  max_num_labels_tnsr   r   range_less_than+  s    z2ctc_label_dense_to_sparse.<locals>.range_less_than)r  r  Nr  rt   r  )r)   ri   r  r-   r  r   rC   rD   scanr  rf  r(  boolean_maskr  r  r   	gather_ndr,   r  )r{  label_lengthslabel_shapenum_batches_tnsr  init
dense_masklabel_array	label_indbatch_array	batch_indr   vals_sparser   r  r   ctc_label_dense_to_sparse  s6    
r  zkeras.backend.ctc_batch_costc                 C   s   t t j|ddt j}t t j|ddt j}t t| |t j}t jt jjj	|g ddt
  }t t jjjj|||ddS )av  Runs CTC loss algorithm on each batch element.

    Args:
        y_true: tensor `(samples, max_string_length)`
            containing the truth labels.
        y_pred: tensor `(samples, time_steps, num_categories)`
            containing the prediction, or output of the softmax.
        input_length: tensor `(samples, 1)` containing the sequence length for
            each batch item in `y_pred`.
        label_length: tensor `(samples, 1)` containing the sequence length for
            each batch item in `y_true`.

    Returns:
        Tensor with shape (samples,1) containing the
            CTC loss of each element.
    r  r  r5   r   rt   r  )r  r{  sequence_lengthr5   )r)   r-   r  r;  r  r  r  rC   rD   r  r  r   r  ctc_loss)y_truey_predr  label_lengthsparse_labelsr   r   r   ctc_batch_costM  s    
r  zkeras.backend.ctc_decoded   c                 C   s   t | }|d |d  }}tjtjjj| g ddt  } t|tj	}|rftj
j| |d\}}	ntjjj
j| |||d\}}	g }
|D ]0}t|j|j||f}|
tjj|dd q|
|	fS )	a  Decodes the output of a softmax.

    Can use either greedy search (also known as best path)
    or a constrained dictionary search.

    Args:
        y_pred: tensor `(samples, time_steps, num_categories)`
            containing the prediction, or output of the softmax.
        input_length: tensor `(samples, )` containing the sequence length for
            each batch item in `y_pred`.
        greedy: perform much faster best-path search if `true`.
            This does not use a dictionary.
        beam_width: if `greedy` is `false`: a beam search decoder will be used
            with a beam of this width.
        top_paths: if `greedy` is `false`,
            how many of the most probable paths will be returned.

    Returns:
        Tuple:
            List: if `greedy` is `true`, returns a list of one element that
                contains the decoded sequence.
                If `false`, returns the `top_paths` most probable
                decoded sequences.
                Each decoded sequence has shape (samples, time_steps).
                Important: blank labels are returned as `-1`.
            Tensor `(top_paths, )` that contains
                the log probability of each decoded sequence.
    r   r5   r  r  )r  r  )r  r  
beam_width	top_pathsr  )sp_inputdefault_value)ri   r)   r  r  rC   rD   r  r  r-   r;  r  ctc_greedy_decoderctc_beam_search_decoderr,   r   r   r  r   r   )r  r  greedyr  r  input_shapenum_samples	num_stepsdecodedlog_probdecoded_densestr   r   r   
ctc_decodes  s*     

r#  zkeras.backend.map_fnc                 C   s   t jjj| |||dS )a9  Map the function fn over the elements elems and return the outputs.
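

# Illustrative usage sketch (hypothetical helper, not a Keras API): greedy
# decoding of a toy batch; blanks are trimmed and padding positions are -1.
def _example_ctc_decode():
    y_pred = tf.nn.softmax(tf.random.uniform((2, 5, 4)))
    input_length = tf.constant([5, 5])
    decoded, log_prob = ctc_decode(y_pred, input_length, greedy=True)
    # decoded is a one-element list here; decoded[0] has shape (2, 5).
    return decoded[0], log_prob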

    Args:
        fn: Callable that will be called upon each element in elems
        elems: tensor
        name: A string name for the map node in the graph
        dtype: Output data type.

    Returns:
        Tensor with dtype `dtype`.
    )rj   r'   )r)   rC   rD   map_fn)fnelemsrj   r'   r   r   r   r$    s    r$  zkeras.backend.foldlc                 C   s   t jjj| |||dS )a  Reduce elems using fn to combine them from left to right.



@keras_export("keras.backend.foldl")
@doc_controls.do_not_generate_docs
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[0]` in case of None)
        name: A string name for the foldl node in the graph

    Returns:
        Tensor with same type and shape as `initializer`.
    r  rj   )r)   rC   rD   foldlr%  r&  r  rj   r   r   r   r(    s    r(  zkeras.backend.foldrc                 C   s   t jjj| |||dS )a  Reduce elems using fn to combine them from right to left.

    Args:
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance `lambda acc, x: acc + x`
        elems: tensor
        initializer: The first value used (`elems[-1]` in case of None)
        name: A string name for the foldr node in the graph

    Returns:
        Same type and shape as initializer
    r'  )r)   rC   rD   foldrr)  r   r   r   r*    s    r*  Z
KERAS_HOME~z.kerasz
keras.jsonr.   >   float32float64float16r  r{  >   r9  r:  r$   )r.   r  r%   r{  wr-  )indentc                 C   s,   dd }| j  r t||  n||  dS )z6Configure session config and create a session with it.c                 S   s   t  }ttddr(tjjr(|tjj t| rZ| | | jj	
 }tjjj||d}nLt }|r|j}|| tjjj||jd}n| | tjjj|d}t| dS )z(Create the Distributed Strategy session.r   N)r   r  r   )r   rZ   rF   r   _config	MergeFromis_tpu_strategy	configureextended_tpu_cluster_resolvermasterr)   rC   rD   r   dcget_current_worker_contextsession_configmaster_targetr   )distribution_strategyr:  r7  r   worker_contextdc_session_configr   r   r   _create_session  s&    



zAconfigure_and_create_distributed_session.<locals>._create_sessionN)r5  _in_multi_worker_moder8  run_distribute_coordinator)r<  r?  r   r   r   r     s    %
r   c                 C   s$   dd }|| rdS t tt| jS )Nc                 S   s   | j dS )NTPUStrategy)r   
startswith)r  r   r   r   r  D  r   z(_is_tpu_strategy_class.<locals>.<lambda>T)r?  r  _is_tpu_strategy_class	__bases__)clzis_tpu_stratr   r   r   rD  C  s    rD  c                 C   s
   t | jS )zEReturns whether input is a TPUStrategy instance or subclass instance.)rD  r   )strategyr   r   r   r3  J  s    r3  c                 C   s   dd }t j|| S )Nc                 S   s   t | tjrt| S | S r   )r(   r)   r+   rW  r   r   r   r   _cast_variables_to_tensorP  s    
z;cast_variables_to_tensor.<locals>._cast_variables_to_tensor)r)   r.  r/  )r5  rI  r   r   r   r  O  s    r  c                 C   s   t | ot| t jj S r   )r)   r  r(   r   r  r1   r   r   r   r  X  s    r  c                 C   s`   dd }t j| }tdd |D }|s2| dfS t j|| } t |d  d d}| |fS )z%Converts any ragged tensors to dense.c                 S   s   t | tjr|  S | S r   )r(   r)   r  r  )r  r   r   r   _convert_ragged_input_  s    z7convert_inputs_if_ragged.<locals>._convert_ragged_inputc                 s   s   | ]}t |tjV  qd S r   r3  )r   r5  r   r   r   r:  e  s   z+convert_inputs_if_ragged.<locals>.<genexpr>Nr   r;  )r)   r.  r>  r?  r/  r-   nested_row_lengths)r  rJ  flat_inputscontains_raggedrK  r   r   r   convert_inputs_if_ragged\  s    rN  c                 C   sD   | s|S |r2t |dg}tj||}t |dgS tj||S dS )z8Converts any ragged input back to its initial structure.r5   N)r  r)   r  r  )is_ragged_inputr_  rK  r  r4  r   r   r   maybe_convert_to_raggedu  s    rP  c                   @   sB   e Zd ZdZdd Zdd Zdd Zdd	 Zd
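

# Illustrative usage sketch (hypothetical helper, not a Keras API): a ragged
# batch is densified for a kernel that needs rectangular input, then converted
# back afterwards.
def _example_ragged_roundtrip():
    ragged = tf.ragged.constant([[1.0, 2.0], [3.0]])
    dense, row_lengths = convert_inputs_if_ragged(ragged)
    # `dense` is zero-padded to shape (2, 2); row_lengths == [2, 1].
    return maybe_convert_to_ragged(True, dense, row_lengths)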
d ZdddZ	dS )ContextValueCachea  Container that caches (possibly tensor) values based on the context.

    This class is similar to defaultdict, where values may be produced by the
    default factory specified during initialization. This class also has a
    default value for the key (when key is `None`) -- the key is set to the
    current graph or eager context. The default factories for key and value are
    only used in `__getitem__` and `setdefault`. The `.get()` behavior remains
    the same.

    This object will return the value of the current graph or closest parent
    graph if the current graph is a function. This is to reflect the fact that
    if a tensor is created in eager/graph, child functions may capture that
    tensor.

    The default factory method may accept keyword arguments (unlike defaultdict,
    which only accepts callables with 0 arguments). To pass keyword arguments to
    `default_factory`, use the `setdefault` method instead of `__getitem__`.

    An example of how this class can be used in different contexts:

    ```
    cache = ContextValueCache(int)

    # Eager mode
    cache[None] += 2
    cache[None] += 4
    assert cache[None] == 6

    # Graph mode
    with tf.Graph().as_default() as g:
      cache[None] += 5
      cache[g] += 3
    assert cache[g] == 8
    ```

    Example of a default factory with arguments:

    ```
    cache = ContextValueCache(lambda x: x + 1)
    g = tf.get_default_graph()

    # Example with keyword argument.
    value = cache.setdefault(key=g, kwargs={'x': 3})
    assert cache[g] == 4
    ```
    """

    def __init__(self, default_factory):
        self.default_factory = default_factory
        weakref.WeakKeyDictionary.__init__(self)

    def _key(self):
        if tf.executing_eagerly():
            return _DUMMY_EAGER_GRAPH.key
        else:
            return tf.compat.v1.get_default_graph()

    def _get_parent_graph(self, graph):
        """Returns the parent graph or dummy eager object."""
        # When eager execution is enabled outside functions, a FuncGraph whose
        # outer graph is not itself a FuncGraph maps to the dummy eager key.
        parent_graph = graph.outer_graph
        if not isinstance(
            parent_graph, tf.__internal__.FuncGraph
        ) and tf.compat.v1.executing_eagerly_outside_functions():
            return _DUMMY_EAGER_GRAPH.key
        return parent_graph

    def _get_recursive(self, key):
        """Gets the value at key or the closest parent graph."""
        value = self.get(key)
        if value is not None:
            return value

        # Since FuncGraphs can capture tensors and variables from their parent
        # graphs, recursively search for a value stored for a parent graph.
        if isinstance(key, tf.__internal__.FuncGraph):
            return self._get_recursive(self._get_parent_graph(key))
        return None

    def __getitem__(self, key):
        """Gets the value at key (or current context), or sets default value.

        Args:
          key: May be `None` or `Graph` object. When `None`, the key is set to
            the current context.

        Returns:
          Either the cached or default value.
        N)rT  rX  rR  rY  r   r   r   __getitem__  s    

zContextValueCache.__getitem__Nc                 C   sH   |du r|   }|pi }|du r8|| vr8| jf i |}tj| ||S )zLSets the default value if key is not in dict, and returns the
        value.N)rT  rR  rd   rS  
setdefault)r   r"   defaultre  r   r   r   r[    s    zContextValueCache.setdefault)NNN)
r   r   r   r   r   rT  rW  rX  rZ  r[  r   r   r   r   rQ    s   /rQ  )r4   )N)r   )r   )N)NNN)NNr4   FF)N)NNN)NNNFNF)NN)NN)NN)NN)NN)N)NNN)NNN)N)NF)NF)NF)NF)r   )r   )NF)NF)NF)NF)NF)r  )r  )NF)r  )r  )r  )r  )r  r  )r  )r8  )Nr5   r;  )r  )ru  )ry  N)r~  N)r   )r4   r  )NN)FNNFNFFT)N)N)ry  Nry  )rz  )r  )Fr  )Fr  N)F)Fr  r  F)NN)N)F)r5   r  Nr5   )ru  r  Nru  )ru  r  Nru  )r5   r  Nr5   )ru  r  Nru  )ru  r  Nru  )r  r  Nr  )r  r  N)ru  r  Nr  )r  r  Nr  )N)N)N)N)ry  rz  NN)ry  rz  NN)ry  NN)ry  NN)ry  rz  NN)Tr  r5   )NN)NN)NN)F(4  r   r8   r  jsonr   rm  r  	threadingrm   rd   r  r/   tensorflow.compat.v2rC   v2r)   r  r   keras.distributer   r8  r0  r   r<  r   r   r   r   tensorflow.core.protobufr	   tensorflow.python.eagerr
   tensorflow.python.eager.contextr   tensorflow.python.platformr   r    tensorflow.python.util.tf_exportr   tensorflow.tools.docsr   r  rL  r  py_sumr  r?  localrB   r   r   rF   rS  r7   r   r@   r   rI   rU   r   r  r.   r{  set_epsilon
set_floatxset_image_data_formatdo_not_generate_docsr%   r   dispatchadd_dispatch_supportr3   r>   rA   rT   register_clear_session_functionrX   r_   r`   r^   r[   rL   r]   rK   rp   ro   contextmanagerrx   rw   r   r   r   r   r   r   register_get_session_functiontrackingregister_session_providerr6   r   r   r   r   r   r   r   r   r   r   r   r   r   rk   rD   _v1_name_scoper   r   r   r   r  r  r   r   r  r"  rb   ri   r   r3  r'   rG  rI  rK  rO  rP  rS  rU  rW  rY  rw  rZ  r[  r\  AutoTrackabler]  r  r  r  r-   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r|  r  r  r  r  r  r  r  r   r  r  r  r	  r
  r  r  r  r  r  r  r  r  r  r  r$  r,  r/  r  r   r  r7  r\  r`  r]  rm  rn  rf  r>  rt  r   r  rx  r}  r  r  r  r  _VALUE_SET_CODE_STRINGrH  r  r  r  r  r  r  r  r  r  rS  r`  re  rf  rj  ro  rq  rr  rs  rx  rz  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r#  r$  r(  r*  r   rf   
_keras_dirpath
expanduser_keras_base_dirrS  _config_pathexistsopenfhloadr1  rs   Z_floatx_epsilonr(   r  Z_image_data_formatmakedirsOSErrorfr  dumpsIOErrorr   rD  r3  r  r  rN  rP  rQ  ObjectIdentityWeakSetrJ   rM   rO   r   r   r   r   <module>   s  		

K$

!#
9
K 

&




7

    
:	


?g
+!	





  "$2C L"0%!@$?:"%4"'') V9          cF/7#?h!   @/   -   [   F   ;   3   1  C   >   9N",$ #"0#9$4



D.	x