"""Gated Recurrent Unit layer."""

import uuid

import tensorflow.compat.v2 as tf

from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import gru_lstm_utils
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from keras.utils import tf_utils

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export

RECURRENT_DROPOUT_WARNING_MSG = (
    "RNN `implementation=2` is not supported when "
    "`recurrent_dropout` is set. Using `implementation=1`."
)


@keras_export("keras.layers.GRUCell", v1=[])
class GRUCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
    """Cell class for the GRU layer.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    This class processes one step within the whole time sequence input, whereas
    `tf.keras.layers.GRU` processes the whole sequence.

    For example:

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> rnn = tf.keras.layers.RNN(tf.keras.layers.GRUCell(4))
    >>> output = rnn(inputs)
    >>> print(output.shape)
    (32, 4)
    >>> rnn = tf.keras.layers.RNN(
    ...    tf.keras.layers.GRUCell(4),
    ...    return_sequences=True,
    ...    return_state=True)
    >>> whole_sequence_output, final_state = rnn(inputs)
    >>> print(whole_sequence_output.shape)
    (32, 10, 4)
    >>> print(final_state.shape)
    (32, 4)

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use. Default: hyperbolic tangent
        (`tanh`). If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use for the recurrent step.
        Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
        applied (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs. Default:
        `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix, used for the linear transformation of the recurrent
        state.  Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
        for the linear transformation of the recurrent state. Default: 0.
      reset_after: GRU convention (whether to apply reset gate after or
        before matrix multiplication). False = "before",
        True = "after" (default and cuDNN compatible).

    Call arguments:
      inputs: A 2D tensor, with shape of `[batch, feature]`.
      states: A 2D tensor with shape of `[batch, units]`, which is the state
        from the previous time step. For timestep 0, the initial state provided
        by the user will be fed to the cell.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. Only relevant when `dropout` or
        `recurrent_dropout` is used.
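
    A single step can also be run directly on the cell (a usage sketch;
    shapes follow the call arguments described above):

    >>> cell = tf.keras.layers.GRUCell(4)
    >>> batch_input = tf.random.normal([32, 8])
    >>> states = [tf.zeros([32, 4])]
    >>> output, new_states = cell(batch_input, states)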
    tanhsigmoidTglorot_uniform
orthogonalzerosN        c                    s\  |dk rt d| dtjj r4|dd| _n|dd| _t jf i | || _	t
|| _t
|| _|| _t|| _t|| _t|| _t|| _t|	| _t|
| _t|| _t|| _t|| _tdtd|| _tdtd|| _|d	d
}| jdkr<|dkr<t t! d| _"n|| _"|| _#| j	| _$| j	| _%d S )Nr   zQReceived an invalid value for argument `units`, expected a positive integer, got .enable_caching_deviceTF      ?r   implementation      )&
ValueErrortfcompatr   #executing_eagerly_outside_functionspop_enable_caching_devicesuper__init__unitsr   get
activationrecurrent_activationuse_biasr   kernel_initializerrecurrent_initializerbias_initializerr   kernel_regularizerrecurrent_regularizerbias_regularizerr   kernel_constraintrecurrent_constraintbias_constraintminmaxdropoutrecurrent_dropoutloggingdebugRECURRENT_DROPOUT_WARNING_MSGr   reset_after
state_sizeoutput_size)selfr&   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   r3   r6   r7   r;   kwargsr   	__class__ P/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/layers/rnn/gru.pyr%   v   sJ    
zGRUCell.__init__c                    s   t  | |d }t| }| j|| jd fd| j| j| j|d| _	| j| j| jd fd| j
| j| j|d| _| jr| jsd| j f}ndd| j f}| j|d| j| j| j|d| _nd | _d| _d S )	N   kernel)shapenameinitializerregularizer
constraintcaching_devicerecurrent_kernelr   biasT)r$   buildr
   rL   
add_weightr&   r+   r.   r1   rF   r,   r/   r2   rM   r*   r;   r-   r0   r3   rN   built)r>   input_shape	input_dimdefault_caching_device
bias_shaper@   rB   rC   rO      sB    
	
	zGRUCell.buildc                 C   sh  t j|r|d n|}| j||dd}| j||dd}| jrb| jsR| jd  }}nt | j\}}| j	dkrd| j
  k rdk rn n&||d  }	||d  }
||d  }n|}	|}
|}t|	| jd d d | jf }t|
| jd d | j| jd f }t|| jd d | jd d f }| jrxt||d | j }t||| j| jd  }t||| jd d  }d| j  k rdk rn n&||d  }||d  }||d  }n|}|}|}t|| jd d d | jf }t|| jd d | j| jd f }| jrP| jrPt||d | j }t||| j| jd  }| || }| || }| jrt|| jd d | jd d f }| jrt||| jd d  }|| }n(t|| | jd d | jd d f }| || }n6d| j
  k rdk r(n n||d  }t|| j}| jrJt||}t j|ddd	\}}}| jrt|| j}| jrt||}n$t|| jd d d d| j f }t j|| j| jdgdd	\}}}| || }| || }| jr|| }n(t|| | jd d d| j d f }| || }|| d| |  }t j|r\|gn|}||fS )
Nr   rE   countr   r   r   r   rD   axis)r   nest	is_nestedget_dropout_mask_for_cell#get_recurrent_dropout_mask_for_cellr*   r;   rN   unstackr   r6   r   dotrF   r&   bias_addr7   rM   r)   r(   split)r>   inputsstatestrainingh_tm1dp_maskrec_dp_mask
input_biasrecurrent_biasinputs_zinputs_rinputs_hx_zx_rx_hh_tm1_zh_tm1_rh_tm1_hrecurrent_zrecurrent_rzrrecurrent_hhhmatrix_xmatrix_innerh	new_staterB   rB   rC   call   s     $
 
 zGRUCell.callc                    s   | j t| jt| j| jt| jt| jt| j	t
| jt
| jt
| jt| jt| jt| j| j| j| j| jd}|t|  t  }tt| t|  S )N)r&   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   r3   r6   r7   r   r;   )r&   r   	serializer(   r)   r*   r   r+   r,   r-   r   r.   r/   r0   r   r1   r2   r3   r6   r7   r   r;   updater
    config_for_enable_caching_devicer$   
get_configdictlistitemsr>   configbase_configr@   rB   rC   r   `  sB    





zGRUCell.get_configc                 C   s   t | |||S N)r
   #generate_zero_filled_state_for_cell)r>   rb   
batch_sizedtyperB   rB   rC   get_initial_state  s    zGRUCell.get_initial_state)r   r   Tr   r   r   NNNNNNr   r   T)N)NNN)__name__
__module____qualname____doc__r%   r   shape_type_conversionrO   r}   r   r   __classcell__rB   rB   r@   rC   r   -   s,   J               A*
~$r   zkeras.layers.GRUc                       s  e Zd ZdZd7 fdd	Zd8ddZedd Zedd Zedd Z	edd Z
edd Zedd Zedd Zedd Zedd  Zed!d" Zed#d$ Zed%d& Zed'd( Zed)d* Zed+d, Zed-d. Zed/d0 Z fd1d2Zed3d4 Zd5d6 Z  ZS )9GRUa<  Gated Recurrent Unit - Cho et al. 2014.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    Based on available runtime hardware and constraints, this layer
    will choose different implementations (cuDNN-based or pure-TensorFlow)
    to maximize the performance. If a GPU is available and all
    the arguments to the layer meet the requirement of the cuDNN kernel
    (see below for details), the layer will use a fast cuDNN implementation.

    The requirements to use the cuDNN implementation are:

    1. `activation` == `tanh`
    2. `recurrent_activation` == `sigmoid`
    3. `recurrent_dropout` == 0
    4. `unroll` is `False`
    5. `use_bias` is `True`
    6. `reset_after` is `True`
    7. Inputs, if masking is used, are strictly right-padded.
    8. Eager execution is enabled in the outermost context.
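
    For example, a layer constructed as follows satisfies the layer-level
    criteria above (1-6) and can be dispatched to the cuDNN kernel when a GPU
    is present (a usage sketch; every value shown is also the argument's
    default):

    >>> cudnn_compatible_gru = tf.keras.layers.GRU(
    ...     4, activation='tanh', recurrent_activation='sigmoid',
    ...     recurrent_dropout=0, unroll=False, use_bias=True, reset_after=True)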

    There are two variants of the GRU implementation. The default one is based
    on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
    hidden state before matrix multiplication. The other one is based on
    [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.

    The second variant is compatible with CuDNNGRU (GPU-only) and allows
    inference on CPU. Thus it has separate biases for `kernel` and
    `recurrent_kernel`. To use this variant, set `reset_after=True` and
    `recurrent_activation='sigmoid'`.

    For example:

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> gru = tf.keras.layers.GRU(4)
    >>> output = gru(inputs)
    >>> print(output.shape)
    (32, 4)
    >>> gru = tf.keras.layers.GRU(4, return_sequences=True, return_state=True)
    >>> whole_sequence_output, final_state = gru(inputs)
    >>> print(whole_sequence_output.shape)
    (32, 10, 4)
    >>> print(final_state.shape)
    (32, 4)

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use
        for the recurrent step.
        Default: sigmoid (`sigmoid`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs. Default:
        `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel`
         weights matrix, used for the linear transformation of the recurrent
         state. Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      activity_regularizer: Regularizer function applied to the output of the
        layer (its "activation"). Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
        for the linear transformation of the recurrent state. Default: 0.
      return_sequences: Boolean. Whether to return the last output
        in the output sequence, or the full sequence. Default: `False`.
      return_state: Boolean. Whether to return the last state in addition to the
        output. Default: `False`.
      go_backwards: Boolean (default `False`).
        If True, process the input sequence backwards and return the
        reversed sequence.
      stateful: Boolean (default False). If True, the last state
        for each sample at index i in a batch will be used as initial
        state for the sample of index i in the following batch.
      unroll: Boolean (default False).
        If True, the network will be unrolled,
        else a symbolic loop will be used.
        Unrolling can speed-up a RNN,
        although it tends to be more memory-intensive.
        Unrolling is only suitable for short sequences.
      time_major: The shape format of the `inputs` and `outputs` tensors.
        If True, the inputs and outputs will be in shape
        `[timesteps, batch, feature]`, whereas in the False case, it will be
        `[batch, timesteps, feature]`. Using `time_major = True` is a bit more
        efficient because it avoids transposes at the beginning and end of the
        RNN calculation. However, most TensorFlow data is batch-major, so by
        default this function accepts input and emits output in batch-major
        form.
      reset_after: GRU convention (whether to apply reset gate after or
        before matrix multiplication). False = "before",
        True = "after" (default and cuDNN compatible).

    Call arguments:
      inputs: A 3D tensor, with shape `[batch, timesteps, feature]`.
      mask: Binary tensor of shape `[samples, timesteps]` indicating whether
        a given timestep should be masked  (optional, defaults to `None`).
        An individual `True` entry indicates that the corresponding timestep
        should be utilized, while a `False` entry indicates that the
        corresponding timestep should be ignored.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. This argument is passed to the cell
        when calling it. This is only relevant if `dropout` or
        `recurrent_dropout` is used  (optional, defaults to `None`).
      initial_state: List of initial state tensors to be passed to the first
        call of the cell  (optional, defaults to `None` which causes creation
        of zero-filled initial state tensors).
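
    For example, seeding the layer with an explicit initial state
    (a usage sketch):

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> initial_state = [tf.zeros([32, 4])]
    >>> output = tf.keras.layers.GRU(4)(inputs, initial_state=initial_state)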
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        unroll=False,
        time_major=False,
        reset_after=True,
        **kwargs
    ):
        # `return_runtime` is a flag used for testing: when set, the layer
        # also returns a string tensor describing which kernel actually ran.
        self._return_runtime = kwargs.pop("return_runtime", False)
        implementation = kwargs.pop("implementation", 2)
        if implementation == 0:
            logging.warning(
                "`implementation=0` has been deprecated, and now defaults to "
                "`implementation=2`. Please update your layer call."
            )
        if "enable_caching_device" in kwargs:
            cell_kwargs = {
                "enable_caching_device": kwargs.pop("enable_caching_device")
            }
        else:
            cell_kwargs = {}
        cell = GRUCell(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            implementation=implementation,
            reset_after=reset_after,
            dtype=kwargs.get("dtype"),
            trainable=kwargs.get("trainable", True),
            **cell_kwargs
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            unroll=unroll,
            time_major=time_major,
            **kwargs
        )
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = [InputSpec(ndim=3)]
        # The fused cuDNN kernel can only be used when all the
        # cuDNN-compatibility conditions from the class docstring hold.
        self._could_use_gpu_kernel = (
            self.activation in (activations.tanh, tf.tanh)
            and self.recurrent_activation in (activations.sigmoid, tf.sigmoid)
            and recurrent_dropout == 0
            and not unroll
            and use_bias
            and reset_after
            and tf.compat.v1.executing_eagerly_outside_functions()
        )
        if tf.config.list_logical_devices("GPU"):
            # Only log the message when a GPU is available, since the cuDNN
            # kernel is irrelevant otherwise.
            if self._could_use_gpu_kernel:
                logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
            else:
                logging.warning(
                    gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name
                )
        if gru_lstm_utils.use_new_gru_lstm_impl():
            self._defun_wrapper = gru_lstm_utils.DefunWrapper(
                time_major, go_backwards, "gru"
            )

    def call(self, inputs, mask=None, training=None, initial_state=None):
        # Ragged inputs are converted to dense, and the row lengths are kept
        # for masking and for converting the outputs back to ragged.
        inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
        is_ragged_input = row_lengths is not None
        self._validate_args_if_ragged(is_ragged_input, mask)

        # GRU does not support constants; they are ignored during processing.
        inputs, initial_state, _ = self._process_inputs(
            inputs, initial_state, None
        )

        if isinstance(mask, list):
            mask = mask[0]

        input_shape = backend.int_shape(inputs)
        timesteps = input_shape[0] if self.time_major else input_shape[1]

        if not self._could_use_gpu_kernel:
            kwargs = {"training": training}
            self._maybe_reset_cell_dropout_mask(self.cell)

            def step(cell_inputs, cell_states):
                return self.cell(cell_inputs, cell_states, **kwargs)

            last_output, outputs, states = backend.rnn(
                step,
                inputs,
                initial_state,
                constants=None,
                go_backwards=self.go_backwards,
                mask=mask,
                unroll=self.unroll,
                input_length=row_lengths
                if row_lengths is not None
                else timesteps,
                time_major=self.time_major,
                zero_output_for_mask=self.zero_output_for_mask,
                return_all_outputs=self.return_sequences,
            )
            # Placeholder runtime value; the generic kernel cannot tell which
            # hardware it ran on.
            runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
        else:
            last_output, outputs, runtime, states = self._defun_gru_call(
                inputs, initial_state, training, mask, row_lengths
            )

        if self.stateful:
            updates = [
                tf.compat.v1.assign(
                    self.states[0], tf.cast(states[0], self.states[0].dtype)
                )
            ]
            self.add_update(updates)

        if self.return_sequences:
            output = backend.maybe_convert_to_ragged(
                is_ragged_input,
                outputs,
                row_lengths,
                go_backwards=self.go_backwards,
            )
        else:
            output = last_output

        if self.return_state:
            return [output] + list(states)
        elif self._return_runtime:
            return output, runtime
        else:
            return output

    # The hyperparameters below are simply delegated to the wrapped GRUCell.
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def implementation(self):
        return self.cell.implementation

    @property
    def reset_after(self):
        return self.cell.reset_after

    def get_config(self):
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(self.recurrent_activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "recurrent_initializer": initializers.serialize(self.recurrent_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "recurrent_regularizer": regularizers.serialize(self.recurrent_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(self.activity_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(self.recurrent_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "implementation": self.implementation,
            "reset_after": self.reset_after,
        }
        config.update(rnn_utils.config_for_enable_caching_device(self.cell))
        base_config = super().get_config()
        del base_config["cell"]
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        if "implementation" in config and config["implementation"] == 0:
            config["implementation"] = 1
        return cls(**config)

    def _defun_gru_call(
        self, inputs, initial_state, training, mask, sequence_lengths
    ):
        # Use the defun-based approach to swap between the generic and the
        # cuDNN implementation; both share the same function signature.
        self.reset_dropout_mask()
        dropout_mask = self.get_dropout_mask_for_cell(
            inputs, training, count=3
        )
        if dropout_mask is not None:
            inputs = inputs * dropout_mask[0]

        if gru_lstm_utils.use_new_gru_lstm_impl():
            gru_kwargs = {
                "inputs": inputs,
                "init_h": gru_lstm_utils.read_variable_value(initial_state[0]),
                "kernel": gru_lstm_utils.read_variable_value(self.cell.kernel),
                "recurrent_kernel": gru_lstm_utils.read_variable_value(
                    self.cell.recurrent_kernel
                ),
                "bias": gru_lstm_utils.read_variable_value(self.cell.bias),
                "mask": mask,
                "time_major": self.time_major,
                "go_backwards": self.go_backwards,
                "sequence_lengths": sequence_lengths,
                "zero_output_for_mask": self.zero_output_for_mask,
            }
            last_output, outputs, new_h, runtime = self._defun_wrapper.defun_layer(
                **gru_kwargs
            )
        else:
            gpu_gru_kwargs = {
                "inputs": inputs,
                "init_h": gru_lstm_utils.read_variable_value(initial_state[0]),
                "kernel": gru_lstm_utils.read_variable_value(self.cell.kernel),
                "recurrent_kernel": gru_lstm_utils.read_variable_value(
                    self.cell.recurrent_kernel
                ),
                "bias": gru_lstm_utils.read_variable_value(self.cell.bias),
                "mask": mask,
                "time_major": self.time_major,
                "go_backwards": self.go_backwards,
                "sequence_lengths": sequence_lengths,
                "return_sequences": self.return_sequences,
            }
            normal_gru_kwargs = gpu_gru_kwargs.copy()
            normal_gru_kwargs.update(
                {"zero_output_for_mask": self.zero_output_for_mask}
            )

            if tf.executing_eagerly():
                device_type = gru_lstm_utils.get_context_device_type()
                # Either the user placed the op on GPU, or no device was
                # specified and a GPU is available, and the mask (if any) is
                # supported by the cuDNN kernel.
                can_use_gpu = (
                    device_type == gru_lstm_utils.GPU_DEVICE_NAME
                    or (
                        device_type is None
                        and tf.config.list_logical_devices("GPU")
                    )
                ) and (
                    mask is None
                    or gru_lstm_utils.is_cudnn_supported_inputs(
                        mask, self.time_major
                    )
                )
                if can_use_gpu:
                    last_output, outputs, new_h, runtime = gpu_gru(
                        **gpu_gru_kwargs
                    )
                else:
                    last_output, outputs, new_h, runtime = standard_gru(
                        **normal_gru_kwargs
                    )
            else:
                last_output, outputs, new_h, runtime = gru_with_backend_selection(
                    **normal_gru_kwargs
                )

        states = [new_h]
        return last_output, outputs, runtime, states
d\}}}|||d ttjfS )a		  GRU with standard kernel implementation.

    This implementation can be run on all types of hardware.

    This implementation lifts out all the layer weights and make them function
    parameters. It has same number of tensor input params as the cuDNN
    counterpart. The RNN step logic has been simplified, eg dropout and mask is
    removed since cuDNN implementation does not support that.

    Args:
      inputs: Input tensor of GRU layer.
      init_h: Initial state tensor for the cell output.
      kernel: Weights for cell kernel.
      recurrent_kernel: Weights for cell recurrent kernel.
      bias: Weights for cell kernel bias and recurrent bias. The bias contains
        the combined input_bias and recurrent_bias.
      mask: Binary tensor of shape `(samples, timesteps)` indicating whether
        a given timestep should be masked. An individual `True` entry indicates
        that the corresponding timestep should be utilized, while a `False`
        entry indicates that the corresponding timestep should be ignored.
      time_major: Boolean, whether the inputs are in the format of
        [time, batch, feature] or [batch, time, feature].
      go_backwards: Boolean (default False). If True, process the input sequence
        backwards and return the reversed sequence.
      sequence_lengths: The lengths of all sequences coming from a variable
        length input, such as ragged tensors. If the input has a fixed timestep
        size, this should be None.
      zero_output_for_mask: Boolean, whether to output zero for masked timestep.
      return_sequences: Boolean. If True, return the recurrent outputs for all
        timesteps in the sequence. If False, only return the output for the
        last timestep (which consumes less memory).

    Returns:
      last_output: output tensor for the last timestep, which has shape
        [batch, units].
      outputs:
        - If `return_sequences=True`: output tensor for all timesteps,
          which has shape [batch, time, units].
        - Else, a tensor equal to `last_output` with shape [batch, 1, units]
      state_0: the cell output, which has same shape as init_h.
      runtime: constant string tensor which indicate real runtime hardware. This
        value is for testing purpose and should be used by user.
    r   r   c                    s   |d }t | }t | }tj|ddd\}}}t |}t |}tj|ddd\}}	}
t|| }t||	 }t|||
  }|| d| |  }||gfS )z5Step function that will be used by Keras RNN backend.r   rE   r   rX   )r   r_   r`   r   ra   r   r   )r   r   re   ry   rm   rn   ro   rz   rs   rt   rw   ru   rv   rx   r{   rh   rF   ri   rM   rB   rC   r     s    zstandard_gru.<locals>.stepNF)r   r   r   r   r   r   r   r   )r   r   r   r^   r   r	   r   ZRUNTIME_CPU)rb   r   rF   rM   rN   r   r   r   r   r   r   rR   r   r   r   r   
new_statesrB   r   rC   r     s0    8

r   c
              
   C   sD  |durt ||}|s8|du r8tj| dd} d\}
}n|r@dnd\}
}tj||
d}tj|ddd}|tj|ddd7 }tt|d	}tj	 d
 r|d |d  |d< |d< |d |d  |d< |d< |d |d  |d< |d< |d |d  |d< |d< t j
||tdgdd}|dur|r@tj| ||
|d} tjj| |d|dd||d\}}}}}|rtj|||
|d}tj||
gd}n6|rtj| dgd} tjj| |d|ddd\}}}}|d }|s|du r|	rtj|g dd}tj||
d}|dur|}|	s0tj||r(dndd}|||t t jfS )z>GRU with cuDNN implementation which is only available for GPU.N)r   r   r   )perm)r   r   )r   r   rX   rE   r      is_cuda_buildr      rD   T)weightsbiasesrG   transpose_weights)seq_axis
batch_axisr   )inputinput_hinput_cparamsis_trainingrnn_moder   r   )r   r   r   r   r   r   )r	   calculate_sequence_by_maskr   	transposeexpand_dimsra   r   flatten	sysconfigget_build_infoZcanonical_to_paramsconstantreverse_sequenceraw_ops
CudnnRNNV3reverseCudnnRNNsqueezer   ZRUNTIME_GPU)rb   r   rF   rM   rN   r   r   r   r   r   r   r   r   r   r   r{   r   r   rB   rB   rC   r     s    



	

r   c                    s   | |||||||||	|
ddd  t  rjtjt jfddt j fddifdd\}}}}nfdtt	  }||d	}t 
|t jt|}t 
|t j |}|f i \}}}}t j|fi  ||||fS )
a  Call the GRU with optimized backend kernel selection.

    Under the hood, this function will create two TF function, one with the most
    generic kernel and can run on all device condition, and the second one with
    cuDNN specific kernel, which can only run on GPU.

    The first function will be called with normal_lstm_params, while the second
    function is not called, but only registered in the graph. The Grappler will
    do the proper graph rewrite and swap the optimized TF function based on the
    device placement.

    Args:
      inputs: Input tensor of GRU layer.
      init_h: Initial state tensor for the cell output.
      kernel: Weights for cell kernel.
      recurrent_kernel: Weights for cell recurrent kernel.
      bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
        is used in this case.
      mask: Boolean tensor for mask out the steps within sequence.
        An individual `True` entry indicates that the corresponding timestep
        should be utilized, while a `False` entry indicates that the
        corresponding timestep should be ignored.
      time_major: Boolean, whether the inputs are in the format of
        [time, batch, feature] or [batch, time, feature].
      go_backwards: Boolean (default False). If True, process the input sequence
        backwards and return the reversed sequence.
      sequence_lengths: The lengths of all sequences coming from a variable
        length input, such as ragged tensors. If the input has a fixed timestep
        size, this should be None.
      zero_output_for_mask: Boolean, whether to output zero for masked timestep.
      return_sequences: Boolean. If True, return the recurrent outputs for all
        timesteps in the sequence. If False, only return the output for the
        last timestep (which consumes less memory).

    Returns:
      List of output tensors, same as standard_gru.
    rb   r   rF   rM   rN   r   r   r   r   r   r   c                    sz   du r$t  	d
S  	f
dd} 	
fdd}tjt	||dS )z<Use cuDNN kernel when mask is none or strictly right padded.Nr   c                      s   t  	d
S )Nr   )r   rB   )
rN   r   r   rb   rF   r   rM   r   r   r   rB   rC   cudnn_gru_fn  s    zOgru_with_backend_selection.<locals>.gpu_gru_with_fallback.<locals>.cudnn_gru_fnc                      s   t  	
dS )Nr   r   rB   rN   r   r   rb   rF   r   rM   r   r   r   r   rB   rC   standard_gru_fn  s    zRgru_with_backend_selection.<locals>.gpu_gru_with_fallback.<locals>.standard_gru_fn)true_fnfalse_fn)r   r   condr	   r   )rb   r   rF   rM   rN   r   r   r   r   r   r   r   r  rB   r  rC   gpu_gru_with_fallback  s(     
z9gru_with_backend_selection.<locals>.gpu_gru_with_fallbackc                      s   t f i  S r   r   rB   r   rB   rC   <lambda>      z,gru_with_backend_selection.<locals>.<lambda>c                      s    f i S r   rB   rB   r  r   rB   rC   r    s   c                      s   t f i  S r   r   rB   r  rB   rC   r    r	  gru_)r   r   )r	   r   r   __internal__execute_fn_for_deviceZCPU_DEVICE_NAMEr   struuiduuid4Zgenerate_defun_backendr   Zfunction_register)rb   r   rF   rM   rN   r   r   r   r   r   r   r   r   r   r   api_namesupportive_attributedefun_standard_grudefun_gpu_grurB   r
  rC   r   t  sZ    3?
	r   )%r   r  tensorflow.compat.v2r    v2r   kerasr   r   r   r   r   keras.enginer   keras.engine.input_specr   Zkeras.layers.rnnr	   r
   keras.layers.rnn.base_rnnr   'keras.layers.rnn.dropout_rnn_cell_mixinr   keras.utilsr   tensorflow.python.platformr   r8    tensorflow.python.util.tf_exportr   r:   BaseRandomLayerr   r   r   r   r   rB   rB   rB   rC   <module>   s>   
  ^
    m{