a
    Sic;=                     @   s   d Z ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlm	Z	 ddlm
Z
 dd	lmZ dd
lmZ ddlmZ edgdG dd de	jZedgdG dd deZdS )zGated Recurrent Unit V1 layer.    )activations)constraints)initializers)regularizers)	InputSpec)gru)	rnn_utils)RNN)
tf_logging)keras_exportzkeras.layers.GRUCell)v1c                       s"   e Zd ZdZd fdd	Z  ZS )GRUCella  Cell class for the GRU layer.

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass None, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use
        for the recurrent step.
        Default: hard sigmoid (`hard_sigmoid`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix,
        used for the linear transformation of the recurrent state.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix.
      recurrent_regularizer: Regularizer function applied to
        the `recurrent_kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      kernel_constraint: Constraint function applied to
        the `kernel` weights matrix.
      recurrent_constraint: Constraint function applied to
        the `recurrent_kernel` weights matrix.
      bias_constraint: Constraint function applied to the bias vector.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs.
      recurrent_dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the recurrent state.
      reset_after: GRU convention (whether to apply reset gate after or
        before matrix multiplication). False = "before" (default),
        True = "after" (cuDNN compatible).

    Call arguments:
      inputs: A 2D tensor.
      states: List of state tensors corresponding to the previous timestep.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. Only relevant when `dropout` or
        `recurrent_dropout` is used.
    tanhhard_sigmoidTglorot_uniform
orthogonalzerosN        Fc                    sB   t  j|f||||||||	|
||||||dd|d| d S )Nimplementation   )
activationrecurrent_activationuse_biaskernel_initializerrecurrent_initializerbias_initializerkernel_regularizerrecurrent_regularizerbias_regularizerkernel_constraintrecurrent_constraintbias_constraintdropoutrecurrent_dropoutr   reset_after)super__init__pop)selfunitsr   r   r   r   r   r   r   r   r   r   r    r!   r"   r#   r$   kwargs	__class__ S/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/layers/rnn/gru_v1.pyr&   Q   s,    
zGRUCell.__init__)r   r   Tr   r   r   NNNNNNr   r   F)__name__
__module____qualname____doc__r&   __classcell__r-   r-   r+   r.   r       s"   2               r   zkeras.layers.GRUc                       s  e Zd ZdZd5 fdd	Zd6 fdd	Zedd Zedd Zedd Z	edd Z
edd Zedd Zedd Zedd Zedd  Zed!d" Zed#d$ Zed%d& Zed'd( Zed)d* Zed+d, Zed-d. Zed/d0 Z fd1d2Zed3d4 Z  ZS )7GRUa  Gated Recurrent Unit - Cho et al. 2014.

    There are two variants. The default one is based on 1406.1078v3 and
    has reset gate applied to hidden state before matrix multiplication. The
    other one is based on original 1406.1078v1 and has the order reversed.

    The second variant is compatible with CuDNNGRU (GPU-only) and allows
    inference on CPU. Thus it has separate biases for `kernel` and
    `recurrent_kernel`. Use `'reset_after'=True` and
    `recurrent_activation='sigmoid'`.

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use
        for the recurrent step.
        Default: hard sigmoid (`hard_sigmoid`).
        If you pass `None`, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs.
      recurrent_initializer: Initializer for the `recurrent_kernel` weights
        matrix, used for the linear transformation of the recurrent state.
      bias_initializer: Initializer for the bias vector.
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix.
      recurrent_regularizer: Regularizer function applied to
        the `recurrent_kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation")..
      kernel_constraint: Constraint function applied to
        the `kernel` weights matrix.
      recurrent_constraint: Constraint function applied to
        the `recurrent_kernel` weights matrix.
      bias_constraint: Constraint function applied to the bias vector.
      dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the inputs.
      recurrent_dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the recurrent state.
      return_sequences: Boolean. Whether to return the last output
        in the output sequence, or the full sequence.
      return_state: Boolean. Whether to return the last state
        in addition to the output.
      go_backwards: Boolean (default False).
        If True, process the input sequence backwards and return the
        reversed sequence.
      stateful: Boolean (default False). If True, the last state
        for each sample at index i in a batch will be used as initial
        state for the sample of index i in the following batch.
      unroll: Boolean (default False).
        If True, the network will be unrolled,
        else a symbolic loop will be used.
        Unrolling can speed-up a RNN,
        although it tends to be more memory-intensive.
        Unrolling is only suitable for short sequences.
      time_major: The shape format of the `inputs` and `outputs` tensors.
        If True, the inputs and outputs will be in shape
        `(timesteps, batch, ...)`, whereas in the False case, it will be
        `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
        efficient because it avoids transposes at the beginning and end of the
        RNN calculation. However, most TensorFlow data is batch-major, so by
        default this function accepts input and emits output in batch-major
        form.
      reset_after: GRU convention (whether to apply reset gate after or
        before matrix multiplication). False = "before" (default),
        True = "after" (cuDNN compatible).

    Call arguments:
      inputs: A 3D tensor.
      mask: Binary tensor of shape `(samples, timesteps)` indicating whether
        a given timestep should be masked. An individual `True` entry indicates
        that the corresponding timestep should be utilized, while a `False`
        entry indicates that the corresponding timestep should be ignored.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. This argument is passed to the cell
        when calling it. This is only relevant if `dropout` or
        `recurrent_dropout` is used.
      initial_state: List of initial state tensors to be passed to the first
        call of the cell.
    r   r   Tr   r   r   Nr   Fc                    s   | dd}|dkrtd d|v r6d| di}ni }t|f||||||||	|
||||||||d|ddd	|}t j|f|||||d
| t|| _t	ddg| _
d S )Nr   r   r   zm`implementation=0` has been deprecated, and now defaults to `implementation=1`.Please update your layer call.enable_caching_devicedtype	trainableT)r   r   r   r   r   r   r   r   r   r   r    r!   r"   r#   r   r$   r6   r7   )return_sequencesreturn_statego_backwardsstatefulunroll   )ndim)r'   loggingwarningr   getr%   r&   r   activity_regularizerr   
input_spec)r(   r)   r   r   r   r   r   r   r   r   r   rB   r   r    r!   r"   r#   r8   r9   r:   r;   r<   r$   r*   r   cell_kwargscellr+   r-   r.   r&      s\    

	zGRU.__init__c                    s   t  j||||dS )N)masktraininginitial_state)r%   call)r(   inputsrF   rG   rH   r+   r-   r.   rI     s    zGRU.callc                 C   s   | j jS N)rE   r)   r(   r-   r-   r.   r)   #  s    z	GRU.unitsc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   '  s    zGRU.activationc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   +  s    zGRU.recurrent_activationc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   /  s    zGRU.use_biasc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   3  s    zGRU.kernel_initializerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   7  s    zGRU.recurrent_initializerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   ;  s    zGRU.bias_initializerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   ?  s    zGRU.kernel_regularizerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   C  s    zGRU.recurrent_regularizerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   G  s    zGRU.bias_regularizerc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   K  s    zGRU.kernel_constraintc                 C   s   | j jS rK   )rE   r    rL   r-   r-   r.   r    O  s    zGRU.recurrent_constraintc                 C   s   | j jS rK   )rE   r!   rL   r-   r-   r.   r!   S  s    zGRU.bias_constraintc                 C   s   | j jS rK   )rE   r"   rL   r-   r-   r.   r"   W  s    zGRU.dropoutc                 C   s   | j jS rK   )rE   r#   rL   r-   r-   r.   r#   [  s    zGRU.recurrent_dropoutc                 C   s   | j jS rK   )rE   r   rL   r-   r-   r.   r   _  s    zGRU.implementationc                 C   s   | j jS rK   )rE   r$   rL   r-   r-   r.   r$   c  s    zGRU.reset_afterc                    s   | j t| jt| j| jt| jt| jt| j	t
| jt
| jt
| jt
| jt| jt| jt| j| j| j| j| jd}|t| j t  }|d= tt| t|  S )N)r)   r   r   r   r   r   r   r   r   r   rB   r   r    r!   r"   r#   r   r$   rE   ) r)   r   	serializer   r   r   r   r   r   r   r   r   r   r   rB   r   r   r    r!   r"   r#   r   r$   updater    config_for_enable_caching_devicerE   r%   
get_configdictlistitems)r(   configbase_configr+   r-   r.   rP   g  sJ    




"
zGRU.get_configc                 C   s*   d|v r|d dkrd|d< | f i |S )Nr   r   r   r-   )clsrT   r-   r-   r.   from_config  s    zGRU.from_config)r   r   Tr   r   r   NNNNNNNr   r   FFFFFF)NNN)r/   r0   r1   r2   r&   rI   propertyr)   r   r   r   r   r   r   r   r   r   r   r    r!   r"   r#   r   r$   rP   classmethodrW   r3   r-   r-   r+   r.   r4   {   sz   [                     I
















(r4   N)r2   kerasr   r   r   r   keras.engine.input_specr   keras.layers.rnnr   r   keras.layers.rnn.base_rnnr	   tensorflow.python.platformr
   r?    tensorflow.python.util.tf_exportr   r   r4   r-   r-   r-   r.   <module>   s   
Z
