"""Long Short-Term Memory V1 layer."""

from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import lstm
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


@keras_export(v1=["keras.layers.LSTMCell"])
class LSTMCell(lstm.LSTMCell):
    """Cell class for the LSTM layer.

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use
        for the recurrent step.
        Default: hard sigmoid (`hard_sigmoid`).
        If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix,
        used for the linear transformation of the recurrent state.
      bias_initializer: Initializer for the bias vector.
      unit_forget_bias: Boolean.
        If True, add 1 to the bias of the forget gate at initialization.
        Setting it to true will also force `bias_initializer="zeros"`.
        This is recommended in [Jozefowicz et al., 2015](
          http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix.
      recurrent_regularizer: Regularizer function applied to
        the `recurrent_kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      kernel_constraint: Constraint function applied to
        the `kernel` weights matrix.
      recurrent_constraint: Constraint function applied to
        the `recurrent_kernel` weights matrix.
      bias_constraint: Constraint function applied to the bias vector.
      dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the inputs.
      recurrent_dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the recurrent state.

    Call arguments:
      inputs: A 2D tensor.
      states: List of state tensors corresponding to the previous timestep.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. Only relevant when `dropout` or
        `recurrent_dropout` is used.
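
    Example (an illustrative usage sketch added for clarity; it is not part
    of the original docstring, and the shapes below are arbitrary
    assumptions):

    >>> import numpy as np
    >>> import tensorflow as tf
    >>> inputs = np.random.random((32, 10, 8)).astype(np.float32)
    >>> rnn = tf.keras.layers.RNN(tf.compat.v1.keras.layers.LSTMCell(4))
    >>> output = rnn(inputs)  # final output, shape (32, 4)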
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="hard_sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        **kwargs
    ):
        super().__init__(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            implementation=kwargs.pop("implementation", 1),
            **kwargs
        )


@keras_export(v1=["keras.layers.LSTM"])
class LSTM(RNN):
    """Long Short-Term Memory layer - Hochreiter 1997.

    Note that this layer is not optimized for performance on GPU. Please use
    `tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`).
        If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use
        for the recurrent step.
        Default: hard sigmoid (`hard_sigmoid`).
        If you pass `None`, no activation is applied
        (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix,
        used for the linear transformation of the inputs.
      recurrent_initializer: Initializer for the `recurrent_kernel`
        weights matrix,
        used for the linear transformation of the recurrent state.
      bias_initializer: Initializer for the bias vector.
      unit_forget_bias: Boolean.
        If True, add 1 to the bias of the forget gate at initialization.
        Setting it to true will also force `bias_initializer="zeros"`.
        This is recommended in [Jozefowicz et al., 2015](
          http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix.
      recurrent_regularizer: Regularizer function applied to
        the `recurrent_kernel` weights matrix.
      bias_regularizer: Regularizer function applied to the bias vector.
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation").
      kernel_constraint: Constraint function applied to
        the `kernel` weights matrix.
      recurrent_constraint: Constraint function applied to
        the `recurrent_kernel` weights matrix.
      bias_constraint: Constraint function applied to the bias vector.
      dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the inputs.
      recurrent_dropout: Float between 0 and 1.
        Fraction of the units to drop for
        the linear transformation of the recurrent state.
      return_sequences: Boolean. Whether to return the last output
        in the output sequence, or the full sequence.
      return_state: Boolean. Whether to return the last state
        in addition to the output.
      go_backwards: Boolean (default False).
        If True, process the input sequence backwards and return the
        reversed sequence.
      stateful: Boolean (default False). If True, the last state
        for each sample at index i in a batch will be used as initial
        state for the sample of index i in the following batch.
      unroll: Boolean (default False).
        If True, the network will be unrolled,
        else a symbolic loop will be used.
        Unrolling can speed-up a RNN,
        although it tends to be more memory-intensive.
        Unrolling is only suitable for short sequences.
      time_major: The shape format of the `inputs` and `outputs` tensors.
        If True, the inputs and outputs will be in shape
        `(timesteps, batch, ...)`, whereas in the False case, it will be
        `(batch, timesteps, ...)`. Using `time_major = True` is a bit more
        efficient because it avoids transposes at the beginning and end of the
        RNN calculation. However, most TensorFlow data is batch-major, so by
        default this function accepts input and emits output in batch-major
        form.
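
    Example of the `time_major` layout (an illustrative sketch added for
    clarity, not part of the original docstring; the shapes are arbitrary
    assumptions):

    >>> import numpy as np
    >>> import tensorflow as tf
    >>> x = np.random.random((10, 32, 8)).astype(np.float32)  # (time, batch, feature)
    >>> layer = tf.compat.v1.keras.layers.LSTM(4, return_sequences=True,
    ...                                        time_major=True)
    >>> y = layer(x)  # shape (10, 32, 4), i.e. (time, batch, units)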

    Call arguments:
      inputs: A 3D tensor.
      mask: Binary tensor of shape `(samples, timesteps)` indicating whether
        a given timestep should be masked. An individual `True` entry indicates
        that the corresponding timestep should be utilized, while a `False`
        entry indicates that the corresponding timestep should be ignored.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. This argument is passed to the cell
        when calling it. This is only relevant if `dropout` or
        `recurrent_dropout` is used.
      initial_state: List of initial state tensors to be passed to the first
        call of the cell.
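
    Example (a minimal usage sketch added for clarity; it is not part of the
    original docstring, and the variable names and shapes are assumptions):

    >>> import numpy as np
    >>> import tensorflow as tf
    >>> inputs = np.random.random((32, 10, 8)).astype(np.float32)
    >>> lstm = tf.compat.v1.keras.layers.LSTM(4, return_sequences=True,
    ...                                       return_state=True)
    >>> whole_seq_output, final_h, final_c = lstm(inputs)
    >>> # whole_seq_output: (32, 10, 4); final_h and final_c: (32, 4)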
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="hard_sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        unroll=False,
        **kwargs
    ):
        implementation = kwargs.pop("implementation", 1)
        if implementation == 0:
            logging.warning(
                "`implementation=0` has been deprecated, "
                "and now defaults to `implementation=1`."
                "Please update your layer call."
            )
        if "enable_caching_device" in kwargs:
            cell_kwargs = {
                "enable_caching_device": kwargs.pop("enable_caching_device")
            }
        else:
            cell_kwargs = {}
        cell = LSTMCell(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            unit_forget_bias=unit_forget_bias,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            implementation=implementation,
            dtype=kwargs.get("dtype"),
            trainable=kwargs.get("trainable", True),
            **cell_kwargs
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            unroll=unroll,
            **kwargs
        )
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = [InputSpec(ndim=3)]

    def call(self, inputs, mask=None, training=None, initial_state=None):
        return super().call(
            inputs, mask=mask, training=training, initial_state=initial_state
        )
    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def implementation(self):
        return self.cell.implementation

    def get_config(self):
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "implementation": self.implementation,
        }
        config.update(rnn_utils.config_for_enable_caching_device(self.cell))
        base_config = super().get_config()
        del base_config["cell"]
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        if "implementation" in config and config["implementation"] == 0:
            config["implementation"] = 1
        return cls(**config)