"""Long Short-Term Memory layer."""

import uuid

import tensorflow.compat.v2 as tf

from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer
from keras.engine.input_spec import InputSpec
from keras.layers.rnn import gru_lstm_utils
from keras.layers.rnn import rnn_utils
from keras.layers.rnn.base_rnn import RNN
from keras.layers.rnn.dropout_rnn_cell_mixin import DropoutRNNCellMixin
from keras.utils import tf_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export

RECURRENT_DROPOUT_WARNING_MSG = (
    "RNN `implementation=2` is not supported when `recurrent_dropout` is set. "
    "Using `implementation=1`."
)


@keras_export("keras.layers.LSTMCell", v1=[])
class LSTMCell(DropoutRNNCellMixin, base_layer.BaseRandomLayer):
    """Cell class for the LSTM layer.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    This class processes one step within the whole time sequence input, whereas
    `tf.keras.layers.LSTM` processes the whole sequence.

    For example:

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4))
    >>> output = rnn(inputs)
    >>> print(output.shape)
    (32, 4)
    >>> rnn = tf.keras.layers.RNN(
    ...    tf.keras.layers.LSTMCell(4),
    ...    return_sequences=True,
    ...    return_state=True)
    >>> whole_seq_output, final_memory_state, final_carry_state = rnn(inputs)
    >>> print(whole_seq_output.shape)
    (32, 10, 4)
    >>> print(final_memory_state.shape)
    (32, 4)
    >>> print(final_carry_state.shape)
    (32, 4)

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use. Default: hyperbolic tangent
        (`tanh`). If you pass `None`, no activation is applied (i.e. "linear"
        activation: `a(x) = x`).
      recurrent_activation: Activation function to use for the recurrent step.
        Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
        applied (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix, used for
        the linear transformation of the inputs. Default: `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel` weights
        matrix, used for the linear transformation of the recurrent state.
        Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
        the forget gate at initialization. Setting it to true will also force
        `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
          al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to
        the `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
        for the linear transformation of the recurrent state. Default: 0.

    Call arguments:
      inputs: A 2D tensor, with shape of `[batch, feature]`.
      states: List of 2 tensors that correspond to the cell's units. Both of
        them have shape `[batch, units]`; the first tensor is the memory state
        from the previous time step, and the second is the carry state from the
        previous time step. For timestep 0, the initial state provided by the
        user will be fed to the cell.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. Only relevant when `dropout` or
        `recurrent_dropout` is used.
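
    For illustration, a single step can also be sketched directly on the cell
    (the batch and feature sizes below are arbitrary):

    >>> cell = tf.keras.layers.LSTMCell(4)
    >>> inputs = tf.random.normal([32, 8])
    >>> states = cell.get_initial_state(batch_size=32, dtype=tf.float32)
    >>> output, new_states = cell(inputs, states)
    >>> print(output.shape)
    (32, 4)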
    """

    def __init__(self, units, activation="tanh", recurrent_activation="sigmoid",
                 use_bias=True, kernel_initializer="glorot_uniform",
                 recurrent_initializer="orthogonal", bias_initializer="zeros",
                 unit_forget_bias=True, kernel_regularizer=None,
                 recurrent_regularizer=None, bias_regularizer=None,
                 kernel_constraint=None, recurrent_constraint=None,
                 bias_constraint=None, dropout=0.0, recurrent_dropout=0.0,
                 **kwargs):
        if units <= 0:
            raise ValueError(
                "Received an invalid value for argument `units`, expected "
                f"a positive integer, got {units}."
            )
        # Cache variable reads on the parameter device under TF2 behavior.
        if tf.compat.v1.executing_eagerly_outside_functions():
            self._enable_caching_device = kwargs.pop("enable_caching_device", True)
        else:
            self._enable_caching_device = kwargs.pop("enable_caching_device", False)
        super().__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        implementation = kwargs.pop("implementation", 2)
        if self.recurrent_dropout != 0 and implementation != 1:
            logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
            self.implementation = 1
        else:
            self.implementation = implementation
        # [memory state, carry state].
        self.state_size = [self.units, self.units]
        self.output_size = self.units

    @tf_utils.shape_type_conversion
    def build(self, input_shape):
        super().build(input_shape)
        default_caching_device = rnn_utils.caching_device(self)
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(
            shape=(input_dim, self.units * 4),
            name="kernel",
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            caching_device=default_caching_device,
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name="recurrent_kernel",
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint,
            caching_device=default_caching_device,
        )

        if self.use_bias:
            if self.unit_forget_bias:
                # Initialize the forget-gate bias to ones (Jozefowicz et al.).
                def bias_initializer(_, *args, **kwargs):
                    return backend.concatenate(
                        [
                            self.bias_initializer((self.units,), *args, **kwargs),
                            initializers.get("ones")((self.units,), *args, **kwargs),
                            self.bias_initializer((self.units * 2,), *args, **kwargs),
                        ]
                    )

            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(
                shape=(self.units * 4,),
                name="bias",
                initializer=bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                caching_device=default_caching_device,
            )
        else:
            self.bias = None
        self.built = True

    def _compute_carry_and_output(self, x, h_tm1, c_tm1):
        """Computes carry and output using split kernels."""
        x_i, x_f, x_c, x_o = x
        h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
        k = self.recurrent_kernel
        u = self.units
        i = self.recurrent_activation(x_i + backend.dot(h_tm1_i, k[:, :u]))
        f = self.recurrent_activation(x_f + backend.dot(h_tm1_f, k[:, u : u * 2]))
        c = f * c_tm1 + i * self.activation(
            x_c + backend.dot(h_tm1_c, k[:, u * 2 : u * 3])
        )
        o = self.recurrent_activation(x_o + backend.dot(h_tm1_o, k[:, u * 3 :]))
        return c, o

    def _compute_carry_and_output_fused(self, z, c_tm1):
        """Computes carry and output using fused kernels."""
        z0, z1, z2, z3 = z
        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.recurrent_activation(z3)
        return c, o

    def call(self, inputs, states, training=None):
        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
            h_tm1, training, count=4
        )

        if self.implementation == 1:
            if 0.0 < self.dropout < 1.0:
                inputs_i = inputs * dp_mask[0]
                inputs_f = inputs * dp_mask[1]
                inputs_c = inputs * dp_mask[2]
                inputs_o = inputs * dp_mask[3]
            else:
                inputs_i = inputs_f = inputs_c = inputs_o = inputs
            k_i, k_f, k_c, k_o = tf.split(self.kernel, num_or_size_splits=4, axis=1)
            x_i = backend.dot(inputs_i, k_i)
            x_f = backend.dot(inputs_f, k_f)
            x_c = backend.dot(inputs_c, k_c)
            x_o = backend.dot(inputs_o, k_o)
            if self.use_bias:
                b_i, b_f, b_c, b_o = tf.split(
                    self.bias, num_or_size_splits=4, axis=0
                )
                x_i = backend.bias_add(x_i, b_i)
                x_f = backend.bias_add(x_f, b_f)
                x_c = backend.bias_add(x_c, b_c)
                x_o = backend.bias_add(x_o, b_o)

            if 0.0 < self.recurrent_dropout < 1.0:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1_f = h_tm1_c = h_tm1_o = h_tm1
            x = (x_i, x_f, x_c, x_o)
            h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
            c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
        else:
            if 0.0 < self.dropout < 1.0:
                inputs = inputs * dp_mask[0]
            z = backend.dot(inputs, self.kernel)
            z += backend.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = backend.bias_add(z, self.bias)
            z = tf.split(z, num_or_size_splits=4, axis=1)
            c, o = self._compute_carry_and_output_fused(z, c_tm1)

        h = o * self.activation(c)
        return h, [h, c]

    def get_config(self):
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(self.recurrent_activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "recurrent_initializer": initializers.serialize(self.recurrent_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "recurrent_regularizer": regularizers.serialize(self.recurrent_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(self.recurrent_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "implementation": self.implementation,
        }
        config.update(rnn_utils.config_for_enable_caching_device(self))
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
        return list(
            rnn_utils.generate_zero_filled_state_for_cell(
                self, inputs, batch_size, dtype
            )
        )


@keras_export("keras.layers.LSTM", v1=[])
class LSTM(DropoutRNNCellMixin, RNN, base_layer.BaseRandomLayer):
    """Long Short-Term Memory layer - Hochreiter 1997.

    See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
    for details about the usage of RNN API.

    Based on available runtime hardware and constraints, this layer
    will choose different implementations (cuDNN-based or pure-TensorFlow)
    to maximize the performance. If a GPU is available and all
    the arguments to the layer meet the requirement of the cuDNN kernel
    (see below for details), the layer will use a fast cuDNN implementation.

    The requirements to use the cuDNN implementation are:

    1. `activation` == `tanh`
    2. `recurrent_activation` == `sigmoid`
    3. `recurrent_dropout` == 0
    4. `unroll` is `False`
    5. `use_bias` is `True`
    6. Inputs, if masking is used, are strictly right-padded.
    7. Eager execution is enabled in the outermost context.

    For example:

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> lstm = tf.keras.layers.LSTM(4)
    >>> output = lstm(inputs)
    >>> print(output.shape)
    (32, 4)
    >>> lstm = tf.keras.layers.LSTM(4, return_sequences=True, return_state=True)
    >>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
    >>> print(whole_seq_output.shape)
    (32, 10, 4)
    >>> print(final_memory_state.shape)
    (32, 4)
    >>> print(final_carry_state.shape)
    (32, 4)
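
    Whether the cuDNN kernel can be used is decided per layer from the
    arguments listed above. For example, the first layer below satisfies the
    requirements, while the second one (non-zero `recurrent_dropout`) falls
    back to the generic kernel (a sketch; defaults shown explicitly):

    >>> cudnn_ok = tf.keras.layers.LSTM(
    ...     4, activation="tanh", recurrent_activation="sigmoid",
    ...     use_bias=True, recurrent_dropout=0, unroll=False)
    >>> generic_only = tf.keras.layers.LSTM(4, recurrent_dropout=0.2)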

    Args:
      units: Positive integer, dimensionality of the output space.
      activation: Activation function to use.
        Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
        is applied (i.e. "linear" activation: `a(x) = x`).
      recurrent_activation: Activation function to use for the recurrent step.
        Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
        applied (i.e. "linear" activation: `a(x) = x`).
      use_bias: Boolean (default `True`), whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix, used for
        the linear transformation of the inputs. Default: `glorot_uniform`.
      recurrent_initializer: Initializer for the `recurrent_kernel` weights
        matrix, used for the linear transformation of the recurrent state.
        Default: `orthogonal`.
      bias_initializer: Initializer for the bias vector. Default: `zeros`.
      unit_forget_bias: Boolean (default `True`). If True, add 1 to the bias of
        the forget gate at initialization. Setting it to true will also force
        `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
            al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
      kernel_regularizer: Regularizer function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_regularizer: Regularizer function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_regularizer: Regularizer function applied to the bias vector.
        Default: `None`.
      activity_regularizer: Regularizer function applied to the output of the
        layer (its "activation"). Default: `None`.
      kernel_constraint: Constraint function applied to the `kernel` weights
        matrix. Default: `None`.
      recurrent_constraint: Constraint function applied to the
        `recurrent_kernel` weights matrix. Default: `None`.
      bias_constraint: Constraint function applied to the bias vector. Default:
        `None`.
      dropout: Float between 0 and 1. Fraction of the units to drop for the
        linear transformation of the inputs. Default: 0.
      recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
        for the linear transformation of the recurrent state. Default: 0.
      return_sequences: Boolean. Whether to return the last output in the output
        sequence, or the full sequence. Default: `False`.
      return_state: Boolean. Whether to return the last state in addition to the
        output. Default: `False`.
      go_backwards: Boolean (default `False`). If True, process the input
        sequence backwards and return the reversed sequence.
      stateful: Boolean (default `False`). If True, the last state for each
        sample at index i in a batch will be used as initial state for the
        sample of index i in the following batch.
      time_major: The shape format of the `inputs` and `outputs` tensors.
        If True, the inputs and outputs will be in shape
        `[timesteps, batch, feature]`, whereas in the False case, it will be
        `[batch, timesteps, feature]`. Using `time_major = True` is a bit more
        efficient because it avoids transposes at the beginning and end of the
        RNN calculation. However, most TensorFlow data is batch-major, so by
        default this function accepts input and emits output in batch-major
        form.
      unroll: Boolean (default `False`). If True, the network will be unrolled,
        else a symbolic loop will be used. Unrolling can speed-up a RNN,
        although it tends to be more memory-intensive. Unrolling is only
        suitable for short sequences.

    Call arguments:
      inputs: A 3D tensor with shape `[batch, timesteps, feature]`.
      mask: Binary tensor of shape `[batch, timesteps]` indicating whether
        a given timestep should be masked (optional, defaults to `None`).
        An individual `True` entry indicates that the corresponding timestep
        should be utilized, while a `False` entry indicates that the
        corresponding timestep should be ignored.
      training: Python boolean indicating whether the layer should behave in
        training mode or in inference mode. This argument is passed to the cell
        when calling it. This is only relevant if `dropout` or
        `recurrent_dropout` is used (optional, defaults to `None`).
      initial_state: List of initial state tensors to be passed to the first
        call of the cell (optional, defaults to `None` which causes creation
        of zero-filled initial state tensors).
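
    A call with an explicit initial state and returned states can be sketched
    as follows (shapes here are illustrative):

    >>> inputs = tf.random.normal([32, 10, 8])
    >>> initial_state = [tf.zeros([32, 4]), tf.zeros([32, 4])]
    >>> lstm = tf.keras.layers.LSTM(4, return_state=True)
    >>> output, memory_state, carry_state = lstm(
    ...     inputs, initial_state=initial_state)
    >>> print(output.shape)
    (32, 4)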
    """

    def __init__(self, units, activation="tanh", recurrent_activation="sigmoid",
                 use_bias=True, kernel_initializer="glorot_uniform",
                 recurrent_initializer="orthogonal", bias_initializer="zeros",
                 unit_forget_bias=True, kernel_regularizer=None,
                 recurrent_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None, kernel_constraint=None,
                 recurrent_constraint=None, bias_constraint=None, dropout=0.0,
                 recurrent_dropout=0.0, return_sequences=False,
                 return_state=False, go_backwards=False, stateful=False,
                 time_major=False, unroll=False, **kwargs):
        # `return_runtime` is a flag for testing, which shows the real backend
        # implementation chosen by Grappler in graph mode.
        self.return_runtime = kwargs.pop("return_runtime", False)
        implementation = kwargs.pop("implementation", 2)
        if implementation == 0:
            logging.warning(
                "`implementation=0` has been deprecated, and now defaults to "
                "`implementation=1`. Please update your layer call."
            )
        if "enable_caching_device" in kwargs:
            cell_kwargs = {
                "enable_caching_device": kwargs.pop("enable_caching_device")
            }
        else:
            cell_kwargs = {}
        cell = LSTMCell(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            unit_forget_bias=unit_forget_bias,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            implementation=implementation,
            dtype=kwargs.get("dtype"),
            trainable=kwargs.get("trainable", True),
            **cell_kwargs,
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            time_major=time_major,
            unroll=unroll,
            **kwargs,
        )
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = [InputSpec(ndim=3)]
        self.state_spec = [
            InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
        ]
        self._could_use_gpu_kernel = (
            self.activation in (activations.tanh, tf.tanh)
            and self.recurrent_activation in (activations.sigmoid, tf.sigmoid)
            and recurrent_dropout == 0
            and not unroll
            and use_bias
            and tf.compat.v1.executing_eagerly_outside_functions()
        )
        if tf.config.list_logical_devices("GPU"):
            # Only log the cuDNN status when a GPU is actually available.
            if self._could_use_gpu_kernel:
                logging.debug(gru_lstm_utils.CUDNN_AVAILABLE_MSG % self.name)
            else:
                logging.warning(gru_lstm_utils.CUDNN_NOT_AVAILABLE_MSG % self.name)

        if gru_lstm_utils.use_new_gru_lstm_impl():
            self._defun_wrapper = gru_lstm_utils.DefunWrapper(
                time_major, go_backwards, "lstm"
            )

    def call(self, inputs, mask=None, training=None, initial_state=None):
        # Ragged inputs are densified; the row lengths are kept for masking.
        inputs, row_lengths = backend.convert_inputs_if_ragged(inputs)
        is_ragged_input = row_lengths is not None
        self._validate_args_if_ragged(is_ragged_input, mask)

        # LSTM does not support constants; ignore them during processing.
        inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)

        if isinstance(mask, list):
            mask = mask[0]

        input_shape = backend.int_shape(inputs)
        timesteps = input_shape[0] if self.time_major else input_shape[1]

        if not self._could_use_gpu_kernel:
            # Fall back to the generic, cell-by-cell RNN loop.
            kwargs = {"training": training}
            self._maybe_reset_cell_dropout_mask(self.cell)

            def step(inputs, states):
                return self.cell(inputs, states, **kwargs)

            last_output, outputs, states = backend.rnn(
                step,
                inputs,
                initial_state,
                constants=None,
                go_backwards=self.go_backwards,
                mask=mask,
                unroll=self.unroll,
                input_length=(
                    row_lengths if row_lengths is not None else timesteps
                ),
                time_major=self.time_major,
                zero_output_for_mask=self.zero_output_for_mask,
                return_all_outputs=self.return_sequences,
            )
            runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
        else:
            # The cell weights are lifted out so that the cuDNN kernel and the
            # generic kernel share one function signature and can be swapped.
            dropout_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
            if dropout_mask is not None:
                inputs = inputs * dropout_mask[0]
            lstm_kwargs = {
                "inputs": inputs,
                "init_h": gru_lstm_utils.read_variable_value(initial_state[0]),
                "init_c": gru_lstm_utils.read_variable_value(initial_state[1]),
                "kernel": gru_lstm_utils.read_variable_value(self.cell.kernel),
                "recurrent_kernel": gru_lstm_utils.read_variable_value(
                    self.cell.recurrent_kernel
                ),
                "bias": gru_lstm_utils.read_variable_value(self.cell.bias),
                "mask": mask,
                "time_major": self.time_major,
                "go_backwards": self.go_backwards,
                "sequence_lengths": row_lengths,
                "zero_output_for_mask": self.zero_output_for_mask,
                "return_sequences": self.return_sequences,
            }
            if tf.executing_eagerly():
                device_type = gru_lstm_utils.get_context_device_type()
                can_use_gpu = (
                    device_type == gru_lstm_utils.GPU_DEVICE_NAME
                    or (
                        device_type is None
                        and tf.config.list_logical_devices("GPU")
                    )
                ) and (
                    mask is None
                    or gru_lstm_utils.is_cudnn_supported_inputs(
                        mask, self.time_major
                    )
                )
                if can_use_gpu:
                    gpu_kwargs = dict(lstm_kwargs)
                    gpu_kwargs.pop("zero_output_for_mask")
                    last_output, outputs, new_h, new_c, runtime = gpu_lstm(
                        **gpu_kwargs
                    )
                else:
                    last_output, outputs, new_h, new_c, runtime = standard_lstm(
                        **lstm_kwargs
                    )
            else:
                (
                    last_output,
                    outputs,
                    new_h,
                    new_c,
                    runtime,
                ) = lstm_with_backend_selection(**lstm_kwargs)
            states = [new_h, new_c]

        if self.stateful:
            updates = [
                tf.compat.v1.assign(self_state, tf.cast(state, self_state.dtype))
                for self_state, state in zip(self.states, states)
            ]
            self.add_update(updates)

        if self.return_sequences:
            output = backend.maybe_convert_to_ragged(
                is_ragged_input, outputs, row_lengths,
                go_backwards=self.go_backwards,
            )
        else:
            output = last_output

        if self.return_state:
            return [output] + list(states)
        if self.return_runtime:
            return output, runtime
        return output

    # Read-only properties mirroring the wrapped `LSTMCell` configuration.
    units = property(lambda self: self.cell.units)
    activation = property(lambda self: self.cell.activation)
    recurrent_activation = property(lambda self: self.cell.recurrent_activation)
    use_bias = property(lambda self: self.cell.use_bias)
    kernel_initializer = property(lambda self: self.cell.kernel_initializer)
    recurrent_initializer = property(lambda self: self.cell.recurrent_initializer)
    bias_initializer = property(lambda self: self.cell.bias_initializer)
    unit_forget_bias = property(lambda self: self.cell.unit_forget_bias)
    kernel_regularizer = property(lambda self: self.cell.kernel_regularizer)
    recurrent_regularizer = property(lambda self: self.cell.recurrent_regularizer)
    bias_regularizer = property(lambda self: self.cell.bias_regularizer)
    kernel_constraint = property(lambda self: self.cell.kernel_constraint)
    recurrent_constraint = property(lambda self: self.cell.recurrent_constraint)
    bias_constraint = property(lambda self: self.cell.bias_constraint)
    dropout = property(lambda self: self.cell.dropout)
    recurrent_dropout = property(lambda self: self.cell.recurrent_dropout)
    implementation = property(lambda self: self.cell.implementation)

    def get_config(self):
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(self.recurrent_activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "recurrent_initializer": initializers.serialize(self.recurrent_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "recurrent_regularizer": regularizers.serialize(self.recurrent_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(self.activity_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(self.recurrent_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "implementation": self.implementation,
        }
        config.update(rnn_utils.config_for_enable_caching_device(self.cell))
        base_config = super().get_config()
        del base_config["cell"]
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        if "implementation" in config and config["implementation"] == 0:
            config["implementation"] = 1
        return cls(**config)


def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
                  time_major, go_backwards, sequence_lengths,
                  zero_output_for_mask, return_sequences):
    """LSTM with standard kernel implementation.

    This implementation can be run on all types of hardware.

    This implementation lifts out all the layer weights and makes them function
    parameters. It has the same number of tensor input parameters as the cuDNN
    counterpart. The RNN step logic has been simplified, e.g. dropout and mask
    support are removed since the cuDNN implementation does not support them.

    Note that the first half of the bias tensor should be ignored by this impl.
    The cuDNN impl needs an extra set of input gate bias. In order to make both
    functions take the same shape of parameters, that extra set of bias is also
    fed here.
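
    In this simplified form, each step computes the standard LSTM recurrence,
    using the fused `kernel`/`recurrent_kernel`/`bias` layout described above:

        z = x_t . kernel + h_{t-1} . recurrent_kernel + bias
        i, f, c_hat, o = split(z, 4)
        c_t = sigmoid(f) * c_{t-1} + sigmoid(i) * tanh(c_hat)
        h_t = sigmoid(o) * tanh(c_t)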

    Args:
      inputs: input tensor of LSTM layer.
      init_h: initial state tensor for the cell output.
      init_c: initial state tensor for the cell hidden state.
      kernel: weights for cell kernel.
      recurrent_kernel: weights for cell recurrent kernel.
      bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
        is used in this case.
      mask: Boolean tensor for mask out the steps within sequence.
        An individual `True` entry indicates that the corresponding timestep
        should be utilized, while a `False` entry indicates that the
        corresponding timestep should be ignored.
      time_major: boolean, whether the inputs are in the format of
        [time, batch, feature] or [batch, time, feature].
      go_backwards: Boolean (default False). If True, process the input sequence
        backwards and return the reversed sequence.
      sequence_lengths: The lengths of all sequences coming from a variable
        length input, such as ragged tensors. If the input has a fixed timestep
        size, this should be None.
      zero_output_for_mask: Boolean, whether to output zero for masked timestep.
      return_sequences: Boolean. If True, return the recurrent outputs for all
        timesteps in the sequence. If False, only return the output for the
        last timestep (which consumes less memory).

    Returns:
      last_output: output tensor for the last timestep, which has shape
        [batch, units].
      outputs:
        - If `return_sequences=True`: output tensor for all timesteps,
          which has shape [batch, time, units].
        - Else, a tensor equal to `last_output` with shape [batch, 1, units]
      state_0: the cell output, which has same shape as init_h.
      state_1: the cell hidden state, which has same shape as init_c.
      runtime: constant string tensor which indicates the real runtime
        hardware. This value is for testing purposes and should be used by the
        user.
    """
    input_shape = backend.int_shape(inputs)
    timesteps = input_shape[0] if time_major else input_shape[1]

    def step(cell_inputs, cell_states):
        """Step function that will be used by Keras RNN backend."""
        h_tm1 = cell_states[0]  # previous memory state
        c_tm1 = cell_states[1]  # previous carry state

        z = backend.dot(cell_inputs, kernel)
        z += backend.dot(h_tm1, recurrent_kernel)
        z = backend.bias_add(z, bias)

        z0, z1, z2, z3 = tf.split(z, 4, axis=1)

        i = tf.sigmoid(z0)
        f = tf.sigmoid(z1)
        c = f * c_tm1 + i * tf.tanh(z2)
        o = tf.sigmoid(z3)

        h = o * tf.tanh(c)
        return h, [h, c]

    last_output, outputs, new_states = backend.rnn(
        step,
        inputs,
        [init_h, init_c],
        constants=None,
        unroll=False,
        time_major=time_major,
        mask=mask,
        go_backwards=go_backwards,
        input_length=(
            sequence_lengths if sequence_lengths is not None else timesteps
        ),
        zero_output_for_mask=zero_output_for_mask,
        return_all_outputs=return_sequences,
    )
    return (
        last_output,
        outputs,
        new_states[0],
        new_states[1],
        gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_CPU),
    )


def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
             time_major, go_backwards, sequence_lengths, return_sequences):
    """LSTM with either cuDNN or ROCm implementation which is only available for
    GPU.

    Note that currently only right padded data is supported, or the result will
    be polluted by the unmasked data which should be filtered.
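
    For example (illustrative), a mask of `[[True, True, False, False]]` is
    strictly right padded and therefore supported, while
    `[[True, False, True, False]]` is not.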

    Args:
      inputs: Input tensor of LSTM layer.
      init_h: Initial state tensor for the cell output.
      init_c: Initial state tensor for the cell hidden state.
      kernel: Weights for cell kernel.
      recurrent_kernel: Weights for cell recurrent kernel.
      bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
        is used in this case.
      mask: Boolean tensor for mask out the steps within sequence. An individual
        `True` entry indicates that the corresponding timestep should be
        utilized, while a `False` entry indicates that the corresponding
        timestep should be ignored.
      time_major: Boolean, whether the inputs are in the format of [time, batch,
        feature] or [batch, time, feature].
      go_backwards: Boolean (default False). If True, process the input sequence
        backwards and return the reversed sequence.
      sequence_lengths: The lengths of all sequences coming from a variable
        length input, such as ragged tensors. If the input has a fixed timestep
        size, this should be None.
      return_sequences: Boolean. If True, return the recurrent outputs for all
        timesteps in the sequence. If False, only return the output for the
        last timestep, matching the CPU function output format.

    Returns:
      last_output: Output tensor for the last timestep, which has shape
        [batch, units].
      outputs:
        - If `return_sequences=True`: output tensor for all timesteps,
          which has shape [batch, time, units].
        - Else, a tensor equal to `last_output` with shape [batch, 1, units]
      state_0: The cell output, which has same shape as init_h.
      state_1: The cell hidden state, which has same shape as init_c.
      runtime: Constant string tensor which indicates the real runtime
        hardware. This value is for testing purposes and should not be used by
        the user.
    """
    if mask is not None:
        sequence_lengths = gru_lstm_utils.calculate_sequence_by_mask(
            mask, time_major
        )

    if not time_major and sequence_lengths is None:
        inputs = tf.transpose(inputs, perm=(1, 0, 2))
        seq_axis, batch_axis = (0, 1)
    else:
        seq_axis, batch_axis = (0, 1) if time_major else (1, 0)
    # cuDNN expects one extra leading dim (num_layers) on the initial states.
    init_h = tf.expand_dims(init_h, axis=seq_axis)
    init_c = tf.expand_dims(init_c, axis=seq_axis)

    weights = tf.split(kernel, 4, axis=1)
    weights += tf.split(recurrent_kernel, 4, axis=1)
    # cuDNN has an extra set of input-gate bias; zero it out so the result is
    # mathematically the same as the canonical LSTM implementation.
    full_bias = tf.concat((tf.zeros_like(bias), bias), 0)

    if tf.sysconfig.get_build_info()["is_rocm_build"]:
        # ROCm MIOpen uses an [i, f, o, c] gate order instead of [i, f, c, o].
        weights = [weights[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]
        full_bias = tf.split(full_bias, 8, axis=0)
        full_bias = [full_bias[x] for x in (0, 1, 3, 2, 4, 5, 7, 6)]

    params = gru_lstm_utils.canonical_to_params(
        weights=weights,
        biases=tf.split(full_bias, 8),
        shape=tf.constant([-1]),
        transpose_weights=True,
    )

    if sequence_lengths is not None:
        if go_backwards:
            inputs = tf.reverse_sequence(
                inputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis
            )
        outputs, h, c, _, _ = tf.raw_ops.CudnnRNNV3(
            input=inputs,
            input_h=init_h,
            input_c=init_c,
            params=params,
            is_training=True,
            rnn_mode="lstm",
            sequence_lengths=sequence_lengths,
            time_major=time_major,
        )
        if go_backwards:
            outputs = tf.reverse_sequence(
                outputs, sequence_lengths, seq_axis=seq_axis, batch_axis=batch_axis
            )
            outputs = tf.reverse(outputs, axis=[seq_axis])
    else:
        if go_backwards:
            # The input is already in time-major form here; reverse along time.
            inputs = tf.reverse(inputs, axis=[0])
        outputs, h, c, _ = tf.raw_ops.CudnnRNN(
            input=inputs,
            input_h=init_h,
            input_c=init_c,
            params=params,
            is_training=True,
            rnn_mode="lstm",
        )

    last_output = outputs[-1]
    if not time_major and sequence_lengths is None and return_sequences:
        outputs = tf.transpose(outputs, perm=[1, 0, 2])
    h = tf.squeeze(h, axis=seq_axis)
    c = tf.squeeze(c, axis=seq_axis)

    # With variable-length input the cuDNN kernel fills zeros past the end of
    # each sequence, so the final hidden state is used as the last output.
    if sequence_lengths is not None:
        last_output = h
    if not return_sequences:
        # Match the CPU function output format.
        outputs = tf.expand_dims(last_output, axis=0 if time_major else 1)

    return (
        last_output,
        outputs,
        h,
        c,
        gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_GPU),
    )


def lstm_with_backend_selection(inputs, init_h, init_c, kernel,
                                recurrent_kernel, bias, mask, time_major,
                                go_backwards, sequence_lengths,
                                zero_output_for_mask, return_sequences):
    """Call the LSTM with optimized backend kernel selection.

    Under the hood, this function will create two TF functions: one with the
    most generic kernel, which can run on all device conditions, and a second
    one with the cuDNN-specific kernel, which can only run on GPU.

    The first function will be called with normal_lstm_params, while the second
    function is not called, but only registered in the graph. The Grappler will
    do the proper graph rewrite and swap the optimized TF function based on the
    device placement.

    Args:
      inputs: Input tensor of LSTM layer.
      init_h: Initial state tensor for the cell output.
      init_c: Initial state tensor for the cell hidden state.
      kernel: Weights for cell kernel.
      recurrent_kernel: Weights for cell recurrent kernel.
      bias: Weights for cell kernel bias and recurrent bias. Only recurrent bias
        is used in this case.
      mask: Boolean tensor for mask out the steps within sequence.
        An individual `True` entry indicates that the corresponding timestep
        should be utilized, while a `False` entry indicates that the
        corresponding timestep should be ignored.
      time_major: Boolean, whether the inputs are in the format of
        [time, batch, feature] or [batch, time, feature].
      go_backwards: Boolean (default False). If True, process the input sequence
        backwards and return the reversed sequence.
      sequence_lengths: The lengths of all sequences coming from a variable
        length input, such as ragged tensors. If the input has a fixed timestep
        size, this should be None.
      zero_output_for_mask: Boolean, whether to output zero for masked timestep.
      return_sequences: Boolean. If True, return the recurrent outputs for all
        timesteps in the sequence. If False, only return the output for the
        last timestep (which consumes less memory).

    Returns:
      List of output tensors, same as standard_lstm.
    """
    params = {
        "inputs": inputs,
        "init_h": init_h,
        "init_c": init_c,
        "kernel": kernel,
        "recurrent_kernel": recurrent_kernel,
        "bias": bias,
        "mask": mask,
        "time_major": time_major,
        "go_backwards": go_backwards,
        "sequence_lengths": sequence_lengths,
        "zero_output_for_mask": zero_output_for_mask,
        "return_sequences": return_sequences,
    }

    def gpu_lstm_with_fallback(inputs, init_h, init_c, kernel, recurrent_kernel,
                               bias, mask, time_major, go_backwards,
                               sequence_lengths, zero_output_for_mask,
                               return_sequences):
        """Use the cuDNN kernel when mask is None or strictly right padded."""
        gpu_kwargs = dict(
            inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel,
            recurrent_kernel=recurrent_kernel, bias=bias, mask=mask,
            time_major=time_major, go_backwards=go_backwards,
            sequence_lengths=sequence_lengths, return_sequences=return_sequences,
        )
        if mask is None:
            return gpu_lstm(**gpu_kwargs)

        def cudnn_lstm_fn():
            return gpu_lstm(**gpu_kwargs)

        def standard_lstm_fn():
            return standard_lstm(
                inputs=inputs, init_h=init_h, init_c=init_c, kernel=kernel,
                recurrent_kernel=recurrent_kernel, bias=bias, mask=mask,
                time_major=time_major, go_backwards=go_backwards,
                sequence_lengths=sequence_lengths,
                zero_output_for_mask=zero_output_for_mask,
                return_sequences=return_sequences,
            )

        return tf.cond(
            gru_lstm_utils.is_cudnn_supported_inputs(mask, time_major),
            true_fn=cudnn_lstm_fn,
            false_fn=standard_lstm_fn,
        )

    if gru_lstm_utils.use_new_gru_lstm_impl():
        # Chooses the implementation dynamically based on the running device.
        (
            last_output,
            outputs,
            new_h,
            new_c,
            runtime,
        ) = tf.__internal__.execute_fn_for_device(
            {
                gru_lstm_utils.CPU_DEVICE_NAME: lambda: standard_lstm(**params),
                gru_lstm_utils.GPU_DEVICE_NAME: lambda: gpu_lstm_with_fallback(
                    **params
                ),
            },
            lambda: standard_lstm(**params),
        )
    else:
        # Each `tf.function` gets a unique API name so that Grappler can pair
        # up the CPU and GPU implementations of the same layer in the graph.
        api_name = "lstm_" + str(uuid.uuid4())
        supportive_attribute = {
            "time_major": time_major,
            "go_backwards": go_backwards,
        }
        defun_standard_lstm = gru_lstm_utils.generate_defun_backend(
            api_name,
            gru_lstm_utils.CPU_DEVICE_NAME,
            standard_lstm,
            supportive_attribute,
        )
        defun_gpu_lstm = gru_lstm_utils.generate_defun_backend(
            api_name,
            gru_lstm_utils.GPU_DEVICE_NAME,
            gpu_lstm_with_fallback,
            supportive_attribute,
        )

        # Call the normal LSTM implementation and register the cuDNN-based one;
        # Grappler swaps them at graph-optimization time based on device
        # placement.
        last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
            **params
        )
        gru_lstm_utils.function_register(defun_gpu_lstm, **params)

    return last_output, outputs, new_h, new_c, runtime