a
    Sic{                     @   s.  d Z ddlmZ ddlmZ ddlmZ ddlZddlZddlm  m	Z
 ddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZ dZdZdd Zd=ddZ dd Z!edgdedgdG dd dej"Z#G dd de#Z$ed gded!gdG d"d# d#e$Z%ed$gded%gdG d&d' d'e$Z&e'd(d)Z(ed*gded+gdG d,d( d(e(Z)ed-gded.gdG d/d0 d0e$Z*ed1gded2gdG d3d4 d4e$Z+ed5gded6gdG d7d8 d8e#Z,d9d: Z-d;d< Z.dS )>a}  Legacy module implementing RNN Cells.

This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
    )absolute_import)division)print_functionN)activations)backend)initializers)base_layer_utils)
input_spec)base)tf_utils)
tf_logging)keras_export)	tf_exportbiaskernelc                 C   s,   zt | | W n ty"   Y dS 0 dS d S )NFT)getattrAttributeError)obj	attr_name r   Y/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/layers/rnn/legacy_cells.py_hasattr3   s
    r   Fc                 C   s  t | tjrT| }t| }|jjdkr8tjj|d}q|jjdkrt	d| n@t
| }|jdurp| nd}| rtj| tjdnd}t |tjr|}t|}|jjdkrtjj|d}n|jjdkrt	d| nDt
|}|jdur| nd}| r(tj| tjdnd}|r\t
||}|jdurV| nd}n8|du sp|du rt	d|  d| t||fd}|S )	a#  Concat that enables int, Tensor, or TensorShape values.

    This function takes a size specification, which can be an integer, a
    TensorShape, or a Tensor, and converts it into a concatenated Tensor
    (if static = False) or a list of integers (if static = True).

    Args:
      prefix: The prefix; usually the batch size (and/or time step size).
        (TensorShape, int, or Tensor.)
      suffix: TensorShape, int, or Tensor.
      static: If `True`, return a python list with possibly unknown dimensions.
        Otherwise return a `Tensor`.

    Returns:
      shape: the concatenation of prefix and suffix.

    Raises:
      ValueError: if `suffix` is not a scalar or vector (or TensorShape).
      ValueError: if prefix or suffix was `None` and asked for dynamic
        Tensors out.
    r      zFPrefix tensor must be either a scalar or vector, but received tensor: NdtypezFsuffix tensor must be either a scalar or vector, but received tensor: z2Prefix or suffix can't be None. Received prefix = z and suffix = )
isinstancetfTensorget_static_valueshapendimscompatv1expand_dims
ValueErrorTensorShapeas_listis_fully_definedconstantint32concatenateconcat)prefixsuffixstaticpp_staticss_staticr   r   r   r   _concat<   s^    



r3   c                    s    fdd}t j|| S )zCCreate tensors of zeros based on state_size, batch_size, and dtype.c                    s<   t  | }tj|d}t s8t  | dd}|| |S )z7Combine s with batch_size to get a proper tensor shape.r   T)r.   )r3   r   zerosexecuting_eagerly	set_shape)r1   csizec_static
batch_sizer   r   r   get_state_shape   s    

z,_zero_state_tensors.<locals>.get_state_shape)r   nestmap_structure)
state_sizer;   r   r<   r   r:   r   _zero_state_tensors   s    	r@   z*keras.__internal__.legacy.rnn_cell.RNNCell)r"   znn.rnn_cell.RNNCellc                       s   e Zd ZdZd fdd	Zd fdd	Zdd	 Zed
d Zedd Z	dd Z
dddZdd Z fddZedd Z  ZS )RNNCellac  Abstract object representing an RNN cell.

    Every `RNNCell` must have the properties below and implement `call` with
    the signature `(output, next_state) = call(input, state)`.  The optional
    third input argument, `scope`, is allowed for backwards compatibility
    purposes; but should be left off for new subclasses.

    This definition of cell differs from the definition used in the literature.
    In the literature, 'cell' refers to an object with a single scalar output.
    This definition refers to a horizontal array of such units.

    An RNN cell, in the most abstract setting, is anything that has
    a state and performs some operation that takes a matrix of inputs.
    This operation results in an output matrix with `self.output_size` columns.
    If `self.state_size` is an integer, this operation also results in a new
    state matrix with `self.state_size` columns.  If `self.state_size` is a
    (possibly nested tuple of) TensorShape object(s), then it should return a
    matching structure of Tensors having shape `[batch_size].concatenate(s)`
    for each `s` in `self.batch_size`.
    TNc                    s$   t  jf |||d| d| _d S )N)	trainablenamer   T)super__init___is_tf_rnn_cell)selfrB   rC   r   kwargs	__class__r   r   rE      s    zRNNCell.__init__c                    s   |durPt jjj|| jd"}t j|||dW  d   S 1 sD0    Y  nrd}t| |d}|du rt jjjt jj | jd}t	| || | t ||W  d   S 1 s0    Y  dS )a  Run this RNN cell on inputs, starting from the given state.

        Args:
          inputs: `2-D` tensor with shape `[batch_size, input_size]`.
          state: if `self.state_size` is an integer, this should be a
            `2-D Tensor` with shape `[batch_size, self.state_size]`. Otherwise,
            if `self.state_size` is a tuple of integers, this should be a tuple
            with shapes `[batch_size, s] for s in self.state_size`.
          scope: VariableScope for the created subgraph; defaults to class name.

        Returns:
          A pair containing:

          - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
          - New state: Either a single `2-D` tensor, or a tuple of tensors
            matching the arity and shapes of `state`.
        N)custom_getter)scopernncell_scope)
r   r!   r"   variable_scope_rnn_get_variablerD   __call__r   get_variable_scopesetattr)rG   inputsstaterL   scope_attrnamerI   r   r   rP      s     2
zRNNCell.__call__c                    s   ||i | t jj r" j}n2 t jj v pRt oRt d t jj v }|r~t	 fdd| j
D r~| j
  n(|st	 fdd| jD r| j   S )Nr   c                 3   s   | ]} |uV  qd S Nr   .0vvariabler   r   	<genexpr>   s   z,RNNCell._rnn_get_variable.<locals>.<genexpr>c                 3   s   | ]} |uV  qd S rV   r   rW   rZ   r   r   r\      s   )r   r!   r"   #executing_eagerly_outside_functionsrB   trainable_variablesr   is_split_variablelistall_trainable_weightsappend_non_trainable_weights)rG   getterargsrH   rB   r   rZ   r   rO      s     
zRNNCell._rnn_get_variablec                 C   s   t ddS )zsize(s) of state(s) used by this cell.

        It can be represented by an Integer, a TensorShape or a tuple of
        Integers or TensorShapes.
        Abstract methodNNotImplementedErrorrG   r   r   r   r?      s    zRNNCell.state_sizec                 C   s   t ddS )z>Integer or TensorShape: size of outputs produced by this cell.rg   Nrh   rj   r   r   r   output_size   s    zRNNCell.output_sizec                 C   s   d S rV   r   )rG   _r   r   r   build   s    zRNNCell.buildc                 C   s   |d urt j|dd}|d urlt |r8t j|dd}n|}|jjd j|krltd|jjd j d| |d ur|j|krtd|j d	| |jjd jpt j	j
|d }|j}|d u s|d u rtd
| d| | ||S )NrS   rC   T)partialr   zTbatch size from input tensor is different from the input param. Input tensor batch: z, batch_size: zOdtype from input tensor is different from the input param. Input tensor dtype: z	, dtype: zQbatch_size and dtype cannot be None while constructing initial state: batch_size=z, dtype=)r   convert_to_tensor	is_tensorr   r   dimsvaluer$   r   r!   r"   
zero_state)rG   rS   r;   r   static_batch_sizer   r   r   get_initial_state   sH    
zRNNCell.get_initial_statec           
      C   s   | j }t }|rJt| drJt| d\}}}}||krJ||krJ||krJ|S tt| jd  t	|||}	W d   n1 s0    Y  |r||||	f| _
|	S )aN  Return zero-filled state tensor(s).

        Args:
          batch_size: int, float, or unit Tensor representing the batch size.
          dtype: the data type to use for the state.

        Returns:
          If `state_size` is an int or TensorShape, then the return value is a
          `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

          If `state_size` is a nested list or tuple, then the return value is
          a nested list or tuple (of the same structure) of `2-D` tensors with
          the shapes `[batch_size, s]` for each s in `state_size`.
        _last_zero_state	ZeroStateN)r?   r   r5   r   r   r   
name_scopetype__name__r@   rw   )
rG   r;   r   r?   is_eagerlast_state_sizelast_batch_size
last_dtypelast_outputoutputr   r   r   rt   #  s*    *zRNNCell.zero_statec                    s
   t   S rV   )rD   
get_configrj   rI   r   r   r   J  s    zRNNCell.get_configc                 C   s   dS )NFr   rj   r   r   r   !_use_input_spec_as_call_signatureM  s    z)RNNCell._use_input_spec_as_call_signature)TNN)N)NNN)r{   
__module____qualname____doc__rE   rP   rO   propertyr?   rk   rm   rv   rt   r   r   __classcell__r   r   rI   r   rA      s   	#


&'rA   c                   @   s   e Zd ZdZdddZdS )LayerRNNCella  Subclass of RNNCells that act like proper `tf.Layer` objects.

    For backwards compatibility purposes, most `RNNCell` instances allow their
    `call` methods to instantiate variables via `tf.compat.v1.get_variable`.
    The underlying variable scope thus keeps track of any variables, and
    returning cached versions.  This is atypical of `tf.layer` objects, which
    separate this part of layer building into a `build` method that is only
    called once.

    Here we provide a subclass for `RNNCell` objects that act exactly as
    `Layer` objects do.  They must provide a `build` method and their
    `call` methods do not access Variables `tf.compat.v1.get_variable`.
    Nc                 O   s"   t jj| ||g|R d|i|S )aO  Run this RNN cell on inputs, starting from the given state.

        Args:
          inputs: `2-D` tensor with shape `[batch_size, input_size]`.
          state: if `self.state_size` is an integer, this should be a `2-D
            Tensor` with shape `[batch_size, self.state_size]`.  Otherwise, if
            `self.state_size` is a tuple of integers, this should be a tuple
            with shapes `[batch_size, s] for s in self.state_size`.
          scope: optional cell scope.
          *args: Additional positional arguments.
          **kwargs: Additional keyword arguments.

        Returns:
          A pair containing:

          - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
          - New state: Either a single `2-D` tensor, or a tuple of tensors
            matching the arity and shapes of `state`.
        rL   )
base_layerLayerrP   )rG   rS   rT   rL   rf   rH   r   r   r   rP   d  s    zLayerRNNCell.__call__)N)r{   r   r   r   rP   r   r   r   r   r   U  s   r   z/keras.__internal__.legacy.rnn_cell.BasicRNNCellznn.rnn_cell.BasicRNNCellc                       s\   e Zd ZdZd fdd	Zedd Zedd Zej	d	d
 Z
dd Z fddZ  ZS )BasicRNNCella  The most basic RNN cell.

    Note that this cell is not optimized for performance. Please use
    `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.

    Args:
      num_units: int, The number of units in the RNN cell.
      activation: Nonlinearity to use.  Default: `tanh`. It could also be string
        that is within Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. If not `True`, and the existing scope already has the
        given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().
    Nc                    s   t jddd t jf |||d| t| j t rRtj	drRt
d|  tjdd| _|| _|rxt|| _ntj| _d S )Nz`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.SimpleRNNCell`, and will be replaced by that in Tensorflow 2.0.   
stacklevel_reuserC   r   GPUz%s: Note that this cell is not optimized for performance. Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better performance on GPU.ndim)warningswarnrD   rE   _check_supported_dtypesr   r   r5   configlist_logical_devicesloggingwarningr	   	InputSpec
_num_unitsr   get_activationtanh)rG   	num_units
activationreuserC   r   rH   rI   r   r   rE     s     	
zBasicRNNCell.__init__c                 C   s   | j S rV   r   rj   r   r   r   r?     s    zBasicRNNCell.state_sizec                 C   s   | j S rV   r   rj   r   r   r   rk     s    zBasicRNNCell.output_sizec                 C   sv   |d d u rt d| t| j |d }| jt|| j | jgd| _| jt| jgtj	j
j| jdd| _d| _d S )N7Expected inputs.shape[-1] to be known, received shape: r   r   r   initializerT)r$   r   r   
add_weight_WEIGHTS_VARIABLE_NAMEr   _kernel_BIAS_VARIABLE_NAMEr   r!   r"   zeros_initializer_biasbuiltrG   inputs_shapeinput_depthr   r   r   rm     s$    
zBasicRNNCell.buildc                 C   sH   t ||g tt||gd| j}tj|| j}| |}||fS )zLMost basic RNN: output = new_state = act(W * input + U * state +
        B).r   )	_check_rnn_cell_input_dtypesr   matmulr+   r   nnbias_addr   r   )rG   rS   rT   gate_inputsr   r   r   r   call  s
    
zBasicRNNCell.callc                    s>   | j t| j| jd}t  }tt|	 t|	  S )N)r   r   r   )
r   r   	serializer   r   rD   r   dictr`   itemsrG   r   base_configrI   r   r   r     s    

zBasicRNNCell.get_config)NNNNr{   r   r   r   rE   r   r?   rk   r   shape_type_conversionrm   r   r   r   r   r   rI   r   r     s       #


	r   z*keras.__internal__.legacy.rnn_cell.GRUCellznn.rnn_cell.GRUCellc                       s\   e Zd ZdZd fdd	Zedd Zedd Zej	d	d
 Z
dd Z fddZ  ZS )GRUCella  Gated Recurrent Unit cell.

    Note that this cell is not optimized for performance. Please use
    `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or
    `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.

    Args:
      num_units: int, The number of units in the GRU cell.
      activation: Nonlinearity to use.  Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
      kernel_initializer: (optional) The initializer to use for the weight and
        projection matrices.
      bias_initializer: (optional) The initializer to use for the bias.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().
        References: Learning Phrase Representations using RNN Encoder Decoder
        for Statistical Machine Translation: [Cho et al., 2014]
        (https://aclanthology.coli.uni-saarland.de/papers/D14-1179/d14-1179)
        ([pdf](http://emnlp2014.org/papers/pdf/EMNLP2014179.pdf))
    Nc           	         s   t jddd t jf |||d| t| j t rRtj	drRt
d|  tjdd| _|| _|rxt|| _ntj| _t|| _t|| _d S )Nz`tf.nn.rnn_cell.GRUCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.GRUCell`, and will be replaced by that in Tensorflow 2.0.r   r   r   r   z%s: Note that this cell is not optimized for performance. Please use tf.contrib.cudnn_rnn.CudnnGRU for better performance on GPU.r   )r   r   rD   rE   r   r   r   r5   r   r   r   r   r	   r   r   r   r   r   r   r   _kernel_initializer_bias_initializer)	rG   r   r   r   kernel_initializerbias_initializerrC   r   rH   rI   r   r   rE   	  s$    
zGRUCell.__init__c                 C   s   | j S rV   r   rj   r   r   r   r?   0  s    zGRUCell.state_sizec                 C   s   | j S rV   r   rj   r   r   r   rk   4  s    zGRUCell.output_sizec                 C   s   |d d u rt d| t| j |d }| jdt || j d| j g| jd| _| jdt d| j g| j	d urx| j	nt
jjjd| jdd| _| jdt || j | jg| jd| _| jdt | jg| j	d ur| j	nt
jjj| jdd| _d	| _d S )
Nr   r   zgates/%sr   r         ?r   zcandidate/%sT)r$   r   r   r   r   r   r   _gate_kernelr   r   r   r!   r"   constant_initializer
_gate_bias_candidate_kernelr   _candidate_biasr   r   r   r   r   rm   8  sB    

	
zGRUCell.buildc                 C   s   t ||g tt||gd| j}tj|| j}t|}tj	|ddd\}}|| }tt||gd| j
}tj|| j}| |}	|| d| |	  }
|
|
fS )z-Gated recurrent unit (GRU) with nunits cells.r   r   rs   num_or_size_splitsaxis)r   r   r   r+   r   r   r   r   sigmoidsplitr   r   r   )rG   rS   rT   r   rs   rur_state	candidater7   new_hr   r   r   r   `  s    

zGRUCell.callc                    sR   | j t| jt| jt| j| jd}t 	 }t
t| t|  S )N)r   r   r   r   r   )r   r   r   r   r   r   r   r   rD   r   r   r`   r   r   rI   r   r   r   w  s    

	
zGRUCell.get_config)NNNNNNr   r   r   rI   r   r     s          '


'r   LSTMStateTuple)r7   hz1keras.__internal__.legacy.rnn_cell.LSTMStateTupleznn.rnn_cell.LSTMStateTuplec                   @   s    e Zd ZdZdZedd ZdS )r   zTuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

    Stores two elements: `(c, h)`, in that order. Where `c` is the hidden state
    and `h` is the output.

    Only used when `state_is_tuple=True`.
    r   c                 C   s2   | \}}|j |j kr,td|j  d|j  |j S )Nz(Inconsistent dtypes for internal state: z vs )r   	TypeError)rG   r7   r   r   r   r   r     s    zLSTMStateTuple.dtypeN)r{   r   r   r   	__slots__r   r   r   r   r   r   r     s   z0keras.__internal__.legacy.rnn_cell.BasicLSTMCellznn.rnn_cell.BasicLSTMCellc                       s\   e Zd ZdZd fdd	Zedd Zed	d
 Zej	dd Z
dd Z fddZ  ZS )BasicLSTMCella
  DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead.

    Basic LSTM recurrent network cell.

    The implementation is based on

    We add forget_bias (default: 1) to the biases of the forget gate in order to
    reduce the scale of forgetting in the beginning of the training.

    It does not allow cell clipping, a projection layer, and does not
    use peep-hole connections: it is the basic baseline.

    For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell`
    that follows.

    Note that this cell is not optimized for performance. Please use
    `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
    `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
    better performance on CPU.
    r   TNc           	         s   t jddd t jf |||d| t| j |sBtd|  t	 rbtj
drbtd|  tjdd| _|| _|| _|| _|rt|| _ntj| _d	S )
a  Initialize the basic LSTM cell.

        Args:
          num_units: int, The number of units in the LSTM cell.
          forget_bias: float, The bias added to forget gates (see above). Must
            set to `0.0` manually when restoring from CudnnLSTM-trained
            checkpoints.
          state_is_tuple: If True, accepted and returned states are 2-tuples of
            the `c_state` and `m_state`.  If False, they are concatenated along
            the column axis.  The latter behavior will soon be deprecated.
          activation: Activation function of the inner states.  Default: `tanh`.
            It could also be string that is within Keras activation function
            names.
          reuse: (optional) Python boolean describing whether to reuse variables
            in an existing scope.  If not `True`, and the existing scope already
            has the given variables, an error is raised.
          name: String, the name of the layer. Layers with the same name will
            share weights, but to avoid mistakes we require reuse=True in such
            cases.
          dtype: Default dtype of the layer (default of `None` means use the
            type of the first input). Required when `build` is called before
            `call`.
          **kwargs: Dict, keyword named properties for common layer attributes,
            like `trainable` etc when constructing the cell from configs of
            get_config().  When restoring from CudnnLSTM-trained checkpoints,
            must use `CudnnCompatibleLSTMCell` instead.
        z`tf.nn.rnn_cell.BasicLSTMCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.LSTMCell`, and will be replaced by that in Tensorflow 2.0.r   r   r   _%s: Using a concatenated state is slower and will soon be deprecated.  Use state_is_tuple=True.r   %s: Note that this cell is not optimized for performance. Please use tf.contrib.cudnn_rnn.CudnnLSTM for better performance on GPU.r   N)r   r   rD   rE   r   r   r   r   r   r5   r   r   r	   r   r   _forget_bias_state_is_tupler   r   r   r   )	rG   r   forget_biasstate_is_tupler   r   rC   r   rH   rI   r   r   rE     s.    &
zBasicLSTMCell.__init__c                 C   s   | j rt| j| jS d| j S )Nr   )r   r   r   rj   r   r   r   r?      s    zBasicLSTMCell.state_sizec                 C   s   | j S rV   r   rj   r   r   r   rk     s    zBasicLSTMCell.output_sizec                 C   s   |d d u rt d| t| j |d }| j}| jt|| d| j gd| _| jtd| j gtj	j
j| jdd| _d| _d S )Nr   r      r   r   r   T)r$   r   r   r   r   r   r   r   r   r!   r"   r   r   r   )rG   r   r   h_depthr   r   r   rm     s&    

zBasicLSTMCell.buildc                 C   s  t ||g tj}tjdtjd}| jr2|\}}ntj|d|d\}}tt||gd| j	}tj
|| j}tj|d|d\}}	}
}tj| j|
jd}tj}tj}||||||
||||| |	}|| |||}| jrt||}nt||gd}||fS )a"  Long short-term memory cell (LSTM).

        Args:
          inputs: `2-D` tensor with shape `[batch_size, input_size]`.
          state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,
            num_units]`, if `state_is_tuple` has been set to `True`.  Otherwise,
            a `Tensor` shaped `[batch_size, 2 * num_units]`.

        Returns:
          A pair containing the new hidden state, and the new state (either a
            `LSTMStateTuple` or a concatenated state, depending on
            `state_is_tuple`).
        r   r   r   r   r   )r   r   r   r(   r)   r   r   r   r+   r   r   r   r   r   r   addmultiplyr   r   )rG   rS   rT   r   oner7   r   r   ijfoforget_bias_tensorr   r   new_cr   	new_stater   r   r   r   "  s*    
zBasicLSTMCell.callc                    sF   | j | j| jt| j| jd}t  }t	t
| t
|  S )N)r   r   r   r   r   )r   r   r   r   r   r   r   rD   r   r   r`   r   r   rI   r   r   r   Q  s    

zBasicLSTMCell.get_config)r   TNNNNr   r   r   rI   r   r     s          H


/r   z+keras.__internal__.legacy.rnn_cell.LSTMCellznn.rnn_cell.LSTMCellc                       s\   e Zd ZdZd fdd	Zedd	 Zed
d Zej	dd Z
dd Z fddZ  ZS )LSTMCella  Long short-term memory unit (LSTM) recurrent network cell.

    The default non-peephole implementation is based on (Gers et al., 1999).
    The peephole implementation is based on (Sak et al., 2014).

    The class uses optional peep-hole connections, optional cell clipping, and
    an optional projection layer.

    Note that this cell is not optimized for performance. Please use
    `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
    `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
    better performance on CPU.
    References:
      Long short-term memory recurrent neural network architectures for large
      scale acoustic modeling:
        [Sak et al., 2014]
        (https://www.isca-speech.org/archive/interspeech_2014/i14_0338.html)
        ([pdf]
        (https://www.isca-speech.org/archive/archive_papers/interspeech_2014/i14_0338.pdf))
      Learning to forget:
        [Gers et al., 1999]
        (http://digital-library.theiet.org/content/conferences/10.1049/cp_19991218)
        ([pdf](https://arxiv.org/pdf/1409.2329.pdf))
      Long Short-Term Memory:
        [Hochreiter et al., 1997]
        (https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735)
        ([pdf](http://ml.jku.at/publications/older/3504.pdf))
    FNr   Tc                    s2  t jddd t jf |||d| t| j |
sBtd|  |dusR|dur^td|  t	 r~tj
dr~td	|  tjdd
| _|| _|| _|| _t|| _|| _|| _|| _|| _|	| _|
| _|rt|| _ntj| _|r|
rt||n|| | _|| _n |
rt||nd| | _|| _dS )a,
  Initialize the parameters for an LSTM cell.

        Args:
          num_units: int, The number of units in the LSTM cell.
          use_peepholes: bool, set True to enable diagonal/peephole connections.
          cell_clip: (optional) A float value, if provided the cell state is
            clipped by this value prior to the cell output activation.
          initializer: (optional) The initializer to use for the weight and
            projection matrices.
          num_proj: (optional) int, The output dimensionality for the projection
            matrices.  If None, no projection is performed.
          proj_clip: (optional) A float value.  If `num_proj > 0` and
            `proj_clip` is provided, then the projected values are clipped
            elementwise to within `[-proj_clip, proj_clip]`.
          num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
            variable_scope partitioner instead.
          num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
            variable_scope partitioner instead.
          forget_bias: Biases of the forget gate are initialized by default to 1
            in order to reduce the scale of forgetting at the beginning of the
            training. Must set it manually to `0.0` when restoring from
            CudnnLSTM trained checkpoints.
          state_is_tuple: If True, accepted and returned states are 2-tuples of
            the `c_state` and `m_state`.  If False, they are concatenated along
            the column axis.  This latter behavior will soon be deprecated.
          activation: Activation function of the inner states.  Default: `tanh`.
            It could also be string that is within Keras activation function
            names.
          reuse: (optional) Python boolean describing whether to reuse variables
            in an existing scope.  If not `True`, and the existing scope already
            has the given variables, an error is raised.
          name: String, the name of the layer. Layers with the same name will
            share weights, but to avoid mistakes we require reuse=True in such
            cases.
          dtype: Default dtype of the layer (default of `None` means use the
            type of the first input). Required when `build` is called before
            `call`.
          **kwargs: Dict, keyword named properties for common layer attributes,
            like `trainable` etc when constructing the cell from configs of
            get_config().  When restoring from CudnnLSTM-trained checkpoints,
            use `CudnnCompatibleLSTMCell` instead.
        z`tf.nn.rnn_cell.LSTMCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.LSTMCell`, and will be replaced by that in Tensorflow 2.0.r   r   r   r   Nz%s: The num_unit_shards and proj_unit_shards parameters are deprecated and will be removed in Jan 2017.  Use a variable scope with a partitioner instead.r   r   r   ) r   r   rD   rE   r   r   r   r   r   r5   r   r   r	   r   r   _use_peepholes
_cell_clipr   r   _initializer	_num_proj
_proj_clip_num_unit_shards_num_proj_shardsr   r   r   r   r   r   _state_size_output_size)rG   r   use_peepholes	cell_clipr   num_proj	proj_clipnum_unit_shardsnum_proj_shardsr   r   r   r   rC   r   rH   rI   r   r   rE   }  s\    <
zLSTMCell.__init__c                 C   s   | j S rV   )r   rj   r   r   r   r?     s    zLSTMCell.state_sizec                 C   s   | j S rV   )r   rj   r   r   r   rk     s    zLSTMCell.output_sizec                 C   sn  |d d u rt d| t| j |d }| jd u r<| jn| j}| jd ur\tjj	| jnd }| j
t|| d| j g| j|d| _| jd u rtjjj}ntjjj| jd}| j
td| j g|d| _| jr| j
d| jg| jd| _| j
d| jg| jd| _| j
d	| jg| jd| _| jd urd| jd ur>tjj	| jnd }| j
d
t | j| jg| j|d| _d| _d S )Nr   r   r   )r   r   partitionerr   r   w_f_diagw_i_diagw_o_diagzprojection/%sT)r$   r   r   r   r   r   r   r!   r"   fixed_size_partitionerr   r   r   r   r   r   r   r   	_w_f_diag	_w_i_diag	_w_o_diagr   _proj_kernelr   )rG   r   r   r   maybe_partitionerr   maybe_proj_partitionerr   r   r   rm      sn    



zLSTMCell.buildc                 C   s  t ||g | jdu r| jn| j}tj}| jr8|\}}n0t|ddgd| jg}t|d| jgd|g}| dj	d j
}|du rtd|  tt||gd| j}tj|| j}tj|ddd\}	}
}}| jr||| j | j|  | ||	| j|  | |
  }n$||| j | ||	| |
  }| jdur`t|| j | j}| jr||| j|  | | }n||| | }| jdurt|| j}| jdurt|| j | j}| jrt||nt||gd}||fS )	a  Run one step of LSTM.

        Args:
          inputs: input Tensor, must be 2-D, `[batch, input_size]`.
          state: if `state_is_tuple` is False, this must be a state Tensor,
            `2-D, [batch, state_size]`.  If `state_is_tuple` is True, this must
            be a tuple of state Tensors, both `2-D`, with column sizes `c_state`
            and `m_state`.

        Returns:
          A tuple containing:

          - A `2-D, [batch, output_dim]`, Tensor representing the output of the
            LSTM after reading `inputs` when previous state was `state`.
            Here output_dim is:
               num_proj if num_proj was set,
               num_units otherwise.
          - Tensor(s) representing the new state of LSTM after reading `inputs`
            when the previous state was `state`.  Same type and shape(s) as
            `state`.

        Raises:
          ValueError: If input size cannot be inferred from inputs via
            static shape inference.
        Nr   r   r   r   zMCould not infer input size from inputs.get_shape()[-1].Received input shape: r   r   )r   r   r   r   r   r   slice	get_shape	with_rankrr   rs   r$   r   r+   r   r   r   r   r   r   r   r  r  r   r   clip_by_valuer  r  r   r   )rG   rS   rT   r   r   c_prevm_prev
input_sizelstm_matrixr   r   r   r   r7   mr   r   r   r   r   >  sd    
zLSTMCell.callc                    sh   | j | j| jt| j| j| j| j| j	| j
| jt| j| jd}t  }tt| t|  S )N)r   r   r   r   r   r   r   r   r   r   r   r   )r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rD   r   r   r`   r   r   rI   r   r   r     s    


zLSTMCell.get_config)FNNNNNNr   TNNNNr   r   r   rI   r   r   ]  s.                 {


=Vr   z/keras.__internal__.legacy.rnn_cell.MultiRNNCellznn.rnn_cell.MultiRNNCellc                       sf   e Zd ZdZd fdd	Zedd Zedd Z fd	d
Zedd Z	edd Z
dd Z  ZS )MultiRNNCellzRNN cell composed sequentially of multiple simple cells.

    Example:

    ```python
    num_units = [128, 64]
    cells = [BasicLSTMCell(num_units=n) for n in num_units]
    stacked_rnn_cell = MultiRNNCell(cells)
    ```
    Tc                    s   t d t   |s tdtj|s<td| dt	t
dd |D t	|k rjt t jdd || _t| jD ],\}}t|tjjjrz| j|d	|f d
 qz|| _|stdd | jD rtddd | jD  dS )aW  Create a RNN cell composed sequentially of a number of RNNCells.

        Args:
          cells: list of RNNCells that will be composed in this order.
          state_is_tuple: If True, accepted and returned states are n-tuples,
            where `n = len(cells)`.  If False, the states are all concatenated
            along the column axis.  This latter behavior will soon be
            deprecated.

        Raises:
          ValueError: if cells is empty (not allowed), or at least one of the
            cells returns a state tuple but the flag `state_is_tuple` is
            `False`.
        z`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.z0Must specify at least one cell for MultiRNNCell.z-cells must be a list or tuple, but received: .c                 s   s   | ]}t |V  qd S rV   )idrX   cellr   r   r   r\         z(MultiRNNCell.__init__.<locals>.<genexpr>zWAt least two cells provided to MultiRNNCell are the same object and will share weights.r   zcell-%drn   c                 s   s   | ]}t j|jV  qd S rV   )r   r=   	is_nestedr?   rX   r7   r   r   r   r\     r  z]Some cells return tuples of states, but the flag state_is_tuple is not set. State sizes are: c                 S   s   g | ]
}|j qS r   r?   r  r   r   r   
<listcomp>  r  z)MultiRNNCell.__init__.<locals>.<listcomp>N)r   r   rD   rE   r$   r   r=   r  r   lensetlog_first_nWARN_cells	enumerater   __internal__tracking	Trackable_track_trackabler   any)rG   cellsr   cell_numberr  rI   r   r   rE     s8    

zMultiRNNCell.__init__c                 C   s2   | j rtdd | jD S tdd | jD S d S )Nc                 s   s   | ]}|j V  qd S rV   r  r  r   r   r   r\     r  z*MultiRNNCell.state_size.<locals>.<genexpr>c                 s   s   | ]}|j V  qd S rV   r  r  r   r   r   r\     r  )r   tupler  sumrj   r   r   r   r?     s    zMultiRNNCell.state_sizec                 C   s   | j d jS )Nr   )r  rk   rj   r   r   r   rk     s    zMultiRNNCell.output_sizec                    s   t t| jd Z | jrDt fdd| jD W  d    S t  W  d    S W d    n1 st0    Y  d S )Nrx   c                 3   s   | ]}|  V  qd S rV   )rt   r  r:   r   r   r\     s   z*MultiRNNCell.zero_state.<locals>.<genexpr>)	r   ry   rz   r{   r   r(  r  rD   rt   )rG   r;   r   rI   r:   r   rt     s    zMultiRNNCell.zero_statec                 C   s4   | j s
g S g }| jD ]}t|tjr||j7 }q|S rV   )rB   r  r   r   r   trainable_weights)rG   weightsr  r   r   r   r*     s    
zMultiRNNCell.trainable_weightsc                 C   s^   g }| j D ]}t|tjr
||j7 }q
| jsZg }| j D ]}t|tjr6||j7 }q6|| S |S rV   )r  r   r   r   non_trainable_weightsrB   r*  )rG   r+  r  r*  r   r   r   r,  
  s    

z"MultiRNNCell.non_trainable_weightsc           
   	   C   s   d}|}g }t | jD ]\}}tjjd|  | jrhtj|s^t	dt
| j d| || }n"t|d|gd|jg}||j7 }|||\}}	||	 W d   q1 s0    Y  q| jrt|n
t|d}||fS )z9Run this multi-layer cell on inputs, starting from state.r   zcell_%dz'Expected state to be a tuple of length z, but received: r   Nr   )r   r  r   r!   r"   rN   r   r=   r  r$   r  r?   r  rc   r(  r+   )
rG   rS   rT   cur_state_poscur_inp
new_statesr   r  	cur_stater   r   r   r   r     s4    

*

zMultiRNNCell.call)T)r{   r   r   r   rE   r   r?   rk   rt   r*  r,  r   r   r   r   rI   r   r    s   4


	
r  c                 C   s    t j| D ]}t|j qdS )a  Check whether the input tensors are with supported dtypes.

    Default RNN cells only support floats and complex as its dtypes since the
    activation function (tanh and sigmoid) only allow those types. This function
    will throw a proper error message if the inputs is not in a supported type.

    Args:
      inputs: tensor or nested structure of tensors that are feed to RNN cell as
        input or state.

    Raises:
      ValueError: if any of the input tensor are not having dtypes of float or
        complex.
    N)r   r=   flattenr   r   )rS   tr   r   r   r   8  s    r   c                 C   s4   | d u rd S t | } | js0| js0td|  d S )NzBRNN cell only supports floating point inputs, but received dtype: )r   as_dtypeis_floating
is_complexr$   r   r   r   r   r   K  s    
r   )F)/r   
__future__r   r   r   collectionsr   tensorflow.compat.v2r!   v2r   kerasr   r   r   keras.enginer   r	   Zkeras.legacy_tf_layersr
   r   keras.utilsr   tensorflow.python.platformr   r    tensorflow.python.util.tf_exportr   r   r   r   r   r3   r@   r   rA   r   r   r   
namedtuple_LSTMStateTupler   r   r   r  r   r   r   r   r   r   <module>   sh   
	
H

 A+

i

 



 <

  J

 