"""RNN helpers for TensorFlow models."""

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export

_concat = rnn_cell_impl._concat


def _transpose_batch_time(x):
  """Transposes the batch and time dimensions of a Tensor.

  If the input tensor has rank < 2 it returns the original tensor. Retains as
  much of the static shape information as possible.

  Args:
    x: A Tensor.

  Returns:
    x transposed along the first two dimensions.
  """
  ...


def _best_effort_input_batch_size(flat_input):
  """Get static input batch size if available, with fallback to the dynamic one.

  Args:
    flat_input: An iterable of time major input Tensors of shape `[max_time,
      batch_size, ...]`. All inputs should have compatible batch sizes.

  Returns:
    The batch size in Python integer if available, or a scalar Tensor otherwise.

  Raises:
    ValueError: if there is any input with an invalid shape.
  """
  ...


def _infer_state_dtype(explicit_dtype, state):
  """Infer the dtype of an RNN state.

  Args:
    explicit_dtype: explicitly declared dtype or None.
    state: RNN's hidden state. Must be a Tensor or a nested iterable containing
      Tensors.

  Returns:
    dtype: inferred dtype of hidden state.

  Raises:
    ValueError: if `state` has heterogeneous dtypes or is empty.
  """
  ...


def _maybe_tensor_shape_from_tensor(shape):
  ...


def _should_cache():
  """Returns True if a default caching device should be set, otherwise False."""
  ...


def _rnn_step(time,
              sequence_length,
              min_sequence_length,
              max_sequence_length,
              zero_output,
              state,
              call_cell,
              state_size,
              skip_conditionals=False):
  """Calculate one step of a dynamic RNN minibatch.

  Returns an (output, state) pair conditioned on `sequence_length`.
  When skip_conditionals=False, the pseudocode is something like:

  if t >= max_sequence_length:
    return (zero_output, state)
  if t < min_sequence_length:
    return call_cell()

  # Selectively output zeros or output, old state or new state depending
  # on whether we've finished calculating each row.
  new_output, new_state = call_cell()
  final_output = np.vstack([
    zero_output if time >= sequence_length[r] else new_output_r
    for r, new_output_r in enumerate(new_output)
  ])
  final_state = np.vstack([
    state[r] if time >= sequence_length[r] else new_state_r
    for r, new_state_r in enumerate(new_state)
  ])
  return (final_output, final_state)

  Args:
    time: int32 `Tensor` scalar.
    sequence_length: int32 `Tensor` vector of size [batch_size].
    min_sequence_length: int32 `Tensor` scalar, min of sequence_length.
    max_sequence_length: int32 `Tensor` scalar, max of sequence_length.
    zero_output: `Tensor` vector of shape [output_size].
    state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
      or a list/tuple of such tensors.
    call_cell: lambda returning tuple of (new_output, new_state) where
      new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
      new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
    state_size: The `cell.state_size` associated with the state.
    skip_conditionals: Python bool, whether to skip using the conditional
      calculations.  This is useful for `dynamic_rnn`, where the input tensor
      matches `max_sequence_length`, and using conditionals just slows
      everything down.

  Returns:
    A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
      final_output is a `Tensor` matrix of shape [batch_size, output_size]
      final_state is either a single `Tensor` matrix, or a tuple of such
        matrices (matching length and shapes of input `state`).

  Raises:
    ValueError: If the cell returns a state tuple whose length does not match
      that returned by `state_size`.
  """
  ...


def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
      or nested tuples of tensors.
    lengths:   A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  ...


@deprecation.deprecated(None, "Please use `keras.layers.Bidirectional("
                        "keras.layers.RNN(cell))`, which is equivalent to "
                        "this API")
@tf_export(v1=["nn.bidirectional_dynamic_rnn"])
@dispatch.add_dispatch_support
def bidirectional_dynamic_rnn(cell_fw,
                              cell_bw,
                              inputs,
                              sequence_length=None,
                              initial_state_fw=None,
                              initial_state_bw=None,
                              dtype=None,
                              parallel_iterations=None,
                              swap_memory=False,
                              time_major=False,
                              scope=None):
  """Creates a dynamic version of bidirectional recurrent neural network.

  Takes input and builds independent forward and backward RNNs. The input_size
  of forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states are
  ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not
  given.

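  A minimal usage sketch (the cell sizes, placeholder shapes, and variable
  names below are illustrative only, not part of this module):

  ```python
  batch_size, max_time, depth = 8, 20, 16
  data = tf.compat.v1.placeholder(tf.float32, [batch_size, max_time, depth])

  cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(32)
  cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(32)

  (output_fw, output_bw), (state_fw, state_bw) = (
      tf.compat.v1.nn.bidirectional_dynamic_rnn(
          cell_fw, cell_bw, data, dtype=tf.float32))

  # Concatenate both directions along the feature axis if a single output
  # tensor is preferred.
  outputs = tf.concat([output_fw, output_bw], 2)
  ```
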
  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: The RNN inputs.
      If time_major == False (default), this must be a tensor of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such elements.
      If time_major == True, this must be a tensor of shape: `[max_time,
        batch_size, ...]`, or a nested tuple of such elements.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences in the batch. If
      not provided, all batch entries are assumed to be full sequences; and time
      reversal is applied from time `0` to `max_time` for each sequence.
    initial_state_fw: (optional) An initial state for the forward RNN. This must
      be a tensor of appropriate type and shape `[batch_size,
      cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a
      tuple of tensors having shapes `[batch_size, s] for s in
      cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using the
      corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial states and expected output.
      Required if initial_states are not provided or RNN states have a
      heterogeneous dtype.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency and
      can be run in parallel, will be.  This parameter trades off time for
      space.  Values >> 1 use more memory but take less time, while smaller
      values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs which
      would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
      these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
      these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
      `time_major = True` is a bit more efficient because it avoids transposes
      at the beginning and end of the RNN calculation.  However, most TensorFlow
      data is batch-major, so by default this function accepts input and emits
      output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_states) where:
      outputs: A tuple (output_fw, output_bw) containing the forward and
        the backward rnn output `Tensor`.
        If time_major == False (default),
          output_fw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_bw.output_size]`.
        If time_major == True,
          output_fw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_bw.output_size]`.
        It returns a tuple instead of a single concatenated `Tensor`, unlike
        in the `bidirectional_rnn`. If the concatenated one is preferred,
        the forward and backward outputs can be concatenated as
        `tf.concat(outputs, 2)`.
      output_states: A tuple (output_state_fw, output_state_bw) containing
        the forward and the backward final states of bidirectional rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
  """
  ...


@deprecation.deprecated(
    None, "Please use `keras.layers.RNN(cell)`, which is equivalent to this API")
@tf_export(v1=["nn.dynamic_rnn"])
@dispatch.add_dispatch_support
def dynamic_rnn(cell,
                inputs,
                sequence_length=None,
                initial_state=None,
                dtype=None,
                parallel_iterations=None,
                swap_memory=False,
                time_major=False,
                scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  Performs fully dynamic unrolling of `inputs`.

  Example:

  ```python
  # create a BasicRNNCell
  rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)

  # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]

  # defining initial state
  initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)

  # 'state' is a tensor of shape [batch_size, cell_state_size]
  outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data,
                                     initial_state=initial_state,
                                     dtype=tf.float32)
  ```

  ```python
  # create 2 LSTMCells
  rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]

  # create a RNN cell composed sequentially of a number of RNNCells
  multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers)

  # 'outputs' is a tensor of shape [batch_size, max_time, 256]
  # 'state' is a N-tuple where N is the number of LSTMCells containing a
  # tf.nn.rnn_cell.LSTMStateTuple for each cell
  outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
                                     inputs=data,
                                     dtype=tf.float32)
  ```

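  A sketch of handling variable-length rows with `sequence_length` (the tensor
  names and sizes here are illustrative only):

  ```python
  # 'data' is a [batch_size, max_time, depth] float tensor and 'lengths' is an
  # int32 vector holding the number of valid steps for each batch entry.
  cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(64)
  outputs, state = tf.compat.v1.nn.dynamic_rnn(
      cell, data, sequence_length=lengths, dtype=tf.float32)
  # Steps past each row's length yield zero outputs, and 'state' holds the
  # state taken at the last valid step of that row.
  ```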

  Args:
    cell: An instance of RNNCell.
    inputs: The RNN inputs.
      If `time_major == False` (default), this must be a `Tensor` of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such elements.
      If `time_major == True`, this must be a `Tensor` of shape: `[max_time,
        batch_size, ...]`, or a nested tuple of such elements. This may also be
        a (possibly nested) tuple of Tensors satisfying this property.  The
        first two dimensions must match across all the inputs, but otherwise the
        ranks and other shape components may differ. In this case, input to
        `cell` at each time-step will replicate the structure of these tuples,
        except for the time dimension (from which the time is taken). The input
        to `cell` at each time step will be a `Tensor` or (possibly nested)
        tuple of Tensors each with dimensions `[batch_size, ...]`.
    sequence_length: (optional) An int32/int64 vector sized `[batch_size]`. Used
      to copy-through state and zero-out outputs when past a batch element's
      sequence length.  This parameter enables users to extract the last valid
      state and properly padded outputs, so it is provided for correctness.
    initial_state: (optional) An initial state for the RNN. If `cell.state_size`
      is an integer, this must be a `Tensor` of appropriate type and shape
      `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this
      should be a tuple of tensors having shapes `[batch_size, s] for s in
      cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency and
      can be run in parallel, will be.  This parameter trades off time for
      space.  Values >> 1 use more memory but take less time, while smaller
      values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs which
      would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors. If true,
      these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,
      these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using
      `time_major = True` is a bit more efficient because it avoids transposes
      at the beginning and end of the RNN calculation.  However, most TensorFlow
      data is batch-major, so by default this function accepts input and emits
      output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:

    outputs: The RNN output `Tensor`.

      If time_major == False (default), this will be a `Tensor` shaped:
        `[batch_size, max_time, cell.output_size]`.

      If time_major == True, this will be a `Tensor` shaped:
        `[max_time, batch_size, cell.output_size]`.

      Note, if `cell.output_size` is a (possibly nested) tuple of integers
      or `TensorShape` objects, then `outputs` will be a tuple having the
      same structure as `cell.output_size`, containing Tensors having shapes
      corresponding to the shape data in `cell.output_size`.

    state: The final state.  If `cell.state_size` is an int, this
      will be shaped `[batch_size, cell.state_size]`.  If it is a
      `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
      If it is a (possibly nested) tuple of ints or `TensorShape`, this will
      be a tuple having the corresponding shapes. If cells are `LSTMCells`
      `state` will be a tuple containing a `LSTMStateTuple` for each cell.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.

  @compatibility(TF2)
  `tf.compat.v1.nn.dynamic_rnn` is not compatible with eager execution and
  `tf.function`. Please use `tf.keras.layers.RNN` instead for TF2 migration.
  Take LSTM as an example, you can instantiate a `tf.keras.layers.RNN` layer
  with `tf.keras.layers.LSTMCell`, or directly via `tf.keras.layers.LSTM`. Once
  the keras layer is created, you can get the output and states by calling
  the layer with input and states. Please refer to [this
  guide](https://www.tensorflow.org/guide/keras/rnn) for more details about
  Keras RNN. You can also find more details about the difference and comparison
  between Keras RNN and TF compat v1 rnn in [this
  document](https://github.com/tensorflow/community/blob/master/rfcs/20180920-unify-rnn-interface.md)

  #### Structural Mapping to Native TF2

  Before:

  ```python
  # create 2 LSTMCells
  rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]

  # create a RNN cell composed sequentially of a number of RNNCells
  multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers)

  # 'outputs' is a tensor of shape [batch_size, max_time, 256]
  # 'state' is a N-tuple where N is the number of LSTMCells containing a
  # tf.nn.rnn_cell.LSTMStateTuple for each cell
  outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
                                               inputs=data,
                                               dtype=tf.float32)
  ```

  After:

  ```python
  # RNN layer can take a list of cells, which will then stack them together.
  # By default, keras RNN will only return the last timestep output and will not
  # return states. If you need whole time sequence output as well as the states,
  # you can set `return_sequences` and `return_state` to True.
  rnn_layer = tf.keras.layers.RNN([tf.keras.layers.LSTMCell(128),
                                   tf.keras.layers.LSTMCell(256)],
                                  return_sequences=True,
                                  return_state=True)
  outputs, output_states = rnn_layer(inputs, states)
  ```

  #### How to Map Arguments

  | TF1 Arg Name          | TF2 Arg Name    | Note                             |
  | :-------------------- | :-------------- | :------------------------------- |
  | `cell`                | `cell`          | In the RNN layer constructor     |
  | `inputs`              | `inputs`        | In the RNN layer `__call__`      |
  | `sequence_length`     | Not used        | Adding masking layer before RNN  :
  :                       :                 : to achieve the same result.      :
  | `initial_state`       | `initial_state` | In the RNN layer `__call__`      |
  | `dtype`               | `dtype`         | In the RNN layer constructor     |
  | `parallel_iterations` | Not supported   |                                  |
  | `swap_memory`         | Not supported   |                                  |
  | `time_major`          | `time_major`    | In the RNN layer constructor     |
  | `scope`               | Not supported   |                                  |
  @end_compatibility
  """
  ...


def _dynamic_rnn_loop(cell,
                      inputs,
                      initial_state,
                      parallel_iterations,
                      swap_memory,
                      sequence_length=None,
                      dtype=None):
  """Internal implementation of Dynamic RNN.

  Args:
    cell: An instance of RNNCell.
    inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
      tuple of such elements.
    initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
      `cell.state_size` is a tuple, then this should be a tuple of tensors
      having shapes `[batch_size, s] for s in cell.state_size`.
    parallel_iterations: Positive Python int.
    swap_memory: A Python boolean
    sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
    dtype: (optional) Expected dtype of output. If not specified, inferred from
      initial_state.

  Returns:
    Tuple `(final_outputs, final_state)`.
    final_outputs:
      A `Tensor` of shape `[time, batch_size, cell.output_size]`.  If
      `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
      the corresponding shapes.
    final_state:
      A `Tensor`, or possibly nested tuple of Tensors, matching in length
      and shapes to `initial_state`.

  Raises:
    ValueError: If the input depth cannot be inferred via shape inference
      from the inputs.
    ValueError: If time_step is not the same for all the elements in the
      inputs.
    ValueError: If batch_size is not the same for all the elements in the
      inputs.
  """
  ...


@tf_export(v1=["nn.raw_rnn"])
@dispatch.add_dispatch_support
def raw_rnn(cell, loop_fn, parallel_iterations=None, swap_memory=False,
            scope=None):
  """Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.

  **NOTE: This method is still in testing, and the API may change.**

  This function is a more primitive version of `dynamic_rnn` that provides
  more direct access to the inputs each iteration.  It also provides more
  control over when to start and finish reading the sequence, and
  what to emit for the output.

  For example, it can be used to implement the dynamic decoder of a seq2seq
  model.

  Instead of working with `Tensor` objects, most operations work with
  `TensorArray` objects directly.

  The operation of `raw_rnn`, in pseudo-code, is basically the following:

  ```python
  time = tf.constant(0, dtype=tf.int32)
  (finished, next_input, initial_state, emit_structure, loop_state) = loop_fn(
      time=time, cell_output=None, cell_state=None, loop_state=None)
  emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
  state = initial_state
  while not all(finished):
    (output, cell_state) = cell(next_input, state)
    (next_finished, next_input, next_state, emit, loop_state) = loop_fn(
        time=time + 1, cell_output=output, cell_state=cell_state,
        loop_state=loop_state)
    # Emit zeros and copy forward state for minibatch entries that are finished.
    state = tf.where(finished, state, next_state)
    emit = tf.where(finished, tf.zeros_like(emit_structure), emit)
    emit_ta = emit_ta.write(time, emit)
    # If any new minibatch entries are marked as finished, mark these.
    finished = tf.logical_or(finished, next_finished)
    time += 1
  return (emit_ta, state, loop_state)
  ```

  with the additional properties that output and state may be (possibly nested)
  tuples, as determined by `cell.output_size` and `cell.state_size`, and
  as a result the final `state` and `emit_ta` may themselves be tuples.

  A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:

  ```python
  inputs = tf.compat.v1.placeholder(shape=(max_time, batch_size, input_depth),
                          dtype=tf.float32)
  sequence_length = tf.compat.v1.placeholder(shape=(batch_size,),
  dtype=tf.int32)
  inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
  inputs_ta = inputs_ta.unstack(inputs)

  cell = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)

  def loop_fn(time, cell_output, cell_state, loop_state):
    emit_output = cell_output  # == None for time == 0
    if cell_output is None:  # time == 0
      next_cell_state = cell.zero_state(batch_size, tf.float32)
    else:
      next_cell_state = cell_state
    elements_finished = (time >= sequence_length)
    finished = tf.reduce_all(elements_finished)
    next_input = tf.cond(
        finished,
        lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
        lambda: inputs_ta.read(time))
    next_loop_state = None
    return (elements_finished, next_input, next_cell_state,
            emit_output, next_loop_state)

  outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
  outputs = outputs_ta.stack()
  ```

  Args:
    cell: An instance of RNNCell.
    loop_fn: A callable that takes inputs `(time, cell_output, cell_state,
      loop_state)` and returns the tuple `(finished, next_input,
      next_cell_state, emit_output, next_loop_state)`. Here `time` is an int32
      scalar `Tensor`, `cell_output` is a `Tensor` or (possibly nested) tuple of
      tensors as determined by `cell.output_size`, and `cell_state` is a
      `Tensor` or (possibly nested) tuple of tensors, as determined by the
      `loop_fn` on its first call (and should match `cell.state_size`).
      The outputs are: `finished`, a boolean `Tensor` of
      shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
      `next_cell_state`: the next state to feed to `cell`,
      and `emit_output`: the output to store for this iteration.  Note that
        `emit_output` should be a `Tensor` or (possibly nested) tuple of tensors
        which is aggregated in the `emit_ta` inside the `while_loop`. For the
        first call to `loop_fn`, the `emit_output` corresponds to the
        `emit_structure` which is then used to determine the size of the
        `zero_tensor` for the `emit_ta` (defaults to `cell.output_size`). For
        the subsequent calls to the `loop_fn`, the `emit_output` corresponds to
        the actual output tensor that is to be aggregated in the `emit_ta`. The
        parameter `cell_state` and output `next_cell_state` may be either a
        single or (possibly nested) tuple of tensors.  The parameter
        `loop_state` and output `next_loop_state` may be either a single or
        (possibly nested) tuple of `Tensor` and `TensorArray` objects.  This
        last parameter may be ignored by `loop_fn` and the return value may be
        `None`.  If it is not `None`, then the `loop_state` will be propagated
        through the RNN loop, for use purely by `loop_fn` to keep track of its
        own state. The `next_loop_state` parameter returned may be `None`.  The
        first call to `loop_fn` will be `time = 0`, `cell_output = None`,
      `cell_state = None`, and `loop_state = None`.  For this call: The
        `next_cell_state` value should be the value with which to initialize the
        cell's state.  It may be a final state from a previous RNN or it may be
        the output of `cell.zero_state()`.  It should be a (possibly nested)
        tuple structure of tensors. If `cell.state_size` is an integer, this
        must be a `Tensor` of appropriate type and shape `[batch_size,
        cell.state_size]`. If `cell.state_size` is a `TensorShape`, this must be
        a `Tensor` of appropriate type and shape `[batch_size] +
        cell.state_size`. If `cell.state_size` is a (possibly nested) tuple of
        ints or `TensorShape`, this will be a tuple having the corresponding
        shapes. The `emit_output` value may be either `None` or a (possibly
        nested) tuple structure of tensors, e.g., `(tf.zeros(shape_0,
        dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`. If this first
        `emit_output` return value is `None`, then the `emit_ta` result of
        `raw_rnn` will have the same structure and dtypes as `cell.output_size`.
        Otherwise `emit_ta` will have the same structure, shapes (prepended with
        a `batch_size` dimension), and dtypes as `emit_output`.  The actual
        values returned for `emit_output` at this initializing call are ignored.
        Note, this emit structure must be consistent across all time steps.
    parallel_iterations: (Default: 32).  The number of iterations to run in
      parallel.  Those operations which do not have any temporal dependency and
      can be run in parallel, will be.  This parameter trades off time for
      space.  Values >> 1 use more memory but take less time, while smaller
      values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU.  This allows training RNNs which
      would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A tuple `(emit_ta, final_state, final_loop_state)` where:

    `emit_ta`: The RNN output `TensorArray`.
       If `loop_fn` returns a (possibly nested) set of Tensors for
       `emit_output` during initialization, (inputs `time = 0`,
       `cell_output = None`, and `loop_state = None`), then `emit_ta` will
       have the same structure, dtypes, and shapes as `emit_output` instead.
       If `loop_fn` returns `emit_output = None` during this call,
       the structure of `cell.output_size` is used:
       If `cell.output_size` is a (possibly nested) tuple of integers
       or `TensorShape` objects, then `emit_ta` will be a tuple having the
       same structure as `cell.output_size`, containing TensorArrays whose
       elements' shapes correspond to the shape data in `cell.output_size`.

    `final_state`: The final cell state.  If `cell.state_size` is an int, this
      will be shaped `[batch_size, cell.state_size]`.  If it is a
      `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
      If it is a (possibly nested) tuple of ints or `TensorShape`, this will
      be a tuple having the corresponding shapes.

    `final_loop_state`: The final loop state as returned by `loop_fn`.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
      a `callable`.
  """
  ...


@deprecation.deprecated(None, "Please use `keras.layers.RNN(cell, unroll=True)`, "
                        "which is equivalent to this API")
@tf_export(v1=["nn.static_rnn"])
@dispatch.add_dispatch_support
def static_rnn(cell,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  The simplest form of RNN network generated is:

  ```python
    state = cell.zero_state(...)
    outputs = []
    for input_ in inputs:
      output, state = cell(input_, state)
      outputs.append(output)
    return (outputs, state)
  ```
  However, a few other options are available:

  An initial state can be provided.
  If the sequence_length vector is provided, dynamic calculation is performed.
  This method of calculation does not compute the RNN steps past the maximum
  sequence length of the minibatch (thus saving computational time),
  and properly propagates the state at an example's sequence length
  to the final state output.

  The dynamic calculation performed is, at time `t` for batch row `b`,

  ```python
    (output, state)(b, t) =
      (t >= sequence_length(b))
        ? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
        : cell(input(b, t), state(b, t - 1))
  ```

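  A minimal usage sketch (the sizes and names below are illustrative only, not
  part of this module):

  ```python
  # static_rnn consumes a Python list with one [batch_size, input_size] tensor
  # per time step.
  inputs = [tf.compat.v1.placeholder(tf.float32, [4, 10]) for _ in range(5)]
  cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(32)
  outputs, state = tf.compat.v1.nn.static_rnn(cell, inputs, dtype=tf.float32)
  # len(outputs) == 5 and each element has shape [4, 32].
  ```
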
  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size,
      input_size]`, or a nested tuple of such elements.
    initial_state: (optional) An initial state for the RNN. If `cell.state_size`
      is an integer, this must be a `Tensor` of appropriate type and shape
      `[batch_size, cell.state_size]`. If `cell.state_size` is a tuple, this
      should be a tuple of tensors having shapes `[batch_size, s] for s in
      cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    sequence_length: Specifies the length of each sequence in inputs. An int32
      or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:

    - outputs is a length T list of outputs (one for each input), or a nested
      tuple of such elements.
    - state is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the input depth
      (column size) cannot be inferred from inputs via shape inference.
  """
  ...


@deprecation.deprecated(None, "Please use `keras.layers.RNN(cell, stateful=True)`, "
                        "which is equivalent to this API")
@tf_export(v1=["nn.static_state_saving_rnn"])
@dispatch.add_dispatch_support
def static_state_saving_rnn(cell,
                            inputs,
                            state_saver,
                            state_name,
                            sequence_length=None,
                            scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Args:
    cell: An instance of `RNNCell`.
    inputs: A length T list of inputs, each a `Tensor` of shape `[batch_size,
      input_size]`.
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: Python string or tuple of strings.  The name to use with the
      state_saver. If the cell returns tuples of states (i.e., `cell.state_size`
      is a tuple) then `state_name` should be a tuple of strings having the same
      length as `cell.state_size`.  Otherwise it should be a single string.
    sequence_length: (optional) An int32/int64 vector size [batch_size]. See the
      documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      states is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the arity and
     type of `state_name` does not match that of `cell.state_size`.
  """
  ...


@deprecation.deprecated(None, "Please use "
                        "`keras.layers.Bidirectional(keras.layers.RNN(cell, unroll=True))`, "
                        "which is equivalent to this API")
@tf_export(v1=["nn.static_bidirectional_rnn"])
@dispatch.add_dispatch_support
def static_bidirectional_rnn(cell_fw,
                             cell_bw,
                             inputs,
                             initial_state_fw=None,
                             initial_state_bw=None,
                             dtype=None,
                             sequence_length=None,
                             scope=None):
  """Creates a bidirectional recurrent neural network.

  Similar to the unidirectional case above (rnn) but takes input and builds
  independent forward and backward RNNs with the final forward and backward
  outputs depth-concatenated, such that the output will have the format
  [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
  forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states are
  ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not given.

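  A minimal usage sketch (sizes and names are illustrative only, not part of
  this module):

  ```python
  inputs = [tf.compat.v1.placeholder(tf.float32, [4, 10]) for _ in range(5)]
  cell_fw = tf.compat.v1.nn.rnn_cell.BasicRNNCell(16)
  cell_bw = tf.compat.v1.nn.rnn_cell.BasicRNNCell(16)
  outputs, state_fw, state_bw = tf.compat.v1.nn.static_bidirectional_rnn(
      cell_fw, cell_bw, inputs, dtype=tf.float32)
  # Each element of 'outputs' has shape [4, 16 + 16]: the forward and backward
  # outputs are depth-concatenated.
  ```
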
  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape [batch_size,
      input_size], or a nested tuple of such elements.
    initial_state_fw: (optional) An initial state for the forward RNN. This must
      be a tensor of appropriate type and shape `[batch_size,
      cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a
      tuple of tensors having shapes `[batch_size, s] for s in
      cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using the
      corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial state.  Required if either
      of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where:
      outputs is a length `T` list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs.
      output_state_fw is the final state of the forward rnn.
      output_state_bw is the final state of the backward rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None or an empty list.
  r   r   r  r  r   r   )r   Nr   c                 s   s"   | ]\}}t ||gd V  qdS )r   N)r   r   )r0   r   r   r%   r%   r&   r6     s   z+static_bidirectional_rnn.<locals>.<genexpr>rd   )r   r   r   r7   r  r)   r   r   r"  r   r8   rw   rV   rh   )r   r   r   r   r   r/   ri   r   r   r   r   r   Zreversed_inputsr   r   r   Zflat_output_fwZflat_output_bwflat_outputsr   r%   r%   r&   static_bidirectional_rnn0  sH    6
(	
F


r+  )F)NNNNNFFN)NNNNFFN)NN)NFN)NNNN)NN)NNNNN)*__doc__tensorflow.python.eagerr   tensorflow.python.frameworkr   r   r   r   r   tensorflow.python.opsr   r	   r
   r   r   r   r   r   r   tensorflow.python.utilr   r   r    tensorflow.python.util.tf_exportr   r   r'   r-   r;   rA   rI   rp   r   
deprecatedadd_dispatch_supportr   r   r   r  r"  r)  r+  r%   r%   r%   r&   <module>   s    
 *

         
           
 Y
     E
     0
  ]

     