"""Momentum for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.MomentumOptimizer"])
class MomentumOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Momentum algorithm.

  Computes (if `use_nesterov = False`):

  ```
  accumulation = momentum * accumulation + gradient
  variable -= learning_rate * accumulation
  ```
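
  For example, one update step written out with NumPy (a minimal sketch of the
  formula above with illustrative values; the optimizer itself runs fused
  `training_ops` kernels rather than this Python code):

  ```python
  import numpy as np

  learning_rate, momentum = 0.001, 0.9
  variable = np.array([1.0, 2.0, 3.0])
  accumulation = np.zeros_like(variable)
  gradient = np.array([0.1, 0.2, 0.3])

  # One step: fold the gradient into the accumulator, then step against it.
  accumulation = momentum * accumulation + gradient
  variable -= learning_rate * accumulation
  ```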

  Note that in the dense version of this algorithm, `accumulation` is updated
  and applied regardless of a gradient's value, whereas the sparse version (when
  the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
  embedding) only updates variable slices and corresponding `accumulation` terms
  when that part of the variable was used in the forward pass.
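
  Such a sparse gradient typically comes from an embedding lookup. A minimal
  sketch using the TF2 eager API (names and shapes are illustrative):

  ```python
  embedding = tf.Variable(tf.random.normal([100, 8]))
  with tf.GradientTape() as tape:
    rows = tf.gather(embedding, [3, 7])    # only rows 3 and 7 are read
    loss = tf.reduce_sum(rows ** 2)
  grad = tape.gradient(loss, embedding)    # an `IndexedSlices`, not a dense Tensor
  # Applying `grad` updates only the variable slices (and momentum terms)
  # for rows 3 and 7.
  ```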

  @compatibility(TF2)
  tf.compat.v1.train.MomentumOptimizer is compatible with eager mode and
  `tf.function`.
  When eager execution is enabled, `learning_rate` and `momentum` can each be a
  callable that takes no arguments and returns the actual value to use. This
  can be useful for changing these values across different invocations of
  optimizer functions.
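
  For example (a sketch; `step` and `lr_fn` are illustrative names):

  ```python
  step = tf.Variable(0, trainable=False)

  def lr_fn():
    # Re-evaluated each time gradients are applied, so the value can change
    # between invocations.
    return 0.1 if int(step) < 1000 else 0.01

  optimizer = tf.compat.v1.train.MomentumOptimizer(
      learning_rate=lr_fn, momentum=lambda: 0.9)
  ```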

  To switch to native TF2 style, please directly use
  [`tf.keras.optimizers.SGD`]
  (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/SGD)
  with the `momentum` argument.

  #### Structural mapping to native TF2

  Before:

  ```python
  optimizer = tf.compat.v1.train.MomentumOptimizer(
    learning_rate=learning_rate,
    momentum=momentum,
    use_nesterov=use_nesterov)
  ```

  After:

  ```python
  optimizer = tf.keras.optimizers.SGD(
    learning_rate=learning_rate,
    momentum=momentum,
    nesterov=use_nesterov)
  ```

  #### How to map arguments
  | TF1 Arg Name       | TF2 Arg Name   | Note                             |
  | ------------------ | -------------  | -------------------------------  |
  | `learning_rate`    | `learning_rate`| Be careful when setting          |
  : : : `learning_rate` to a tensor value computed from the global step.    :
  : : : In TF1 this usually implied a dynamic learning rate that would be   :
  : : : recomputed at each step. In TF2 (eager + function) it is treated as :
  : : : a scalar value that is computed only once, rather than a symbolic   :
  : : : placeholder to be recomputed each time.                             :
  | `momentum`         | `momentum`     | -                                |
  | `use_locking`      | -              | Not applicable in TF2.           |
  | `use_nesterov`     | `nesterov`     | -                                |
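
  Regarding the `learning_rate` note above: in TF2, a rate that should change
  over training is usually passed as a schedule or a zero-argument callable
  rather than a tensor computed from the global step (a sketch; parameter
  values are illustrative):

  ```python
  lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
      initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.96)
  optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
  ```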

  #### Before & after usage example
  Before:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.compat.v1.train.MomentumOptimizer(
    learning_rate=0.001,
    momentum=0.9,
    use_nesterov=False)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  After:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.001,
    momentum=0.9,
    nesterov=False)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  @end_compatibility

  FMomentumc                    s(   t t| || || _|| _|| _dS )a  Construct a new Momentum optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value.  The learning rate.
      momentum: A `Tensor` or a floating point value.  The momentum.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Momentum".
      use_nesterov: If `True` use Nesterov Momentum.
        See (Sutskever et al., 2013).
        This implementation always computes gradients at the value of the
        variable(s) passed to the optimizer. Using Nesterov Momentum makes the
        variable(s) track the values called `theta_t + mu*v_t` in the paper.
        This implementation is an approximation of the original formula, valid
        for high values of momentum. It will compute the "adjusted gradient"
        in NAG by assuming that the new gradient will be estimated by the
        current average gradient plus the product of momentum and the change
        in the average gradient.

    References:
      On the importance of initialization and momentum in deep learning:
        [Sutskever et al., 2013]
        (http://proceedings.mlr.press/v28/sutskever13.html)
        ([pdf](http://proceedings.mlr.press/v28/sutskever13.pdf))
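
    For illustration, the effective per-step update (a runnable NumPy sketch;
    the `use_nesterov=True` branch reflects the "adjusted gradient"
    approximation described above and is an assumption about the fused
    kernel's behavior, not something stated elsewhere in this docstring):

    ```python
    import numpy as np

    learning_rate, momentum, use_nesterov = 0.001, 0.9, True
    variable = np.array([1.0, 2.0, 3.0])
    accumulation = np.zeros_like(variable)
    gradient = np.array([0.1, 0.2, 0.3])

    accumulation = momentum * accumulation + gradient
    if use_nesterov:
      # Step against the adjusted gradient: current gradient plus momentum
      # times the accumulated direction.
      variable -= learning_rate * (gradient + momentum * accumulation)
    else:
      variable -= learning_rate * accumulation
    ```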


    N)superr   __init___learning_rate	_momentum_use_nesterov)selflearning_ratemomentumuse_lockingnameuse_nesterov	__class__ _/var/www/html/django/DPS/env/lib/python3.9/site-packages/tensorflow/python/training/momentum.pyr   t   s    zMomentumOptimizer.__init__c                 C   s   |D ]}|  |d| j qd S )Nr   )_zeros_slot_name)r   var_listvr   r   r   _create_slots   s    zMomentumOptimizer._create_slotsc                 C   sL   | j }t|r| }tj|dd| _| j}t|r8| }tj|dd| _d S )Nr   )r   r   )r   callabler   convert_to_tensor_learning_rate_tensorr   _momentum_tensor)r   r   r   r   r   r   _prepare   s    zMomentumOptimizer._preparec              	   C   sF   |  |d}tj||t| j|jj|t| j|jj| j	| j
djS Nr   )r   r   )get_slotr   apply_momentumr   castr    dtype
base_dtyper!   _use_lockingr   opr   gradvarmomr   r   r   _apply_dense   s    zMomentumOptimizer._apply_densec              	   C   sH   |  |d}tj|j|jt| j|jj|t| j	|jj| j
| jdS r#   )r$   r   resource_apply_momentumhandler   r&   r    r'   r(   r!   r)   r   r+   r   r   r   _resource_apply_dense   s    z'MomentumOptimizer._resource_apply_densec              
   C   sL   |  |d}tj||t| j|jj|j|j	t| j
|jj| j| jdjS r#   )r$   r   sparse_apply_momentumr   r&   r    r'   r(   valuesindicesr!   r)   r   r*   r+   r   r   r   _apply_sparse   s    zMomentumOptimizer._apply_sparsec              
   C   sF   |  |d}tj|j|jt| j|j||t| j|j| j	| j
dS r#   )r$   r   resource_sparse_apply_momentumr1   r   r&   r    r'   r!   r)   r   )r   r,   r-   r5   r.   r   r   r   _resource_apply_sparse   s    z(MomentumOptimizer._resource_apply_sparse)Fr	   F)__name__
__module____qualname____doc__r   r   r"   r/   r2   r6   r8   __classcell__r   r   r   r   r      s   [ "


r   N)r<   tensorflow.python.frameworkr   tensorflow.python.opsr   tensorflow.python.trainingr   r    tensorflow.python.util.tf_exportr   	Optimizerr   r   r   r   r   <module>   s   
