"""Adam for TensorFlow."""
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.AdamOptimizer"])
class AdamOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adam algorithm.

  References:
    Adam - A Method for Stochastic Optimization:
      [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
      ([pdf](https://arxiv.org/pdf/1412.6980.pdf))

  @compatibility(TF2)
  tf.compat.v1.train.AdamOptimizer is compatible with eager mode and
  `tf.function`.
  When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
  `epsilon` can each be a callable that takes no arguments and returns the
  actual value to use. This can be useful for changing these values across
  different invocations of optimizer functions.
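
  For example (a minimal sketch; the callable below simply returns a fixed
  value, but it could read the value from anywhere):

  ```python
  def lr_fn():
    # Called again whenever the optimizer reads the learning rate under
    # eager execution, so the returned value can change between steps.
    return 0.001

  optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr_fn)
  ```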

  To switch to native TF2 style, use [`tf.keras.optimizers.Adam`]
  (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam)
  instead. Note that due to implementation differences,
  `tf.keras.optimizers.Adam` and
  `tf.compat.v1.train.AdamOptimizer` may have slight differences in
  floating point numerics even though the formula used for the variable
  updates still matches.

  #### Structural Mapping to Native TF2

  Before:

  ```python
  optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)
  ```

  After:

  ```python
  optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
  ```

  #### How to Map Arguments

  |TF1 Arg Name          |TF2 Arg Name |Note                  |
  |----------------------|-------------|----------------------|
  |learning_rate         |learning_rate|Be careful of setting learning_rate as a
  :                      :             : tensor value computed from the global
  :                      :             : step. In TF1 this was usually meant to
  :                      :             : imply a dynamic learning rate and would
  :                      :             : recompute in each step. In TF2 (eager +
  :                      :             : function) it will treat it as a scalar
  :                      :             : value that only gets computed once
  :                      :             : instead of a symbolic placeholder to be
  :                      :             : computed each time.                   :
  |beta1                 |beta_1       |                      |
  |beta2                 |beta_2       |                      |
  |epsilon               |epsilon      | Default value is 1e-08 in TF1, but
  :                      :             : 1e-07 in TF2.                     :
  |use_locking           |N/A          |Not applicable in TF2. |
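
  For the dynamic learning-rate case called out in the table above, the usual
  TF2 replacement for a tensor computed from the global step is a
  `tf.keras.optimizers.schedules` object (the decay values below are
  arbitrary, for illustration only):

  ```python
  lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
      initial_learning_rate=0.001, decay_steps=1000, decay_rate=0.96)
  optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
  ```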

  #### Before & After Usage Example

  Before:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  After:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  @end_compatibility
  """

  def __init__(self,
               learning_rate=0.001,
               beta1=0.9,
               beta2=0.999,
               epsilon=1e-8,
               use_locking=False,
               name="Adam"):
    """Construct a new Adam optimizer.

    Initialization:

    $$m_0 := 0 \text{(Initialize the 1st moment vector)}$$
    $$v_0 := 0 \text{(Initialize the 2nd moment vector)}$$
    $$t := 0 \text{(Initialize timestep)}$$

    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 2 of the paper:

    $$t := t + 1$$
    $$\text{lr}_t := \mathrm{learning\_rate} *
      \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$

    $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
    $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
    $$\text{variable} := \text{variable} -
      \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
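
    A worked one-step sketch of these formulas in plain NumPy (the gradient
    and hyperparameter values below are arbitrary, for illustration only):

    ```python
    import numpy as np

    m, v, t = 0.0, 0.0, 0                       # moments and timestep
    theta, g = 1.0, 0.5                         # parameter and its gradient
    learning_rate, beta1, beta2, epsilon = 0.001, 0.9, 0.999, 1e-8

    t += 1
    lr_t = learning_rate * np.sqrt(1 - beta2**t) / (1 - beta1**t)
    m = beta1 * m + (1 - beta1) * g             # update 1st moment
    v = beta2 * v + (1 - beta2) * g * g         # update 2nd moment
    theta -= lr_t * m / (np.sqrt(v) + epsilon)  # apply the Adam step
    ```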

    The default value of 1e-8 for epsilon might not be a good default in
    general. For example, when training an Inception network on ImageNet a
    current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
    formulation just before Section 2.1 of the Kingma and Ba paper rather than
    the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
    hat" in the paper.
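
    For example, one of those larger values can be passed directly
    (a minimal usage sketch):

    ```python
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001,
                                                 epsilon=1.0)
    ```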

    The sparse implementation of this algorithm (used when the gradient is an
    IndexedSlices object, typically because of `tf.gather` or an embedding
    lookup in the forward pass) does apply momentum to variable slices even if
    they were not used in the forward pass (meaning they have a gradient equal
    to zero). Momentum decay (beta1) is also applied to the entire momentum
    accumulator. This means that the sparse behavior is equivalent to the dense
    behavior (in contrast to some momentum implementations which ignore momentum
    unless a variable slice was actually used).
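
    A minimal sketch of how such a sparse gradient arises (eager mode; the
    variable shape and indices are made up for illustration):

    ```python
    emb = tf.Variable(tf.random.normal([10, 4]))
    with tf.GradientTape() as tape:
      rows = tf.gather(emb, [1, 3])        # only two rows are used
      loss = tf.reduce_sum(tf.square(rows))
    grad = tape.gradient(loss, emb)        # a tf.IndexedSlices, not dense
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)
    optimizer.apply_gradients([(grad, emb)])  # takes the sparse update path
    ```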

    Args:
      learning_rate: A Tensor or a floating point value.  The learning rate.
      beta1: A float value or a constant float tensor. The exponential decay
        rate for the 1st moment estimates.
      beta2: A float value or a constant float tensor. The exponential decay
        rate for the 2nd moment estimates.
      epsilon: A small constant for numerical stability. This epsilon is
        "epsilon hat" in the Kingma and Ba paper (in the formula just before
        Section 2.1), not the epsilon in Algorithm 1 of the paper.
      use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "Adam".
    """
    super(AdamOptimizer, self).__init__(use_locking, name)
    self._lr = learning_rate
    self._beta1 = beta1
    self._beta2 = beta2
    self._epsilon = epsilon

    # Tensor versions of the constructor arguments, created in _prepare().
    self._lr_t = None
    self._beta1_t = None
    self._beta2_t = None
    self._epsilon_t = None

  def _get_beta_accumulators(self):
    with ops.init_scope():
      if context.executing_eagerly():
        graph = None
      else:
        graph = ops.get_default_graph()
      return (self._get_non_slot_variable("beta1_power", graph=graph),
              self._get_non_slot_variable("beta2_power", graph=graph))

  def _create_slots(self, var_list):
    # Create the beta1 and beta2 accumulators on the same device as the first
    # variable. Pick the variable with the lexicographically smallest name so
    # the device is consistent across workers (these need to go on the same
    # PS, otherwise some updates are silently ignored).
    first_var = min(var_list, key=lambda x: x.name)
    self._create_non_slot_variable(
        initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
    self._create_non_slot_variable(
        initial_value=self._beta2, name="beta2_power", colocate_with=first_var)

    # Create slots for the first and second moments.
    for v in var_list:
      self._zeros_slot(v, "m", self._name)
      self._zeros_slot(v, "v", self._name)

  def _prepare(self):
    lr = self._call_if_callable(self._lr)
    beta1 = self._call_if_callable(self._beta1)
    beta2 = self._call_if_callable(self._beta2)
    epsilon = self._call_if_callable(self._epsilon)

    self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
    self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
    self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
    self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")

  def _apply_dense(self, grad, var):
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_power, beta2_power = self._get_beta_accumulators()
    return training_ops.apply_adam(
        var,
        m,
        v,
        math_ops.cast(beta1_power, var.dtype.base_dtype),
        math_ops.cast(beta2_power, var.dtype.base_dtype),
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._beta1_t, var.dtype.base_dtype),
        math_ops.cast(self._beta2_t, var.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, var):
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_power, beta2_power = self._get_beta_accumulators()
    return training_ops.resource_apply_adam(
        var.handle,
        m.handle,
        v.handle,
        math_ops.cast(beta1_power, grad.dtype.base_dtype),
        math_ops.cast(beta2_power, grad.dtype.base_dtype),
        math_ops.cast(self._lr_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
        grad,
        use_locking=self._use_locking)

  def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power, beta2_power = self._get_beta_accumulators()
    beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(
        var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t])

  def _apply_sparse(self, grad, var):
    return self._apply_sparse_shared(
        grad.values, var, grad.indices,
        lambda x, i, v: state_ops.scatter_add(
            x, i, v, use_locking=self._use_locking))

  def _resource_scatter_add(self, x, i, v):
    with ops.control_dependencies(
        [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
      return x.value()

  def _resource_apply_sparse(self, grad, var, indices):
    return self._apply_sparse_shared(grad, var, indices,
                                     self._resource_scatter_add)

  def _finish(self, update_ops, name_scope):
    # Update the power accumulators.
    with ops.control_dependencies(update_ops):
      beta1_power, beta2_power = self._get_beta_accumulators()
      with ops.colocate_with(beta1_power):
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t, use_locking=self._use_locking)
        update_beta2 = beta2_power.assign(
            beta2_power * self._beta2_t, use_locking=self._use_locking)
    return control_flow_ops.group(
        *update_ops + [update_beta1, update_beta2], name=name_scope)