"""Base class of optimizer.

This is under development, and subject to interface/implementation changes.
"""

import abc

import tensorflow.compat.v2 as tf
from absl import logging

from keras import backend
from keras import initializers
from keras.optimizers.optimizer_v2 import utils as optimizer_utils
from keras.optimizers.schedules import learning_rate_schedule
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


class _BaseOptimizer(tf.__internal__.tracking.AutoTrackable):
    """Optimizer base class, which only supports non-distribute use case."""

    def __init__(
        self,
        name,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        **kwargs,
    ):
        self.name = name
        self.clipnorm = clipnorm
        self.global_clipnorm = global_clipnorm
        self.clipvalue = clipvalue
        self.use_ema = use_ema
        self.jit_compile = jit_compile
        if not tf.config.list_physical_devices("GPU"):
            # XLA compilation only helps when training on GPU, so turn off
            # `jit_compile` if no GPU is found.
            self.jit_compile = False
        if use_ema:
            # Verify the arguments related to EMA.
            if ema_momentum > 1 or ema_momentum < 0:
                raise ValueError(
                    "`ema_momentum` must be in the range [0, 1]. "
                    f"Received: ema_momentum={ema_momentum}"
                )
            if ema_overwrite_frequency and (
                not isinstance(ema_overwrite_frequency, int)
                or ema_overwrite_frequency < 1
            ):
                raise ValueError(
                    "`ema_overwrite_frequency` must be an integer > 1 or "
                    "None. Received: ema_overwrite_frequency="
                    f"{ema_overwrite_frequency}"
                )
        self.ema_momentum = ema_momentum
        self.ema_overwrite_frequency = ema_overwrite_frequency

        if self.clipnorm is not None and self.global_clipnorm is not None:
            raise ValueError(
                "At most one of `clipnorm` and `global_clipnorm` can "
                f"be set. Received: clipnorm={self.clipnorm}, "
                f"global_clipnorm={self.global_clipnorm}."
            )

        self._create_iteration_variable()
        self._process_kwargs(kwargs)
        self._variables = []

    def _create_iteration_variable(self):
        """Create the iterations counter variable."""
        with tf.init_scope():
            # Lift the variable creation to init scope to avoid environment
            # issues.
            self._iterations = tf.Variable(
                0, name="iteration", dtype=tf.int64, trainable=False
            )

    def _process_kwargs(self, kwargs):
        kwargs.pop("is_legacy_optimizer", None)
        legacy_kwargs = {
            "lr",
            "decay",
            "gradient_aggregator",
            "gradient_transformers",
        }
        for k in kwargs:
            if k in legacy_kwargs:
                logging.warning(
                    "%s is deprecated in `optimizer_experimental.Optimizer`"
                    ", please check the docstring for valid arguments.",
                    k,
                )
            else:
                raise TypeError(
                    f"{k} is not a valid argument, kwargs should be empty "
                    " for `optimizer_experimental.Optimizer`."
                )

    def _var_key(self, variable):
        """Get a unique identifier of the given variable."""
        return variable._unique_id

    def _deduplicate_sparse_grad(self, grads):
        """Deduplicate sparse gradient.

        For sparse gradients, i.e., gradient is of type `tf.IndexedSlices`,
        it is possible that `gradient.indices` has duplicated indices.
        This function adds up values for the duplicated indices, and returns
        a `tf.IndexedSlices` with indices of unique values.
        """
        processed_grads = []
        for grad in grads:
            if isinstance(grad, tf.IndexedSlices):
                values = grad.values
                indices = grad.indices
                unique_indices, new_index_positions = tf.unique(indices)
                summed_values = tf.math.unsorted_segment_sum(
                    values, new_index_positions, tf.shape(unique_indices)[0]
                )
                processed_grads.append(
                    tf.IndexedSlices(
                        summed_values, unique_indices, grad.dense_shape
                    )
                )
            else:
                processed_grads.append(grad)

        return processed_grads

    @abc.abstractmethod
    def update_step(self, gradient, variable):
        """Function to update variable value based on given gradients.

        This method must be implemented in customized optimizers.

        Args:
          gradient: backpropagated gradient of the given variable.
          variable: variable whose value needs to be updated.

        Returns:
          An `Operation` that applies the specified gradients.
        """
        raise NotImplementedError

    @tf.function(jit_compile=True)
    def _update_step_xla(self, gradient, variable, key):
        """A wrapper of `update_step` to enable XLA acceleration.

        Due to the `tf.function` tracing mechanism, for (gradient, variable)
        pairs of the same shape and dtype, the execution graph always invokes
        the first pair it has seen. Thus, we need a `key` argument to make
        each (gradient, variable) pair unique. In addition, XLA cannot
        understand string input, so the key is an integer.

        Args:
          gradient: backpropagated gradient of the given variable.
          variable: variable whose value needs to be updated.
          key (int): a unique key that identifies the variable.

        Returns:
          An `Operation` that applies the specified gradients.
        """
        return self._update_step(gradient, variable)

    def _update_step(self, gradient, variable):
        if getattr(variable, "_unique_id", None) is None:
            # Variable has no `_unique_id` (e.g. a restored placeholder
            # variable); skip it.
            return
        if self._var_key(variable) not in self._index_dict:
            raise KeyError(
                f"The optimizer cannot recognize variable {variable.name}. "
                "This usually means that you're reusing an optimizer "
                "previously created for a different model. Try creating a "
                "new optimizer instance."
            )
        self.update_step(gradient, variable)

    def compute_gradients(self, loss, var_list, tape=None):
        """Compute gradients of loss on trainable variables.

        Args:
          loss: `Tensor` or callable. If a callable, `loss` should take no
            arguments and return the value to minimize.
          var_list: list or tuple of `Variable` objects to update to minimize
            `loss`.
          tape: (Optional) `tf.GradientTape`. If `loss` is provided as a
            `Tensor`, the tape that computed the `loss` must be provided.

        Returns:
          A list of (gradient, variable) pairs. Variable is always present,
          but gradient can be `None`.
        """
        if not callable(loss) and tape is None:
            raise ValueError(
                "`tape` is required when a `Tensor` loss is passed. "
                f"Received: loss={loss}, tape={tape}."
            )
        if tape is None:
            tape = tf.GradientTape()
        if callable(loss):
            with tape:
                tape.watch(var_list)
                loss = loss()
        grads = tape.gradient(loss, var_list)
        return list(zip(grads, var_list))

    def _clip_gradients(self, grads):
        clipped_grads = []
        if self.clipnorm and self.clipnorm > 0:
            for g in grads:
                if g is None:
                    clipped_grads.append(g)
                else:
                    clipped_grads.append(tf.clip_by_norm(g, self.clipnorm))
            return clipped_grads

        if self.global_clipnorm and self.global_clipnorm > 0:
            return tf.clip_by_global_norm(grads, self.global_clipnorm)[0]

        if self.clipvalue and self.clipvalue > 0:
            for g in grads:
                if g is None:
                    clipped_grads.append(g)
                else:
                    clipped_grads.append(
                        tf.clip_by_value(
                            g,
                            clip_value_min=-self.clipvalue,
                            clip_value_max=self.clipvalue,
                        )
                    )
            return clipped_grads

        return grads

    @property
    def iterations(self):
        """The number of training steps this `optimizer` has run.

        By default, iterations would be incremented by one every time
        `apply_gradients()` is called.
        """
        return self._iterations

    @iterations.setter
    def iterations(self, variable):
        if getattr(self, "_built", False):
            raise RuntimeError(
                "Cannot set `iterations` to a new Variable after "
                "the Optimizer weights have been created. Here it is "
                f"attempting to set `iterations` to {variable}. "
                "Usually this means you are trying to set `iterations`"
                " after calling `apply_gradients()`. Please set "
                "`iterations` before calling `apply_gradients()`."
            )
        self._iterations = variable

    @property
    def learning_rate(self):
        if not hasattr(self, "_learning_rate") or self._learning_rate is None:
            raise ValueError(
                "Missing learning rate, please set self.learning_rate at"
                " optimizer creation time."
            )
        lr = self._learning_rate
        if isinstance(lr, learning_rate_schedule.LearningRateSchedule):
            # If the learning rate is a `LearningRateSchedule`, return the
            # schedule value at the current iteration, which is stored in
            # `self._current_learning_rate`.
            return self._current_learning_rate
        return lr

    @learning_rate.setter
    def learning_rate(self, learning_rate):
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            self._learning_rate = learning_rate
        else:
            if isinstance(
                self._learning_rate,
                learning_rate_schedule.LearningRateSchedule,
            ):
                raise TypeError(
                    "This optimizer was created with a `LearningRateSchedule`"
                    " object as its `learning_rate` constructor argument, "
                    "hence its learning rate is not settable. If you need the"
                    " learning rate to be settable, you should instantiate "
                    "the optimizer with a float `learning_rate` argument."
                )
            self._learning_rate.assign(learning_rate)

    @property
    @doc_controls.do_not_generate_docs
    def lr(self):
        """Alias of `learning_rate()`.

        `lr()` is heavily called in workflows using `optimizer_v2.OptimizerV2`,
        so we keep it for backward compatibility.
        """
        return self.learning_rate

    @lr.setter
    @doc_controls.do_not_generate_docs
    def lr(self, learning_rate):
        self.learning_rate = learning_rate

    def _build_learning_rate(self, learning_rate):
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            # Create a variable to hold the current learning rate.
            self._current_learning_rate = tf.Variable(
                learning_rate(self.iterations),
                name="learning_rate",
                dtype=tf.float32,
                trainable=False,
            )
            return learning_rate
        return tf.Variable(
            learning_rate, dtype=backend.floatx(), trainable=False
        )

    def build(self, var_list):
        """Initialize the optimizer's variables, such as momentum variables.

        This function has to be implemented by subclass optimizers, and
        subclass optimizers need to call `super().build(var_list)`.

        Args:
          var_list: List of model variables to build optimizers on. For
            example, SGD optimizer with momentum will store one momentum
            variable corresponding to each model variable.
        """
        if getattr(self, "_built", False):
            return
        self._build_index_dict(var_list)
        if self.use_ema:
            self._model_variables_moving_average = []
            for var in var_list:
                # Make a copy of the model variables; the copies store the
                # moving average of the model variables.
                self._model_variables_moving_average.append(
                    self.add_variable_from_reference(
                        var, "average", initial_value=var
                    )
                )

    def _build_index_dict(self, var_list):
        """Build variable to index dictionary.

        Build a dictionary that maps variable to the index of it in the given
        var_list.

        Args:
          var_list: List of variables to build index dict on.

        Returns:
          None
        """
        self._index_dict = {}
        for i, var in enumerate(var_list):
            var_key = self._var_key(var)
            self._index_dict[var_key] = i

    def add_variable(self, shape, dtype=None, initializer="zeros", name=None):
        """Create an optimizer variable.

        Args:
          shape: A list of integers, a tuple of integers, or a 1-D Tensor of
            type int32. Defaults to scalar if unspecified.
          dtype: The DType of the optimizer variable to be created. Defaults
            to `tf.keras.backend.floatx` if unspecified.
          initializer: string or callable. Initializer instance.
          name: The name of the optimizer variable to be created.

        Returns:
          An optimizer variable, in the format of tf.Variable.
        """
        if isinstance(initializer, str):
            initializer = initializers.get(initializer)
        if dtype is None:
            dtype = backend.floatx()
        if shape is None:
            shape = []
        variable = tf.Variable(
            initial_value=initializer(shape, dtype), name=name, trainable=False
        )
        self._variables.append(variable)
        return variable

    def add_variable_from_reference(
        self, model_variable, variable_name, shape=None, initial_value=None
    ):
        """Create an optimizer variable from model variable.

        Create an optimizer variable based on the information of model
        variable. For example, in SGD optimizer momentum, for each model
        variable, a corresponding momentum variable is created of the same
        shape and dtype.

        Args:
          model_variable: tf.Variable. The corresponding model variable to the
            optimizer variable to be created.
          variable_name: String. The name prefix of the optimizer variable to
            be created. The created variable's name will follow the pattern
            `{variable_name}/{model_variable.name}`, e.g., `momentum/dense_1`.
          shape: List or Tuple, defaults to None. The shape of the optimizer
            variable to be created. If None, the created variable will have
            the same shape as `model_variable`.
          initial_value: A Tensor, or Python object convertible to a Tensor,
            defaults to None. The initial value of the optimizer variable, if
            None, the initial value will be default to 0.

        Returns:
          An optimizer variable.
        """
        if initial_value is None:
            if shape is None:
                initial_value = tf.zeros(
                    shape=model_variable.shape, dtype=model_variable.dtype
                )
            else:
                initial_value = tf.zeros(shape, dtype=model_variable.dtype)
        variable = tf.Variable(
            initial_value=initial_value,
            name=f"{variable_name}/{model_variable._shared_name}",
            dtype=model_variable.dtype,
            trainable=False,
        )
        self._variables.append(variable)
        return variable

    def minimize(self, loss, var_list, tape=None):
        """Minimize `loss` by updating `var_list`.

        This method simply computes gradient using `tf.GradientTape` and calls
        `apply_gradients()`. If you want to process the gradient before
        applying it, call `tf.GradientTape` and `apply_gradients()` explicitly
        instead of using this function.

        Args:
          loss: `Tensor` or callable. If a callable, `loss` should take no
            arguments and return the value to minimize.
          var_list: list or tuple of `Variable` objects to update to minimize
            `loss`.
          tape: (Optional) `tf.GradientTape`.

        Returns:
          None
        """
        grads_and_vars = self.compute_gradients(loss, var_list, tape)
        self.apply_gradients(grads_and_vars)

    def apply_gradients(self, grads_and_vars):
        """Apply gradients to variables.

        Args:
          grads_and_vars: List of (gradient, variable) pairs.

        Returns:
          None

        Raises:
          TypeError: If `grads_and_vars` is malformed.
        """
        if isinstance(
            self._learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            # Compute the current learning rate at the beginning of the
            # variable update.
            if hasattr(self, "_current_learning_rate"):
                self._current_learning_rate.assign(
                    self._learning_rate(self.iterations)
                )
            else:
                self._current_learning_rate = tf.Variable(
                    self._learning_rate(self.iterations),
                    name="learning_rate",
                    dtype=tf.float32,
                    trainable=False,
                )
        grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)
        if len(list(grads_and_vars)) == 0:
            # It is possible that all gradients are empty. In this case,
            # `apply_gradients` is a no-op.
            return
        grads, trainable_variables = zip(*grads_and_vars)
        scope_name = self.name or "optimizer"
        with tf.name_scope(scope_name):
            with tf.init_scope():
                # Lift variable creation to init scope to avoid environment
                # issues.
                self.build(trainable_variables)
        grads = self._clip_gradients(grads)
        grads = self._deduplicate_sparse_grad(grads)
        grads_and_vars = list(zip(grads, trainable_variables))
        self._internal_apply_gradients(grads_and_vars)

    def _internal_apply_gradients(self, grads_and_vars):
        """Helper function of apply gradients.

        This is required for separating out distributed training logic.

        Args:
          grads_and_vars: List of (gradient, variable) pairs.
        """
        if self.jit_compile:
            for grad, var in grads_and_vars:
                self._update_step_xla(grad, var, id(self._var_key(var)))
        else:
            for grad, var in grads_and_vars:
                self._update_step(grad, var)
        self.iterations.assign_add(1)

    def _update_model_variables_moving_average(self, var_list):
        """Update the stored moving average using the latest value."""
        if self.use_ema:
            for var, average in zip(
                var_list, self._model_variables_moving_average
            ):
                average.assign(
                    self.ema_momentum * average
                    + (1 - self.ema_momentum) * var
                )

    def _overwrite_model_variables_with_average_value(self, var_list):
        """Overwrite model variables with its moving average."""
        if len(var_list) != len(self._model_variables_moving_average):
            raise ValueError(
                f"The length of model variables ({len(var_list)}) to "
                "override does not match the length of model variables "
                "stored in the optimizer "
                f"({len(self._model_variables_moving_average)}). Please "
                "check if the optimizer was called on your model."
            )
        self._overwrite_model_variables_with_average_value_helper(var_list)

    def _overwrite_model_variables_with_average_value_helper(self, var_list):
        """Helper function that overwrites model variables."""
        for var, average_var in zip(
            var_list, self._model_variables_moving_average
        ):
            var.assign(average_var)

    def finalize_variable_values(self, var_list):
        """Set the final value of model's trainable variables.

        Sometimes there are some extra steps before ending the variable
        updates, such as overriding the model variables with its average
        value.

        Args:
          var_list: list of model variables.
        """
        if self.use_ema:
            # If the optimizer uses EMA, then when finalizing, we replace the
            # model variable value with its moving average stored inside the
            # optimizer.
            self._overwrite_model_variables_with_average_value(var_list)

    def _serialize_hyperparameter(self, hyperparameter):
        """Serialize a hyperparameter that can be a numeric or callable."""
        if isinstance(
            hyperparameter, learning_rate_schedule.LearningRateSchedule
        ):
            return learning_rate_schedule.serialize(hyperparameter)
        if isinstance(hyperparameter, tf.Variable):
            return hyperparameter.numpy()
        if callable(hyperparameter):
            return hyperparameter()
        return hyperparameter

    def get_config(self):
        """Returns the config of the optimizer.

        An optimizer config is a Python dictionary (serializable)
        containing the configuration of an optimizer.
        The same optimizer can be reinstantiated later
        (without any saved state) from this configuration.

        Subclass optimizer should override this method to include other
        hyperparameters.

        Returns:
            Python dictionary.
        """
        config = {
            "name": self.name,
            "clipnorm": self.clipnorm,
            "global_clipnorm": self.global_clipnorm,
            "clipvalue": self.clipvalue,
            "use_ema": self.use_ema,
            "ema_momentum": self.ema_momentum,
            "ema_overwrite_frequency": self.ema_overwrite_frequency,
            "jit_compile": self.jit_compile,
            "is_legacy_optimizer": False,
        }
        return config

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config.

        This method is the reverse of `get_config`, capable of instantiating
        the same optimizer from the config dictionary.

        Args:
            config: A Python dictionary, typically the output of get_config.

        Returns:
            An optimizer instance.
        """
        if "learning_rate" in config:
            if isinstance(config["learning_rate"], dict):
                config["learning_rate"] = learning_rate_schedule.deserialize(
                    config["learning_rate"]
                )
        return cls(**config)

    @property
    def variables(self):
        """Returns variables of this Optimizer.

        We override the `variables` property method of `tf.Module` for the
        sake of backward compatibility with `optimizer_v2.Optimizer`'s
        `variables()` method.
        """
        return self._variables


base_optimizer_keyword_args = """name: String. The name to use
        for momentum accumulator weights created by
        the optimizer.
      clipnorm: Float. If set, the gradient of each weight is individually
        clipped so that its norm is no higher than this value.
      clipvalue: Float. If set, the gradient of each weight is clipped to be no
        higher than this value.
      global_clipnorm: Float. If set, the gradient of all weights is clipped so
        that their global norm is no higher than this value.
      use_ema: Boolean, defaults to False. If True, exponential moving average
        (EMA) is applied. EMA consists of computing an exponential moving
        average of the weights of the model (as the weight values change after
        each training batch), and periodically overwriting the weights with
        their moving average.
      ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is  # noqa: E501
        the momentum to use when computing the EMA of the model's weights:
        `new_average = ema_momentum * old_average + (1 - ema_momentum) *
        current_variable_value`.
      ema_overwrite_frequency: Int or None, defaults to None. Only used if
        `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we
        overwrite the model variable by its moving average. If None, the optimizer  # noqa: E501
         does not overwrite model variables in the middle of training, and you
        need to explicitly overwrite the variables at the end of training
        by calling `optimizer.finalize_variable_values()` (which updates the model  # noqa: E501
        variables in-place). When using the built-in `fit()` training loop, this
        happens automatically after the last epoch, and you don't need to do
        anything.
      jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA  # noqa: E501
        compilation. If no GPU device is found, this flag will be ignored.
      **kwargs: keyword arguments only used for backward compatibility."""


@keras_export("keras.optimizers.experimental.Optimizer", v1=[])
class Optimizer(_BaseOptimizer):
    """Abstract optimizer base class.

    This class supports distributed training. If you want to implement your own
    optimizer, please subclass this class instead of _BaseOptimizer.

    Args:
      {{base_optimizer_keyword_args}}

    ### Usage

    ```python
    # Create an optimizer with the desired parameters.
    opt = tf.keras.optimizers.experimental.SGD(learning_rate=0.1)
    var1, var2 = tf.Variable(1.0), tf.Variable(2.0)
    # `loss` is a callable that takes no argument and returns the value
    # to minimize.
    loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
    # Call minimize to update the list of variables.
    opt.minimize(loss, var_list=[var1, var2])
    ```

    ### Processing gradients before applying them

    Calling `minimize()` takes care of both computing the gradients and
    applying them to the variables. If you want to process the gradients
    before applying them you can instead use the optimizer in three steps:

    1.  Compute the gradients with `tf.GradientTape`.
    2.  Process the gradients as you wish.
    3.  Apply the processed gradients with `apply_gradients()`.

    Example:

    ```python
    # Create an optimizer.
    opt = tf.keras.optimizers.experimental.SGD(learning_rate=0.1)
    var1, var2 = tf.Variable(1.0), tf.Variable(2.0)

    # Compute the gradients for a list of variables.
    with tf.GradientTape() as tape:
      loss = 3 * var1 * var1 + 2 * var2 * var2
    grads = tape.gradient(loss, [var1, var2])

    # Process the gradients.
    grads[0] = grads[0] + 1

    # Ask the optimizer to apply the gradients on variables.
    opt.apply_gradients(zip(grads, [var1, var2]))
    ```

    ### Dynamic learning rate

    Dynamic learning rate can be achieved by setting learning rate as a built-in
    or customized `tf.keras.optimizers.schedules.LearningRateSchedule`.

    Example:

    >>> var = tf.Variable(np.random.random(size=(1,)))
    >>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
    ...   initial_learning_rate=.01, decay_steps=20, decay_rate=.1)
    >>> opt = tf.keras.optimizers.experimental.SGD(learning_rate=learning_rate)
    >>> loss = lambda: 3 * var
    >>> opt.minimize(loss, var_list=[var])

    ### Gradients clipping

    Users can clip the gradients before applying them to variables by setting
    `clipnorm`, `clipvalue` and `global_clipnorm`. Note that `clipnorm` and
    `global_clipnorm` cannot both be set.

    Example:

    >>> opt = tf.keras.optimizers.experimental.SGD(learning_rate=1, clipvalue=1)
    >>> var1, var2 = tf.Variable(2.0), tf.Variable(2.0)
    >>> with tf.GradientTape() as tape:
    ...   loss = 2 * var1 + 2 * var2
    >>> grads = tape.gradient(loss, [var1, var2])
    >>> print([grads[0].numpy(), grads[1].numpy()])
    [2.0, 2.0]
    >>> opt.apply_gradients(zip(grads, [var1, var2]))
    >>> # Without clipping, we should get [0, 0], but as gradients are
    >>> # clipped to have max value 1, we get [1.0, 1.0].
    >>> print([var1.numpy(), var2.numpy()])
    [1.0, 1.0]

    ### Using exponential moving average.

    Empirically it has been found that using the exponential moving average
    (EMA) of the trained parameters of a deep network achieves a better
    performance than using its trained parameters directly. Keras optimizers
    allow users to compute this moving average and overwrite the model
    variables at the desired time.

    Example:

    ```python
    # Create an SGD optimizer with EMA on. `ema_momentum` controls the decay
    # rate of the moving average. `ema_momentum=1` means no decay and the stored
    # moving average is always model variable's initial value before training.
    # Reversely, `ema_momentum=0` is equivalent to not using EMA.
    # `ema_overwrite_frequency=3` means every 3 iterations, we overwrite the
    # trainable variables with their moving average values.
    opt = tf.keras.optimizers.experimental.SGD(
        learning_rate=1,
        use_ema=True,
        ema_momentum=0.5,
        ema_overwrite_frequency=3)
    var1, var2 = tf.Variable(2.0), tf.Variable(2.0)
    with tf.GradientTape() as tape:
      loss = var1 + var2
    grads = tape.gradient(loss, [var1, var2])
    # First iteration: [var1, var2] = [1.0, 1.0]
    opt.apply_gradients(zip(grads, [var1, var2]))
    print([var1, var2])

    # Second iteration: [var1, var2] = [0.0, 0.0]
    opt.apply_gradients(zip(grads, [var1, var2]))
    print([var1, var2])

    # Third iteration, without EMA, we should see [var1, var2] = [-1.0, -1.0],
    # but overwriting results in [var1, var2] = [-0.125, -0.125]. The full
    # calculation for the moving average of var1 is:
    # var1=2*0.5**3+1*(1-0.5)*0.5**2+0*(1-0.5)*0.5**1+(-1)*(1-0.5)=-0.125.
    opt.apply_gradients(zip(grads, [var1, var2]))
    print([var1, var2])

    ```
    When the optimizer is constructed with `use_ema=True`, in a custom training
    loop users can explicitly call `finalize_variable_values()` to overwrite
    trainable variables with their EMA values. `finalize_variable_values()` is
    by default called at the end of `model.fit()`.
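
    For example, at the end of a custom training loop (a sketch only; `model`,
    `dataset`, `loss_fn` and `opt` are assumed to already exist):

    ```python
    for data in dataset:
        with tf.GradientTape() as tape:
            loss = loss_fn(model(data))
        grads = tape.gradient(loss, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))
    # Once training ends, overwrite the trainable variables with their
    # EMA values.
    opt.finalize_variable_values(model.trainable_variables)
    ```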

    ### Use with `tf.distribute.Strategy`

    This optimizer class is `tf.distribute.Strategy` aware, which means it
    automatically sums gradients across all replicas. To aggregate gradients
    yourself, call `apply_gradients` with `skip_gradients_aggregation` set to
    True.  This is useful if you need to process aggregated gradients.

    ```python
    # This example is not runnable, it consists of dummy code for simple
    # tutorial.
    strategy = tf.distribute.experimental.TPUStrategy()

    with strategy.scope():
      opt = tf.keras.optimizers.experimental.SGD()
      model = magic_function_that_returns_model()
      gradients = magic_function_that_returns_gradients()
      # Custom logic to aggregate gradients.
      gradients = strategy.reduce("SUM", gradients, axis=None)
      opt.apply_gradients(zip(gradients, model.trainable_variables),
          skip_gradients_aggregation=True)
    ```

    ### Creating a custom optimizer

    If you intend to create your own optimization algorithm, please inherit from
    this class and override the following methods (see the sketch after this
    list):

      - `build`: Create your optimizer-related variables, such as `momentums` in
        SGD optimizer.
      - `update_step`: Implement your optimizer's updating logic.
      - `get_config`: serialization of the optimizer, include all hyper
        parameters.
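
    For illustration only, a minimal (hypothetical) subclass following this
    pattern could look like the sketch below; `MyMomentumOptimizer` and its
    hyperparameters are made-up names, and the sketch handles dense gradients
    only:

    ```python
    class MyMomentumOptimizer(Optimizer):
        def __init__(self, learning_rate=0.01, momentum=0.9,
                     name="MyMomentumOptimizer"):
            super().__init__(name=name)
            self._learning_rate = self._build_learning_rate(learning_rate)
            self.momentum = momentum

        def build(self, var_list):
            # Create one momentum slot variable per model variable.
            super().build(var_list)
            if getattr(self, "_built", False):
                return
            self.momentums = [
                self.add_variable_from_reference(var, "m") for var in var_list
            ]
            self._built = True

        def update_step(self, gradient, variable):
            # Classic momentum update (dense gradients only).
            lr = tf.cast(self.learning_rate, variable.dtype)
            m = self.momentums[self._index_dict[self._var_key(variable)]]
            m.assign(self.momentum * m - lr * gradient)
            variable.assign_add(m)

        def get_config(self):
            config = super().get_config()
            config.update({
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate),
                "momentum": self.momentum,
            })
            return config
    ```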

    Your optimizer would automatically be compatible with tensorflow distributed
    training if you subclass `optimizer_experimental.Optimizer`.

    """

    def __init__(
        self,
        name,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        **kwargs,
    ):
        """Create a new Optimizer."""
        super().__init__(
            name,
            clipnorm,
            clipvalue,
            global_clipnorm,
            use_ema,
            ema_momentum,
            ema_overwrite_frequency,
            jit_compile,
            **kwargs,
        )
        self._distribution_strategy = tf.distribute.get_strategy()

    def add_variable_from_reference(
        self, model_variable, variable_name, shape=None, initial_value=None
    ):
        strategy = tf.distribute.get_strategy()
        with strategy.extended.colocate_vars_with(model_variable):
            return super().add_variable_from_reference(
                model_variable, variable_name, shape, initial_value
            )

    def _var_key(self, variable):
        """Get a unique identifier of the given variable."""
        # Get the distributed variable if it exists.
        if hasattr(variable, "_distributed_container"):
            variable = variable._distributed_container()
        return super()._var_key(variable)

    def aggregate_gradients(self, grads_and_vars):
        """Aggregate gradients on all devices.

        By default we will perform reduce_sum of gradients across devices.
        Users can implement their own aggregation logic by overriding this
        method.

        Args:
          grads_and_vars: List of (gradient, variable) pairs.

        Returns:
          List of (gradient, variable) pairs.
        """
        return optimizer_utils.all_reduce_sum_gradients(grads_and_vars)

    def apply_gradients(
        self, grads_and_vars, skip_gradients_aggregation=False
    ):
        """Apply gradients to variables.

        Args:
          grads_and_vars: List of (gradient, variable) pairs.
          skip_gradients_aggregation: If true, gradients aggregation will not
            be performed inside the optimizer. Usually this arg is set to True
            when you write custom code aggregating gradients outside the
            optimizer.

        Returns:
          None

        Raises:
          TypeError: If `grads_and_vars` is malformed.
          RuntimeError: If called in a cross-replica context.
        """
        if not skip_gradients_aggregation:
            grads_and_vars = self.aggregate_gradients(grads_and_vars)
        super().apply_gradients(grads_and_vars)

    def _internal_apply_gradients(self, grads_and_vars):
        tf.__internal__.distribute.interim.maybe_merge_call(
            self._distributed_apply_gradients_fn,
            self._distribution_strategy,
            grads_and_vars,
        )

    def _overwrite_model_variables_with_average_value_helper(self, var_list):
        """Helper function to _overwrite_model_variables_with_average_value.

        This function overwrites variables on each device.

        Args:
          var_list: list of model variables.
        """
        strategy = self._distribution_strategy
        # Override model variables by assigning the stored average value on
        # each device.
        for var, average_var in zip(
            var_list, self._model_variables_moving_average
        ):
            strategy.extended.update(
                var, lambda a, b: a.assign(b), args=(average_var,)
            )

    def _update_model_variables_moving_average(self, var_list):
        """Update the stored moving average using the latest value."""
        if self.use_ema:

            def update_average(average, var):
                average.assign(
                    self.ema_momentum * average
                    + (1 - self.ema_momentum) * var
                )

            for var, average in zip(
                var_list, self._model_variables_moving_average
            ):
                self._distribution_strategy.extended.update(
                    average, update_average, args=(var,), group=False
                )

    def _distributed_apply_gradients_fn(
        self, distribution, grads_and_vars, **kwargs
    ):
        """`apply_gradients` using a `DistributionStrategy`."""

        def apply_grad_to_update_var(var, grad):
            if self.jit_compile:
                return self._update_step_xla(
                    grad, var, id(self._var_key(var))
                )
            else:
                return self._update_step(grad, var)

        for grad, var in grads_and_vars:
            distribution.extended.update(
                var, apply_grad_to_update_var, args=(grad,), group=False
            )
        self.iterations.assign_add(1)

        if self.use_ema:
            _, var_list = zip(*grads_and_vars)
            self._update_model_variables_moving_average(var_list)
            if self.ema_overwrite_frequency:
                # Only when `ema_overwrite_frequency` is not None do we
                # overwrite the model variables with their moving averages.
                should_overwrite_model_vars = (
                    self.iterations % self.ema_overwrite_frequency == 0
                )
                tf.cond(
                    tf.cast(should_overwrite_model_vars, tf.bool),
                    true_fn=lambda: (
                        self._overwrite_model_variables_with_average_value(
                            var_list
                        )
                    ),
                    false_fn=lambda: None,
                )


class RestoredOptimizer(Optimizer):
    def __init__(self):
        super().__init__("RestoredOptimizer")

    def get_config(self):
        raise NotImplementedError(
            "Restoring functional Optimizers from SavedModels is not "
            "currently supported. Please file a feature request if this "
            "limitation bothers you."
        )


# Register the optimizer for loading from saved_model purposes.
tf.__internal__.saved_model.load.register_revived_type(
    "experimentalOptimizer",
    lambda obj: isinstance(obj, Optimizer),
    versions=[
        tf.__internal__.saved_model.load.VersionedTypeRegistration(
            object_factory=lambda proto: RestoredOptimizer(),
            version=2,
            min_producer_version=1,
            min_consumer_version=1,
        )
    ],
)

Optimizer.__doc__ = Optimizer.__doc__.replace(
    "{{base_optimizer_keyword_args}}", base_optimizer_keyword_args
)