"""FTRL optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras.optimizers.optimizer_experimental import optimizer
from keras.utils import generic_utils
from tensorflow.python.util.tf_export import keras_export


@generic_utils.register_keras_serializable()
@keras_export("keras.optimizers.experimental.Ftrl", v1=[])
class Ftrl(optimizer.Optimizer):
    r"""Optimizer that implements the FTRL algorithm.

    "Follow The Regularized Leader" (FTRL) is an optimization algorithm
    developed at Google for click-through rate prediction in the early 2010s. It
    is most suitable for shallow models with large and sparse feature spaces.
    The algorithm is described by
    [McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).
    The Keras version supports both online L2 regularization
    (the L2 regularization described in the paper
    above) and shrinkage-type L2 regularization
    (the addition of an L2 penalty to the loss function).

    Initialization:

    ```python
    n = 0
    sigma = 0
    z = 0
    ```

    Update rule for one variable `w`:

    ```python
    prev_n = n
    n = n + g ** 2
    sigma = (n ** -lr_power - prev_n ** -lr_power) / lr
    z = z + g - sigma * w
    if abs(z) < lambda_1:
      w = 0
    else:
      w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)
    ```

    Notation:

    - `lr` is the learning rate
    - `g` is the gradient for the variable
    - `lambda_1` is the L1 regularization strength
    - `lambda_2` is the L2 regularization strength
    - `lr_power` is the power to scale `n`
    - `alpha` in the update rule above denotes the learning rate `lr`, and
      `beta` the `beta` argument.

    Check the documentation for the `l2_shrinkage_regularization_strength`
    parameter for more details on when shrinkage is enabled; in that case the
    gradient is replaced with a shrinkage-adjusted gradient, sketched below.
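
    Concretely, writing `l2_shrinkage` for
    `l2_shrinkage_regularization_strength`, the adjusted gradient used in
    place of `g` (matching `update_step` below) is:

    ```python
    g_with_shrinkage = g + 2 * l2_shrinkage * w
    ```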

    Args:
      learning_rate: A `Tensor`, floating point value, a schedule that is a
        `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable that
        takes no arguments and returns the actual value to use. The learning
        rate.  Defaults to 0.001.
      learning_rate_power: A float value, must be less than or equal to zero.
        Controls how the learning rate decreases during training. Use zero for
        a fixed learning rate.
      initial_accumulator_value: The starting value for accumulators. Only zero
        or positive values are allowed.
      l1_regularization_strength: A float value, must be greater than or equal
        to zero. Defaults to 0.0.
      l2_regularization_strength: A float value, must be greater than or equal
        to zero. Defaults to 0.0.
      l2_shrinkage_regularization_strength: A float value, must be greater than
        or equal to zero. This differs from L2 above in that the L2 above is a
        stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
        When input is sparse, shrinkage will only happen on the active
        weights.
      beta: A float value, representing the beta value from the paper. Defaults
        to 0.0.
      {{base_optimizer_keyword_args}}
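
    Usage (an illustrative sketch, minimizing a simple quadratic):

    ```python
    opt = tf.keras.optimizers.experimental.Ftrl(learning_rate=0.1)
    var = tf.Variable(2.0)
    # `minimize` expects a zero-argument callable returning the loss.
    loss = lambda: (var ** 2) / 2.0
    for _ in range(10):
        opt.minimize(loss, [var])
    ```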
    """

    def __init__(
        self,
        learning_rate=0.001,
        learning_rate_power=-0.5,
        initial_accumulator_value=0.1,
        l1_regularization_strength=0.0,
        l2_regularization_strength=0.0,
        l2_shrinkage_regularization_strength=0.0,
        beta=0.0,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        name="Ftrl",
        **kwargs,
    ):
        super().__init__(
            name=name,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            **kwargs,
        )

        if initial_accumulator_value < 0.0:
            raise ValueError(
                "`initial_accumulator_value` needs to be positive or zero. "
                "Received: initial_accumulator_value="
                f"{initial_accumulator_value}."
            )
        if learning_rate_power > 0.0:
            raise ValueError(
                "`learning_rate_power` needs to be negative or zero. "
                f"Received: learning_rate_power={learning_rate_power}."
            )
        if l1_regularization_strength < 0.0:
            raise ValueError(
                "`l1_regularization_strength` needs to be positive or zero. "
                "Received: l1_regularization_strength="
                f"{l1_regularization_strength}."
            )
        if l2_regularization_strength < 0.0:
            raise ValueError(
                "`l2_regularization_strength` needs to be positive or zero. "
                "Received: l2_regularization_strength="
                f"{l2_regularization_strength}."
            )
        if l2_shrinkage_regularization_strength < 0.0:
            raise ValueError(
                "`l2_shrinkage_regularization_strength` needs to be positive "
                "or zero. Received: l2_shrinkage_regularization_strength="
                f"{l2_shrinkage_regularization_strength}."
            )

        self._learning_rate = self._build_learning_rate(learning_rate)
        self.learning_rate_power = learning_rate_power
        self.initial_accumulator_value = initial_accumulator_value
        self.l1_regularization_strength = l1_regularization_strength
        self.l2_regularization_strength = l2_regularization_strength
        self.l2_shrinkage_regularization_strength = (
            l2_shrinkage_regularization_strength
        )
        self.beta = beta

    def build(self, var_list):
        """Initialize optimizer variables.

        Args:
          var_list: list of model variables to build Ftrl variables on.
        """
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return

        self._accumulators = []
        self._linears = []
        for var in var_list:
            # The accumulator `n` starts at `initial_accumulator_value`;
            # the linear slot `z` starts at zero.
            self._accumulators.append(
                self.add_variable_from_reference(
                    model_variable=var,
                    variable_name="accumulator",
                    initial_value=tf.cast(
                        tf.fill(
                            dims=var.shape,
                            value=self.initial_accumulator_value,
                        ),
                        dtype=var.dtype,
                    ),
                )
            )
            self._linears.append(
                self.add_variable_from_reference(
                    model_variable=var, variable_name="linear"
                )
            )
        self._built = True

    def update_step(self, gradient, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)
        var_key = self._var_key(variable)
        accum = self._accumulators[self._index_dict[var_key]]
        linear = self._linears[self._index_dict[var_key]]

        lr_power = self.learning_rate_power
        # Fold `beta` into the L2 term, matching the closed-form update in
        # the class docstring.
        l2_reg = self.l2_regularization_strength + self.beta / (2.0 * lr)

        # FTRL has the same implementation for sparse and dense gradient
        # updates; shrinkage only adjusts the gradient itself.
        grad_to_use = (
            gradient
            + 2 * self.l2_shrinkage_regularization_strength * variable
        )
        new_accum = accum + tf.pow(gradient, 2)
        linear.assign_add(
            grad_to_use
            - (tf.pow(new_accum, -lr_power) - tf.pow(accum, -lr_power))
            / lr
            * variable
        )
        quadratic = tf.pow(new_accum, (-lr_power)) / lr + 2 * l2_reg
        linear_clipped = tf.clip_by_value(
            linear,
            -self.l1_regularization_strength,
            self.l1_regularization_strength,
        )
        variable.assign((linear_clipped - linear) / quadratic)
        accum.assign(new_accum)

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "learning_rate_power": self.learning_rate_power,
                "initial_accumulator_value": self.initial_accumulator_value,
                "l1_regularization_strength": (
                    self.l1_regularization_strength
                ),
                "l2_regularization_strength": (
                    self.l2_regularization_strength
                ),
                "l2_shrinkage_regularization_strength": (
                    self.l2_shrinkage_regularization_strength
                ),
                "beta": self.beta,
            }
        )
        return config


Ftrl.__doc__ = Ftrl.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)