"""Callbacks: utilities called at certain points during model training."""

import collections
import copy
import csv
import json
import os
import re
import sys
import time

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend
from keras.distribute import distributed_file_utils
from keras.distribute import worker_training_state
from keras.optimizers.schedules import learning_rate_schedule
from keras.utils import generic_utils
from keras.utils import io_utils
from keras.utils import tf_utils
from keras.utils import version_utils
from keras.utils.data_utils import Sequence
from keras.utils.generic_utils import Progbar
from keras.utils.mode_keys import ModeKeys

from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls

try:
    import requests
except ImportError:
    requests = None


def configure_callbacks(
    callbacks,
    model,
    do_validation=False,
    batch_size=None,
    epochs=None,
    steps_per_epoch=None,
    samples=None,
    verbose=1,
    count_mode="steps",
    mode=ModeKeys.TRAIN,
):
    """Configures callbacks for use in various training loops.

    Args:
        callbacks: List of Callbacks.
        model: Model being trained.
        do_validation: Whether or not validation loop will be run.
        batch_size: Number of samples per batch.
        epochs: Number of epochs to train.
        steps_per_epoch: Number of batches to run per training epoch.
        samples: Number of training samples.
        verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
        count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
        mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
          Which loop mode to configure callbacks for.

    Returns:
        Instance of CallbackList used to control all Callbacks.
    """
    # Check if callbacks have already been configured.
    if isinstance(callbacks, CallbackList):
        return callbacks

    if not callbacks:
        callbacks = []

    # Add additional callbacks during training.
    if mode == ModeKeys.TRAIN:
        model.history = History()
        callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
        if verbose:
            callbacks.append(ProgbarLogger(count_mode))
    callback_list = CallbackList(callbacks)

    # Set callback model.
    callback_model = model._get_callback_model()
    callback_list.set_model(callback_model)

    set_callback_parameters(
        callback_list,
        model,
        do_validation=do_validation,
        batch_size=batch_size,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        samples=samples,
        verbose=verbose,
        mode=mode,
    )

    callback_list.model.stop_training = False
    return callback_list


def set_callback_parameters(
    callback_list,
    model,
    do_validation=False,
    batch_size=None,
    epochs=None,
    steps_per_epoch=None,
    samples=None,
    verbose=1,
    mode=ModeKeys.TRAIN,
):
    """Sets callback parameters.

    Args:
        callback_list: CallbackList instance.
        model: Model being trained.
        do_validation: Whether or not validation loop will be run.
        batch_size: Number of samples per batch.
        epochs: Number of epochs to train.
        steps_per_epoch: Number of batches to run per training epoch.
        samples: Number of training samples.
        verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
        mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
          Which loop mode to configure callbacks for.
    """
    metric_names = model.metrics_names
    for cbk in callback_list:
        if isinstance(cbk, (BaseLogger, ProgbarLogger)):
            cbk.stateful_metrics = metric_names[1:]  # Exclude `loss`.

    # Set callback parameters.
    callback_metrics = []
    if mode != ModeKeys.PREDICT:
        callback_metrics = copy.copy(metric_names)
        if do_validation:
            callback_metrics += ["val_" + n for n in metric_names]
    callback_params = {
        "batch_size": batch_size,
        "epochs": epochs,
        "steps": steps_per_epoch,
        "samples": samples,
        "verbose": verbose,
        "do_validation": do_validation,
        "metrics": callback_metrics,
    }
    callback_list.set_params(callback_params)


def _is_generator_like(data):
    """Checks if data is a generator, Sequence, or Iterator."""
    return hasattr(data, "__next__") or hasattr(data, "next") or isinstance(
        data, (Sequence, tf.compat.v1.data.Iterator, tf.data.Iterator)
    )


def make_logs(model, logs, outputs, mode, prefix=""):
    """Computes logs for sending to `on_batch_end` methods."""
    metric_names = model.metrics_names
    if mode in {ModeKeys.TRAIN, ModeKeys.TEST} and metric_names:
        for label, output in zip(metric_names, outputs):
            logs[prefix + label] = output
    else:
        logs["outputs"] = outputs
    return logs


@keras_export("keras.callbacks.CallbackList")
class CallbackList:
    """Container abstracting a list of callbacks."""

    def __init__(
        self,
        callbacks=None,
        add_history=False,
        add_progbar=False,
        model=None,
        **params,
    ):
        """Container for `Callback` instances.

        This object wraps a list of `Callback` instances, making it possible
        to call them all at once via a single endpoint
        (e.g. `callback_list.on_epoch_end(...)`).

        Args:
          callbacks: List of `Callback` instances.
          add_history: Whether a `History` callback should be added, if one does
            not already exist in the `callbacks` list.
          add_progbar: Whether a `ProgbarLogger` callback should be added, if
            one does not already exist in the `callbacks` list.
          model: The `Model` these callbacks are used with.
          **params: If provided, parameters will be passed to each `Callback`
            via `Callback.set_params`.
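
        A minimal sketch of standalone use (assumes a compiled `model` is in
        scope; the parameter values shown are illustrative):

        ```python
        cb_list = CallbackList(
            [tf.keras.callbacks.History()],
            add_progbar=True,
            model=model,
            verbose=1,
            epochs=1,
            steps=10,
        )
        cb_list.on_train_begin()
        ```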
        c                 s   s   | ]}t |d dV  qdS _supports_tf_logsFN)getattrr0   cbr+   r+   r,   	<genexpr>   s   z(CallbackList.__init__.<locals>.<genexpr>c                 s   s2   | ]*}|  s| s| rt|d dV  qdS rP   )_implements_train_batch_hooks_implements_test_batch_hooks_implements_predict_batch_hooksrR   rS   r+   r+   r,   rU      s
   c                 s   s   | ]}|  V  qd S N)rV   rS   r+   r+   r,   rU      s   c                 s   s   | ]}|  V  qd S rY   )rW   rS   r+   r+   r,   rU      s   c                 s   s   | ]}|  V  qd S rY   )rX   rS   r+   r+   r,   rU      s   c                 s   s   | ]}|j jt vV  qd S rY   )	__class____name__globals)r0   r;   r+   r+   r,   rU     s      N)rA   nestflattenr'   _add_default_callbacksr#   r9   allrQ   _batch_hooks_support_tf_logsany_should_call_train_batch_hooks_should_call_test_batch_hooks _should_call_predict_batch_hooks$_disallow_batch_hooks_in_ps_strategy_check_timing_num_batches_for_timing_check_hook_times_batch_start_time_batch_times)selfr'   add_historyadd_progbarr%   paramsr+   r+   r,   __init__   s:    







zCallbackList.__init__c                 C   s   d| _ d| _| jD ]&}t|tr(|| _ qt|tr|| _q| jdu r^|r^t | _| j| j | j du r|rtdd| _ | j| j  dS )z)Adds `Callback`s that are always present.Nr   r(   )_progbar_historyr'   r   r!   r   r    )rm   rn   ro   rT   r+   r+   r,   r`   	  s    


z#CallbackList._add_default_callbacksc                 C   s.   |du ri S | j r|S |r$| jr$|S t|S )z?Turns tensors into numpy arrays or Python scalars if necessary.N)rQ   rb   r   sync_to_numpy_or_python_type)rm   rK   is_batch_hookr+   r+   r,   _process_logs  s    
zCallbackList._process_logsc                 C   s   | j | d S rY   )r'   r    )rm   callbackr+   r+   r,   r    &  s    zCallbackList.appendc                 C   s    || _ | jD ]}|| qd S rY   )rp   r'   r9   )rm   rp   rx   r+   r+   r,   r9   )  s    
zCallbackList.set_paramsc                 C   s.   || _ | jr| j|_| jD ]}|| qd S rY   )r%   rt   r   r'   r#   )rm   r%   rx   r+   r+   r,   r#   .  s
    
zCallbackList.set_modelc                 C   sN   | j s
dS |dkr"| ||| n(|dkr:| ||| ntd| ddS )z4Helper function for all batch_{begin | end} methods.NbeginendzUnrecognized hook: z&. Expected values are ["begin", "end"])r'   _call_batch_begin_hook_call_batch_end_hook
ValueError)rm   r   hookbatchrK   r+   r+   r,   _call_batch_hook5  s    
zCallbackList._call_batch_hookc                 C   s.   dj |d}| ||| | jr*t | _dS )z/Helper function for `on_*_batch_begin` methods.on_{mode}_batch_beginr   N)format_call_batch_hook_helperrh   timerk   )rm   r   r   rK   	hook_namer+   r+   r,   r{   D  s    z#CallbackList._call_batch_begin_hookc                 C   s  dj |d}| jr4|dkr4t | j }| j| | ||| t| j| jkr|}dj |d}t	| jt| j }t	| j
| t| j
|  }	t	| j
| t| j
|  }
d| }d}|
|krt|j |||
d |	|krt|j |||	d d| _d	| _g | _i | _
d	S )
z-Helper function for `on_*_batch_end` methods.zon_{mode}_batch_endr   r   r         ?zCallback method `{hook}` is slow compared to the batch time (batch time: {batch_time:.4f}s vs `{hook}` time: {hook_time:.4f}s). Check your callbacks.)r~   
batch_time	hook_timeFN)r   rh   r   rk   rl   r    r   lenri   sumrj   loggingwarning)rm   r   r   rK   r   r   end_hook_namebegin_hook_nameavg_batch_timeavg_end_hook_timeavg_begin_hook_timethreshold_timewarning_msgr+   r+   r,   r|   L  sL    z!CallbackList._call_batch_end_hookc                 C   sr   | j rt }| j|dd}| jD ]}t||}||| q"| j rn|| jvrVg | j|< | j| t |  dS )z+Helper function for `on_*_batch_*` methods.T)rv   N)rh   r   rw   r'   rR   rj   r    )rm   r   r   rK   
start_timerx   r~   r+   r+   r,   r   |  s    



z$CallbackList._call_batch_hook_helperc                 C   s4   |t jkr|   n|t jkr(|   n|   dS )z:Helper function for on_{train|test|predict}_begin methods.N)r   r   on_train_beginrI   on_test_beginon_predict_beginrm   r   r+   r+   r,   _call_begin_hook  s
    



zCallbackList._call_begin_hookc                 C   s4   |t jkr|   n|t jkr(|   n|   dS )z8Helper function for on_{train|test|predict}_end methods.N)r   r   on_train_endrI   on_test_endon_predict_endr   r+   r+   r,   _call_end_hook  s
    



zCallbackList._call_end_hookc                 C   s   | j r| jtjd||d d S )Nry   rK   rd   r   r   r   rm   r   rK   r+   r+   r,   on_batch_begin  s    zCallbackList.on_batch_beginc                 C   s   | j r| jtjd||d d S )Nrz   r   r   r   r+   r+   r,   on_batch_end  s    zCallbackList.on_batch_endc                 C   s&   |  |}| jD ]}||| qdS )a?  Calls the `on_epoch_begin` methods of its callbacks.

        This function should only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict. Currently no data is passed to this argument for this
               method but that may change in the future.
        N)rw   r'   on_epoch_beginrm   epochrK   rx   r+   r+   r,   r     s    


zCallbackList.on_epoch_beginc                 C   s&   |  |}| jD ]}||| qdS )a|  Calls the `on_epoch_end` methods of its callbacks.

        This function should only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict, metric results for this training epoch, and for the
              validation epoch if validation is performed. Validation result
              keys are prefixed with `val_`.
        N)rw   r'   on_epoch_endr   r+   r+   r,   r     s    

zCallbackList.on_epoch_endc                 C   s   | j r| jtjd||d dS )ad  Calls the `on_train_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.train_step`.
              Typically, the values of the `Model`'s metrics are returned.
              Example: `{'loss': 0.2, 'accuracy': 0.7}`.
        ry   r   Nr   r   r+   r+   r,   on_train_batch_begin  s    	z!CallbackList.on_train_batch_beginc                 C   s   | j r| jtjd||d dS )zCalls the `on_train_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        rz   r   Nr   r   r+   r+   r,   on_train_batch_end  s    zCallbackList.on_train_batch_endc                 C   s   | j r| jtjd||d dS )ab  Calls the `on_test_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.test_step`.
              Typically, the values of the `Model`'s metrics are returned.
              Example: `{'loss': 0.2, 'accuracy': 0.7}`.
        ry   r   Nre   r   r   rI   r   r+   r+   r,   on_test_batch_begin  s    	z CallbackList.on_test_batch_beginc                 C   s   | j r| jtjd||d dS )zCalls the `on_test_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        rz   r   Nr   r   r+   r+   r,   on_test_batch_end  s    zCallbackList.on_test_batch_endc                 C   s   | j r| jtjd||d dS )aQ  Calls the `on_predict_batch_begin` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict, contains the return value of `model.predict_step`,
              it typically returns a dict with a key 'outputs' containing
              the model's outputs.
        ry   r   Nrf   r   r   r7   r   r+   r+   r,   on_predict_batch_begin  s    	z#CallbackList.on_predict_batch_beginc                 C   s   | j r| jtjd||d dS )zCalls the `on_predict_batch_end` methods of its callbacks.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        rz   r   Nr   r   r+   r+   r,   on_predict_batch_end  s    z!CallbackList.on_predict_batch_endc                 C   s$   |  |}| jD ]}|| qdS )zCalls the `on_train_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
              for this method, but that may change in the future.
        N)rw   r'   r   rm   rK   rx   r+   r+   r,   r     s    

zCallbackList.on_train_beginc                 C   s$   |  |}| jD ]}|| qdS )zCalls the `on_train_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
              for this method, but that may change in the future.
        N)rw   r'   r   r   r+   r+   r,   r     s    

zCallbackList.on_train_endc                 C   s$   |  |}| jD ]}|| qdS )zCalls the `on_test_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        N)rw   r'   r   r   r+   r+   r,   r     s    

zCallbackList.on_test_beginc                 C   s$   |  |}| jD ]}|| qdS )zCalls the `on_test_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
              for this method, but that may change in the future.
        N)rw   r'   r   r   r+   r+   r,   r   %  s    

zCallbackList.on_test_endc                 C   s$   |  |}| jD ]}|| qdS )zCalls the 'on_predict_begin` methods of its callbacks.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        N)rw   r'   r   r   r+   r+   r,   r   0  s    

zCallbackList.on_predict_beginc                 C   s$   |  |}| jD ]}|| qdS )zCalls the `on_predict_end` methods of its callbacks.

        Args:
            logs: Dict. Currently, no data is passed via this argument
              for this method, but that may change in the future.
        N)rw   r'   r   r   r+   r+   r,   r   ;  s    

zCallbackList.on_predict_endc                 C   s
   t | jS rY   )iterr'   rm   r+   r+   r,   __iter__F  s    zCallbackList.__iter__c                 C   sf   t j }|jrbg }| jD ]4}t|ddr,q| sD| sD| r|	| q|rbt
d| dS )z>Error out if batch-level callbacks are passed with PSStrategy.rQ   FzgBatch-level `Callback`s are not supported with `ParameterServerStrategy`. Found unsupported callbacks: N)rA   
distributeget_strategy_should_use_with_coordinatorr'   rR   rV   rW   rX   r    r}   )rm   strategyunsupported_callbacksrT   r+   r+   r,   rg   I  s&    

z1CallbackList._disallow_batch_hooks_in_ps_strategy)NFFN)F)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)"r[   
__module____qualname____doc__rq   r`   rw   r    r9   r#   r   r{   r|   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rg   r+   r+   r+   r,   r      sF       
D


0		


















r   zkeras.callbacks.Callbackc                   @   sp  e Zd ZdZdd Zdd Zdd Zeje	j
d/d	d
Zeje	j
d0ddZejd1ddZejd2ddZeje	j
d3ddZeje	j
d4ddZeje	j
d5ddZeje	j
d6ddZeje	j
d7ddZeje	j
d8ddZejd9ddZejd:dd Zejd;d!d"Zejd<d#d$Zejd=d%d&Zejd>d'd(Zd)d* Zd+d, Zd-d. ZdS )?Callbacka  Abstract base class used to build new callbacks.

    Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
    `predict` in order to hook into the various stages of the model training and
    inference lifecycle.

    To create a custom callback, subclass `keras.callbacks.Callback` and
    override the method associated with the stage of interest. See
    https://www.tensorflow.org/guide/keras/custom_callback for more information.

    Example:

    >>> training_finished = False
    >>> class MyCallback(tf.keras.callbacks.Callback):
    ...   def on_train_end(self, logs=None):
    ...     global training_finished
    ...     training_finished = True
    >>> model = tf.keras.Sequential([
    ...     tf.keras.layers.Dense(1, input_shape=(1,))])
    >>> model.compile(loss='mean_squared_error')
    >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
    ...           callbacks=[MyCallback()])
    >>> assert training_finished == True

    If you want to use `Callback` objects in a custom training loop:

    1. You should pack all your callbacks into a single `callbacks.CallbackList`
       so they can all be called together.
    2. You will need to manually call all the `on_*` methods at the appropriate
       locations in your loop. Like this:

    Example:
    ```python
       callbacks =  tf.keras.callbacks.CallbackList([...])
       callbacks.append(...)
       callbacks.on_train_begin(...)
       for epoch in range(EPOCHS):
         callbacks.on_epoch_begin(epoch)
         for i, data in dataset.enumerate():
           callbacks.on_train_batch_begin(i)
           batch_logs = model.train_step(data)
           callbacks.on_train_batch_end(i, batch_logs)
         epoch_logs = ...
         callbacks.on_epoch_end(epoch, epoch_logs)
       final_logs=...
       callbacks.on_train_end(final_logs)
    ```
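
    As a complement, a minimal sketch of a custom callback that reacts to the
    per-batch `logs` dict (the 0.6 loss threshold is illustrative):

    ```python
    class LossThresholdCallback(tf.keras.callbacks.Callback):
        def on_train_batch_end(self, batch, logs=None):
            logs = logs or {}
            if logs.get('loss', 0.0) > 0.6:
                print(f'High loss {logs["loss"]:.3f} at batch {batch}')
    ```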

    Attributes:
        params: Dict. Training parameters
            (e.g. verbosity, batch size, number of epochs...).
        model: Instance of `keras.models.Model`.
            Reference of the model being trained.

    The `logs` dictionary that callback methods
    take as argument will contain keys for quantities relevant to
    the current batch or epoch (see method-specific docstrings).
    c                 C   s   d | _ d | _d | _d| _d S )NF)validation_datar%   _chief_worker_onlyrQ   r   r+   r+   r,   rq     s    zCallback.__init__c                 C   s
   || _ d S rY   )rp   rm   rp   r+   r+   r,   r9     s    zCallback.set_paramsc                 C   s
   || _ d S rY   )r%   rm   r%   r+   r+   r,   r#     s    zCallback.set_modelNc                 C   s   dS )z;A backwards compatibility alias for `on_train_batch_begin`.Nr+   r   r+   r+   r,   r     s    zCallback.on_batch_beginc                 C   s   dS )z9A backwards compatibility alias for `on_train_batch_end`.Nr+   r   r+   r+   r,   r     s    zCallback.on_batch_endc                 C   s   dS )ae  Called at the start of an epoch.

        Subclasses should override for any actions to run. This function should
        only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   rm   r   rK   r+   r+   r,   r     s    zCallback.on_epoch_beginc                 C   s   dS )a0  Called at the end of an epoch.

        Subclasses should override for any actions to run. This function should
        only be called during TRAIN mode.

        Args:
            epoch: Integer, index of epoch.
            logs: Dict, metric results for this training epoch, and for the
              validation epoch if validation is performed. Validation result
              keys are prefixed with `val_`. For training epoch, the values of
              the `Model`'s metrics are returned. Example:
              `{'loss': 0.2, 'accuracy': 0.7}`.
        Nr+   r   r+   r+   r,   r     s    zCallback.on_epoch_endc                 C   s   | j ||d dS )a  Called at the beginning of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        r   N)r   r   r+   r+   r,   r     s    zCallback.on_train_batch_beginc                 C   s   | j ||d dS )a  Called at the end of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        r   N)r   r   r+   r+   r,   r     s    zCallback.on_train_batch_endc                 C   s   dS )a~  Called at the beginning of a batch in `evaluate` methods.

        Also called at the beginning of a validation batch in the `fit`
        methods, if validation data is provided.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   r   r+   r+   r,   r     s    zCallback.on_test_batch_beginc                 C   s   dS )a3  Called at the end of a batch in `evaluate` methods.

        Also called at the end of a validation batch in the `fit`
        methods, if validation data is provided.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        Nr+   r   r+   r+   r,   r     s    zCallback.on_test_batch_endc                 C   s   dS )a  Called at the beginning of a batch in `predict` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   r   r+   r+   r,   r      s    zCallback.on_predict_batch_beginc                 C   s   dS )a  Called at the end of a batch in `predict` methods.

        Subclasses should override for any actions to run.

        Note that if the `steps_per_execution` argument to `compile` in
        `tf.keras.Model` is set to `N`, this method will only be called every
        `N` batches.

        Args:
            batch: Integer, index of batch within the current epoch.
            logs: Dict. Aggregated metric results up until this batch.
        Nr+   r   r+   r+   r,   r   1  s    zCallback.on_predict_batch_endc                 C   s   dS )zCalled at the beginning of training.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   rm   rK   r+   r+   r,   r   A  s    zCallback.on_train_beginc                 C   s   dS )a.  Called at the end of training.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
              `on_epoch_end()` is passed to this argument for this method but
              that may change in the future.
        Nr+   r   r+   r+   r,   r   L  s    zCallback.on_train_endc                 C   s   dS )a  Called at the beginning of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   r   r+   r+   r,   r   X  s    zCallback.on_test_beginc                 C   s   dS )aC  Called at the end of evaluation or validation.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently the output of the last call to
              `on_test_batch_end()` is passed to this argument for this method
              but that may change in the future.
        Nr+   r   r+   r+   r,   r   c  s    zCallback.on_test_endc                 C   s   dS )a   Called at the beginning of prediction.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   r   r+   r+   r,   r   o  s    zCallback.on_predict_beginc                 C   s   dS )zCalled at the end of prediction.

        Subclasses should override for any actions to run.

        Args:
            logs: Dict. Currently no data is passed to this argument for this
              method but that may change in the future.
        Nr+   r   r+   r+   r,   r   z  s    zCallback.on_predict_endc                 C   s8   t | j p6t | j p6t | j p6t | j S )zBDetermines if this Callback should be called for each train batch.)r   
is_defaultr   r   r   r   r   r+   r+   r,   rV     s    z&Callback._implements_train_batch_hooksc                 C   s   t | j pt | j S )zADetermines if this Callback should be called for each test batch.)r   r   r   r   r   r+   r+   r,   rW     s
    z%Callback._implements_test_batch_hooksc                 C   s   t | j pt | j S )zLDetermines if this Callback should be called for each predict
        batch.)r   r   r   r   r   r+   r+   r,   rX     s
    z(Callback._implements_predict_batch_hooks)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)r[   r   r   r   rq   r9   r#   r   for_subclass_implementersr   defaultr   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rV   rW   rX   r+   r+   r+   r,   r   a  s^   ;	



	r   zkeras.callbacks.BaseLoggerc                       s@   e Zd ZdZd fdd	ZdddZdddZdd	d
Z  ZS )r   a  Callback that accumulates epoch averages of metrics.

    This callback is automatically applied to every Keras model.

    Args:
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over an epoch.
            Metrics in this list will be logged as-is in `on_epoch_end`.
            All others will be averaged in `on_epoch_end`.
    Nc                    s   t    t|pg | _d S rY   )superrq   setr6   )rm   r6   rZ   r+   r,   rq     s    
zBaseLogger.__init__c                 C   s   d| _ i | _d S Nr   )seentotalsr   r+   r+   r,   r     s    zBaseLogger.on_epoch_beginc                 C   s   |pi }| dd}| dd}|  j|| 7  _| D ]N\}}|| jv rX|| j|< q:|| jv rz| j|  || 7  < q:|| | j|< q:d S )Nsizer   	num_stepsr   )getr   itemsr6   r   )rm   r   rK   r   r   kvr+   r+   r,   r     s    

zBaseLogger.on_batch_endc                 C   sT   |d urP| j d D ]<}|| jv r|| jv r:| j| ||< q| j| | j ||< qd S )Nr4   )rp   r   r6   r   )rm   r   rK   r   r+   r+   r,   r     s    

zBaseLogger.on_epoch_end)N)N)N)N)	r[   r   r   r   rq   r   r   r   __classcell__r+   r+   r   r,   r     s
   

r   zkeras.callbacks.TerminateOnNaNc                       s*   e Zd ZdZ fddZdddZ  ZS )TerminateOnNaNzACallback that terminates training when a NaN loss is encountered.c                    s   t    d| _d S NT)r   rq   rQ   r   r   r+   r,   rq     s    
zTerminateOnNaN.__init__Nc                 C   sV   |pi }| d}|d urRt|}t|s8t|rRtd| d d| j_	d S )NlosszBatch z$: Invalid loss, terminating trainingT)
r   r   ru   npisnanisinfr   	print_msgr%   r&   )rm   r   rK   r   r+   r+   r,   r     s    


zTerminateOnNaN.on_batch_end)N)r[   r   r   r   rq   r   r   r+   r+   r   r,   r     s   r   zkeras.callbacks.ProgbarLoggerc                       s   e Zd ZdZd+ed fddZdd Zd,d	d
Zd-ddZd.ddZ	d/ddZ
d0ddZd1ddZd2ddZd3ddZd4ddZd5ddZdd Zdd  Zd!d" Zd#d$ Zd%d& Zd6d'd(Zd)d* Z  ZS )7r!   aD  Callback that prints metrics to stdout.

    Args:
        count_mode: One of `"steps"` or `"samples"`.
            Whether the progress bar should
            count samples seen or steps (batches) seen.
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over an epoch.
            Metrics in this list will be logged as-is.
            All others will be averaged over time (e.g. loss, etc).
            If not provided, defaults to the `Model`'s metrics.

    Raises:
        ValueError: In case of invalid `count_mode`.
    r   Nrr   c                    s   t    d| _|dkr d| _n |dkr0d| _ntd| d|rLt|nt | _d| _d | _d | _	d| _
d| _d	\| _| _| _d| _d| _d S )
NTr   Fr   zUnknown `count_mode`: z*. Expected values are ["samples", "steps"]r   r   )NNN)r   rq   rQ   	use_stepsr}   r   r6   r   progbartargetr   r   _train_step
_test_step_predict_step_call_batch_hooks_called_in_fit)rm   r(   r6   r   r+   r,   rq     s&    

zProgbarLogger.__init__c                 C   s   |d | _ |d | _| jr.d|v r.|d | _n | jsHd|v rH|d | _nd | _| j dk| _| jd u rz"| jj| _| jj| _	| jj
| _W n ty   d| _Y n0 d S )Nr   r   r   r   r   T)r   r   r   r   r   r%   _train_counterr   _test_counterr   _predict_counterr   AttributeErrorr   r+   r+   r,   r9     s     




zProgbarLogger.set_paramsc                 C   s
   d| _ d S r   )r   r   r+   r+   r,   r   %  s    zProgbarLogger.on_train_beginc                 C   s   | j s|   |   d S rY   )r   _reset_progbar_maybe_init_progbarr   r+   r+   r,   r   )  s    zProgbarLogger.on_test_beginc                 C   s   |    |   d S rY   )r   r   r   r+   r+   r,   r   .  s    zProgbarLogger.on_predict_beginc                 C   s@   |    |   | jr<| jdkr<td|d  d| j  d S )Nr   Epoch /)r   r   r   r   r   r   r   r+   r+   r,   r   2  s    zProgbarLogger.on_epoch_beginc                 C   s   |  || d S rY   _batch_update_progbarr   r+   r+   r,   r   8  s    z ProgbarLogger.on_train_batch_endc                 C   s   | j s| || d S rY   )r   r   r   r+   r+   r,   r   ;  s    zProgbarLogger.on_test_batch_endc                 C   s   |  |d  d S rY   r   r   r+   r+   r,   r   ?  s    z"ProgbarLogger.on_predict_batch_endc                 C   s   |  || j d S rY   )_finalize_progbarr   r   r+   r+   r,   r   C  s    zProgbarLogger.on_epoch_endc                 C   s   | j s| || j d S rY   )r   r   r   r   r+   r+   r,   r   F  s    zProgbarLogger.on_test_endc                 C   s   |  || j d S rY   )r   r   r   r+   r+   r,   r   J  s    zProgbarLogger.on_predict_endc                 C   s   d| _ d | _d S r   )r   r   r   r+   r+   r,   r   M  s    zProgbarLogger._reset_progbarc                 C   sp   t | j| _| jr2| jt dd | jjD | _| jdu r^t| j| j| j| j	rTdndd| _| j
| j dS )zLInstantiate a `Progbar` if not yet, and update the stateful
        metrics.c                 s   s   | ]}|j V  qd S rY   )name)r0   mr+   r+   r,   rU   ^  r3   z4ProgbarLogger._maybe_init_progbar.<locals>.<genexpr>Nstepsample)r   r   r6   	unit_name)r   r6   r%   unionr4   r   r   r   r   r   _update_stateful_metricsr   r+   r+   r,   r   Q  s    
z!ProgbarLogger._maybe_init_progbarc                 C   s   | j S rY   r   r   r+   r+   r,   rV   k  s    z+ProgbarLogger._implements_train_batch_hooksc                 C   s   | j S rY   r   r   r+   r+   r,   rW   n  s    z*ProgbarLogger._implements_test_batch_hooksc                 C   s   | j S rY   r   r   r+   r+   r,   rX   q  s    z-ProgbarLogger._implements_predict_batch_hooksc                 C   s   |pi }|    | jr"|d | _nDt|}|dd}|dd}|dd || }|  j|7  _| jdkrt|}| jj	| jt
| dd dS )	zUpdates the progbar.r   r   r   r   r   NFfinalize)r   r   r   r8   popr   r   ru   r   updatelistr   )rm   r   rK   r   r   add_seenr+   r+   r,   r   t  s    


z#ProgbarLogger._batch_update_progbarc                 C   st   t |p
i }| jd u rT|d ur>| }| js>||dd9 }|pF| j| _| j| j_| jj| jt	|
 dd d S )Nr   r   Tr   )r   ru   r   numpyr   r   r   r   r   r   r   )rm   rK   counterr+   r+   r,   r     s    

zProgbarLogger._finalize_progbar)r   N)N)N)N)N)N)N)N)N)N)N)N)r[   r   r   r   strrq   r9   r   r   r   r   r   r   r   r   r   r   r   r   rV   rW   rX   r   r   r   r+   r+   r   r,   r!     s(   










@keras_export("keras.callbacks.History")
class History(Callback):
    """Callback that records events into a `History` object.

    This callback is automatically applied to
    every Keras model. The `History` object
    gets returned by the `fit` method of models.

    Example:

    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=10, verbose=1)
    >>> print(history.params)
    {'verbose': 1, 'epochs': 10, 'steps': 1}
    >>> # check the keys of history object
    >>> print(history.history.keys())
    dict_keys(['loss'])

    c                    s   t    i | _d S rY   )r   rq   r   r   r   r+   r,   rq     s    
zHistory.__init__Nc                 C   s
   g | _ d S rY   r   r   r+   r+   r,   r     s    zHistory.on_train_beginc                 C   sF   |pi }| j | | D ]\}}| j|g | q| | j_d S rY   )r   r    r   r   
setdefaultr%   )rm   r   rK   r   r   r+   r+   r,   r     s
    zHistory.on_epoch_end)N)N)r[   r   r   r   rq   r   r   r   r+   r+   r   r,   r     s   
@keras_export("keras.callbacks.ModelCheckpoint")
class ModelCheckpoint(Callback):
    """Callback to save the Keras model or model weights at some frequency.

    `ModelCheckpoint` callback is used in conjunction with training using
    `model.fit()` to save a model or weights (in a checkpoint file) at some
    interval, so the model or weights can be loaded later to continue the
    training from the state saved.

    A few options this callback provides include:

    - Whether to only keep the model that has achieved the "best performance" so
      far, or whether to save the model at the end of every epoch regardless of
      performance.
    - Definition of 'best'; which quantity to monitor and whether it should be
      maximized or minimized.
    - The frequency it should save at. Currently, the callback supports saving
      at the end of every epoch, or after a fixed number of training batches.
    - Whether only weights are saved, or the whole model is saved.

    Note: If you get `WARNING:tensorflow:Can save best model only with <name>
    available, skipping`, see the description of the `monitor` argument for
    details on how to get this right.

    Example:

    ```python
    model.compile(loss=..., optimizer=...,
                  metrics=['accuracy'])

    EPOCHS = 10
    checkpoint_filepath = '/tmp/checkpoint'
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_filepath,
        save_weights_only=True,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Model weights are saved at the end of every epoch, if it's the best seen
    # so far.
    model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])

    # The model weights (that are considered the best) are loaded into the
    # model.
    model.load_weights(checkpoint_filepath)
    ```
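
    A sketch of checkpointing with an epoch-formatted filename (the directory
    is illustrative; the `{epoch:02d}` and `{val_loss:.2f}` keys require that
    validation runs so `val_loss` appears in the logs):

    ```python
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath='/tmp/ckpt/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
        save_freq='epoch')
    model.fit(..., validation_data=val_data,
              callbacks=[model_checkpoint_callback])
    ```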

    Args:
        filepath: string or `PathLike`, path to save the model file. e.g.
          filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
          can contain named formatting options, which will be filled with the
          values of `epoch` and keys in `logs` (passed in `on_epoch_end`). For
          example:
          if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the
          model checkpoints will be saved with the epoch number and the
          validation loss in the filename. The directory of the filepath should
          not be reused by any other callbacks to avoid conflicts.
        monitor: The metric name to monitor. Typically the metrics are set by
          the `Model.compile` method. Note:

          * Prefix the name with `"val_`" to monitor validation metrics.
          * Use `"loss"` or "`val_loss`" to monitor the model's total loss.
          * If you specify metrics as strings, like `"accuracy"`, pass the same
            string (with or without the `"val_"` prefix).
          * If you pass `metrics.Metric` objects, `monitor` should be set to
            `metric.name`
          * If you're not sure about the metric names you can check the contents
            of the `history.history` dictionary returned by
            `history = model.fit()`
          * Multi-output models set additional prefixes on the metric names.

        verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
          displays messages when the callback takes an action.
        save_best_only: if `save_best_only=True`, it only saves when the model
          is considered the "best" and the latest best model according to the
          quantity monitored will not be overwritten. If `filepath` doesn't
          contain formatting options like `{epoch}` then `filepath` will be
          overwritten by each new better model.
        mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the
          decision to overwrite the current save file is made based on either
          the maximization or the minimization of the monitored quantity.
          For `val_acc`, this should be `max`, for `val_loss` this should be
          `min`, etc. In `auto` mode, the mode is set to `max` if the quantities
          monitored are 'acc' or start with 'fmeasure' and are set to `min` for
          the rest of the quantities.
        save_weights_only: if True, then only the model's weights will be saved
          (`model.save_weights(filepath)`), else the full model is saved
          (`model.save(filepath)`).
        save_freq: `'epoch'` or integer. When using `'epoch'`, the callback
          saves the model after each epoch. When using integer, the callback
          saves the model at end of this many batches. If the `Model` is
          compiled with `steps_per_execution=N`, then the saving criteria will
          be checked every Nth batch. Note that if the saving isn't aligned to
          epochs, the monitored metric may potentially be less reliable (it
          could reflect as little as 1 batch, since the metrics get reset every
          epoch). Defaults to `'epoch'`.
        options: Optional `tf.train.CheckpointOptions` object if
          `save_weights_only` is true or optional `tf.saved_model.SaveOptions`
          object if `save_weights_only` is false.
        initial_value_threshold: Floating point initial "best" value of the
          metric to be monitored. Only applies if `save_best_only=True`. Only
          overwrites the model weights already saved if the performance of the
          current model is better than this value.
        **kwargs: Additional arguments for backwards compatibility. Possible key
          is `period`.
    val_lossr   Fautor   N)monitorr   save_best_onlysave_weights_onlyr   c
                    s
  t    d| _|| _|| _t|| _|| _|| _	|| _
d| _d| _d| _|	| _|r|d u slt|tjjr~|pxtj | _qtd| dn8|d u st|tjjr|ptj | _ntd| dd|
v r|
d | _td nd| _d	|
v r|
d	 | _td
 nd| _|dvr,td| d}|dkrTtj| _| jd u rtj| _n|dkr~tj| _| jd u rtj | _nVd| jv s| j drtj| _| jd u rtj | _ntj| _| jd u rtj| _| j
dkr t| j
t!s t"d| j
 dd| _#d S )NTr   zfIf save_weights_only is True, then `options` must be either None or a tf.train.CheckpointOptions. Got .zgIf save_weights_only is False, then `options` must be either None or a tf.saved_model.SaveOptions. Got load_weights_on_restartz`load_weights_on_restart` argument is deprecated. Please use `model.load_weights()` for loading weights before the start of `model.fit()`.Fperiodzk`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.r   r  minmaxz:ModelCheckpoint mode %s is unknown, fallback to auto mode.r  r
  r  accfmeasurer   zUnrecognized save_freq: z+. Expected save_freq are "epoch" or integer)$r   rq   rQ   r  r   r   path_to_stringfilepathr  r  	save_freqepochs_since_last_save_batches_seen_since_last_saving_last_batch_seenbestr   rA   trainCheckpointOptions_options	TypeErrorsaved_modelSaveOptionsr  r   r   r  r   less
monitor_opInfgreater
startswithintr}   r   )rm   r  r  r   r  r  r   r  optionsZinitial_value_thresholdkwargsr   r+   r,   rq   '  s    


	






zModelCheckpoint.__init__c              
   C   sv   | j rr| | j}|d urr| |rrz| j| W n< ttfyp } z td| d| W Y d }~n
d }~0 0 d S )NzError loading file from z
. Reason: )r  1_get_most_recently_modified_file_matching_patternr  _checkpoint_existsr%   load_weightsIOErrorr}   )rm   rK   filepath_to_loader+   r+   r,   r     s     zModelCheckpoint.on_train_beginc                 C   s
   | j dkS Nr   r  r   r+   r+   r,   rV     s    z-ModelCheckpoint._implements_train_batch_hooksc                 C   s    |  |r| j| j||d d S )Nr   r   rK   )_should_save_on_batch_save_model_current_epochr   r+   r+   r,   r     s    
z"ModelCheckpoint.on_train_batch_endc                 C   s
   || _ d S rY   r.  r   r+   r+   r,   r     s    zModelCheckpoint.on_epoch_beginc                 C   s,   |  j d7  _ | jdkr(| j|d |d d S )Nr   r   r+  )r  r  r-  r   r+   r+   r,   r     s    
zModelCheckpoint.on_epoch_endc                 C   sZ   | j dkrdS || jkr"|d }n
|| j }|  j|7  _|| _| j| j krVd| _dS dS )z?Handles batch-level saving logic, supports steps_per_execution.r   Fr   r   T)r  r  r  )rm   r   add_batchesr+   r+   r,   r,    s    



z%ModelCheckpoint._should_save_on_batchc                 C   s   |pi }t | jts"| j| jkrt|}d| _| |||}zH| jr(|	| j
}|du rptd| j
 n| || jr| jdkrtd|d  d| j
 d| jdd	|dd
| 
 || _| jr| jj|d| jd n| jj|d| jd n2| jdkrtd|d  d| j
 d| jd nX| jdkrNtd|d  d|  | jrl| jj|d| jd n| jj|d| jd |   W np ty   td| Y nP ty } z6dt|jd  v rtd| |W Y d}~n
d}~0 0 dS )a  Saves the model.

        Args:
            epoch: the epoch this iteration is in.
            batch: the batch this iteration is in. `None` if the `save_freq`
              is set to `epoch`.
            logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
        r   Nz5Can save best model only with %s available, skipping.
Epoch r   z: z improved from z.5fz to z, saving model to T)	overwriter!  z did not improve from z: saving model to zePlease specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: zis a directoryzfPlease specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: f)r   r  r   r  r  r   ru   _get_file_pathr  r   r  r   r   r  r  r   r   r   r  r%   save_weightsr  save_maybe_remove_fileIsADirectoryErrorr&  r   argslower)rm   r   r   rK   r  currentr(  r+   r+   r,   r-    s    	



zModelCheckpoint._save_modelc              
   C   s   zP|du sd|v r.| j jf d|d i|}n | j jf |d |d d|}W n: ty } z"td| j  d| W Y d}~n
d}~0 0 t|| jj| _| jS )z%Returns the file path for checkpoint.Nr   r   r   r   r   z*Failed to format this callback filepath: "z". Reason: )r  r   KeyErrorr   write_filepathr%   distribute_strategy_write_filepath)rm   r   r   rK   	file_pathr(  r+   r+   r,   r3  %  s$    

zModelCheckpoint._get_file_pathc                 C   s   t | j| jj d S rY   )r   remove_temp_dir_with_filepathr?  r%   r>  r   r+   r+   r,   r6  =  s    
z"ModelCheckpoint._maybe_remove_filec                 C   s@   | drtjj|S tjj|}tjj|d }|p>|S )z;Returns whether the checkpoint `filepath` refers to exists.z.h5z.index)endswithrA   iogfileexists)rm   r  tf_saved_model_exists!tf_weights_only_checkpoint_existsr+   r+   r,   r$  E  s    
z"ModelCheckpoint._checkpoint_existsc                 C   s   t j|}t j|}dtdd| d }tj|}|durZt	|t j|rZ|S d}d}d}d}	tj
j|rt |D ]d}
t	||
rt j||
}t j|}|	du s||	kr|}	||kr|}|}d}q||kr|d7 }q|dkr|S |	S dS )aW  Returns the most recently modified filepath matching pattern.

        Pattern may contain python formatting placeholder. If
        `tf.train.latest_checkpoint()` does not return None, use that;
        otherwise, check for most recently modified one that matches the
        pattern.

        In the rare case where more than one pattern-matching file shares the
        most recent modified time among all matches, return the
        filepath that is largest (by `>` operator, lexicographically using the
        numeric equivalents). This provides a tie-breaker when multiple files
        are most recent. Note that a larger `filepath` can sometimes indicate a
        later time of modification (for instance, when epoch/batch is used as
        formatting option), but not necessarily (when accuracy or loss is used).
        The tie-breaker is put in the logic as a best effort to return the
        most recent file, and to avoid a nondeterministic result.

        Modified time of a file is obtained with `os.path.getmtime()`.

        This utility function is best demonstrated via an example:

        ```python
        file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
        test_dir = self.get_temp_dir()
        path_pattern = os.path.join(test_dir, file_pattern)
        file_paths = [
            os.path.join(test_dir, file_name) for file_name in
            ['f.batch03epoch02.h5',
             'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
        ]
        for file_path in file_paths:
          # Write something to each of the files
        self.assertEqual(
            _get_most_recently_modified_file_matching_pattern(path_pattern),
            file_paths[-1])
        ```

        Args:
            pattern: The file pattern that may optionally contain python
                placeholder such as `{epoch:02d}`.

        Returns:
            The most recently modified file's full filepath matching `pattern`.
            If `pattern` does not contain any placeholder, this returns the
            filepath that exactly matches `pattern`. Returns `None` if no match
            is found.
        ^z{.*}z.*$Nr   r   )ospathdirnamebasenameresubrA   r  latest_checkpointmatchrC  rD  rE  listdirjoingetmtime)rm   patterndir_name	base_namebase_name_regexlatest_tf_checkpointlatest_mod_timefile_path_with_latest_mod_timen_file_with_latest_mod_time file_path_with_largest_file_name	file_namer@  mod_timer+   r+   r,   r#  O  s>    0
zAModelCheckpoint._get_most_recently_modified_file_matching_pattern)r  r   FFr  r   NN)N)N)N)N)r[   r   r   r   r   r   boolrq   r   rV   r   r   r   r,  r-  r3  r6  r$  r#  r   r+   r+   r   r,   r     s6   l        l



[
r   z keras.callbacks.BackupAndRestore)rC   c                       s\   e Zd ZdZd fdd	ZdddZdd	d
Zdd ZdddZdddZ	dddZ
@keras_export("keras.callbacks.BackupAndRestore", v1=[])
class BackupAndRestore(Callback):
    """Callback to back up and restore the training state.

    `BackupAndRestore` callback is intended to recover training from an
    interruption that has happened in the middle of a `Model.fit` execution, by
    backing up the training states in a temporary checkpoint file (with the help
    of a `tf.train.CheckpointManager`), at the end of each epoch. Each backup
    overwrites the previously written checkpoint file, so at any given time
    there is at most one such checkpoint file for backup/restoring purpose.

    If training restarts before completion, the training state (which includes
    the `Model` weights and epoch number) is restored to the most recently saved
    state at the beginning of a new `Model.fit` run. At the completion of a
    `Model.fit` run, the temporary checkpoint file is deleted.

    Note that the user is responsible for bringing jobs back up after the
    interruption. This callback is important to the backup and restore
    mechanism for fault tolerance, and the model to be restored from a
    previous checkpoint is expected to be the same as the one used to back up.
    If the user changes arguments passed to `compile` or `fit`, the checkpoint
    saved for fault tolerance can become invalid.

    Note:

    1. This callback is not compatible with disabling eager execution.
    2. A checkpoint is saved at the end of each epoch. After restoring,
    `Model.fit` redoes any partial work during the unfinished epoch in which the
    training got restarted (so the work done before the interruption doesn't
    affect the final model state).
    3. This works for both single worker and multi-worker modes. When
    `Model.fit` is used with `tf.distribute`, it supports
    `tf.distribute.MirroredStrategy`,
    `tf.distribute.MultiWorkerMirroredStrategy`, `tf.distribute.TPUStrategy`,
    and `tf.distribute.experimental.ParameterServerStrategy`.

    Example:

    >>> class InterruptingCallback(tf.keras.callbacks.Callback):
    ...   def on_epoch_begin(self, epoch, logs=None):
    ...     if epoch == 4:
    ...       raise RuntimeError('Interrupting!')
    >>> callback = tf.keras.callbacks.BackupAndRestore(backup_dir="/tmp/backup")
    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> try:
    ...   model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
    ...             batch_size=1, callbacks=[callback, InterruptingCallback()],
    ...             verbose=0)
    ... except:
    ...   pass
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=10, batch_size=1, callbacks=[callback],
    ...                     verbose=0)
    >>> # Only 6 more epochs are run, since first trainning got interrupted at
    >>> # zero-indexed epoch 4, second training will continue from 4 to 9.
    >>> len(history.history['loss'])
    6

    Args:
        backup_dir: String, path to store the checkpoint.
          e.g. backup_dir = os.path.join(working_dir, 'backup')
          This is the directory in which the system stores temporary files to
          recover the model from jobs terminated unexpectedly. The directory
          cannot be reused elsewhere to store other files, e.g. by
          BackupAndRestore callback of another training, or by another callback
          (ModelCheckpoint) of the same training.
        save_freq: `'epoch'` or integer. When set to `'epoch'`
          the callback saves the checkpoint at the end of each epoch.
          When set to an integer, the callback saves the checkpoint every
          `save_freq` batches.
        delete_checkpoint: Boolean, defaults to True. This `BackupAndRestore`
          callback works by saving a checkpoint to back up the training state.
          If `delete_checkpoint=True`, the checkpoint will be deleted after
          training is finished. Use `False` if you'd like to keep the checkpoint
          for future use.
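
    A sketch of batch-level backups via an integer `save_freq` (the directory
    and frequency are illustrative):

    ```python
    callback = tf.keras.callbacks.BackupAndRestore(
        backup_dir="/tmp/backup", save_freq=100)  # back up every 100 batches
    model.fit(x_train, y_train, epochs=10, callbacks=[callback])
    ```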
    r   Tc                    s   t    || _d| _tjjtjjtjjj	tjj	tjjj
f| _|| _|| _d| _d| _t sxt rptdntdd| _d S )NTr   z\This Callback's method contains Python state and should be called outside of `tf.function`s.zBackupAndRestore only supports eager mode. In graph mode, consider using ModelCheckpoint to manually save and restore weights with `model.load_weights()` and by providing `initial_epoch` in `model.fit()` for fault tolerance.F)r   rq   
backup_dirrQ   rA   r   MirroredStrategyMultiWorkerMirroredStrategyexperimentalTPUStrategyParameterServerStrategy_supported_strategiesr  delete_checkpoint_batches_countr.  executing_eagerlyinside_functionr}   r   )rm   rb  r  ri  r   r+   r,   rq     s,    

zBackupAndRestore.__init__Nc                 C   s^   | j jr.t| j j| js.tt| j j dt| j | j	| j
| j _| j j| _| j  d S )Nz is not supported yet. Currently BackupAndRestore callback only supports empty strategy, MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy.)r%   _distribution_strategyr   r>  rh  NotImplementedErrortyper   WorkerTrainingStaterb  r  _training_staterestorer   r+   r+   r,   r   "  s    


zBackupAndRestore.on_train_beginc                 C   s@   | j dkr<|  jd7  _| j| j kr<d| _| jj| j|d d S )Nr   r   r   r;  )r  rj  rq  back_upr.  r   r+   r+   r,   r   5  s    
z#BackupAndRestore.on_train_batch_endc                 C   s
   | j dkS r)  r*  r   r+   r+   r,   rV   >  s    z.BackupAndRestore._implements_train_batch_hooksc                 C   s   | j r| j  | `| j`d S rY   )ri  rq  delete_backupr%   r   r+   r+   r,   r   A  s    
zBackupAndRestore.on_train_endc                 C   s
   || _ d S rY   r/  r   r+   r+   r,   r   J  s    zBackupAndRestore.on_epoch_beginc                 C   s   | j dkr| jj|d d S )Nr   r   )r  rq  rs  r   r+   r+   r,   r   M  s    
zBackupAndRestore.on_epoch_end)r   T)N)N)N)N)N)r[   r   r   r   rq   r   r   rV   r   r   r   r   r+   r+   r   r,   ra    s   L#

	
	
ra  z-keras.callbacks.experimental.BackupAndRestorec                       s    e Zd ZdZ fddZ  ZS )BackupAndRestoreExperimentala	  Deprecated. Please use `tf.keras.callbacks.BackupAndRestore` instead.

    Caution: `tf.keras.callbacks.experimental.BackupAndRestore` endpoint is
      deprecated and will be removed in a future release. Please use
      `tf.keras.callbacks.BackupAndRestore`.
    """

    def __init__(self, *args, **kwargs):
        logging.warning(
            "`tf.keras.callbacks.experimental.BackupAndRestore` endpoint is "
            "deprecated and will be removed in a future release. Please use "
            "`tf.keras.callbacks.BackupAndRestore`."
        )
        super().__init__(*args, **kwargs)


@keras_export("keras.callbacks.EarlyStopping")
class EarlyStopping(Callback):
    """Stop training when a monitored metric has stopped improving.

    Assuming the goal of training is to minimize the loss, the metric to be
    monitored would be `'loss'`, and the mode would be `'min'`. A
    `model.fit()` training loop will check at the end of every epoch whether
    the loss is no longer decreasing, considering the `min_delta` and
    `patience` if applicable. Once the loss is found to be no longer
    decreasing, `model.stop_training` is set to True and training terminates.

    The quantity to be monitored needs to be available in the `logs` dict.
    To make it so, pass the loss or metrics to `model.compile()`.

    Args:
      monitor: Quantity to be monitored.
      min_delta: Minimum change in the monitored quantity
          to qualify as an improvement, i.e. an absolute
          change of less than `min_delta` will count as no
          improvement.
      patience: Number of epochs with no improvement
          after which training will be stopped.
      verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1
          displays messages when the callback takes an action.
      mode: One of `{"auto", "min", "max"}`. In `min` mode,
          training will stop when the quantity
          monitored has stopped decreasing; in `"max"`
          mode it will stop when the quantity
          monitored has stopped increasing; in `"auto"`
          mode, the direction is automatically inferred
          from the name of the monitored quantity.
      baseline: Baseline value for the monitored quantity.
          Training will stop if the model doesn't show improvement over the
          baseline.
      restore_best_weights: Whether to restore model weights from
          the epoch with the best value of the monitored quantity.
          If False, the model weights obtained at the last step of
          training are used. An epoch will be restored regardless
          of the performance relative to the `baseline`. If no epoch
          improves on `baseline`, training will run for `patience`
          epochs and restore weights from the best epoch in that set.

    Example:

    >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
    >>> # This callback will stop the training when there is no improvement in
    >>> # the loss for three consecutive epochs.
    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=10, batch_size=1, callbacks=[callback],
    ...                     verbose=0)
    >>> len(history.history['loss'])  # Only 4 epochs are run.
    4
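
    A sketch of `baseline` (the threshold is illustrative): training stops
    after `patience` epochs unless the monitored loss improves on the
    baseline:

    ```python
    callback = tf.keras.callbacks.EarlyStopping(
        monitor='loss', baseline=0.5, patience=3)
    model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
              batch_size=1, callbacks=[callback], verbose=0)
    ```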
    r  r   r  NFc                    s   t    || _|| _|| _|| _t|| _d| _d| _	|| _
d | _|dvr\td| d}|dkrntj| _nH|dkrtj| _n6| jds| jds| jd	rtj| _ntj| _| jtjkr|  jd
9  _n|  jd9  _d S )Nr   r	  z8EarlyStopping mode %s is unknown, fallback to auto mode.r  r
  r  r  accuracyaucr   )r   rq   r  patiencer   baselineabs	min_deltawaitstopped_epochrestore_best_weightsbest_weightsr   r   r   r  r  r  rB  )rm   r  r~  r{  r   r   r|  r  r   r+   r,   rq     s>    








zEarlyStopping.__init__c                 C   s8   d| _ d| _| jtjkrtjntj | _d | _d| _d S r   )	r  r  r  r   r  r  r  r  
best_epochr   r+   r+   r,   r     s
    zEarlyStopping.on_train_beginc                 C   s   |  |}|d u rd S | jr2| jd u r2| j | _|  jd7  _| || jr|| _|| _| jrl| j | _| j	d u s| || j	rd| _| j| j
kr|dkr|| _d| j_| jr| jd ur| jdkrtd| jd  d | j| j d S )Nr   r   Tz8Restoring model weights from the end of the best epoch: r  )get_monitor_valuer  r  r%   get_weightsr  _is_improvementr  r  r|  r{  r  r&   r   r   r   set_weights)rm   r   rK   r:  r+   r+   r,   r     s4    

zEarlyStopping.on_epoch_endc                 C   s0   | j dkr,| jdkr,td| j d  d d S )Nr   r   r   z: early stopping)r  r   r   r   r   r+   r+   r,   r     s    zEarlyStopping.on_train_endc              	   C   s>   |pi }| | j}|d u r:td| jdt|  |S )Nz[Early stopping conditioned on metric `%s` which is not available. Available metrics are: %s,)r   r  r   r   rS  r   keys)rm   rK   monitor_valuer+   r+   r,   r     s    zEarlyStopping.get_monitor_valuec                 C   s   |  || j |S rY   )r  r~  )rm   r  reference_valuer+   r+   r,   r    s    zEarlyStopping._is_improvement)r  r   r   r   r  NF)N)N)N)r[   r   r   r   rq   r   r   r   r  r  r   r+   r+   r   r,   rw  h  s   8       0

"
rw  zkeras.callbacks.RemoteMonitorc                       s,   e Zd ZdZd fdd	Zdd	d
Z  ZS )RemoteMonitora|  Callback used to stream events to a server.

    Requires the `requests` library.
    Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
    HTTP POST, with a `data` argument which is a
    JSON-encoded dictionary of event data.
    If `send_as_json=True`, the content type of the request will be
    `"application/json"`.
    Otherwise the serialized JSON will be sent within a form.

    Args:
      root: String; root url of the target server.
      path: String; path relative to `root` to which the events will be sent.
      field: String; JSON field under which the data will be stored.
          The field is used only if the payload is sent within a form
          (i.e. send_as_json is set to False).
      headers: Dictionary; optional custom HTTP headers.
      send_as_json: Boolean; whether the request should be
          sent as `"application/json"`.
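
    Example:

    A minimal usage sketch (assumes a listener is running at
    `localhost:9000` and that the `requests` library is installed; the
    payload below is sent as a form field):

    ```python
    monitor = tf.keras.callbacks.RemoteMonitor(
        root='http://localhost:9000', path='/publish/epoch/end/', field='data')
    model.fit(x_train, y_train, epochs=2, callbacks=[monitor])
    ```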
    """

    def __init__(
        self,
        root="http://localhost:9000",
        path="/publish/epoch/end/",
        field="data",
        headers=None,
        send_as_json=False,
    ):
        super().__init__()
        self.root = root
        self.path = path
        self.field = field
        self.headers = headers
        self.send_as_json = send_as_json

    def on_epoch_end(self, epoch, logs=None):
        if requests is None:
            raise ImportError("RemoteMonitor requires the `requests` library.")
        logs = logs or {}
        send = {}
        send["epoch"] = epoch
        for k, v in logs.items():
            # NumPy types are not JSON-serializable; convert them to scalars.
            if isinstance(v, (np.ndarray, np.generic)):
                send[k] = v.item()
            else:
                send[k] = v
        try:
            if self.send_as_json:
                requests.post(
                    self.root + self.path, json=send, headers=self.headers
                )
            else:
                requests.post(
                    self.root + self.path,
                    {self.field: json.dumps(send)},
                    headers=self.headers,
                )
        except requests.exceptions.RequestException:
            logging.warning(
                "Warning: could not reach RemoteMonitor root server at "
                + str(self.root)
            )


zRemoteMonitor.on_epoch_end)r  r  rD   NF)N)r[   r   r   r   rq   r   r   r+   r+   r   r,   r    s        r  z%keras.callbacks.LearningRateSchedulerc                       s6   e Zd ZdZd
 fdd	ZdddZddd	Z  ZS )LearningRateSchedulera  Learning rate scheduler.

    At the beginning of every epoch, this callback gets the updated learning
    rate value from the `schedule` function provided at `__init__`, with the
    current epoch and current learning rate, and applies the updated learning
    rate to the optimizer.

    Args:
      schedule: a function that takes an epoch index (integer, indexed from 0)
          and current learning rate (float) as inputs and returns a new
          learning rate as output (float).
      verbose: int. 0: quiet, 1: update messages.

    Example:

    >>> # This function keeps the initial learning rate for the first ten epochs
    >>> # and decreases it exponentially after that.
    >>> def scheduler(epoch, lr):
    ...   if epoch < 10:
    ...     return lr
    ...   else:
    ...     return lr * tf.math.exp(-0.1)
    >>>
    >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
    >>> round(model.optimizer.lr.numpy(), 5)
    0.01

    >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
    >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
    ...                     epochs=15, callbacks=[callback], verbose=0)
    >>> round(model.optimizer.lr.numpy(), 5)
    0.00607
    """

    def __init__(self, schedule, verbose=0):
        super().__init__()
        self.schedule = schedule
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, "lr"):
            raise ValueError('Optimizer must have a "lr" attribute.')
        try:  # new API
            lr = float(backend.get_value(self.model.optimizer.lr))
            lr = self.schedule(epoch, lr)
        except TypeError:  # Support for old API for backward compatibility
            lr = self.schedule(epoch)
        if not isinstance(lr, (tf.Tensor, float, np.float32, np.float64)):
            raise ValueError(
                'The output of the "schedule" function should be a float. '
                f"Got: {lr}"
            )
        if isinstance(lr, tf.Tensor) and not lr.dtype.is_floating:
            raise ValueError(
                f"The dtype of `lr` Tensor should be float. Got: {lr.dtype}"
            )
        backend.set_value(self.model.optimizer.lr, backend.get_value(lr))
        if self.verbose > 0:
            io_utils.print_msg(
                f"\nEpoch {epoch + 1}: LearningRateScheduler setting learning "
                f"rate to {lr}."
            )

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs["lr"] = backend.get_value(self.model.optimizer.lr)


def keras_model_summary(name, data, step=None):
    """Writes a Keras model as JSON to a Summary.

    Writing the Keras model configuration allows the TensorBoard graph plugin to
    render a conceptual graph, as opposed to a graph of ops. In case the model
    fails to serialize as JSON, the error is ignored and False is returned.

    Args:
      name: A name for this summary. The summary tag used for TensorBoard will
        be this name prefixed by any active name scopes.
      data: A Keras Model to write.
      step: Explicit `int64`-castable monotonic step value for this summary. If
        omitted, this defaults to `tf.summary.experimental.get_step()`, which
        must not be None.

    Returns:
      True on success, or False if no summary was written because no default
      summary writer was available.

    Raises:
      ValueError: if a default writer exists, but no step was provided and
        `tf.summary.experimental.get_step()` is None.
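
    Example (a sketch; assumes `model` is a compiled Keras `Model` and the
    log directory is illustrative):

    ```python
    writer = tf.summary.create_file_writer('/tmp/logs')
    with writer.as_default():
      keras_model_summary('my_model', model, step=0)
    ```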
    """
    summary_metadata = tf.compat.v1.SummaryMetadata()
    # Hard coding a plugin name. The graph plugin expects this name when
    # rendering the conceptual graph.
    summary_metadata.plugin_data.plugin_name = "graph_keras_model"
    # version number = 1
    summary_metadata.plugin_data.content = b"1"

    try:
        json_string = data.to_json()
    except Exception as exc:
        # An exception should not break the for loop. The model may be
        # malformed, or fail to serialize for some other reason.
        logging.warning("Model failed to serialize as JSON. Ignoring... %s", exc)
        return False

    with tf.summary.experimental.summary_scope(
        name, "graph_keras_model", [data, step]
    ) as (tag, _):
        with tf.device("cpu:0"):
            tensor = tf.constant(json_string, dtype=tf.string)
        return tf.summary.write(
            tag=tag, tensor=tensor, step=step, metadata=summary_metadata
        )


@keras_export("keras.callbacks.TensorBoard", v1=[])
class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
    """Enable visualizations for TensorBoard.
    TensorBoard is a visualization tool provided with TensorFlow.

    This callback logs events for TensorBoard, including:

    * Metrics summary plots
    * Training graph visualization
    * Weight histograms
    * Sampled profiling

    When used in `Model.evaluate`, in addition to epoch summaries, there will be
    a summary that records evaluation metrics vs `Model.optimizer.iterations`
    written. The metric names will be prepended with `evaluation`, with
    `Model.optimizer.iterations` being the step in the visualized TensorBoard.

    If you have installed TensorFlow with pip, you should be able
    to launch TensorBoard from the command line:

    ```
    tensorboard --logdir=path_to_your_logs
    ```

    You can find more information about TensorBoard
    [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

    Args:
        log_dir: the path of the directory where to save the log files to be
          parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir,
          'logs') This directory should not be reused by any other callbacks.
        histogram_freq: frequency (in epochs) at which to compute
          weight histograms for the layers of the model. If set to 0, histograms
          won't be computed. Validation data (or split) must be specified for
          histogram visualizations.
        write_graph: whether to visualize the graph in TensorBoard. The log file
          can become quite large when write_graph is set to True.
        write_images: whether to write model weights to visualize as image in
          TensorBoard.
        write_steps_per_second: whether to log the training steps per second
          into Tensorboard. This supports both epoch and batch frequency
          logging.
        update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
          writes the losses and metrics to TensorBoard after each batch. The
          same applies for `'epoch'`. If using an integer, let's say `1000`, the
          callback will write the metrics and losses to TensorBoard every 1000
          batches. Note that writing too frequently to TensorBoard can slow down
          your training.
        profile_batch: Profile the batch(es) to sample compute characteristics.
          profile_batch must be a non-negative integer or a tuple of integers.
          A pair of positive integers signify a range of batches to profile.
          By default, profiling is disabled.
        embeddings_freq: frequency (in epochs) at which embedding layers will be
          visualized. If set to 0, embeddings won't be visualized.
        embeddings_metadata: Dictionary which maps embedding layer names to the
          filename of a file in which to save metadata for the embedding layer.
          In case the same metadata file is to be
          used for all embedding layers, a single filename can be passed.

    Examples:

    Basic usage:

    ```python
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
    model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
    # Then run the tensorboard command to view the visualizations.
    ```

    Custom batch-level summaries in a subclassed Model:

    ```python
    class MyModel(tf.keras.Model):

      def build(self, _):
        self.dense = tf.keras.layers.Dense(10)

      def call(self, x):
        outputs = self.dense(x)
        tf.summary.histogram('outputs', outputs)
        return outputs

    model = MyModel()
    model.compile('sgd', 'mse')

    # Make sure to set `update_freq=N` to log a batch-level summary every N
    # batches.  In addition to any `tf.summary` contained in `Model.call`,
    # metrics added in `Model.compile` will be logged every N batches.
    tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
    model.fit(x_train, y_train, callbacks=[tb_callback])
    ```

    Custom batch-level summaries in a Functional API Model:

    ```python
    def my_summary(x):
      tf.summary.histogram('x', x)
      return x

    inputs = tf.keras.Input(10)
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Lambda(my_summary)(x)
    model = tf.keras.Model(inputs, outputs)
    model.compile('sgd', 'mse')

    # Make sure to set `update_freq=N` to log a batch-level summary every N
    # batches. In addition to any `tf.summary` contained in `Model.call`,
    # metrics added in `Model.compile` will be logged every N batches.
    tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
    model.fit(x_train, y_train, callbacks=[tb_callback])
    ```

    Profiling:

    ```python
    # Profile a single batch, e.g. the 5th batch.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir='./logs', profile_batch=5)
    model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])

    # Profile a range of batches, e.g. from 10 to 20.
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir='./logs', profile_batch=(10,20))
    model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
    ```
    rK   r   TFr   Nc
                    s   t    d| _| |
 t|| _|| _|| _|| _	|| _
|dkrJdn|| _|| _|	| _| | d| _d| _d| _d| _i | _g | _d S )NTr   r   r   )r   rq   rQ   _validate_kwargsr   r  log_dirhistogram_freqwrite_graphwrite_imageswrite_steps_per_secondupdate_freqembeddings_freqembeddings_metadata_init_profile_batch_global_train_batch_previous_epoch_iterations_train_accumulated_timerk   _writers_prev_summary_state)rm   r  r  r  r  r  r  profile_batchr  r  r"  r   r+   r,   rq   Q	  s$    


zTensorBoard.__init__c                 C   s   | ddrtd | ddr,td | ddrBtd | ddrXtd	 h d
}t| | }|rtd| d| dS )z&Handle arguments were supported in V1.write_gradsFzO`write_grads` will be ignored in TensorFlow 2.0 for the `TensorBoard` Callback.r   ze`batch_size` is no longer needed in the `TensorBoard` Callback and will be ignored in TensorFlow 2.0.embeddings_layer_nameszp`embeddings_layer_names` is not supported in TensorFlow 2.0. Instead, all `Embedding` layers will be visualized.embeddings_datazl`embeddings_data` is not supported in TensorFlow 2.0. Instead, all `Embedding` variables will be visualized.>   r  r  r  r   z2Unrecognized arguments in `TensorBoard` Callback: z. Supported kwargs are: N)r   r   r   r   r  r}   )rm   r"  supported_kwargsunrecognized_kwargsr+   r+   r,   r  w	  s4    zTensorBoard._validate_kwargsc                 C   sz   || _ |  | _tj| jd| _| j j| _tj| jd| _	| j j
| _i | _d| _| jrh|   d| _| jrv|   dS )z/Sets Keras model and writes graph if specified.r  
validationFTN)r%   _get_log_write_dir_log_write_dirrJ  rK  rS  
_train_dirr   r   _val_dirr   	_val_stepr  _should_write_train_graphr  _write_keras_model_summaryr  _configure_embeddingsr   r+   r+   r,   r#   	  s    


zTensorBoard.set_modelc                 C   s(   d| j vrtj| j| j d< | j d S )Nr  )r  rA   r  create_file_writerr  r   r+   r+   r,   _train_writer	  s
    

zTensorBoard._train_writerc                 C   s(   d| j vrtj| j| j d< | j d S )Nval)r  rA   r  r  r  r   r+   r+   r,   _val_writer	  s    
zTensorBoard._val_writerc                 C   s   t | j| jjS )zBFor multi-worker, only chief should write, others write to '/tmp'.)r   write_dirpathr  r%   r>  r   r+   r+   r,   r  	  s    
zTensorBoard._get_log_write_dirc                 C   s   t | j| jj dS )z/Deletes tmp write directories for multi-worker.N)r   remove_temp_dirpathr  r%   r>  r   r+   r+   r,   _delete_tmp_write_dir	  s    
z!TensorBoard._delete_tmp_write_dirc              	   C   s|   | j  ^ tjd2 | jj}t|dr<tj|j	j W d   n1 sP0    Y  W d   n1 sn0    Y  dS )z7Writes Keras model train_function graph to TensorBoard.Tfunction_specN)
r  
as_defaultrA   r  	record_ifr%   train_tf_functionr@   graph_concrete_stateful_fn)rm   train_fnr+   r+   r,   _write_keras_model_train_graph	  s
    
z*TensorBoard._write_keras_model_train_graphc              	   C   s   | j  f tjd: | jjp.| jjjdk}|rDt	d| jdd W d   n1 sX0    Y  W d   n1 sv0    Y  dS )z2Writes Keras graph network summary to TensorBoard.T
Sequentialkerasr   r   N)
r  r  rA   r  r  r%   _is_graph_networkrZ   r[   r  )rm   summary_writabler+   r+   r,   r  	  s    z&TensorBoard._write_keras_model_summaryc                 C   s  ddl m} ddlm} ddlm} | }| jjD ]d}t	||j
r4|j }d}||_| jdur4t	| jtrx| j|_q4|j| j v r4| j|j|_q4| jrt	| jtstd| j  ||}tj| jd}	tjj|	d	}
|
| W d   n1 s0    Y  dS )
z'Configure the Projector for embeddings.r   )core)projector_config_pb2)text_formatz:layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUENzmUnrecognized `Embedding` layer names passed to `keras.callbacks.TensorBoard` `embeddings_metadata` argument: zprojector_config.pbtxtw)keras.layersr  keras.protobufr  google.protobufr  ProjectorConfigr%   layersr   	Embedding
embeddingsaddtensor_namer  r   metadata_pathr   r  r   r}   MessageToStringrJ  rK  rS  r  rA   rC  rD  GFiler  )rm   r  r  r  configlayer	embeddingr   config_pbtxtrK  fr+   r+   r,   r  	  s8    



z!TensorBoard._configure_embeddingsc                    s^    j dkrdS  fdd}| tj|f} j| |d   |d   dS )z9Sets the default writer for custom batch-level summaries.r   Nc                      s   t  j dS r   )rA   equalr  r+   rm   r   r+   r,   <lambda>
  r3   z*TensorBoard._push_writer.<locals>.<lambda>r   r   )	r  r  valuerA   r  r  r  r    	__enter__)rm   writerr   should_recordsummary_contextr+   r	  r,   _push_writer
  s    

zTensorBoard._push_writerc                 C   s@   | j dkrdS | j }|d jt   |d jt   dS )zPops the current writer.r   Nr   r   )r  r  r   __exit__sysexc_info)rm   previous_contextr+   r+   r,   _pop_writer
  s
    

zTensorBoard._pop_writerc                 C   s   | j  D ]}|  q
d S rY   )r  valuesclose)rm   r  r+   r+   r,   _close_writers)
  s    zTensorBoard._close_writersc                 C   s   d| }t |tr0t|d}tjt|}t |trH|| _|| _n0t |t	t
frpt|dkrp|\| _| _nt|| jdk s| j| jk rt|d| _| jdkr| jdd | jdd d| _| jdko| jdk | _d	S )
a  Validate profile_batch value and set the range of batches to profile.

        Sets values of _start_batch and _stop_batch attributes,
        specifying the start and stop batch to profile.
        Setting `profile_batch=0` disables profiling.

        Args:
          profile_batch: The range of batches to profile. Should be a
            non-negative integer or a comma separated string of pair of positive
            integers. A pair of positive integers signify a range of batches to
            profile.

        Raises:
          ValueError: If profile_batch is not an integer or a comma separated
            pair of positive integers.

        zprofile_batch must be a non-negative integer or 2-tuple of positive integers. A pair of positive integers signifies a range of batches to profile. Found: r     r   FrG   logdirr5  N)r   r   splitrA   r^   map_structurer   _start_batch_stop_batchtupler   r   r}   _profiler_started_start_profiler_stop_profiler_is_tracing_should_trace)rm   r  profile_batch_error_messager+   r+   r,   r  -
  s2    	



zTensorBoard._init_profile_batchc                 C   s    d| _ d| _| | j| j d S r   )r  r  r  r  r   r   r+   r+   r,   r   i
  s    zTensorBoard.on_train_beginc                 C   s*   |    | jr|   |   |   d S rY   )r  r%  _stop_tracer  r  r   r+   r+   r,   r   n
  s
    zTensorBoard.on_train_endc                 C   s   |  | j| j d S rY   )r  r  r  r   r+   r+   r,   r   w
  s    zTensorBoard.on_test_beginc              	   C   s   | j jrt| j jdrtjdp | j F | D ],\}}tjj	d| d || j jj
 d q8W d    n1 sz0    Y  W d    n1 s0    Y  |   d S )N
iterationsTevaluation__vs_iterationsr  )r%   r  r@   rA   r  r  r  r  r   scalarr)  
read_valuer  )rm   rK   r   r  r+   r+   r,   r   z
  s    
DzTensorBoard.on_test_endc                 C   s   | j p
| jS rY   )r&  r  r   r+   r+   r,   rV   
  s    z)TensorBoard._implements_train_batch_hooksc                 C   s@   |  j d7  _ | jrt | _| js(d S | j | jkr<|   d S )Nr   )r  r  r   rk   r&  r  _start_tracer   r+   r+   r,   r   
  s    
z TensorBoard.on_train_batch_beginc                 C   sh   | j r|   d| _ | jr@t | j }tjjdd| | jd | j	sJd S | j
rd| j| jkrd|   d S )NFbatch_steps_per_secondr   r  )r  r  r  r   rk   rA   r  r,  r   r&  r%  r  r   r(  )rm   r   rK   batch_run_timer+   r+   r,   r   
  s    zTensorBoard.on_train_batch_endc                 C   s$   | j r | jjj | _t | _d S rY   )r  r%   r  r)  r   r  r   _epoch_start_timer   r+   r+   r,   r   
  s    zTensorBoard.on_epoch_beginc                 C   sL   |  || | jr*|| j dkr*| | | jrH|| j dkrH| | dS )z2Runs metrics and histogram summaries at epoch end.r   N)_log_epoch_metricsr  _log_weightsr  _log_embeddingsr   r+   r+   r,   r   
  s
    
zTensorBoard.on_epoch_endc                 C   s(   t jjddd | j| jd d| _d S )NTF)r  profilerr  )rA   r  trace_onr#  r  r%  r   r+   r+   r,   r.  
  s    zTensorBoard._start_tracec              	   C   s   |du r| j }| j P tjd$ tjjd| |d W d   n1 sP0    Y  W d   n1 sn0    Y  |   d| _dS )z$Logs the trace graph to TensorBoard.NTzbatch_%d)r   r   F)	r   r  r  rA   r  r  trace_exportr$  r%  )rm   r   r+   r+   r,   r(  
  s    PzTensorBoard._stop_tracec                 C   s2   t | jjdd }t|tjr.|| jjj|d< |S )Nr  learning_rate)rR   r%   r  r   r   LearningRateScheduler)  )rm   rK   lr_scheduler+   r+   r,   _collect_learning_rate
  s    z"TensorBoard._collect_learning_ratec                 C   s.   | j jj }t | j }|| j | }|S rY   )r%   r  r)  r   r   r1  r  )rm   current_iterationZtime_since_epoch_beginsteps_per_secondr+   r+   r,   _compute_steps_per_second
  s    z%TensorBoard._compute_steps_per_secondc              	   C   s6  |sdS dd |  D }dd |  D }| |}| jrH|  |d< tjd |r| j 8 |  D ]\}}tjj	d| ||d qnW d   n1 s0    Y  |r| j
 D |  D ]*\}}|d	d }tjj	d| ||d qW d   n1 s0    Y  W d   n1 s(0    Y  dS )
zWrites epoch metrics out as scalar summaries.

        Args:
            epoch: Int. The global step to use for TensorBoard.
            logs: Dict. Keys are scalar summary names, values are scalars.
        Nc                 S   s    i | ]\}}| d s||qS r.   r  r0   r   r   r+   r+   r,   
<dictcomp>
  r3   z2TensorBoard._log_epoch_metrics.<locals>.<dictcomp>c                 S   s    i | ]\}}| d r||qS r.   r?  r@  r+   r+   r,   rA  
  r3   r=  Tepoch_r     )r   r;  r  r>  rA   r  r  r  r  r,  r  )rm   r   rK   
train_logsval_logsr   r  r+   r+   r,   r2  
  s"    
6zTensorBoard._log_epoch_metricsc              	   C   s   | j   tjdx | jjD ]T}|jD ]H}|j	dd}|d }tjj
|||d | jr,|d }| ||| q,q"| j   W d   n1 s0    Y  W d   n1 s0    Y  dS )z-Logs the weights of the Model to TensorBoard.T:r  z
/histogramr  z/imageN)r  r  rA   r  r  r%   r  weightsr   replace	histogramr  _log_weight_as_imageflush)rm   r   r  weightweight_nameZhistogram_weight_nameZimage_weight_namer+   r+   r,   r3  
  s    
zTensorBoard._log_weightsc                 C   s  t |}t|}t|dkr:t |d|d ddg}nt|dkr|d |d krjt |}t|}t |d|d |d dg}nTt|dkrt dkrt j|g dd}t|}t ||d |d |d dg}t|}t|dkr|d	 d
v rt jj	|||d dS )z%Logs a weight as a TensorBoard image.r   r   r     channels_last)r  r   r   )permrC  rz  )r   rN  rC  r  N)
rA   squeezer   	int_shaper   reshape	transposeimage_data_formatr  image)rm   rL  rM  r   w_imgshaper+   r+   r,   rJ    s"    




 
z TensorBoard._log_weight_as_imagec                 C   s(   t j| jdd|}| j| d S )Nr  zkeras_embedding.ckpt-{})rJ  rK  rS  r  r   r%   r4  )rm   r   embeddings_ckptr+   r+   r,   r4    s    zTensorBoard._log_embeddingsc              
   C   s`   | j r
dS ztjjj|d d| _ W n6 tjjyZ } ztd|j	 W Y d}~n
d}~0 0 dS )zStarts the profiler if currently inactive.

        Args:
          logdir: Directory where profiler results will be saved.
        Nr  TzFailed to start profiler: %s)
r"  rA   r5  re  starterrorsAlreadyExistsErrorr   errormessage)rm   r  r(  r+   r+   r,   r#  %  s    
zTensorBoard._start_profilerc              
   C   sn   | j s
dS zVztjjj|d W n6 tjjyV } ztd|j	 W Y d}~n
d}~0 0 W d| _ nd| _ 0 dS )zStops the profiler if currently active.

        Args:
          save: Whether to save the profiler results to TensorBoard.
        Nr  zFailed to stop profiler: %sF)
r"  rA   r5  re  stopr[  UnavailableErrorr   r]  r^  )rm   r5  r(  r+   r+   r,   r$  4  s    &zTensorBoard._stop_profiler)	rK   r   TFFr   r   r   N)N)N)N)N)N)N)N)N)N)T)'r[   r   r   r   rq   r  r#   propertyr  r  r  r  r  r  r  r  r  r  r  r   r   r   r   rV   r   r   r   r   r.  r(  r;  r>  r2  r3  rJ  r4  r#  r$  r   r+   r+   r   r,   r    s\             &*


+<

	








r  z!keras.callbacks.ReduceLROnPlateauc                       sF   e Zd ZdZd fdd		Zd
d ZdddZdddZdd Z  Z	S )ReduceLROnPlateaua  Reduce learning rate when a metric has stopped improving.

    Models often benefit from reducing the learning rate by a factor
    of 2-10 once learning stagnates. This callback monitors a
    quantity and if no improvement is seen for a 'patience' number
    of epochs, the learning rate is reduced.

    Example:

    ```python
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=5, min_lr=0.001)
    model.fit(X_train, Y_train, callbacks=[reduce_lr])
    ```

    Args:
        monitor: quantity to be monitored.
        factor: factor by which the learning rate will be reduced.
          `new_lr = lr * factor`.
        patience: number of epochs with no improvement after which learning rate
          will be reduced.
        verbose: int. 0: quiet, 1: update messages.
        mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
          the learning rate will be reduced when the
          quantity monitored has stopped decreasing; in `'max'` mode it will be
          reduced when the quantity monitored has stopped increasing; in
          `'auto'` mode, the direction is automatically inferred from the name
          of the monitored quantity.
        min_delta: threshold for measuring the new optimum, to only focus on
          significant changes.
        cooldown: number of epochs to wait before resuming normal operation
          after lr has been reduced.
        min_lr: lower bound on the learning rate.
    """

    def __init__(
        self,
        monitor="val_loss",
        factor=0.1,
        patience=10,
        verbose=0,
        mode="auto",
        min_delta=1e-4,
        cooldown=0,
        min_lr=0,
        **kwargs,
    ):
        super().__init__()

        self.monitor = monitor
        if factor >= 1.0:
            raise ValueError(
                "ReduceLROnPlateau does not support "
                f"a factor >= 1.0. Got {factor}"
            )
        if "epsilon" in kwargs:
            min_delta = kwargs.pop("epsilon")
            logging.warning(
                "`epsilon` argument is deprecated and "
                "will be removed, use `min_delta` instead."
            )
        self.factor = factor
        self.min_lr = min_lr
        self.min_delta = min_delta
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0  # Cooldown counter.
        self.wait = 0
        self.best = 0
        self.mode = mode
        self.monitor_op = None
        self._reset()

    def _reset(self):
        """Resets wait counter and cooldown counter."""
        if self.mode not in ["auto", "min", "max"]:
            logging.warning(
                "Learning rate reduction mode %s is unknown, "
                "fallback to auto mode.",
                self.mode,
            )
            self.mode = "auto"
        if self.mode == "min" or (
            self.mode == "auto" and "acc" not in self.monitor
        ):
            self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
            self.best = np.Inf
        else:
            self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
            self.best = -np.Inf
        self.cooldown_counter = 0
        self.wait = 0

    def on_train_begin(self, logs=None):
        self._reset()

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs["lr"] = backend.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            logging.warning(
                "Learning rate reduction is conditioned on metric `%s` "
                "which is not available. Available metrics are: %s",
                self.monitor,
                ",".join(list(logs.keys())),
            )
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                self.wait += 1
                if self.wait >= self.patience:
                    old_lr = backend.get_value(self.model.optimizer.lr)
                    if old_lr > np.float32(self.min_lr):
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        backend.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            io_utils.print_msg(
                                f"\nEpoch {epoch + 1}: ReduceLROnPlateau "
                                f"reducing learning rate to {new_lr}."
                            )
                        self.cooldown_counter = self.cooldown
                        self.wait = 0

    def in_cooldown(self):
        return self.cooldown_counter > 0


@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
    """Callback that streams epoch results to a CSV file.

    Supports all values that can be represented as a string,
    including 1D iterables such as `np.ndarray`.

    Example:

    ```python
    csv_logger = CSVLogger('training.log')
    model.fit(X_train, Y_train, callbacks=[csv_logger])
    ```

    Args:
        filename: Filename of the CSV file, e.g. `'run/log.csv'`.
        separator: String used to separate elements in the CSV file.
        append: Boolean. True: append if file exists (useful for continuing
            training). False: overwrite existing file.
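
    A sketch of `append=True`, which lets logging continue across separate
    `fit` calls (the file name is illustrative):

    ```python
    csv_logger = CSVLogger('training.log', append=True)
    model.fit(X_train, Y_train, epochs=5, callbacks=[csv_logger])
    model.fit(X_train, Y_train, epochs=5, callbacks=[csv_logger])  # appends rows
    ```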
    r  Fc                    s8   || _ t|| _|| _d | _d | _d| _t 	  d S r   )
sepr   r  filenamer    r  r  append_headerr   rq   )rm   rt  	separatorr    r   r+   r,   rq     s    zCSVLogger.__init__Nc                 C   s~   | j rbtjj| jr\tjj| jd$}tt|	  | _
W d    n1 sR0    Y  d}nd}tjj| j|| _d S )Nrrm  r  )r    rA   rC  rD  rE  rt  r  r`  r   readlineru  csv_file)rm   rK   r  r   r+   r+   r,   r     s    2zCSVLogger.on_train_beginc                    s   pi dd  j d u r(t  _ jjrHtfddj D jsG fdddtj}dgj  }tjj	||d_j
rj  td|i}| fd	dj D  j| j	  d S )
Nc                 S   sR   t | tjo| jdk}t | tr$| S t | tjjrJ|sJddt	t|  S | S d S )Nr   z"[%s]"z, )
r   r   r  ndimr   collectionsabcIterablerS  map)r   is_zero_dim_ndarrayr+   r+   r,   handle_value  s    
z,CSVLogger.on_epoch_end.<locals>.handle_valuec                 3   s*   | ]"}| v r| | fn|d fV  qdS )NANr+   )r0   r   r   r+   r,   rU     s   z)CSVLogger.on_epoch_end.<locals>.<genexpr>c                       s   e Zd Z jZdS )z-CSVLogger.on_epoch_end.<locals>.CustomDialectN)r[   r   r   rs  	delimiterr+   r   r+   r,   CustomDialect  s   r  r   )
fieldnamesdialectc                 3   s   | ]}| | fV  qd S rY   r+   )r0   key)r  rK   r+   r,   rU     r3   )r  sortedr%   r&   dictr  csvexcel
DictWriterry  ru  writeheaderr{  OrderedDictr   writerowrK  )rm   r   rK   r  r  row_dictr+   )r  rK   rm   r,   r     s(    

zCSVLogger.on_epoch_endc                 C   s   | j   d | _d S rY   )ry  r  r  r   r+   r+   r,   r   #  s    
zCSVLogger.on_train_end)r  F)N)N)N)	r[   r   r   r   rq   r   r   r   r   r+   r+   r   r,   rr    s
   	


*rr  zkeras.callbacks.LambdaCallbackc                       s"   e Zd ZdZd fdd	Z  ZS )LambdaCallbacka  Callback for creating simple, custom callbacks on-the-fly.

    This callback is constructed with anonymous functions that will be called
    at the appropriate time (during `Model.{fit | evaluate | predict}`).
    Note that the callback expects positional arguments, as:

    - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
      `epoch`, `logs`
    - `on_batch_begin` and `on_batch_end` expect two positional arguments:
      `batch`, `logs`
    - `on_train_begin` and `on_train_end` expect one positional argument:
      `logs`

    Args:
        on_epoch_begin: called at the beginning of every epoch.
        on_epoch_end: called at the end of every epoch.
        on_batch_begin: called at the beginning of every batch.
        on_batch_end: called at the end of every batch.
        on_train_begin: called at the beginning of model training.
        on_train_end: called at the end of model training.

    Example:

    ```python
    # Print the batch number at the beginning of every batch.
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch,logs: print(batch))

    # Stream the epoch loss to a file in JSON format. The file content
    # is not well-formed JSON but rather has a JSON object per line.
    import json
    json_log = open('loss_log.json', mode='wt', buffering=1)
    json_logging_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: json_log.write(
            json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
        on_train_end=lambda logs: json_log.close()
    )

    # Terminate some processes after having finished model training.
    processes = ...
    cleanup_callback = LambdaCallback(
        on_train_end=lambda logs: [
            p.terminate() for p in processes if p.is_alive()])

    model.fit(...,
              callbacks=[batch_print_callback,
                         json_logging_callback,
                         cleanup_callback])
    ```
    """

    def __init__(
        self,
        on_epoch_begin=None,
        on_epoch_end=None,
        on_batch_begin=None,
        on_batch_end=None,
        on_train_begin=None,
        on_train_end=None,
        **kwargs,
    ):
        super().__init__()
        self.__dict__.update(kwargs)
        if on_epoch_begin is not None:
            self.on_epoch_begin = on_epoch_begin
        if on_epoch_end is not None:
            self.on_epoch_end = on_epoch_end
        if on_batch_begin is not None:
            self.on_batch_begin = on_batch_begin
        if on_batch_end is not None:
            self.on_batch_end = on_batch_end
        if on_train_begin is not None:
            self.on_train_begin = on_train_begin
        if on_train_end is not None:
            self.on_train_end = on_train_end