"""Part of the Keras training engine related to distributed training."""

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend
from keras import callbacks as cbks
from keras.distribute import distribute_coordinator_utils as dc
from keras.distribute import distributed_training_utils_v1 as dist_utils
from keras.engine import partial_batch_padding_handler as padding_util
from keras.engine import training_arrays_v1
from keras.engine import training_utils_v1
from keras.utils.generic_utils import Progbar
from keras.utils.mode_keys import ModeKeys

from tensorflow.python.distribute import input_lib
from tensorflow.python.platform import tf_logging as logging


def _per_replica_execution_function(model, mode):
    exec_func = model._make_execution_function(mode)
    return (
        exec_func.inputs,
        exec_func.outputs,
        exec_func.updates_op,
        exec_func.session_kwargs,
    )


def _build_model(strategy, model, mode, inputs, targets=None):
    if model._compile_distribution:
        dist_utils.clone_model_on_replicas(
            model, strategy, mode, inputs=inputs, targets=targets)
    else:
        dist_utils._build_distributed_network(model, strategy, mode, inputs, targets)


def _make_train_step_fn(model, mode, strategy, output_labels):
    """Create step fn.

    Args:
      model: a Keras Model instance.
      mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
      strategy: a `tf.distribute.Strategy` instance.
      output_labels: the output labels for the step function.

    Returns:
      A step function to run by `tf.distribute.Strategy`.
    """

    def _step_fn(ctx, inputs):
        """A step fn that returns update ops."""
        if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
            inputs, targets = inputs
        else:
            targets = None

        # When the input feature is a dict of tensors, flatten it to a list
        # ordered by the model's feed input names so that the tensors line up
        # with the model's input placeholders.
        if isinstance(inputs, dict):
            inputs = [inputs[input_name] for input_name in model._feed_input_names]

        _build_model(strategy, model, mode, inputs, targets)

        (grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args) = strategy.extended.call_for_each_replica(
            _per_replica_execution_function,
            args=(dist_utils.get_distributed_model(model, mode), mode))
        (all_inputs, all_outputs, all_updates,
         all_session_args) = dist_utils.unwrap_values(
            strategy, grouped_inputs, grouped_outputs, grouped_updates,
            grouped_session_args)
        combined_fn = backend.function(
            all_inputs,
            all_outputs,
            updates=all_updates,
            name="distributed_" + str(mode) + "_function",
            **all_session_args)

        for label, output in zip(output_labels, combined_fn.outputs):
            if label == "loss":
                reduce_op = tf.distribute.ReduceOp.SUM
            else:
                # All other metrics are reduced with a mean across replicas.
                reduce_op = tf.distribute.ReduceOp.MEAN
            ctx.set_last_step_output(label, output, reduce_op)

        return combined_fn.updates_op

    return _step_fn


def experimental_tpu_fit_loop(model, dataset, epochs=100, verbose=1, callbacks=None,
                              initial_epoch=0, steps_per_epoch=None, val_dataset=None,
                              validation_steps=None, validation_freq=1):
    """Fit loop for training with TPU tf.distribute.Strategy.

    Args:
        model: Keras Model instance.
        dataset: Dataset that returns inputs and targets
        epochs: Number of times to iterate over the data
        verbose: Integer, Verbosity mode, 0, 1 or 2
        callbacks: List of callbacks to be called during training
        initial_epoch: Epoch at which to start training
            (useful for resuming a previous training run)
        steps_per_epoch: Total number of steps (batches of samples)
            before declaring one epoch finished and starting the
            next epoch. Ignored with the default value of `None`.
        val_dataset: Dataset for validation data.
        validation_steps: Number of steps to run validation for
            (only if doing validation from data tensors).
            Ignored with the default value of `None`.
        validation_freq: Only relevant if validation data is provided. Integer
            or `collections.abc.Container` instance (e.g. list, tuple, etc.). If
            an integer, specifies how many training epochs to run before a new
            validation run is performed, e.g. `validation_freq=2` runs
            validation every 2 epochs. If a Container, specifies the epochs on
            which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
            validation at the end of the 1st, 2nd, and 10th epochs.

    Returns:
        The model's `History` object.

    Raises:
        ValueError: in case of invalid arguments.
    """
    mode = ModeKeys.TRAIN

    current_strategy = model._distribution_strategy
    iteration_value = min(steps_per_epoch, current_strategy.extended.steps_per_run)
    steps_per_run = backend.variable(
        value=iteration_value, dtype="int32", name="steps_per_run")
    iterator = dist_utils.get_iterator(dataset, current_strategy)

    scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=1)
    scope.__enter__()

    out_labels = model.metrics_names or []

    step_fn = _make_train_step_fn(model, ModeKeys.TRAIN, current_strategy, out_labels)

    # Add initial dummy values for loss and other metric tensors.
    initial_loop_values = {}
    initial_loop_values["loss"] = tf.constant(1e7)
    for m in model._get_training_eval_metrics():
        tensor = m.result()
        initial_loop_values[m.name] = tf.zeros(tensor.shape, tensor.dtype)

    ctx = current_strategy.extended.experimental_run_steps_on_iterator(
        step_fn, iterator, iterations=steps_per_run,
        initial_loop_values=initial_loop_values)
    train_op = ctx.run_op
    output_tensors = ctx.last_step_outputs

    do_validation = bool(validation_steps)

    if model._compile_distribution:
        dist_utils._copy_weights_to_distributed_model(model, mode)

    callbacks = cbks.configure_callbacks(
        callbacks, model, do_validation=do_validation, epochs=epochs,
        steps_per_epoch=steps_per_epoch, verbose=verbose, count_mode="steps",
        mode=mode)

    # Calculate the steps each time on the device.
    steps_to_run = [current_strategy.extended.steps_per_run] * (
        steps_per_epoch // current_strategy.extended.steps_per_run)
    if steps_per_epoch % current_strategy.extended.steps_per_run:
        steps_to_run.append(steps_per_epoch % current_strategy.extended.steps_per_run)
    target_steps = len(steps_to_run)

    callbacks._call_begin_hook(mode)

    initial_epoch = model._maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)

    for epoch in range(initial_epoch, epochs):
        dist_utils._reset_metrics(model)
        callbacks.on_epoch_begin(epoch)
        epoch_logs = {}
        step_index = 0
        prev_step_count = None
        current_step = 0
        while current_step < target_steps:
            step_count = steps_to_run[current_step]
            batch_logs = {"batch": step_index, "size": 1, "num_steps": step_count}
            callbacks._call_batch_hook(mode, "begin", step_index, batch_logs)
            if prev_step_count is None or step_count != prev_step_count:
                backend.get_session().run(steps_per_run.assign(step_count))
                prev_step_count = step_count
            try:
                _, outputs = backend.batch_get_value([train_op, output_tensors])
            except tf.errors.OutOfRangeError:
                logging.warning(
                    "Your dataset iterator ran out of data; interrupting "
                    "training. Make sure that your dataset can generate at "
                    "least `steps_per_epoch * epochs` batches (in this case, "
                    "%d batches)." % (steps_per_epoch * epochs))
                break

            batch_logs.update(outputs)
            callbacks._call_batch_hook(mode, "end", step_index, batch_logs)
            step_index = step_index + step_count
            current_step += 1

            if callbacks.model.stop_training:
                break

        if do_validation and training_utils_v1.should_run_validation(
                validation_freq, epoch):
            logging.info("Running validation at fit epoch: %s", epoch)

            if model._compile_distribution:
                # The weights of the cloned model must be copied back to the
                # original model before validation can run on it.
                dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)

            val_outs = experimental_tpu_test_loop(
                model, val_dataset, steps=validation_steps, verbose=verbose,
                callbacks=callbacks)
            if not isinstance(val_outs, list):
                val_outs = [val_outs]
            # Same labels assumed.
            for label, val_out in zip(out_labels, val_outs):
                epoch_logs["val_" + label] = val_out

        callbacks.on_epoch_end(epoch, epoch_logs)
        if callbacks.model.stop_training:
            break

    model._successful_loop_finish = True
    callbacks._call_end_hook(mode)

    scope.__exit__(None, None, None)
    if model._compile_distribution:
        dist_utils._copy_weights_to_original_model(model, ModeKeys.TRAIN)
    return model.history


def experimental_tpu_test_loop(model, dataset, verbose=0, steps=None, callbacks=None):
    """Test loop for evaluating with TPU tf.distribute.Strategy.

    Args:
        model: Keras Model instance.
        dataset: Dataset for input data.
        verbose: Integer, Verbosity mode 0 or 1.
        steps: Total number of steps (batches of samples)
            before declaring the evaluation round finished.
            Ignored with the default value of `None`.
        callbacks: List of callbacks to be called during evaluation

    Returns:
        Scalar loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the outputs.
    """
    mode = ModeKeys.TEST
    current_strategy = model._distribution_strategy
    iterator = dist_utils.get_iterator(dataset, current_strategy)

    scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=0)
    scope.__enter__()

    out_labels = model.metrics_names

    def _test_step_fn(inputs):
        """A fn that returns output of single test step."""
        if isinstance(inputs, (tuple, list)) and len(inputs) == 2:
            inputs, targets = inputs
        else:
            targets = None

        (tf.distribute.get_replica_context().merge_call(
            _build_model, args=(model, mode, inputs, targets)))

        (_, outputs, updates, _) = _per_replica_execution_function(
            dist_utils.get_distributed_model(model, mode), mode)
        with tf.control_dependencies([updates]):
            return [tf.identity(out) for out in outputs]

    test_input_data = iterator.get_next()
    per_replica_outputs = current_strategy.run(_test_step_fn, args=(test_input_data,))
    output_tensors = {}
    for label, output in zip(out_labels, per_replica_outputs):
        if label == "loss":
            reduce_op = tf.distribute.ReduceOp.SUM
        else:
            # All other metrics are reduced with a mean across replicas.
            reduce_op = tf.distribute.ReduceOp.MEAN
        output_tensors[label] = current_strategy.reduce(reduce_op, output, axis=None)
    test_op = tf.group(list(output_tensors.values()))

    if verbose >= 1:
        progbar = Progbar(target=steps)

    if model._compile_distribution:
        dist_utils._copy_weights_to_distributed_model(model, mode)

    dist_utils._reset_metrics(model)

    callbacks = cbks.configure_callbacks(
        callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps,
        verbose=verbose, count_mode="steps", mode=ModeKeys.TEST)
    callbacks._call_begin_hook(mode)

    outs = [0.0] * len(model.metrics_names)
    if steps is not None:
        target_steps = steps
    else:
        raise ValueError(
            "Number of steps could not be inferred from the data, please pass "
            "the steps argument.")

    current_step = 0
    while current_step < target_steps:
        batch_logs = {"batch": current_step, "size": 1}
        callbacks._call_batch_hook(mode, "begin", current_step, batch_logs)
        try:
            _, batch_outs = backend.batch_get_value([test_op, output_tensors])
        except tf.errors.OutOfRangeError:
            warning_msg = (
                "Make sure that your dataset can generate at least "
                "`steps` batches (in this case, {} batches).".format(steps))
            logging.warning(
                "Your dataset iterator ran out of data; interrupting "
                "evaluation. " + warning_msg)
            target_steps = current_step
            break
        for i, label in enumerate(model.metrics_names):
            if i == 0:
                # Loss is accumulated here and averaged over steps below.
                outs[i] += batch_outs[label]
            else:
                # Stateful metrics aggregate themselves across batches.
                outs[i] = batch_outs[label]

        batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
        callbacks._call_batch_hook(mode, "end", current_step, batch_logs)
        if verbose == 1:
            progbar.update(current_step + 1)
        current_step += 1

    if verbose >= 1:
        # Progress bar finishes at the end.
        progbar.update(target_steps)
    callbacks._call_end_hook(mode)

    scope.__exit__(None, None, None)
    if len(outs) >= 0:
        outs[0] /= target_steps

    if len(outs) == 1:
        return outs[0]
    return outs


def experimental_tpu_predict_loop(model, dataset, verbose=0, steps=None,
                                  callbacks=None):
    """Predict loop for predicting with TPU tf.distribute.Strategy.

    Args:
        model: Keras Model instance.
        dataset: Dataset for input data.
        verbose: Integer, Verbosity mode 0 or 1.
        steps: Total number of steps (batches of samples)
            before declaring `_predict_loop` finished.
            Ignored with the default value of `None`.
        callbacks: List of callbacks to be called during prediction

    Returns:
        Array of predictions (if the model has a single output)
        or list of arrays of predictions
        (if the model has multiple outputs).
    """
    mode = ModeKeys.PREDICT
    dataset_fully_shaped = dist_utils.is_dataset_shape_fully_defined(dataset)
    padding_handler = None
    if not dataset_fully_shaped:
        # TPU inference requires static shapes, so the last (possibly partial)
        # batch is padded up to the full batch size and masked out again below.
        padding_handler = padding_util.PartialBatchPaddingHandler(
            model._feed_output_shapes)
        batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset)
        padding_handler.padded_batch_size = batch_size
        padding_handler.padding_mask = dataset.reduce(
            padding_handler.padding_mask, padding_handler.update_mask)

        dataset = dataset.map(padding_handler.pad_batch)
        dataset = dataset.unbatch()
        # The padded dataset has no partial batches, so `drop_remainder=True`
        # gives static shape information about the elements in the dataset.
        dataset = dataset.batch(batch_size, drop_remainder=True)

        if prefetch_buffer is not None:
            dataset = dataset.prefetch(prefetch_buffer)

    current_strategy = model._distribution_strategy
    iterator = dist_utils.get_iterator(dataset, current_strategy)

    scope = dist_utils.distributed_scope(strategy=current_strategy, learning_phase=0)
    scope.__enter__()

    def _predict_step_fn(inputs):
        """A fn that returns output of single prediction step."""
        (tf.distribute.get_replica_context().merge_call(
            _build_model, args=(model, mode, inputs)))

        (_, outputs, updates, _) = _per_replica_execution_function(
            dist_utils.get_distributed_model(model, mode), mode)

        with tf.control_dependencies([updates]):
            return [tf.identity(out) for out in outputs]

    predict_input_data = iterator.get_next()
    per_replica_outputs = current_strategy.run(
        _predict_step_fn, args=(predict_input_data,))
    output_tensors = dist_utils.flatten_per_replica_values(
        current_strategy, per_replica_outputs)

    if verbose >= 1:
        progbar = Progbar(target=steps)

    if model._compile_distribution:
        dist_utils._copy_weights_to_distributed_model(model, mode)

    dist_utils._reset_metrics(model)

    callbacks = cbks.configure_callbacks(
        callbacks, model, do_validation=False, epochs=1, steps_per_epoch=steps,
        verbose=verbose, count_mode="steps", mode=mode)
    callbacks._call_begin_hook(mode)

    # The number of samples is not known in advance, so outputs are collected
    # per batch and concatenated at the end.
    num_model_outputs = len(model.output_names)
    unconcatenated_outs = [[] for _ in range(num_model_outputs)]
    if steps is not None:
        target_steps = steps
    else:
        raise ValueError(
            "Number of steps could not be inferred from the data, please pass "
            "the steps argument.")

    current_step = 0
    while current_step < target_steps:
        batch_logs = {"batch": current_step, "size": 1}
        callbacks._call_batch_hook(mode, "begin", current_step, batch_logs)
        try:
            predict_ops = tf.group(output_tensors)
            _, batch_outs = backend.batch_get_value([predict_ops, output_tensors])
        except tf.errors.OutOfRangeError:
            warning_msg = (
                "Make sure that your dataset can generate at least "
                "`steps` batches (in this case, {} batches).".format(steps))
            logging.warning(
                "Your dataset iterator ran out of data; interrupting "
                "prediction. " + warning_msg)
            break

        # Each model output yields one tensor per replica; regroup the flat
        # list by model output before concatenation.
        for i in range(num_model_outputs):
            output_start_index = i * current_strategy.num_replicas_in_sync
            output_end_index = (
                output_start_index + current_strategy.num_replicas_in_sync)
            single_model_output = batch_outs[output_start_index:output_end_index]
            unconcatenated_outs[i].extend(single_model_output)

        batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
        callbacks._call_batch_hook(mode, "end", current_step, batch_logs)
        if verbose == 1:
            progbar.update(current_step + 1)
        current_step += 1

    if verbose >= 1:
        # Progress bar finishes at the end.
        progbar.update(target_steps)
    callbacks._call_end_hook(mode)

    scope.__exit__(None, None, None)

    if len(unconcatenated_outs) == 1:
        prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
    else:
        prediction_result = [
            np.concatenate(out, axis=0) for out in unconcatenated_outs]

    if padding_handler:
        prediction_result = padding_handler.apply_mask(prediction_result)
    return prediction_result
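

# Illustration only -- not part of the original Keras module. The predict loop
# above pads the final partial batch up to the full TPU batch size with
# `PartialBatchPaddingHandler` and strips the padded rows from the
# concatenated result with `apply_mask`. The small NumPy sketch below shows
# that idea in isolation; the helper name `_pad_and_mask_example` and its
# arguments are assumptions made for this sketch, not Keras APIs.
def _pad_and_mask_example(batches, batch_size):
    """Pad partial batches to `batch_size`, then drop the padded rows again."""
    mask = []
    padded = []
    for batch in batches:  # each `batch` is an ndarray with shape[0] <= batch_size
        n = batch.shape[0]
        pad = batch_size - n
        if pad:
            filler = np.zeros((pad,) + batch.shape[1:], dtype=batch.dtype)
            batch = np.concatenate([batch, filler], axis=0)
        padded.append(batch)
        mask.extend([True] * n + [False] * pad)
    # Stand-in for the per-batch model outputs that the real loop collects.
    outputs = np.concatenate(padded, axis=0)
    # Analogous to `padding_handler.apply_mask(prediction_result)` above.
    return outputs[np.asarray(mask)]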


class DistributionSingleWorkerTrainingLoop(training_utils_v1.TrainingLoop):
    """Training loop for distribution strategy with single worker."""

    def fit(self, model, x=None, y=None, batch_size=None, epochs=1, verbose=1,
            callbacks=None, validation_split=0.0, validation_data=None, shuffle=True,
            class_weight=None, sample_weight=None, initial_epoch=0,
            steps_per_epoch=None, validation_steps=None, validation_freq=1, **kwargs):
        """Fit loop for Distribution Strategies."""
        dist_utils.validate_callbacks(input_callbacks=callbacks,
                                      optimizer=model.optimizer)
        dist_utils.validate_inputs(x, y)

        batch_size, steps_per_epoch = dist_utils.process_batch_and_step_size(
            model._distribution_strategy, x, batch_size, steps_per_epoch,
            ModeKeys.TRAIN, validation_split=validation_split)
        batch_size = model._validate_or_infer_batch_size(
            batch_size, steps_per_epoch, x)
        dataset = model._distribution_standardize_user_data(
            x, y, sample_weight=sample_weight, class_weight=class_weight,
            batch_size=batch_size, validation_split=validation_split,
            shuffle=shuffle, epochs=epochs)
        if not dist_utils.is_distributing_by_cloning(model):
            with model._distribution_strategy.scope():
                (dataset, _, _) = model._standardize_user_data(
                    dataset, sample_weight=sample_weight, class_weight=class_weight,
                    batch_size=batch_size, validation_split=validation_split,
                    shuffle=shuffle)

        val_dataset = None
        if validation_data:
            (val_x, val_y, val_sample_weights
             ) = training_utils_v1.unpack_validation_data(validation_data)
            dist_utils.validate_inputs(val_x, val_y)
            _, validation_steps = dist_utils.process_batch_and_step_size(
                model._distribution_strategy, val_x, batch_size, validation_steps,
                ModeKeys.TEST)

            val_dataset = model._distribution_standardize_user_data(
                val_x, val_y, sample_weight=val_sample_weights, class_weight=None,
                batch_size=batch_size, validation_split=validation_split,
                shuffle=shuffle, allow_partial_batch=True)
        elif validation_split:
            raise ValueError(
                "validation_split argument is not supported with "
                "distribution strategies.")

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            steps_per_epoch = training_utils_v1.infer_steps_for_dataset(
                model, dataset, steps_per_epoch, epochs,
                steps_name="steps_per_epoch")
            if steps_per_epoch is None:
                raise ValueError(
                    "Number of steps could not be inferred from the data, "
                    "please pass the steps_per_epoch argument.")

            if not tf.executing_eagerly():
                # Run TPU training in a custom loop in graph mode.
                return experimental_tpu_fit_loop(
                    model, dataset, epochs=epochs, verbose=verbose,
                    callbacks=callbacks, val_dataset=val_dataset,
                    initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch,
                    validation_steps=validation_steps,
                    validation_freq=validation_freq)

        return training_arrays_v1.fit_loop(
            model, dataset, batch_size=batch_size, epochs=epochs, verbose=verbose,
            callbacks=callbacks, val_inputs=val_dataset, shuffle=shuffle,
            initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch,
            validation_steps=validation_steps, validation_freq=validation_freq,
            steps_name="steps_per_epoch")

    def evaluate(self, model, x=None, y=None, batch_size=None, verbose=1,
                 sample_weight=None, steps=None, callbacks=None, **kwargs):
        """Evaluate loop for Distribution Strategies."""
        dist_utils.validate_inputs(x, y)
        batch_size, steps = dist_utils.process_batch_and_step_size(
            model._distribution_strategy, x, batch_size, steps, ModeKeys.TEST)
        batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
        dataset = model._distribution_standardize_user_data(
            x, y, sample_weight=sample_weight, batch_size=batch_size,
            allow_partial_batch=True)

        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            steps = training_utils_v1.infer_steps_for_dataset(
                model, dataset, steps, steps_name="steps")
            if steps is None:
                raise ValueError(
                    "Number of steps could not be inferred from the data, "
                    "please pass the steps argument.")

            if not tf.executing_eagerly():
                # Run TPU evaluation in a custom loop in graph mode.
                return experimental_tpu_test_loop(
                    model, dataset, verbose=verbose, steps=steps,
                    callbacks=callbacks)

        return training_arrays_v1.test_loop(
            model, inputs=dataset, batch_size=batch_size, verbose=verbose,
            steps=steps, callbacks=callbacks)

    def predict(self, model, x, batch_size=None, verbose=0, steps=None,
                callbacks=None, **kwargs):
        """Predict loop for Distribution Strategies."""
        dist_utils.validate_inputs(x=x, y=None)
        batch_size, steps = dist_utils.process_batch_and_step_size(
            model._distribution_strategy, x, batch_size, steps, ModeKeys.PREDICT)
        batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
        dataset = model._distribution_standardize_user_data(
            x, batch_size=batch_size, allow_partial_batch=True)
        if dist_utils.is_tpu_strategy(model._distribution_strategy):
            steps = training_utils_v1.infer_steps_for_dataset(
                model, dataset, steps, steps_name="steps")
            if steps is None:
                raise ValueError(
                    "Number of steps could not be inferred from the data, "
                    "please pass the steps argument.")

            if not tf.executing_eagerly():
                return experimental_tpu_predict_loop(
                    model, dataset, verbose=verbose, steps=steps,
                    callbacks=callbacks)
        return training_arrays_v1.predict_loop(
            model, dataset, batch_size=batch_size, verbose=verbose, steps=steps,
            callbacks=callbacks)


def _train_with_multi_worker(method):
    """Decorator handles multi worker training with distribution strategy."""

    def wrapper(model, **kwargs):
        def _worker_fn(_):
            callbacks = kwargs.pop("callbacks", None)
            filtered_callbacks = dist_utils.filter_distributed_callbacks(
                callbacks, model)
            kwargs["callbacks"] = filtered_callbacks
            return method(model, **kwargs)

        return dc.run_distribute_coordinator(_worker_fn,
                                             model._distribution_strategy)

    return wrapper


class DistributionMultiWorkerTrainingLoop(training_utils_v1.TrainingLoop):
    """Training loop for distribution strategy with multiple worker."""

    def __init__(self, single_worker_loop):
        self._single_worker_loop = single_worker_loop

    def fit(self, *args, **kwargs):
        return _train_with_multi_worker(self._single_worker_loop.fit)(
            *args, **kwargs)

    def evaluate(self, *args, **kwargs):
        return _train_with_multi_worker(self._single_worker_loop.evaluate)(
            *args, **kwargs)

    def predict(self, *args, **kwargs):
        # Predict is still delegated directly to the single-worker loop.
        return self._single_worker_loop.predict(*args, **kwargs)