"""Utilities for saving/loading Trackable objects."""
import abc
import collections
import functools
import glob
import os
import threading
import time
import weakref

from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python.checkpoint import checkpoint_management
from tensorflow.python.checkpoint import checkpoint_options
from tensorflow.python.checkpoint import functional_saver
from tensorflow.python.checkpoint import graph_view as graph_view_lib
from tensorflow.python.checkpoint import restore as restore_lib
from tensorflow.python.checkpoint import util
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.trackable import autotrackable
from tensorflow.python.trackable import base
from tensorflow.python.trackable import data_structures
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training import saver as v1_saver_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export

# The callable that provides a Keras default session for saving.
_SESSION_PROVIDER = None

# Timestamp of the last checkpoint write, shared across `Checkpoint`
# instances and guarded by a lock.
_END_TIME_OF_LAST_WRITE = None
_END_TIME_OF_LAST_WRITE_LOCK = threading.Lock()

# API labels for cell names used in checkpoint metrics.
_CHECKPOINT_V1 = "checkpoint_v1"
_CHECKPOINT_V2 = "checkpoint_v2"

# Thread used for asynchronous checkpoint writes.
_ASYNC_CHECKPOINT_THREAD = None


def _get_duration_microseconds(start_time_seconds, end_time_seconds):
  if end_time_seconds < start_time_seconds:
    # Avoid reporting a negative duration in case of clock skew.
    return 0
  return round((end_time_seconds - start_time_seconds) * 1000000)


@tf_export("__internal__.tracking.register_session_provider", v1=[])
def register_session_provider(session_provider):
  global _SESSION_PROVIDER
  _SESSION_PROVIDER = session_provider


def get_session():
  # Prefer TF's default session since get_session from Keras has side-effects.
  session = ops.get_default_session()
  if session is None:
    global _SESSION_PROVIDER
    if _SESSION_PROVIDER is not None:
      session = _SESSION_PROVIDER()  # pylint: disable=not-callable
  return session


def _get_checkpoint_size(prefix):
  """Calculates filesize of checkpoint based on prefix."""
  size = 0
  # Gather all files beginning with prefix (.index plus sharded data files).
  files = glob.glob("{}*".format(prefix))
  for file in files:
    # Use TensorFlow's C++ FileSystem API.
    size += metrics.CalculateFileSize(file)
  return size
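
# A minimal usage sketch; the checkpoint prefix below is hypothetical. Any
# prefix returned by `Checkpoint.save()`/`write()` works, since the index and
# sharded data files all start with that prefix.
#
#   ckpt_bytes = _get_checkpoint_size("/tmp/ckpt/model-1")
#   print("checkpoint uses", ckpt_bytes, "bytes on disk")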


class ObjectGraphProtoPrettyPrinter(object):
  """Lazily traverses an object graph proto to pretty print names.

  If no calls to `node_names` are made this object has no performance
  overhead. On the other hand, it will only traverse the object graph once, so
  repeated naming is cheap after the first.
  """

  __slots__ = ["_object_graph_proto", "_object_graph_proto_node_name_cache"]

  def __init__(self, object_graph_proto):
    self._object_graph_proto = object_graph_proto
    self._object_graph_proto_node_name_cache = None

  @property
  def node_names(self):
    """Lazily creates a mapping from node id to ("path", "to", "root")."""
    if self._object_graph_proto_node_name_cache is not None:
      return self._object_graph_proto_node_name_cache
    path_to_root = {}
    path_to_root[0] = ("(root)",)
    to_visit = collections.deque([0])
    while to_visit:
      node_id = to_visit.popleft()
      obj = self._object_graph_proto.nodes[node_id]
      for child in obj.children:
        if child.node_id not in path_to_root:
          path_to_root[child.node_id] = (
              path_to_root[node_id] + (child.local_name,))
          to_visit.append(child.node_id)

    node_names = {}
    for node_id, path in path_to_root.items():
      node_names[node_id] = ".".join(path)

    for node_id, node in enumerate(self._object_graph_proto.nodes):
      for slot_reference in node.slot_variables:
        node_names[slot_reference.slot_variable_node_id] = (
            f"{node_names[node_id]}'s state '{slot_reference.slot_name}' "
            f"for {node_names[slot_reference.original_variable_node_id]}")
    self._object_graph_proto_node_name_cache = node_names
    return node_names
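
# A minimal sketch of pretty-printing checkpoint node names; `ckpt_path` is a
# hypothetical checkpoint prefix and `object_metadata` is defined later in
# this module.
#
#   proto = object_metadata(ckpt_path)  # TrackableObjectGraph proto
#   printer = ObjectGraphProtoPrettyPrinter(proto)
#   for node_id, _ in enumerate(proto.nodes):
#     print(node_id, printer.node_names[node_id])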


class _CheckpointRestoreCoordinatorDeleter(object):
  """Deleter to avoid overriding _CheckpointRestoreCoordinator.__del__()."""

  __slots__ = [
      "expect_partial", "object_graph_proto", "matched_proto_ids",
      "unused_attributes"
  ]

  def __init__(self, expect_partial, object_graph_proto, matched_proto_ids,
               unused_attributes):
    self.expect_partial = expect_partial
    self.object_graph_proto = object_graph_proto
    self.matched_proto_ids = matched_proto_ids
    self.unused_attributes = unused_attributes

  def set_expect_partial(self, expect_partial):
    self.expect_partial = expect_partial

  def __del__(self):
    if self.expect_partial:
      return
    # The logging module may have been unloaded by the time this runs.
    if logging is None:
      log_fn = print
    else:
      log_fn = logging.warning
    unused_nodes_in_checkpoint = []
    unrestored_attributes_in_object = []
    pretty_printer = ObjectGraphProtoPrettyPrinter(self.object_graph_proto)
    for node_id, node in enumerate(self.object_graph_proto.nodes):
      if not node.attributes:
        continue
      if node_id not in self.matched_proto_ids:
        unused_nodes_in_checkpoint.append(pretty_printer.node_names[node_id])
    for node_path, attribute_name in self.unused_attributes.items():
      unrestored_attributes_in_object.append((node_path, attribute_name))
    if unused_nodes_in_checkpoint or unrestored_attributes_in_object:
      log_fn("Detecting that an object or model or tf.train.Checkpoint is "
             "being deleted with unrestored values. See the following logs "
             "for the specific values in question. To silence these warnings, "
             "use `status.expect_partial()`. See "
             "https://www.tensorflow.org/api_docs/python/tf/train/Checkpoint#restore"
             "for details about the status object returned by the restore "
             "function.")
      for node_path in unused_nodes_in_checkpoint:
        log_fn("Value in checkpoint could not be found in the restored "
               f"object: {node_path}")
      for node_path, attribute_name in unrestored_attributes_in_object:
        log_fn("An attribute in the restored object could not be found in "
               f"the checkpoint. Object: {node_path}, "
               f"attribute: {attribute_name}")
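
# Usage note (a sketch, assuming a `ckpt` object and checkpoint path exist):
# the warning emitted by `__del__` above can be silenced by opting in to a
# partial restore on the returned status object.
#
#   status = ckpt.restore(save_path)
#   status.expect_partial()  # suppress unrestored-value warnings on deletion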


class _CheckpointRestoreCoordinator(object):
  """Holds the status of an object-based checkpoint load."""

  def __init__(self, object_graph_proto, save_path, save_path_tensor, reader,
               restore_op_cache, graph_view, options, saveables_cache):
    """Specify the checkpoint being loaded.

    Args:
      object_graph_proto: The TrackableObjectGraph protocol buffer associated
        with this checkpoint.
      save_path: A string, the path to the checkpoint, as returned by
        `tf.train.latest_checkpoint`.
      save_path_tensor: A string `Tensor` which contains or will be fed the save
        path.
      reader: A `CheckpointReader` for `save_path`. If None,
        `_CheckpointRestoreCoordinator` will initialize one itself.
      restore_op_cache: A dictionary shared between
        `_CheckpointRestoreCoordinator`s for the same Python objects, used to
        look up restore ops by name to avoid re-creating them across multiple
        `restore()` calls.
      graph_view: A graph_view_lib.ObjectGraphView object for the restored
        objects.
      options: A CheckpointOptions object.
      saveables_cache: An optional cache storing previously created
        SaveableObjects created for each Trackable. Maps Trackables to a
        dictionary of attribute names to Trackable.
    NF)optimizer_idslot_variable_idrR   )+optionsrC   r   uidrestore_uidre   weakrefWeakValueDictionaryobject_by_proto_idsetrd   r'   ObjectIdentityWeakSetall_python_objectssave_path_tensorZsave_path_stringreaderr"   NewCheckpointReaderget_variable_to_dtype_map	dtype_mapget_variable_to_shape_mapZ	shape_maprestore_opsrestore_ops_by_namer   new_restore_ops_callbackZdeferred_slot_restorationsslot_restorationsexpect_partial_attrrP   rI   rQ   
setdefaultrS   rM   r    _SlotVariableRestorationrT   rR   ra   _deletersaveables_cache)rB   rC   	save_pathr}   r~   restore_op_cacher   rt   r   
node_indexrY   rZ   r.   r.   r/   rD      sN    






z&_CheckpointRestoreCoordinator.__init__c                 C   s   | j S r2   )r   rB   r.   r.   r/   rc   8  s    z,_CheckpointRestoreCoordinator.expect_partialc                 C   s   || _ | j| d S r2   )r   r   rg   rf   r.   r.   r/   rc   <  s    c                 C   s    | j | | jr| | d S r2   )r   extendr   )rB   new_opsr.   r.   r/   new_restore_opsA  s    z-_CheckpointRestoreCoordinator.new_restore_opsNc                 C   s   g }|D ]&}|j jd j}|j| j| q|s8|rt|}t	
||| j| j}t st| D ]*\}	}
||
 |	| jvsJ |
| j|	< qn|S )a  Run or build restore operations for SaveableObjects.

    Args:
      tensor_saveables: `SaveableObject`s which correspond to Tensors.
      python_positions: List of CheckpointPositions bound to `PythonState`
        objects which must be restored eagerly.
      registered_savers: a dict mapping saver names -> object name -> Trackable.

    Returns:
      When graph building, a list of restore operations, either cached or newly
      created, to restore `tensor_saveables`.
    """
    restore_ops = []
    # Eagerly run restorations for Python state.
    for position in python_positions:
      key = position.object_proto.attributes[0].checkpoint_key
      position.trackable.deserialize(self.reader.get_tensor(key))

    # If we have new SaveableObjects, extract and sort the restore ops.
    if tensor_saveables or registered_savers:
      flat_saveables = saveable_object_util.validate_and_slice_inputs(
          tensor_saveables)
      new_restore_ops = functional_saver.MultiDeviceSaver(
          flat_saveables,
          registered_savers).restore(self.save_path_tensor, self.options)
      if not context.executing_eagerly():
        for name, restore_op in sorted(new_restore_ops.items()):
          restore_ops.append(restore_op)
          assert name not in self.restore_ops_by_name
          self.restore_ops_by_name[name] = restore_op
    return restore_ops


class _NameBasedRestoreCoordinator(object):
  """Keeps the status of a name-based checkpoint restore."""

  def __init__(self, save_path, dtype_map=None):
    self.save_path = save_path
    self.dtype_map = dtype_map
    # A map from trackable objects to unrestored attribute names.
    self.unused_attributes = object_identity.ObjectIdentityWeakKeyDictionary()
    self.restore_uid = ops.uid()

  def globally_named_object_attributes(self, trackable):
    """Create globally named SaveableObjects from attributes.

    If an object's attribute has no global name specified (default construction
    for the SaveableObject factory), records the failure in
    `self.unused_attributes` (which can then be used to make status assertions
    fail; see `NameBasedSaverStatus`).

    Args:
      trackable: An object to save.

    Yields:
      SaveableObjects for `trackable`'s attributes.
    """
    for attribute_name, saveable_factory in (
        trackable._gather_saveables_for_checkpoint().items()):  # pylint: disable=protected-access
      if callable(saveable_factory):
        try:
          # This saveable object factory does not have a default name=
          # argument, which means there's no way to save/restore it using a
          # name-based checkpoint. Ignore the error now and make sure
          # assert_consumed() fails.
          saveable = saveable_factory()
        except TypeError:
          self.unused_attributes.setdefault(trackable,
                                            []).append(attribute_name)
          continue
      else:
        saveable = saveable_factory
      names_to_saveables = saveable_object_util.op_list_to_dict(
          [saveable], convert_variable_to_tensor=False)
      for name, op in names_to_saveables.items():
        for saveable_object in saveable_object_util.saveable_objects_for_op(
            op=op, name=name):
          yield saveable_object

  def eager_restore(self, trackable):
    """Runs restore ops for `trackable`'s attributes."""
    # When graph building, restore happens through deferred restore logic
    # instead of this eager path.
    assert context.executing_eagerly()
    for saveable in self.globally_named_object_attributes(trackable):
      restored_tensors = []
      tensor_missing = False
      for spec in saveable.specs:
        if spec.name in self.dtype_map:
          with ops.device("cpu:0"):
            restored, = io_ops.restore_v2(
                prefix=self.save_path,
                tensor_names=[spec.name],
                shape_and_slices=[""],
                dtypes=[self.dtype_map[spec.name]],
                name="%s_checkpoint_read" % (spec.name,))
          restored_tensors.append(array_ops.identity(restored))
        else:
          tensor_missing = True

      if tensor_missing:
        # Record that this variable didn't match so assertions will fail.
        self.unused_attributes.setdefault(trackable, []).append(saveable.name)
      else:
        # Ignores values missing from the checkpoint, as with object-based
        # restore. Status assertions can be used to check exact matches.
        saveable.restore(
            restored_tensors=restored_tensors, restored_shapes=None)


def _default_getter(name,
                    shape,
                    dtype,
                    initializer=None,
                    trainable=True,
                    **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()  # pylint: disable=protected-access
          ._get_default_initializer(name=name, shape=shape_object,
                                    dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      shape_list = None if shape is None else shape_object.as_list()
      if "partition_info" in tf_inspect.getargspec(initializer).args:
        initial_value = functools.partial(
            initializer, shape_list, dtype=dtype, partition_info=None)
      else:
        initial_value = functools.partial(initializer, shape_list, dtype=dtype)
    return variables.VariableV1(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        trainable=trainable,
        use_resource=True,
        **kwargs)


def add_variable(trackable,
                 name,
                 shape=None,
                 dtype=dtypes.float32,
                 initializer=None,
                 trainable=True):
  """Add a variable to a Trackable with no scope influence."""
  return trackable._add_variable_with_custom_getter(  # pylint: disable=protected-access
      name=name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      getter=_default_getter,
      trainable=trainable)


def object_metadata(save_path):
  """Retrieves information about the objects in a checkpoint.

  Example usage:

  ```python
  object_graph = tf.contrib.checkpoint.object_metadata(
      tf.train.latest_checkpoint(checkpoint_directory))
  ckpt_variable_names = set()
  for node in object_graph.nodes:
    for attribute in node.attributes:
      ckpt_variable_names.add(attribute.full_name)
  ```

  Args:
    save_path: The path to the checkpoint, as returned by `save` or
      `tf.train.latest_checkpoint`.

  Returns:
    A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
  Raises:
    ValueError: If an object graph was not found in the checkpoint.
  zThe specified checkpoint "zS" does not appear to be object-based (saved with TF2) since it is missing the key "zg". Likely it was created with the TF1 name-based saver and does not contain an object dependency graph.)r"   r   r   r    OBJECT_GRAPH_PROTO_KEYr   NotFoundErrorr   r   TrackableObjectGraphParseFromString)r   r~   object_graph_stringrC   r.   r.   r/   object_metadata  s    


r   c                 C   s   t t| S )a  Traverse the object graph and list all accessible objects.

  Looks for `Trackable` objects which are dependencies of
  `root_trackable`. Includes slot variables only if the variable they are
  slotting for and the optimizer are dependencies of `root_trackable`
  (i.e. if they would be saved with a checkpoint).

  Args:
    root_trackable: A `Trackable` object whose dependencies should be flattened.

  Returns:
    A flat list of objects.
  """
  return util.list_objects(graph_view_lib.ObjectGraphView(root_trackable))


def gather_initializers(root_trackable):
  """Traverse the object graph and find initialization ops.

  Looks for `Trackable` objects which are dependencies of
  `root_trackable` and which have an `initializer` property. Includes
  initializers for slot variables only if the variable they are slotting for and
  the optimizer are dependencies of `root_trackable` (i.e. if they would be
  saved with a checkpoint).

  Args:
    root_trackable: A `Trackable` object to gather initializers for.

  Returns:
    A list of initialization ops.
  """
  trackable_objects = list_objects(root_trackable)
  return [
      c.initializer
      for c in trackable_objects
      if hasattr(c, "initializer") and c.initializer is not None
  ]


@tf_contextlib.contextmanager
def capture_dependencies(template):
  """Capture variables created within this scope as `Template` dependencies.

  Requires that `template.variable_scope` is active.

  This scope is intended as a compatibility measure, allowing a trackable
  object to add dependencies on variables created in a block of code which is
  not aware of object-based saving (and instead uses variable names
  heavily). This is how `Template` objects add dependencies on variables and
  sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.

  Args:
    template: The `Template` object to register dependencies with.

  Yields:
    None (when used as a context manager).
  """
  name_prefix = template.variable_scope.name

  def _trackable_custom_creator(next_creator,
                                name,
                                initial_value,
                                trackable_parent=None,
                                **kwargs):
    """A variable creation hook which adds Trackable dependencies.

    Set for example during a `Template`'s first wrapped function
    execution. Ensures that (a) `template` depends on any trackable
    objects using their own `capture_dependencies` scope inside this scope which
    create variables, and (b) that any variables not in a more deeply nested
    scope are added as dependencies directly.

    The `trackable_parent` argument is passed between custom creators but
    ignored when the variable object itself is created. This argument indicates
    (if not `None`) that a more deeply nested scope has already added the
    variable as a dependency, and that parent scopes should add a dependency on
    that object rather than on the variable directly.

    Args:
      next_creator: See `variable_scope.variable_creator_scope`; the next
        creator in the chain.
      name: The (full, scope-influenced) name of the variable. The `name_prefix`
        itself is stripped for the purposes of object-based dependency tracking,
        but scopes opened within this scope are respected.
      initial_value: See `variable_scope.variable_creator_scope`. Taken
        explicitly so the argument can be re-named and used with
        `Trackable._add_variable_with_custom_getter`.
      trackable_parent: If not None, a more deeply nested trackable object and
        its name prefix which were passed to `capture_dependencies` to add a
        dependency on (rather than depending on the variable directly).
      **kwargs: Passed through to the next creator.

    Returns:
      The output of `next_creator`: the fetched/created variable object.
    """

    def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):
      # The name is ignored here; it is the scope-stripped name, which we
      # don't want to propagate to the next creator.
      inner_kwargs.pop("name")
      return next_creator(
          initial_value=initializer, name=name, **inner_kwargs)

    if name is not None and name.startswith(name_prefix):
      scope_stripped_name = name[len(name_prefix) + 1:]
      if not trackable_parent:
        return template._add_variable_with_custom_getter(  # pylint: disable=protected-access
            initializer=initial_value,
            name=scope_stripped_name,
            getter=_call_next_creator_renaming_initializer,
            overwrite=True,
            trackable_parent=(template, name_prefix),
            **kwargs)
      else:
        parent_object, parent_name_prefix = trackable_parent
        template._track_trackable(  # pylint: disable=protected-access
            parent_object,
            name=parent_name_prefix[len(name_prefix) + 1:],
            overwrite=True)
    return next_creator(
        name=name,
        initial_value=initial_value,
        trackable_parent=(template, name_prefix),
        **kwargs)

  with variable_scope.variable_creator_scope(_trackable_custom_creator):
    yield


class _LoadStatus(object):
  """Abstract base for load status callbacks."""

  @abc.abstractmethod
  def assert_consumed(self):
    """Raises an exception unless a non-trivial restoration has completed."""

  @abc.abstractmethod
  def assert_existing_objects_matched(self):
    """Raises an exception unless existing Python objects have been matched."""

  @abc.abstractmethod
  def assert_nontrivial_match(self):
    """Raises an exception if only the root object matched."""

  @abc.abstractmethod
  def run_restore_ops(self, session=None):
    """Runs restore ops from the checkpoint. Requires a valid checkpoint."""

  @abc.abstractmethod
  def initialize_or_restore(self, session=None):
    """Runs restore ops from the checkpoint, or initializes variables."""

  def expect_partial(self):
    """Silence warnings about incomplete checkpoint restores."""
    return self


@tf_export("__internal__.tracking.streaming_restore", v1=[])
def streaming_restore(status, session=None):
  """When graph building, runs restore ops as soon as they come in.

  Args:
    status: A `_LoadStatus` object from an object-based saver's `restore()`.
      Streaming restore from name-based checkpoints is not currently supported.
    session: A session to run new restore ops in.
  """
  if context.executing_eagerly():
    # Streaming restore is the default/only behavior when executing eagerly.
    return
  if session is None:
    session = get_session()
  if isinstance(status, NameBasedSaverStatus):
    raise NotImplementedError(
        "Streaming restore not supported from name-based checkpoints when "
        "graph building. File a feature request if this limitation bothers "
        "you. As a workaround, consider either using tf.train.Checkpoint to "
        "load name-based checkpoints or enabling eager execution.")
  status.run_restore_ops(session=session)
  # pylint: disable=protected-access
  status._checkpoint.new_restore_ops_callback = (
      lambda ops: session.run(ops, feed_dict=status._feed_dict))
  # pylint: enable=protected-access


def _objects_with_attributes(full_list):
  """Filters out objects with no direct variable dependencies for assertions."""
  return [o for o in full_list if o._gather_saveables_for_checkpoint()]  # pylint: disable=protected-access
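
# A graph-mode sketch (names hypothetical): run deferred restore ops as soon
# as they are created, instead of waiting for `run_restore_ops()`.
#
#   status = checkpoint.restore(save_path)
#   with session_lib.Session() as sess:
#     streaming_restore(status, session=sess)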
r  c                 C   s   dd | D S )zHFilters out objects with no direct variable dependencies for assertions.c                 S   s   g | ]}|  r|qS r.   )r   )r   or.   r.   r/   r     r  z,_objects_with_attributes.<locals>.<listcomp>r.   )Z	full_listr.   r.   r/   _objects_with_attributes  s    r  c                   @   sL   e Zd ZdZdd Zdd Zdd Zdd	 ZdddZdddZ	dd Z
d
S )CheckpointLoadStatusa'  Checks the status of checkpoint loading and manages restore ops.

  Returned from `Saver.restore`. Since `restore` may defer the loading of values
  in the checkpoint which don't yet have corresponding Python objects,
  `CheckpointLoadStatus` provides a callback to verify that checkpoint loading
  is complete (`assert_consumed`).

  When graph building, `restore` does not run restore ops itself since their
  creation may be deferred. The `run_restore_ops` method must be called once all
  Python objects with values to restore have been created and added to the
  dependency graph (this does not necessarily have to be the whole checkpoint;
  calling `run_restore_ops` while `assert_consumed` fails is supported and will
  partially restore the checkpoint).

  See `Saver.restore` for usage examples.
  """

  def __init__(self, checkpoint, feed_dict, graph_view):
    self._checkpoint = checkpoint
    self._feed_dict = feed_dict
    self._object_graph_view = graph_view
    # Keep a reference to the root, since graph_view may only have a weakref.
    self._root = graph_view.root

  def assert_consumed(self):
    """Asserts that all objects in the checkpoint have been created/matched.

    Returns:
      `self` for chaining.
    Raises:
      AssertionError: If there are any Python objects in the dependency graph
        which have not been restored from this checkpoint or a later `restore`,
        or if there are any checkpointed values which have not been matched to
        Python objects.
    """
    pretty_printer = ObjectGraphProtoPrettyPrinter(
        self._checkpoint.object_graph_proto)
    self.assert_existing_objects_matched()
    for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
      if not node.attributes:
        # Only raise exceptions for the nodes with attributes themselves.
        # Either they're ultimately not important, or they have a child with
        # an attribute.
        continue
      trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
      if trackable is None:
        raise AssertionError(
            "Unresolved object in checkpoint "
            f"{pretty_printer.node_names[node_id]}: {node}")
    if self._checkpoint.slot_restorations:
      # Sanity check; this collection should be clear if everything has been
      # restored.
      raise AssertionError(
          "Unresolved slot restorations: "
          f"{self._checkpoint.slot_restorations}")
    if self._checkpoint.unused_attributes:
      unused_attribute_messages = []
      for node_id, attribute in self._checkpoint.unused_attributes.items():
        obj = self._checkpoint.object_by_proto_id[node_id]
        unused_attribute_messages.append(
            f"{pretty_printer.node_names[node_id]} ({obj}): {attribute}")
      joined_attribute_messages = "\n".join(unused_attribute_messages)
      raise AssertionError(
          "Unused attributes in these objects (the attributes exist in the "
          f"checkpoint but were not restored):\n{joined_attribute_messages}")
    return self

  def assert_existing_objects_matched(self):
    """Asserts that trackable Python objects have been matched.

    Note that this is a weaker assertion than `assert_consumed`. It will only
    fail for existing Python objects which are (transitive) dependencies of the
    root object and which do not have an entry in the checkpoint.

    It will not fail, for example, if a `tf.keras.Layer` object has not yet been
    built and so has not created any `tf.Variable` objects.

    Returns:
      `self` for chaining.

    Raises:
      AssertionError: If a Python object exists in the transitive dependencies
        of the root object but does not have a value in the checkpoint.
    NzObject z& not assigned a value from checkpoint.
   zFound zq Python objects that were not bound to checkpointed values, likely due to changes in the Python program. Showing z of z unmatched objects: )rP   r  rC   rI   ry   r  _update_uidrv   r  r   r   r  r   r!   ZTrackableDataStructure_trackable_childrenr|   addr'   ObjectIdentitySetr  valuesr   listmin)rB   rK   rY   r   trackable_objectunused_python_objectsZnum_unused_python_objectsZnum_variables_to_showr.   r.   r/   r   +  sN    


z4CheckpointLoadStatus.assert_existing_objects_matchedc                 C   s   t | jD ]}| jj| qt| jjdkrt	t
| jjt	| jj  }|rltdt| ntd| jj d| S )r   r   zNothing except the root object matched a checkpointed value. Typically this means that the checkpoint does not match the Python program. The following objects have no matching checkpointed value: z4Nothing to load. No dependencies have been added to z yet.)r   r   r  r  r|   r%  r   ry   r'   r&  r  r'  r  r(  r  )rB   r*  r+  r.   r.   r/   r  \  s.    

z,CheckpointLoadStatus.assert_nontrivial_matchNc                 C   s2   t  rdS |du rt }|j| jj| jd dS )z:Run operations to restore objects in the dependency graph.Nr  )r   r   r6   r
  r  r   r  r  r.   r.   r/   r  r  s
    z$CheckpointLoadStatus.run_restore_opsc                    sf   t  rdS |du rt }tj}tjj	
   fdd|D }j|d || dS )aU  Run operations to initialize or restore objects in the dependency graph.

    Any objects in the dependency graph which have initializers but are not in
    the checkpoint will have those initializers run, unless those variables are
    being restored by a later call to `tf.train.Checkpoint.restore()`.

    This method has a sibling in `InitializationOnlyStatus` which instead
    initializes variables. That type is returned if no checkpoint is specified
    in `Saver.restore`.

    Args:
      session: The session to run init/restore ops in. If `None`, uses the
        default session.
    """
    if context.executing_eagerly():
      return  # Initialization and restoration ops are run eagerly.
    if session is None:
      session = get_session()
    all_objects = util.list_objects(self._object_graph_view)
    already_initialized_objects = object_identity.ObjectIdentitySet(
        self._checkpoint.object_by_proto_id.values())
    initializers_for_non_restored_variables = [
        c.initializer for c in all_objects
        if hasattr(c, "initializer")
        and c not in already_initialized_objects
        and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
             < self._checkpoint.restore_uid)
    ]
    self.run_restore_ops(session=session)
    session.run(initializers_for_non_restored_variables)

  def expect_partial(self):
    """Silence warnings about incomplete checkpoint restores."""
    self._checkpoint.expect_partial = True
    return self


class InitializationOnlyStatus(_LoadStatus):
  """Returned from `Saver.restore` when no checkpoint has been specified.

  Objects of this type have the same `assert_consumed` method as
  `CheckpointLoadStatus`, but it always fails. However,
  `initialize_or_restore` works on objects of both types, and will
  initialize variables in `InitializationOnlyStatus` objects or restore them
  otherwise.
  """

  def __init__(self, object_graph_view, restore_uid):
    self._restore_uid = restore_uid
    self._object_graph_view = object_graph_view
    # Keep a reference to the root, since object_graph_view may only have a
    # weakref.
    self._root = object_graph_view.root

  def assert_consumed(self):
    """Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
    raise AssertionError(
        "No checkpoint specified (save_path=None); nothing is being restored.")

  def assert_existing_objects_matched(self):
    """Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
    raise AssertionError(
        "No checkpoint specified (save_path=None); nothing is being restored.")

  def assert_nontrivial_match(self):
    """Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
    raise AssertionError(
        "No checkpoint specified (save_path=None); nothing is being restored.")

  def run_restore_ops(self, session=None):
    """For consistency with `CheckpointLoadStatus`.

    Use `initialize_or_restore` for initializing if no checkpoint was passed
    to `Saver.restore` and restoring otherwise.

    Args:
      session: Not used.
    """
    raise AssertionError(
        "No checkpoint specified, so no restore ops are available "
        "(save_path=None to Saver.restore).")

  def initialize_or_restore(self, session=None):
    """Runs initialization ops for variables.

    Objects which would be saved by `Saver.save` will be initialized, unless
    those variables are being restored by a later call to
    `tf.train.Checkpoint.restore()`.

    This method does nothing when executing eagerly (initializers get run
    eagerly).

    Args:
      session: The session to run initialization ops in. If `None`, uses the
        default session.
    """
    if context.executing_eagerly():
      return  # Run eagerly.
    if session is None:
      session = get_session()
    trackable_objects = util.list_objects(self._object_graph_view)
    initializers = [
        c.initializer for c in trackable_objects
        if hasattr(c, "initializer") and c.initializer is not None
        and (getattr(c, "_update_uid", self._restore_uid - 1)
             < self._restore_uid)
    ]
    session.run(initializers)
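
# A sketch of the no-checkpoint path (names hypothetical): when
# `Saver.restore(None)` returns this status in graph mode,
# `initialize_or_restore` simply initializes the variables.
#
#   status = saver.restore(save_path=None)
#   with session_lib.Session() as sess:
#     status.initialize_or_restore(sess)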
r.  a  Restoring a name-based tf.train.Saver checkpoint using the object-based restore API. This mode uses global names to match variables, and so is somewhat fragile. It also adds new restore ops to the graph each time it is called when graph building. Prefer re-encoding training checkpoints in the object-based format: run save() on the object-based saver (the same one this message is coming from) and use that checkpoint in the future.c                   @   sb   e Zd ZdZejdeddd Zdd Zdd	 Z	d
d Z
dd Zdd ZdddZdddZdS )r  z4Status for loading a name-based training checkpoint.N)dateinstructionsc                 C   s   || _ || _g | _|j| _d S r2   )r  r  _optionally_restoredr  r  )rB   r  r0  r.   r.   r/   rD     s    zNameBasedSaverStatus.__init__c                 C   s   | j | dS )a  Add a variable to the list of optionally restored variables.

    There are situations where certain variables should be ignored in assertions
    such as assert_existing_objects_matched(). One example is that of a
    checkpoint saved with train.Saver(), and restored with train.Checkpoint():
    it is possible for the train.Saver() checkpoint to be missing the internal
    `save_counter` variable, which we want to ignore on restore.

    Args:
      var: The variable to treat as optionally restored.
    """
    self._optionally_restored.append(var)

  def assert_consumed(self):
    """Raises an exception if any variables are unmatched."""
    unused_attributes = list(self._checkpoint.unused_attributes.items())
    unused_attributes = [
        a for a in unused_attributes
        if all(a[0] is not x for x in self._optionally_restored)
    ]
    if unused_attributes:
      unused_attribute_strings = [
          f"\n    {obj}: {attributes}"
          for obj, attributes in unused_attributes
      ]
      raise AssertionError(
          "Some objects had attributes which were not restored: "
          f"{unused_attribute_strings}")
    for trackable in util.list_objects(self._object_graph_view):
      # pylint: disable=protected-access
      trackable._maybe_initialize_trackable()
      if trackable._update_uid < self._checkpoint.restore_uid:
        raise AssertionError(f"Object not restored: {trackable}")
      # pylint: enable=protected-access
    return self

  def assert_existing_objects_matched(self):
    """Raises an exception if currently created objects are unmatched."""
    # For name-based checkpoints there's no object information in the
    # checkpoint itself, so there's no distinction between this assertion and
    # assert_consumed.
    return self.assert_consumed()

  def assert_nontrivial_match(self):
    """Raises an exception if currently created objects are unmatched."""
    return self.assert_consumed()

  def _gather_saveable_objects(self):
    """Walk the object graph, using global names for SaveableObjects."""
    objects = util.list_objects(self._object_graph_view)
    saveable_objects = []
    for trackable in objects:
      # pylint: disable=protected-access
      trackable._maybe_initialize_trackable()
      if trackable._update_uid < self._checkpoint.restore_uid:
        trackable._update_uid = self._checkpoint.restore_uid
      else:
        continue
      # pylint: enable=protected-access
      saveable_objects.extend(
          self._checkpoint.globally_named_object_attributes(trackable))
    return saveable_objects

  def run_restore_ops(self, session=None):
    """Load the name-based checkpoint using a new `tf.compat.v1.train.Saver`."""
    if context.executing_eagerly():
      return  # Nothing to do, variables are restored on creation.
    if session is None:
      session = get_session()
    with ops.device("/cpu:0"):
      saveables = self._gather_saveable_objects()
      v1_saver_lib.Saver(saveables).restore(
          sess=session, save_path=self._checkpoint.save_path)

  def initialize_or_restore(self, session=None):
    """Alias for `run_restore_ops`."""
    self.run_restore_ops(session=session)


class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
  """Pretends to be a session, inserts extra feeds on run()."""

  def __init__(self, session, feed_additions):
    self._wrapped_session = session
    self._feed_additions = feed_additions

  def run(self, fetches, feed_dict=None, **kwargs):
    if feed_dict is None:
      feed_dict = {}
    else:
      feed_dict = feed_dict.copy()
    feed_dict.update(self._feed_additions)
    return self._wrapped_session.run(
        fetches=fetches, feed_dict=feed_dict, **kwargs)


class TrackableSaver(object):
  """Saves and restores a `Trackable` object and its dependencies.

  See `Trackable` for details of dependency management. `Saver` wraps
  `tf.compat.v1.train.Saver` for saving, including extra information about the
  graph of
  dependencies between Python objects. When restoring, it uses this information
  about the save-time dependency graph to more robustly match objects with their
  checkpointed values. When executing eagerly, it supports restoring variables
  on object creation (see `Saver.restore`).

  Values in a checkpoint are mapped to `Trackable` Python objects
  (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
  checkpoint was written. To avoid breaking existing checkpoints when modifying
  a class, dependency names (the names of attributes to which `Trackable`
  objects are assigned) may not change. These names are local to objects, in
  contrast to the `Variable.name`-based save/restore from
  `tf.compat.v1.train.Saver`, and
  so allow additional program transformations.
  """

  def __init__(self, graph_view):
    """Configure saving.

    Args:
      graph_view: An `ObjectGraphView` object containing a description of the
        object graph to save.
    N)_graph_viewr   r   _saveables_cacher'   r   _file_prefix_placeholder_object_graph_feed_tensor_last_save_object_graph_file_prefix_feed_tensor_cached_save_operation_restore_op_cache_object_map)rB   r   r.   r.   r/   rD   x  s    
zTrackableSaver.__init__Nc                 C   s   t j| j| j| jd\}}}}|du rftd$ tj|	 t
jd}W d   qx1 sZ0    Y  n|||	 i tj|vsJ |tj|tjd ||||fS )z@Wraps _serialize_object_graph to include the object graph proto.)r   
object_mapr   NrD  r   )tensorr   )r   Zserialize_gathered_objectsrS  r[  rT  r   r   r   constantSerializeToStringr   stringrQ  r    r   rM   NoRestoreSaveable)rB   object_graph_tensornamed_saveable_objectsgraph_protorN  r   r.   r.   r/   _gather_saveables  s.    

&
z TrackableSaver._gather_saveablesFc                    s   j |d\fdd fdd} fdd}jr|  tdurdt  tj|d	at  _jfS   S )
a  Create or retrieve save ops.

    Args:
      file_prefix: The prefix for saved checkpoint files.
      object_graph_tensor: A `Tensor` to which the current object graph will be
        fed.
      options: `CheckpointOptions` object.
      update_ckpt_state: Optional bool flag. Indicates whether the internal
        checkpoint state needs to be updated.

    Returns:
      A two-element tuple with a filename tensor and a feed_dict of tensors to
      feed when running it (if graph building). The feed dict contains the
      current object graph and any Python state to be saved in the
      checkpoint. When executing eagerly only the first argument is meaningful.
    """
    (named_saveable_objects, graph_proto, feed_additions,
     registered_savers) = self._gather_saveables(
         object_graph_tensor=object_graph_tensor)

    def _run_save():
      """Create and execute the SaveOp for the checkpoint."""
      if (self._last_save_object_graph != graph_proto
          # When executing eagerly, SaveableObjects must be re-created each
          # time save() is called so they pick up new Tensors passed to their
          # constructors.
          or context.executing_eagerly() or ops.inside_function()):
        saver = functional_saver.MultiDeviceSaver(named_saveable_objects,
                                                  registered_savers)
        save_op = saver.save(file_prefix, options=options)
        with ops.device("/cpu:0"):
          with ops.control_dependencies([save_op]):
            self._cached_save_operation = array_ops.identity(file_prefix)
        self._last_save_object_graph = graph_proto
      return self._cached_save_operation, feed_additions

    def _copy_tensors():
      """Copy the tensors to the host CPU device."""
      for saveable in named_saveable_objects:
        original_device = saveable.device
        with ops.device(original_device):
          for spec in saveable.specs:
            tensor = spec.tensor
            device = spec.device
            if tensor is not None:
              with ops.device(saveable_object_util.set_cpu0(device)):
                spec._tensor = array_ops.identity(tensor)  # pylint: disable=protected-access
                # Modify the device info now that the tensor is copied to the
                # host CPU device.
                spec.device = saveable_object_util.set_cpu0(device)

    def _async_save_fn():
      """The thread function for executing async checkpoint save."""
      with context.executor_scope(
          executor.new_executor(
              enable_async=False, enable_streaming_enqueue=False)):
        _run_save()
        # Update the internal checkpoint state if the checkpoint event is
        # triggered from Checkpoint.save().
        if update_ckpt_state:
          _update_checkpoint_state_internal(
              _convert_file_name_tensor_to_string(file_prefix))

    if options.experimental_enable_async_checkpoint:
      # Execute an async checkpoint save: copy the tensors to the host CPU
      # device first, then trigger the save in a new thread.
      _copy_tensors()

      global _ASYNC_CHECKPOINT_THREAD
      if _ASYNC_CHECKPOINT_THREAD is not None:
        _ASYNC_CHECKPOINT_THREAD.join()
      _ASYNC_CHECKPOINT_THREAD = threading.Thread(target=_async_save_fn)
      _ASYNC_CHECKPOINT_THREAD.start()

      # Return the expected checkpoint path while the async save is ongoing.
      return self._cached_save_operation, feed_additions

    return _run_save()

  def save(self,
           file_prefix,
           checkpoint_number=None,
           session=None,
           options=None,
           update_ckpt_state=False):
    """Save a training checkpoint.

    The saved checkpoint includes variables created by this object and any
    Trackable objects it depends on at the time `Saver.save()` is called.

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix). Names are generated based on this
        prefix and `checkpoint_number`, if provided.
      checkpoint_number: An integer variable or Tensor, used to number
        checkpoints. Typically this value is saved along with other variables in
        training checkpoints, which will happen automatically if it was created
        by `root_trackable` or one of its dependencies (via
        `Trackable._add_variable`).
      session: The session to evaluate variables in. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.
      options: Optional `tf.train.CheckpointOptions` object.
      update_ckpt_state: Optional bool flag. Indicates whether the internal
        checkpoint state needs to be updated. Set this to True only if calling
        from tf.train.Checkpoint.save() to enable updating the checkpoint state.
        By default this is set to False, i.e., not updating checkpoint state.

    Returns:
      The full path to the checkpoint.
    """
    options = options or checkpoint_options.CheckpointOptions()
    feed_dict = {}
    use_session = (not context.executing_eagerly() and
                   not ops.inside_function())
    if checkpoint_number:
      file_prefix = "%s-%d" % (file_prefix, checkpoint_number)
    if use_session:
      if self._object_graph_feed_tensor is None:
        with ops.device("/cpu:0"):
          self._object_graph_feed_tensor = constant_op.constant(
              "", dtype=dtypes.string)
          self._file_prefix_feed_tensor = constant_op.constant(
              "", dtype=dtypes.string)
      object_graph_tensor = self._object_graph_feed_tensor
      file_prefix_tensor = self._file_prefix_feed_tensor
      feed_dict[file_prefix_tensor] = file_prefix
    else:
      with ops.device("/cpu:0"):
        file_prefix_tensor = ops.convert_to_tensor(
            file_prefix, dtype=dtypes.string)
      object_graph_tensor = None

    if not tensor_util.is_tensor(file_prefix):
      file_io.recursive_create_dir(os.path.dirname(file_prefix))

    save_path, new_feed_additions = self._save_cached_when_graph_building(
        file_prefix_tensor, object_graph_tensor, options, update_ckpt_state)

    if new_feed_additions:
      feed_dict.update(new_feed_additions)
    if not use_session:
      session = None
    elif session is None:
      session = get_session()

    if session:
      return session.run(save_path, feed_dict=feed_dict)
    else:
      return save_path

  def restore(self, save_path, options=None):
    """Restore a training checkpoint.

    Restores `root_trackable` and any objects that it tracks
    (transitive). Either assigns values immediately if variables to restore have
    been created already, or defers restoration until the variables are
    created. Dependencies added to the `root_trackable` passed to the
    constructor after this call will be matched if they have a corresponding
    object in the checkpoint.

    When building a graph, restorations are added to the graph but not run.

    ```python
    saver = Saver(root)
    saver.restore(path)
    ```

    To ensure that loading is complete and no more deferred restorations will
    take place, you can use the `assert_consumed()` method of the status object
    returned by the `restore` call.

    The assert will raise an exception unless every object was matched and all
    checkpointed values have a matching variable object.

    ```python
    saver = Saver(root)
    saver.restore(path).assert_consumed()
    ```

    When graph building, `assert_consumed()` indicates that all of the restore
    ops which will be created for this checkpoint have been created. They can be
    run via the `run_restore_ops()` function of the status object:

    ```python
    saver.restore(path).assert_consumed().run_restore_ops()
    ```

    If the checkpoint has not been consumed completely, then the list of restore
    ops will grow as more objects are added to the dependency graph.

    Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
    method. There is no deferred loading, and names are used to match
    variables. No restore ops are created/run until `run_restore_ops()` or
    `initialize_or_restore()` are called on the returned status object, even
    when executing eagerly. Re-encode name-based checkpoints using this
    object-based `Saver.save` as soon as possible.

    Args:
      save_path: The path to the checkpoint, as returned by `save` or
        `tf.train.latest_checkpoint`. If None (as when there is no latest
        checkpoint for `tf.train.latest_checkpoint` to return), returns an
        object which may run initializers for objects in the dependency graph.
        If the checkpoint was written by the name-based
        `tf.compat.v1.train.Saver`, names are used to match variables.
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      A load status object, which can be used to make assertions about the
      status of checkpoint restoration and run initialization/restore ops
      (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
      `save_path` is `None`).

      If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
      object is returned which runs restore ops from a name-based saver.

    Raises:
      RuntimeError: When a checkpoint file saved by async checkpoint is not
        available upon restore().
    N)r   r   )r0  rD  model)rC   r   r}   r~   r   r   rt   r   r   )r  proto_idr  )r   r	  )1r   r  r.  rS  r   ru   r{  rO   r"   r   r   r   r   r   r    r   r   r   r   r   r   r>  _name_based_restoresr%  _name_based_attribute_restorer  rU  r   r   r^  r   r   r   rq   rZ  rT  restore_libZCheckpointPositionr   r  attached_dependenciesr   rI   rJ   rL   rK   ry   refr  )rB   r   rt   r~   graph_buildingr   r   Zrestore_coordinatorZexisting_trackabler  Zfile_prefix_feed_dictrC   r  r  r  Z	proto_refZload_statusr.   r.   r/   r   V  s    E

,*
	


zTrackableSaver.restore)N)F)NNNF)N)	r[   r\   r]   r^   rD   re  r  rh  r   r.   r.   r.   r/   rR  c  s   &
 


def frozen_saver(root_trackable):
  """Creates a static `tf.compat.v1.train.Saver` from a trackable object.
  The returned `Saver` saves object-based checkpoints, but these checkpoints
  will no longer reflect structural changes to the object graph, only changes to
  the values of `Variable`s added as dependencies of the root object before
  `freeze` was called.

  `restore` works on the returned `Saver`, but requires that the object graph of
  the checkpoint being loaded exactly matches the object graph when `freeze` was
  called. This is in contrast the object-based restore performed by
  `tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's
  object graph and the current Python object graph.

  Args:
    root_trackable: A trackable object to save.

  Returns:
    A saver which saves object-based checkpoints for the object graph frozen at
    the time `frozen_saver` was called.
  )r   Zfrozen_saveables_and_saversr   r   r   r   )r   rc  r   r.   r.   r/   frozen_saver  s    r  c                 C   s,   t | tjtjfs(td| d|  dd S )Nz`Checkpoint` was expecting zD to be a trackable object (an object derived from `Trackable`), got . If you believe this object should be trackable (i.e. it is part of the TensorFlow Python API and manages state), please open an issue.)r   r    	Trackabler   Functionr   )rV   r   r.   r.   r/   _assert_trackable  s    r  c                 C   s    t jtj| | | gdd dS )z!Update internal checkpoint state.Tsave_dirmodel_checkpoint_pathall_model_checkpoint_pathssave_relative_pathsN)r    update_checkpoint_state_internalr  r  r  )	file_pathr.   r.   r/   ru  !  s    
  """
  named_saveable_objects, registered_savers = (
      util.frozen_saveables_and_savers(
          graph_view_lib.ObjectGraphView(root_trackable)))
  return functional_saver.MultiDeviceSaver(named_saveable_objects,
                                           registered_savers)


def _assert_trackable(obj, name):
  if not isinstance(obj, (base.Trackable, def_function.Function)):
    raise ValueError(
        f"`Checkpoint` was expecting {name} to be a trackable object (an "
        f"object derived from `Trackable`), got {obj}. If you believe this "
        "object should be trackable (i.e. it is part of the TensorFlow "
        "Python API and manages state), please open an issue.")


def _update_checkpoint_state_internal(file_path):
  """Update internal checkpoint state."""
  checkpoint_management.update_checkpoint_state_internal(
      save_dir=os.path.dirname(file_path),
      model_checkpoint_path=file_path,
      all_model_checkpoint_paths=[file_path],
      save_relative_paths=True)


def _convert_file_name_tensor_to_string(tensor):
  """Convert file name tensor to string."""
  output = tensor
  if tensor_util.is_tf_type(output):
    # Convert to numpy when not building a tf.function.
    if context.executing_eagerly():
      output = compat.as_str(output.numpy())
  else:
    # Graph + Session, so the value has already been produced by session.run.
    output = compat.as_str(output)
  return output
 ZdddZ	dd Z
  ZS )CheckpointV1a  Groups trackable objects, saving and restoring them.

  `Checkpoint`'s constructor accepts keyword arguments whose values are types
  that contain trackable state, such as `tf.compat.v1.train.Optimizer`
  implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
  `tf.keras.Model` implementations. It saves these values with a checkpoint, and
  maintains a `save_counter` for numbering checkpoints.

  Example usage when graph building:

  ```python
  import tensorflow as tf
  import os

  checkpoint_directory = "/tmp/training_checkpoints"
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")

  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
  train_op = optimizer.minimize( ... )
  status.assert_consumed()  # Optional sanity checks.
  with tf.compat.v1.Session() as session:
    # Use the Session to restore variables, or initialize them if
    # tf.train.latest_checkpoint returned None.
    status.initialize_or_restore(session)
    for _ in range(num_training_steps):
      session.run(train_op)
    checkpoint.save(file_prefix=checkpoint_prefix)
  ```

  Example usage with eager execution enabled:

  ```python
  import tensorflow as tf
  import os

  tf.compat.v1.enable_eager_execution()

  checkpoint_directory = "/tmp/training_checkpoints"
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")

  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
  for _ in range(num_training_steps):
    optimizer.minimize( ... )  # Variables will be restored on creation.
  status.assert_consumed()  # Optional sanity checks.
  checkpoint.save(file_prefix=checkpoint_prefix)
  ```

  `Checkpoint.save` and `Checkpoint.restore` write and read object-based
  checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads
  `variable.name` based checkpoints. Object-based checkpointing saves a graph of
  dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,
  etc.) with named edges, and this graph is used to match variables when
  restoring a checkpoint. It can be more robust to changes in the Python
  program, and helps to support restore-on-create for variables when executing
  eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new
  code.

  `Checkpoint` objects have dependencies on the objects passed as keyword
  arguments to their constructors, and each dependency is given a name that is
  identical to the name of the keyword argument for which it was created.
  TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
  dependencies on their variables (e.g. "kernel" and "bias" for
  `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
  dependencies easy in user-defined classes, since `Model` hooks into attribute
  assignment. For example:

  ```python
  class Regress(tf.keras.Model):

    def __init__(self):
      super(Regress, self).__init__()
      self.input_transform = tf.keras.layers.Dense(10)
      # ...

    def call(self, inputs):
      x = self.input_transform(inputs)
      # ...
  ```

  This `Model` has a dependency named "input_transform" on its `Dense` layer,
  which in turn depends on its variables. As a result, saving an instance of
  `Regress` using `tf.train.Checkpoint` will also save all the variables created
  by the `Dense` layer.

  When variables are assigned to multiple workers, each worker writes its own
  section of the checkpoint. These sections are then merged/re-indexed to behave
  as a single checkpoint. This avoids copying all variables to one worker, but
  does require that all workers see a common filesystem.

  While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the
  same format, note that the root of the resulting checkpoint is the object the
  save method is attached to. This means saving a `tf.keras.Model` using
  `save_weights` and loading into a `tf.train.Checkpoint` with a `Model`
  attached (or vice versa) will not match the `Model`'s variables. See the
  [guide to training
  checkpoints](https://www.tensorflow.org/guide/checkpoint) for
  details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for
  training checkpoints.

  Attributes:
    save_counter: Incremented when `save()` is called. Used to number
      checkpoints.
  c                    s   t t|   t  tdu r$t aW d   n1 s80    Y  t| dd dD ]<\}}t| || t	t
| |tjtjfsVtd| dqVd| _d| _tt| | _dS )a  Group objects into a training checkpoint.

    Args:
      **kwargs: Keyword arguments are set as attributes of this object, and are
        saved with the checkpoint. Values must be trackable objects.

    Raises:
      ValueError: If objects in `kwargs` are not trackable.
    Nc                 S   s   | d S Nr   r.   itemr.   r.   r/   r    r  z'CheckpointV1.__init__.<locals>.<lambda>r   zX`Checkpoint` was expecting a trackable object (an object derived from `Trackable`), got r  )superr  rD   _END_TIME_OF_LAST_WRITE_LOCK_END_TIME_OF_LAST_WRITEtimer   rN   setattrr   r,  r    r  r   r  r   _save_counter_save_assign_oprR  r   r   _saver)rB   r   kv	__class__r.   r/   rD     s"    
&zCheckpointV1.__init__c              
   C   sT   | j du rPtd, tt| ddtjdd| _ W d   n1 sF0    Y  dS )/Create a save counter if it does not yet exist.NrD  save_counterr   Fr   r   r   r   )r  r   r   r!   NoDependencyr   r   int64r   r.   r.   r/   _maybe_create_save_counter  s    
z'CheckpointV1._maybe_create_save_counterNc                 C   s   t   }| jj||d}t   }tjtt||d t( tjttt	|d |a	W d   n1 sf0    Y  t
|rt rt| }n
t|}tjtt|d |S )aA  Writes a training checkpoint.

    The checkpoint includes variables created by this object and any
    trackable objects it depends on at the time `Checkpoint.write()` is
    called.

    `write` does not number checkpoints, increment `save_counter`, or update the
    metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
    use by higher level checkpoint management utilities. `save` provides a very
    basic implementation of these features.

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix).
      session: The session to evaluate variables in. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.

    Returns:
      The full path to the checkpoint (i.e. `file_prefix`).
    )rk  r
   	api_labelmicrosecondsNr  filesize)r  r  rh  r   AddCheckpointWriteDuration_CHECKPOINT_V1r0   r  AddTrainingTimeSavedr  r   r  r   r   r%   r  r  RecordCheckpointSizer>   )rB   rk  r
   
start_timer  end_timer.   r.   r/   write  s.    "

zCheckpointV1.writec                 C   s   |    | jS zAn integer variable which starts at zero and is incremented on save.

    Used to number checkpoints.

    Returns:
      The save counter variable.
    r  r  r   r.   r.   r/   r    s    	zCheckpointV1.save_counterc                 C   s   t   }|rDt rtd|du r,t }| jdu rD|| jj	 |rR| j
du rt| j  | jjddd}W d   n1 s0    Y  |rt|| _
|r|| j
}n| }| jd||f |d}tjtj|||gdd |S )	a  Saves a training checkpoint and provides basic checkpoint management.

    The saved checkpoint includes variables created by this object and any
    trackable objects it depends on at the time `Checkpoint.save()` is
    called.

    `save` is a basic convenience wrapper around the `write` method,
    sequentially numbering checkpoints using `save_counter` and updating the
    metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
    management, for example garbage collection and custom numbering, may be
    provided by other utilities which also wrap `write`
    (`tf.train.CheckpointManager` for example).

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix). Names are generated based on this
        prefix and `Checkpoint.save_counter`.
      session: The session to evaluate variables in. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.

    Returns:
      The full path to the checkpoint.
    """
    graph_building = not context.executing_eagerly()
    if graph_building:
      if ops.inside_function():
        raise NotImplementedError(
            "Calling tf.train.Checkpoint.save() from a function is not "
            "supported, as save() modifies saving metadata in ways not "
            "supported by TensorFlow Operations. Consider using "
            "tf.train.Checkpoint.write(), a lower-level API which does not "
            "update metadata. tf.train.latest_checkpoint and related APIs "
            "will not see this checkpoint.")
      if session is None:
        session = get_session()
      if self._save_counter is None:
        # When graph building, a new save counter variable must be
        # initialized before it can be incremented with assign_add below.
        session.run(self.save_counter.initializer)
    if not graph_building or self._save_assign_op is None:
      with ops.colocate_with(self.save_counter):
        assign_op = self.save_counter.assign_add(1, read_value=True)
      if graph_building:
        self._save_assign_op = data_structures.NoDependency(assign_op)
    if graph_building:
      checkpoint_number = session.run(self._save_assign_op)
    else:
      checkpoint_number = assign_op.numpy()
    file_path = self.write("%s-%d" % (file_prefix, checkpoint_number),
                           session=session)
    checkpoint_management.update_checkpoint_state_internal(
        save_dir=os.path.dirname(file_prefix),
        model_checkpoint_path=file_path,
        all_model_checkpoint_paths=[file_path],
        save_relative_paths=True)
    return file_path

  def restore(self, save_path):
    """Restore a training checkpoint.

    Restores this `Checkpoint` and any objects it depends on.

    When executing eagerly, either assigns values immediately if variables to
    restore have been created already, or defers restoration until the variables
    are created. Dependencies added after this call will be matched if they have
    a corresponding object in the checkpoint (the restore request will queue in
    any trackable object waiting for the expected dependency to be added).

    When graph building, restoration ops are added to the graph but not run
    immediately.

    ```python
    checkpoint = tf.train.Checkpoint( ... )
    checkpoint.restore(path)
    ```

    To ensure that loading is complete and no more deferred restorations will
    take place, you can use the `assert_consumed()` method of the status object
    returned by `restore`.
    The assert will raise an exception if any Python objects in the dependency
    graph were not found in the checkpoint, or if any checkpointed values do not
    have a matching Python object:

    ```python
    checkpoint = tf.train.Checkpoint( ... )
    checkpoint.restore(path).assert_consumed()
    ```

    When graph building, `assert_consumed()` indicates that all of the restore
    ops that will be created for this checkpoint have been created. They can be
    run via the `run_restore_ops()` method of the status object:

    ```python
    checkpoint.restore(path).assert_consumed().run_restore_ops()
    ```

    If the checkpoint has not been consumed completely, then the list of restore
    ops will grow as more objects are added to the dependency graph.

    To check that all variables in the Python object have restored values from
    checkpoint, use `assert_existing_objects_matched()`. This assertion is
    useful when called after the variables in your graph have been created.

    Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
    method. Names are used to match variables. No restore ops are created/run
    until `run_restore_ops()` or `initialize_or_restore()` are called on the
    returned status object when graph building, but there is restore-on-creation
    when executing eagerly. Re-encode name-based checkpoints using
    `tf.train.Checkpoint.save` as soon as possible.
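
    For example, a sketch of re-encoding a name-based checkpoint (paths are
    placeholders):

    ```python
    checkpoint = tf.train.Checkpoint( ... )
    status = checkpoint.restore('/path/to/name_based_checkpoint')
    status.run_restore_ops()  # No-op when executing eagerly.
    checkpoint.save('/path/to/object_based_checkpoint')
    ```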

    Args:
      save_path: The path to the checkpoint, as returned by `save` or
        `tf.train.latest_checkpoint`. If None (as when there is no latest
        checkpoint for `tf.train.latest_checkpoint` to return), returns an
        object which may run initializers for objects in the dependency graph.
        If the checkpoint was written by the name-based
        `tf.compat.v1.train.Saver`, names are used to match variables.

    Returns:
      A load status object, which can be used to make assertions about the
      status of a checkpoint restoration and run initialization/restore ops.

      The returned status object has the following methods:

      * `assert_consumed()`:
          Raises an exception if any variables are unmatched: either
          checkpointed values which don't have a matching Python object or
          Python objects in the dependency graph with no values in the
          checkpoint. This method returns the status object, and so may be
          chained with `initialize_or_restore` or `run_restore_ops`.

      * `assert_existing_objects_matched()`:
          Raises an exception if any existing Python objects in the dependency
          graph are unmatched. Unlike `assert_consumed`, this assertion will
          pass if values in the checkpoint have no corresponding Python
          objects. For example a `tf.keras.Layer` object which has not yet been
          built, and so has not created any variables, will pass this assertion
          but will fail `assert_consumed`. Useful when loading part of a larger
          checkpoint into a new Python program, e.g. a training checkpoint with
          a `tf.compat.v1.train.Optimizer` was saved but only the state required
          for inference is being loaded. This method returns the status object,
          and so may be chained with `initialize_or_restore` or
          `run_restore_ops`.

      * `assert_nontrivial_match()`: Asserts that something aside from the root
          object was matched. This is a very weak assertion, but is useful for
          sanity checking in library code where objects may exist in the
          checkpoint which haven't been created in Python and some Python
          objects may not have a checkpointed value.

      * `expect_partial()`: Silence warnings about incomplete checkpoint
          restores. Warnings are otherwise printed for unused parts of the
          checkpoint file or object when the `Checkpoint` object is deleted
          (often at program shutdown).

      * `initialize_or_restore(session=None)`:
          When graph building, runs variable initializers if `save_path` is
          `None`, but otherwise runs restore operations. If no `session` is
          explicitly specified, the default session is used. No effect when
          executing eagerly (variables are initialized or restored eagerly).

      * `run_restore_ops(session=None)`:
          When graph building, runs restore operations. If no `session` is
          explicitly specified, the default session is used. No effect when
          executing eagerly (restore operations are run eagerly). May only be
          called when `save_path` is not `None`.
    """
    start_time = time.time()
    status = self._saver.restore(save_path=save_path)
    # Create the save counter now so it gets initialized with other variables
    # when graph building.
    self._maybe_create_save_counter()
    if isinstance(status, NameBasedSaverStatus):
      status.add_to_optionally_restored(self.save_counter)
    metrics.AddCheckpointReadDuration(
        api_label=_CHECKPOINT_V1,
        microseconds=_get_duration_microseconds(start_time, time.time()))
    return status


@tf_export("train.Checkpoint", v1=[])
class Checkpoint(autotrackable.AutoTrackable):
  """Manages saving/restoring trackable values to disk.

  TensorFlow objects may contain trackable state, such as `tf.Variable`s,
  `tf.keras.optimizers.Optimizer` implementations, `tf.data.Dataset` iterators,
  `tf.keras.Layer` implementations, or  `tf.keras.Model` implementations.
  These are called **trackable objects**.

  A `Checkpoint` object can be constructed to save either a single or group of
  trackable objects to a checkpoint file. It maintains a `save_counter` for
  numbering checkpoints.

  Example:

  ```python
  model = tf.keras.Model(...)
  checkpoint = tf.train.Checkpoint(model)

  # Save a checkpoint to /tmp/training_checkpoints-{save_counter}. Every time
  # checkpoint.save is called, the save counter is increased.
  save_path = checkpoint.save('/tmp/training_checkpoints')

  # Restore the checkpointed values to the `model` object.
  checkpoint.restore(save_path)
  ```

  Example 2:

  ```python
  import tensorflow as tf
  import os

  checkpoint_directory = "/tmp/training_checkpoints"
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")

  # Create a Checkpoint that will manage two objects with trackable state,
  # one we name "optimizer" and the other we name "model".
  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
  for _ in range(num_training_steps):
    optimizer.minimize( ... )  # Variables will be restored on creation.
  status.assert_consumed()  # Optional sanity checks.
  checkpoint.save(file_prefix=checkpoint_prefix)
  ```

  `Checkpoint.save()` and `Checkpoint.restore()` write and read object-based
  checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver` which
  writes and
  reads `variable.name` based checkpoints. Object-based checkpointing saves a
  graph of dependencies between Python objects (`Layer`s, `Optimizer`s,
  `Variable`s, etc.) with named edges, and this graph is used to match variables
  when restoring a checkpoint. It can be more robust to changes in the Python
  program, and helps to support restore-on-create for variables.

  `Checkpoint` objects have dependencies on the objects passed as keyword
  arguments to their constructors, and each dependency is given a name that is
  identical to the name of the keyword argument for which it was created.
  TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
  dependencies on their own variables (e.g. "kernel" and "bias" for
  `tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
  dependencies easy in user-defined classes, since `Model` hooks into attribute
  assignment. For example:

  ```python
  class Regress(tf.keras.Model):

    def __init__(self):
      super(Regress, self).__init__()
      self.input_transform = tf.keras.layers.Dense(10)
      # ...

    def call(self, inputs):
      x = self.input_transform(inputs)
      # ...
  ```

  This `Model` has a dependency named "input_transform" on its `Dense` layer,
  which in turn depends on its variables. As a result, saving an instance of
  `Regress` using `tf.train.Checkpoint` will also save all the variables created
  by the `Dense` layer.

  When variables are assigned to multiple workers, each worker writes its own
  section of the checkpoint. These sections are then merged/re-indexed to behave
  as a single checkpoint. This avoids copying all variables to one worker, but
  does require that all workers see a common filesystem.

  This function differs slightly from the Keras Model `save_weights` function.
  `tf.keras.Model.save_weights` creates a checkpoint file with the name
  specified in `filepath`, while `tf.train.Checkpoint` numbers the checkpoints,
  using `filepath` as the prefix for the checkpoint file names. Aside from this,
  `model.save_weights()` and `tf.train.Checkpoint(model).save()` are equivalent.
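
  A sketch of the difference (paths are placeholders):

  ```python
  model.save_weights("/tmp/weights")      # writes "/tmp/weights.index", ...
  ckpt = tf.train.Checkpoint(model=model)
  ckpt.save("/tmp/weights")               # writes "/tmp/weights-1.index", ...
  ```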

  See the [guide to training
  checkpoints](https://www.tensorflow.org/guide/checkpoint) for
  details.

  Attributes:
    save_counter: Incremented when `save()` is called. Used to number
      checkpoints.
  """

  def __init__(self, root=None, **kwargs):
    """Creates a training checkpoint for a single or group of objects.

    Args:
      root: The root object to checkpoint. `root` may be a trackable object or
        `WeakRef` of a trackable object.
      **kwargs: Keyword arguments are set as attributes of this object, and are
        saved with the checkpoint. All `kwargs` must be trackable objects, or a
        nested structure of trackable objects (`list`, `dict`, or `tuple`).

    Raises:
      ValueError: If `root` or the objects in `kwargs` are not trackable. A
        `ValueError` is also raised if the `root` object tracks different
        objects from the ones listed in attributes in kwargs (e.g.
        `root.child = A` and `tf.train.Checkpoint(root, child=B)` are
        incompatible).

    """
    super(Checkpoint, self).__init__()
    global _END_TIME_OF_LAST_WRITE
    with _END_TIME_OF_LAST_WRITE_LOCK:
      if _END_TIME_OF_LAST_WRITE is None:
        _END_TIME_OF_LAST_WRITE = time.time()

    attached_dependencies = None
    self._save_counter = None  # Created lazily for restore-on-create.
    self._save_assign_op = None

    if root:
      trackable_root = root() if isinstance(root, weakref.ref) else root
      _assert_trackable(trackable_root, "root")
      attached_dependencies = []
      # All keyword arguments (including root itself) are set as children of
      # root.
      kwargs["root"] = root
      trackable_root._maybe_initialize_trackable()
      self._save_counter = data_structures.NoDependency(
          trackable_root._lookup_dependency("save_counter"))

    for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
      setattr(self, k, v)
      # Check getattr instead of v directly, since setattr converts lists,
      # dicts and tuples to trackable data structures.
      converted_v = getattr(self, k)
      if isinstance(converted_v, weakref.ref):
        converted_v = converted_v()
      _assert_trackable(converted_v, k)

      if root:
        # Make sure root does not already track a different object under
        # this name.
        child = trackable_root._lookup_dependency(k)
        if child is None:
          attached_dependencies.append(
              base.WeakTrackableReference(k, converted_v))
        elif child != converted_v:
          raise ValueError(
              f"Cannot create a Checkpoint with keyword argument {k} if "
              f"root.{k} already exists.")

    self._saver = TrackableSaver(
        graph_view_lib.ObjectGraphView(
            root if root else self,
            attached_dependencies=attached_dependencies))
    self._attached_dependencies = data_structures.NoDependency(
        attached_dependencies)

  def _maybe_create_save_counter(self):
    """Create a save counter if it does not yet exist."""
    if self._save_counter is None:
      # Initialized to 0 and incremented before saving.
      with ops.device("/cpu:0"):
        self._save_counter = data_structures.NoDependency(
            add_variable(
                self,
                name="save_counter",
                initializer=0,
                dtype=dtypes.int64,
                trainable=False))
      if self._attached_dependencies is not None:
        self._attached_dependencies.append(
            base.TrackableReference("save_counter", self._save_counter))
        # When loading a checkpoint, the save counter is created after the
        # rest of the dependency graph, so restore it here if the checkpoint
        # recorded a value for it.
        if isinstance(self.root, weakref.ref):
          root = self.root()
        else:
          root = self.root
        deferred = root._deferred_dependencies.pop("save_counter", ())
        if deferred:
          deferred[0].restore(self._save_counter)

  def write(self, file_prefix, options=None):
    """Writes a training checkpoint.

    The checkpoint includes variables created by this object and any
    trackable objects it depends on at the time `Checkpoint.write()` is
    called.

    `write` does not number checkpoints, increment `save_counter`, or update the
    metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
    use by higher level checkpoint management utilities. `save` provides a very
    basic implementation of these features.

    Checkpoints written with `write` must be read with `read`.

    Example usage:

    ```
    step = tf.Variable(0, name="step")
    checkpoint = tf.train.Checkpoint(step=step)
    checkpoint.write("/tmp/ckpt")

    # Later, read the checkpoint with read()
    checkpoint.read("/tmp/ckpt")

    # You can also pass options to write() and read(). For example this
    # runs the IO ops on the localhost:
    options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
    checkpoint.write("/tmp/ckpt", options=options)

    # Later, read the checkpoint with read()
    checkpoint.read("/tmp/ckpt", options=options)
    ```

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix).
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      The full path to the checkpoint (i.e. `file_prefix`).
    """
    if isinstance(file_prefix, os.PathLike):
      file_prefix = os.fspath(file_prefix)
    return self._write(file_prefix, options)

  def _write(self, file_prefix, options=None, update_ckpt_state=False):
    """Internal method that implements Checkpoint.write().

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix).
      options: Optional `tf.train.CheckpointOptions` object.
      update_ckpt_state: Optional bool flag. Indicates whether the internal
        checkpoint state needs to be updated.

    Returns:
      The full path to the checkpoint (i.e. `file_prefix`).
    """
    start_time = time.time()
    options = options or checkpoint_options.CheckpointOptions()
    output = self._saver.save(
        file_prefix=file_prefix,
        options=options,
        update_ckpt_state=update_ckpt_state)
    end_time = time.time()
    metrics.AddCheckpointWriteDuration(
        api_label=_CHECKPOINT_V2,
        microseconds=_get_duration_microseconds(start_time, end_time))
    global _END_TIME_OF_LAST_WRITE
    with _END_TIME_OF_LAST_WRITE_LOCK:
      metrics.AddTrainingTimeSaved(
          api_label=_CHECKPOINT_V2,
          microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE,
                                                  end_time))
      _END_TIME_OF_LAST_WRITE = end_time
    output = _convert_file_name_tensor_to_string(output)
    # With async checkpointing the file may still be in flight, so the size
    # is only recorded for synchronous writes.
    if not options.experimental_enable_async_checkpoint:
      metrics.RecordCheckpointSize(
          api_label=_CHECKPOINT_V2, filesize=_get_checkpoint_size(output))
    return output

  @property
  def save_counter(self):
    """An integer variable which starts at zero and is incremented on save.

    Used to number checkpoints.

    Returns:
      The save counter variable.
    """
    self._maybe_create_save_counter()
    return self._save_counter

  def save(self, file_prefix, options=None):
    """Saves a training checkpoint and provides basic checkpoint management.
    The saved checkpoint includes variables created by this object and any
    trackable objects it depends on at the time `Checkpoint.save()` is
    called.

    `save` is a basic convenience wrapper around the `write` method,
    sequentially numbering checkpoints using `save_counter` and updating the
    metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
    management, for example garbage collection and custom numbering, may be
    provided by other utilities which also wrap `write` and `read`.
    (`tf.train.CheckpointManager` for example).

    ```
    step = tf.Variable(0, name="step")
    checkpoint = tf.train.Checkpoint(step=step)
    checkpoint.save("/tmp/ckpt")

    # Later, read the checkpoint with restore()
    checkpoint.restore("/tmp/ckpt-1")

    # You can also pass options to save() and restore(). For example this
    # runs the IO ops on the localhost:
    options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
    checkpoint.save("/tmp/ckpt", options=options)

    # Later, read the checkpoint with restore()
    checkpoint.restore("/tmp/ckpt-1", options=options)
    ```

    Args:
      file_prefix: A prefix to use for the checkpoint filenames
        (/path/to/directory/and_a_prefix). Names are generated based on this
        prefix and `Checkpoint.save_counter`.
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      The full path to the checkpoint.
    z6Async checkpoint is not supported for non-eager mode. r  Nr   Tr  r  )rt   rw  )r   r  r  r  r   r  r   r   rz  r  r   rg  r6   r  r
  r  r   r  r  r  r!   r  r  r  ru  
async_wait)rB   rk  rt   r  r
   r  r  r  r.   r.   r/   rh    sD    )


.
	zCheckpoint.savec                 C   sV   t   }t|tjrt|}|p(t }| jj||d}t	j
tt|t   d |S )aR  Reads a training checkpoint written with `write`.

    Reads this `Checkpoint` and any objects it depends on.

    This method is just like `restore()` but does not expect the `save_counter`
    variable in the checkpoint. It only restores the objects that the checkpoint
    already depends on.

    The method is primarily intended for use by higher level checkpoint
    management utilities that use `write()` instead of `save()` and have their
    own mechanisms to number and track checkpoints.

    Example usage:

    ```python
    # Create a checkpoint with write()
    ckpt = tf.train.Checkpoint(v=tf.Variable(1.))
    path = ckpt.write('/tmp/my_checkpoint')

    # Later, load the checkpoint with read()
    # With restore() assert_consumed() would have failed.
    checkpoint.read(path).assert_consumed()

    # You can also pass options to read(). For example this
    # runs the IO ops on the localhost:
    options = tf.train.CheckpointOptions(
        experimental_io_device="/job:localhost")
    checkpoint.read(path, options=options)
    ```

    Args:
      save_path: The path to the checkpoint as returned by `write`.
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      A load status object, which can be used to make assertions about the
      status of a checkpoint restoration.  See `restore` for details.
    )r   rt   r  )r  r   r  r  r  r   r  r  r   r   r  r  r0   )rB   r   rt   r  resultr.   r.   r/   read^	  s    '
zCheckpoint.readc                 C   s   |}t |tjrt|}|durVt|rVtt|sLtt	|rVt
|}z"| j||d}t rvt  W nD tjy } z*tddd| d|j dW Y d}~n
d}~0 0 |   t |tr|| j |S )a}  Restores a training checkpoint.

    Restores this `Checkpoint` and any objects it depends on.

    This method is intended to be used to load checkpoints created by `save()`.
    For checkpoints created by `write()` use the `read()` method which does not
    expect the `save_counter` variable added by `save()`.

    `restore()` either assigns values immediately if variables to restore have
    been created already, or defers restoration until the variables are
    created. Dependencies added after this call will be matched if they have a
    corresponding object in the checkpoint (the restore request will queue in
    any trackable object waiting for the expected dependency to be added).

    ```python
    checkpoint = tf.train.Checkpoint( ... )
    checkpoint.restore(path)

    # You can additionally pass options to restore():
    options = tf.CheckpointOptions(experimental_io_device="/job:localhost")
    checkpoint.restore(path, options=options)
    ```

    To ensure that loading is complete and no more deferred restorations will
    take place, use the `assert_consumed()` method of the status object returned
    by `restore()`:

    ```python
    checkpoint.restore(path, options=options).assert_consumed()
    ```

    The assert will raise an error if any Python objects in the dependency graph
    were not found in the checkpoint, or if any checkpointed values do not have
    a matching Python object.

    Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can be
    loaded using this method. Names are used to match variables. Re-encode
    name-based checkpoints using `tf.train.Checkpoint.save` as soon as possible.

    **Loading from SavedModel checkpoints**

    To load values from a SavedModel, just pass the SavedModel directory
    to checkpoint.restore:

    ```python
    model = tf.keras.Model(...)
    tf.saved_model.save(model, path)  # or model.save(path, save_format='tf')

    checkpoint = tf.train.Checkpoint(model)
    checkpoint.restore(path).expect_partial()
    ```

    This example calls `expect_partial()` on the loaded status, since
    SavedModels saved from Keras often generate extra keys in the checkpoint.
    Otherwise, the program prints a lot of warnings about unused keys at exit
    time.

    Args:
      save_path: The path to the checkpoint, as returned by `save` or
        `tf.train.latest_checkpoint`. If the checkpoint was written by the
        name-based `tf.compat.v1.train.Saver`, names are used to match
        variables. This path may also be a SavedModel directory.
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      A load status object, which can be used to make assertions about the
      status of a checkpoint restoration.

      The returned status object has the following methods:

      * `assert_consumed()`:
          Raises an exception if any variables are unmatched: either
          checkpointed values which don't have a matching Python object or
          Python objects in the dependency graph with no values in the
          checkpoint. This method returns the status object, and so may be
          chained with other assertions.

      * `assert_existing_objects_matched()`:
          Raises an exception if any existing Python objects in the dependency
          graph are unmatched. Unlike `assert_consumed`, this assertion will
          pass if values in the checkpoint have no corresponding Python
          objects. For example a `tf.keras.Layer` object which has not yet been
          built, and so has not created any variables, will pass this assertion
          but fail `assert_consumed`. Useful when loading part of a larger
          checkpoint into a new Python program, e.g. a training checkpoint with
          a `tf.compat.v1.train.Optimizer` was saved but only the state
          required for inference is being loaded. This method returns the
          status object, and
          so may be chained with other assertions.

      * `assert_nontrivial_match()`: Asserts that something aside from the root
          object was matched. This is a very weak assertion, but is useful for
          sanity checking in library code where objects may exist in the
          checkpoint which haven't been created in Python and some Python
          objects may not have a checkpointed value.

      * `expect_partial()`: Silence warnings about incomplete checkpoint
          restores. Warnings are otherwise printed for unused parts of the
          checkpoint file or object when the `Checkpoint` object is deleted
          (often at program shutdown).

    Raises:
      NotFoundError: If a checkpoint or SavedModel cannot be found at
        `save_path`.
    """
    orig_save_path = save_path
    if isinstance(save_path, os.PathLike):
      save_path = os.fspath(save_path)

    # If a SavedModel directory was passed, point at its variables file.
    if save_path is not None and gfile.IsDirectory(save_path) and (
        gfile.Exists(utils_impl.get_saved_model_pb_path(save_path)) or
        gfile.Exists(utils_impl.get_saved_model_pbtxt_path(save_path))):
      save_path = utils_impl.get_variables_path(save_path)

    try:
      status = self.read(save_path, options=options)
      if context.executing_eagerly():
        context.async_wait()  # Ensure restore operations have completed.
    except errors_impl.NotFoundError as e:
      raise errors_impl.NotFoundError(
          None, None,
          f"Error when restoring from checkpoint or SavedModel at "
          f"{orig_save_path}: {e.message}"
          f"\nPlease double-check that the path is correct. You may be "
          "missing the checkpoint suffix (e.g. the '-1' in "
          "'path/to/ckpt-1').")
    # Create the save counter now so it gets initialized with other variables
    # when graph building. Creating it earlier would lead to errors when
    # using, say, train.Saver() to save the model before initializing it.
    self._maybe_create_save_counter()
    if isinstance(status, NameBasedSaverStatus):
      status.add_to_optionally_restored(self.save_counter)
    return status