"""Keras 3D transposed convolution layer (sometimes called deconvolution)."""

import tensorflow.compat.v2 as tf

from keras import activations
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.dtensor import utils
from keras.engine.input_spec import InputSpec
from keras.layers.convolutional.conv3d import Conv3D
from keras.utils import conv_utils
from tensorflow.python.util.tf_export import keras_export


@keras_export(
    "keras.layers.Conv3DTranspose", "keras.layers.Convolution3DTranspose"
)
class Conv3DTranspose(Conv3D):
    """Transposed convolution layer (sometimes called Deconvolution).

    The need for transposed convolutions generally arises
    from the desire to use a transformation going in the opposite direction
    of a normal convolution, i.e., from something that has the shape of the
    output of some convolution to something that has the shape of its input
    while maintaining a connectivity pattern that is compatible with
    said convolution.
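
    For instance, a transposed convolution with the same `kernel_size` and
    `strides` as a preceding convolution can map that convolution's output
    shape back to its input shape, as in this small illustrative sketch
    (assuming `data_format="channels_last"` and dimensions divisible by the
    stride):

    >>> x = tf.zeros((1, 10, 10, 10, 3))
    >>> down = tf.keras.layers.Conv3D(8, 3, strides=2, padding="same")(x)
    >>> down.shape
    TensorShape([1, 5, 5, 5, 8])
    >>> up = tf.keras.layers.Conv3DTranspose(3, 3, strides=2, padding="same")(down)
    >>> up.shape
    TensorShape([1, 10, 10, 10, 3])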

    When using this layer as the first layer in a model,
    provide the keyword argument `input_shape`
    (tuple of integers or `None`, does not include the sample axis),
    e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3
    channels if `data_format="channels_last"`.

    Args:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 3 integers, specifying the
        depth, height and width of the 3D convolution window.
        Can be a single integer to specify the same value for
        all spatial dimensions.
      strides: An integer or tuple/list of 3 integers,
        specifying the strides of the convolution along the depth, height,
        and width.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding with zeros
        evenly to the left/right or up/down of the input such that output has
        the same height/width dimension as the input.
      output_padding: An integer or tuple/list of 3 integers,
        specifying the amount of padding along the depth, height, and
        width.
        Can be a single integer to specify the same value for all
        spatial dimensions.
        The amount of output padding along a given dimension must be
        less than the stride along that same dimension.
        If set to `None` (default), the output shape is inferred.
      data_format: A string,
        one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, depth, height, width, channels)` while `channels_first`
        corresponds to inputs with shape
        `(batch_size, channels, depth, height, width)`.
        It defaults to the `image_data_format` value found in your
        Keras config file at `~/.keras/keras.json`.
        If you never set it, then it will be "channels_last".
      dilation_rate: an integer or tuple/list of 3 integers, specifying
        the dilation rate to use for dilated convolution.
        Can be a single integer to specify the same value for
        all spatial dimensions.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any stride value != 1.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied
        (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix
        (see `keras.initializers`). Defaults to 'glorot_uniform'.
      bias_initializer: Initializer for the bias vector
        (see `keras.initializers`). Defaults to 'zeros'.
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix
        (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector
        (see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation")
        (see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix
        (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector
        (see `keras.constraints`).

    Input shape:
      5D tensor with shape:
      `(batch_size, channels, depth, rows, cols)` if
      data_format='channels_first'
      or 5D tensor with shape:
      `(batch_size, depth, rows, cols, channels)` if
      data_format='channels_last'.

    Output shape:
      5D tensor with shape:
      `(batch_size, filters, new_depth, new_rows, new_cols)` if
        data_format='channels_first'
      or 5D tensor with shape:
      `(batch_size, new_depth, new_rows, new_cols, filters)` if
        data_format='channels_last'.
      `depth`, `rows`, and `cols` values might have changed due to padding.
      If `output_padding` is specified:
      ```
      new_depth = ((depth - 1) * strides[0] + kernel_size[0] - 2 * padding[0]
                   + output_padding[0])
      new_rows = ((rows - 1) * strides[1] + kernel_size[1] - 2 * padding[1]
                  + output_padding[1])
      new_cols = ((cols - 1) * strides[2] + kernel_size[2] - 2 * padding[2]
                  + output_padding[2])
      ```
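
      For example, with `padding="valid"` (so the `padding[i]` terms above are
      zero) the formulas give the shape in this small sketch, which assumes
      `data_format="channels_last"`:

      >>> layer = tf.keras.layers.Conv3DTranspose(
      ...     2, kernel_size=3, strides=2, padding="valid", output_padding=1)
      >>> layer(tf.zeros((1, 4, 4, 4, 5))).shape
      TensorShape([1, 10, 10, 10, 2])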

    Returns:
      A tensor of rank 5 representing
      `activation(conv3dtranspose(inputs, kernel) + bias)`.

    Raises:
      ValueError: if `padding` is "causal".
      ValueError: when both `strides` > 1 and `dilation_rate` > 1.

    References:
      - [A guide to convolution arithmetic for deep
        learning](https://arxiv.org/abs/1603.07285v1)
      - [Deconvolutional
        Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    """

    @utils.allow_initializer_layout
    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1, 1),
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=(1, 1, 1),
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        super().__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )

        self.output_padding = output_padding
        if self.output_padding is not None:
            self.output_padding = conv_utils.normalize_tuple(
                self.output_padding, 3, "output_padding", allow_zero=True
            )
            # Output padding must be strictly smaller than the stride along
            # every spatial dimension, otherwise the output shape is ambiguous.
            for stride, out_pad in zip(self.strides, self.output_padding):
                if out_pad >= stride:
                    raise ValueError(
                        "Strides must be greater than output padding. "
                        f"Received strides={self.strides}, "
                        f"output_padding={self.output_padding}."
                    )

    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if len(input_shape) != 5:
            raise ValueError(
                f"Inputs should have rank 5. Received input_shape={input_shape}."
            )
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                "The channel dimension of the inputs to `Conv3DTranspose` "
                f"should be defined. The input_shape received is {input_shape}, "
                f"where axis {channel_axis} (0-based) is the channel dimension, "
                "which was found to be `None`."
            )
        input_dim = int(input_shape[channel_axis])
        self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
        # Transposed kernel layout: (depth, height, width, filters, in_channels).
        kernel_shape = self.kernel_size + (self.filters, input_dim)

        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        inputs_shape = tf.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == "channels_first":
            d_axis, h_axis, w_axis = 2, 3, 4
        else:
            d_axis, h_axis, w_axis = 1, 2, 3

        depth = inputs_shape[d_axis]
        height = inputs_shape[h_axis]
        width = inputs_shape[w_axis]

        kernel_d, kernel_h, kernel_w = self.kernel_size
        stride_d, stride_h, stride_w = self.strides

        if self.output_padding is None:
            out_pad_d = out_pad_h = out_pad_w = None
        else:
            out_pad_d, out_pad_h, out_pad_w = self.output_padding

        # Infer the dynamic output shape.
        out_depth = conv_utils.deconv_output_length(
            depth, kernel_d, padding=self.padding,
            output_padding=out_pad_d, stride=stride_d,
        )
        out_height = conv_utils.deconv_output_length(
            height, kernel_h, padding=self.padding,
            output_padding=out_pad_h, stride=stride_h,
        )
        out_width = conv_utils.deconv_output_length(
            width, kernel_w, padding=self.padding,
            output_padding=out_pad_w, stride=stride_w,
        )
        if self.data_format == "channels_first":
            output_shape = (batch_size, self.filters, out_depth, out_height, out_width)
            strides = (1, 1, stride_d, stride_h, stride_w)
        else:
            output_shape = (batch_size, out_depth, out_height, out_width, self.filters)
            strides = (1, stride_d, stride_h, stride_w, 1)

        output_shape_tensor = tf.stack(output_shape)
        outputs = tf.nn.conv3d_transpose(
            inputs,
            self.kernel,
            output_shape_tensor,
            strides,
            data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
            padding=self.padding.upper(),
        )

        if not tf.executing_eagerly():
            # Infer the static output shape.
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)

        if self.use_bias:
            outputs = tf.nn.bias_add(
                outputs,
                self.bias,
                data_format=conv_utils.convert_data_format(self.data_format, ndim=4),
            )

        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        if self.data_format == "channels_first":
            c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
        else:
            c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3

        kernel_d, kernel_h, kernel_w = self.kernel_size
        stride_d, stride_h, stride_w = self.strides

        if self.output_padding is None:
            out_pad_d = out_pad_h = out_pad_w = None
        else:
            out_pad_d, out_pad_h, out_pad_w = self.output_padding

        output_shape[c_axis] = self.filters
        output_shape[d_axis] = conv_utils.deconv_output_length(
            output_shape[d_axis], kernel_d, padding=self.padding,
            output_padding=out_pad_d, stride=stride_d,
        )
        output_shape[h_axis] = conv_utils.deconv_output_length(
            output_shape[h_axis], kernel_h, padding=self.padding,
            output_padding=out_pad_h, stride=stride_h,
        )
        output_shape[w_axis] = conv_utils.deconv_output_length(
            output_shape[w_axis], kernel_w, padding=self.padding,
            output_padding=out_pad_w, stride=stride_w,
        )
        return tf.TensorShape(output_shape)

    def get_config(self):
        config = super().get_config()
        # `dilation_rate` is not used by the 3D transposed convolution, so it
        # is not serialized; `output_padding` is added instead.
        config.pop("dilation_rate")
        config["output_padding"] = self.output_padding
        return config


# Alias
Convolution3DTranspose = Conv3DTranspose
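

# A minimal usage sketch (added for illustration, not part of the upstream
# module): running this file directly only exercises the layer's shape
# inference. It assumes TensorFlow is installed and uses `channels_last` inputs.
if __name__ == "__main__":
    _layer = Conv3DTranspose(filters=2, kernel_size=3, strides=2, padding="same")
    _outputs = _layer(tf.zeros((1, 4, 4, 4, 5)))
    print(_outputs.shape)  # Expected: (1, 8, 8, 8, 2) with "same" padding.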