import warnings

from scipy import linalg, special
from scipy._lib._util import check_random_state

from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
                   sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
                   ones, cov)
import numpy as np

from . import _mvn
from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log
from scipy.special import logsumexp

__all__ = ['gaussian_kde']


class gaussian_kde:
    """Representation of a kernel-density estimate using Gaussian kernels.

    Kernel density estimation is a way to estimate the probability density
    function (PDF) of a random variable in a non-parametric way.
    `gaussian_kde` works for both uni-variate and multi-variate data.   It
    includes automatic bandwidth determination.  The estimation works best for
    a unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth.  This can be
        'scott', 'silverman', a scalar constant or a callable.  If a scalar,
        this will be used directly as `kde.factor`.  If a callable, it should
        take a `gaussian_kde` instance as only parameter and return a scalar.
        If None (default), 'scott' is used.  See Notes for more details.
    weights : array_like, optional
        weights of datapoints. This must be the same shape as dataset.
        If None (default), the samples are assumed to be equally weighted

    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.
    d : int
        Number of dimensions.
    n : int
        Number of datapoints.
    neff : int
        Effective number of datapoints.

        .. versionadded:: 1.2.0
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`. The square
        of `kde.factor` multiplies the covariance matrix of the data in the kde
        estimation.
    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of `covariance`.

    Methods
    -------
    evaluate
    __call__
    integrate_gaussian
    integrate_box_1d
    integrate_box
    integrate_kde
    pdf
    logpdf
    resample
    set_bandwidth
    covariance_factor

    Notes
    -----
    Bandwidth selection strongly influences the estimate obtained from the KDE
    (much more so than the actual shape of the kernel).  Bandwidth selection
    can be done by a "rule of thumb", by cross-validation, by "plug-in
    methods" or by other means; see [3]_, [4]_ for reviews.  `gaussian_kde`
    uses a rule of thumb, the default is Scott's Rule.

    Scott's Rule [1]_, implemented as `scotts_factor`, is::

        n**(-1./(d+4)),

    with ``n`` the number of data points and ``d`` the number of dimensions.
    In the case of unequally weighted points, `scotts_factor` becomes::

        neff**(-1./(d+4)),

    with ``neff`` the effective number of datapoints.
    Silverman's Rule [2]_, implemented as `silverman_factor`, is::

        (n * (d + 2) / 4.)**(-1. / (d + 4)).

    or in the case of unequally weighted points::

        (neff * (d + 2) / 4.)**(-1. / (d + 4)).
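
    For example, with ``n = 100`` points in ``d = 1`` dimension (illustrative
    values, not defaults of this class), the two rules give::

        n, d = 100, 1
        scott = n**(-1./(d+4))                 # ~0.398
        silverman = (n*(d+2)/4.)**(-1./(d+4))  # ~0.422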

    Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.

    With a set of weighted samples, the effective number of datapoints ``neff``
    is defined by::

        neff = sum(weights)^2 / sum(weights^2)

    as detailed in [5]_.
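
    For instance, three hypothetical weights ``[0.5, 0.25, 0.25]`` give an
    effective sample size below 3::

        w = np.array([0.5, 0.25, 0.25])
        neff = w.sum()**2 / (w**2).sum()  # ~2.67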

    `gaussian_kde` does not currently support data that lies in a
    lower-dimensional subspace of the space in which it is expressed. For such
    data, consider performing principal component analysis / dimensionality
    reduction and using `gaussian_kde` with the transformed data.

    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chicester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.
    .. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
           Series A (General), 132, 272

    Examples
    --------
    Generate some random two-dimensional data:

    >>> import numpy as np
    >>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2

    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()

    Perform a kernel density estimate on the data:

    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)

    Plot the results:

    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots()
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
    ...           extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()

    """
    def __init__(self, dataset, bw_method=None, weights=None):
        self.dataset = atleast_2d(asarray(dataset))
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")

        self.d, self.n = self.dataset.shape

        if weights is not None:
            self._weights = atleast_1d(weights).astype(float)
            self._weights /= sum(self._weights)
            if self.weights.ndim != 1:
                raise ValueError("`weights` input should be one-dimensional.")
            if len(self._weights) != self.n:
                raise ValueError("`weights` input should be of length n")
            self._neff = 1/sum(self._weights**2)

        # Each *column* of `dataset` is a data point; more dimensions than
        # samples makes the data covariance matrix singular.
        if self.d > self.n:
            msg = ("Number of dimensions is greater than number of samples. "
                   "This results in a singular data covariance matrix, which "
                   "cannot be treated using the algorithms implemented in "
                   "`gaussian_kde`. Note that `gaussian_kde` interprets each "
                   "*column* of `dataset` to be a point; consider transposing "
                   "the input to `dataset`.")
            raise ValueError(msg)

        try:
            self.set_bandwidth(bw_method=bw_method)
        except linalg.LinAlgError as e:
            msg = ("The data appears to lie in a lower-dimensional subspace "
                   "of the space in which it is expressed. This has resulted "
                   "in a singular data covariance matrix, which cannot be "
                   "treated using the algorithms implemented in "
                   "`gaussian_kde`. Consider performing principal component "
                   "analysis / dimensionality reduction and using "
                   "`gaussian_kde` with the transformed data.")
            raise linalg.LinAlgError(msg) from e

    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
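
        Examples
        --------
        An illustrative sketch (the sample values here are assumed, not part
        of the original docstring):

        >>> import numpy as np
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde(np.array([-1.0, 0.0, 1.0, 2.0]))
        >>> densities = kde.evaluate(np.linspace(-2, 2, 5))
        >>> densities.shape
        (5,)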

        """
        points = atleast_2d(asarray(points))

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector of a single point
                points = reshape(points, (self.d, 1))
                d, m = points.shape
            else:
                msg = (f"points have dimension {d}, "
                       f"dataset has dimension {self.d}")
                raise ValueError(msg)

        output_dtype, spec = _get_output_dtype(self.covariance, points)
        result = gaussian_kernel_estimate[spec](
            self.dataset.T, self.weights[:, None],
            points.T, self.cho_cov, output_dtype)

        return result[:, 0]

    __call__ = evaluate

    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.
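
        Examples
        --------
        A small sketch with assumed values, showing the overlap of a
        univariate estimate with a standard normal:

        >>> import numpy as np
        >>> from scipy import stats
        >>> kde = stats.gaussian_kde(np.array([-1.0, 0.0, 1.0, 2.0]))
        >>> overlap = kde.integrate_gaussian(mean=0.0, cov=1.0)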

        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)

        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)

        # make mean a column vector
        mean = mean[:, newaxis]

        sum_cov = self.covariance + cov

        # cho_factor raises LinAlgError if the combined covariance is not
        # symmetric positive definite
        sum_cov_chol = linalg.cho_factor(sum_cov)

        diff = self.dataset - mean
        tdiff = linalg.cho_solve(sum_cov_chol, diff)

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies)*self.weights, axis=0) / norm_const

        return result

    def integrate_box_1d(self, low, high):
        """
        Computes the integral of a 1D pdf between two bounds.

        Parameters
        ----------
        low : scalar
            Lower bound of integration.
        high : scalar
            Upper bound of integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDE is over more than one dimension.
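
        Examples
        --------
        A quick sanity check (illustrative values, not from the original
        documentation):

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng(1234)
        >>> kde = stats.gaussian_kde(rng.normal(size=200))
        >>> mass = kde.integrate_box_1d(-10, 10)  # ~1.0, nearly all the mass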

        """
        if self.d != 1:
            raise ValueError("integrate_box_1d() only handles 1D pdfs")

        stdev = ravel(sqrt(self.covariance))[0]

        normalized_low = ravel((low - self.dataset) / stdev)
        normalized_high = ravel((high - self.dataset) / stdev)

        value = np.sum(self.weights*(
                        special.ndtr(normalized_high) -
                        special.ndtr(normalized_low)))
        return value

    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}

        value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
                                            self.dataset, self.weights,
                                            self.covariance, **extra_kwds)
        if inform:
            msg = ('An integral in _mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg, stacklevel=2)

        return value

    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.

        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")

        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other

        sum_cov = small.covariance + large.covariance
        sum_cov_chol = linalg.cho_factor(sum_cov)
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = linalg.cho_solve(sum_cov_chol, diff)

            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]

        sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
        norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det

        result /= norm_const

        return result

    def resample(self, size=None, seed=None):
        """Randomly sample a dataset from the estimated pdf.

        Parameters
        ----------
        size : int, optional
            The number of samples to draw.  If not provided, then the size is
            the same as the effective number of samples in the underlying
            dataset.
        seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance then
            that instance is used.

        Returns
        -------
        resample : (self.d, `size`) ndarray
            The sampled dataset.
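
        Examples
        --------
        An illustrative sketch (shapes and seed are assumed for the example):

        >>> import numpy as np
        >>> from scipy import stats
        >>> rng = np.random.default_rng(5)
        >>> kde = stats.gaussian_kde(rng.normal(size=(2, 100)))
        >>> samples = kde.resample(500, seed=5)
        >>> samples.shape
        (2, 500)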

        """
        if size is None:
            size = int(self.neff)
        random_state = check_random_state(seed)

        norm = transpose(random_state.multivariate_normal(
            zeros((self.d,), float), self.covariance, size=size
        ))
        indices = random_state.choice(self.n, size=size, p=self.weights)
        means = self.dataset[:, indices]

        return means + norm

    def scotts_factor(self):
        """Compute Scott's factor.

        Returns
        -------
        s : float
            Scott's factor.
        """
        return power(self.neff, -1./(self.d+4))

    def silverman_factor(self):
        """Compute the Silverman factor.

        Returns
        -------
        s : float
            The silverman factor.
        """
        return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))

    #  Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor
    covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
        multiplies the data covariance matrix to obtain the kernel covariance
        matrix. The default is `scotts_factor`.  A subclass can overwrite this
        method to provide a different method, or set it through a call to
        `kde.set_bandwidth`."""

    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth.  This can be
            'scott', 'silverman', a scalar constant or a callable.  If a
            scalar, this will be used directly as `kde.factor`.  If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar.  If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11

        Examples
        --------
        >>> import numpy as np
        >>> import scipy.stats as stats
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)

        >>> import matplotlib.pyplot as plt
        >>> fig, ax = plt.subplots()
        >>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()

        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)

        self._compute_covariance()

    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and Cholesky decomposition of the data covariance
        if not hasattr(self, '_data_cho_cov'):
            self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                                   bias=False,
                                                   aweights=self.weights))
            self._data_cho_cov = linalg.cholesky(self._data_covariance,
                                                 lower=True)

        self.covariance = self._data_covariance * self.factor**2
        self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
        self.log_det = 2*np.log(np.diag(self.cho_cov
                                        * np.sqrt(2*pi))).sum()

    @property
    def inv_cov(self):
        # Recomputed from the dataset on each access rather than cached.
        self.factor = self.covariance_factor()
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
        return linalg.inv(self._data_covariance) / self.factor**2

    def pdf(self, x):
        """
        Evaluate the estimated pdf on a provided set of points.

        Notes
        -----
        This is an alias for `gaussian_kde.evaluate`.  See the ``evaluate``
        docstring for more details.

        """
        return self.evaluate(x)

    def logpdf(self, x):
        """
        Evaluate the log of the estimated pdf on a provided set of points.
        """
        points = atleast_2d(x)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector of a single point
                points = reshape(points, (self.d, 1))
                d, m = points.shape
            else:
                msg = (f"points have dimension {d}, "
                       f"dataset has dimension {self.d}")
                raise ValueError(msg)

        output_dtype, spec = _get_output_dtype(self.covariance, points)
        result = gaussian_kernel_estimate_log[spec](
            self.dataset.T, self.weights[:, None],
            points.T, self.cho_cov, output_dtype)

        return result[:, 0]

    def marginal(self, dimensions):
        """Return a marginal KDE distribution

        Parameters
        ----------
        dimensions : int or 1-d array_like
            The dimensions of the multivariate distribution corresponding
            with the marginal variables, that is, the indices of the dimensions
            that are being retained. The other dimensions are marginalized out.

        Returns
        -------
        marginal_kde : gaussian_kde
            An object representing the marginal distribution.

        Notes
        -----
        .. versionadded:: 1.10.0

        """
        dims = np.atleast_1d(dimensions)

        if not np.issubdtype(dims.dtype, np.integer):
            msg = ("Elements of `dimensions` must be integers - the indices "
                   "of the marginal variables being retained.")
            raise ValueError(msg)

        n = len(self.dataset)  # number of dimensions
        original_dims = dims.copy()

        dims[dims < 0] = n + dims[dims < 0]

        if len(np.unique(dims)) != len(dims):
            msg = ("All elements of `dimensions` must be unique.")
            raise ValueError(msg)

        i_invalid = (dims < 0) | (dims >= n)
        if np.any(i_invalid):
            msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
                   f"for a distribution in {n} dimensions.")
            raise ValueError(msg)

        dataset = self.dataset[dims]
        weights = self.weights

        return gaussian_kde(dataset, bw_method=self.covariance_factor(),
                            weights=weights)

    @property
    def weights(self):
        try:
            return self._weights
        except AttributeError:
            self._weights = ones(self.n)/self.n
            return self._weights

    @property
    def neff(self):
        try:
            return self._neff
        except AttributeError:
            self._neff = 1/sum(self.weights**2)
            return self._neff


def _get_output_dtype(covariance, points):
    """
    Calculates the output dtype and the "spec" (=C type name).

    This was necessary in order to deal with the fused types in the Cython
    routine `gaussian_kernel_estimate`. See gh-10824 for details.
    """
    output_dtype = np.common_type(covariance, points)
    itemsize = np.dtype(output_dtype).itemsize
    if itemsize == 4:
        spec = 'float'
    elif itemsize == 8:
        spec = 'double'
    elif itemsize in (12, 16):
        spec = 'long double'
    else:
        raise ValueError(
                f"{output_dtype} has unexpected item size: {itemsize}")

    return output_dtype, spec