"""
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.

The SeriesGroupBy and DataFrameGroupBy sub-classes
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from __future__ import annotations

import datetime
from functools import partial, wraps
import inspect
from textwrap import dedent
from typing import (
    TYPE_CHECKING, Callable, Hashable, Iterable, Iterator, List, Literal,
    Mapping, Sequence, TypeVar, Union, cast, final,
)

import numpy as np

from pandas._config.config import option_context
from pandas._libs import Timestamp, lib
from pandas._libs.algos import rank_1d
import pandas._libs.groupby as libgroupby
from pandas._libs.missing import NA
from pandas._typing import (
    AnyArrayLike, ArrayLike, Axis, AxisInt, DtypeObj, FillnaOptions,
    IndexLabel, NDFrameT, PositionalIndexer, RandomState, Scalar, T, npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, DataError
from pandas.util._decorators import Appender, Substitution, cache_readonly, doc

from pandas.core.dtypes.cast import ensure_dtype_can_hold_na
from pandas.core.dtypes.common import (
    is_bool_dtype, is_float_dtype, is_hashable, is_integer, is_integer_dtype,
    is_numeric_dtype, is_object_dtype, is_scalar, needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna, notna

from pandas.core import algorithms, sample
from pandas.core._numba import executor
from pandas.core.arrays import (
    BaseMaskedArray, BooleanArray, Categorical, DatetimeArray, ExtensionArray,
    FloatingArray, TimedeltaArray,
)
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base, numba_, ops
from pandas.core.groupby.grouper import get_grouper
from pandas.core.groupby.indexing import GroupByIndexingMixin, GroupByNthSelector
from pandas.core.indexes.api import (
    CategoricalIndex, Index, MultiIndex, RangeIndex, default_index,
)
from pandas.core.internals.blocks import ensure_block_shape
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
from pandas.core.util.numba_ import get_jit_arguments, maybe_use_numba

if TYPE_CHECKING:
    from pandas.core.window import (
        ExpandingGroupby, ExponentialMovingWindowGroupby, RollingGroupby,
    )

_common_see_also = """
        See Also
        --------
        Series.%(name)s : Apply a function %(name)s to a Series.
        DataFrame.%(name)s : Apply a function %(name)s
            to each row or column of a DataFrame.
"""

_apply_docs = {
    "template": """
    Apply function ``func`` group-wise and combine the results together.

    The function passed to ``apply`` must take a {input} as its first
    argument and return a DataFrame, Series or scalar. ``apply`` will
    then take care of combining the results back together into a single
    dataframe or series. ``apply`` is therefore a highly flexible
    grouping method.

    While ``apply`` is a very flexible method, its downside is that
    using it can be quite a bit slower than using more specific methods
    like ``agg`` or ``transform``. Pandas offers a wide range of methods that will
    be much faster than using ``apply`` for their specific purposes, so try to
    use them before reaching for ``apply``.

    Parameters
    ----------
    func : callable
        A callable that takes a {input} as its first argument, and
        returns a dataframe, a series or a scalar. In addition the
        callable may take positional and keyword arguments.
    args, kwargs : tuple and dict
        Optional positional and keyword arguments to pass to ``func``.

    Returns
    -------
    Series or DataFrame

    See Also
    --------
    pipe : Apply function to the full GroupBy object instead of to each
        group.
    aggregate : Apply aggregate function to the GroupBy object.
    transform : Apply function column-by-column to the GroupBy object.
    Series.apply : Apply a function to a Series.
    DataFrame.apply : Apply a function to each row or column of a DataFrame.

    Notes
    -----

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the passed ``func``,
        see the examples below.

    Functions that mutate the passed object can produce unexpected
    behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
    for more details.

    Examples
    --------
    {examples}
    """,
    "dataframe_examples": """
    >>> df = pd.DataFrame({'A': 'a a b'.split(),
    ...                    'B': [1,2,3],
    ...                    'C': [4,6,5]})
    >>> g1 = df.groupby('A', group_keys=False)
    >>> g2 = df.groupby('A', group_keys=True)

    Notice that ``g1`` and ``g2`` have two groups, ``a`` and ``b``, and only
    differ in their ``group_keys`` argument. Calling `apply` in various ways,
    we can get different grouping results:

    Example 1: below the function passed to `apply` takes a DataFrame as
    its argument and returns a DataFrame. `apply` combines the result for
    each group together into a new DataFrame:

    >>> g1[['B', 'C']].apply(lambda x: x / x.sum())
              B    C
    0  0.333333  0.4
    1  0.666667  0.6
    2  1.000000  1.0

    In the above, the groups are not part of the index. We can have them included
    by using ``g2`` where ``group_keys=True``:

    >>> g2[['B', 'C']].apply(lambda x: x / x.sum())
                B    C
    A
    a 0  0.333333  0.4
      1  0.666667  0.6
    b 2  1.000000  1.0

    Example 2: The function passed to `apply` takes a DataFrame as
    its argument and returns a Series.  `apply` combines the result for
    each group together into a new DataFrame.

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the passed ``func``.

    >>> g1[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min())
         B    C
    A
    a  1.0  2.0
    b  0.0  0.0

    >>> g2[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min())
         B    C
    A
    a  1.0  2.0
    b  0.0  0.0

    The ``group_keys`` argument has no effect here because the result is not
    like-indexed (i.e. :ref:`a transform <groupby.transform>`) when compared
    to the input.

    Example 3: The function passed to `apply` takes a DataFrame as
    its argument and returns a scalar. `apply` combines the result for
    each group together into a Series, including setting the index as
    appropriate:

    >>> g1.apply(lambda x: x.C.max() - x.B.min())
    A
    a    5
    b    2
    dtype: int64""",
    "series_examples": """
    >>> s = pd.Series([0, 1, 2], index='a a b'.split())
    >>> g1 = s.groupby(s.index, group_keys=False)
    >>> g2 = s.groupby(s.index, group_keys=True)

    From ``s`` above we can see that ``g1`` and ``g2`` have two groups, ``a``
    and ``b``, and only
    differ in their ``group_keys`` argument. Calling `apply` in various ways,
    we can get different grouping results:

    Example 1: The function passed to `apply` takes a Series as
    its argument and returns a Series.  `apply` combines the result for
    each group together into a new Series.

    .. versionchanged:: 1.3.0

        The resulting dtype will reflect the return value of the passed ``func``.

    >>> g1.apply(lambda x: x*2 if x.name == 'a' else x/2)
    a    0.0
    a    2.0
    b    1.0
    dtype: float64

    In the above, the groups are not part of the index. We can have them included
    by using ``g2`` where ``group_keys=True``:

    >>> g2.apply(lambda x: x*2 if x.name == 'a' else x/2)
    a  a    0.0
       a    2.0
    b  b    1.0
    dtype: float64

    Example 2: The function passed to `apply` takes a Series as
    its argument and returns a scalar. `apply` combines the result for
    each group together into a Series, including setting the index as
    appropriate:

    >>> g1.apply(lambda x: x.max() - x.min())
    a    1
    b    0
    dtype: int64

    The ``group_keys`` argument has no effect here because the result is not
    like-indexed (i.e. :ref:`a transform <groupby.transform>`) when compared
    to the input.

    >>> g2.apply(lambda x: x.max() - x.min())
    a    1
    b    0
    dtype: int64""",
}

_groupby_agg_method_template = """
Compute {fname} of group values.

Parameters
----------
numeric_only : bool, default {no}
    Include only float, int, boolean columns.

    .. versionchanged:: 2.0.0

        numeric_only no longer accepts ``None``.

min_count : int, default {mc}
    The required number of valid values to perform the operation. If fewer
    than ``min_count`` non-NA values are present the result will be NA.

Returns
-------
Series or DataFrame
    Computed {fname} of values within each group.
"""

_pipe_template = """
Apply a ``func`` with arguments to this %(klass)s object and return its result.

Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing

>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)  # doctest: +SKIP

You can write

>>> (df.groupby('group')
...    .pipe(f)
...    .pipe(g, arg1=a)
...    .pipe(h, arg2=b, arg3=c))  # doctest: +SKIP

which is much more readable.

Parameters
----------
func : callable or tuple of (callable, str)
    Function to apply to this %(klass)s object or, alternatively,
    a `(callable, data_keyword)` tuple where `data_keyword` is a
    string indicating the keyword of `callable` that expects the
    %(klass)s object.
args : iterable, optional
    Positional arguments passed into `func`.
kwargs : dict, optional
    A dictionary of keyword arguments passed into `func`.

Returns
-------
the return type of `func`.

See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe : Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
    full %(klass)s object.

Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_

Examples
--------
%(examples)s
"""

_transform_template = """
Call function producing a same-indexed %(klass)s on each group.

Returns a %(klass)s having the same indexes as the original object
filled with the transformed values.

Parameters
----------
f : function, str
    Function to apply to each group. See the Notes section below for requirements.

    Accepted inputs are:

    - String
    - Python function
    - Numba JIT function with ``engine='numba'`` specified.

    Only passing a single function is supported with this engine.
    If the ``'numba'`` engine is chosen, the function must be
    a user defined function with ``values`` and ``index`` as the
    first and second arguments respectively in the function signature.
    Each group's index will be passed to the user defined function
    and optionally available for use.

    If a string is chosen, then it needs to be the name
    of the groupby method you want to use.

    .. versionchanged:: 1.1.0
*args
    Positional arguments to pass to func.
engine : str, default None
    * ``'cython'`` : Runs the function through C-extensions from cython.
    * ``'numba'`` : Runs the function through JIT compiled code from numba.
    * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba``

    .. versionadded:: 1.1.0
engine_kwargs : dict, default None
    * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
    * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
      and ``parallel`` dictionary keys. The values must either be ``True`` or
      ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
      ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
      applied to the function

    .. versionadded:: 1.1.0
**kwargs
    Keyword arguments to be passed into func.

Returns
-------
%(klass)s

See Also
--------
%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine
    the results together.
%(klass)s.groupby.aggregate : Aggregate using one or more
    operations over the specified axis.
%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the
    same axis shape as self.

Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.

The current implementation imposes three requirements on f:

* f must return a value that either has the same shape as the input
  subframe or can be broadcast to the shape of the input subframe.
  For example, if `f` returns a scalar it will be broadcast to have the
  same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
  in the subframe. If f also supports application to the entire subframe,
  then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
  produce unexpected results. See :ref:`gotchas.udf-mutation` for more details.

When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
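
For illustration, a Numba-compatible function receives each group's values and
index as NumPy arrays and returns an array of the same length. A minimal sketch
(the frame and column names below are assumptions; numba is an optional
dependency, so the calls are skipped):

>>> def double(values, index):  # doctest: +SKIP
...     return values * 2
>>> df.groupby("key")["val"].transform(double, engine="numba")  # doctest: +SKIP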

.. versionchanged:: 1.3.0

    The resulting dtype will reflect the return value of the passed ``func``,
    see the examples below.

.. versionchanged:: 2.0.0

    When using ``.transform`` on a grouped DataFrame and the transformation function
    returns a DataFrame, pandas now aligns the result's index
    with the input's index. You can call ``.to_numpy()`` on the
    result of the transformation function to avoid alignment.

Examples
--------
%(example)s"""

_agg_template = """
Aggregate using one or more operations over the specified axis.

Parameters
----------
func : function, str, list, dict or None
    Function to use for aggregating the data. If a function, must either
    work when passed a {klass} or when passed to {klass}.apply.

    Accepted combinations are:

    - function
    - string function name
    - list of functions and/or function names, e.g. ``[np.sum, 'mean']``
    - dict of axis labels -> functions, function names or list of such.
    - None, in which case ``**kwargs`` are used with Named Aggregation. Here the
      output has one column for each element in ``**kwargs``. The name of the
      column is the keyword, whereas the value determines the aggregation used to compute
      the values in the column.

    Can also accept a Numba JIT function with
    ``engine='numba'`` specified. Only passing a single function is supported
    with this engine.

    If the ``'numba'`` engine is chosen, the function must be
    a user defined function with ``values`` and ``index`` as the
    first and second arguments respectively in the function signature.
    Each group's index will be passed to the user defined function
    and optionally available for use.

    .. versionchanged:: 1.1.0
*args
    Positional arguments to pass to func.
engine : str, default None
    * ``'cython'`` : Runs the function through C-extensions from cython.
    * ``'numba'`` : Runs the function through JIT compiled code from numba.
    * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba``

    .. versionadded:: 1.1.0
engine_kwargs : dict, default None
    * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
    * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
      and ``parallel`` dictionary keys. The values must either be ``True`` or
      ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
      ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
      applied to the function

    .. versionadded:: 1.1.0
**kwargs
    * If ``func`` is None, ``**kwargs`` are used to define the output names and
      aggregations via Named Aggregation. See ``func`` entry.
    * Otherwise, keyword arguments to be passed into func.
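
    For instance, Named Aggregation (``func=None``) can be spelled as in the
    following sketch, where the frame and column names are assumptions:

    >>> df.groupby("A").agg(b_min=("B", "min"), c_sum=("C", "sum"))  # doctest: +SKIP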

Returns
-------
{klass}

See Also
--------
{klass}.groupby.apply : Apply function func group-wise
    and combine the results together.
{klass}.groupby.transform : Transforms the Series on each group
    based on the given function.
{klass}.aggregate : Aggregate using one or more
    operations over the specified axis.

Notes
-----
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.

Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.

.. versionchanged:: 1.3.0

    The resulting dtype will reflect the return value of the passed ``func``,
    see the examples below.
{examples}"""


class GroupByPlot(PandasObject):
    """
    Class implementing the .plot attribute for groupby objects.
    """

    def __init__(self, groupby: GroupBy) -> None:
        self._groupby = groupby

    def __call__(self, *args, **kwargs):
        def f(self):
            return self.plot(*args, **kwargs)

        f.__name__ = "plot"
        return self._groupby.apply(f)

    def __getattr__(self, name: str):
        def attr(*args, **kwargs):
            def f(self):
                return getattr(self.plot, name)(*args, **kwargs)

            return self._groupby.apply(f)

        return attr


_KeysArgType = Union[
    Hashable,
    List[Hashable],
    Callable[[Hashable], Hashable],
    List[Callable[[Hashable], Hashable]],
    Mapping[Hashable, Hashable],
]


class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin):
dhB Zded< ded< dZded< dZded< ded< eddddZ	eddddZ
eeddddZeeddddZeedddd Zed!d" Zed#d$ Zeed%d& Zed'dd(d)Zed*ed+d,eed-d.d/d0d1Zed8d2dd3d4Zed5dd6d7ZdS )9BaseGroupByas_indexaxisdropna
exclusionsgrouper
group_keyskeyslevelobjobservedsortr   ops.BaseGrouperN_KeysArgType | NoneIndexLabel | Noneboolintr_   c                 C  s
   t | jS r`   )lengroupsrg   rc   rc   rd   __len__y  s    zBaseGroupBy.__len__ro   c                 C  s
   t | S r`   )object__repr__rg   rc   rc   rd   r   }  s    zBaseGroupBy.__repr__zdict[Hashable, np.ndarray]c                 C  s   | j jS )z4
        Dict {group name -> group labels}.
        )r~   r   rg   rc   rc   rd   r     s    zBaseGroupBy.groupsc                 C  s   | j jS r`   )r~   ngroupsrg   rc   rc   rd   r     s    zBaseGroupBy.ngroupsz$dict[Hashable, npt.NDArray[np.intp]]c                 C  s   | j jS )z5
        Dict {group name -> group indices}.
        )r~   indicesrg   rc   rc   rd   r     s    zBaseGroupBy.indicesc              
     s
  dd t |dkrg S t jdkr6ttj}nd}|d }t|trt|tsbd}t|t |t |kszfdd|D W S  tk
r } zd}t||W 5 d}~X Y nX fd	d|D fd
d|D }n|  fdd|D }fdd|D S )zd
        Safe get multiple indices, translate keys for
        datelike to underlying repr.
        c                 S  s4   t | tjrdd S t | tjr(dd S dd S d S )Nc                 S  s   t | S r`   )r   keyrc   rc   rd   <lambda>      zABaseGroupBy._get_indices.<locals>.get_converter.<locals>.<lambda>c                 S  s
   t | jS r`   )r   Zasm8r   rc   rc   rd   r     r   c                 S  s   | S r`   rc   r   rc   rc   rd   r     r   )
isinstancedatetimenpZ
datetime64)src   rc   rd   get_converter  s
    z/BaseGroupBy._get_indices.<locals>.get_converterr   Nz<must supply a tuple to get_group with multiple grouping keysc                   s   g | ]} j | qS rc   )r   .0rq   rg   rc   rd   
<listcomp>  s     z,BaseGroupBy._get_indices.<locals>.<listcomp>zHmust supply a same-length tuple to get_group with multiple grouping keysc                   s   g | ]} |qS rc   rc   )r   r   )r   rc   rd   r     s     c                 3  s&   | ]}t d d t |D V  qdS )c                 s  s   | ]\}}||V  qd S r`   rc   )r   rk   nrc   rc   rd   	<genexpr>  s     z5BaseGroupBy._get_indices.<locals>.<genexpr>.<genexpr>N)tuplezipr   )
convertersrc   rd   r     s     z+BaseGroupBy._get_indices.<locals>.<genexpr>c                 3  s   | ]} |V  qd S r`   rc   r   )	converterrc   rd   r     s     c                   s   g | ]} j |g qS rc   )r   getr   rg   rc   rd   r     s     )r   r   nextiterr   r   
ValueErrorKeyError)rb   namesZindex_sampleZname_samplemsgerrrc   )r   r   r   rb   rd   _get_indices  s.    


zBaseGroupBy._get_indicesc                 C  s   |  |gd S )zQ
        Safe get index, translate keys for datelike to underlying repr.
        r   )r   )rb   rq   rc   rc   rd   
_get_index  s    zBaseGroupBy._get_indexc                 C  s>   t | jtr| jS | jd k	r8t| jr2| j| j S | jS | jS r`   )r   r   rR   
_selectionr/   _obj_with_exclusionsrg   rc   rc   rd   _selected_obj  s    

zBaseGroupBy._selected_objzset[str]c                 C  s
   | j  S r`   )r   _dir_additionsrg   rc   rc   rd   r     s    zBaseGroupBy._dir_additionsr\   a          >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
        >>> df
           A  B
        0  a  1
        1  b  2
        2  a  3
        3  b  4

        To get the difference between each groups maximum and minimum value in one
        pass, you can do

        >>> df.groupby('A').pipe(lambda x: x.max() - x.min())
           B
        A
        a  2
        b  2)klassexamplesz/Callable[..., T] | tuple[Callable[..., T], str]r#   )funcr_   c                 O  s   t j| |f||S r`   )compipe)rb   r   ri   rj   rc   rc   rd   r     s    zBaseGroupBy.pipeDataFrame | Seriesc                 C  s8   |dkr| j }| |}t|s(t||j|| jdS )a  
        Construct DataFrame from group with provided name.

        Parameters
        ----------
        name : object
            The name of the group to get as a DataFrame.
        obj : DataFrame, default None
            The DataFrame to take the DataFrame out of.  If
            it is None, the object groupby was called on will
            be used.

        Returns
        -------
        same type as obj
        Nr{   )r   r   r   r   Z_take_with_is_copyr{   )rb   rq   r   Zindsrc   rc   rd   	get_group	  s    
zBaseGroupBy.get_groupz#Iterator[tuple[Hashable, NDFrameT]]c                 C  sB   | j }| jj| j| jd}t|tr>t|dkr>dd |D }|S )z
        Groupby iterator.

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        r      c                 s  s   | ]\}}|f|fV  qd S r`   rc   )r   r   grouprc   rc   rd   r   2  s     z'BaseGroupBy.__iter__.<locals>.<genexpr>)r   r~   get_iteratorr   r{   r   listr   )rb   r   resultrc   rc   rd   __iter__$  s
    
zBaseGroupBy.__iter__)N)rl   rv   rw   rB   Z_hidden_attrs__annotations__r   r   r   r   r   propertyr   r   r   r   r   r*   r   r   r)   r   r(   _pipe_templater   r   r   rc   rc   rc   rd   ry   d  sh   

2
ry   OutputFrameOrSeries)boundc                   @  s^  e Zd ZU dZded< ded< edd
dddddddddddddddZddddZeddddZddddZ	eddddd d!Z
ed"d"d#d$d%Zed&d'd#d(d)Zd*d&d#d+d,Zed
d
d#d-d.Zedd&d/d0d1d2Zdd3ddd4d5d6Zed'd7d8d9Zd:d;d<d=d>Zedd?d@dAZedd?dBdCZeedD jdEedF dGd
ddHdIZedd:dJdKddd
dLdMdNZedddPdd:dQdRdSZdTdPd:dTdUdVdWZeddd:ddPdXdYdZZdddd[d\d]d^Zeddd_d`daZed
d
d#dbdcZeddde Z ed	ddfdgdhdiZ!ee"d:ddjdkZ#edlddmdndoZ$ee%dpdee&d
ddqdrdsZ'ee%dpdee&dddqdtduZ(ee%dpdee&d
ddvdwZ)ee%dpde%e&dxdddd;dzd{d|Z*eddd}d~dZ+ee%dpdee&ddPdd;ddddZ,ee%dpdee&ddPdd;ddddZ-edddddddJdddZ.eddPddddZ/ee%dpdee&dJdddZ0ee1e2dd	dddddPdd;dddZ3ee1e2dd	dddddPdddZ4ee1e2dd	dOddddPdd;dddZ5ee1e2dd	dOddddPdd;dddZ6edddPdddZ7edddPdddZ8ed'dddZ9e1e:j;dd
dddZ;edd Z<eddddZ=ee%dpdee&ddddZ>ee%dpdee&ddddZ?edddddZ@ee%dpddddZAee%dpddddZBee"e%dpde%e&dxddddZCdddd
dddZDedddddŜddǄZEee%dpddddȜddʄZFee%dpddddȜdd̄ZGee%dpde%e&dxd ddddd[d
dϜddфZHee%dpdee&d!dd
dҜddԄZIee%dpdee&d"dd
dҜddքZJee%dpdee&d#d[dd
dלddلZKee%dpdee&d$d[dd
dלddۄZLed%d:dddddޜddZMee%dpdd&dPddddZNee%dpdee&d'dPd[d
dddZOee%dpdee&d(dPdddddZPee%dpde%e&dxd)dPd
dddZQee%dpde%e&dxd*dPd
dddZRedd
dddZSeeTjUdfd"dd/d"dddZVed+dddddddd ZWdS (,  r\   a  
    Class for grouping and aggregating relational data.

    See aggregate, transform, and apply functions on this object.

    It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:

    ::

        grouped = groupby(obj, ...)

    Parameters
    ----------
    obj : pandas object
    axis : int, default 0
    level : int, default None
        Level of MultiIndex
    groupings : list of Grouping objects
        Most users should ignore this
    exclusions : array-like, optional
        List of columns to exclude
    name : str
        Most users should ignore this

    Returns
    -------
    **Attributes**
    groups : dict
        {group name -> group labels}
    len(grouped) : int
        Number of groups

    Notes
    -----
    After grouping, see aggregate, apply, and transform functions. Here are
    some other brief notes about usage. When grouping by multiple groups, the
    result index will be a MultiIndex (hierarchical) by default.

    Iteration produces (key, group) tuples, i.e. chunking the data by group. So
    you can write code like:

    ::

        grouped = obj.groupby(keys, axis=axis)
        for key, group in grouped:
            # do something with the data

    Function calls on GroupBy, if not specially implemented, "dispatch" to the
    grouped data. So if you group a DataFrame and wish to invoke the std()
    method on each group, you can simply do:

    ::

        df.groupby(mapper).std()

    rather than

    ::

        df.groupby(mapper).aggregate(np.std)

    You can pass arguments to these "wrapped" functions, too.
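
    For example, arguments are forwarded to the dispatched method (an
    indicative sketch):

    ::

        df.groupby(mapper).std(ddof=0)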

    See the online documentation for full exposition on these topics and much
    more
    r   r~   r   rz   Nr   TFr   r   r   r   ops.BaseGrouper | Nonezfrozenset[Hashable] | Noner]   )r   r   r{   r   r~   r}   	selectionrz   r   r   r   r|   r_   c              	   C  s   || _ t|tstt||| _|s6|dkr6td|| _|| _|	| _	|
| _
|| _|| _|d krt|||||	|| jd\}}}|| _||| _|| _|rt|nt | _d S )Nr   z$as_index=False only valid for axis=0)r{   r   r   r   r|   )r   r   rE   AssertionErrortyper   r   rz   r   r   r   r   r|   rI   r   Z_get_axis_numberr{   r~   	frozensetr}   )rb   r   r   r{   r   r~   r}   r   rz   r   r   r   r|   rc   rc   rd   re     s4    
zGroupBy.__init__ro   )rt   c                 C  sH   || j krt| |S || jkr(| | S tdt| j d| dd S )N'z' object has no attribute ')Z_internal_names_setr   __getattribute__r   AttributeErrorr   rl   )rb   rt   rc   rc   rd   ru     s    

zGroupBy.__getattr__rp   c                   s   t t| j|t}d|jkrNdddksDdtjkrN| j	d<  fdd}||_
|tjkrx| |S |tjk}| j|| j|| d}| jjr|r| |}|S )z<Compute the result of an operation by using GroupBy's apply.r{   Nc                   s   | f S r`   rc   xri   rk   rj   rc   rd   curried  s    z&GroupBy._op_via_apply.<locals>.curried)is_transformnot_indexed_same)rr   r   r   inspect	signature
parametersr   r   Z
no_defaultr{   rl   rF   Zplotting_methodsrm   transformation_kernels_python_apply_generalr~   has_dropped_na_set_result_index_ordered)rb   rq   ri   rj   sigr   r   r   rc   r   rd   _op_via_apply  s&    

 




zGroupBy._op_via_applyzIterable[Series]r   c                 C  s   t | d S r`   r&   rg   rc   rc   rd   _iterate_slices  s    zGroupBy._iterate_slices)r   r   c                 C  sV  ddl m} | jrn|sn| jrL| jj}| jj}| jj}||| j|||dd}n t	t
t|}	||| j|	d}n|s||| jd}| j| j}
| jr| jjd }|dk}|
| }
|
jr|j| j |
st|
j}|j|\}}|j|| jd}n|j|
| jdd}n||| jd}| jjd	kr0| jjn| j}t|trR|d k	rR||_|S )
Nr   )concatF)r{   r   levelsr   r   )r{   r   r   r{   copyr   ) Zpandas.core.reshape.concatr   r   rz   r~   result_indexr   r   r{   r   ranger   r   	_get_axisr|   
group_infoZhas_duplicatesaxesequalsr8   Zunique1d_valuesindexZget_indexer_non_uniquetakereindexr   ndimrq   r   r   rR   )rb   valuesr   r   r   r   Zgroup_levelsZgroup_namesr   r   Zaxlabelsmasktargetindexer_rq   rc   rc   rd   _concat_objects  sB    
zGroupBy._concat_objectsr   )r   r_   c                 C  s   | j | j}| jjr4| jjs4|j|| jdd}|S t| j }|j|| jdd}|j	| jd}| jjr|j
tt|| jd}|j|| jdd}|S )NFr   r   )r   r   r{   r~   Zis_monotonicr   set_axisrM   result_ilocs
sort_indexr   rO   r   )rb   r   Zobj_axisZoriginal_positionsrc   rc   rd   r   )  s    z!GroupBy._set_result_index_orderedzSeries | DataFramerD   c                 C  sr   t |tr| }|j}tt| jjt| j tdd | jj	D D ]$\}}}|rH||krH|
d|| qH|S )Nc                 S  s   g | ]
}|j qS rc   )in_axis)r   grprc   rc   rd   r   M  s     z2GroupBy._insert_inaxis_grouper.<locals>.<listcomp>r   )r   rR   to_framecolumnsr   reversedr~   r   Zget_group_levels	groupingsinsert)rb   r   r   rq   levr   rc   rc   rd   _insert_inaxis_grouperC  s    

zGroupBy._insert_inaxis_grouperz"Mapping[base.OutputKey, ArrayLike]c                 C  s   t | d S r`   r   rb   r   rc   rc   rd   _indexed_output_to_ndframeV  s    z"GroupBy._indexed_output_to_ndframec                 C  s2   | j dkr.|j}|j| jjr.| jj |_|S )Nr   )r{   r#   r   r   r   r   r  rc   rc   rd   _maybe_transpose_result[  s
    
zGroupBy._maybe_transpose_resultznpt.NDArray[np.float64] | None)r   qsc                 C  sb   | j s*| |}| }tt| jj}n| jj}|dk	rDt||}||_	| 
|}| j||dS )z
        Wraps the output of GroupBy aggregations into the expected result.

        Parameters
        ----------
        result : Series, DataFrame

        Returns
        -------
        Series or DataFrame
        Nr  )rz   r   Z_consolidaterM   r   r~   r   r   _insert_quantile_levelr   r  _reindex_output)rb   r   r  r   resrc   rc   rd   _wrap_aggregated_outputf  s    


zGroupBy._wrap_aggregated_outputr   )r   r   r   c                 C  s   t | d S r`   r   )rb   datar   r   r   rc   rc   rd   _wrap_applied_output  s    zGroupBy._wrap_applied_output)r
  c                 C  s   | j j\}}}t||}tj||dd}|j|| jd }t| j j	dkrTt
d|j}t|tr|| j j	d j}	||	}|| }
t||\}}|||
|fS )NF)Z
allow_fillr   r   zAMore than 1 grouping labels are not supported with engine='numba'r   )r~   r   rS   r8   take_ndr   r{   to_numpyr   r   NotImplementedErrorr   r   rN   rq   Zget_level_valuesr   Zgenerate_slices)rb   r
  idsr   r   sorted_indexZ
sorted_idssorted_dataZ
index_dataZ	group_keyZsorted_index_datastartsendsrc   rc   rd   _numba_prep  s&    


zGroupBy._numba_prepr   zdict[str, bool] | None)r   engine_kwargsc                 G  s   | j std| jdkr td| j}|jdkr4|n| }| |\}}}}	tj|ft	|}
|
|	||df| }| j
j}|jdkrd|ji}| }n
d|ji}|j|fd|i|S )	zp
        Perform groupby with a standard numerical aggregation function (e.g. mean)
        with Numba.
        z<as_index=False is not supported. Use .reset_index() instead.r   zaxis=1 is not supported.   r   rq   r   r   )rz   r  r{   r   r   r   r  r:   Zgenerate_shared_aggregatorrT   r~   r   rq   ravelr   _constructor)rb   r   r  Zaggregator_argsr
  dfr  r  r  r  Z
aggregatorr   r   result_kwargsrc   rc   rd   _numba_agg_general  s*    





zGroupBy._numba_agg_general)r  c                O  s   | j }|jdkr|n| }| |\}}}	}
t| tj|ft||}||
|	||t|j	f| }|j
t|	dd}|j}|jdkrd|ji}| }n
d|j	i}|j|fd|i|S )a(  
        Perform groupby transform routine with the numba engine.

        This routine mimics the data splitting routine of the DataSplitter class
        to generate the indices of each group in the sorted data and then passes the
        data and indices into a Numba jitted function.
        r  r   r   r   rq   r   r   )r   r   r   r  rG   validate_udfZgenerate_numba_transform_funcrT   r   r   r   r   argsortr   rq   r  r  )rb   r   r  ri   rj   r
  r  r  r  r  r  Znumba_transform_funcr   r   r  rc   rc   rd   _transform_with_numba  s2    	





zGroupBy._transform_with_numbac                O  s   | j }|jdkr|n| }| |\}}}	}
t| tj|ft||}||
|	||t|j	f| }| j
j}|jdkrd|ji}| }n
d|j	i}|j|fd|i|}| js| |}tt||_|S )a*  
        Perform groupby aggregation routine with the numba engine.

        This routine mimics the data splitting routine of the DataSplitter class
        to generate the indices of each group in the sorted data and then passes the
        data and indices into a Numba jitted function.
        r  r   rq   r   r   )r   r   r   r  rG   r  Zgenerate_numba_agg_funcrT   r   r   r~   r   rq   r  r  rz   r   rP   r   )rb   r   r  ri   rj   r
  r  r  r  r  r  Znumba_agg_funcr   r   r  r  rc   rc   rd   _aggregate_with_numba  s8    	





zGroupBy._aggregate_with_numbarY   Z	dataframerZ   )inputr   c              
     s   t ttrft| rTt| }t|r:| S  sBrPtd |S td dn8 snrtrt	 fdd}qtdn}t
dd H z| || j}W n. tk
r   | || j Y W  5 Q R  S X W 5 Q R X |S )Nz"Cannot pass arguments to property z$apply func should be callable, not 'r   c              
     s4   t jdd | f W  5 Q R  S Q R X d S )Nignore)all)r   Zerrstate)gri   r   rj   rc   rd   rk   :  s    zGroupBy.apply.<locals>.fz6func must be a callable if args or kwargs are suppliedzmode.chained_assignment)r   Zis_builtin_funcr   ro   hasattrrr   callabler   	TypeErrorr   r   r   r   r   )rb   r   ri   rj   r  rk   r   rc   r$  rd   rm   #  s0    




	*zGroupBy.applyr   zbool | None)rk   r
  r   r   is_aggr_   c                 C  s2   | j ||| j\}}|dkr"|}| ||||S )a  
        Apply function f in python space

        Parameters
        ----------
        f : callable
            Function to apply
        data : Series or DataFrame
            Data to apply f to
        not_indexed_same: bool, optional
            When specified, overrides the value of not_indexed_same. Apply behaves
            differently when the result index is equal to the input index, but
            this can be coincidental leading to value-dependent behavior.
        is_transform : bool, default False
            Indicator for whether the function is actually a transform
            and should not have group keys prepended.
        is_agg : bool, default False
            Indicator for whether the function is an aggregation. When the
            result is empty, we don't want to warn for this case.
            See _GroupBy._python_agg_general.

        Returns
        -------
        Series or DataFrame
            data after applying f
        N)r~   rm   r{   r  )rb   rk   r
  r   r   r(  r   Zmutatedrc   rc   rd   r   W  s    #zGroupBy._python_apply_generalr   r   numeric_only	min_countaliasnpfuncc                C  s"   | j ||||d}|j| jddS )Nhowaltr*  r+  r^   method_cython_agg_general__finalize__r   )rb   r*  r+  r,  r-  r   rc   rc   rd   _agg_general  s    	zGroupBy._agg_generalr   )r   r   r0  r_   c                 C  s   |dk	st |jdkr$t|dd}n.t|j}|jd dks@t |jdddf }| jj||dd}t	|t
rt|j||jd}n|jtkr|jtdd}t||d	S )
zn
        Fallback to pure-python aggregation if _cython_operation raises
        NotImplementedError.
        Nr   Fr   r   T)Zpreserve_dtypedtype)r   )r   r   rR   rD   r#   shapeilocr~   Z
agg_seriesr   r=   r   Z_from_sequencer9  r   astyperQ   )rb   r   r   r0  Zserr  
res_valuesrc   rc   rd   _agg_py_fallback  s    



zGroupBy._agg_py_fallbackr.  c           
        sd   j |dddd fdd}|}|}|}	jdkr`|	jdd}	|	S )	Nr*  rq   r   r   r_   c                   sT   z(j jd| fjd d}W n& tk
rN   j| j d}Y nX |S )N	aggregater   r{   r+  )r   r0  )r~   _cython_operationr   r  r>  )r   r   r0  r
  r/  rj   r+  rb   rc   rd   
array_func  s    
z/GroupBy._cython_agg_general.<locals>.array_funcr   Fr7  )_get_data_to_aggregategrouped_reduce_wrap_agged_managerr	  r{   Zinfer_objects)
rb   r/  r0  r*  r+  rj   rE  new_mgrr  outrc   rD  rd   r4    s    



zGroupBy._cython_agg_generalr   )r/  r*  r{   c                 K  s   t | d S r`   r   )rb   r/  r*  r{   rj   rc   rc   rd   _cython_transform  s    zGroupBy._cython_transform)enginer  c             
   O  s   t |r"| j|f|d|i|S t|p.|}t|tsL| j|f||S |tjkrld| d}t	|nr|tj
ks|tjkrt| |||S t| dd0 t| dd t| |||}W 5 Q R X W 5 Q R X | |S d S )Nr  r   z2' is not a valid function name for transform(name)r   Trz   )rU   r  r   Zget_cython_funcr   ro   Z_transform_generalrF   Ztransform_kernel_allowlistr   Zcythonized_kernelsr   rr   temp_setattr_wrap_transform_fast_result)rb   r   rL  r  ri   rj   r   r   rc   rc   rd   
_transform  s,    



$zGroupBy._transformc                 C  s   | j }| jj\}}}|j| jj| jdd}| jjdkrZt	|j
|}|j||j|jd}n:|jdkrhdn| j}|j||dd}|j|| j|d}|S )z7
        Fast transform path for aggregations.
        Fr   r   )r   rq   r   )r{   Zconvert_indicesr   )r   r~   r   r   r   r{   r   r   r8   r  r   r  r   rq   Z_taker   r   )rb   r   r   r  r   rJ  outputr{   rc   rc   rd   rN    s    z#GroupBy._wrap_transform_fast_resultc                 C  s   t |dkrtjg dd}ntt|}|rD| jj|| jd}n^tjt | jj	t
d}|d d||t< t|t| jjdd  dg j}| j|}|S )Nr   int64r8  r   FTr   )r   r   arrayr   concatenater   r   r{   emptyr   r   fillr<  r   tiler   r:  r#   where)rb   r   r|   filteredr   rc   rc   rd   _apply_filter/  s    
$zGroupBy._apply_filter
np.ndarray)	ascendingr_   c                 C  s2  | j j\}}}t||}|| t| }}|dkrBtjdtjdS tjd|dd |dd kf }ttjt	|d |f }| 
 }	|r|	t|	| |8 }	n&t|	tj|dd df  ||	 }	| j jrt|dktj|	jtjdd}	n|	jtjdd}	tj|tjd}
tj|tjd|
|< |	|
 S )	a.  
        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Notes
        -----
        this is currently implementing sort=False
        (though the default is sort=True) for groupby in general
        r   r8  TNr   r   Fr7  )r~   r   rS   r   r   rT  rQ  Zr_diffZnonzerocumsumrepeatr   rW  nanr<  float64intpZarange)rb   r[  r  r   r   ZsortercountrunreprJ  revrc   rc   rd   _cumcount_array@  s"    
"
&"zGroupBy._cumcount_arrayc                 C  s,   t | jtr| jjS t | jts$t| jjS r`   )r   r   rD   Z_constructor_slicedrR   r   r  rg   rc   rc   rd   _obj_1d_constructorh  s    zGroupBy._obj_1d_constructorzLiteral[('any', 'all')])val_testskipnac              	     sL   ddd fdd}dddd	dd
dd}| j tjdttj||| dS )zO
        Shared func to call any / all Cython GroupBy implementations.
        r   ztuple[np.ndarray, type]valsr_   c                   s^   t | jr0 r0t| }| r@|  } d| |< nt| tr@| j} | jt	dd} | 
tjt	fS )NTFr7  )r3   r9  r6   anyr   r   r;   _datar<  r   viewr   int8)rk  r   ri  rc   rd   objs_to_boolw  s    

z'GroupBy._bool_agg.<locals>.objs_to_boolFrZ  r   r   )r   	inferencenullabler_   c                 S  s.   |rt | jtdd| dkS | j|ddS d S )NFr7  r   )r<   r<  r   )r   rr  rs  rc   rc   rd   result_to_bool  s    z)GroupBy._bool_agg.<locals>.result_to_bool)r*  cython_dtypepre_processingpost_processingrh  ri  )F)_get_cythonized_result
libgroupbyZgroup_any_allr   r9  ro  )rb   rh  ri  rq  rt  rc   rp  rd   	_bool_aggq  s     

zGroupBy._bool_aggr^   rp  c                 C  s   |  d|S )a  
        Return True if any value in the group is truthful, else False.

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing.

        Returns
        -------
        Series or DataFrame
            DataFrame or Series of boolean values, where a value is True if any element
            is True within its respective group, False otherwise.
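
        For example, with boolean-like values (a minimal sketch; the frame is an
        assumption and the output formatting is indicative):

        >>> df = pd.DataFrame({"A": ["a", "a", "b"], "B": [0, 1, 0]})
        >>> df.groupby("A")["B"].any()
        A
        a     True
        b    False
        Name: B, dtype: bool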
        rl  rz  rb   ri  rc   rc   rd   rl    s    zGroupBy.anyc                 C  s   |  d|S )a  
        Return True if all values in the group are truthful, else False.

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing.

        Returns
        -------
        Series or DataFrame
            DataFrame or Series of boolean values, where a value is True if all elements
            are True within its respective group, False otherwise.
        r"  r{  r|  rc   rc   rd   r"    s    zGroupBy.allc              	     s   |   }| jj\ } dk|jdkddd fdd}||}| |}t| dd | |}W 5 Q R X | j	|d	d
S )z
        Compute count of group, excluding missing values.

        Returns
        -------
        Series or DataFrame
            Count of values within each group.
        r   r   r   )bvaluesr_   c                   sp   | j dkr"t| dd @ }nt|  @ }tj| d}rl|j dksRt|jd dksdt|d S |S )Nr   r   )r   Zmax_binr  r   )r   r6   reshaper   Zcount_level_2dr   r:  )r}  ZmaskedZcountedr  Z	is_seriesr   r   rc   rd   hfunc  s    
zGroupBy.count.<locals>.hfuncr   Tr   
fill_value)
rF  r~   r   r   rG  rH  r   rM  r	  r  )rb   r
  r   r  rI  new_objr   rc   r  rd   rb    s    


zGroupBy.count)Zsee_alsocython)r*  rL  r  c                   sL   t |r ddlm} | ||S | jd fdd d}|j| jddS d	S )
aW  
        Compute mean of groups, excluding missing values.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.

            .. versionchanged:: 2.0.0

                numeric_only no longer accepts ``None`` and defaults to ``False``.

        engine : str, default None
            * ``'cython'`` : Runs the operation through C-extensions from cython.
            * ``'numba'`` : Runs the operation through JIT compiled code from numba.
            * ``None`` : Defaults to ``'cython'`` or the global setting
              ``compute.use_numba``

            .. versionadded:: 1.4.0

        engine_kwargs : dict, default None
            * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{{'nopython': True, 'nogil': False, 'parallel': False}}``

            .. versionadded:: 1.4.0

        Returns
        -------
        pandas.Series or pandas.DataFrame
        %(see_also)s
        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5],
        ...                    'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])

        Groupby one column and return the mean of the remaining columns in
        each group.

        >>> df.groupby('A').mean()
             B         C
        A
        1  3.0  1.333333
        2  4.0  1.500000

        Groupby two columns and return the mean of the remaining column.

        >>> df.groupby(['A', 'B']).mean()
                 C
        A B
        1 2.0  2.0
          4.0  1.0
        2 3.0  1.0
          5.0  2.0

        Groupby one column and return the mean of only particular column in
        the group.

        >>> df.groupby('A')['B'].mean()
        A
        1    3.0
        2    4.0
        Name: B, dtype: float64
        r   )sliding_meanmeanc                   s   t | j dS Nr*  )rR   r  r   r  rc   rd   r   A  r   zGroupBy.mean.<locals>.<lambda>r0  r*  r^   r1  N)rU   pandas.core._numba.kernelsr  r  r4  r5  r   )rb   r*  rL  r  r  r   rc   r  rd   r    s    M
zGroupBy.meanr  c                   s(   | j d fdd d}|j| jddS )a  
        Compute median of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.

            .. versionchanged:: 2.0.0

                numeric_only no longer accepts ``None`` and defaults to False.

        Returns
        -------
        Series or DataFrame
            Median of values within each group.
        medianc                   s   t | j dS r  )rR   r  r   r  rc   rd   r   ]  r   z GroupBy.median.<locals>.<lambda>r  r^   r1  r3  )rb   r*  r   rc   r  rd   r  F  s    
zGroupBy.medianr   z
str | None)ddofrL  r  r*  c           	   
   C  sn   t |r(ddlm} t| |||S dd }dddd	d
d}| jtjt	tj
|d|||dd}|S dS )a  
        Compute standard deviation of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        engine : str, default None
            * ``'cython'`` : Runs the operation through C-extensions from cython.
            * ``'numba'`` : Runs the operation through JIT compiled code from numba.
            * ``None`` : Defaults to ``'cython'`` or globally setting
              ``compute.use_numba``

            .. versionadded:: 1.4.0

        engine_kwargs : dict, default None
            * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{{'nopython': True, 'nogil': False, 'parallel': False}}``

            .. versionadded:: 1.4.0

        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        Series or DataFrame
            Standard deviation of values within each group.
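
        As a sketch (the frame is an assumption; output shown is indicative,
        with the default ``ddof=1``):

        >>> df = pd.DataFrame({"A": ["a", "a", "b", "b"], "B": [1, 3, 2, 4]})
        >>> df.groupby("A")["B"].std()
        A
        a    1.414214
        b    1.414214
        Name: B, dtype: float64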
        r   sliding_varc                 S  s   t | tr| jd fS | d fS r`   )r   r;   rm  r   rc   rc   rd   _preprocessing  s    

z#GroupBy.std.<locals>._preprocessingFNr   r   )rs  r_   c                 S  s@   |r6|j dkr|d d df }tt| |tjS t| S )Nr  r   )r   r@   r   sqrtrn  bool_)rk  rr  rs  result_maskrc   rc   rd   _postprocessing  s
    
z$GroupBy.std.<locals>._postprocessingTstd)ru  r*  needs_countsrv  rw  r  r/  )FN)rU   r  r  r   r  r  rx  ry  Z	group_varr9  r`  )	rb   r  rL  r  r*  r  r  r  r   rc   rc   rd   r  b  s$    3   	

zGroupBy.stdc                   s@   t |r"ddlm} | || S | jd fdd| dS dS )a  
        Compute variance of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        engine : str, default None
            * ``'cython'`` : Runs the operation through C-extensions from cython.
            * ``'numba'`` : Runs the operation through JIT compiled code from numba.
            * ``None`` : Defaults to ``'cython'`` or the global setting
              ``compute.use_numba``

            .. versionadded:: 1.4.0

        engine_kwargs : dict, default None
            * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
            * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
              and ``parallel`` dictionary keys. The values must either be ``True`` or
              ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
              ``{{'nopython': True, 'nogil': False, 'parallel': False}}``

            .. versionadded:: 1.4.0

        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        Series or DataFrame
            Variance of values within each group.
        r   r  varc                   s   t | j dS )Nr  )rR   r  r   r  rc   rd   r     r   zGroupBy.var.<locals>.<lambda>)r0  r*  r  N)rU   r  r  r  r4  )rb   r  rL  r  r*  r  rc   r  rd   r    s    3
zGroupBy.varzSequence[Hashable] | None)subset	normalizer   r[  r|   r_   c              	     s  | j dkrtd|rdnd}| j}| jdd | jjD  ttr`j}| krXg ng}	nxt	j
}
|dk	rt	|t	 @ }|rtd| d	|
 }|rtd| d
n|
 fddtj
D }	t| jj}|	D ]0}t||| j | jd|d\}}}|t|j7 }q|j|| j| j| jd}tt| }||_tdd |D rdd |D }tj|dd |D d \}}|j|dd}|rttt| jj|jj}|j|j|| j| jdd}|| }| d}|rtt| jj}|j!|dj"|dd}| j#r$|}n^|j}t$%|j&}||krPtd| d||_|'tt||_|( }||g |_
|}|j)| jddS ) z
        Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy.

        SeriesGroupBy additionally supports a bins argument. See the docstring of
        DataFrameGroupBy.value_counts for a description of arguments.
        r   z1DataFrameGroupBy.value_counts only handles axis=0Z
proportionrb  c                 S  s   h | ]}|j r|jqS rc   r   rq   r   groupingrc   rc   rd   	<setcomp>  s     z(GroupBy._value_counts.<locals>.<setcomp>NzKeys z0 in subset cannot be in the groupby column keys.z) in subset do not exist in the DataFrame.c                   s2   g | ]*\}}| kr|krj d d |f qS r`   )r;  )r   idx_nameZin_axis_namesr   Z	subsettedrc   rd   r   %  s    z)GroupBy._value_counts.<locals>.<listcomp>F)r   r{   r   r   r|   )r   r   r|   c                 s  s&   | ]}t |jttfo|j V  qd S r`   )r   grouping_vectorr=   rL   Z	_observedr  rc   rc   rd   r   D  s   z(GroupBy._value_counts.<locals>.<genexpr>c                 S  s   g | ]
}|j qS rc   )r   r   pingrc   rc   rd   r   I  s     c                 S  s   g | ]
}|j qS rc   rp   r  rc   rc   rd   r   K  s     r   r   r  )r   r|   sumg        r[  )r   Zsort_remainingzColumn label 'z' is duplicate of result columnZvalue_countsr1  )*r{   r  r   r   r~   r   r   rR   rq   setr   r   	enumerater   rI   r   r^   r   r|   r   sizerl  rN   from_productZ	sortlevelr   r   r   r   ZnlevelsZ	droplevelZ	transformZfillnasort_valuesr   rz   r   Zfill_missing_namesr   Z	set_namesreset_indexr5  )rb   r  r  r   r[  r|   rq   r  r  r   Zunique_colsZclashingZdoesnt_existr   r   r~   r   gbZresult_serieslevels_listZmulti_indexr   Zindexed_group_sizeZindex_levelr   r   r   Zresult_framerc   r  rd   _value_counts  s    





 

 
zGroupBy._value_countsr  r*  c                 C  s   |r>| j jdkr>t| j js>tt| j d| d| j j | j||d}|jdkrj|t	| 
  }n`|j| j }| 
 }|j|}|j|}|jdd|f  t	|jdd|f   < |S )as  
        Compute standard error of the mean of groups, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        Series or DataFrame
            Standard error of the mean of values within each group.
        r   z.sem called with numeric_only=z and dtype r  N)r   r   r2   r9  r'  r   rl   r  r   r  rb  r   
differencer}   uniqueZget_indexer_forr;  )rb   r  r*  r   colscountsr   Zcount_ilocsrc   rc   rd   semw  s    
.zGroupBy.semc              	   C  st   | j  }t| jtr*| j|| jjd}n
| |}t| dd | j	|dd}W 5 Q R X | j
sp|d }|S )z
        Compute group sizes.

        Returns
        -------
        DataFrame or Series
            Number of rows in each group as a Series if as_index is True
            or a DataFrame if as_index is False.
    # [bytecode residue: implementations of GroupBy.size, GroupBy.sum, GroupBy.prod,
    #  GroupBy.min and GroupBy.max, plus the def line of GroupBy.first]
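# Illustrative only -- not from the original source. A hedged sketch of the
# aggregation reductions implemented in the residue above (size, sum, prod,
# min, max) on a hypothetical DataFrame `df`.
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3], "y": [0.5, 1.5, 2.5]})
gb = df.groupby("key")
sizes = gb.size()            # rows per group (a Series, since as_index=True by default)
totals = gb.sum()            # per-group sums of the numeric columns
lows, highs = gb.min(), gb.max()
# min_count makes the result NA when too few non-NA values are present per group:
strict_sum = gb.sum(min_count=2)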
        Compute the first non-null entry of each column.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.
        min_count : int, default -1
            The required number of valid values to perform the operation. If fewer
            than ``min_count`` non-NA values are present the result will be NA.

        Returns
        -------
        Series or DataFrame
            First non-null of values within each group.

        See Also
        --------
        DataFrame.groupby : Apply a function groupby to each row or column of a
            DataFrame.
        pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry
            of each column.
        pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.

        Examples
        --------
        >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3],
        ...                        D=['3/11/2000', '3/12/2000', '3/13/2000']))
        >>> df['D'] = pd.to_datetime(df['D'])
        >>> df.groupby("A").first()
             B  C          D
        A
        1  5.0  1 2000-03-11
        3  6.0  3 2000-03-13
        >>> df.groupby("A").first(min_count=2)
            B    C          D
        A
        1 NaN  1.0 2000-03-11
        3 NaN  NaN        NaT
        >>> df.groupby("A").first(numeric_only=True)
             B  C
        A
        1  5.0  1
        3  6.0  3
    # [bytecode residue: GroupBy.first implementation (first_compat helper) and the
    #  def line of GroupBy.last]
az  
        Compute the last non-null entry of each column.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.
        min_count : int, default -1
            The required number of valid values to perform the operation. If fewer
            than ``min_count`` non-NA values are present the result will be NA.

        Returns
        -------
        Series or DataFrame
            Last non-null of values within each group.

        See Also
        --------
        DataFrame.groupby : Apply a function groupby to each row or column of a
            DataFrame.
        pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry
            of each column.
        pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.

        Examples
        --------
        >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3]))
        >>> df.groupby("A").last()
             B  C
        A
        1  5.0  2
        3  6.0  3
    # [bytecode residue: GroupBy.last implementation (last_compat helper) and the
    #  def line of GroupBy.ohlc]
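# Illustrative only -- not part of the original module. A short, hedged usage
# sketch for the ohlc() and describe() group reductions documented in this
# region, using a hypothetical DataFrame `df` of prices.
import pandas as pd

df = pd.DataFrame({"ticker": ["X", "X", "X", "Y", "Y"],
                   "price": [10.0, 12.0, 9.0, 5.0, 7.0]})
gb = df.groupby("ticker")["price"]
bars = gb.ohlc()         # one row per group with open/high/low/close columns
summary = gb.describe()  # count/mean/std/min/quartiles/max per group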
dg}| j j	|| jj
|d}| |S | dd | j}| js| |}tt||_|S )a  
        Compute open, high, low and close values of a group, excluding missing values.

        For multiple groupings, the result index will be a MultiIndex

        Returns
        -------
        DataFrame
            Open, high, low and close values within each group.
        r   zNo numeric types to aggregaterA  ohlcr   r   rB  openhighlowclose)r   r   c                 S  s   |   S r`   )r  r   rc   rc   rd   r   	  r   zGroupBy.ohlc.<locals>.<lambda>)r   r   r   r2   r9  r'   r~   rC  r   Z_constructor_expanddimr   r  Z_apply_to_column_groupbysr   rz   r   rP   r   r   )rb   r   Z
is_numericr=  Z	agg_namesr   rc   rc   rd   r  	  s6    
      
 
zGroupBy.ohlcc              	     s   | j }t|dkrN|j d}|jdkr2|}n| }| jjd d S t	| dd" | j
 fdd|dd}W 5 Q R X | jdkr|jS | }| js| |}tt||_|S )	Nr   percentilesincludeexcluder   rz   Tc                   s   | j  dS )Nr  )describer   r  r  r  rc   rd   r   	  s     z"GroupBy.describe.<locals>.<lambda>)r   )r   r   r  r   Zunstackr   r#   r;  r   rM  r   r{   rz   r   rP   r   )rb   r  r  r  r   Z	describedr   rc   r  rd   r  	  s0      


zGroupBy.describec                 O  s   ddl m} || |f||S )a<  
        Provide resampling when using a TimeGrouper.

        Given a grouper, the function resamples it according to a string
        "string" -> "frequency".

        See the :ref:`frequency aliases <timeseries.offset_aliases>`
        documentation for more details.

        Parameters
        ----------
        rule : str or DateOffset
            The offset string or object representing target grouper conversion.
        *args, **kwargs
            Possible arguments are `how`, `fill_method`, `limit`, `kind` and
            `on`, and other arguments of `TimeGrouper`.

        Returns
        -------
        Grouper
            Return a new grouper with our resampler appended.

        See Also
        --------
        Grouper : Specify a frequency to resample with when
            grouping by a key.
        DatetimeIndex.resample : Frequency conversion and resampling of
            time series.

        Examples
        --------
        >>> idx = pd.date_range('1/1/2000', periods=4, freq='T')
        >>> df = pd.DataFrame(data=4 * [range(2)],
        ...                   index=idx,
        ...                   columns=['a', 'b'])
        >>> df.iloc[2, 0] = 5
        >>> df
                            a  b
        2000-01-01 00:00:00  0  1
        2000-01-01 00:01:00  0  1
        2000-01-01 00:02:00  5  1
        2000-01-01 00:03:00  0  1

        Downsample the DataFrame into 3 minute bins and sum the values of
        the timestamps falling into a bin.

        >>> df.groupby('a').resample('3T').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  2
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:00:00  5  1

        Upsample the series into 30 second bins.

        >>> df.groupby('a').resample('30S').sum()
                            a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:00:30  0  0
            2000-01-01 00:01:00  0  1
            2000-01-01 00:01:30  0  0
            2000-01-01 00:02:00  0  0
            2000-01-01 00:02:30  0  0
            2000-01-01 00:03:00  0  1
        5   2000-01-01 00:02:00  5  1

        Resample by month. Values are assigned to the month of the period.

        >>> df.groupby('a').resample('M').sum()
                    a  b
        a
        0   2000-01-31  0  3
        5   2000-01-31  5  1

        Downsample the series into 3 minute bins as above, but close the right
        side of the bin interval.

        >>> df.groupby('a').resample('3T', closed='right').sum()
                                 a  b
        a
        0   1999-12-31 23:57:00  0  1
            2000-01-01 00:00:00  0  2
        5   2000-01-01 00:00:00  5  1

        Downsample the series into 3 minute bins and close the right side of
        the bin interval, but label each bin using the right edge instead of
        the left.

        >>> df.groupby('a').resample('3T', closed='right', label='right').sum()
                                 a  b
        a
        0   2000-01-01 00:00:00  0  1
            2000-01-01 00:03:00  0  2
        5   2000-01-01 00:03:00  5  1
        r   )get_resampler_for_grouping)Zpandas.core.resampler  )rb   Zruleri   rj   r  rc   rc   rd   resample	  s    bzGroupBy.resamplerX   c                 O  s,   ddl m} || jf|| j| jd|S )a  
        Return a rolling grouper, providing rolling functionality per group.

        Parameters
        ----------
        window : int, timedelta, str, offset, or BaseIndexer subclass
            Size of the moving window.

            If an integer, the fixed number of observations used for
            each window.

            If a timedelta, str, or offset, the time period of each window. Each
            window will be a variable sized based on the observations included in
            the time-period. This is only valid for datetimelike indexes.
            To learn more about the offsets & frequency strings, please see `this link
            <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

            If a BaseIndexer subclass, the window boundaries
            based on the defined ``get_window_bounds`` method. Additional rolling
            keyword arguments, namely ``min_periods``, ``center``, ``closed`` and
            ``step`` will be passed to ``get_window_bounds``.

        min_periods : int, default None
            Minimum number of observations in window required to have a value;
            otherwise, result is ``np.nan``.

            For a window that is specified by an offset,
            ``min_periods`` will default to 1.

            For a window that is specified by an integer, ``min_periods`` will default
            to the size of the window.

        center : bool, default False
            If False, set the window labels as the right edge of the window index.

            If True, set the window labels as the center of the window index.

        win_type : str, default None
            If ``None``, all points are evenly weighted.

            If a string, it must be a valid `scipy.signal window function
            <https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows>`__.

            Certain Scipy window types require additional parameters to be passed
            in the aggregation function. The additional parameters must match
            the keywords specified in the Scipy window type method signature.

        on : str, optional
            For a DataFrame, a column label or Index level on which
            to calculate the rolling window, rather than the DataFrame's index.

            Provided integer column is ignored and excluded from result since
            an integer index is not used to calculate the rolling window.

        axis : int or str, default 0
            If ``0`` or ``'index'``, roll across the rows.

            If ``1`` or ``'columns'``, roll across the columns.

            For `Series` this parameter is unused and defaults to 0.

        closed : str, default None
            If ``'right'``, the first point in the window is excluded from calculations.

            If ``'left'``, the last point in the window is excluded from calculations.

            If ``'both'``, no points in the window are excluded from calculations.

            If ``'neither'``, the first and last points in the window are excluded
            from calculations.

            Default ``None`` (``'right'``).

        method : str {'single', 'table'}, default 'single'
            Execute the rolling operation per single column or row (``'single'``)
            or over the entire object (``'table'``).

            This argument is only implemented when specifying ``engine='numba'``
            in the method call.

        Returns
        -------
        RollingGroupby
            Return a new grouper with our rolling appended.

        See Also
        --------
        Series.rolling : Calling object with Series data.
        DataFrame.rolling : Calling object with DataFrames.
        Series.groupby : Apply a function groupby to a Series.
        DataFrame.groupby : Apply a function groupby.

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
        ...                    'B': [1, 2, 3, 4],
        ...                    'C': [0.362, 0.227, 1.267, -0.562]})
        >>> df
              A  B      C
        0     1  1  0.362
        1     1  2  0.227
        2     2  3  1.267
        3     2  4 -0.562

        >>> df.groupby('A').rolling(2).sum()
            B      C
        A
        1 0  NaN    NaN
          1  3.0  0.589
        2 2  NaN    NaN
          3  7.0  0.705

        >>> df.groupby('A').rolling(2, min_periods=1).sum()
            B      C
        A
        1 0  1.0  0.362
          1  3.0  0.589
        2 2  3.0  1.267
          3  7.0  0.705

        >>> df.groupby('A').rolling(2, on='B').sum()
            B      C
        A
        1 0  1    NaN
          1  2  0.589
        2 2  3    NaN
          3  4  0.705
    # [bytecode residue: GroupBy.rolling implementation (delegates to
    #  pandas.core.window.RollingGroupby) and the def line of GroupBy.expanding]
        Return an expanding grouper, providing expanding
        functionality per group.
    # [bytecode residue: GroupBy.expanding implementation (delegates to
    #  pandas.core.window.ExpandingGroupby) and the def line of GroupBy.ewm]
        Return an ewm grouper, providing ewm functionality per group.
    # [bytecode residue: GroupBy.ewm implementation (delegates to
    #  pandas.core.window.ExponentialMovingWindowGroupby)]
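# Illustrative only -- not from the original source. A hedged sketch of the
# window helpers returned by the rolling(), expanding() and ewm() methods
# documented above, on a hypothetical grouped DataFrame `df`.
import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "a", "b", "b"],
                   "val": [1.0, 2.0, 4.0, 8.0, 16.0]})
g = df.groupby("grp")["val"]
rolling_mean = g.rolling(2, min_periods=1).mean()  # per-group moving average
running_total = g.expanding().sum()                # per-group expanding window sum
smoothed = g.ewm(span=3).mean()                    # per-group exponential smoothing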
    # [bytecode residue: def line and start of the body of GroupBy._fill(direction, limit=None)]
 }||}|}	jdkr|	j}	jj|	_jj|	_|	S )a  
        Shared function for `pad` and `backfill` to call Cython method.

        Parameters
        ----------
        direction : {'ffill', 'bfill'}
            Direction passed to underlying Cython function. `bfill` will cause
            values to be filled backwards. `ffill` and any other values will
            default to a forward fill
        limit : int, default None
            Maximum number of consecutive values to fill. If `None`, this
            method will convert to -1 prior to passing to Cython

        Returns
        -------
        `Series` or `DataFrame` with filled values

        See Also
        --------
        pad : Forward fill the missing values in the dataset.
        backfill : Backward fill the missing values in the dataset.
    # [bytecode residue: GroupBy._fill implementation (block-wise call into
    #  libgroupby.group_fillna_indexer over argsorted group labels) and the
    #  def line of GroupBy.ffill(limit=None)]
        Forward fill the values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series or DataFrame
            Object with missing values filled.

        See Also
        --------
        Series.ffill: Fill NA/NaN values by propagating the last valid observation forward.
        DataFrame.ffill: Object with missing values filled or None if inplace=True.
        Series.fillna: Fill NaN values of a Series.
        DataFrame.fillna: Fill NaN values of a DataFrame.
    # [bytecode residue: GroupBy.ffill body, which delegates to
    #  self._fill("ffill", limit=limit), and the def line of GroupBy.bfill(limit=None)]
        Backward fill the values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series or DataFrame
            Object with missing values filled.

        See Also
        --------
        Series.bfill :  Backward fill the missing values in the dataset.
        DataFrame.bfill:  Backward fill the missing values in the dataset.
        Series.fillna: Fill NaN values of a Series.
        DataFrame.fillna: Fill NaN values of a DataFrame.
    # [bytecode residue: GroupBy.bfill body, which delegates to
    #  self._fill("bfill", limit=limit), and the def line of the GroupBy.nth property]
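# Illustrative only -- not part of the original module. A hedged example of the
# group-wise ffill()/bfill() documented above; filling never crosses group
# boundaries. `df` is hypothetical.
import numpy as np
import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "a", "b", "b"],
                   "val": [1.0, np.nan, np.nan, np.nan, 5.0]})
g = df.groupby("grp")["val"]
forward = g.ffill()          # [1.0, 1.0, 1.0, NaN, 5.0]; NaN stays: no prior value in group "b"
backward = g.bfill(limit=1)  # limit caps how many consecutive NaNs are filled per gap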
        Take the nth row from each group if n is an int, otherwise a subset of rows.

        Can be either a call or an index. dropna is not available with index notation.
        Index notation accepts a comma separated list of integers and slices.

        If ``dropna`` is given, take the nth non-null row; ``dropna`` is either
        'all' or 'any', which is equivalent to calling dropna(how=dropna)
        before the groupby.

        Parameters
        ----------
        n : int, slice or list of ints and slices
            A single nth value for the row or a list of nth values or slices.

            .. versionchanged:: 1.4.0
                Added slice and lists containing slices.
                Added index notation.

        dropna : {'any', 'all', None}, default None
            Apply the specified dropna operation before counting which row is
            the nth row. Only supported if n is an int.

        Returns
        -------
        Series or DataFrame
            N-th value within each group.
        %(see_also)s
        Examples
        --------

        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
        >>> g = df.groupby('A')
        >>> g.nth(0)
           A   B
        0  1 NaN
        2  2 3.0
        >>> g.nth(1)
           A   B
        1  1 2.0
        4  2 5.0
        >>> g.nth(-1)
           A   B
        3  1 4.0
        4  2 5.0
        >>> g.nth([0, 1])
           A   B
        0  1 NaN
        1  1 2.0
        2  2 3.0
        4  2 5.0
        >>> g.nth(slice(None, -1))
           A   B
        0  1 NaN
        1  1 2.0
        2  2 3.0

        Index notation may also be used

        >>> g.nth[0, 1]
           A   B
        0  1 NaN
        1  1 2.0
        2  2 3.0
        4  2 5.0
        >>> g.nth[:-1]
           A   B
        0  1 NaN
        1  1 2.0
        2  2 3.0

        Specifying `dropna` allows ignoring ``NaN`` values

        >>> g.nth(0, dropna='any')
           A   B
        1  1 2.0
        2  2 3.0

        When the specified ``n`` is larger than any of the groups, an
        empty DataFrame is returned

        >>> g.nth(3, dropna='any')
        Empty DataFrame
        Columns: [A, B]
        Index: []
    # [bytecode residue: the GroupBy.nth property (returns a GroupByNthSelector)
    #  and part of the GroupBy._nth implementation]



   zGroupBy._nth      ?linearzfloat | AnyArrayLike)qinterpolationr*  c                   s   ddddddddddd	fd
dt |}|r<|g}tj|tjd}| jj\}}t|ttj	||d t|dkr|
 d nd}t|dk||ddd fdd}	| j|dd}
|
|	}| |}|r| |S | j||dS )a  
        Return group values at the given quantile, a la numpy.percentile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)
            Value(s) between 0 and 1 providing the quantile(s) to compute.
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
            Method to use when the desired quantile falls between two points.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        Series or DataFrame
            Return type determined by caller of GroupBy object.

        See Also
        --------
        Series.quantile : Similar method for Series.
        DataFrame.quantile : Similar method for DataFrame.
        numpy.percentile : NumPy method to compute qth percentile.

        Examples
        --------
        >>> df = pd.DataFrame([
        ...     ['a', 1], ['a', 2], ['a', 3],
        ...     ['b', 1], ['b', 3], ['b', 5]
        ... ], columns=['key', 'val'])
        >>> df.groupby('key').quantile()
            val
        key
        a    2.0
        b    3.0
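# Illustrative only -- not from the original source. A hedged sketch showing that
# passing a list of quantiles to GroupBy.quantile adds the requested quantile
# values as an extra, inner-most index level; this mirrors the docstring's frame.
import pandas as pd

df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 1], ["b", 3], ["b", 5]],
                  columns=["key", "val"])
res = df.groupby("key")["val"].quantile([0.25, 0.75])
# res has a two-level index: the group key plus the requested quantile.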
        r   z"tuple[np.ndarray, DtypeObj | None]rj  c                 S  s   t | rtdd }t| tr@t| jr@| jttj	d}| j}nt
| jrxt| trf| jttj	d}n| }ttj}nxt| jrt| tr| jttj	d}nRt| jr| j}| |fS t| trt| rttj}| jttj	d}n
t| }||fS )Nz7'quantile' cannot be performed against 'object' dtypes!)r9  Zna_value)r3   r'  r   r;   r2   r9  r  floatr   r_  r1   r?   rQ  r-   r5   r.   r`  Zasarray)rk  rr  rJ  rc   rc   rd   pre_processorH  s.    



z'GroupBy.quantile.<locals>.pre_processorrZ  zDtypeObj | Noneznp.ndarray | None)rk  rr  r  	orig_valsr_   c                   s   |rt |trL|d k	st dkr4t|s4t| |S t|| |j|S nPt|r\ dkst	|r| d
|jj} || S t |tjst| |S | S )N>   r  midpointi8)r   r;   r   r.   r@   r   r<  Znumpy_dtyper1   r5   rn  Z_ndarrayr9  Z_from_backing_datar   )rk  rr  r  r  )r  rc   rd   post_processork  s6    




z(GroupBy.quantile.<locals>.post_processorr8  )r   r  r  r   r   r   r@  c                   sb  | }t | tr*| j}tjftjd}nt| }d }t| j}| \}}d}|j	dkrz|j
d }t|tf}n}tj|ftjd}	||f}
t|
jtjdd}|r|dtj}|j	dkr |	d ||||d n.t|D ]$} |	| || || || d	 q|j	dkrD|	d
}	|d k	rT|d
}n|	| }	|	|||S )Nr8  r   r  r   Fr7  r  )r   r   sort_indexerr  )r   r   r  K)r   r;   Z_maskr   zerosr  r6   r5   r9  r   r:  Zbroadcast_tor   rT  r`  Zlexsortr<  ra  rn  r   r  r~  )r   r  r   r  is_datetimelikerk  rr  ncolsZshaped_labelsrJ  orderZsort_arrr  )r   labels_for_lexsortr   nqsr  r  rc   rd   r    sJ    



 

"

z"GroupBy.quantile.<locals>.blk_funcquantiler?  r  )r4   r   rR  r`  r~   r   r   r   ry  Zgroup_quantiler  rW  rF  rG  rH  r	  )rb   r  r  r*  Zorig_scalarr  r  r   Zna_label_for_sortingr  r
  r  r  rc   )r   r  r  r   r  r  r  rd   r    s.    1#/   6


zGroupBy.quantiler  c                 C  s   | j }|| j}| jjd }| jjrBt|dktj|}tj	}ntj
}tdd | jjD rnt|ddd }| j|||d}|s| jd | }|S )	a  
        Number each group from 0 to the number of groups - 1.

        This is the enumerative complement of cumcount.  Note that the
        numbers given to the groups match the order in which the groups
        would be seen when iterating over the groupby object, not the
        order they are first observed.

        Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN`
        and will be skipped from the count.

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from number of group - 1 to 0.

        Returns
        -------
        Series
            Unique numbers for each group.

        See Also
        --------
        .cumcount : Number the rows in each group.

        Examples
        --------
        >>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]})
        >>> df
           color
        0    red
        1   None
        2    red
        3   blue
        4   blue
        5    red
        >>> df.groupby("color").ngroup()
        0    1.0
        1    NaN
        2    1.0
        3    0.0
        4    0.0
        5    1.0
        dtype: float64
        >>> df.groupby("color", dropna=False).ngroup()
        0    1
        1    2
        2    1
        3    0
        4    0
        5    1
        dtype: int64
        >>> df.groupby("color", dropna=False).ngroup(ascending=False)
        0    1
        1    0
        2    1
        3    2
        4    2
        5    1
        dtype: int64
        r   r   c                 s  s   | ]}|j V  qd S r`   )Z_passed_categoricalr  rc   rc   rd   r   ;  s     z!GroupBy.ngroup.<locals>.<genexpr>Zdense)ties_methodr   r8  )r   r   r{   r~   r   r   r   rW  r_  r`  rQ  rl  r   r   rg  r   )rb   r[  r   r   Zcomp_idsr9  r   rc   rc   rd   ngroup  s    @zGroupBy.ngroupc                 C  s&   | j | j}| j|d}| ||S )a  
        Number each item in each group from 0 to the length of that group - 1.

        Essentially this is equivalent to

        .. code-block:: python

            self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Returns
        -------
        Series
            Sequence number of each element within each group.

        See Also
        --------
        .ngroup : Number the groups themselves.

        Examples
        --------
        >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
        ...                   columns=['A'])
        >>> df
           A
        0  a
        1  a
        2  a
        3  b
        4  b
        5  a
        >>> df.groupby('A').cumcount()
        0    0
        1    1
        2    2
        3    0
        4    1
        5    3
        dtype: int64
        >>> df.groupby('A').cumcount(ascending=False)
        0    3
        1    2
        2    1
        3    1
        4    0
        5    0
        dtype: int64
        r  )r   r   r{   rf  rg  )rb   r[  r   Z	cumcountsrc   rc   rd   cumcountD  s    7zGroupBy.cumcountaveragekeep)r2  r[  	na_optionpctr{   r_   c           	        sr   |dkrd}t |||||d dkr\dd<  fdd}| j|| jd	d
}|S | jdd dS )a_
  
        Provide the rank of values within each group.

        Parameters
        ----------
        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
            * average: average rank of group.
            * min: lowest rank in group.
            * max: highest rank in group.
            * first: ranks assigned in order they appear in the array.
            * dense: like 'min', but rank always increases by 1 between groups.
        ascending : bool, default True
            False for ranks by high (1) to low (N).
        na_option : {'keep', 'top', 'bottom'}, default 'keep'
            * keep: leave NA values where they are.
            * top: smallest rank if ascending.
            * bottom: smallest rank if descending.
        pct : bool, default False
            Compute percentage rank of data within each group.
        axis : int, default 0
            The axis of the object over which to compute the rank.

        Returns
        -------
        DataFrame with ranking of values within each group
        %(see_also)s
        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {
        ...         "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"],
        ...         "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5],
        ...     }
        ... )
        >>> df
          group  value
        0     a      2
        1     a      4
        2     a      2
        3     a      3
        4     a      5
        5     b      1
        6     b      2
        7     b      4
        8     b      1
        9     b      5
        >>> for method in ['average', 'min', 'max', 'dense', 'first']:
        ...     df[f'{method}_rank'] = df.groupby('group')['value'].rank(method)
        >>> df
          group  value  average_rank  min_rank  max_rank  dense_rank  first_rank
        0     a      2           1.5       1.0       2.0         1.0         1.0
        1     a      4           4.0       4.0       4.0         3.0         4.0
        2     a      2           1.5       1.0       2.0         1.0         2.0
        3     a      3           3.0       3.0       3.0         2.0         3.0
        4     a      5           5.0       5.0       5.0         4.0         5.0
        5     b      1           1.5       1.0       2.0         1.0         1.0
        6     b      2           3.0       3.0       3.0         2.0         3.0
        7     b      4           4.0       4.0       4.0         3.0         4.0
        8     b      1           1.5       1.0       2.0         1.0         2.0
        9     b      5           5.0       5.0       5.0         4.0         5.0
        >   topbottomr  z3na_option must be one of 'keep', 'top', or 'bottom')r  r[  r	  r
  r   r  r2  c                   s   | j f  ddS )NF)r{   r*  )rankr   r{   rj   rc   rd   r     r   zGroupBy.rank.<locals>.<lambda>Tr   r  F)r*  r{   )r  )r   popr   r   rK  )	rb   r2  r[  r	  r
  r{   r   rk   r   rc   r  rd   r    s2    H   zGroupBy.rank)r{   r_   c                   sH   t d|ddg  dkr< fdd}| j|| jddS | jd	S )
zq
        Cumulative product for each group.

        Returns
        -------
        Series or DataFrame
        cumprodr*  ri  r   c                   s   | j f d iS Nr{   )r  r   r  rc   rd   r     r   z!GroupBy.cumprod.<locals>.<lambda>Tr  )r  nvZvalidate_groupby_funcr   r   rK  rb   r{   ri   rj   rk   rc   r  rd   r    s
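# Illustrative only -- not part of the original module. A hedged sketch of the
# cumulative transforms defined in this region (cumsum, cumprod, cummin, cummax);
# each is computed independently inside every group. `df` is hypothetical.
import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "a", "b", "b"], "val": [3, 1, 2, 5, 4]})
g = df.groupby("grp")["val"]
running_sum = g.cumsum()   # a: 3, 4, 6   b: 5, 9
running_min = g.cummin()   # a: 3, 1, 1   b: 5, 4
running_max = g.cummax()   # a: 3, 3, 3   b: 5, 5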
    zGroupBy.cumprodc                   sH   t d|ddg  dkr< fdd}| j|| jddS | jd	S )
zm
        Cumulative sum for each group.

        Returns
        -------
        Series or DataFrame
        r]  r*  ri  r   c                   s   | j f d iS r  )r]  r   r  rc   rd   r      r   z GroupBy.cumsum.<locals>.<lambda>Tr  )r]  r  r  rc   r  rd   r]    s
    zGroupBy.cumsum)r{   r*  r_   c                   sR   | dd} dkrB fdd}| j}|r2| }| j||ddS | jd||dS )	zm
        Cumulative min for each group.

        Returns
        -------
        Series or DataFrame
        ri  Tr   c                   s   t j|  S r`   )r   Zminimum
accumulater   r   rc   rd   r     r   z GroupBy.cummin.<locals>.<lambda>r  cumminr*  ri  r   r   Z_get_numeric_datar   rK  rb   r{   r*  rj   ri  rk   r   rc   r   rd   r    s      zGroupBy.cumminc                   sR   | dd} dkrB fdd}| j}|r2| }| j||ddS | jd||dS )	zm
        Cumulative max for each group.

        Returns
        -------
        Series or DataFrame
        ri  Tr   c                   s   t j|  S r`   )r   maximumr  r   r   rc   rd   r   -  r   z GroupBy.cummax.<locals>.<lambda>r  cummaxr  r  r  rc   r   rd   r    s      zGroupBy.cummaxany_allznp.dtype)	base_funcru  r*  r  r/  c              	     s   rt stdr(t s(td| j}	|	j\}
}t |
d ddd fdd}| j|d}||}| |}| |S )	a  
        Get result for Cythonized functions.

        Parameters
        ----------
        base_func : callable, Cythonized function to be called
        cython_dtype : np.dtype
            Type of the array that will be modified by the Cython call.
        numeric_only : bool, default False
            Whether only numeric datatypes should be computed
        needs_counts : bool, default False
            Whether the counts should be a part of the Cython call
        pre_processing : function, default None
            Function to be applied to `values` prior to passing to Cython.
            Function should return a tuple where the first element is the
            values to be passed to Cython and the second element is an optional
            type which the values should be converted to after being returned
            by the Cython operation. This function is also responsible for
            raising a TypeError if the values have an invalid type. Raises
            if `needs_values` is False.
        post_processing : function, default None
            Function to be applied to result of Cython function. Should accept
            an array of values as the first argument and type inferences as its
            second argument, i.e. the signature should be
            (ndarray, Type). If `needs_nullable=True`, a third argument should be
            `nullable`, to allow for processing specific to nullable values.
        how : str, default any_all
            Determines if any/all cython interface or std interface is used.
        **kwargs : dict
            Extra arguments to be passed back to Cython funcs

        Returns
        -------
        `Series` or `DataFrame`  with filled values
        z%'post_processing' must be a callable!z$'pre_processing' must be a callable!)r   r   r@  c              	     sl  | j } | jdkrdn| jd }tj| d}||f}t |d}d }rntjtjd}t||d}| jj	dk}| }|rdkr|
d}r|\}}|jdd	}|jdkr|d
}t||d}dkst| trt| 
tj}|jdkr|dd}t||d}dkr<t| t}	t||	d}n*t| trftj|jtjd}
t||
d}dkr|f d|i n
|f  | jdkr|jd dkst|j|d d df }ri }t| t|d< dkr|d r|
|d< ||f|}dkrf|rftd| } | j}t   td |jtjdd	}W 5 Q R X |
d| d}|j S )Nr   r8  )rJ  )r  )mMr  r  Fr7  )r   r   r  r   )r   )rs  )r  r  r   rs  r  zDatetimeArray | TimedeltaArrayr!  zm8[])r#   r   r:  r   r  r~  r   rQ  r9  r  rn  r<  r   r;   r6   Zuint8r  r   r   unitwarningscatch_warningsfilterwarnings)r   r  r   r   Z
inferencesr  r  rk  r   Zis_nullabler  Z	pp_kwargsr"  r  ru  r/  rj   r  r   rw  rv  rc   rd   r  q  sd    









z0GroupBy._get_cythonized_result.<locals>.blk_funcr?  )	r&  r   r~   r   r   rF  rG  rH  r	  )rb   r  ru  r*  r  rv  rw  r/  rj   r~   r  r   r  r  r  rJ  rc   r&  rd   rx  7  s    /"H

zGroupBy._get_cythonized_resultperiodsr{   c                   s   dk	s dkr4 fdd}| j || jddS | jj\}}}tjt|tjd}	t	|	|| | j
}
|
j| j|
j| j |	fidd}|S )	a  
        Shift each group by periods observations.

        If freq is passed, the index will be increased using the periods and the freq.

        Parameters
        ----------
        periods : int, default 1
            Number of periods to shift.
        freq : str, optional
            Frequency string.
        axis : axis to shift, default 0
            Shift direction.
        fill_value : optional
            The scalar value to use for newly introduced missing values.

        Returns
        -------
        Series or DataFrame
            Object shifted within each group.

        See Also
        --------
        Index.shift : Shift values of Index.
        Nr   c                   s   |   S r`   )shiftr   r{   r  freqr(  rc   rd   r     r   zGroupBy.shift.<locals>.<lambda>Tr  r8  )r  Z
allow_dups)r   r   r~   r   r   r  r   rQ  ry  Zgroup_shift_indexerr   Z_reindex_with_indexersr{   r   )rb   r(  r+  r{   r  rk   r  r   r   Zres_indexerr   r  rc   r*  rd   r)    s    zGroupBy.shift)r(  r{   r_   c                   s    dkr|   fddS | j}| j d}ddg|jdkrX|jkr|d}n4fd	d
|j D }t|r|dd |D }|| S )a  
        First discrete difference of element.

        Calculates the difference of each element compared with another
        element in the group (default is element in previous row).

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative values.
        axis : axis to shift, default 0
            Take difference over rows (0) or columns (1).

        Returns
        -------
        Series or DataFrame
            First differences.
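# Illustrative only -- not from the original source. A hedged sketch of the
# group-wise shift()/diff()/pct_change() helpers documented in this region;
# all three operate within each group, never across group boundaries.
# `df` is hypothetical.
import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "a", "b", "b"],
                   "val": [1.0, 2.0, 4.0, 10.0, 15.0]})
g = df.groupby("grp")["val"]
lagged = g.shift(1)       # a: NaN, 1, 2     b: NaN, 10
deltas = g.diff()         # a: NaN, 1, 2     b: NaN, 5
returns = g.pct_change()  # a: NaN, 1.0, 1.0 b: NaN, 0.5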
        r   c                   s   | j  dS )Nr'  )r\  r   )r{   r(  rc   rd   r     r   zGroupBy.diff.<locals>.<lambda>r'  ro  Zint16r   float32c                   s   g | ]\}}| kr|qS rc   rc   )r   cr9  )dtypes_to_f32rc   rd   r     s      z GroupBy.diff.<locals>.<listcomp>c                 S  s   i | ]
}|d qS )r,  rc   )r   r-  rc   rc   rd   
<dictcomp>  s      z GroupBy.diff.<locals>.<dictcomp>)	rm   r   r)  r   r9  r<  Zdtypesitemsr   )rb   r(  r{   r   shiftedZ	to_coercerc   )r{   r.  r(  rd   r\    s    

zGroupBy.diffr  r   )r(  fill_methodr{   c           
        s   dk	s dkr6 fdd}| j || jddS dkrFddt| d}|j| jj| j| jd	}|j| jd
}	||	 d S )z
        Calculate pct_change of each value to previous entry in group.

        Returns
        -------
        Series or DataFrame
            Percentage changes within each group.
        Nr   c                   s   | j  dS )N)r(  r2  r  r+  r{   )
pct_changer   r{   r2  r+  r  r(  rc   rd   r   -  s   z$GroupBy.pct_change.<locals>.<lambda>Tr  r  r  )r{   r   )r(  r+  r{   r   )	r   r   rr   r^   r~   codesr{   r   r)  )
rb   r(  r2  r  r+  r{   rk   ZfilledZfill_grpr1  rc   r4  rd   r3    s      zGroupBy.pct_change   )r   r_   c                 C  s   |  td|}| |S )a  
        Return first n rows of each group.

        Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
        from the original DataFrame with original index and order preserved
        (``as_index`` flag is ignored).

        Parameters
        ----------
        n : int
            If positive: number of entries to include from start of each group.
            If negative: number of entries to exclude from end of each group.

        Returns
        -------
        Series or DataFrame
            Subset of original Series or DataFrame as determined by n.
        %(see_also)s
        Examples
        --------

        >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
        ...                   columns=['A', 'B'])
        >>> df.groupby('A').head(1)
           A  B
        0  1  2
        2  5  6
        >>> df.groupby('A').head(-1)
           A  B
        0  1  2
        Nr  slicer  rb   r   r   rc   rc   rd   head@  s    #zGroupBy.headc                 C  s,   |r|  t| d}n
|  g }| |S )a  
        Return last n rows of each group.

        Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
        from the original DataFrame with original index and order preserved
        (``as_index`` flag is ignored).

        Parameters
        ----------
        n : int
            If positive: number of entries to include from end of each group.
            If negative: number of entries to exclude from start of each group.

        Returns
        -------
        Series or DataFrame
            Subset of original Series or DataFrame as determined by n.
        %(see_also)s
        Examples
        --------

        >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
        ...                   columns=['A', 'B'])
        >>> df.groupby('A').tail(1)
           A  B
        1  a  2
        3  b  2
        >>> df.groupby('A').tail(-1)
           A  B
        1  a  2
        3  b  2
        Nr7  r9  rc   rc   rd   tailf  s    $
zGroupBy.tailznpt.NDArray[np.bool_])r   r_   c                 C  sD   | j jd }||dk@ }| jdkr,| j| S | jjdd|f S dS )a  
        Return _selected_obj with mask applied to the correct axis.

        Parameters
        ----------
        mask : np.ndarray[bool]
            Boolean mask to apply.

        Returns
        -------
        Series or DataFrame
            Filtered _selected_obj.
        r   r   N)r~   r   r{   r   r;  )rb   r   r  rc   rc   rd   r    s
    

zGroupBy._mask_selected_objr"   )rP  r  r  r_   c                 C  s2  | j j}t|dkr|S | jr"|S tdd |D s8|S dd |D }| j j}|dk	rj|| |dg }tj||d}| j	r|
 }| jr| j| j|dd	d
|i}|jf |S tdd t|D }	t|	dkrt|	 \}
}|jt|dd}|| j jj|d	|d}t|	dkr&|j|
d}|jddS )aI  
        If we have categorical groupers, then we might want to make sure that
        we have a fully re-indexed output to the levels. This means expanding
        the output space to accommodate all values in the cartesian product of
        our groups, regardless of whether they were observed in the data or
        not. This will expand the output space if there are missing groups.

        The method returns early without modifying the input if the number of
        groupings is less than 2, self.observed == True or none of the groupers
        are categorical.

        Parameters
        ----------
        output : Series or DataFrame
            Object resulting from grouping and applying an operation.
        fill_value : scalar, default np.NaN
            Value to use for unobserved categories if self.observed is False.
        qs : np.ndarray[float64] or None, default None
            quantile values, only relevant for quantile.

        Returns
        -------
        Series or DataFrame
            Object (potentially) re-indexed to include all possible groups.
        r   c                 s  s   | ]}t |jttfV  qd S r`   )r   r  r=   rL   r  rc   rc   rd   r     s   z*GroupBy._reindex_output.<locals>.<genexpr>c                 S  s   g | ]
}|j qS rc   )Zgroup_indexr  rc   rc   rd   r     s     z+GroupBy._reindex_output.<locals>.<listcomp>Nr  r   Fr  c                 s  s"   | ]\}}|j r||jfV  qd S r`   r  )r   r  r  rc   rc   rd   r     s     r   )r   r{   )r   r  )r   T)drop)r~   r   r   r   rl  r   appendrN   r  r   r  rz   r   Z_get_axis_namer{   r   r   r  r   r<  Z	set_indexr   r  )rb   rP  r  r  r   r  r   r   dZin_axis_grpsZg_numsZg_namesrc   rc   rd   r    sP     

     zGroupBy._reindex_outputz
int | Nonezfloat | NonezSequence | Series | NonezRandomState | None)r   fracreplaceweightsrandom_statec                 C  s   | j jr| j S t|||}|dk	r8tj| j || jd}t|}| j	| j | j}g }	|D ]r\}
}| j
|
 }t|}|dk	r|}n|dk	stt|| }tj||||dkrdn|| |d}|	||  q\t|	}	| j j|	| jdS )a  
        Return a random sample of items from each group.

        You can use `random_state` for reproducibility.

        .. versionadded:: 1.1.0

        Parameters
        ----------
        n : int, optional
            Number of items to return for each group. Cannot be used with
            `frac` and must be no larger than the smallest group unless
            `replace` is True. Default is one if `frac` is None.
        frac : float, optional
            Fraction of items to return. Cannot be used with `n`.
        replace : bool, default False
            Allow or disallow sampling of the same row more than once.
        weights : list-like, optional
            Default None results in equal probability weighting.
            If passed a list-like then values must have the same length as
            the underlying DataFrame or Series object and will be used as
            sampling probabilities after normalization within each group.
            Values must be non-negative with at least one positive element
            within each group.
        random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
            If int, array-like, or BitGenerator, seed for random number generator.
            If np.random.RandomState or np.random.Generator, use as given.

            .. versionchanged:: 1.4.0

                np.random.Generator objects now accepted

        Returns
        -------
        Series or DataFrame
            A new object of same type as caller containing items randomly
            sampled within each group from the caller object.

        See Also
        --------
        DataFrame.sample: Generate random samples from a DataFrame object.
        numpy.random.choice: Generate a random sample from a given 1-D numpy
            array.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
        ... )
        >>> df
               a  b
        0    red  0
        1    red  1
        2   blue  2
        3   blue  3
        4  black  4
        5  black  5

        Select one row at random for each distinct value in column a. The
        `random_state` argument can be used to guarantee reproducibility:

        >>> df.groupby("a").sample(n=1, random_state=1)
               a  b
        4  black  4
        2   blue  2
        1    red  1

        Set `frac` to sample fixed proportions rather than counts:

        >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
        5    5
        2    2
        0    0
        Name: b, dtype: int64

        Control sample probabilities within groups by setting weights:

        >>> df.groupby("a").sample(
        ...     n=1,
        ...     weights=[1, 1, 1, 0, 0, 1],
        ...     random_state=1,
        ... )
               a  b
        5  black  5
        2   blue  2
        0    red  0
        Nr   )r  r@  rA  rB  )r   rT  r9   Zprocess_sampling_sizeZpreprocess_weightsr{   r   rB  r~   r   r   r   r   roundr=  r   rS  r   )rb   r   r?  r@  rA  rB  r  Zweights_arrZgroup_iteratorZsampled_indicesr   r   Zgrp_indicesZ
group_sizeZsample_sizeZ
grp_samplerc   rc   rd   r9   
  s:    `  


zGroupBy.sample)Nr   NNNNTTTFT)FF)N)FF)NFF)Fr   )Fr   )Fr   )T)T)T)Fr  N)F)r   NNF)r   NNF)NFTFT)r   F)Fr   NN)Fr   )Fr   NN)Fr   NN)Fr   )Fr   )NNN)N)N)N)N)r  r  F)T)T)r  Tr  Fr   )r   )r   )r   F)r   F)FFNNr  )r   Nr   N)r   r   )r   r  NNr   )r6  )r6  )NNFNN)Xrl   rv   rw   rx   r   r   re   ru   r   r   r   r   r   r  r  r	  r  r  r  r  r  r(   _apply_docsformatrm   r   r6  r>  r4  rK  rO  rN  rY  rf  r   rg  rz  r)   _common_see_alsorl  r"  rb  r  r  r  r  r  r  r  r+   _groupby_agg_method_templater  r  r  r  r  r  r  rD   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r]  r  r  rx  r)  r\  r3  r:  r;  r  r   NaNr  r9   rc   rc   rc   rd   r\   :  s2  
C          *0
( =
- !#' /  - , (  #
'&*  V   P   <     +         E:%  %
e U]@   YR9     _         	,&    &#(a    r\   TrE   r   r   r   r   )r   byr{   r~   r   r_   c                 C  sX   t | trddlm} |}n*t | tr8ddlm} |}ntd|  || ||||dS )Nr   )SeriesGroupBy)DataFrameGroupByzinvalid type: )r   r   r{   r~   r   )r   rR   Zpandas.core.groupby.genericrJ  rD   rK  r'  )r   rI  r{   r~   r   rJ  r   rK  rc   rc   rd   get_groupby  s    	

rL  rM   znpt.NDArray[np.float64]rN   )r  r  r_   c                   s   t | | jrvtt| } t| \}}t| j|g } fdd| jD t	
|t | g }t||| jdg d}nt| |g}|S )a  
    Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex.

    The quantile level in the MultiIndex is a repeated copy of 'qs'.

    Parameters
    ----------
    idx : Index
    qs : np.ndarray[float64]

    Returns
    -------
    MultiIndex
    c                   s   g | ]}t | qS rc   )r   r^  )r   r   r  rc   rd   r     s     z*_insert_quantile_level.<locals>.<listcomp>N)r   r5  r   )r   Z	_is_multir   rN   rM   Z	factorizer   r   r5  r   rV  r   r  )r  r  Z	lev_codesr   r   r5  mirc   rM  rd   r    s    
&r  )Nr   NT)rx   