DOC: Add dateutil to intersphinx #24437 by benjaminr · Pull Request #24443 · pandas-dev/pandas · GitHub


DOC: Add dateutil to intersphinx #24437 #24443


Merged

merged 18 commits on Dec 27, 2018
Changes from 1 commit
Commits
18 commits
4446942
DOC: Fix docstrings with the sections in the wrong order #24280
benjaminr Dec 14, 2018
baa2950
DOC: Fix docstrings with the sections in the wrong order #24280
benjaminr Dec 14, 2018
c73c5f0
Removal of whitespace to satisfy flake8 checks.
benjaminr Dec 14, 2018
c042341
Merge branch 'master' of https://github.com/pandas-dev/pandas
benjaminr Dec 16, 2018
14fe729
Refactored _doc_template to _common_see_also and used with the substi…
benjaminr Dec 16, 2018
23350de
Updated CI checks to include GL07 docstring validation.
benjaminr Dec 16, 2018
3d18f6b
Docstring appends were in the wrong order - this corrects that. Altho…
benjaminr Dec 16, 2018
a1db80b
Use of format on _shared_docs['aggregate'] docstring usage to get the…
benjaminr Dec 17, 2018
faac16d
Merge branch 'master' of https://github.com/pandas-dev/pandas
benjaminr Dec 17, 2018
0a7ed6d
Merge branch 'master' of https://github.com/pandas-dev/pandas
benjaminr Dec 18, 2018
64af1fc
Updated code check order and included GL07 in messages.
benjaminr Dec 18, 2018
3e54918
Use of substitution to clean up docstring order fix.
benjaminr Dec 18, 2018
5f64fcf
Corrected order of see_also and examples string definitions to match do…
benjaminr Dec 18, 2018
bdeb8ae
Updated name of _agg_see_also_doc to reflect fact it also contains a …
benjaminr Dec 18, 2018
fc5fc69
Fix added import statement to be in the correct order.
benjaminr Dec 18, 2018
828c03b
Merge of upstream changes.
benjaminr Dec 26, 2018
91d13fe
Addition of dateutil docs to intersphinx mapping to pull in objects f…
benjaminr Dec 26, 2018
ae6aa27
Reordered listing of mapping to be alphabetical.
benjaminr Dec 26, 2018
DOC: Fix docstrings with the sections in the wrong order #24280
benjaminr committed Dec 14, 2018
commit 44469421fb8c1735f7ebecc2de0bb332e7725bbc
56 changes: 28 additions & 28 deletions pandas/_libs/interval.pyx
@@ -158,6 +158,29 @@ cdef class Interval(IntervalMixin):
Whether the interval is closed on the left-side, right-side, both or
neither. See the Notes for more detailed explanation.

See Also
--------
IntervalIndex : An Index of Interval objects that are all closed on the
same side.
cut : Convert continuous data into discrete bins (Categorical
of Interval objects).
qcut : Convert continuous data into bins (Categorical of Interval objects)
based on quantiles.
Period : Represents a period of time.
"""
_typ = "interval"

cdef readonly object left
"""Left bound for the interval"""

cdef readonly object right
"""Right bound for the interval"""

cdef readonly str closed
"""
Whether the interval is closed on the left-side, right-side, both or
neither

Notes
-----
The parameters `left` and `right` must be from the same type, you must be
@@ -226,29 +249,6 @@ cdef class Interval(IntervalMixin):
>>> volume_1 = pd.Interval('Ant', 'Dog', closed='both')
>>> 'Bee' in volume_1
True

See Also
--------
IntervalIndex : An Index of Interval objects that are all closed on the
same side.
cut : Convert continuous data into discrete bins (Categorical
of Interval objects).
qcut : Convert continuous data into bins (Categorical of Interval objects)
based on quantiles.
Period : Represents a period of time.
"""
_typ = "interval"

cdef readonly object left
"""Left bound for the interval"""

cdef readonly object right
"""Right bound for the interval"""

cdef readonly str closed
"""
Whether the interval is closed on the left-side, right-side, both or
neither
"""

def __init__(self, left, right, str closed='right'):
@@ -387,6 +387,11 @@ cdef class Interval(IntervalMixin):
bool
``True`` if the two intervals overlap, else ``False``.

See Also
--------
IntervalArray.overlaps : The corresponding method for IntervalArray
IntervalIndex.overlaps : The corresponding method for IntervalIndex

Examples
--------
>>> i1 = pd.Interval(0, 2)
@@ -409,11 +414,6 @@ cdef class Interval(IntervalMixin):
>>> i6 = pd.Interval(1, 2, closed='neither')
>>> i4.overlaps(i6)
False

See Also
--------
IntervalArray.overlaps : The corresponding method for IntervalArray
IntervalIndex.overlaps : The corresponding method for IntervalIndex
"""
if not isinstance(other, Interval):
msg = '`other` must be an Interval, got {other}'
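
The reshuffling above (and the analogous moves in the files that follow) brings the docstrings in line with the numpydoc convention that See Also comes before Notes and Examples, which is what the GL07 docstring-order check enabled in the CI commits validates. As a reference point, a skeleton docstring in the expected section order might look like the sketch below; the function and its text are illustrative, not lines taken from the diff.

def overlaps_sketch(other):
    """
    One-line summary in the imperative mood.

    Parameters
    ----------
    other : Interval
        Interval to check against for an overlap.

    Returns
    -------
    bool
        ``True`` if the two intervals overlap, else ``False``.

    Raises
    ------
    TypeError
        If ``other`` is not an Interval.

    See Also
    --------
    IntervalArray.overlaps : The corresponding method for IntervalArray.

    Notes
    -----
    Notes follow See Also.

    Examples
    --------
    >>> overlaps_sketch(...)  # doctest: +SKIP
    """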
16 changes: 8 additions & 8 deletions pandas/_libs/tslibs/timedeltas.pyx
@@ -1059,6 +1059,10 @@ cdef class _Timedelta(timedelta):
-------
formatted : str

See Also
--------
Timestamp.isoformat

Notes
-----
The longest component is days, whose value may be larger than
@@ -1081,10 +1085,6 @@ cdef class _Timedelta(timedelta):
'P0DT0H0M10S'
>>> pd.Timedelta(days=500.5).isoformat()
'P500DT12H0MS'

See Also
--------
Timestamp.isoformat
"""
components = self.components
seconds = '{}.{:0>3}{:0>3}{:0>3}'.format(components.seconds,
@@ -1210,14 +1210,14 @@ class Timedelta(_Timedelta):
"""
Round the Timedelta to the specified resolution

Returns
-------
a new Timedelta rounded to the given resolution of `freq`

Parameters
----------
freq : a freq string indicating the rounding resolution

Returns
-------
a new Timedelta rounded to the given resolution of `freq`

Raises
------
ValueError if the freq cannot be converted
8 changes: 4 additions & 4 deletions pandas/_libs/tslibs/timestamps.pyx
@@ -776,10 +776,6 @@ class Timestamp(_Timestamp):
"""
Round the Timestamp to the specified resolution

Returns
-------
a new Timestamp rounded to the given resolution of `freq`

Parameters
----------
freq : a freq string indicating the rounding resolution
@@ -802,6 +798,10 @@ class Timestamp(_Timestamp):

.. versionadded:: 0.24.0

Returns
-------
a new Timestamp rounded to the given resolution of `freq`

Raises
------
ValueError if the freq cannot be converted
8 changes: 4 additions & 4 deletions pandas/core/accessor.py
@@ -201,6 +201,10 @@ def decorator(accessor):
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.

See Also
--------
%(others)s

Notes
-----
When accessed, your accessor will be initialized with the pandas object
@@ -250,10 +254,6 @@ def plot(self):
(5.0, 10.0)
>>> ds.geo.plot()
# plots data on a map

See Also
--------
%(others)s
"""


24 changes: 12 additions & 12 deletions pandas/core/frame.py
@@ -6046,6 +6046,17 @@ def _gotitem(self,
axis=0)``.

`agg` is an alias for `aggregate`. Use the alias.

See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.

Examples
--------
@@ -6078,23 +6089,12 @@ def _gotitem(self,
2 8.0
3 NaN
dtype: float64

See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")

@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
@Appender(_agg_doc)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)

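
The decorator swap on DataFrame.aggregate above (repeated for the groupby, resample and Series aggregate methods further down) matters because stacked decorators apply bottom-up: with an append-style decorator, the text attached by the line closest to def is appended first and therefore appears earliest in the combined docstring. The snippet below is a simplified stand-in written only to illustrate that mechanism, not the actual pandas.util._decorators.Appender implementation:

def appender(addendum):
    # Simplified stand-in: append ``addendum`` to the function's docstring.
    def decorator(func):
        func.__doc__ = (func.__doc__ or '') + addendum
        return func
    return decorator

SHARED = "Parameters\n----------\n...\n\nReturns\n-------\n...\n"
EXTRAS = "\nSee Also\n--------\n...\n\nExamples\n--------\n...\n"

@appender(EXTRAS)   # applied second -> its text is appended last
@appender(SHARED)   # applied first  -> its text is appended first
def aggregate():
    pass

print(aggregate.__doc__.startswith("Parameters"))  # True
# Swapping the two decorator lines would put EXTRAS before SHARED instead.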
14 changes: 7 additions & 7 deletions pandas/core/groupby/generic.py
@@ -1247,6 +1247,12 @@ class DataFrameGroupBy(NDFrameGroupBy):
_block_agg_axis = 1

_agg_doc = dedent("""
See Also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate

Examples
--------

@@ -1294,19 +1300,13 @@ class DataFrameGroupBy(NDFrameGroupBy):
A
1 1 2 0.590716
2 3 4 0.704907

See Also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")

@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)

28 changes: 24 additions & 4 deletions pandas/core/groupby/groupby.py
@@ -1080,7 +1080,6 @@ def count(self):
raise NotImplementedError

@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values.
@@ -1089,6 +1088,12 @@ def mean(self, *args, **kwargs):
-------
pandas.Series or pandas.DataFrame

See Also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s

Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
@@ -1528,7 +1533,6 @@ def backfill(self, limit=None):
bfill = backfill

@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
@@ -1547,6 +1551,12 @@ def nth(self, n, dropna=None):
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'

See Also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s

Examples
--------

@@ -2032,14 +2042,19 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
return (filled / shifted) - 1

@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.

Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.

See Also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s

Examples
--------

@@ -2059,14 +2074,19 @@ def head(self, n=5):
return self._selected_obj[mask]

@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group.

Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.

See Also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s

Examples
--------

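
In the groupby hunks above, the removed @Appender(_doc_template) lines are replaced by writing the See Also block directly into each docstring with %(name)s placeholders, which the remaining @Substitution(name='groupby') decorator fills in. A minimal stand-in for that substitution step (not the actual pandas decorator) looks like this:

def substitution(**params):
    # Simplified stand-in: fill %-style placeholders in the docstring.
    def decorator(func):
        if func.__doc__:
            func.__doc__ = func.__doc__ % params
        return func
    return decorator

@substitution(name='groupby')
def head(n=5):
    """
    Returns first n rows of each group.

    See Also
    --------
    pandas.Series.%(name)s
    pandas.DataFrame.%(name)s
    """

print('pandas.Series.groupby' in head.__doc__)  # True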
2 changes: 1 addition & 1 deletion pandas/core/resample.py
@@ -253,11 +253,11 @@ def pipe(self, func, *args, **kwargs):
2013-01-01 00:00:04 5 NaN
""")

@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
@Appender(_agg_doc)
def aggregate(self, func, *args, **kwargs):

self._set_binner()
2 changes: 1 addition & 1 deletion pandas/core/series.py
@@ -3302,10 +3302,10 @@ def _gotitem(self, key, ndim, subset=None):
dtype: int64
""")

@Appender(_agg_doc)
@Appender(generic._shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
@Appender(_agg_doc)
def aggregate(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)