Updated script so it can be controlled by a Node.js web app

This commit is contained in:
mac OS
2024-11-25 12:24:18 +07:00
parent c440eda1f4
commit 8b0ab2bd3a
8662 changed files with 1803808 additions and 34 deletions

View File

@@ -0,0 +1,8 @@
from pandas.tests.extension.decimal.array import (
DecimalArray,
DecimalDtype,
make_data,
to_decimal,
)
__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"]

View File

@@ -0,0 +1,311 @@
from __future__ import annotations
import decimal
import numbers
import sys
from typing import TYPE_CHECKING
import numpy as np
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
is_dtype_equal,
is_float,
is_integer,
pandas_dtype,
)
import pandas as pd
from pandas.api.extensions import (
no_default,
register_extension_dtype,
)
from pandas.api.types import (
is_list_like,
is_scalar,
)
from pandas.core import arraylike
from pandas.core.algorithms import value_counts_internal as value_counts
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import (
ExtensionArray,
ExtensionScalarOpsMixin,
)
from pandas.core.indexers import check_array_indexer
if TYPE_CHECKING:
from pandas._typing import type_t
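# Registering the dtype lets pandas resolve the string alias "decimal",
# e.g. pd.array(values, dtype="decimal"), as used in the tests below.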
@register_extension_dtype
class DecimalDtype(ExtensionDtype):
type = decimal.Decimal
name = "decimal"
na_value = decimal.Decimal("NaN")
_metadata = ("context",)
def __init__(self, context=None) -> None:
self.context = context or decimal.getcontext()
def __repr__(self) -> str:
return f"DecimalDtype(context={self.context})"
@classmethod
def construct_array_type(cls) -> type_t[DecimalArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return DecimalArray
@property
def _is_numeric(self) -> bool:
return True
class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False, context=None) -> None:
for i, val in enumerate(values):
if is_float(val) or is_integer(val):
if np.isnan(val):
values[i] = DecimalDtype.na_value
else:
# error: Argument 1 has incompatible type "float | int |
# integer[Any]"; expected "Decimal | float | str | tuple[int,
# Sequence[int], int]"
values[i] = DecimalDtype.type(val) # type: ignore[arg-type]
elif not isinstance(val, decimal.Decimal):
raise TypeError("All values must be of type " + str(decimal.Decimal))
values = np.asarray(values, dtype=object)
self._data = values
# Some aliases for common attribute names to ensure pandas supports
# these
self._items = self.data = self._data
# those aliases are currently not working due to assumptions
# in internal code (GH-20735)
# self._values = self.values = self.data
self._dtype = DecimalDtype(context)
@property
def dtype(self):
return self._dtype
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
return cls(scalars)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
return cls._from_sequence(
[decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy
)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
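# Input types that __array_ufunc__ below will operate on (in addition to
# DecimalArray itself); anything else makes it return NotImplemented so
# NumPy can defer to other implementations.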
_HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
def to_numpy(
self,
dtype=None,
copy: bool = False,
na_value: object = no_default,
decimals=None,
) -> np.ndarray:
result = np.asarray(self, dtype=dtype)
if decimals is not None:
result = np.asarray([round(x, decimals) for x in result])
return result
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# Only handle inputs we recognize; otherwise let NumPy try other implementations.
if not all(
isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs
):
return NotImplemented
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
# e.g. test_array_ufunc_series_scalar_other
return result
if "out" in kwargs:
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
def reconstruct(x):
if isinstance(x, (decimal.Decimal, numbers.Number)):
return x
else:
return type(self)._from_sequence(x, dtype=self.dtype)
if ufunc.nout > 1:
return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self._data[item]
else:
# array, slice.
item = pd.api.indexers.check_array_indexer(self, item)
return type(self)(self._data[item])
def take(self, indexer, allow_fill=False, fill_value=None):
from pandas.api.extensions import take
data = self._data
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
def copy(self):
return type(self)(self._data.copy(), dtype=self.dtype)
def astype(self, dtype, copy=True):
if is_dtype_equal(dtype, self._dtype):
if not copy:
return self
dtype = pandas_dtype(dtype)
if isinstance(dtype, type(self.dtype)):
return type(self)(self._data, copy=copy, context=dtype.context)
return super().astype(dtype, copy=copy)
def __setitem__(self, key, value) -> None:
if is_list_like(value):
if is_scalar(key):
raise ValueError("setting an array element with a sequence.")
value = [decimal.Decimal(v) for v in value]
else:
value = decimal.Decimal(value)
key = check_array_indexer(self, key)
self._data[key] = value
def __len__(self) -> int:
return len(self._data)
def __contains__(self, item) -> bool | np.bool_:
if not isinstance(item, decimal.Decimal):
return False
elif item.is_nan():
return self.isna().any()
else:
return super().__contains__(item)
@property
def nbytes(self) -> int:
n = len(self)
if n:
return n * sys.getsizeof(self[0])
return 0
def isna(self):
return np.array([x.is_nan() for x in self._data], dtype=bool)
@property
def _na_value(self):
return decimal.Decimal("NaN")
def _formatter(self, boxed=False):
if boxed:
return "Decimal: {}".format
return repr
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate([x._data for x in to_concat]))
def _reduce(
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
if skipna and self.isna().any():
# If we don't have any NAs, we can ignore skipna
other = self[~self.isna()]
result = other._reduce(name, **kwargs)
elif name == "sum" and len(self) == 0:
# GH#29630 avoid returning int 0 or np.bool_(False) on old numpy
result = decimal.Decimal(0)
else:
try:
op = getattr(self.data, name)
except AttributeError as err:
raise NotImplementedError(
f"decimal does not support the {name} operation"
) from err
result = op(axis=0)
if keepdims:
return type(self)([result])
else:
return result
def _cmp_method(self, other, op):
# For use with OpsMixin
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
else:
# Assume it's an object
ovalues = [param] * len(self)
return ovalues
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
return np.asarray(res, dtype=bool)
def value_counts(self, dropna: bool = True):
return value_counts(self.to_numpy(), dropna=dropna)
# We override fillna here to simulate a 3rd-party EA that has done so. This
# lets us test the deprecation telling authors to implement _pad_or_backfill.
# It also simulates a 3rd-party EA that has not yet updated to include a "copy"
# keyword in its fillna method.
# error: Signature of "fillna" incompatible with supertype "ExtensionArray"
def fillna( # type: ignore[override]
self,
value=None,
method=None,
limit: int | None = None,
):
return super().fillna(value=value, method=method, limit=limit, copy=True)
def to_decimal(values, context=None):
return DecimalArray([decimal.Decimal(x) for x in values], context=context)
def make_data():
return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)]
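# Attach the arithmetic dunder methods generated by ExtensionScalarOpsMixin
# (element-wise ops whose results are coerced back to DecimalArray where possible).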
DecimalArray._add_arithmetic_ops()
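
For orientation, a minimal usage sketch of the "decimal" extension dtype defined above (illustrative only, not part of the committed file; it assumes pandas is importable and mirrors constructs used by the tests in the next file):

import decimal
import pandas as pd
from pandas.tests.extension.decimal.array import to_decimal

arr = to_decimal(["1.0", "2.5", "3.5"])  # DecimalArray of decimal.Decimal values
ser = pd.Series(arr)                     # Series backed by the extension array
assert ser.dtype.name == "decimal"
doubled = ser + ser                      # dunders attached by _add_arithmetic_ops()
rounded = arr.to_numpy(decimals=1)       # custom keyword added in to_numpy above
alias = pd.array([decimal.Decimal("1.1")], dtype="decimal")  # registered string alias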

View File

@@ -0,0 +1,567 @@
from __future__ import annotations
import decimal
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension import base
from pandas.tests.extension.decimal.array import (
DecimalArray,
DecimalDtype,
make_data,
to_decimal,
)
@pytest.fixture
def dtype():
return DecimalDtype()
@pytest.fixture
def data():
return DecimalArray(make_data())
@pytest.fixture
def data_for_twos():
return DecimalArray([decimal.Decimal(2) for _ in range(100)])
@pytest.fixture
def data_missing():
return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)])
@pytest.fixture
def data_for_sorting():
return DecimalArray(
[decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")]
)
@pytest.fixture
def data_missing_for_sorting():
return DecimalArray(
[decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")]
)
@pytest.fixture
def na_cmp():
return lambda x, y: x.is_nan() and y.is_nan()
@pytest.fixture
def data_for_grouping():
b = decimal.Decimal("1.0")
a = decimal.Decimal("0.0")
c = decimal.Decimal("2.0")
na = decimal.Decimal("NaN")
return DecimalArray([b, b, na, na, a, a, b, c])
class TestDecimalArray(base.ExtensionTests):
def _get_expected_exception(
self, op_name: str, obj, other
) -> type[Exception] | None:
return None
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
return True
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
if op_name == "count":
return super().check_reduce(ser, op_name, skipna)
else:
result = getattr(ser, op_name)(skipna=skipna)
expected = getattr(np.asarray(ser), op_name)()
tm.assert_almost_equal(result, expected)
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
if all_numeric_reductions in ["kurt", "skew", "sem", "median"]:
mark = pytest.mark.xfail(raises=NotImplementedError)
request.applymarker(mark)
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
op_name = all_numeric_reductions
if op_name in ["skew", "median"]:
mark = pytest.mark.xfail(raises=NotImplementedError)
request.applymarker(mark)
return super().test_reduce_frame(data, all_numeric_reductions, skipna)
def test_compare_scalar(self, data, comparison_op):
ser = pd.Series(data)
self._compare_other(ser, data, comparison_op, 0.5)
def test_compare_array(self, data, comparison_op):
ser = pd.Series(data)
alter = np.random.default_rng(2).choice([-1, 0, 1], len(data))
# Randomly double, halve or keep same value
other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]
self._compare_other(ser, data, comparison_op, other)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
op_name = all_arithmetic_operators
ser = pd.Series(data)
context = decimal.getcontext()
divbyzerotrap = context.traps[decimal.DivisionByZero]
invalidoptrap = context.traps[decimal.InvalidOperation]
context.traps[decimal.DivisionByZero] = 0
context.traps[decimal.InvalidOperation] = 0
# Decimal supports ops with int, but not float
other = pd.Series([int(d * 100) for d in data])
self.check_opname(ser, op_name, other)
if "mod" not in op_name:
self.check_opname(ser, op_name, ser * 2)
self.check_opname(ser, op_name, 0)
self.check_opname(ser, op_name, 5)
context.traps[decimal.DivisionByZero] = divbyzerotrap
context.traps[decimal.InvalidOperation] = invalidoptrap
def test_fillna_frame(self, data_missing):
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
DeprecationWarning, match=msg, check_stacklevel=False
):
super().test_fillna_frame(data_missing)
def test_fillna_limit_pad(self, data_missing):
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
with tm.assert_produces_warning(
DeprecationWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
super().test_fillna_limit_pad(data_missing)
msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
with tm.assert_produces_warning(
FutureWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.parametrize(
"limit_area, input_ilocs, expected_ilocs",
[
("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
],
)
def test_ffill_limit_area(
self, data_missing, limit_area, input_ilocs, expected_ilocs
):
# GH#56616
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
with tm.assert_produces_warning(
DeprecationWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
msg = "DecimalArray does not implement limit_area"
with pytest.raises(NotImplementedError, match=msg):
super().test_ffill_limit_area(
data_missing, limit_area, input_ilocs, expected_ilocs
)
def test_fillna_limit_backfill(self, data_missing):
msg = "Series.fillna with 'method' is deprecated"
with tm.assert_produces_warning(
FutureWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
super().test_fillna_limit_backfill(data_missing)
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
with tm.assert_produces_warning(
DeprecationWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
super().test_fillna_limit_backfill(data_missing)
msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
with tm.assert_produces_warning(
FutureWarning,
match=msg,
check_stacklevel=False,
raise_on_extra_warnings=False,
):
super().test_fillna_limit_backfill(data_missing)
def test_fillna_no_op_returns_copy(self, data):
msg = "|".join(
[
"ExtensionArray.fillna 'method' keyword is deprecated",
"The 'method' keyword in DecimalArray.fillna is deprecated",
]
)
with tm.assert_produces_warning(
(FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
):
super().test_fillna_no_op_returns_copy(data)
def test_fillna_series(self, data_missing):
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(
DeprecationWarning, match=msg, check_stacklevel=False
):
super().test_fillna_series(data_missing)
def test_fillna_series_method(self, data_missing, fillna_method):
msg = "|".join(
[
"ExtensionArray.fillna 'method' keyword is deprecated",
"The 'method' keyword in DecimalArray.fillna is deprecated",
]
)
with tm.assert_produces_warning(
(FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
):
super().test_fillna_series_method(data_missing, fillna_method)
def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
warn = DeprecationWarning if not using_copy_on_write else None
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
super().test_fillna_copy_frame(data_missing)
def test_fillna_copy_series(self, data_missing, using_copy_on_write):
warn = DeprecationWarning if not using_copy_on_write else None
msg = "ExtensionArray.fillna added a 'copy' keyword"
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
super().test_fillna_copy_series(data_missing)
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna, request):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
vcs = pd.Series(all_data).value_counts(dropna=dropna)
vcs_ex = pd.Series(other).value_counts(dropna=dropna)
with decimal.localcontext() as ctx:
# avoid raising when comparing Decimal("NAN") < Decimal(2)
ctx.traps[decimal.InvalidOperation] = False
result = vcs.sort_index()
expected = vcs_ex.sort_index()
tm.assert_series_equal(result, expected)
def test_series_repr(self, data):
# Overriding this base test to explicitly test that
# the custom _formatter is used
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
assert "Decimal: " in repr(ser)
@pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")
@pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
super().test_unary_ufunc_dunder_equivalence(data, ufunc)
def test_take_na_value_other_decimal():
arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])
tm.assert_extension_array_equal(result, expected)
def test_series_constructor_coerce_data_to_extension_dtype():
dtype = DecimalDtype()
ser = pd.Series([0, 1, 2], dtype=dtype)
arr = DecimalArray(
[decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],
dtype=dtype,
)
exp = pd.Series(arr)
tm.assert_series_equal(ser, exp)
def test_series_constructor_with_dtype():
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.Series(arr, dtype=DecimalDtype())
expected = pd.Series(arr)
tm.assert_series_equal(result, expected)
result = pd.Series(arr, dtype="int64")
expected = pd.Series([10])
tm.assert_series_equal(result, expected)
def test_dataframe_constructor_with_dtype():
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())
expected = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, expected)
arr = DecimalArray([decimal.Decimal("10.0")])
result = pd.DataFrame({"A": arr}, dtype="int64")
expected = pd.DataFrame({"A": [10]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("frame", [True, False])
def test_astype_dispatches(frame):
# This is a dtype-specific test that ensures Series[decimal].astype
# gets all the way through to ExtensionArray.astype
# Designing a reliable smoke test that works for arbitrary data types
# is difficult.
data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")
ctx = decimal.Context()
ctx.prec = 5
if frame:
data = data.to_frame()
result = data.astype(DecimalDtype(ctx))
if frame:
result = result["a"]
assert result.dtype.context.prec == ctx.prec
class DecimalArrayWithoutFromSequence(DecimalArray):
"""Helper class for testing error handling in _from_sequence."""
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
raise KeyError("For the test")
class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
@classmethod
def _create_arithmetic_method(cls, op):
return cls._create_method(op, coerce_to_dtype=False)
DecimalArrayWithoutCoercion._add_arithmetic_ops()
def test_combine_from_sequence_raises(monkeypatch):
# https://github.com/pandas-dev/pandas/issues/22850
cls = DecimalArrayWithoutFromSequence
@classmethod
def construct_array_type(cls):
return DecimalArrayWithoutFromSequence
monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)
arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
ser = pd.Series(arr)
result = ser.combine(ser, operator.add)
# note: object dtype
expected = pd.Series(
[decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]
)
def test_scalar_ops_from_sequence_raises(class_):
# op(EA, EA) should return an EA, or an ndarray if it's not possible
# to return an EA with the return values.
arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
result = arr + arr
expected = np.array(
[decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"reverse, expected_div, expected_mod",
[(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])],
)
def test_divmod_array(reverse, expected_div, expected_mod):
# https://github.com/pandas-dev/pandas/issues/22930
arr = to_decimal([1, 2, 3, 4])
if reverse:
div, mod = divmod(2, arr)
else:
div, mod = divmod(arr, 2)
expected_div = to_decimal(expected_div)
expected_mod = to_decimal(expected_mod)
tm.assert_extension_array_equal(div, expected_div)
tm.assert_extension_array_equal(mod, expected_mod)
def test_ufunc_fallback(data):
a = data[:5]
s = pd.Series(a, index=range(3, 8))
result = np.abs(s)
expected = pd.Series(np.abs(a), index=range(3, 8))
tm.assert_series_equal(result, expected)
def test_array_ufunc():
a = to_decimal([1, 2, 3])
result = np.exp(a)
expected = to_decimal(np.exp(a._data))
tm.assert_extension_array_equal(result, expected)
def test_array_ufunc_series():
a = to_decimal([1, 2, 3])
s = pd.Series(a)
result = np.exp(s)
expected = pd.Series(to_decimal(np.exp(a._data)))
tm.assert_series_equal(result, expected)
def test_array_ufunc_series_scalar_other():
# check _HANDLED_TYPES
a = to_decimal([1, 2, 3])
s = pd.Series(a)
result = np.add(s, decimal.Decimal(1))
expected = pd.Series(np.add(a, decimal.Decimal(1)))
tm.assert_series_equal(result, expected)
def test_array_ufunc_series_defer():
a = to_decimal([1, 2, 3])
s = pd.Series(a)
expected = pd.Series(to_decimal([2, 4, 6]))
r1 = np.add(s, a)
r2 = np.add(a, s)
tm.assert_series_equal(r1, expected)
tm.assert_series_equal(r2, expected)
def test_groupby_agg():
# Ensure that the result of agg is inferred to be decimal dtype
# https://github.com/pandas-dev/pandas/issues/29141
data = make_data()[:5]
df = pd.DataFrame(
{"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
)
# single key, selected column
expected = pd.Series(to_decimal([data[0], data[3]]))
result = df.groupby("id1")["decimals"].agg(lambda x: x.iloc[0])
tm.assert_series_equal(result, expected, check_names=False)
result = df["decimals"].groupby(df["id1"]).agg(lambda x: x.iloc[0])
tm.assert_series_equal(result, expected, check_names=False)
# multiple keys, selected column
expected = pd.Series(
to_decimal([data[0], data[1], data[3]]),
index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]),
)
result = df.groupby(["id1", "id2"])["decimals"].agg(lambda x: x.iloc[0])
tm.assert_series_equal(result, expected, check_names=False)
result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(lambda x: x.iloc[0])
tm.assert_series_equal(result, expected, check_names=False)
# multiple columns
expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])})
result = df.groupby("id1").agg(lambda x: x.iloc[0])
tm.assert_frame_equal(result, expected, check_names=False)
def test_groupby_agg_ea_method(monkeypatch):
# Ensure that the result of agg is inferred to be decimal dtype
# https://github.com/pandas-dev/pandas/issues/29141
def DecimalArray__my_sum(self):
return np.sum(np.array(self))
monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False)
data = make_data()[:5]
df = pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)})
expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))
result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum())
tm.assert_series_equal(result, expected, check_names=False)
s = pd.Series(DecimalArray(data))
grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64)
result = s.groupby(grouper).agg(lambda x: x.values.my_sum())
tm.assert_series_equal(result, expected, check_names=False)
def test_indexing_no_materialize(monkeypatch):
# See https://github.com/pandas-dev/pandas/issues/29708
# Ensure that indexing operations do not materialize (convert to a numpy
# array) the ExtensionArray unnecessary
def DecimalArray__array__(self, dtype=None):
raise Exception("tried to convert a DecimalArray to a numpy array")
monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, raising=False)
data = make_data()
s = pd.Series(DecimalArray(data))
df = pd.DataFrame({"a": s, "b": range(len(s))})
# ensure the following operations do not raise an error
s[s > 0.5]
df[s > 0.5]
s.at[0]
df.at[0, "a"]
def test_to_numpy_keyword():
# test the extra keyword
values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")]
expected = np.array(
[decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object"
)
a = pd.array(values, dtype="decimal")
result = a.to_numpy(decimals=2)
tm.assert_numpy_array_equal(result, expected)
result = pd.Series(a).to_numpy(decimals=2)
tm.assert_numpy_array_equal(result, expected)
def test_array_copy_on_write(using_copy_on_write):
df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object")
df2 = df.astype(DecimalDtype())
df.iloc[0, 0] = 0
if using_copy_on_write:
expected = pd.DataFrame(
{"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype()
)
tm.assert_equal(df2.values, expected.values)