Skip to content

Commit bfe2d52

Browse files
authored
TST: Use more xfail instead of skip (#45719)
1 parent 5d0f6ed commit bfe2d52

File tree

16 files changed

+97
-58
lines changed

16 files changed

+97
-58
lines changed

pandas/tests/apply/test_series_apply.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -284,8 +284,6 @@ def test_transform_partial_failure(op, request):
284284
raises=AssertionError, reason=f"{op} is successful on any dtype"
285285
)
286286
)
287-
if op in ("rank", "fillna"):
288-
pytest.skip(f"{op} doesn't raise TypeError on object")
289287

290288
# Using object makes most transform kernels fail
291289
ser = Series(3 * [object])
@@ -497,9 +495,13 @@ def test_map(datetime_series):
497495
tm.assert_series_equal(a.map(c), exp)
498496

499497

500-
def test_map_empty(index):
498+
def test_map_empty(request, index):
501499
if isinstance(index, MultiIndex):
502-
pytest.skip("Initializing a Series from a MultiIndex is not supported")
500+
request.node.add_marker(
501+
pytest.mark.xfail(
502+
reason="Initializing a Series from a MultiIndex is not supported"
503+
)
504+
)
503505

504506
s = Series(index)
505507
result = s.map({})

pandas/tests/base/test_misc.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -148,14 +148,18 @@ def test_memory_usage_components_narrow_series(dtype):
148148
assert total_usage == non_index_usage + index_usage
149149

150150

151-
def test_searchsorted(index_or_series_obj):
151+
def test_searchsorted(request, index_or_series_obj):
152152
# numpy.searchsorted calls obj.searchsorted under the hood.
153153
# See gh-12238
154154
obj = index_or_series_obj
155155

156156
if isinstance(obj, pd.MultiIndex):
157157
# See gh-14833
158-
pytest.skip("np.searchsorted doesn't work on pd.MultiIndex")
158+
request.node.add_marker(
159+
pytest.mark.xfail(
160+
reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833"
161+
)
162+
)
159163

160164
max_obj = max(obj, default=0)
161165
index = np.searchsorted(obj, max_obj)

pandas/tests/extension/test_sparse.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -453,7 +453,7 @@ def _skip_if_different_combine(self, data):
453453
# arith ops call on dtype.fill_value so that the sparsity
454454
# is maintained. Combine can't be called on a dtype in
455455
# general, so we can't make the expected. This is tested elsewhere
456-
raise pytest.skip("Incorrected expected from Series.combine")
456+
pytest.skip("Incorrect expected from Series.combine and tested elsewhere")
457457

458458
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
459459
self._skip_if_different_combine(data)

pandas/tests/frame/test_reductions.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1471,9 +1471,15 @@ def test_reductions_deprecation_level_argument(
14711471
with tm.assert_produces_warning(FutureWarning, match="level"):
14721472
getattr(obj, reduction_functions)(level=0)
14731473

1474-
def test_reductions_skipna_none_raises(self, frame_or_series, reduction_functions):
1475-
if reduction_functions in ["count", "mad"]:
1476-
pytest.skip("Count does not accept skipna. Mad needs a deprecation cycle.")
1474+
def test_reductions_skipna_none_raises(
1475+
self, request, frame_or_series, reduction_functions
1476+
):
1477+
if reduction_functions == "count":
1478+
request.node.add_marker(
1479+
pytest.mark.xfail(reason="Count does not accept skipna")
1480+
)
1481+
elif reduction_functions == "mad":
1482+
pytest.skip("Mad needs a deprecation cycle: GH 11787")
14771483
obj = frame_or_series([1, 2, 3])
14781484
msg = 'For argument "skipna" expected type bool, received type NoneType.'
14791485
with pytest.raises(ValueError, match=msg):

pandas/tests/groupby/test_groupby.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -713,11 +713,8 @@ def test_ops_not_as_index(reduction_func):
713713
# GH 10355, 21090
714714
# Using as_index=False should not modify grouped column
715715

716-
if reduction_func in ("corrwith",):
717-
pytest.skip("Test not applicable")
718-
719-
if reduction_func in ("nth", "ngroup"):
720-
pytest.skip("Skip until behavior is determined (GH #5755)")
716+
if reduction_func in ("corrwith", "nth", "ngroup"):
717+
pytest.skip(f"GH 5755: Test not applicable for {reduction_func}")
721718

722719
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
723720
expected = getattr(df.groupby("a"), reduction_func)()
@@ -2268,7 +2265,7 @@ def test_groupby_duplicate_index():
22682265
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
22692266
def test_dup_labels_output_shape(groupby_func, idx):
22702267
if groupby_func in {"size", "ngroup", "cumcount"}:
2271-
pytest.skip("Not applicable")
2268+
pytest.skip(f"Not applicable for {groupby_func}")
22722269
# TODO(2.0) Remove after pad/backfill deprecation enforced
22732270
groupby_func = maybe_normalize_deprecated_kernels(groupby_func)
22742271
df = DataFrame([[1, 1]], columns=idx)

pandas/tests/groupby/test_groupby_subclass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ def test_groupby_preserves_subclass(obj, groupby_func):
2424
# GH28330 -- preserve subclass through groupby operations
2525

2626
if isinstance(obj, Series) and groupby_func in {"corrwith"}:
27-
pytest.skip("Not applicable")
27+
pytest.skip(f"Not applicable for Series and {groupby_func}")
2828
# TODO(2.0) Remove after pad/backfill deprecation enforced
2929
groupby_func = maybe_normalize_deprecated_kernels(groupby_func)
3030
grouped = obj.groupby(np.arange(0, 10))

pandas/tests/io/excel/test_readers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1562,7 +1562,7 @@ def test_corrupt_files_closed(self, engine, read_ext):
15621562
# GH41778
15631563
errors = (BadZipFile,)
15641564
if engine is None:
1565-
pytest.skip()
1565+
pytest.skip(f"Invalid test for engine={engine}")
15661566
elif engine == "xlrd":
15671567
import xlrd
15681568

pandas/tests/io/json/test_pandas.py

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1553,9 +1553,20 @@ def test_timedelta_as_label(self, date_format, key):
15531553
("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
15541554
("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
15551555
# TODO: the below have separate encoding procedures
1556-
# They produce JSON but not in a consistent manner
1557-
pytest.param("split", "", marks=pytest.mark.skip),
1558-
pytest.param("table", "", marks=pytest.mark.skip),
1556+
pytest.param(
1557+
"split",
1558+
"",
1559+
marks=pytest.mark.xfail(
1560+
reason="Produces JSON but not in a consistent manner"
1561+
),
1562+
),
1563+
pytest.param(
1564+
"table",
1565+
"",
1566+
marks=pytest.mark.xfail(
1567+
reason="Produces JSON but not in a consistent manner"
1568+
),
1569+
),
15591570
],
15601571
)
15611572
def test_tuple_labels(self, orient, expected):

pandas/tests/io/json/test_ujson.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -997,9 +997,11 @@ def test_dataframe_nested(self, orient):
997997
}
998998
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
999999

1000-
def test_dataframe_numpy_labelled(self, orient):
1000+
def test_dataframe_numpy_labelled(self, orient, request):
10011001
if orient in ("split", "values"):
1002-
pytest.skip("Incompatible with labelled=True")
1002+
request.node.add_marker(
1003+
pytest.mark.xfail(reason=f"{orient} incompatible with labelled=True")
1004+
)
10031005

10041006
df = DataFrame(
10051007
[[1, 2, 3], [4, 5, 6]],

pandas/tests/io/parser/dtypes/test_dtypes_basic.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ def test_delimiter_with_usecols_and_parse_dates(all_parsers):
193193
@pytest.mark.parametrize("thousands", ["_", None])
194194
def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):
195195
# GH#31920
196-
decimal_number_check(python_parser_only, numeric_decimal, thousands)
196+
decimal_number_check(python_parser_only, numeric_decimal, thousands, None)
197197

198198

199199
@pytest.mark.parametrize("thousands", ["_", None])
@@ -203,21 +203,22 @@ def test_1000_sep_decimal_float_precision(
203203
):
204204
# test decimal and thousand sep handling in across 'float_precision'
205205
# parsers
206-
decimal_number_check(c_parser_only, numeric_decimal, thousands)
206+
decimal_number_check(c_parser_only, numeric_decimal, thousands, float_precision)
207207
text, value = numeric_decimal
208208
text = " " + text + " "
209209
if isinstance(value, str): # the negative cases (parse as text)
210210
value = " " + value + " "
211-
decimal_number_check(c_parser_only, (text, value), thousands)
211+
decimal_number_check(c_parser_only, (text, value), thousands, float_precision)
212212

213213

214-
def decimal_number_check(parser, numeric_decimal, thousands):
214+
def decimal_number_check(parser, numeric_decimal, thousands, float_precision):
215215
# GH#31920
216216
value = numeric_decimal[0]
217217
if thousands is None and "_" in value:
218218
pytest.skip("Skip test if no thousands sep is defined and sep is in value")
219219
df = parser.read_csv(
220220
StringIO(value),
221+
float_precision=float_precision,
221222
sep="|",
222223
thousands=thousands,
223224
decimal=",",

0 commit comments

Comments
 (0)