author    V3n3RiX <venerix@koprulu.sector>  2024-02-22 23:40:36 +0000
committer V3n3RiX <venerix@koprulu.sector>  2024-02-22 23:40:36 +0000
commit    1e153f5e3ce504af9cf586c9b819e4f0bc919f3b (patch)
tree      d6462e2ccef3a7c2e6e824e69212811a4bd727cd /sci-libs
parent    c794e18e3eef5414856f70733e3a62479ce78c33 (diff)
gentoo auto-resync : 22:02:2024 - 23:40:35
Diffstat (limited to 'sci-libs')
-rw-r--r--  sci-libs/Manifest.gz                                  bin  45089 -> 45094 bytes
-rw-r--r--  sci-libs/datasets/Manifest                              5
-rw-r--r--  sci-libs/datasets/datasets-2.17.1.ebuild                1
-rw-r--r--  sci-libs/datasets/files/datasets-2.14.4-tests.patch   232
-rw-r--r--  sci-libs/datasets/files/datasets-2.17.1-tests.patch   240
5 files changed, 242 insertions, 236 deletions
diff --git a/sci-libs/Manifest.gz b/sci-libs/Manifest.gz
index 663d9bea36bb..7176f0e2efe4 100644
--- a/sci-libs/Manifest.gz
+++ b/sci-libs/Manifest.gz
Binary files differ
diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
index 32934c5c4a4c..1f16dc13e2f8 100644
--- a/sci-libs/datasets/Manifest
+++ b/sci-libs/datasets/Manifest
@@ -1,5 +1,4 @@
-AUX datasets-2.14.4-tests.patch 8198 BLAKE2B fbff05d38b138796f48a09cbf2e4499a63840bc24cc24b14e49cc8a7ef87bd6cf2fed24de4f9b45ff6a5581e8af407a734cd29f2172028592437d8bb54a5c6c9 SHA512 b9b87fce4ae2866ff23f038d5df799431600f1d7de5ef46d0c871401d6f47f45390d39c20342074f86eb31087336d80954d3ce288f317bf83c044ff65edece5a
-AUX datasets-2.17.1-tests.patch 5335 BLAKE2B 2d006dfd847d8e5424754fd3f1f855a909f407f80a3846e1959f726fff9b1076e46f5e2b4c5257dad51710e94312dba0fce98d79211fe4ffb6c72b01f2c6dd2d SHA512 be8df0b3b15371963d59054314134cbc3e263cdc4ab09eb54509842de000a21c0a0f52b0202656a1ae5cbbba5f77651f7a1e2a3d76a955211c7a8fbee01fca3b
+AUX datasets-2.17.1-tests.patch 13819 BLAKE2B 7581ae4ad336f8f8ecf6b47defa5b2d33da246d60e7159136803ceb4ad20a4f66025ec7aa3a1910ad4a79408ba7cb0de49621d56d13eaf64958c6770e1dc02e2 SHA512 96d344ef84cdc4cd1ee1cf3dd51be0ffb499839a74ba26bfb3aec3b87b459ff81c9c7f89d1704eb628f1a6f81e0a5f6770fd7316e5b0cdd3ad0df919254ed625
DIST datasets-2.17.1.gh.tar.gz 2168860 BLAKE2B ad7e9be7e60125d53b19b6277b6be6ae6050321e4210293a37737a345a4806d4901e9507fbf7a51c5e00a91912656d68a94e76cf70e070433beccc6e1ad54643 SHA512 43617c3d98cc3ad17fb577d6e917d164c8b6ec24740604ca281adaa2f0e5a6538633721792c9fa6621b7f1980161d8acf62dcdcdacca56e1739a8f28e3c71cdf
-EBUILD datasets-2.17.1.ebuild 2456 BLAKE2B 475c5d4dda897f61735a93811e5350e8e20159b2a00e448425fefae396cc8fccca4f8d8debf229e938403c59981f8461cfc78a865fc47e201351e6049830a334 SHA512 61d09cc8b95baf414523a80f8db5b01cfa5517c94e79217684d629ad7ec79f1b1f1d9115405e2734e3dc17d80158182310919bf94f44fb8ddb6e04979361d2eb
+EBUILD datasets-2.17.1.ebuild 2416 BLAKE2B 78df73a9afbaaccf854f34dafe913f10c8072d2d1855631f0716aef2852e181f6ad3ee4baff02d7fb4e13444b8aee7b6c18a97d7d1c000361f2f5720adf66303 SHA512 4be73c810e04c398838c65dc1eb301d5780a7420bed84c3754f0498f003589d37f2dcde01a7b98d6e5aa883e8534a8787e247f113652f1b5e0ae12ea2f2d6f15
MISC metadata.xml 379 BLAKE2B 48ebb9e7bfa8b58b0d15b82c4146def465e08cf3212ab4af04129d09c153b67b00d0fa05b94d6af54f643ec3a202f2335d3254b966f49d1394d3c7b9e5da56a5 SHA512 99560decfaa0e438980f372d99257695e9ca9585167d9aba091e0b775c2f8384657ddc017841c8f06f8b568017a54fb9e31da736f3c875da717e154cdce876d1
diff --git a/sci-libs/datasets/datasets-2.17.1.ebuild b/sci-libs/datasets/datasets-2.17.1.ebuild
index 9b6295db1a0e..65e38b9dbef7 100644
--- a/sci-libs/datasets/datasets-2.17.1.ebuild
+++ b/sci-libs/datasets/datasets-2.17.1.ebuild
@@ -57,7 +57,6 @@ BDEPEND="test? (
)"
PATCHES=(
- "${FILESDIR}"/${PN}-2.14.4-tests.patch
"${FILESDIR}"/${P}-tests.patch
)
diff --git a/sci-libs/datasets/files/datasets-2.14.4-tests.patch b/sci-libs/datasets/files/datasets-2.14.4-tests.patch
deleted file mode 100644
index b9791c04e8e0..000000000000
--- a/sci-libs/datasets/files/datasets-2.14.4-tests.patch
+++ /dev/null
@@ -1,232 +0,0 @@
---- a/tests/test_metric_common.py 2023-05-04 18:48:48.550861318 +0200
-+++ b/tests/test_metric_common.py 2023-05-04 18:50:25.787364577 +0200
-@@ -93,6 +93,7 @@
- INTENSIVE_CALLS_PATCHER = {}
- metric_name = None
-
-+ @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
- @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
- @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
- def test_load_metric(self, metric_name):
---- a/tests/test_distributed.py 2023-05-04 19:43:09.861275030 +0200
-+++ b/tests/test_distributed.py 2023-05-04 19:44:17.608326722 +0200
-@@ -74,6 +74,7 @@
- split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
-
-
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize("streaming", [False, True])
- @require_torch
- @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
-@@ -95,6 +96,7 @@
- execute_subprocess_async(cmd, env=os.environ.copy())
-
-
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize(
- "nproc_per_node, num_workers",
- [
---- a/tests/utils.py 2023-05-06 08:43:16.251987543 +0200
-+++ b/tests/utils.py 2023-05-06 08:44:24.467952870 +0200
-@@ -50,8 +50,8 @@
- # Audio
- require_sndfile = pytest.mark.skipif(
- # On Windows and OS X, soundfile installs sndfile
-- find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
-- reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
-+ True,
-+ reason="test requires librosa",
- )
-
- # Beam
---- a/tests/features/test_audio.py 2023-05-06 09:03:58.680108142 +0200
-+++ a/tests/features/test_audio.py 2023-05-06 09:05:50.463407967 +0200
-@@ -57,6 +57,7 @@
- assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
-
-
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
- "build_example",
- [
-@@ -81,6 +82,7 @@
- assert decoded_example.keys() == {"path", "array", "sampling_rate"}
-
-
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
- "build_example",
- [
-@@ -148,6 +149,7 @@
- assert decoded_example["sampling_rate"] == 48000
-
-
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
- def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
- audio_path = str(shared_datadir / "test_audio_16000.pcm")
-@@ -414,6 +417,7 @@
- assert column[0]["sampling_rate"] == 16000
-
-
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
- "build_data",
- [
-@@ -438,6 +442,7 @@
- assert item["audio"].keys() == {"path", "array", "sampling_rate"}
-
-
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_audio_features(shared_datadir):
- # we use a different data structure between 1 and 2 to make sure they are compatible with each other
- audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -451,6 +456,7 @@
- assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
-
-
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_nested_audio_features(shared_datadir):
- # we use a different data structure between 1 and 2 to make sure they are compatible with each other
- audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -610,6 +616,7 @@
- assert isinstance(ds, Dataset)
-
-
-+@require_sndfile
- def test_dataset_with_audio_feature_undecoded(shared_datadir):
- audio_path = str(shared_datadir / "test_audio_44100.wav")
- data = {"audio": [audio_path]}
-@@ -627,6 +634,7 @@
- assert column[0] == {"path": audio_path, "bytes": None}
-
-
-+@require_sndfile
- def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
- audio_path = str(shared_datadir / "test_audio_44100.wav")
- data = {"audio": [audio_path]}
-@@ -658,6 +666,7 @@
- assert column[0] == {"path": audio_path, "bytes": None}
-
-
-+@require_sndfile
- def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
- audio_path = str(shared_datadir / "test_audio_44100.wav")
- data = {"audio": [audio_path]}
---- a/tests/packaged_modules/test_audiofolder.py 2023-05-06 14:00:39.560876163 +0200
-+++ b/tests/packaged_modules/test_audiofolder.py 2023-05-06 14:01:26.005212423 +0200
-@@ -1,10 +1,8 @@
- import shutil
- import textwrap
-
--import librosa
- import numpy as np
- import pytest
--import soundfile as sf
-
- from datasets import Audio, ClassLabel, Features, Value
- from datasets.data_files import DataFilesDict, get_data_patterns
-@@ -192,8 +190,11 @@
- return data_files_with_two_splits_and_metadata
-
-
-+@pytest.mark.skip(reason="require soundfile")
- @pytest.fixture
- def data_files_with_zip_archives(tmp_path, audio_file):
-+ import soundfile as sf
-+ import librosa
- data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
- data_dir.mkdir(parents=True, exist_ok=True)
- archive_dir = data_dir / "archive"
---- a/tests/test_arrow_dataset.py 2023-05-06 15:36:11.080459079 +0200
-+++ b/tests/test_arrow_dataset.py 2023-05-06 15:38:07.452828528 +0200
-@@ -4136,6 +4136,7 @@
- )
- self.assertDictEqual(features_after_cast, dset.features)
-
-+ @pytest.mark.skip(reason="require soundfile")
- def test_task_automatic_speech_recognition(self):
- # Include a dummy extra column `dummy` to test we drop it correctly
- features_before_cast = Features(
---- a/tests/test_streaming_download_manager.py 2023-08-26 07:33:41.937389401 +0200
-+++ b/tests/test_streaming_download_manager.py 2023-08-26 07:37:22.521218698 +0200
-@@ -218,6 +218,7 @@
- assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, exists",
- [
-@@ -301,6 +302,7 @@
- assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, expected_paths",
- [
-@@ -331,6 +333,7 @@
- xlistdir(root_url, download_config=download_config)
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, isdir",
- [
-@@ -358,6 +361,7 @@
- assert xisdir(root_url, download_config=download_config) is False
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, isfile",
- [
-@@ -382,6 +386,7 @@
- assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, size",
- [
-@@ -407,6 +412,7 @@
- xgetsize(root_url + "qwertyuiop", download_config=download_config)
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, expected_paths",
- [
-@@ -450,6 +456,7 @@
- assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0
-
-
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, expected_outputs",
- [
-@@ -540,6 +547,7 @@
- def test_xpath_as_posix(self, input_path, expected_path):
- assert xPath(input_path).as_posix() == expected_path
-
-+ @pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, exists",
- [
-@@ -555,6 +563,7 @@
- (tmp_path / "file.txt").touch()
- assert xexists(input_path) is exists
-
-+ @pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, pattern, expected_paths",
- [
-@@ -593,6 +602,7 @@
- output_paths = sorted(xPath(input_path).glob(pattern))
- assert output_paths == expected_paths
-
-+ @pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
- "input_path, pattern, expected_paths",
- [
diff --git a/sci-libs/datasets/files/datasets-2.17.1-tests.patch b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
index 14ae50602d10..2281598dfb38 100644
--- a/sci-libs/datasets/files/datasets-2.17.1-tests.patch
+++ b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
@@ -8,6 +8,14 @@
"hdfs://relative/path",
"hdfs:///absolute/path",
],
+@@ -4136,6 +4136,7 @@
+ )
+ self.assertDictEqual(features_after_cast, dset.features)
+
++ @pytest.mark.skip(reason="require soundfile")
+ def test_task_automatic_speech_recognition(self):
+ # Include a dummy extra column `dummy` to test we drop it correctly
+ features_before_cast = Features(
--- a/tests/test_load.py 2024-02-20 22:12:13.699209107 +0100
+++ b/tests/test_load.py 2024-02-20 22:13:10.862626708 +0100
@@ -388,6 +388,7 @@
@@ -122,3 +130,235 @@
@pytest.mark.parametrize("remote", [False, True])
@pytest.mark.parametrize("drop_labels", [None, True, False])
def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir):
+--- a/tests/test_metric_common.py 2023-05-04 18:48:48.550861318 +0200
++++ b/tests/test_metric_common.py 2023-05-04 18:50:25.787364577 +0200
+@@ -93,6 +93,7 @@
+ INTENSIVE_CALLS_PATCHER = {}
+ metric_name = None
+
++ @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
+ @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
+ @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
+ def test_load_metric(self, metric_name):
+--- a/tests/test_distributed.py 2023-05-04 19:43:09.861275030 +0200
++++ b/tests/test_distributed.py 2023-05-04 19:44:17.608326722 +0200
+@@ -74,6 +74,7 @@
+ split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
+
+
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize("streaming", [False, True])
+ @require_torch
+ @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
+@@ -95,6 +96,7 @@
+ execute_subprocess_async(cmd, env=os.environ.copy())
+
+
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize(
+ "nproc_per_node, num_workers",
+ [
+--- a/tests/utils.py 2023-05-06 08:43:16.251987543 +0200
++++ b/tests/utils.py 2023-05-06 08:44:24.467952870 +0200
+@@ -50,8 +50,8 @@
+ # Audio
+ require_sndfile = pytest.mark.skipif(
+ # On Windows and OS X, soundfile installs sndfile
+- find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
+- reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
++ True,
++ reason="test requires librosa",
+ )
+
+ # Beam
+--- a/tests/features/test_audio.py 2023-05-06 09:03:58.680108142 +0200
++++ a/tests/features/test_audio.py 2023-05-06 09:05:50.463407967 +0200
+@@ -57,6 +57,7 @@
+ assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
+
+
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+ "build_example",
+ [
+@@ -81,6 +82,7 @@
+ assert decoded_example.keys() == {"path", "array", "sampling_rate"}
+
+
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+ "build_example",
+ [
+@@ -148,6 +149,7 @@
+ assert decoded_example["sampling_rate"] == 48000
+
+
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
+ def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
+ audio_path = str(shared_datadir / "test_audio_16000.pcm")
+@@ -414,6 +417,7 @@
+ assert column[0]["sampling_rate"] == 16000
+
+
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+ "build_data",
+ [
+@@ -438,6 +442,7 @@
+ assert item["audio"].keys() == {"path", "array", "sampling_rate"}
+
+
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_audio_features(shared_datadir):
+ # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+ audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -451,6 +456,7 @@
+ assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
+
+
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_nested_audio_features(shared_datadir):
+ # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+ audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -610,6 +616,7 @@
+ assert isinstance(ds, Dataset)
+
+
++@require_sndfile
+ def test_dataset_with_audio_feature_undecoded(shared_datadir):
+ audio_path = str(shared_datadir / "test_audio_44100.wav")
+ data = {"audio": [audio_path]}
+@@ -627,6 +634,7 @@
+ assert column[0] == {"path": audio_path, "bytes": None}
+
+
++@require_sndfile
+ def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
+ audio_path = str(shared_datadir / "test_audio_44100.wav")
+ data = {"audio": [audio_path]}
+@@ -658,6 +666,7 @@
+ assert column[0] == {"path": audio_path, "bytes": None}
+
+
++@require_sndfile
+ def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
+ audio_path = str(shared_datadir / "test_audio_44100.wav")
+ data = {"audio": [audio_path]}
+--- a/tests/packaged_modules/test_audiofolder.py 2023-05-06 14:00:39.560876163 +0200
++++ b/tests/packaged_modules/test_audiofolder.py 2023-05-06 14:01:26.005212423 +0200
+@@ -1,10 +1,8 @@
+ import shutil
+ import textwrap
+
+-import librosa
+ import numpy as np
+ import pytest
+-import soundfile as sf
+
+ from datasets import Audio, ClassLabel, Features, Value
+ from datasets.data_files import DataFilesDict, get_data_patterns
+@@ -192,8 +190,11 @@
+ return data_files_with_two_splits_and_metadata
+
+
++@pytest.mark.skip(reason="require soundfile")
+ @pytest.fixture
+ def data_files_with_zip_archives(tmp_path, audio_file):
++ import soundfile as sf
++ import librosa
+ data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
+ data_dir.mkdir(parents=True, exist_ok=True)
+ archive_dir = data_dir / "archive"
+--- a/tests/test_streaming_download_manager.py 2023-08-26 07:33:41.937389401 +0200
++++ b/tests/test_streaming_download_manager.py 2023-08-26 07:37:22.521218698 +0200
+@@ -218,6 +218,7 @@
+ assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, exists",
+ [
+@@ -301,6 +302,7 @@
+ assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, expected_paths",
+ [
+@@ -331,6 +333,7 @@
+ xlistdir(root_url, download_config=download_config)
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, isdir",
+ [
+@@ -358,6 +361,7 @@
+ assert xisdir(root_url, download_config=download_config) is False
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, isfile",
+ [
+@@ -382,6 +386,7 @@
+ assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, size",
+ [
+@@ -407,6 +412,7 @@
+ xgetsize(root_url + "qwertyuiop", download_config=download_config)
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, expected_paths",
+ [
+@@ -450,6 +456,7 @@
+ assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0
+
+
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, expected_outputs",
+ [
+@@ -540,6 +547,7 @@
+ def test_xpath_as_posix(self, input_path, expected_path):
+ assert xPath(input_path).as_posix() == expected_path
+
++ @pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, exists",
+ [
+@@ -555,6 +563,7 @@
+ (tmp_path / "file.txt").touch()
+ assert xexists(input_path) is exists
+
++ @pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, pattern, expected_paths",
+ [
+@@ -593,6 +602,7 @@
+ output_paths = sorted(xPath(input_path).glob(pattern))
+ assert output_paths == expected_paths
+
++ @pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+ "input_path, pattern, expected_paths",
+ [
+--- a/tests/io/test_parquet.py 2024-02-22 19:19:53.890749240 +0100
++++ b/tests/io/test_parquet.py 2024-02-22 19:20:30.954099914 +0100
+@@ -69,6 +69,7 @@
+ _check_parquet_dataset(dataset, expected_features)
+
+
++@pytest.mark.skip()
+ def test_parquet_read_geoparquet(geoparquet_path, tmp_path):
+ cache_dir = tmp_path / "cache"
+ dataset = ParquetDatasetReader(path_or_paths=geoparquet_path, cache_dir=cache_dir).read()