author     V3n3RiX <venerix@koprulu.sector>    2023-05-20 16:18:45 +0100
committer  V3n3RiX <venerix@koprulu.sector>    2023-05-20 16:18:45 +0100
commit     2ffbfc63824a9e437090fb3c744670dd45369ae0 (patch)
tree       ad31a5650e31f93490157943681bb982f8a4520c /sci-libs
parent     399fa07bfac673a8846466b16c76549e329b55b8 (diff)
gentoo auto-resync : 20:05:2023 - 16:18:45
Diffstat (limited to 'sci-libs')
-rw-r--r--  sci-libs/Manifest.gz                                  bin  44805 -> 44806 bytes
-rw-r--r--  sci-libs/evaluate/Manifest                              4
-rw-r--r--  sci-libs/evaluate/evaluate-0.4.0-r2.ebuild              1
-rw-r--r--  sci-libs/evaluate/files/evaluate-0.4.0-tests.patch    198
4 files changed, 178 insertions, 25 deletions
diff --git a/sci-libs/Manifest.gz b/sci-libs/Manifest.gz
index 67717c42cbba..40261fb56ebb 100644
--- a/sci-libs/Manifest.gz
+++ b/sci-libs/Manifest.gz
Binary files differ
diff --git a/sci-libs/evaluate/Manifest b/sci-libs/evaluate/Manifest
index 7df1ff6711f6..2d6319eda2d8 100644
--- a/sci-libs/evaluate/Manifest
+++ b/sci-libs/evaluate/Manifest
@@ -1,4 +1,4 @@
-AUX evaluate-0.4.0-tests.patch 2471 BLAKE2B 9744c3f24f1e95a88f9a06d065234ba37b094723261884681c021888c794c9a8c1d6253390105379c898f59d8f18b947db9a3a1229a85fdf3f341909e5eb4eb3 SHA512 03a4b335f856da64423bce7fd10a21558d4ced499794868ac5c34f68885f39627ae75bd01634370bd7c175be330e7abb8adb1317d0e5a3adb938f140c9216387
+AUX evaluate-0.4.0-tests.patch 8011 BLAKE2B 441a4b86e8201f9ab98fdee51e121acf86685202be6b9d28dd5bd0a09ccd649041ee092476f6cc85f6764ba7f411ff983f06ffd96fec5129b00c01cb29ac3914 SHA512 8f6210b7de3e5cabbc72775de2a45af2e4210801d827f1a80b58d30e4f84043bef593f5ece12bd8d2586ec7d0a025fd4693213bd395f3e2a700de312518dea66
 DIST evaluate-0.4.0.gh.tar.gz 292250 BLAKE2B f88428b263820c1af43d02ae676625257251476092efe624490f29e63a045d698db01e4a7a802c2330027d01bc6ccf16986f28ecf8202ecbfd943c5d7c40f6ec SHA512 f2136196fc4e5717859e36e173cd49d049fc5ef50c89f466e13edd0142830574dec0b5485a4a1097eec9cb9df756a617216ff48c141db008cb0c2b85288d7fc9
-EBUILD evaluate-0.4.0-r2.ebuild 1403 BLAKE2B 718eff6da357380640eec33004988fd21f6082c25ffb04d008e8de694beb575fc3038d906a5af9c771630f3e95b19d211ef27d84e7bd0f870cab162a55933c1b SHA512 85eb93a9185c5b5dda0fc52f2f7053c475446dc63aecd6d62009a9067d281b0d7ddb5b765f66976c1acc522aa7d3fb5e46b43941b63050cee16ae80958ad9429
+EBUILD evaluate-0.4.0-r2.ebuild 1445 BLAKE2B 0ff1586583bb9e91a2ff2de89531fe60f724e26712b1baad6fcfca3d9369b7de3fee15f887b00e2372e25e0b6bb85ad50eda9321ce7e83b53ec431abb940216c SHA512 414b2c6375c51de222155a8f4a9112b50984065f5db841a3d713d3cdc6056f5a3fe26c567123f08a8f5bd357b58f2ed74946ef9644016d20932cb10954660263
 MISC metadata.xml 379 BLAKE2B a717b46962e59358925c866c64b2d0bc1dcd5d55d73e814686a09f703e339d2c0de52f6c214c8f795518d6d9dbb46443be11374643d415ff681dedca1511732b SHA512 03d6b58cad73cad46f1101fedf88ec94dc6d7a2028399a20b39939bead4fa402d00224085206a175a33d92417176cc45853060b18faa13769b80527fac9254e1
diff --git a/sci-libs/evaluate/evaluate-0.4.0-r2.ebuild b/sci-libs/evaluate/evaluate-0.4.0-r2.ebuild
index 11f7e2a1f9c2..61fc96cebeac 100644
--- a/sci-libs/evaluate/evaluate-0.4.0-r2.ebuild
+++ b/sci-libs/evaluate/evaluate-0.4.0-r2.ebuild
@@ -47,5 +47,6 @@ src_prepare() {
 	rm -r metrics/{nist_mt,rl_reliability,rouge,sacrebleu,sari} || die
 	rm -r metrics/{ter,trec_eval,wiki_split,xtreme_s} || die
 	rm -r measurements/word_length || die
+	rm tests/test_evaluation_suite.py || die
 	distutils-r1_src_prepare
 }
diff --git a/sci-libs/evaluate/files/evaluate-0.4.0-tests.patch b/sci-libs/evaluate/files/evaluate-0.4.0-tests.patch
index 1e7e808576e3..452a6d862ada 100644
--- a/sci-libs/evaluate/files/evaluate-0.4.0-tests.patch
+++ b/sci-libs/evaluate/files/evaluate-0.4.0-tests.patch
@@ -8,22 +8,78 @@
  from datasets import ClassLabel, Dataset, Features, Sequence, Value
  from PIL import Image
 
-@@ -335,6 +335,7 @@
+@@ -128,6 +128,7 @@
+         return [{"text": "Lorem ipsum"} for _ in inputs]
+ 
+ 
++@skip("require network")
+ class TestEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
+@@ -230,6 +230,7 @@
+         )
+ 
+ 
++@skip("require network")
+ class TestTextClassificationEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict({"label": [1, 0], "text": ["great movie", "horrible movie"]})
+@@ -394,6 +394,7 @@
+         self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(data), 5)
+ 
+ 
++@skip("require network")
+ class TestTextClassificationEvaluatorTwoColumns(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict(
+@@ -452,6 +452,7 @@
          self.assertEqual(results["accuracy"], 1.0)
 
-+    @skip("not working")
-     def test_bootstrap(self):
-         data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
-@@ -368,6 +369,7 @@
-         self.assertAlmostEqual(results["samples_per_second"], len(self.data) / results["total_time_in_seconds"], 5)
-         self.assertAlmostEqual(results["latency_in_seconds"], results["total_time_in_seconds"] / len(self.data), 5)
 
++@skip("require network")
+ class TestImageClassificationEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict(
+@@ -534,6 +535,7 @@
+         self.assertEqual(results["accuracy"], 0)
+ 
-+    @skip("not working")
-     def test_bootstrap_and_perf(self):
-         data = Dataset.from_dict({"label": [1, 0, 0], "text": ["great movie", "great movie", "horrible movie"]})
++@skip("require network")
+ class TestQuestionAnsweringEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict(
+@@ -716,6 +716,7 @@
+         )
+         self.assertEqual(results["overall_accuracy"], 0.5)
+ 
++    @skip("require network")
+     def test_class_init(self):
+         evaluator = TokenClassificationEvaluator()
+         self.assertEqual(evaluator.task, "token-classification")
+@@ -735,6 +736,7 @@
+         )
+         self.assertEqual(results["overall_accuracy"], 2 / 3)
++    @skip("require network")
+     def test_overwrite_default_metric(self):
+         accuracy = load("seqeval")
+         results = self.evaluator.compute(
+@@ -750,6 +752,7 @@
+         )
+         self.assertEqual(results["overall_accuracy"], 1.0)
+ 
++    @skip("require network")
+     def test_data_loading(self):
+         # Test passing in dataset by name with data_split
+         data = self.evaluator.load_data("evaluate/conll2003-ci", split="validation[:1]")
+@@ -863,6 +866,7 @@
+         self.pipe = DummyTextGenerationPipeline(num_return_sequences=4)
+         self.evaluator = evaluator("text-generation")
+ 
++    @skip("require network")
+     def test_class_init(self):
+         evaluator = TextGenerationEvaluator()
+         self.assertEqual(evaluator.task, "text-generation")
 @@ -877,6 +877,7 @@
          results = self.evaluator.compute(data=self.data)
          self.assertIsInstance(results["unique_words"], int)
@@ -32,22 +88,22 @@
      def test_overwrite_default_metric(self):
          word_length = load("word_length")
          results = self.evaluator.compute(
-@@ -939,6 +940,7 @@
-         results = self.evaluator.compute(data=self.data)
+@@ -906,6 +910,7 @@
+         self.assertEqual(processed_predictions, {"data": ["A", "B", "C", "D"]})
+ 
+ 
++@skip("require network")
+ class TestText2TextGenerationEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict(
+@@ -979,6 +984,7 @@
          self.assertEqual(results["bleu"], 0)
 
-+    @skip("require rouge_score")
-     def test_overwrite_default_metric(self):
-         rouge = load("rouge")
-         results = self.evaluator.compute(
-@@ -949,6 +952,7 @@
-         )
-         self.assertEqual(results["rouge1"], 1.0)
-+    @skip("require rouge_score")
-     def test_summarization(self):
-         pipe = DummyText2TextGenerationPipeline(task="summarization", prefix="summary")
-         e = evaluator("summarization")
++@skip("require network")
+ class TestAutomaticSpeechRecognitionEvaluator(TestCase):
+     def setUp(self):
+         self.data = Dataset.from_dict(
 --- a/tests/test_trainer_evaluator_parity.py	2023-05-14 17:50:29.224525549 +0200
 +++ b/tests/test_trainer_evaluator_parity.py	2023-05-14 17:37:40.947501195 +0200
 @@ -269,6 +269,7 @@
 
@@ -58,3 +114,99 @@
      def test_token_classification_parity(self):
          model_name = "hf-internal-testing/tiny-bert-for-token-classification"
          n_samples = 500
+--- a/tests/test_load.py	2023-05-20 15:45:58.855473557 +0200
++++ b/tests/test_load.py	2023-05-20 15:50:41.620071500 +0200
+@@ -61,6 +61,7 @@
+             hf_modules_cache=self.hf_modules_cache,
+         )
+ 
++    @pytest.mark.skip("require network")
+     def test_HubEvaluationModuleFactory_with_internal_import(self):
+         # "squad_v2" requires additional imports (internal)
+         factory = HubEvaluationModuleFactory(
+@@ -72,6 +73,7 @@
+         module_factory_result = factory.get_module()
+         assert importlib.import_module(module_factory_result.module_path) is not None
+ 
++    @pytest.mark.skip("require network")
+     def test_HubEvaluationModuleFactory_with_external_import(self):
+         # "bleu" requires additional imports (external from github)
+         factory = HubEvaluationModuleFactory(
+@@ -83,6 +85,7 @@
+         module_factory_result = factory.get_module()
+         assert importlib.import_module(module_factory_result.module_path) is not None
+ 
++    @pytest.mark.skip("require network")
+     def test_HubEvaluationModuleFactoryWithScript(self):
+         factory = HubEvaluationModuleFactory(
+             SAMPLE_METRIC_IDENTIFIER,
+@@ -115,6 +118,7 @@
+         module_factory_result = factory.get_module()
+         assert importlib.import_module(module_factory_result.module_path) is not None
+ 
++    @pytest.mark.skip("require network")
+     def test_cache_with_remote_canonical_module(self):
+         metric = "accuracy"
+         evaluation_module_factory(
+@@ -127,6 +131,7 @@
+             metric, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
+         )
+ 
++    @pytest.mark.skip("require network")
+     def test_cache_with_remote_community_module(self):
+         metric = "lvwerra/test"
+         evaluation_module_factory(
+--- a/tests/test_metric.py	2023-05-20 15:54:32.558477445 +0200
++++ b/tests/test_metric.py	2023-05-20 15:55:40.775415987 +0200
+@@ -736,6 +736,7 @@
+ 
+         self.assertDictEqual(dummy_result_1, combined_evaluation.compute(predictions=preds, references=refs))
+ 
++    @pytest.mark.skip('require network')
+     def test_modules_from_string(self):
+         expected_result = {"accuracy": 0.5, "recall": 0.5, "precision": 1.0}
+         predictions = [0, 1]
+--- a/tests/test_metric_common.py	2023-05-20 15:57:02.399146066 +0200
++++ b/tests/test_metric_common.py	2023-05-20 15:59:25.167947472 +0200
+@@ -99,6 +99,7 @@
+     evaluation_module_name = None
+     evaluation_module_type = None
+ 
++    @pytest.mark.skip('require network')
+     def test_load(self, evaluation_module_name, evaluation_module_type):
+         doctest.ELLIPSIS_MARKER = "[...]"
+         evaluation_module = importlib.import_module(
+--- a/tests/test_trainer_evaluator_parity.py	2023-05-20 16:00:55.986549706 +0200
++++ b/tests/test_trainer_evaluator_parity.py	2023-05-20 16:02:51.808766855 +0200
+@@ -4,6 +4,7 @@
+ import subprocess
+ import tempfile
+ import unittest
++import pytest
+ 
+ import numpy as np
+ import torch
+@@ -33,6 +33,7 @@
+     def tearDown(self):
+         shutil.rmtree(self.dir_path, ignore_errors=True)
+ 
++    @pytest.mark.skip('require network')
+     def test_text_classification_parity(self):
+         model_name = "philschmid/tiny-bert-sst2-distilled"
+ 
+@@ -121,6 +122,7 @@
+ 
+         self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])
+ 
++    @pytest.mark.skip('require network')
+     def test_image_classification_parity(self):
+         # we can not compare to the Pytorch transformers example, that uses custom preprocessing on the images
+         model_name = "douwekiela/resnet-18-finetuned-dogfood"
+@@ -179,6 +181,7 @@
+ 
+         self.assertEqual(transformers_results["eval_accuracy"], evaluator_results["accuracy"])
+ 
++    @pytest.mark.skip('require network')
+     def test_question_answering_parity(self):
+         model_name_v1 = "anas-awadalla/bert-tiny-finetuned-squad"
+         model_name_v2 = "mrm8488/bert-tiny-finetuned-squadv2"
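
Every hunk above applies the same technique: tests that need network access are disabled with the stock unittest / pytest skip decorators, so the rest of the suite can still run inside Portage's sandboxed, offline build environment. A minimal self-contained sketch of the two decorator forms the patch uses (the class and test names below are illustrative, not taken from the repository):

```python
import unittest

import pytest
from unittest import skip


@skip("require network")  # class-level: every test in this class is skipped
class TestHubDownload(unittest.TestCase):
    def test_fetch_metric(self):
        self.fail("never runs; the class-level skip applies first")


class TestMixedSuite(unittest.TestCase):
    def test_local_arithmetic(self):
        # Unmarked tests still run normally.
        self.assertEqual(2 + 2, 4)

    @pytest.mark.skip("require network")  # pytest honors this mark on TestCase methods
    def test_fetch_model(self):
        self.fail("never runs when collected by pytest")
```

Under `pytest -rs`, the marked tests are reported as skipped with the given reason instead of failing in an environment with no network access.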