diff --git a/.github/workflows/fork-maintenance.yml b/.github/workflows/fork-maintenance.yml index 290dad4dfff..77356ccff42 100644 --- a/.github/workflows/fork-maintenance.yml +++ b/.github/workflows/fork-maintenance.yml @@ -43,7 +43,7 @@ jobs: pip install huggingface_hub datasets ; pip install parameterized; unit_test_command: > - cd tests && folders=\$(python3 -c \"import os; tests = os.getcwd(); model_tests_dir = os.path.join(tests, 'models'); model_tests = os.listdir(model_tests_dir) if os.path.exists(model_tests_dir) and os.path.isdir(model_tests_dir) else []; d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted([f'models/{x}' for x in model_tests if os.path.isdir(os.path.join('models', x))]); if 'models' in d1: d1.remove('models'); d = d2 + d1; print('\\\\n'.join(d))\"); cd ..; for folder in \${folders[@]}; do pytest tests/\${folder} -v -rfEs --make-reports=\"huggingface_unit_tests_run_models_gpu_\${folder}\" -p no:faulthandler --continue-on-collection-errors -m \"not not_device_test\" -p no:cacheprovider || true; done; allstats=\$(find reports -name stats.txt); for stat in \${allstats[@]}; do echo \"\$stat\"; cat \$stat; done + cd tests; folders=(\$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print("\n".join(d))' )); cd ..; for folder in \${folders[@]}; do pytest tests/\${folder} -v --make-reports=huggingface_unit_tests_\${machine_type}_run_models_gpu_\${folder} -rfEsx --continue-on-collection-errors -m "not not_device_test" -p no:cacheprovider ; done ; allstats=(\$(find reports -name stats.txt)); for stat in \${allstats[@]}; do echo \$stat; cat \$stat; done performance_test_command: > echo "python examples/pytorch/language-modeling/run_mlm.py --model_name_or_path bert-base-uncased --dataset_name wikitext 
--dataset_config_name wikitext-2-raw-v1 --do_train --do_eval --output_dir /tmp/test-mlm --per_device_train_batch_size 8 --per_device_eval_batch_size 8 --max_steps 500" docker_image: rocm/pytorch:latest