Merge remote-tracking branch 'origin/master' into fuckof
kristopolous committed Jan 10, 2025
2 parents ed8baa1 + 1578108 commit 6362b8e
Showing 37 changed files with 1,024 additions and 401 deletions.
167 changes: 80 additions & 87 deletions .buildkite/generate_pipeline.py
@@ -21,11 +21,13 @@
 clouds are not supported yet, smoke tests for those clouds are not generated.
 """
 
-import ast
 import os
 import random
+import re
+import subprocess
 from typing import Any, Dict, List, Optional
 
+import click
 from conftest import cloud_to_pytest_keyword
 from conftest import default_clouds_to_run
 import yaml
@@ -60,18 +62,8 @@
           'edit directly.\n')
 
 
-def _get_full_decorator_path(decorator: ast.AST) -> str:
-    """Recursively get the full path of a decorator."""
-    if isinstance(decorator, ast.Attribute):
-        return f'{_get_full_decorator_path(decorator.value)}.{decorator.attr}'
-    elif isinstance(decorator, ast.Name):
-        return decorator.id
-    elif isinstance(decorator, ast.Call):
-        return _get_full_decorator_path(decorator.func)
-    raise ValueError(f'Unknown decorator type: {type(decorator)}')
-
-
-def _extract_marked_tests(file_path: str) -> Dict[str, List[str]]:
+def _extract_marked_tests(file_path: str,
+                          filter_marks: List[str]) -> Dict[str, List[str]]:
     """Extract test functions and filter clouds using pytest.mark
     from a Python test file.
@@ -85,80 +77,69 @@ def _extract_marked_tests(file_path: str) -> Dict[str, List[str]]:
     rerun failures. Additionally, the parallelism would be controlled by pytest
     instead of the buildkite job queue.
     """
-    with open(file_path, 'r', encoding='utf-8') as file:
-        tree = ast.parse(file.read(), filename=file_path)
-
-    for node in ast.walk(tree):
-        for child in ast.iter_child_nodes(node):
-            setattr(child, 'parent', node)
-
+    cmd = f'pytest {file_path} --collect-only'
+    output = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+    matches = re.findall('Collected .+?\.py::(.+?) with marks: \[(.*?)\]',
+                         output.stdout)
+    function_name_marks_map = {}
+    for function_name, marks in matches:
+        function_name = re.sub(r'\[.*?\]', '', function_name)
+        marks = marks.replace('\'', '').split(',')
+        marks = [i.strip() for i in marks]
+        if function_name not in function_name_marks_map:
+            function_name_marks_map[function_name] = set(marks)
+        else:
+            function_name_marks_map[function_name].update(marks)
     function_cloud_map = {}
-    for node in ast.walk(tree):
-        if isinstance(node, ast.FunctionDef) and node.name.startswith('test_'):
-            class_name = None
-            if hasattr(node, 'parent') and isinstance(node.parent,
-                                                      ast.ClassDef):
-                class_name = node.parent.name
-
-            clouds_to_include = []
-            clouds_to_exclude = []
-            is_serve_test = False
-            for decorator in node.decorator_list:
-                if isinstance(decorator, ast.Call):
-                    # We only need to consider the decorator with no arguments
-                    # to extract clouds.
-                    continue
-                full_path = _get_full_decorator_path(decorator)
-                if full_path.startswith('pytest.mark.'):
-                    assert isinstance(decorator, ast.Attribute)
-                    suffix = decorator.attr
-                    if suffix.startswith('no_'):
-                        clouds_to_exclude.append(suffix[3:])
-                    else:
-                        if suffix == 'serve':
-                            is_serve_test = True
-                            continue
-                        if suffix not in PYTEST_TO_CLOUD_KEYWORD:
-                            # This mark does not specify a cloud, so we skip it.
-                            continue
-                        clouds_to_include.append(
-                            PYTEST_TO_CLOUD_KEYWORD[suffix])
-            clouds_to_include = (clouds_to_include if clouds_to_include else
-                                 DEFAULT_CLOUDS_TO_RUN)
-            clouds_to_include = [
-                cloud for cloud in clouds_to_include
-                if cloud not in clouds_to_exclude
-            ]
-            cloud_queue_map = SERVE_CLOUD_QUEUE_MAP if is_serve_test else CLOUD_QUEUE_MAP
-            final_clouds_to_include = [
-                cloud for cloud in clouds_to_include if cloud in cloud_queue_map
-            ]
-            if clouds_to_include and not final_clouds_to_include:
-                print(f'Warning: {file_path}:{node.name} '
-                      f'is marked to run on {clouds_to_include}, '
-                      f'but we do not have credentials for those clouds. '
-                      f'Skipped.')
-                continue
-            if clouds_to_include != final_clouds_to_include:
-                excluded_clouds = set(clouds_to_include) - set(
-                    final_clouds_to_include)
-                print(
-                    f'Warning: {file_path}:{node.name} '
-                    f'is marked to run on {clouds_to_include}, '
-                    f'but we only have credentials for {final_clouds_to_include}. '
-                    f'clouds {excluded_clouds} are skipped.')
-            function_name = (f'{class_name}::{node.name}'
-                             if class_name else node.name)
-            function_cloud_map[function_name] = (final_clouds_to_include, [
-                cloud_queue_map[cloud] for cloud in final_clouds_to_include
-            ])
+    filter_marks = set(filter_marks)
+    for function_name, marks in function_name_marks_map.items():
+        if filter_marks and not filter_marks & marks:
+            continue
+        clouds_to_include = []
+        clouds_to_exclude = []
+        is_serve_test = 'serve' in marks
+        for mark in marks:
+            if mark.startswith('no_'):
+                clouds_to_exclude.append(mark[3:])
+            else:
+                if mark not in PYTEST_TO_CLOUD_KEYWORD:
+                    # This mark does not specify a cloud, so we skip it.
+                    continue
+                clouds_to_include.append(PYTEST_TO_CLOUD_KEYWORD[mark])
+
+        clouds_to_include = (clouds_to_include
+                             if clouds_to_include else DEFAULT_CLOUDS_TO_RUN)
+        clouds_to_include = [
+            cloud for cloud in clouds_to_include
+            if cloud not in clouds_to_exclude
+        ]
+        cloud_queue_map = SERVE_CLOUD_QUEUE_MAP if is_serve_test else CLOUD_QUEUE_MAP
+        final_clouds_to_include = [
+            cloud for cloud in clouds_to_include if cloud in cloud_queue_map
+        ]
+        if clouds_to_include and not final_clouds_to_include:
+            print(
+                f'Warning: {function_name} is marked to run on {clouds_to_include}, '
+                f'but we do not have credentials for those clouds. Skipped.')
+            continue
+        if clouds_to_include != final_clouds_to_include:
+            excluded_clouds = set(clouds_to_include) - set(
+                final_clouds_to_include)
+            print(
+                f'Warning: {function_name} is marked to run on {clouds_to_include}, '
+                f'but we only have credentials for {final_clouds_to_include}. '
+                f'clouds {excluded_clouds} are skipped.')
+        function_cloud_map[function_name] = (final_clouds_to_include, [
+            cloud_queue_map[cloud] for cloud in final_clouds_to_include
+        ])
     return function_cloud_map
 
 
-def _generate_pipeline(test_file: str) -> Dict[str, Any]:
+def _generate_pipeline(test_file: str,
+                       filter_marks: List[str]) -> Dict[str, Any]:
     """Generate a Buildkite pipeline from test files."""
     steps = []
-    function_cloud_map = _extract_marked_tests(test_file)
+    function_cloud_map = _extract_marked_tests(test_file, filter_marks)
     for test_function, clouds_and_queues in function_cloud_map.items():
         for cloud, queue in zip(*clouds_and_queues):
             step = {
@@ -194,12 +175,12 @@ def _dump_pipeline_to_file(yaml_file_path: str,
         yaml.dump(final_pipeline, file, default_flow_style=False)
 
 
-def _convert_release(test_files: List[str]):
+def _convert_release(test_files: List[str], filter_marks: List[str]):
     yaml_file_path = '.buildkite/pipeline_smoke_tests_release.yaml'
     output_file_pipelines = []
     for test_file in test_files:
         print(f'Converting {test_file} to {yaml_file_path}')
-        pipeline = _generate_pipeline(test_file)
+        pipeline = _generate_pipeline(test_file, filter_marks)
         output_file_pipelines.append(pipeline)
         print(f'Converted {test_file} to {yaml_file_path}\n\n')
     # Enable all clouds by default for release pipeline.
@@ -208,15 +189,15 @@ def _convert_release(test_files: List[str]):
                            extra_env={cloud: '1' for cloud in CLOUD_QUEUE_MAP})
 
 
-def _convert_quick_tests_core(test_files: List[str]):
+def _convert_quick_tests_core(test_files: List[str], filter_marks: List[str]):
     yaml_file_path = '.buildkite/pipeline_smoke_tests_quick_tests_core.yaml'
     output_file_pipelines = []
     for test_file in test_files:
         print(f'Converting {test_file} to {yaml_file_path}')
         # We want enable all clouds by default for each test function
         # for pre-merge. And let the author controls which clouds
         # to run by parameter.
-        pipeline = _generate_pipeline(test_file)
+        pipeline = _generate_pipeline(test_file, filter_marks)
         pipeline['steps'].append({
             'label': 'Backward compatibility test',
             'command': 'bash tests/backward_compatibility_tests.sh',
@@ -231,7 +212,12 @@ def _convert_quick_tests_core(test_files: List[str]):
                            extra_env={'SKYPILOT_SUPPRESS_SENSITIVE_LOG': '1'})
 
 
-def main():
+@click.command()
+@click.option(
+    '--filter-marks',
+    type=str,
+    help='Filter to include only a subset of pytest marks, e.g., managed_jobs')
+def main(filter_marks):
     test_files = os.listdir('tests/smoke_tests')
     release_files = []
     quick_tests_core_files = []
@@ -244,8 +230,15 @@ def main():
         else:
             release_files.append(test_file_path)
 
-    _convert_release(release_files)
-    _convert_quick_tests_core(quick_tests_core_files)
+    filter_marks = filter_marks or os.getenv('FILTER_MARKS')
+    if filter_marks:
+        filter_marks = filter_marks.split(',')
+        print(f'Filter marks: {filter_marks}')
+    else:
+        filter_marks = []
+
+    _convert_release(release_files, filter_marks)
+    _convert_quick_tests_core(quick_tests_core_files, filter_marks)
 
 
 if __name__ == '__main__':
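Note: a minimal, self-contained sketch (not part of the commit) of the new mark-collection and filtering flow above, assuming collection output in the `Collected <path>.py::<test> with marks: [...]` format that the script greps for; the sample output, helper names, and test names below are illustrative:

import re

SAMPLE_COLLECT_OUTPUT = """\
Collected tests/smoke_tests/test_basic.py::test_minimal with marks: ['aws']
Collected tests/smoke_tests/test_basic.py::test_launch[gcp] with marks: ['gcp', 'serve']
"""


def marks_by_function(collect_output: str) -> dict:
    """Aggregate marks per test function, merging parametrized variants."""
    matches = re.findall(r'Collected .+?\.py::(.+?) with marks: \[(.*?)\]',
                         collect_output)
    result = {}
    for function_name, marks in matches:
        # Strip parametrization suffixes such as '[gcp]' so variants merge.
        function_name = re.sub(r'\[.*?\]', '', function_name)
        mark_set = {m.strip() for m in marks.replace("'", '').split(',')}
        result.setdefault(function_name, set()).update(mark_set)
    return result


def filter_functions(collect_output: str, filter_marks: list) -> list:
    """Keep functions whose marks intersect filter_marks (empty keeps all)."""
    wanted = set(filter_marks)
    return [
        name for name, marks in marks_by_function(collect_output).items()
        if not wanted or wanted & marks
    ]


print(filter_functions(SAMPLE_COLLECT_OUTPUT, ['serve']))  # ['test_launch']

In the generator itself, the filter can be supplied either as `--filter-marks managed_jobs` on the command line or through the `FILTER_MARKS` environment variable; a comma-separated value keeps every test that carries at least one of the listed marks.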
63 changes: 0 additions & 63 deletions .github/workflows/test-poetry-build.yml

This file was deleted.

2 changes: 1 addition & 1 deletion docs/source/cloud-setup/cloud-permissions/aws.rst
@@ -223,7 +223,7 @@ IAM Role Creation

 Using a specific VPC
 -----------------------
-By default, SkyPilot uses the "default" VPC in each region.
+By default, SkyPilot uses the "default" VPC in each region. If a region does not have a `default VPC <https://docs.aws.amazon.com/vpc/latest/userguide/work-with-default-vpc.html#create-default-vpc>`_, SkyPilot will not be able to use the region.
 
 To instruct SkyPilot to use a specific VPC, you can use SkyPilot's global config
 file ``~/.sky/config.yaml`` to specify the VPC name in the ``aws.vpc_name``
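(For example, the setting lives under an `aws:` section of `~/.sky/config.yaml` as `vpc_name: my-vpc-name`, where `my-vpc-name` is a placeholder for your VPC's name, not a value from this commit.)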
2 changes: 1 addition & 1 deletion docs/source/getting-started/installation.rst
@@ -314,7 +314,7 @@ RunPod

 .. code-block:: shell
-   pip install "runpod>=1.5.1"
+   pip install "runpod>=1.6.1"
    runpod config
28 changes: 19 additions & 9 deletions docs/source/reference/config.rst
@@ -628,20 +628,30 @@ Available fields and semantics:
 # Advanced OCI configurations (optional).
 oci:
   # A dict mapping region names to region-specific configurations, or
-  # `default` for the default configuration.
+  # `default` for the default/global configuration.
   default:
-    # The OCID of the profile to use for launching instances (optional).
-    oci_config_profile: DEFAULT
-    # The OCID of the compartment to use for launching instances (optional).
+    # The profile name in ~/.oci/config to use for launching instances. If not
+    # set, the one named DEFAULT will be used (optional).
+    oci_config_profile: SKY_PROVISION_PROFILE
+    # The OCID of the compartment to use for launching instances. If not set,
+    # the root compartment will be used (optional).
     compartment_ocid: ocid1.compartment.oc1..aaaaaaaahr7aicqtodxmcfor6pbqn3hvsngpftozyxzqw36gj4kh3w3kkj4q
-    # The image tag to use for launching general instances (optional).
-    image_tag_general: skypilot:cpu-ubuntu-2004
-    # The image tag to use for launching GPU instances (optional).
-    image_tag_gpu: skypilot:gpu-ubuntu-2004
+    # The default image tag to use for launching general instances (CPU) if the
+    # image_id parameter is not specified. If not set, the default is
+    # skypilot:cpu-ubuntu-2204 (optional).
+    image_tag_general: skypilot:cpu-oraclelinux8
+    # The default image tag to use for launching GPU instances if the image_id
+    # parameter is not specified. If not set, the default is
+    # skypilot:gpu-ubuntu-2204 (optional).
+    image_tag_gpu: skypilot:gpu-oraclelinux8
   # Region-specific configurations
   ap-seoul-1:
+    # The OCID of the VCN to use for instances (optional).
+    vcn_ocid: ocid1.vcn.oc1.ap-seoul-1.amaaaaaaak7gbriarkfs2ssus5mh347ktmi3xa72tadajep6asio3ubqgarq
     # The OCID of the subnet to use for instances (optional).
     vcn_subnet: ocid1.subnet.oc1.ap-seoul-1.aaaaaaaa5c6wndifsij6yfyfehmi3tazn6mvhhiewqmajzcrlryurnl7nuja
   us-ashburn-1:
+    vcn_ocid: ocid1.vcn.oc1.ap-seoul-1.amaaaaaaak7gbriarkfs2ssus5mh347ktmi3xa72tadajep6asio3ubqgarq
     vcn_subnet: ocid1.subnet.oc1.iad.aaaaaaaafbj7i3aqc4ofjaapa5edakde6g4ea2yaslcsay32cthp7qo55pxa
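Note: one way to picture the `default`-plus-region layering described above is a lookup that prefers the region-specific section and falls back to `default`. A sketch, illustrative only and not SkyPilot's actual resolution code (the config values are shortened placeholders):

from typing import Any, Dict


def resolve_oci_config(oci_config: Dict[str, Dict[str, Any]],
                       region: str, key: str, fallback: Any = None) -> Any:
    """Prefer the region-specific value, then the 'default' section."""
    for section in (region, 'default'):
        value = oci_config.get(section, {}).get(key)
        if value is not None:
            return value
    return fallback


config = {
    'default': {'image_tag_general': 'skypilot:cpu-oraclelinux8'},
    'ap-seoul-1': {'vcn_subnet': 'ocid1.subnet.oc1.ap-seoul-1.aaaa...'},
}
# The region section lacks the key, so the 'default' section supplies it.
print(resolve_oci_config(config, 'ap-seoul-1', 'image_tag_general'))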
