[query] Various Benchmark Suite Improvements
ehigham committed Dec 5, 2024
1 parent 479c1cc commit c4a8c24
Showing 14 changed files with 473 additions and 327 deletions.
41 changes: 11 additions & 30 deletions hail/python/benchmark/conftest.py
@@ -12,7 +12,7 @@
def pytest_addoption(parser):
parser.addoption("--log", type=str, help='Log file path', default=None)
parser.addoption("--output", type=str, help="Output file path.", default=None)
parser.addoption("--data-dir", type=str, help="Data directory.", default=None)
parser.addoption("--data-dir", type=str, help="Data directory.", default=os.getenv('HAIL_BENCHMARK_DIR'))
parser.addoption('--iterations', type=int, help='override number of iterations for all benchmarks', default=None)
parser.addoption('--cores', type=int, help='Number of cores to use.', default=1)
parser.addoption(
@@ -23,38 +23,19 @@ def pytest_addoption(parser):
const='cpu',
default=None,
)
parser.addoption(
'--max-duration',
type=int,
help='Maximum permitted duration for any benchmark trial in seconds, not to be confused with pytest-timeout',
default=200,
)
parser.addoption('--max-failures', type=int, help='Stop benchmarking item after this many failures', default=3)
parser.addoption('--profiler-path', type=str, help='path to async profiler', default=None)
parser.addoption('--profiler-fmt', choices=['html', 'flame', 'jfr'], help='Choose profiler output.', default='html')


def run_config_from_pytest_config(pytest_config):
return type(
'RunConfig',
(object,),
{
**{
flag: pytest_config.getoption(flag) or default
for flag, default in [
('log', None),
('output', None),
('cores', 1),
('data_dir', os.getenv('HAIL_BENCHMARK_DIR')),
('iterations', None),
('max_failures', None),
('profile', None),
('profiler_path', os.getenv('ASYNC_PROFILER_HOME')),
('profiler_fmt', None),
]
},
'verbose': pytest_config.getoption('verbose') > 0,
'quiet': pytest_config.getoption('verbose') < 0,
'timeout': int(pytest_config.getoption('timeout') or 100),
},
parser.addoption(
'--profiler-path', type=str, help='path to async profiler', default=os.getenv('ASYNC_PROFILER_HOME')
)
parser.addoption('--profiler-fmt', choices=['html', 'flame', 'jfr'], help='Choose profiler output.', default='html')


@pytest.hookimpl
def pytest_configure(config):
config.run_config = run_config_from_pytest_config(config)
init_logging(file=config.run_config.log)
init_logging(file=config.getoption('log'))
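
The thrust of the conftest.py changes: environment-variable defaults move into the option declarations themselves, which lets the dynamically-built RunConfig class be deleted and lets consumers read options straight off the pytest config. A minimal sketch of the resulting pattern (simplified, not the full Hail conftest):

import os

import pytest


def pytest_addoption(parser):
    # The env var is resolved once, at option-registration time, so it
    # serves as the default without any intermediate config object.
    parser.addoption('--data-dir', type=str, default=os.getenv('HAIL_BENCHMARK_DIR'))


@pytest.hookimpl
def pytest_configure(config):
    # Read options directly, mirroring the init_logging(file=config.getoption('log'))
    # call in the diff above, instead of going through config.run_config.
    data_dir = config.getoption('data_dir')
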
5 changes: 3 additions & 2 deletions hail/python/benchmark/hail/benchmark_benchmark_analysis.py
@@ -1,12 +1,13 @@
import tempfile
from pathlib import Path

from benchmark.tools import benchmark
import pytest

from benchmark.tools.impex import dump_tsv, import_timings
from benchmark.tools.statistics import analyze_benchmarks


@benchmark()
@pytest.mark.benchmark()
def benchmark_analyze_benchmarks(local_tmpdir, onethreetwo, onethreethree):
inputs = (onethreetwo, onethreethree)
inputs = ((v, Path(tempfile.mktemp(dir=local_tmpdir))) for v in inputs)
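
Across these files the bespoke @benchmark() decorator from benchmark.tools gives way to a plain pytest marker. One general pytest caveat (not visible in this diff): custom markers must be registered, otherwise every @pytest.mark.benchmark() use triggers PytestUnknownMarkWarning. A hypothetical registration sketch, assuming it lives in the suite's conftest.py:

import pytest


@pytest.hookimpl
def pytest_configure(config):
    # Register the custom marker so pytest does not warn on unknown marks.
    config.addinivalue_line('markers', 'benchmark: mark a function as a benchmark')
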
24 changes: 14 additions & 10 deletions hail/python/benchmark/hail/benchmark_combiner.py
@@ -3,7 +3,8 @@
import pytest

import hail as hl
from benchmark.tools import benchmark, chunk
from benchmark.hail.utils import XFail
from benchmark.tools import chunk
from hail.vds.combiner import combine_variant_datasets, new_combiner, transform_gvcf

COMBINE_GVCF_MAX = 100
@@ -14,7 +15,8 @@ def import_vcf(path):
return hl.import_vcf(str(path), reference_genome='GRCh38', force=True)


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
@with_flags(no_ir_logging='1')
def benchmark_compile_2k_merge(empty_gvcf, tmp_path):
vcf = import_vcf(empty_gvcf)
@@ -23,29 +25,30 @@ def benchmark_compile_2k_merge(empty_gvcf, tmp_path):
hl.vds.write_variant_datasets(combined, str(tmp_path / 'combiner-multi-write'), overwrite=True)


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_python_only_10k_transform(empty_gvcf):
for vcf in [import_vcf(empty_gvcf)] * 10_000:
transform_gvcf(vcf, [])


@benchmark()
@pytest.mark.benchmark()
def benchmark_python_only_10k_combine(empty_gvcf):
vcf = import_vcf(empty_gvcf)
mt = transform_gvcf(vcf, [])
for mts in chunk(COMBINE_GVCF_MAX, [mt] * 10_000):
combine_variant_datasets(mts)


@benchmark()
@pytest.mark.benchmark()
def benchmark_import_and_transform_gvcf(single_gvcf):
mt = import_vcf(single_gvcf)
vds = transform_gvcf(mt, [])
vds.reference_data._force_count_rows()
vds.variant_data._force_count_rows()


@benchmark()
@pytest.mark.benchmark()
def benchmark_import_gvcf_force_count(single_gvcf):
mt = import_vcf(single_gvcf)
mt._force_count_rows()
@@ -60,14 +63,15 @@ def tmp_and_output_paths(tmp_path):
return (tmp, output)


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_vds_combiner_chr22(chr22_gvcfs, tmp_and_output_paths):
parts = hl.eval([hl.parse_locus_interval('chr22:start-end', reference_genome='GRCh38')])

tmp, output = tmp_and_output_paths
combiner = new_combiner(
output_path=str(tmp_and_output_paths[0]),
output_path=str(output),
intervals=parts,
temp_path=str(tmp_and_output_paths[1]),
temp_path=str(tmp),
gvcf_paths=[str(path) for path in chr22_gvcfs],
reference_genome='GRCh38',
branch_factor=16,
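
benchmark_python_only_10k_combine feeds 10,000 matrix tables through combine_variant_datasets in batches of COMBINE_GVCF_MAX. The chunk helper it imports from benchmark.tools is not shown in this commit; a plausible stand-in matching the call shape chunk(size, items) would be:

from typing import Iterator, List, Sequence, TypeVar

T = TypeVar('T')


def chunk(size: int, seq: Sequence[T]) -> Iterator[List[T]]:
    # Yield successive size-length slices of seq; the final slice may be shorter.
    for start in range(0, len(seq), size):
        yield list(seq[start:start + size])
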
29 changes: 18 additions & 11 deletions hail/python/benchmark/hail/benchmark_linalg.py
@@ -1,48 +1,53 @@
import pytest

import hail as hl
from benchmark.tools import benchmark
from benchmark.hail.utils import XFail


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_block_matrix_nested_multiply(tmp_path):
bm = hl.linalg.BlockMatrix.random(8 * 1024, 8 * 1024)
bm = bm.checkpoint(str(tmp_path / 'checkpoint.mt'))
bm = (bm @ bm) @ bm @ bm @ (bm @ bm)
bm.write(str(tmp_path / 'result.mt'), overwrite=True)


@benchmark()
@pytest.mark.benchmark()
def benchmark_make_ndarray():
ht = hl.utils.range_table(200_000)
ht = ht.annotate(x=hl.nd.array(hl.range(ht.idx)))
ht._force_count()


@benchmark()
@pytest.mark.benchmark()
def benchmark_ndarray_addition():
arr = hl.nd.ones((1024, 1024))
hl.eval(arr + arr)


@benchmark()
@pytest.mark.benchmark()
def benchmark_ndarray_matmul_int64():
arr = hl.nd.arange(1024 * 1024).map(hl.int64).reshape((1024, 1024))
hl.eval(arr @ arr)


@benchmark()
@pytest.mark.benchmark()
def benchmark_ndarray_matmul_float64():
arr = hl.nd.arange(1024 * 1024).map(hl.float64).reshape((1024, 1024))
hl.eval(arr @ arr)


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_blockmatrix_write_from_entry_expr_range_mt(tmp_path):
mt = hl.utils.range_matrix_table(40_000, 40_000, n_partitions=4)
path = str(tmp_path / 'result.bm')
hl.linalg.BlockMatrix.write_from_entry_expr(mt.row_idx + mt.col_idx, path)


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_blockmatrix_write_from_entry_expr_range_mt_standardize(tmp_path):
mt = hl.utils.range_matrix_table(40_000, 40_000, n_partitions=4)
path = str(tmp_path / 'result.bm')
@@ -51,20 +56,22 @@ def benchmark_blockmatrix_write_from_entry_expr_range_mt_standardize(tmp_path):
)


@benchmark()
@pytest.mark.benchmark()
def benchmark_sum_table_of_ndarrays():
ht = hl.utils.range_table(400).annotate(nd=hl.nd.ones((4096, 4096)))
ht.aggregate(hl.agg.ndarray_sum(ht.nd))


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_block_matrix_to_matrix_table_row_major():
mt = hl.utils.range_matrix_table(20_000, 20_000, n_partitions=4)
bm = hl.linalg.BlockMatrix.from_entry_expr(mt.row_idx + mt.col_idx)
bm.to_matrix_table_row_major()._force_count_rows()


@benchmark()
@pytest.mark.benchmark()
@pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout)
def benchmark_king(tmp_path):
mt = hl.balding_nichols_model(6, n_variants=10000, n_samples=4096)
path = str(tmp_path / 'result.mt')
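
Several of these benchmarks are now expected failures via @pytest.mark.xfail(raises=TimeoutError, reason=XFail.Timeout). Neither XFail nor the mechanism that raises TimeoutError appears in this diff (pytest's reason argument expects a string, so XFail.Timeout is presumably a str-valued constant); the --max-duration option added to conftest.py suggests a per-trial limit enforced by the harness. A hedged sketch of one common way to impose such a limit (POSIX-only, not necessarily Hail's implementation):

import signal
from contextlib import contextmanager


@contextmanager
def max_duration(seconds):
    # Arrange for SIGALRM to interrupt the trial with the TimeoutError
    # that the xfail markers above anticipate.
    def on_alarm(signum, frame):
        raise TimeoutError(f'benchmark trial exceeded {seconds}s')

    previous = signal.signal(signal.SIGALRM, on_alarm)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, previous)
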