Merge branch 'master' into update_test_setup
waltsims authored Jun 10, 2024
2 parents 2a99d6f + 30745fb commit 21c1e3b
Showing 3 changed files with 156 additions and 12 deletions.
18 changes: 7 additions & 11 deletions kwave/executor.py
@@ -1,7 +1,6 @@
import logging
import stat
import subprocess
import unittest.mock
import sys

import h5py

@@ -34,21 +33,18 @@ def run_simulation(self, input_filename: str, output_filename: str, options: str
# Stream stdout in real-time
for line in proc.stdout:
print(line, end="")
else:
stdout, stderr = proc.communicate()

stdout, stderr = proc.communicate()

proc.wait() # wait for process to finish before checking return code
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode, command, stdout, stderr)

except subprocess.CalledProcessError as e:
# Special handling for MagicMock during testing
if isinstance(e.returncode, unittest.mock.MagicMock):
logging.info("Skipping AssertionError in testing.")
else:
# This ensures stdout is printed regardless of show_sim_logs value if an error occurs
print(e.stdout)
raise
# This ensures stdout is printed regardless of show_sim_logs value if an error occurs
print(e.stdout)
print(e.stderr, file=sys.stderr)
raise

sensor_data = self.parse_executable_output(output_filename)

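Since the hunks above show only fragments of run_simulation, here is a standalone sketch of the control flow as it reads after this change. The with subprocess.Popen(...) context and the command layout are inferred from the new tests in tests/test_executor.py below; the function name run_simulation_sketch and its flat argument list are illustrative, not the actual method signature in kwave/executor.py.

import subprocess
import sys


def run_simulation_sketch(system_string: str, binary_path: str, input_filename: str, output_filename: str, options: str, show_sim_log: bool = False) -> None:
    """Approximate control flow of Executor.run_simulation after this change (sketch only)."""
    command = f"{system_string} {binary_path} -i {input_filename} -o {output_filename} {options}"
    with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True) as proc:
        try:
            if show_sim_log:
                # Stream simulation output line by line as it is produced
                for line in proc.stdout:
                    print(line, end="")

            stdout, stderr = proc.communicate()
            proc.wait()  # wait for the process to finish before checking the return code
            if proc.returncode != 0:
                raise subprocess.CalledProcessError(proc.returncode, command, stdout, stderr)
        except subprocess.CalledProcessError as e:
            # On failure, echo captured output regardless of show_sim_log, then re-raise
            print(e.stdout)
            print(e.stderr, file=sys.stderr)
            raise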
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -42,7 +42,7 @@ Bug-tracker = "https://github.com/waltsims/k-wave-python/issues"

[project.optional-dependencies]
test = ["pytest",
"coverage==7.5.2",
"coverage==7.5.3",
"phantominator",
"requests==2.32.3"]
example = ["gdown==5.2.0"]
148 changes: 148 additions & 0 deletions tests/test_executor.py
@@ -0,0 +1,148 @@
import unittest
from unittest.mock import patch, Mock, MagicMock, call
from pathlib import Path
import subprocess
import stat
import numpy as np
import io
import os

from kwave.executor import Executor
from kwave.utils.dotdictionary import dotdict


class TestExecutor(unittest.TestCase):
def setUp(self):
# Common setup for all tests
self.execution_options = Mock()
self.simulation_options = Mock()
self.execution_options.binary_path = Path("/fake/path/to/binary")
self.execution_options.system_string = "fake_system"
self.execution_options.show_sim_log = False

# Mock for stat result
self.mock_stat_result = Mock()
self.mock_stat_result.st_mode = 0o100644

# Patchers
self.patcher_stat = patch("pathlib.Path.stat", return_value=self.mock_stat_result)
self.patcher_chmod = patch("pathlib.Path.chmod")
self.patcher_popen = patch("subprocess.Popen")
self.patcher_h5py = patch("h5py.File", autospec=True)

# Start patchers
self.mock_stat = self.patcher_stat.start()
self.mock_chmod = self.patcher_chmod.start()
self.mock_popen = self.patcher_popen.start()
self.mock_h5py_file = self.patcher_h5py.start()

# Mock Popen object
self.mock_proc = MagicMock()
self.mock_proc.communicate.return_value = ("stdout content", "stderr content")
self.mock_popen.return_value.__enter__.return_value = self.mock_proc

def tearDown(self):
# Stop patchers
self.patcher_stat.stop()
self.patcher_chmod.stop()
self.patcher_popen.stop()
self.patcher_h5py.stop()

def test_make_binary_executable(self):
"""Test that the binary executable is correctly set to executable mode."""
# Instantiate the Executor
_ = Executor(self.execution_options, self.simulation_options)

# Assert chmod was called with the correct parameters
self.mock_chmod.assert_called_once_with(self.mock_stat_result.st_mode | stat.S_IEXEC)

@patch("builtins.print")
def test_run_simulation_show_sim_log(self, mock_print):
"""Test handling real-time output when show_sim_log is True."""
self.execution_options.show_sim_log = True

# Create a generator to simulate real-time output
def mock_stdout_gen():
yield "line 1\n"
yield "line 2\n"
yield "line 3\n"

# Create a mock Popen object with stdout as a generator
self.mock_proc.stdout = mock_stdout_gen()
self.mock_proc.returncode = 0

# Instantiate the Executor
executor = Executor(self.execution_options, self.simulation_options)

# Mock the parse_executable_output method
with patch.object(executor, "parse_executable_output", return_value=dotdict()):
sensor_data = executor.run_simulation("input.h5", "output.h5", "options")

# Assert that the print function was called with the expected lines
expected_calls = [call("line 1\n", end=""), call("line 2\n", end=""), call("line 3\n", end="")]
mock_print.assert_has_calls(expected_calls, any_order=False)

# Check that sensor_data is returned correctly
self.assertEqual(sensor_data, dotdict())

def test_run_simulation_success(self):
"""Test running the simulation successfully."""
self.mock_proc.returncode = 0

# Instantiate the Executor
executor = Executor(self.execution_options, self.simulation_options)

# Mock the parse_executable_output method
with patch.object(executor, "parse_executable_output", return_value=dotdict()):
sensor_data = executor.run_simulation("input.h5", "output.h5", "options")

normalized_path = os.path.normpath(self.execution_options.binary_path)
expected_command = f"{self.execution_options.system_string} " f"{normalized_path} " f"-i input.h5 " f"-o output.h5 " f"options"

self.mock_popen.assert_called_once_with(expected_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True)
self.mock_proc.communicate.assert_called_once()
self.assertEqual(sensor_data, dotdict())

def test_run_simulation_failure(self):
"""Test handling a simulation failure."""
self.mock_proc.returncode = 1

# Capture the printed output
with patch("sys.stdout", new=io.StringIO()) as mock_stdout, patch("sys.stderr", new=io.StringIO()) as mock_stderr:
# Instantiate the Executor
executor = Executor(self.execution_options, self.simulation_options)

# Mock the parse_executable_output method
with patch.object(executor, "parse_executable_output", return_value=dotdict()):
with self.assertRaises(subprocess.CalledProcessError):
executor.run_simulation("input.h5", "output.h5", "options")

# Get the printed output
stdout_output = mock_stdout.getvalue()
stderr_output = mock_stderr.getvalue()

# Assert that stdout and stderr are printed
self.assertIn("stdout content", stdout_output)
self.assertIn("stderr content", stderr_output)

def test_parse_executable_output(self):
"""Test parsing the executable output correctly."""
# Set up the mock for h5py.File
mock_file = self.mock_h5py_file.return_value.__enter__.return_value
mock_file.keys.return_value = ["data"]
mock_dataset = MagicMock()
mock_file.__getitem__.return_value = mock_dataset

# Mock the squeeze method on the mock dataset
mock_dataset.__getitem__.return_value.squeeze.return_value = np.ones((10, 10))

# Call the method with a fake file path
result = Executor.parse_executable_output("/fake/output.h5")

self.mock_h5py_file.assert_called_once_with("/fake/output.h5", "r")
mock_file.keys.assert_called_once()
mock_file.__getitem__.assert_called_once_with("/data")
mock_dataset.__getitem__.assert_called_once_with(0)
mock_dataset.__getitem__.return_value.squeeze.assert_called_once()
self.assertIn("data", result)
self.assertTrue(np.all(result["data"] == np.ones((10, 10))))
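
The assertions in test_parse_executable_output imply a particular access pattern for Executor.parse_executable_output: open the output file read-only, iterate hf.keys(), index each dataset at 0, and squeeze it. Below is a minimal sketch consistent with those mocks; it assumes a dotdict result container and is inferred from the test, not copied from the real implementation in kwave/executor.py.

import h5py

from kwave.utils.dotdictionary import dotdict


def parse_executable_output_sketch(output_filename: str) -> dotdict:
    """Collect every dataset from the simulation output file (sketch inferred from the test above)."""
    sensor_data = dotdict()
    with h5py.File(output_filename, "r") as output_file:
        for key in output_file.keys():
            # Matches the asserted calls: output_file["/data"][0].squeeze()
            sensor_data[key] = output_file[f"/{key}"][0].squeeze()
    return sensor_data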
