
Merge branch 'ershi/ruff-0.9.0' into 'main'
Update Ruff to v0.9.0

See merge request omniverse/warp!965
shi-eric committed Jan 9, 2025
2 parents a1a2980 + 6f8b990 commit f57e9fd
Showing 22 changed files with 59 additions and 59 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -13,7 +13,7 @@ ci:
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.8.0
rev: v0.9.0
hooks:
# Run the linter.
- id: ruff
2 changes: 1 addition & 1 deletion docs/requirements.txt
@@ -1,6 +1,6 @@
sphinx==8.0.2
sphinx_copybutton==0.5.2
numpy==2.1.1
ruff==0.8.0
ruff==0.9.0
myst-parser==4.0.0
nvidia-sphinx-theme==0.0.5.post1
2 changes: 1 addition & 1 deletion exts/omni.warp/omni/warp/nodes/_impl/OgnMeshFromVolume.py
@@ -113,7 +113,7 @@ def compute(db: OgnMeshFromVolumeDatabase) -> None:

if db.inputs.data.shape[0] != size:
raise RuntimeError(
"The length of the input array data doesn't match with " "the given size: `{} != {}`.".format(
"The length of the input array data doesn't match with the given size: `{} != {}`.".format(
db.inputs.data.shape[0], size
)
)
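
The hunk above is typical of most changes in this merge request: Ruff 0.9's formatter now joins implicitly concatenated string literals when the result still fits within the line limit, so adjacent fragments are merged into a single literal. Below is a minimal, self-contained sketch of the rewrite; the variable names are illustrative rather than taken from the Warp sources, and both spellings produce the same string.

# Illustrative values, not from the Warp node.
actual_size, expected_size = 3, 5

# Ruff 0.8-era layout: the message is split into implicitly concatenated fragments.
message_old = (
    "The length of the input array data doesn't match with "
    "the given size: `{} != {}`.".format(actual_size, expected_size)
)

# Ruff 0.9 layout: adjacent literals are joined when the line still fits.
message_new = "The length of the input array data doesn't match with the given size: `{} != {}`.".format(
    actual_size, expected_size
)

assert message_old == message_new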
7 changes: 4 additions & 3 deletions exts/omni.warp/omni/warp/nodes/_impl/attributes.py
@@ -255,7 +255,7 @@ def from_data_wrapper(
if shape is None:
if arr_size % element_size != 0:
raise RuntimeError(
"Cannot infer a size matching the Warp data type '{}' with " "an array size of '{}' bytes.".format(
"Cannot infer a size matching the Warp data type '{}' with an array size of '{}' bytes.".format(
dtype.__name__, arr_size
)
)
@@ -316,8 +316,9 @@ def from_attr_data(
if shape is None:
if arr_size % element_size != 0:
raise RuntimeError(
"Cannot infer a size matching the Warp data type '{}' with "
"an array size of '{}' bytes.".format(dtype.__name__, arr_size)
"Cannot infer a size matching the Warp data type '{}' with an array size of '{}' bytes.".format(
dtype.__name__, arr_size
)
)
shape = (arr_size // element_size,)

5 changes: 3 additions & 2 deletions exts/omni.warp/omni/warp/nodes/_impl/kernel.py
@@ -529,8 +529,9 @@ def initialize_kernel_module(
invalid_attrs = tuple(x.name for x in attr_infos[_ATTR_PORT_TYPE_OUTPUT] if not x.is_array and not x.is_bundle)
if invalid_attrs:
raise RuntimeError(
"Output attributes are required to be arrays or bundles but "
"the following attributes are not: {}.".format(", ".join(invalid_attrs))
"Output attributes are required to be arrays or bundles but the following attributes are not: {}.".format(
", ".join(invalid_attrs)
)
)

# Retrieve the kernel code to evaluate.
6 changes: 3 additions & 3 deletions warp/codegen.py
@@ -282,9 +282,9 @@ def __setattr__(self, name, value):
else:
# wp.array
assert isinstance(value, array)
assert types_equal(
value.dtype, var.type.dtype
), f"assign to struct member variable {name} failed, expected type {type_repr(var.type.dtype)}, got type {type_repr(value.dtype)}"
assert types_equal(value.dtype, var.type.dtype), (
f"assign to struct member variable {name} failed, expected type {type_repr(var.type.dtype)}, got type {type_repr(value.dtype)}"
)
setattr(self._ctype, name, value.__ctype__())

elif isinstance(var.type, Struct):
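The codegen.py hunk above (and the larger render_opengl.py hunk further down) shows the second recurring rewrite: Ruff 0.9 changed how long assert statements are wrapped, keeping the condition intact on one line and wrapping only the message in parentheses instead of splitting the condition's call arguments. A small self-contained sketch of the two layouts follows; every name in it is illustrative rather than taken from Warp.

def types_equal(a, b):
    # Stand-in for Warp's real type comparison; plain identity is enough for the sketch.
    return a is b

value_dtype = expected_dtype = float
name = "positions"

# Ruff 0.8-era layout: the condition call itself was split across lines.
assert types_equal(
    value_dtype, expected_dtype
), f"assign to struct member variable {name} failed"

# Ruff 0.9 layout: the condition stays on one line and the message is parenthesized.
assert types_equal(value_dtype, expected_dtype), (
    f"assign to struct member variable {name} failed"
)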
6 changes: 3 additions & 3 deletions warp/examples/benchmarks/benchmark_interop_paddle.py
@@ -58,7 +58,7 @@ def test_from_paddle(kernel, num_iters, array_size, device, warp_dtype=None):
wp.launch(kernel, dim=array_size, inputs=[a, b, c, d, e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms from_paddle(...)")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms from_paddle(...)")

# profiler.stop()
# profiler.print()
@@ -97,7 +97,7 @@ def test_array_ctype_from_paddle(kernel, num_iters, array_size, device, warp_dty
wp.launch(kernel, dim=array_size, inputs=[a, b, c, d, e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms from_paddle(..., return_ctype=True)")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms from_paddle(..., return_ctype=True)")

# profiler.stop()
# profiler.print()
@@ -131,7 +131,7 @@ def test_direct_from_paddle(kernel, num_iters, array_size, device, warp_dtype=No
wp.launch(kernel, dim=array_size, inputs=[_a, _b, _c, _d, _e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms direct from paddle")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms direct from paddle")

# profiler.stop()
# profiler.print()
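The three benchmark hunks above (and the matching torch benchmark below, plus warp/utils.py near the end) are all the same one-character cleanup: Ruff 0.9 formats the contents of f-string replacement fields and removes the stray space that used to sit between the expression and the format specifier. The rendered text is identical either way, as this small sketch shows; the 42 ms figure is made up for illustration.

import time

t1 = time.time_ns()
t2 = t1 + 42_000_000  # pretend 42 ms elapsed

# Ruff 0.8 tolerated a space before the format spec inside the braces.
old_style = f"{(t2 - t1) / 1_000_000 :8.0f} ms"

# Ruff 0.9 removes it; the expression and its output are unchanged.
new_style = f"{(t2 - t1) / 1_000_000:8.0f} ms"

assert old_style == new_style  # both render as "      42 ms"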
6 changes: 3 additions & 3 deletions warp/examples/benchmarks/benchmark_interop_torch.py
@@ -58,7 +58,7 @@ def test_from_torch(kernel, num_iters, array_size, device, warp_dtype=None):
wp.launch(kernel, dim=array_size, inputs=[a, b, c, d, e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms from_torch(...)")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms from_torch(...)")

# profiler.stop()
# profiler.print()
@@ -97,7 +97,7 @@ def test_array_ctype_from_torch(kernel, num_iters, array_size, device, warp_dtyp
wp.launch(kernel, dim=array_size, inputs=[a, b, c, d, e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms from_torch(..., return_ctype=True)")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms from_torch(..., return_ctype=True)")

# profiler.stop()
# profiler.print()
@@ -131,7 +131,7 @@ def test_direct_from_torch(kernel, num_iters, array_size, device, warp_dtype=Non
wp.launch(kernel, dim=array_size, inputs=[_a, _b, _c, _d, _e])

t2 = time.time_ns()
print(f"{(t2 - t1) / 1_000_000 :8.0f} ms direct from torch")
print(f"{(t2 - t1) / 1_000_000:8.0f} ms direct from torch")

# profiler.stop()
# profiler.print()
2 changes: 1 addition & 1 deletion warp/examples/fem/example_mixed_elasticity.py
@@ -236,7 +236,7 @@ def step(self):
final_area = fem.integrate(
area_form, quadrature=fem.RegularQuadrature(domain, order=4), fields={"u_cur": self._u_field}
)
print(f"Area gain: {final_area} (using Poisson ratio={self._lame[0] / (self._lame[0] + 2.0*self._lame[1])})")
print(f"Area gain: {final_area} (using Poisson ratio={self._lame[0] / (self._lame[0] + 2.0 * self._lame[1])})")

def render(self):
self.renderer.add_field("solution", self._u_field)
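The example_mixed_elasticity.py hunk above, and the traj_{self.iter - 1}, {self.target_idx + 1}, and Run: {i + 1} changes in the files that follow, are another side of the same f-string formatting: expressions inside replacement fields are now formatted like ordinary code, so binary operators get surrounding spaces. The produced string is unchanged; only the source text differs. A tiny sketch with an illustrative variable name:

iteration = 7  # illustrative value, not from the Warp examples

# Ruff 0.8 left expressions inside f-strings untouched.
old_style = f"traj_{iteration-1}"

# Ruff 0.9 formats them like any other expression.
new_style = f"traj_{iteration - 1}"

assert old_style == new_style == "traj_6"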
2 changes: 1 addition & 1 deletion warp/examples/optim/example_bounce.py
@@ -169,7 +169,7 @@ def render(self):
vertices=traj_verts,
color=wp.render.bourke_color_map(0.0, 7.0, self.loss.numpy()[0]),
radius=0.02,
name=f"traj_{self.iter-1}",
name=f"traj_{self.iter - 1}",
)
self.renderer.end_frame()

2 changes: 1 addition & 1 deletion warp/examples/optim/example_cloth_throw.py
@@ -184,7 +184,7 @@ def render(self):
vertices=traj_verts,
color=wp.render.bourke_color_map(0.0, 269.0, self.loss.numpy()[0]),
radius=0.02,
name=f"traj_{self.iter-1}",
name=f"traj_{self.iter - 1}",
)
self.renderer.end_frame()

2 changes: 1 addition & 1 deletion warp/examples/optim/example_drone.py
@@ -702,7 +702,7 @@ def step_optimizer(self):
def step(self):
if self.frame % int((self.num_frames / len(self.targets))) == 0:
if self.verbose:
print(f"Choosing new flight target: {self.target_idx+1}")
print(f"Choosing new flight target: {self.target_idx + 1}")

self.target_idx += 1
self.target_idx %= len(self.targets)
2 changes: 1 addition & 1 deletion warp/examples/optim/example_softbody_properties.py
@@ -331,7 +331,7 @@ def render(self):
vertices=traj_verts,
color=wp.render.bourke_color_map(0.0, self.losses[0], self.losses[-1]),
radius=0.02,
name=f"traj_{self.iter-1}",
name=f"traj_{self.iter - 1}",
)
self.renderer.end_frame()

2 changes: 1 addition & 1 deletion warp/examples/optim/example_trajectory.py
@@ -153,7 +153,7 @@ def step(self):
tape.backward(loss=self.loss)

if self.verbose and (self.iter + 1) % 10 == 0:
print(f"Iter {self.iter+1} Loss: {self.loss.numpy()[0]:.3f}")
print(f"Iter {self.iter + 1} Loss: {self.loss.numpy()[0]:.3f}")

assert not np.isnan(self.actions.grad.numpy()).any(), "NaN in gradient"

41 changes: 20 additions & 21 deletions warp/render/render_opengl.py
@@ -1553,9 +1553,9 @@ def setup_tiled_rendering(
if rescale_window:
self.window.set_size(self._tile_width * self._tile_ncols, self._tile_height * self._tile_nrows)
else:
assert (
len(tile_positions) == n and len(tile_sizes) == n
), "Number of tiles does not match number of instances."
assert len(tile_positions) == n and len(tile_sizes) == n, (
"Number of tiles does not match number of instances."
)
self._tile_ncols = None
self._tile_nrows = None
self._tile_width = None
@@ -2557,24 +2557,23 @@ def get_pixels(self, target_image: wp.array, split_up_tiles=True, mode="rgb", us
channels = 3 if mode == "rgb" else 1

if split_up_tiles:
assert (
self._tile_width is not None and self._tile_height is not None
), "Tile width and height are not set, tiles must all have the same size"
assert all(
vp[2] == self._tile_width for vp in self._tile_viewports
), "Tile widths do not all equal global tile_width, use `get_tile_pixels` instead to retrieve pixels for a single tile"
assert all(
vp[3] == self._tile_height for vp in self._tile_viewports
), "Tile heights do not all equal global tile_height, use `get_tile_pixels` instead to retrieve pixels for a single tile"
assert (
target_image.shape
== (
self.num_tiles,
self._tile_height,
self._tile_width,
channels,
)
), f"Shape of `target_image` array does not match {self.num_tiles} x {self._tile_height} x {self._tile_width} x {channels}"
assert self._tile_width is not None and self._tile_height is not None, (
"Tile width and height are not set, tiles must all have the same size"
)
assert all(vp[2] == self._tile_width for vp in self._tile_viewports), (
"Tile widths do not all equal global tile_width, use `get_tile_pixels` instead to retrieve pixels for a single tile"
)
assert all(vp[3] == self._tile_height for vp in self._tile_viewports), (
"Tile heights do not all equal global tile_height, use `get_tile_pixels` instead to retrieve pixels for a single tile"
)
assert target_image.shape == (
self.num_tiles,
self._tile_height,
self._tile_width,
channels,
), (
f"Shape of `target_image` array does not match {self.num_tiles} x {self._tile_height} x {self._tile_width} x {channels}"
)
else:
assert target_image.shape == (
self.screen_height,
2 changes: 1 addition & 1 deletion warp/sim/model.py
@@ -2523,7 +2523,7 @@ def dfs(parent_body: int, child_body: int, incoming_xform: wp.transform, last_dy
last_dynamic_body_name = self.body_name[last_dynamic_body] if last_dynamic_body > -1 else "world"
if verbose:
print(
f'Remove fixed joint {joint["name"]} between {parent_name} and {child_name}, '
f"Remove fixed joint {joint['name']} between {parent_name} and {child_name}, "
f"merging {child_name} into {last_dynamic_body_name}"
)
child_id = body_data[child_body]["original_id"]
2 changes: 1 addition & 1 deletion warp/tape.py
@@ -514,7 +514,7 @@ def emit_kernel_launch_node(
node_attrs = f"label=<{label}>"
if "caller" in launch_data:
caller = launch_data["caller"]
node_attrs += f",tooltip=\"{self.sanitize(caller['file'])}:{caller['lineno']} ({caller['func']})\""
node_attrs += f',tooltip="{self.sanitize(caller["file"])}:{caller["lineno"]} ({caller["func"]})"'

self.graphviz_lines.append(f"{chart_indent}{kernel_launch_id} [{node_attrs}];")

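The model.py and tape.py hunks above, together with unittest_parallel.py below, show the last family of changes: quote normalization inside f-strings. As far as can be told from this diff, Ruff 0.9 prefers double quotes for the f-string itself, falls back to single outer quotes when the literal text contains double quotes (avoiding backslash escapes), and re-quotes string literals used inside replacement-field expressions so they do not clash with the outer delimiter. A hedged sketch of the tape.py pattern, using an illustrative dictionary rather than Warp's actual launch data:

# Illustrative data only, not Warp's real caller record.
caller = {"file": "example.py", "lineno": 12, "func": "main"}

# Ruff 0.8-era spelling: double-quoted f-string, so the embedded quotes must be
# escaped and the subscript keys fall back to single quotes.
old_style = f",tooltip=\"{caller['file']}:{caller['lineno']} ({caller['func']})\""

# Ruff 0.9 spelling: single-quoted f-string, no escapes needed, and the keys can
# use the preferred double quotes.
new_style = f',tooltip="{caller["file"]}:{caller["lineno"]} ({caller["func"]})"'

assert old_style == new_style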
2 changes: 1 addition & 1 deletion warp/tests/test_hash_grid.py
@@ -85,7 +85,7 @@ def test_hashgrid_query(test, device):

for i in range(num_runs):
if print_enabled:
print(f"Run: {i+1}")
print(f"Run: {i + 1}")
print("---------")

points = particle_grid(16, 32, 16, (0.0, 0.3, 0.0), cell_radius * 0.25, 0.1)
4 changes: 2 additions & 2 deletions warp/tests/test_utils.py
@@ -280,7 +280,7 @@ def test_warn(self):
wp.utils.warn("hello, world!")
wp.utils.warn("hello, world!")

expected = "Warp UserWarning: hello, world!\n" "Warp UserWarning: hello, world!\n"
expected = "Warp UserWarning: hello, world!\nWarp UserWarning: hello, world!\n"

self.assertEqual(f.getvalue(), expected)

@@ -320,7 +320,7 @@ def test_warn(self):
wp.utils.warn("foo", category=DeprecationWarning)
wp.utils.warn("bar", category=DeprecationWarning)

expected = "Warp DeprecationWarning: foo\n" "Warp DeprecationWarning: bar\n"
expected = "Warp DeprecationWarning: foo\nWarp DeprecationWarning: bar\n"

self.assertEqual(f.getvalue(), expected)

4 changes: 2 additions & 2 deletions warp/thirdparty/unittest_parallel.py
@@ -306,9 +306,9 @@ def main(argv=None):

# Test report
print(unittest.TextTestResult.separator2, file=sys.stderr)
print(f'Ran {tests_run} {"tests" if tests_run > 1 else "test"} in {test_duration:.3f}s', file=sys.stderr)
print(f"Ran {tests_run} {'tests' if tests_run > 1 else 'test'} in {test_duration:.3f}s", file=sys.stderr)
print(file=sys.stderr)
print(f'{"OK" if is_success else "FAILED"}{" (" + ", ".join(infos) + ")" if infos else ""}', file=sys.stderr)
print(f"{'OK' if is_success else 'FAILED'}{' (' + ', '.join(infos) + ')' if infos else ''}", file=sys.stderr)

if test_records and args.junit_report_xml:
# NVIDIA modification to report results in Junit XML format
5 changes: 2 additions & 3 deletions warp/types.py
@@ -171,8 +171,7 @@ def __setitem__(self, key, value):
iter(value)
except TypeError:
raise TypeError(
f"Expected to assign a slice from a sequence of values "
f"but got `{type(value).__name__}` instead"
f"Expected to assign a slice from a sequence of values but got `{type(value).__name__}` instead"
) from None

if self._wp_scalar_type_ == float16:
@@ -419,7 +418,7 @@ def set_row(self, r, v):
iter(v)
except TypeError:
raise TypeError(
f"Expected to assign a slice from a sequence of values " f"but got `{type(v).__name__}` instead"
f"Expected to assign a slice from a sequence of values but got `{type(v).__name__}` instead"
) from None

row_start = r * self._shape_[1]
10 changes: 5 additions & 5 deletions warp/utils.py
@@ -782,9 +782,9 @@ def __exit__(self, exc_type, exc_value, traceback):
print()

if self.extra_msg:
print(f"{indent}{self.name} took {self.elapsed :.2f} ms {self.extra_msg}")
print(f"{indent}{self.name} took {self.elapsed:.2f} ms {self.extra_msg}")
else:
print(f"{indent}{self.name} took {self.elapsed :.2f} ms")
print(f"{indent}{self.name} took {self.elapsed:.2f} ms")

ScopedTimer.indent -= 1

@@ -1045,20 +1045,20 @@ def __init__(self, count=0, elapsed=0):
activity_agg.count += 1
activity_agg.elapsed += r.elapsed

print(f"{indent}{r.elapsed :12.6f} ms | {r.device.alias :7s} | {r.name}")
print(f"{indent}{r.elapsed:12.6f} ms | {r.device.alias:7s} | {r.name}")

print()
print(f"{indent}CUDA activity summary:")
print(f"{indent}----------------+---------+{activity_dashes}")
print(f"{indent}Total time | Count | Activity")
print(f"{indent}----------------+---------+{activity_dashes}")
for name, agg in activity_totals.items():
print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {name}")
print(f"{indent}{agg.elapsed:12.6f} ms | {agg.count:7d} | {name}")

print()
print(f"{indent}CUDA device summary:")
print(f"{indent}----------------+---------+{activity_dashes}")
print(f"{indent}Total time | Count | Device")
print(f"{indent}----------------+---------+{activity_dashes}")
for device, agg in device_totals.items():
print(f"{indent}{agg.elapsed :12.6f} ms | {agg.count :7d} | {device}")
print(f"{indent}{agg.elapsed:12.6f} ms | {agg.count:7d} | {device}")
