diff --git a/.github/workflows/mirror-buildspec.yml b/.github/workflows/mirror-buildspec.yml new file mode 100644 index 0000000000..a537f5bfd9 --- /dev/null +++ b/.github/workflows/mirror-buildspec.yml @@ -0,0 +1,15 @@ +version: 0.2 + +phases: + install: + runtime-versions: + nodejs: 14 + pre_build: + commands: + - git config --global user.name "Isaac LAB CI Bot" + - git config --global user.email "isaac-lab-ci-bot@nvidia.com" + build: + commands: + - git remote set-url origin https://github.com/${TARGET_REPO}.git + - git checkout $SOURCE_BRANCH + - git push --force https://$GITHUB_TOKEN@github.com/${TARGET_REPO}.git $SOURCE_BRANCH:$TARGET_BRANCH diff --git a/.github/workflows/postmerge-ci-buildspec.yml b/.github/workflows/postmerge-ci-buildspec.yml index 94201516bf..e071d7db09 100644 --- a/.github/workflows/postmerge-ci-buildspec.yml +++ b/.github/workflows/postmerge-ci-buildspec.yml @@ -3,10 +3,42 @@ version: 0.2 phases: build: commands: - - echo "Building a docker image" - - docker login -u \$oauthtoken -p $NGC_TOKEN nvcr.io - - docker build -t $IMAGE_NAME:latest-1.2 --build-arg ISAACSIM_VERSION_ARG=4.2.0 --build-arg ISAACSIM_ROOT_PATH_ARG=/isaac-sim --build-arg ISAACLAB_PATH_ARG=/workspace/isaaclab --build-arg DOCKER_USER_HOME_ARG=/root -f docker/Dockerfile.base . - - echo "Pushing the docker image" - - docker push $IMAGE_NAME:latest-1.2 - - docker tag $IMAGE_NAME:latest-1.2 $IMAGE_NAME:latest-1.2-b$CODEBUILD_BUILD_NUMBER - - docker push $IMAGE_NAME:latest-1.2-b$CODEBUILD_BUILD_NUMBER + - echo "Building and pushing Docker image" + - | + # Determine branch name or use fallback + if [ -n "$CODEBUILD_WEBHOOK_HEAD_REF" ]; then + BRANCH_NAME=$(echo $CODEBUILD_WEBHOOK_HEAD_REF | sed 's/refs\/heads\///') + elif [ -n "$CODEBUILD_SOURCE_VERSION" ]; then + BRANCH_NAME=$CODEBUILD_SOURCE_VERSION + else + BRANCH_NAME="unknown" + fi + + # Replace '/' with '-' and remove any invalid characters for Docker tag + SAFE_BRANCH_NAME=$(echo $BRANCH_NAME | sed 's/[^a-zA-Z0-9._-]/-/g') + + # Use "latest" if branch name is empty or only contains invalid characters + if [ -z "$SAFE_BRANCH_NAME" ]; then + SAFE_BRANCH_NAME="latest" + fi + + # Get the git repository short name + REPO_SHORT_NAME=$(basename -s .git `git config --get remote.origin.url`) + if [ -z "$REPO_SHORT_NAME" ]; then + REPO_SHORT_NAME="verification" + fi + + # Combine repo short name and branch name for the tag + COMBINED_TAG="${REPO_SHORT_NAME}-${SAFE_BRANCH_NAME}" + + docker login -u \$oauthtoken -p $NGC_TOKEN nvcr.io + docker build -t $IMAGE_NAME:$COMBINED_TAG \ + --build-arg ISAACSIM_BASE_IMAGE_ARG=$ISAACSIM_BASE_IMAGE \ + --build-arg ISAACSIM_VERSION_ARG=4.2.0 \ + --build-arg ISAACSIM_ROOT_PATH_ARG=/isaac-sim \ + --build-arg ISAACLAB_PATH_ARG=/workspace/isaaclab \ + --build-arg DOCKER_USER_HOME_ARG=/root \ + -f docker/Dockerfile.base . 
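+        # Push the branch-derived tag, then re-tag it with the CodeBuild build number
+        # so every build stays individually addressable. For example (illustrative
+        # values only): repo "IsaacLab" and branch "feature/foo" yield the tags
+        # "IsaacLab-feature-foo" and "IsaacLab-feature-foo-b42".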
+ docker push $IMAGE_NAME:$COMBINED_TAG + docker tag $IMAGE_NAME:$COMBINED_TAG $IMAGE_NAME:$COMBINED_TAG-b$CODEBUILD_BUILD_NUMBER + docker push $IMAGE_NAME:$COMBINED_TAG-b$CODEBUILD_BUILD_NUMBER diff --git a/.github/workflows/premerge-ci-buildspec.yml b/.github/workflows/premerge-ci-buildspec.yml index bed8a5cf2e..4fa2372b4e 100644 --- a/.github/workflows/premerge-ci-buildspec.yml +++ b/.github/workflows/premerge-ci-buildspec.yml @@ -4,15 +4,39 @@ phases: pre_build: commands: - echo "Launching EC2 instance to run tests" - - INSTANCE_ID=$(aws ec2 run-instances --image-id ami-0f7f7fb14ee15d5ae --count 1 --instance-type g5.2xlarge --key-name production/ssh/isaaclab --security-group-ids sg-02617e4b8916794c4 --subnet-id subnet-0907ceaeb40fd9eac --block-device-mappings 'DeviceName=/dev/sda1,Ebs={VolumeSize=500}' --output text --query 'Instances[0].InstanceId') + - | + INSTANCE_ID=$(aws ec2 run-instances \ + --image-id ami-0b3a9d48380433e49 \ + --count 1 \ + --instance-type g5.2xlarge \ + --key-name production/ssh/isaaclab \ + --security-group-ids sg-02617e4b8916794c4 \ + --subnet-id subnet-0907ceaeb40fd9eac \ + --block-device-mappings '[{"DeviceName":"/dev/sda1","Ebs":{"VolumeSize":500}}]' \ + --output text \ + --query 'Instances[0].InstanceId') - aws ec2 wait instance-running --instance-ids $INSTANCE_ID - - EC2_INSTANCE_IP=$(aws ec2 describe-instances --filters "Name=instance-state-name,Values=running" "Name=instance-id,Values=$INSTANCE_ID" --query 'Reservations[*].Instances[*].[PrivateIpAddress]' --output text) + - | + EC2_INSTANCE_IP=$(aws ec2 describe-instances \ + --filters "Name=instance-state-name,Values=running" "Name=instance-id,Values=$INSTANCE_ID" \ + --query 'Reservations[*].Instances[*].[PrivateIpAddress]' \ + --output text) - mkdir -p ~/.ssh - - aws ec2 describe-key-pairs --include-public-key --key-name production/ssh/isaaclab --query 'KeyPairs[0].PublicKey' --output text > ~/.ssh/id_rsa.pub - - aws secretsmanager get-secret-value --secret-id production/ssh/isaaclab --query SecretString --output text > ~/.ssh/id_rsa + - | + aws ec2 describe-key-pairs --include-public-key --key-name production/ssh/isaaclab \ + --query 'KeyPairs[0].PublicKey' --output text > ~/.ssh/id_rsa.pub + - | + aws secretsmanager get-secret-value --secret-id production/ssh/isaaclab \ + --query SecretString --output text > ~/.ssh/id_rsa - chmod 400 ~/.ssh/id_* - echo "Host $EC2_INSTANCE_IP\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config - - aws ec2-instance-connect send-ssh-public-key --instance-id $INSTANCE_ID --availability-zone us-west-2a --ssh-public-key file://~/.ssh/id_rsa.pub --instance-os-user ubuntu + - | + aws ec2-instance-connect send-ssh-public-key \ + --instance-id $INSTANCE_ID \ + --availability-zone us-west-2a \ + --ssh-public-key file://~/.ssh/id_rsa.pub \ + --instance-os-user ubuntu + build: commands: - echo "Running tests on EC2 instance" @@ -40,10 +64,20 @@ phases: retry_scp ' - ssh ubuntu@$EC2_INSTANCE_IP "docker login -u \\\$oauthtoken -p $NGC_TOKEN nvcr.io" - - ssh ubuntu@$EC2_INSTANCE_IP "cd $SRC_DIR; - DOCKER_BUILDKIT=1 docker build -t isaac-lab-dev --build-arg ISAACSIM_VERSION_ARG=4.2.0 --build-arg ISAACSIM_ROOT_PATH_ARG=/isaac-sim --build-arg ISAACLAB_PATH_ARG=/workspace/isaaclab --build-arg DOCKER_USER_HOME_ARG=/root -f docker/Dockerfile.base . ; - docker run --rm --entrypoint bash --gpus all --network=host --name isaac-lab-test isaac-lab-dev ./isaaclab.sh -t && - exit $?" 
+ - | + ssh ubuntu@$EC2_INSTANCE_IP " + cd $SRC_DIR + DOCKER_BUILDKIT=1 docker build -t isaac-lab-dev \ + --build-arg ISAACSIM_BASE_IMAGE_ARG=$ISAACSIM_BASE_IMAGE \ + --build-arg ISAACSIM_VERSION_ARG=4.2.0 \ + --build-arg ISAACSIM_ROOT_PATH_ARG=/isaac-sim \ + --build-arg ISAACLAB_PATH_ARG=/workspace/isaaclab \ + --build-arg DOCKER_USER_HOME_ARG=/root \ + -f docker/Dockerfile.base . + docker run --rm --entrypoint bash --gpus all --network=host \ + --name isaac-lab-test isaac-lab-dev ./isaaclab.sh -t && exit \$? + " + post_build: commands: - echo "Terminating EC2 instance" diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 6061385a94..26503d4e06 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -49,6 +49,7 @@ Guidelines for modifications: * Johnson Sun * Kaixi Bao * Kourosh Darvish +* Lionel Gulich * Lorenz Wellhausen * Masoud Moghani * Michael Gussert diff --git a/docker/.env.base b/docker/.env.base index cb5de785b7..0ec3332df3 100644 --- a/docker/.env.base +++ b/docker/.env.base @@ -4,6 +4,8 @@ # Accept the NVIDIA Omniverse EULA by default ACCEPT_EULA=Y +# NVIDIA Isaac Sim base image +ISAACSIM_BASE_IMAGE=nvcr.io/nvidia/isaac-sim # NVIDIA Isaac Sim version to use (e.g. 4.2.0) ISAACSIM_VERSION=4.2.0 # Derived from the default path in the NVIDIA provided Isaac Sim container diff --git a/docker/Dockerfile.base b/docker/Dockerfile.base index 8a6c7250b4..7bdb040332 100644 --- a/docker/Dockerfile.base +++ b/docker/Dockerfile.base @@ -7,8 +7,9 @@ # Please check above link for license information. # Base image +ARG ISAACSIM_BASE_IMAGE_ARG ARG ISAACSIM_VERSION_ARG -FROM nvcr.io/nvidia/isaac-sim:${ISAACSIM_VERSION_ARG} AS base +FROM ${ISAACSIM_BASE_IMAGE_ARG}:${ISAACSIM_VERSION_ARG} AS base ENV ISAACSIM_VERSION=${ISAACSIM_VERSION_ARG} # Set default RUN shell to bash diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index baf531bdfd..5e694040b2 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -79,6 +79,7 @@ services: context: ../ dockerfile: docker/Dockerfile.base args: + - ISAACSIM_BASE_IMAGE_ARG=${ISAACSIM_BASE_IMAGE} - ISAACSIM_VERSION_ARG=${ISAACSIM_VERSION} - ISAACSIM_ROOT_PATH_ARG=${DOCKER_ISAACSIM_ROOT_PATH} - ISAACLAB_PATH_ARG=${DOCKER_ISAACLAB_PATH} diff --git a/docs/source/_static/demos/multi_asset.jpg b/docs/source/_static/demos/multi_asset.jpg new file mode 100644 index 0000000000..c7124d20e6 Binary files /dev/null and b/docs/source/_static/demos/multi_asset.jpg differ diff --git a/docs/source/api/lab/omni.isaac.lab.sim.spawners.rst b/docs/source/api/lab/omni.isaac.lab.sim.spawners.rst index 6e3ca9aa9e..a1c073d4c2 100644 --- a/docs/source/api/lab/omni.isaac.lab.sim.spawners.rst +++ b/docs/source/api/lab/omni.isaac.lab.sim.spawners.rst @@ -13,6 +13,7 @@ sensors from_files materials + wrappers .. rubric:: Classes @@ -302,3 +303,27 @@ Physical Materials .. autoclass:: DeformableBodyMaterialCfg :members: :exclude-members: __init__, func + +Wrappers +-------- + +.. automodule:: omni.isaac.lab.sim.spawners.wrappers + + .. rubric:: Classes + + .. autosummary:: + + MultiAssetSpawnerCfg + MultiUsdFileCfg + +.. autofunction:: spawn_multi_asset + +.. autoclass:: MultiAssetSpawnerCfg + :members: + :exclude-members: __init__, func + +.. autofunction:: spawn_multi_usd_file + +.. 
autoclass:: MultiUsdFileCfg + :members: + :exclude-members: __init__, func diff --git a/docs/source/how-to/estimate_how_many_cameras_can_run.rst b/docs/source/how-to/estimate_how_many_cameras_can_run.rst index 6835f0a422..41238dcbe6 100644 --- a/docs/source/how-to/estimate_how_many_cameras_can_run.rst +++ b/docs/source/how-to/estimate_how_many_cameras_can_run.rst @@ -23,13 +23,13 @@ numbers of cameras that can run in your task environment up to a certain specified system resource utilization threshold (without training; taking zero actions at each timestep). -This guide accompanies the ``benchmark_cameras.py`` script in the ``IsaacLab/source/standalone/tutorials/04_sensors`` +This guide accompanies the ``benchmark_cameras.py`` script in the ``source/standalone/benchmarks`` directory. .. dropdown:: Code for benchmark_cameras.py :icon: code - .. literalinclude:: ../../../source/standalone/tutorials/04_sensors/benchmark_cameras.py + .. literalinclude:: ../../../source/standalone/benchmarks/benchmark_cameras.py :language: python :linenos: @@ -41,7 +41,7 @@ First, run .. code-block:: bash - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py -h + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py -h to see all possible parameters you can vary with this utility. @@ -61,7 +61,7 @@ only in RGB mode, run .. code-block:: bash - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py \ + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py \ --task Isaac-Cartpole-v0 --num_tiled_cameras 100 \ --task_num_cameras_per_env 2 \ --tiled_camera_data_types rgb @@ -74,7 +74,7 @@ you can run with cartpole, you could run: .. code-block:: bash - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py \ + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py \ --task Isaac-Cartpole-v0 --num_tiled_cameras 100 \ --task_num_cameras_per_env 2 \ --tiled_camera_data_types rgb --autotune \ @@ -97,7 +97,7 @@ For example, to view 100 random objects with 2 standard cameras, one could run .. code-block:: bash - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py \ + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py \ --height 100 --width 100 --num_standard_cameras 2 \ --standard_camera_data_types instance_segmentation_fast normals --num_objects 100 \ --experiment_length 100 @@ -118,4 +118,4 @@ If your system has a hard time handling the desired cameras, you can try the fol - Decrease the number of objects in the scene If your system is able to handle the amount of cameras, then the time statistics will be printed to the terminal. -After the simulations stops it can be closed with CTRL C. +After the simulations stops it can be closed with CTRL+C. diff --git a/docs/source/how-to/index.rst b/docs/source/how-to/index.rst index 893b3a69b3..4b5c426d82 100644 --- a/docs/source/how-to/index.rst +++ b/docs/source/how-to/index.rst @@ -36,6 +36,17 @@ a fixed base robot. This guide goes over the various considerations and steps to make_fixed_prim +Spawning Multiple Assets +------------------------ + +This guide explains how to import and configure different assets in each environment. This is +useful when you want to create diverse environments with different objects. + +.. 
toctree:: + :maxdepth: 1 + + multi_asset_spawning + Saving Camera Output -------------------- diff --git a/docs/source/how-to/multi_asset_spawning.rst b/docs/source/how-to/multi_asset_spawning.rst new file mode 100644 index 0000000000..9f74e39f6b --- /dev/null +++ b/docs/source/how-to/multi_asset_spawning.rst @@ -0,0 +1,101 @@ +Spawning Multiple Assets +======================== + +.. currentmodule:: omni.isaac.lab + +Typical, spawning configurations (introduced in the :ref:`tutorial-spawn-prims` tutorial) copy the same +asset (or USD primitive) across the different resolved prim paths from the expressions. +For instance, if the user specifies to spawn the asset at "/World/Table\_.*/Object", the same +asset is created at the paths "/World/Table_0/Object", "/World/Table_1/Object" and so on. + +However, at times, it might be desirable to spawn different assets under the prim paths to +ensure a diversity in the simulation. This guide describes how to create different assets under +each prim path using the spawning functionality. + +The sample script ``multi_asset.py`` is used as a reference, located in the +``IsaacLab/source/standalone/demos`` directory. + +.. dropdown:: Code for multi_asset.py + :icon: code + + .. literalinclude:: ../../../source/standalone/demos/multi_asset.py + :language: python + :emphasize-lines: 101-123, 130-149 + :linenos: + +This script creates multiple environments, where each environment has a rigid object that is either a cone, +a cube, or a sphere, and an articulation that is either the ANYmal-C or ANYmal-D robot. + +.. image:: ../_static/demos/multi_asset.jpg + :width: 100% + :alt: result of multi_asset.py + +Using Multi-Asset Spawning Functions +------------------------------------ + +It is possible to spawn different assets and USDs in each environment using the spawners +:class:`~sim.spawners.wrappers.MultiAssetSpawnerCfg` and :class:`~sim.spawners.wrappers.MultiUsdFileCfg`: + +* We set the spawn configuration in :class:`~assets.RigidObjectCfg` to be + :class:`~sim.spawners.wrappers.MultiAssetSpawnerCfg`: + + .. literalinclude:: ../../../source/standalone/demos/multi_asset.py + :language: python + :lines: 99-125 + :dedent: + + This function allows you to define a list of different assets that can be spawned as rigid objects. + When :attr:`~sim.spawners.wrappers.MultiAssetSpawnerCfg.random_choice` is set to True, one asset from the list + is randomly selected and spawned at the specified prim path. + +* Similarly, we set the spawn configuration in :class:`~assets.ArticulationCfg` to be + :class:`~sim.spawners.wrappers.MultiUsdFileCfg`: + + .. literalinclude:: ../../../source/standalone/demos/multi_asset.py + :language: python + :lines: 128-161 + :dedent: + + Similar to before, this configuration allows the selection of different USD files representing articulated assets. + + +Things to Note +-------------- + +Similar asset structuring +~~~~~~~~~~~~~~~~~~~~~~~~~ + +While spawning and handling multiple assets using the same physics interface (the rigid object or articulation classes), +it is essential to have the assets at all the prim locations follow a similar structure. In case of an articulation, +this means that they all must have the same number of links and joints, the same number of collision bodies and +the same names for them. If that is not the case, the physics parsing of the prims can get affected and fail. 
+ +The main purpose of this functionality is to enable the user to create randomized versions of the same asset, +for example robots with different link lengths, or rigid objects with different collider shapes. + +Disabling physics replication in interactive scene +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, the flag :attr:`scene.InteractiveScene.replicate_physics` is set to True. This flag informs the physics +engine that the simulation environments are copies of one another so it just needs to parse the first environment +to understand the entire simulation scene. This helps speed up the simulation scene parsing. + +However, in the case of spawning different assets in different environments, this assumption does not hold +anymore. Hence the flag :attr:`scene.InteractiveScene.replicate_physics` must be disabled. + +.. literalinclude:: ../../../source/standalone/demos/multi_asset.py + :language: python + :lines: 221-224 + :dedent: + +The Code Execution +------------------ + +To execute the script with multiple environments and randomized assets, use the following command: + +.. code-block:: bash + + ./isaaclab.sh -p source/standalone/demos/multi_asset.py --num_envs 2048 + +This command runs the simulation with 2048 environments, each with randomly selected assets. +To stop the simulation, you can close the window, or press ``Ctrl+C`` in the terminal. diff --git a/docs/source/how-to/save_camera_output.rst b/docs/source/how-to/save_camera_output.rst index 21a3927126..bbc7327b22 100644 --- a/docs/source/how-to/save_camera_output.rst +++ b/docs/source/how-to/save_camera_output.rst @@ -89,7 +89,7 @@ To run the accompanying script, execute the following command: .. code-block:: bash # Usage with saving and drawing - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --draw + ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --draw --enable_cameras # Usage with saving only in headless mode ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --save --headless --enable_cameras diff --git a/docs/source/overview/environments.rst b/docs/source/overview/environments.rst index f88c121016..f42f3c34a5 100644 --- a/docs/source/overview/environments.rst +++ b/docs/source/overview/environments.rst @@ -53,13 +53,13 @@ Classic environments that are based on IsaacGymEnvs implementation of MuJoCo-sty | | | | | | |cartpole-direct-link| | | +------------------+-----------------------------+-------------------------------------------------------------------------+ - | |cartpole| | |manager-camera-rgb-link| | Move the cart to keep the pole upwards in the classic cartpole control | + | |cartpole| | |cartpole-rgb-link| | Move the cart to keep the pole upwards in the classic cartpole control | | | | and perceptive inputs | - | | |manager-camera-dpt-link| | | + | | |cartpole-depth-link| | | | | | | - | | |cartpole-camera-rgb-link| | | + | | |cartpole-rgb-direct-link| | | | | | | - | | |cartpole-camera-dpt-link| | | + | | |cartpole-depth-direct-link|| | +------------------+-----------------------------+-------------------------------------------------------------------------+ .. |humanoid| image:: ../_static/tasks/classic/humanoid.jpg @@ -69,16 +69,14 @@ Classic environments that are based on IsaacGymEnvs implementation of MuJoCo-sty .. |humanoid-link| replace:: `Isaac-Humanoid-v0 `__ .. |ant-link| replace:: `Isaac-Ant-v0 `__ .. |cartpole-link| replace:: `Isaac-Cartpole-v0 `__ +.. 
|cartpole-rgb-link| replace:: `Isaac-Cartpole-RGB-Camera-v0 `__ +.. |cartpole-depth-link| replace:: `Isaac-Cartpole-Depth-Camera-v0 `__ .. |humanoid-direct-link| replace:: `Isaac-Humanoid-Direct-v0 `__ .. |ant-direct-link| replace:: `Isaac-Ant-Direct-v0 `__ -.. |manager-camera-rgb-link| replace:: `Isaac-Cartpole-RGB-v0 `__ -.. |manager-camera-dpt-link| replace:: `Isaac-Cartpole-Depth-v0 `__ .. |cartpole-direct-link| replace:: `Isaac-Cartpole-Direct-v0 `__ -.. |manager-camera-rgb-link| replace:: `Isaac-Cartpole-RGB-v0 `__ -.. |manager-camera-dpt-link| replace:: `Isaac-Cartpole-Depth-v0 `__ -.. |cartpole-camera-rgb-link| replace:: `Isaac-Cartpole-RGB-Camera-Direct-v0 `__ -.. |cartpole-camera-dpt-link| replace:: `Isaac-Cartpole-Depth-Camera-Direct-v0 `__ +.. |cartpole-rgb-direct-link| replace:: `Isaac-Cartpole-RGB-Camera-Direct-v0 `__ +.. |cartpole-depth-direct-link| replace:: `Isaac-Cartpole-Depth-Camera-Direct-v0 `__ Manipulation ~~~~~~~~~~~~ diff --git a/docs/source/overview/showroom.rst b/docs/source/overview/showroom.rst index d8ff7a933d..d3d86fd777 100644 --- a/docs/source/overview/showroom.rst +++ b/docs/source/overview/showroom.rst @@ -77,7 +77,7 @@ A few quick showroom scripts to run and checkout: :width: 100% :alt: Dexterous hands in Isaac Lab -- Spawn procedurally generated terrains with different configurations: +- Spawn different deformable (soft) bodies and let them fall from a height: .. tab-set:: :sync-group: os @@ -87,20 +87,20 @@ A few quick showroom scripts to run and checkout: .. code:: bash - ./isaaclab.sh -p source/standalone/demos/procedural_terrain.py + ./isaaclab.sh -p source/standalone/demos/deformables.py .. tab-item:: :icon:`fa-brands fa-windows` Windows :sync: windows .. code:: batch - isaaclab.bat -p source\standalone\demos\procedural_terrain.py + isaaclab.bat -p source\standalone\demos\deformables.py - .. image:: ../_static/demos/procedural_terrain.jpg + .. image:: ../_static/demos/deformables.jpg :width: 100% - :alt: Procedural Terrains in Isaac Lab + :alt: Deformable primitive-shaped objects in Isaac Lab -- Spawn different deformable (soft) bodies and let them fall from a height: +- Use the interactive scene and spawn varying assets in individual environments: .. tab-set:: :sync-group: os @@ -110,20 +110,43 @@ A few quick showroom scripts to run and checkout: .. code:: bash - ./isaaclab.sh -p source/standalone/demos/deformables.py + ./isaaclab.sh -p source/standalone/demos/multi_asset.py .. tab-item:: :icon:`fa-brands fa-windows` Windows :sync: windows .. code:: batch - isaaclab.bat -p source\standalone\demos\deformables.py + isaaclab.bat -p source\standalone\demos\multi_asset.py - .. image:: ../_static/demos/deformables.jpg + .. image:: ../_static/demos/multi_asset.jpg :width: 100% - :alt: Deformable primitive-shaped objects in Isaac Lab + :alt: Multiple assets managed through the same simulation handles + +- Create and spawn procedurally generated terrains with different configurations: + + .. tab-set:: + :sync-group: os + + .. tab-item:: :icon:`fa-brands fa-linux` Linux + :sync: linux + + .. code:: bash + + ./isaaclab.sh -p source/standalone/demos/procedural_terrain.py + + .. tab-item:: :icon:`fa-brands fa-windows` Windows + :sync: windows + + .. code:: batch + + isaaclab.bat -p source\standalone\demos\procedural_terrain.py + + .. 
image:: ../_static/demos/procedural_terrain.jpg + :width: 100% + :alt: Procedural Terrains in Isaac Lab -- Spawn multiple markers that are useful for visualizations: +- Define multiple markers that are useful for visualizations: .. tab-set:: :sync-group: os diff --git a/docs/source/overview/teleop_imitation.rst b/docs/source/overview/teleop_imitation.rst index 519306a48d..642aada3fe 100644 --- a/docs/source/overview/teleop_imitation.rst +++ b/docs/source/overview/teleop_imitation.rst @@ -60,7 +60,7 @@ format. # install python module (for robomimic) ./isaaclab.sh -i robomimic # split data - ./isaaclab.sh -p source/standalone//workflows/robomimic/tools/split_train_val.py logs/robomimic/Isaac-Lift-Cube-Franka-IK-Rel-v0/hdf_dataset.hdf5 --ratio 0.2 + ./isaaclab.sh -p source/standalone/workflows/robomimic/tools/split_train_val.py logs/robomimic/Isaac-Lift-Cube-Franka-IK-Rel-v0/hdf_dataset.hdf5 --ratio 0.2 3. Train a BC agent for ``Isaac-Lift-Cube-Franka-IK-Rel-v0`` with `Robomimic `__: diff --git a/docs/source/tutorials/04_sensors/add_sensors_on_robot.rst b/docs/source/tutorials/04_sensors/add_sensors_on_robot.rst index 500eaa7666..3ddd39bf48 100644 --- a/docs/source/tutorials/04_sensors/add_sensors_on_robot.rst +++ b/docs/source/tutorials/04_sensors/add_sensors_on_robot.rst @@ -174,7 +174,7 @@ Now that we have gone through the code, let's run the script and see the result: .. code-block:: bash - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py --num_envs 2 + ./isaaclab.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py --num_envs 2 --enable_cameras This command should open a stage with a ground plane, lights, and two quadrupedal robots. diff --git a/isaaclab.bat b/isaaclab.bat index 53674d71f8..b415ef1a13 100644 --- a/isaaclab.bat +++ b/isaaclab.bat @@ -81,7 +81,7 @@ if errorlevel 1 ( set isaacsim_exe=!isaac_path!\isaac-sim.bat ) else ( rem if isaac sim installed from pip - set isaacsim_exe=isaacsim + set isaacsim_exe=isaacsim omni.isaac.sim ) rem check if there is a python path available if not exist "%isaacsim_exe%" ( diff --git a/isaaclab.sh b/isaaclab.sh index 297424b9c2..a604706e70 100755 --- a/isaaclab.sh +++ b/isaaclab.sh @@ -90,8 +90,14 @@ extract_isaacsim_exe() { local isaacsim_exe=${isaac_path}/isaac-sim.sh # check if there is a python path available if [ ! 
-f "${isaacsim_exe}" ]; then - echo "[ERROR] No Isaac Sim executable found at path: ${isaacsim_exe}" >&2 - exit 1 + # check for installation using Isaac Sim pip + if [ $(python -m pip list | grep -c 'isaacsim-rl') -gt 0 ]; then + # Isaac Sim - Python packages entry point + local isaacsim_exe="isaacsim omni.isaac.sim" + else + echo "[ERROR] No Isaac Sim executable found at path: ${isaac_path}" >&2 + exit 1 + fi fi # return the result echo ${isaacsim_exe} diff --git a/pyproject.toml b/pyproject.toml index 402fcff6aa..51d4375907 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ known_third_party = [ "omni.kit.*", "warp", "carb", + "Semantics", ] # Imports from this repository known_first_party = "omni.isaac.lab" diff --git a/source/extensions/omni.isaac.lab/config/extension.toml b/source/extensions/omni.isaac.lab/config/extension.toml index 2c780361d4..7e9225c2b3 100644 --- a/source/extensions/omni.isaac.lab/config/extension.toml +++ b/source/extensions/omni.isaac.lab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.24.13" +version = "0.25.0" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst index 05d6084ef1..b0fa66f629 100644 --- a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst +++ b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst @@ -1,13 +1,77 @@ Changelog --------- -0.22.15 (2024-09-20) +0.25.0 (2024-10-06) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added configuration classes for spawning assets from a list of individual asset configurations randomly + at the specified prim paths. + + +0.24.20 (2024-10-07) +~~~~~~~~~~~~~~~~~~~~ + +Fixes +^^^^^ + +* Fixed the :meth:`omni.isaac.lab.envs.mdp.events.randomize_rigid_body_material` function to + correctly sample friction and restitution from the given ranges. + + +0.24.19 (2024-10-05) +~~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added new functionalities to the FrameTransformer to make it more general. It is now possible to track: + + * Target frames that aren't children of the source frame prim_path + * Target frames that are based upon the source frame prim_path + + +0.24.18 (2024-10-04) +~~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixes parsing and application of ``size`` parameter for :class:`~omni.isaac.lab.sim.spawn.GroundPlaneCfg` to correctly + scale the grid-based ground plane. + + +0.24.17 (2024-10-04) +~~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed the deprecation notice for using ``pxr.Semantics``. The corresponding modules use ``Semantics`` module + directly. + + +0.24.16 (2024-10-03) +~~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Renamed the observation function :meth:`grab_images` to :meth:`image` to follow convention of noun-based naming. +* Renamed the function :meth:`convert_perspective_depth_to_orthogonal_depth` to a shorter name + :meth:`omni.isaac.lab.utils.math.orthogonalize_perspective_depth`. + + +0.24.15 (2024-09-20) ~~~~~~~~~~~~~~~~~~~~ Added ^^^^^ -* Added :meth:`grab_images` to be able to use images for an observation term in manager based environments +* Added :meth:`grab_images` to be able to use images for an observation term in manager-based environments. + 0.24.14 (2024-09-20) ~~~~~~~~~~~~~~~~~~~~ @@ -15,10 +79,10 @@ Added Added ^^^^^ -* Added :meth:`convert_perspective_depth_to_orthogonal_depth`. :meth:`unproject_depth` assumes - that the input depth image is orthogonal. 
The new :meth:`convert_perspective_depth_to_orthogonal_depth` - can be used to convert a perspective depth image into an orthogonal depth image, so that the point cloud - can be unprojected correctly with :meth:`unproject_depth`. +* Added the method :meth:`convert_perspective_depth_to_orthogonal_depth` to convert perspective depth + images to orthogonal depth images. This is useful for the :meth:`~omni.isaac.lab.utils.math.unproject_depth`, + since it expects orthogonal depth images as inputs. + 0.24.13 (2024-09-08) ~~~~~~~~~~~~~~~~~~~~ diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/app/app_launcher.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/app/app_launcher.py index 062ff97269..8f9f5adc19 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/app/app_launcher.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/app/app_launcher.py @@ -178,11 +178,11 @@ def add_app_launcher_args(parser: argparse.ArgumentParser) -> None: * ``experience`` (str): The experience file to load when launching the SimulationApp. If a relative path is provided, it is resolved relative to the ``apps`` folder in Isaac Sim and Isaac Lab (in that order). - If provided as an empty string, the experience file is determined based on the headless flag: + If provided as an empty string, the experience file is determined based on the command-line flags: * If headless and enable_cameras are True, the experience file is set to ``isaaclab.python.headless.rendering.kit``. * If headless is False and enable_cameras is True, the experience file is set to ``isaaclab.python.rendering.kit``. - * If headless is False and enable_cameras is False, the experience file is set to ``isaaclab.python.kit``. + * If headless and enable_cameras are False, the experience file is set to ``isaaclab.python.kit``. * If headless is True and enable_cameras is False, the experience file is set to ``isaaclab.python.headless.kit``. Args: diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_marl_env.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_marl_env.py index c467225d49..ab7cb5e3c0 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_marl_env.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_marl_env.py @@ -81,7 +81,7 @@ def __init__(self, cfg: DirectMARLEnvCfg, render_mode: str | None = None, **kwar # set the seed for the environment if self.cfg.seed is not None: - self.seed(self.cfg.seed) + self.cfg.seed = self.seed(self.cfg.seed) else: carb.log_warn("Seed not set for the environment. The environment creation may not be deterministic.") diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_rl_env.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_rl_env.py index 1e51f89a76..656bf3e2dc 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_rl_env.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/direct_rl_env.py @@ -86,7 +86,7 @@ def __init__(self, cfg: DirectRLEnvCfg, render_mode: str | None = None, **kwargs # set the seed for the environment if self.cfg.seed is not None: - self.seed(self.cfg.seed) + self.cfg.seed = self.seed(self.cfg.seed) else: carb.log_warn("Seed not set for the environment. 
The environment creation may not be deterministic.") diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/manager_based_env.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/manager_based_env.py index 2712d4f10a..4cc1b86b8c 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/manager_based_env.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/manager_based_env.py @@ -76,7 +76,7 @@ def __init__(self, cfg: ManagerBasedEnvCfg): # set the seed for the environment if self.cfg.seed is not None: - self.seed(self.cfg.seed) + self.cfg.seed = self.seed(self.cfg.seed) else: carb.log_warn("Seed not set for the environment. The environment creation may not be deterministic.") diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py index 8f2d737eb7..3eaeb650f9 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/events.py @@ -14,7 +14,6 @@ from __future__ import annotations -import numpy as np import torch from typing import TYPE_CHECKING, Literal @@ -25,22 +24,14 @@ import omni.isaac.lab.utils.math as math_utils from omni.isaac.lab.actuators import ImplicitActuator from omni.isaac.lab.assets import Articulation, DeformableObject, RigidObject -from omni.isaac.lab.managers import SceneEntityCfg +from omni.isaac.lab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from omni.isaac.lab.terrains import TerrainImporter if TYPE_CHECKING: from omni.isaac.lab.envs import ManagerBasedEnv -def randomize_rigid_body_material( - env: ManagerBasedEnv, - env_ids: torch.Tensor | None, - static_friction_range: tuple[float, float], - dynamic_friction_range: tuple[float, float], - restitution_range: tuple[float, float], - num_buckets: int, - asset_cfg: SceneEntityCfg, -): +class randomize_rigid_body_material(ManagerTermBase): """Randomize the physics materials on all geometries of the asset. This function creates a set of physics materials with random static friction, dynamic friction, and restitution @@ -53,6 +44,10 @@ def randomize_rigid_body_material( all bodies). The integer values are used as indices to select the material properties from the material buckets. + If the flag ``make_consistent`` is set to ``True``, the dynamic friction is set to be less than or equal to + the static friction. This obeys the physics constraint on friction values. However, it may not always be + essential for the application. Thus, the flag is set to ``False`` by default. + .. attention:: This function uses CPU tensors to assign the material properties. It is recommended to use this function only during the initialization of the environment. Otherwise, it may lead to a significant performance @@ -60,69 +55,111 @@ def randomize_rigid_body_material( .. note:: PhysX only allows 64000 unique physics materials in the scene. If the number of materials exceeds this - limit, the simulation will crash. + limit, the simulation will crash. Due to this reason, we sample the materials only once during initialization. + Afterwards, these materials are randomly assigned to the geometries of the asset. 
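+
+    A minimal usage sketch as an event term (the term name, asset name, and value
+    ranges are illustrative):
+
+    .. code-block:: python
+
+        from omni.isaac.lab.envs import mdp
+        from omni.isaac.lab.managers import EventTermCfg, SceneEntityCfg
+
+        physics_material = EventTermCfg(
+            func=mdp.randomize_rigid_body_material,
+            mode="startup",  # recommended: assign materials once at initialization
+            params={
+                "asset_cfg": SceneEntityCfg("robot"),
+                "static_friction_range": (0.7, 1.3),
+                "dynamic_friction_range": (0.5, 1.0),
+                "restitution_range": (0.0, 0.2),
+                "num_buckets": 64,
+            },
+        )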
""" - # extract the used quantities (to enable type-hinting) - asset: RigidObject | Articulation = env.scene[asset_cfg.name] - if not isinstance(asset, (RigidObject, Articulation)): - raise ValueError( - f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{asset_cfg.name}'" - f" with type: '{type(asset)}'." - ) + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. - # resolve environment ids - if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") - else: - env_ids = env_ids.cpu() - - # retrieve material buffer - materials = asset.root_physx_view.get_material_properties() + Args: + cfg: The configuration of the event term. + env: The environment instance. - # sample material properties from the given ranges - material_samples = np.zeros(materials[env_ids].shape) - material_samples[..., 0] = np.random.uniform(*static_friction_range) - material_samples[..., 1] = np.random.uniform(*dynamic_friction_range) - material_samples[..., 2] = np.random.uniform(*restitution_range) + Raises: + ValueError: If the asset is not a RigidObject or an Articulation. + """ + super().__init__(cfg, env) - # create uniform range tensor for bucketing - lo = np.array([static_friction_range[0], dynamic_friction_range[0], restitution_range[0]]) - hi = np.array([static_friction_range[1], dynamic_friction_range[1], restitution_range[1]]) + # extract the used quantities (to enable type-hinting) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] - # to avoid 64k material limit in physx, we bucket materials by binning randomized material properties - # into buckets based on the number of buckets specified - for d in range(3): - buckets = np.array([(hi[d] - lo[d]) * i / num_buckets + lo[d] for i in range(num_buckets)]) - material_samples[..., d] = buckets[np.searchsorted(buckets, material_samples[..., d]) - 1] + if not isinstance(self.asset, (RigidObject, Articulation)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{self.asset_cfg.name}'" + f" with type: '{type(self.asset)}'." + ) - # update material buffer with new samples - if isinstance(asset, Articulation) and asset_cfg.body_ids != slice(None): # obtain number of shapes per body (needed for indexing the material properties correctly) # note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes # per body. We use the physics simulation view to obtain the number of shapes per body. - num_shapes_per_body = [] - for link_path in asset.root_physx_view.link_paths[0]: - link_physx_view = asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore - num_shapes_per_body.append(link_physx_view.max_shapes) + if isinstance(self.asset, Articulation) and self.asset_cfg.body_ids != slice(None): + self.num_shapes_per_body = [] + for link_path in self.asset.root_physx_view.link_paths[0]: + link_physx_view = self.asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore + self.num_shapes_per_body.append(link_physx_view.max_shapes) + # ensure the parsing is correct + num_shapes = sum(self.num_shapes_per_body) + expected_shapes = self.asset.root_physx_view.max_shapes + if num_shapes != expected_shapes: + raise ValueError( + "Randomization term 'randomize_rigid_body_material' failed to parse the number of shapes per body." + f" Expected total shapes: {expected_shapes}, but got: {num_shapes}." 
+ ) + else: + # in this case, we don't need to do special indexing + self.num_shapes_per_body = None + + # obtain parameters for sampling friction and restitution values + static_friction_range = cfg.params.get("static_friction_range", (1.0, 1.0)) + dynamic_friction_range = cfg.params.get("dynamic_friction_range", (1.0, 1.0)) + restitution_range = cfg.params.get("restitution_range", (0.0, 0.0)) + num_buckets = int(cfg.params.get("num_buckets", 1)) # sample material properties from the given ranges - for body_id in asset_cfg.body_ids: - # start index of shape - start_idx = sum(num_shapes_per_body[:body_id]) - # end index of shape - end_idx = start_idx + num_shapes_per_body[body_id] - # assign the new materials - # material ids are of shape: num_env_ids x num_shapes - # material_buckets are of shape: num_buckets x 3 - materials[env_ids, start_idx:end_idx] = torch.from_numpy(material_samples[:, start_idx:end_idx]).to( - dtype=torch.float - ) - else: - materials[env_ids] = torch.from_numpy(material_samples).to(dtype=torch.float) + # note: we only sample the materials once during initialization + # afterwards these are randomly assigned to the geometries of the asset + range_list = [static_friction_range, dynamic_friction_range, restitution_range] + ranges = torch.tensor(range_list, device="cpu") + self.material_buckets = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (num_buckets, 3), device="cpu") + + # ensure dynamic friction is always less than static friction + make_consistent = cfg.params.get("make_consistent", False) + if make_consistent: + self.material_buckets[:, 1] = torch.min(self.material_buckets[:, 0], self.material_buckets[:, 1]) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + static_friction_range: tuple[float, float], + dynamic_friction_range: tuple[float, float], + restitution_range: tuple[float, float], + num_buckets: int, + asset_cfg: SceneEntityCfg, + make_consistent: bool = False, + ): + # resolve environment ids + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device="cpu") + else: + env_ids = env_ids.cpu() + + # randomly assign material IDs to the geometries + total_num_shapes = self.asset.root_physx_view.max_shapes + bucket_ids = torch.randint(0, num_buckets, (len(env_ids), total_num_shapes), device="cpu") + material_samples = self.material_buckets[bucket_ids] + + # retrieve material buffer from the physics simulation + materials = self.asset.root_physx_view.get_material_properties() + + # update material buffer with new samples + if self.num_shapes_per_body is not None: + # sample material properties from the given ranges + for body_id in self.asset_cfg.body_ids: + # obtain indices of shapes for the body + start_idx = sum(self.num_shapes_per_body[:body_id]) + end_idx = start_idx + self.num_shapes_per_body[body_id] + # assign the new materials + # material samples are of shape: num_env_ids x total_num_shapes x 3 + materials[env_ids, start_idx:end_idx] = material_samples[:, start_idx:end_idx] + else: + # assign all the materials + materials[env_ids] = material_samples[:] - # apply to simulation - asset.root_physx_view.set_material_properties(materials, env_ids) + # apply to simulation + self.asset.root_physx_view.set_material_properties(materials, env_ids) def randomize_rigid_body_mass( diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/observations.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/observations.py index c5b51fc6ae..c770915337 100644 --- 
a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/observations.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/observations.py @@ -182,38 +182,52 @@ def body_incoming_wrench(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> tor return link_incoming_forces.view(env.num_envs, -1) -def grab_images( +def image( env: ManagerBasedEnv, sensor_cfg: SceneEntityCfg = SceneEntityCfg("tiled_camera"), data_type: str = "rgb", convert_perspective_to_orthogonal: bool = False, normalize: bool = True, ) -> torch.Tensor: - """Grab all of the latest images of a specific datatype produced by a specific camera. + """Images of a specific datatype from the camera sensor. + + If the flag :attr:`normalize` is True, post-processing of the images are performed based on their + data-types: + + - "rgb": Scales the image to (0, 1) and subtracts with the mean of the current image batch. + - "depth" or "distance_to_camera" or "distance_to_plane": Replaces infinity values with zero. Args: env: The environment the cameras are placed within. sensor_cfg: The desired sensor to read from. Defaults to SceneEntityCfg("tiled_camera"). data_type: The data type to pull from the desired camera. Defaults to "rgb". - convert_perspective_to_orthogonal: Whether to convert perspective - depth images to orthogonal depth images. Defaults to False. - normalize: Set to True to normalize images. Defaults to True. + convert_perspective_to_orthogonal: Whether to orthogonalize perspective depth images. + This is used only when the data type is "distance_to_camera". Defaults to False. + normalize: Whether to normalize the images. This depends on the selected data type. + Defaults to True. Returns: - The images produced at the last timestep + The images produced at the last time-step """ + # extract the used quantities (to enable type-hinting) sensor: TiledCamera | Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] + + # obtain the input image images = sensor.data.output[data_type] + + # depth image conversion if (data_type == "distance_to_camera") and convert_perspective_to_orthogonal: - images = math_utils.convert_perspective_depth_to_orthogonal_depth(images, sensor.data.intrinsic_matrices) + images = math_utils.orthogonalize_perspective_depth(images, sensor.data.intrinsic_matrices) + # rgb/depth image normalization if normalize: if data_type == "rgb": - images = images / 255 + images = images.float() / 255.0 mean_tensor = torch.mean(images, dim=(1, 2), keepdim=True) images -= mean_tensor elif "distance_to" in data_type or "depth" in data_type: images[images == float("inf")] = 0 + return images.clone() diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/scene/interactive_scene.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/scene/interactive_scene.py index c803f0e305..8c4b81aaff 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/scene/interactive_scene.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/scene/interactive_scene.py @@ -166,6 +166,18 @@ def clone_environments(self, copy_from_source: bool = False): If True, clones are independent copies of the source prim and won't reflect its changes (start-up time may increase). Defaults to False. 
""" + # check if user spawned different assets in individual environments + # this flag will be None if no multi asset is spawned + carb_settings_iface = carb.settings.get_settings() + has_multi_assets = carb_settings_iface.get("/isaaclab/spawn/multi_assets") + if has_multi_assets and self.cfg.replicate_physics: + carb.log_warn( + "Varying assets might have been spawned under different environments." + " However, the replicate physics flag is enabled in the 'InteractiveScene' configuration." + " This may adversely affect PhysX parsing. We recommend disabling this property." + ) + + # clone the environment env_origins = self.cloner.clone( source_prim_path=self.env_prim_paths[0], prim_paths=self.env_prim_paths, @@ -187,9 +199,6 @@ def filter_collisions(self, global_prim_paths: list[str] | None = None): global_prim_paths: A list of global prim paths to enable collisions with. Defaults to None, in which case no global prim paths are considered. """ - # obtain the current physics scene - physics_scene_prim_path = self.physics_scene_path - # validate paths in global prim paths if global_prim_paths is None: global_prim_paths = [] @@ -203,7 +212,7 @@ def filter_collisions(self, global_prim_paths: list[str] | None = None): # filter collisions within each environment instance self.cloner.filter_collisions( - physics_scene_prim_path, + self.physics_scene_path, "/World/collisions", self.env_prim_paths, global_paths=self._global_prim_paths, @@ -224,14 +233,16 @@ def __str__(self) -> str: """ @property - def physics_scene_path(self): - """Search the stage for the physics scene""" + def physics_scene_path(self) -> str: + """The path to the USD Physics Scene.""" if self._physics_scene_path is None: for prim in self.stage.Traverse(): if prim.HasAPI(PhysxSchema.PhysxSceneAPI): - self._physics_scene_path = prim.GetPrimPath() + self._physics_scene_path = prim.GetPrimPath().pathString carb.log_info(f"Physics scene prim path: {self._physics_scene_path}") break + if self._physics_scene_path is None: + raise RuntimeError("No physics scene found! Please make sure one exists.") return self._physics_scene_path @property diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer.py index 320007cabd..87205cf4cd 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer.py @@ -5,6 +5,7 @@ from __future__ import annotations +import re import torch from collections.abc import Sequence from typing import TYPE_CHECKING @@ -50,21 +51,6 @@ class FrameTransformer(SensorBase): typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the manipulator. - .. note:: - - Currently, this implementation only handles frames within an articulation. This is because the frame - regex expressions are resolved based on their parent prim path. This can be extended to handle - frames outside of articulation by using the frame prim path instead. However, this would require - additional checks to ensure that the user-specified frames are valid which is not currently implemented. - - .. warning:: - - The implementation assumes that the parent body of a target frame is not the same as that - of the source frame (i.e. :attr:`FrameTransformerCfg.prim_path`). 
While a corner case, this can occur - if the user specifies the same prim path for both the source frame and target frame. In this case, - the target frame will be ignored and not reported. This is a limitation of the current implementation - and will be fixed in a future release. - """ cfg: FrameTransformerCfg @@ -136,9 +122,9 @@ def _initialize_impl(self): self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) - # Keep track of mapping from the rigid body name to the desired frame, as there may be multiple frames + # Keep track of mapping from the rigid body name to the desired frames and prim path, as there may be multiple frames # based upon the same body name and we don't want to create unnecessary views - body_names_to_frames: dict[str, set[str]] = {} + body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} # The offsets associated with each target frame target_offsets: dict[str, dict[str, torch.Tensor]] = {} # The frames whose offsets are not identity @@ -148,6 +134,9 @@ def _initialize_impl(self): # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl self._apply_target_frame_offset = False + # Need to keep track of whether the source frame is also a target frame + self._source_is_also_target_frame = False + # Collect all target frames, their associated body prim paths and their offsets so that we can extract # the prim, check that it has the appropriate rigid body API in a single loop. # First element is None because user can't specify source frame name @@ -155,7 +144,8 @@ def _initialize_impl(self): frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] # First element is None because source frame offset is handled separately frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] - for frame, prim_path, offset in zip(frames, frame_prim_paths, frame_offsets): + frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) + for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): # Find correct prim matching_prims = sim_utils.find_matching_prims(prim_path) if len(matching_prims) == 0: @@ -180,9 +170,19 @@ def _initialize_impl(self): # Keep track of which frames are associated with which bodies if body_name in body_names_to_frames: - body_names_to_frames[body_name].add(frame_name) + body_names_to_frames[body_name]["frames"].add(frame_name) + + # This is a corner case where the source frame is also a target frame + if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": + self._source_is_also_target_frame = True + else: - body_names_to_frames[body_name] = {frame_name} + # Store the first matching prim path and the type of frame + body_names_to_frames[body_name] = { + "frames": {frame_name}, + "prim_path": matching_prim_path, + "type": frame_type, + } if offset is not None: offset_pos = torch.tensor(offset.pos, device=self.device) @@ -191,7 +191,6 @@ def _initialize_impl(self): if not is_identity_pose(offset_pos, offset_quat): non_identity_offset_frames.append(frame_name) self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} if not self._apply_target_frame_offset: @@ -206,37 +205,75 @@ def _initialize_impl(self): ) # The names of bodies that RigidPrimView will be tracking to later 
extract transforms from - tracked_body_names = list(body_names_to_frames.keys()) - # Construct regex expression for the body names - body_names_regex = r"(" + "|".join(tracked_body_names) + r")" - body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] + tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] + + body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] + # Create simulation view self._physics_sim_view = physx.create_simulation_view(self._backend) self._physics_sim_view.set_subspace_roots("/") # Create a prim view for all frames and initialize it # order of transforms coming out of view will be source frame followed by target frame(s) - self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex.replace(".*", "*")) + self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) # Determine the order in which regex evaluated body names so we can later index into frame transforms # by frame name correctly all_prim_paths = self._frame_physx_view.prim_paths - # Only need first env as the names and their ordering are the same across environments - first_env_prim_paths = all_prim_paths[0 : len(tracked_body_names)] - first_env_body_names = [first_env_prim_path.split("/")[-1] for first_env_prim_path in first_env_prim_paths] + if "env_" in all_prim_paths[0]: + + def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: + """Separates the environment number and prim_path from the item. + + Args: + item: The item to extract the environment number from. Assumes item is of the form + `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. + Returns: + The environment number and the prim_path. + """ + match = re.search(r"env_(\d+)(.*)", item) + return (int(match.group(1)), match.group(2)) + + # Find the indices that would reorganize output to be per environment. 
We want `env_1/blah` to come before `env_11/blah` + # and env_1/Robot/base to come before env_1/Robot/foot so we need to use custom key function + self._per_env_indices = [ + index + for index, _ in sorted( + list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) + ) + ] + + # Only need 0th env as the names and their ordering are the same across environments + sorted_prim_paths = [ + all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] + ] + + else: + # If no environment is present, then the order of the body names is the same as the order of the prim paths sorted alphabetically + self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] + sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] + + # -- target frames + self._target_frame_body_names = [prim_path.split("/")[-1] for prim_path in sorted_prim_paths] - # Re-parse the list as it may have moved when resolving regex above # -- source frame self._source_frame_body_name = self.cfg.prim_path.split("/")[-1] - source_frame_index = first_env_body_names.index(self._source_frame_body_name) - # -- target frames - self._target_frame_body_names = first_env_body_names[:] - self._target_frame_body_names.remove(self._source_frame_body_name) + source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) + + # Only remove source frame from tracked bodies if it is not also a target frame + if not self._source_is_also_target_frame: + self._target_frame_body_names.remove(self._source_frame_body_name) # Determine indices into all tracked body frames for both source and target frames all_ids = torch.arange(self._num_envs * len(tracked_body_names)) self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index - self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] + + # If source frame is also a target frame, then the target frame body ids are the same as the source frame body ids + if self._source_is_also_target_frame: + self._target_frame_body_ids = all_ids + else: + self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] # The name of each of the target frame(s) - either user specified or defaulted to the body name self._target_frame_names: list[str] = [] @@ -249,26 +286,34 @@ def _initialize_impl(self): duplicate_frame_indices = [] # Go through each body name and determine the number of duplicates we need for that frame - # and extract the offsets. This is all done to handles the case where multiple frames + # and extract the offsets. 
This is all done to handle the case where multiple frames # reference the same body, but have different names and/or offsets for i, body_name in enumerate(self._target_frame_body_names): - for frame in body_names_to_frames[body_name]: - target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_quat.append(target_offsets[frame]["quat"]) - self._target_frame_names.append(frame) - duplicate_frame_indices.append(i) + for frame in body_names_to_frames[body_name]["frames"]: + # Only need to handle target frames here as source frame is handled separately + if frame in target_offsets: + target_frame_offset_pos.append(target_offsets[frame]["pos"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) + self._target_frame_names.append(frame) + duplicate_frame_indices.append(i) # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is an optimization to make _update_buffers_impl more efficient duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - num_target_body_frames = len(tracked_body_names) - 1 + if self._source_is_also_target_frame: + num_target_body_frames = len(tracked_body_names) + else: + num_target_body_frames = len(tracked_body_names) - 1 + self._duplicate_frame_indices = torch.cat( + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] + ) - # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) - self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) + # Target frame offsets are only applied if at least one of the offsets is non-identity + if self._apply_target_frame_offset: + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) + self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) # fill the data buffer self._data.target_frame_names = self._target_frame_names @@ -288,6 +333,10 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # Extract transforms from view - shape is: # (the total number of source and target body frames being tracked * self._num_envs, 7) transforms = self._frame_physx_view.get_transforms() + + # Reorder the transforms to be per environment as is expected of SensorData + transforms = transforms[self._per_env_indices] + # Convert quaternions as PhysX uses xyzw form transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") @@ -309,6 +358,7 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): target_frames = transforms[self._target_frame_body_ids] duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] + # Only apply offset if the offsets will result in a coordinate frame transform if self._apply_target_frame_offset: target_pos_w, target_quat_w = combine_frame_transforms( diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_cfg.py index ef225bb1fc..014b9ab235 100644 ---
a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_cfg.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_cfg.py @@ -31,10 +31,15 @@ class FrameCfg: """Information specific to a coordinate frame.""" prim_path: str = MISSING - """The prim path corresponding to the parent rigid body. + """The prim path corresponding to a rigid body. - This prim should be part of the same articulation as :attr:`FrameTransformerCfg.prim_path`. + This can be a regex pattern to match multiple prims. For example, "/Robot/.*" will match all prims under "/Robot". + + This means that if the source :attr:`FrameTransformerCfg.prim_path` is "/Robot/base", and the target :attr:`FrameTransformerCfg.FrameCfg.prim_path` is "/Robot/.*", + then the frame transformer will track the poses of all the prims under "/Robot", + including "/Robot/base" (even though this will result in an identity pose w.r.t. the source frame). """ + name: str | None = None """User-defined name for the new coordinate frame. Defaults to None. diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_data.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_data.py index 0f4f4e0171..2523416502 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/frame_transformer/frame_transformer_data.py @@ -15,8 +15,8 @@ class FrameTransformerData: """Target frame names (this denotes the order in which that frame data is ordered). The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. - This usually follows the order in which the frames are defined in the config. However, in - the case of regex matching, the order may be different. + This does not necessarily follow the order in which the frames are defined in the config due to + the regex matching. """ target_pos_source: torch.Tensor = None diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/__init__.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/__init__.py index 851750f371..94b1245ab6 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/__init__.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/__init__.py @@ -61,3 +61,4 @@ class and the function call in a single line of code. from .sensors import * # noqa: F401, F403 from .shapes import * # noqa: F401, F403 from .spawner_cfg import * # noqa: F401, F403 +from .wrappers import * # noqa: F401, F403 diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/from_files/from_files.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/from_files/from_files.py index 5e3c88dc1d..cab6bf06c9 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/from_files/from_files.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/from_files/from_files.py @@ -151,16 +151,11 @@ def spawn_ground_plane( # Scale only the mesh # Warning: This is specific to the default grid plane asset. 
- if prim_utils.is_prim_path_valid(f"{prim_path}/Enviroment"): + if prim_utils.is_prim_path_valid(f"{prim_path}/Environment"): # compute scale from size scale = (cfg.size[0] / 100.0, cfg.size[1] / 100.0, 1.0) # apply scale to the mesh - omni.kit.commands.execute( - "ChangeProperty", - prop_path=Sdf.Path(f"{prim_path}/Enviroment.xformOp:scale"), - value=scale, - prev=None, - ) + prim_utils.set_prim_property(f"{prim_path}/Environment", "xformOp:scale", scale) # Change the color of the plane # Warning: This is specific to the default grid plane asset. diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/spawner_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/spawner_cfg.py index 089b38b29a..351b3cde96 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/spawner_cfg.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/spawner_cfg.py @@ -64,11 +64,6 @@ class SpawnerCfg: This parameter is only used when cloning prims. If False, then the asset will be inherited from the source prim, i.e. all USD changes to the source prim will be reflected in the cloned prims. - - .. versionadded:: 2023.1 - - This parameter is only supported from Isaac Sim 2023.1 onwards. If you are using an older - version of Isaac Sim, this parameter will be ignored. """ diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/__init__.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/__init__.py new file mode 100644 index 0000000000..f05d3e58c7 --- /dev/null +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for wrapping spawner configurations. + +Unlike the other spawner modules, this module provides a way to wrap multiple spawner configurations +into a single configuration. This is useful when the user wants to spawn multiple assets based on +different configurations. +""" + +from .wrappers import spawn_multi_asset, spawn_multi_usd_file +from .wrappers_cfg import MultiAssetSpawnerCfg, MultiUsdFileCfg diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers.py new file mode 100644 index 0000000000..9040569e4a --- /dev/null +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers.py @@ -0,0 +1,169 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import random +import re +from typing import TYPE_CHECKING + +import carb +import omni.isaac.core.utils.prims as prim_utils +import omni.isaac.core.utils.stage as stage_utils +from pxr import Sdf, Usd + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.sim.spawners.from_files import UsdFileCfg + +if TYPE_CHECKING: + from . import wrappers_cfg + + +def spawn_multi_asset( + prim_path: str, + cfg: wrappers_cfg.MultiAssetSpawnerCfg, + translation: tuple[float, float, float] | None = None, + orientation: tuple[float, float, float, float] | None = None, +) -> Usd.Prim: + """Spawn multiple assets based on the provided configurations. + + This function spawns multiple assets based on the provided configurations. The assets are spawned + in the order they are provided in the list. 
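+
+    For example, a minimal sketch (the shapes and the ``/World/env_.*`` parent paths are
+    illustrative; they mirror the spawn-wrapper unit tests further below):
+
+    .. code-block:: python
+
+        import omni.isaac.lab.sim as sim_utils
+
+        cfg = sim_utils.MultiAssetSpawnerCfg(
+            assets_cfg=[
+                sim_utils.ConeCfg(radius=0.3, height=0.6),
+                sim_utils.CuboidCfg(size=(0.3, 0.3, 0.3)),
+            ],
+            random_choice=True,
+        )
+        # spawns one of the two shapes under each prim matched by the parent expression
+        cfg.func("/World/env_.*/Asset", cfg)
+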
If the :attr:`~MultiAssetSpawnerCfg.random_choice` parameter is + set to True, a random asset configuration is selected for each spawn. + + Args: + prim_path: The prim path to spawn the assets. + cfg: The configuration for spawning the assets. + translation: The translation of the spawned assets. Default is None. + orientation: The orientation of the spawned assets in (w, x, y, z) order. Default is None. + + Returns: + The created prim at the first prim path. + """ + # resolve: {SPAWN_NS}/AssetName + # note: this assumes that the spawn namespace already exists in the stage + root_path, asset_path = prim_path.rsplit("/", 1) + # check if input is a regex expression + # note: a valid prim path can only contain alphanumeric characters, underscores, and forward slashes + is_regex_expression = re.match(r"^[a-zA-Z0-9/_]+$", root_path) is None + + # resolve matching prims for source prim path expression + if is_regex_expression and root_path != "": + source_prim_paths = sim_utils.find_matching_prim_paths(root_path) + # if no matching prims are found, raise an error + if len(source_prim_paths) == 0: + raise RuntimeError( + f"Unable to find source prim path: '{root_path}'. Please create the prim before spawning." + ) + else: + source_prim_paths = [root_path] + + # find a free prim path to hold all the template prims + template_prim_path = stage_utils.get_next_free_path("/World/Template") + prim_utils.create_prim(template_prim_path, "Scope") + + # spawn everything first in a "Dataset" prim + proto_prim_paths = list() + for index, asset_cfg in enumerate(cfg.assets_cfg): + # append semantic tags if specified + if cfg.semantic_tags is not None: + if asset_cfg.semantic_tags is None: + asset_cfg.semantic_tags = cfg.semantic_tags + else: + asset_cfg.semantic_tags += cfg.semantic_tags + # override settings for properties + attr_names = ["mass_props", "rigid_props", "collision_props", "activate_contact_sensors", "deformable_props"] + for attr_name in attr_names: + attr_value = getattr(cfg, attr_name) + if hasattr(asset_cfg, attr_name) and attr_value is not None: + setattr(asset_cfg, attr_name, attr_value) + # spawn single instance + proto_prim_path = f"{template_prim_path}/Asset_{index:04d}" + asset_cfg.func(proto_prim_path, asset_cfg, translation=translation, orientation=orientation) + # append to proto prim paths + proto_prim_paths.append(proto_prim_path) + + # resolve prim paths for spawning and cloning + prim_paths = [f"{source_prim_path}/{asset_path}" for source_prim_path in source_prim_paths] + + # acquire stage + stage = stage_utils.get_current_stage() + + # manually clone prims if the source prim path is a regex expression + # note: unlike in the cloner API from Isaac Sim, we do not "reset" xforms on the copied prims. + # This is because the "spawn" calls during the creation of the proto prims already handle this operation. + with Sdf.ChangeBlock(): + for index, prim_path in enumerate(prim_paths): + # create the prim spec to copy the proto prim into + env_spec = Sdf.CreatePrimInLayer(stage.GetRootLayer(), prim_path) + # randomly select an asset configuration + if cfg.random_choice: + proto_path = random.choice(proto_prim_paths) + else: + proto_path = proto_prim_paths[index % len(proto_prim_paths)] + # copy the proto prim + Sdf.CopySpec(env_spec.layer, Sdf.Path(proto_path), env_spec.layer, Sdf.Path(prim_path)) + + # delete the dataset prim after spawning + prim_utils.delete_prim(template_prim_path) + + # set carb setting to indicate to Isaac Lab's environments that different prims have been spawned + # at varying prim paths.
In this case, the PhysX parser shouldn't optimize the stage parsing. + # the flag is mainly used to inform the user that they should disable `InteractiveScene.replicate_physics` + carb_settings_iface = carb.settings.get_settings() + carb_settings_iface.set_bool("/isaaclab/spawn/multi_assets", True) + + # return the prim + return prim_utils.get_prim_at_path(prim_paths[0]) + + +def spawn_multi_usd_file( + prim_path: str, + cfg: wrappers_cfg.MultiUsdFileCfg, + translation: tuple[float, float, float] | None = None, + orientation: tuple[float, float, float, float] | None = None, +) -> Usd.Prim: + """Spawn multiple USD files based on the provided configurations. + + This function creates configuration instances corresponding to the individual USD files and + calls the :meth:`spawn_multi_asset` method to spawn them into the scene. + + Args: + prim_path: The prim path to spawn the assets. + cfg: The configuration for spawning the assets. + translation: The translation of the spawned assets. Default is None. + orientation: The orientation of the spawned assets in (w, x, y, z) order. Default is None. + + Returns: + The created prim at the first prim path. + """ + # needed here to avoid circular imports + from .wrappers_cfg import MultiAssetSpawnerCfg + + # parse all the usd files + if isinstance(cfg.usd_path, str): + usd_paths = [cfg.usd_path] + else: + usd_paths = cfg.usd_path + + # make a template usd config + usd_template_cfg = UsdFileCfg() + for attr_name, attr_value in cfg.__dict__.items(): + # skip names we know are not present + if attr_name in ["func", "usd_path", "random_choice"]: + continue + # set the attribute into the template + setattr(usd_template_cfg, attr_name, attr_value) + + # create multi asset configuration of USD files + multi_asset_cfg = MultiAssetSpawnerCfg(assets_cfg=[]) + for usd_path in usd_paths: + usd_cfg = usd_template_cfg.replace(usd_path=usd_path) + multi_asset_cfg.assets_cfg.append(usd_cfg) + # set random choice + multi_asset_cfg.random_choice = cfg.random_choice + + # call the original function + return spawn_multi_asset(prim_path, multi_asset_cfg, translation, orientation) diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers_cfg.py new file mode 100644 index 0000000000..83d42cc4af --- /dev/null +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/spawners/wrappers/wrappers_cfg.py @@ -0,0 +1,67 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from omni.isaac.lab.sim.spawners.from_files import UsdFileCfg +from omni.isaac.lab.sim.spawners.spawner_cfg import DeformableObjectSpawnerCfg, RigidObjectSpawnerCfg, SpawnerCfg +from omni.isaac.lab.utils import configclass + +from . import wrappers + + +@configclass +class MultiAssetSpawnerCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): + """Configuration parameters for loading multiple assets from their individual configurations. + + Specifying values for any properties at the configuration level will override the settings of + individual assets' configuration. For instance, if the attribute + :attr:`MultiAssetSpawnerCfg.mass_props` is specified, its value will overwrite the values of the + mass properties in each configuration inside :attr:`assets_cfg` (wherever applicable). + This is done to simplify configuring similar properties globally.
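+
+    A minimal sketch of such a global override (values are illustrative; assuming the usual
+    ``import omni.isaac.lab.sim as sim_utils``):
+
+    .. code-block:: python
+
+        cfg = sim_utils.MultiAssetSpawnerCfg(
+            assets_cfg=[sim_utils.ConeCfg(radius=0.3, height=0.6), sim_utils.SphereCfg(radius=0.3)],
+            # overwrites the mass properties of every entry in assets_cfg
+            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
+        )
+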
By default, all properties are set to None. + + The following are exceptions to the above: + + * :attr:`visible`: This parameter is ignored. Its value for the individual assets is used. + * :attr:`semantic_tags`: If specified, it will be appended to each individual asset's semantic tags. + + """ + + func = wrappers.spawn_multi_asset + + assets_cfg: list[SpawnerCfg] = MISSING + """List of asset configurations to spawn.""" + + random_choice: bool = True + """Whether to randomly select an asset configuration. Default is True. + + If False, the asset configurations are spawned in the order they are provided in the list. + If True, a random asset configuration is selected for each spawn. + """ + + +@configclass +class MultiUsdFileCfg(UsdFileCfg): + """Configuration parameters for loading multiple USD files. + + Specifying values for any properties at the configuration level is applied to all the assets + imported from their USD files. + + .. tip:: + It is recommended that all the USD-based assets follow a similar prim-hierarchy. + + """ + + func = wrappers.spawn_multi_usd_file + + usd_path: str | list[str] = MISSING + """Path or a list of paths to the USD files to spawn the asset from.""" + + random_choice: bool = True + """Whether to randomly select an asset configuration. Default is True. + + If False, the asset configurations are spawned in the order they are provided in the list. + If True, a random asset configuration is selected for each spawn. + """ diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/utils.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/utils.py index d7a06de866..0828e4887a 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/utils.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sim/utils.py @@ -17,7 +17,13 @@ import omni.isaac.core.utils.stage as stage_utils import omni.kit.commands from omni.isaac.cloner import Cloner -from pxr import PhysxSchema, Sdf, Semantics, Usd, UsdGeom, UsdPhysics, UsdShade +from pxr import PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics, UsdShade + +# from Isaac Sim 4.2 onwards, pxr.Semantics is deprecated +try: + import Semantics +except ModuleNotFoundError: + from pxr import Semantics from omni.isaac.lab.utils.string import to_camel_case diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/math.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/math.py index 8be8f520da..1d3c0db81f 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/math.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/utils/math.py @@ -987,115 +987,30 @@ def transform_points( @torch.jit.script -def unproject_depth(depth: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor: - r"""Unproject depth image into a pointcloud. This method assumes that depth - is provided orthogonally relative to the image plane, as opposed to absolutely relative to the camera's - principal point (perspective depth). To unproject a perspective depth image, use - :meth:`convert_perspective_depth_to_orthogonal_depth` to convert - to an orthogonal depth image prior to calling this method. Otherwise, the - created point cloud will be distorted, especially around the edges. +def orthogonalize_perspective_depth(depth: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor: + """Converts a perspective depth image to an orthogonal depth image. - This function converts depth images into points given the calibration matrix of the camera. - - ..
math:: - p_{3D} = K^{-1} \times [u, v, 1]^T \times d - - where :math:`p_{3D}` is the 3D point, :math:`d` is the depth value, :math:`u` and :math:`v` are - the pixel coordinates and :math:`K` is the intrinsic matrix. - - If `depth` is a batch of depth images and `intrinsics` is a single intrinsic matrix, the same - calibration matrix is applied to all depth images in the batch. - - The function assumes that the width and height are both greater than 1. This makes the function - deal with many possible shapes of depth images and intrinsics matrices. - - Args: - depth: The depth measurement. Shape is (H, W) or or (H, W, 1) or (N, H, W) or (N, H, W, 1). - intrinsics: A tensor providing camera's calibration matrix. Shape is (3, 3) or (N, 3, 3). - - Returns: - The 3D coordinates of points. Shape is (P, 3) or (N, P, 3). - - Raises: - ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1). - ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3). - """ - depth_batch = depth.clone() - intrinsics_batch = intrinsics.clone() - # check if inputs are batched - is_batched = depth_batch.dim() == 4 or (depth_batch.dim() == 3 and depth_batch.shape[-1] != 1) - # make sure inputs are batched - if depth_batch.dim() == 3 and depth_batch.shape[-1] == 1: - depth_batch = depth_batch.squeeze(dim=2) # (H, W, 1) -> (H, W) - if depth_batch.dim() == 2: - depth_batch = depth_batch[None] # (H, W) -> (1, H, W) - if depth_batch.dim() == 4 and depth_batch.shape[-1] == 1: - depth_batch = depth_batch.squeeze(dim=3) # (N, H, W, 1) -> (N, H, W) - if intrinsics_batch.dim() == 2: - intrinsics_batch = intrinsics_batch[None] # (3, 3) -> (1, 3, 3) - # check shape of inputs - if depth_batch.dim() != 3: - raise ValueError(f"Expected depth images to have dim = 2 or 3 or 4: got shape {depth.shape}") - if intrinsics_batch.dim() != 3: - raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}") - - # get image height and width - im_height, im_width = depth_batch.shape[1:] - # create image points in homogeneous coordinates (3, H x W) - indices_u = torch.arange(im_width, device=depth.device, dtype=depth.dtype) - indices_v = torch.arange(im_height, device=depth.device, dtype=depth.dtype) - img_indices = torch.stack(torch.meshgrid([indices_u, indices_v], indexing="ij"), dim=0).reshape(2, -1) - pixels = torch.nn.functional.pad(img_indices, (0, 0, 0, 1), mode="constant", value=1.0) - pixels = pixels.unsqueeze(0) # (3, H x W) -> (1, 3, H x W) - - # unproject points into 3D space - points = torch.matmul(torch.inverse(intrinsics_batch), pixels) # (N, 3, H x W) - points = points / points[:, -1, :].unsqueeze(1) # normalize by last coordinate - # flatten depth image (N, H, W) -> (N, H x W) - depth_batch = depth_batch.transpose_(1, 2).reshape(depth_batch.shape[0], -1).unsqueeze(2) - depth_batch = depth_batch.expand(-1, -1, 3) - # scale points by depth - points_xyz = points.transpose_(1, 2) * depth_batch # (N, H x W, 3) - - # return points in same shape as input - if not is_batched: - points_xyz = points_xyz.squeeze(0) - - return points_xyz - - -@torch.jit.script -def convert_perspective_depth_to_orthogonal_depth( - perspective_depth: torch.Tensor, intrinsics: torch.Tensor -) -> torch.Tensor: - r"""Provided depth image(s) where depth is provided as the distance to the principal - point of the camera (perspective depth), this function converts it so that depth - is provided as the distance to the camera's image plane (orthogonal depth). 
- - This is helpful because `unproject_depth` assumes that depth is expressed in - the orthogonal depth format. - - If `perspective_depth` is a batch of depth images and `intrinsics` is a single intrinsic matrix, - the same calibration matrix is applied to all depth images in the batch. + Perspective depth images contain distances measured from the camera's optical center. + Meanwhile, orthogonal depth images provide the distance from the camera's image plane. + This method uses the camera geometry to convert a perspective depth image to an orthogonal depth image. The function assumes that the width and height are both greater than 1. Args: - perspective_depth: The depth measurement obtained with the distance_to_camera replicator. - Shape is (H, W) or or (H, W, 1) or (N, H, W) or (N, H, W, 1). - intrinsics: A tensor providing camera's calibration matrix. Shape is (3, 3) or (N, 3, 3). + depth: The perspective depth images. Shape is (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1). + intrinsics: The camera's calibration matrix. If a single matrix is provided, the same + calibration matrix is used across all the depth images in the batch. + Shape is (3, 3) or (N, 3, 3). Returns: - The depth image as if obtained by the distance_to_image_plane replicator. Shape - matches the input shape of depth + The orthogonal depth images. Shape matches the input shape of depth images. Raises: ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1). ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3). """ - # Clone inputs to avoid in-place modifications - perspective_depth_batch = perspective_depth.clone() + perspective_depth_batch = depth.clone() intrinsics_batch = intrinsics.clone() # Check if inputs are batched @@ -1123,7 +1038,7 @@ def convert_perspective_depth_to_orthogonal_depth( # Validate input shapes if perspective_depth_batch.dim() != 3: - raise ValueError(f"Expected perspective_depth to have 2, 3, or 4 dimensions; got {perspective_depth.shape}.") + raise ValueError(f"Expected depth images to have 2, 3, or 4 dimensions; got {depth.shape}.") if intrinsics_batch.dim() != 3: raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3); got {intrinsics.shape}.") @@ -1137,8 +1052,8 @@ def convert_perspective_depth_to_orthogonal_depth( cy = intrinsics_batch[:, 1, 2].view(-1, 1, 1) # Create meshgrid of pixel coordinates - u_grid = torch.arange(im_width, device=perspective_depth.device, dtype=perspective_depth.dtype) - v_grid = torch.arange(im_height, device=perspective_depth.device, dtype=perspective_depth.dtype) + u_grid = torch.arange(im_width, device=depth.device, dtype=depth.dtype) + v_grid = torch.arange(im_height, device=depth.device, dtype=depth.dtype) u_grid, v_grid = torch.meshgrid(u_grid, v_grid, indexing="xy") # Expand the grids for batch processing @@ -1150,17 +1065,104 @@ def convert_perspective_depth_to_orthogonal_depth( y_term = ((v_grid - cy) / fy) ** 2 # Calculate the orthogonal (normal) depth - normal_depth = perspective_depth_batch / torch.sqrt(1 + x_term + y_term) + orthogonal_depth = perspective_depth_batch / torch.sqrt(1 + x_term + y_term) # Restore the last dimension if it was present in the input if add_last_dim: - normal_depth = normal_depth.unsqueeze(-1) + orthogonal_depth = orthogonal_depth.unsqueeze(-1) # Return to original shape if input was not batched if not is_batched: - normal_depth = normal_depth.squeeze(0) + orthogonal_depth = orthogonal_depth.squeeze(0) - return normal_depth + return orthogonal_depth + + +@torch.jit.script
+def unproject_depth(depth: torch.Tensor, intrinsics: torch.Tensor, is_ortho: bool = True) -> torch.Tensor: + r"""Un-project depth image into a pointcloud. + + This function converts orthogonal or perspective depth images into points given the calibration matrix + of the camera. It uses the following transformation based on camera geometry: + + .. math:: + p_{3D} = K^{-1} \times [u, v, 1]^T \times d + + where :math:`p_{3D}` is the 3D point, :math:`d` is the depth value (measured from the image plane), + :math:`u` and :math:`v` are the pixel coordinates and :math:`K` is the intrinsic matrix. + + The function assumes that the width and height are both greater than 1. This makes the function + deal with many possible shapes of depth images and intrinsics matrices. + + .. note:: + If :attr:`is_ortho` is False, the input depth images are transformed to orthogonal depth images + by using the :meth:`orthogonalize_perspective_depth` method. + + Args: + depth: The depth measurement. Shape is (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1). + intrinsics: The camera's calibration matrix. If a single matrix is provided, the same + calibration matrix is used across all the depth images in the batch. + Shape is (3, 3) or (N, 3, 3). + is_ortho: Whether the input depth image is an orthogonal or a perspective depth image. If True, the input + depth image is considered as the *orthogonal* type, where the measurements are from the camera's + image plane. If False, the depth image is considered as the *perspective* type, where the + measurements are from the camera's optical center. Defaults to True. + + Returns: + The 3D coordinates of points. Shape is (P, 3) or (N, P, 3). + + Raises: + ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1). + ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3).
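+
+    Example: a minimal usage sketch (the image size and intrinsics are illustrative, not part
+    of the original patch):
+
+    .. code-block:: python
+
+        >>> depth = torch.rand(480, 640)  # orthogonal depth image
+        >>> intrinsics = torch.tensor([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
+        >>> points = unproject_depth(depth, intrinsics)  # shape: (480 * 640, 3)
+        >>> # for perspective depth (e.g. a distance-to-camera image), set is_ortho=False
+        >>> points = unproject_depth(depth, intrinsics, is_ortho=False)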
+ """ + # clone inputs to avoid in-place modifications + intrinsics_batch = intrinsics.clone() + # convert depth image to orthogonal if needed + if not is_ortho: + depth_batch = orthogonalize_perspective_depth(depth, intrinsics) + else: + depth_batch = depth.clone() + + # check if inputs are batched + is_batched = depth_batch.dim() == 4 or (depth_batch.dim() == 3 and depth_batch.shape[-1] != 1) + # make sure inputs are batched + if depth_batch.dim() == 3 and depth_batch.shape[-1] == 1: + depth_batch = depth_batch.squeeze(dim=2) # (H, W, 1) -> (H, W) + if depth_batch.dim() == 2: + depth_batch = depth_batch[None] # (H, W) -> (1, H, W) + if depth_batch.dim() == 4 and depth_batch.shape[-1] == 1: + depth_batch = depth_batch.squeeze(dim=3) # (N, H, W, 1) -> (N, H, W) + if intrinsics_batch.dim() == 2: + intrinsics_batch = intrinsics_batch[None] # (3, 3) -> (1, 3, 3) + # check shape of inputs + if depth_batch.dim() != 3: + raise ValueError(f"Expected depth images to have dim = 2 or 3 or 4: got shape {depth.shape}") + if intrinsics_batch.dim() != 3: + raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}") + + # get image height and width + im_height, im_width = depth_batch.shape[1:] + # create image points in homogeneous coordinates (3, H x W) + indices_u = torch.arange(im_width, device=depth.device, dtype=depth.dtype) + indices_v = torch.arange(im_height, device=depth.device, dtype=depth.dtype) + img_indices = torch.stack(torch.meshgrid([indices_u, indices_v], indexing="ij"), dim=0).reshape(2, -1) + pixels = torch.nn.functional.pad(img_indices, (0, 0, 0, 1), mode="constant", value=1.0) + pixels = pixels.unsqueeze(0) # (3, H x W) -> (1, 3, H x W) + + # unproject points into 3D space + points = torch.matmul(torch.inverse(intrinsics_batch), pixels) # (N, 3, H x W) + points = points / points[:, -1, :].unsqueeze(1) # normalize by last coordinate + # flatten depth image (N, H, W) -> (N, H x W) + depth_batch = depth_batch.transpose_(1, 2).reshape(depth_batch.shape[0], -1).unsqueeze(2) + depth_batch = depth_batch.expand(-1, -1, 3) + # scale points by depth + points_xyz = points.transpose_(1, 2) * depth_batch # (N, H x W, 3) + + # return points in same shape as input + if not is_batched: + points_xyz = points_xyz.squeeze(0) + + return points_xyz @torch.jit.script @@ -1191,8 +1193,10 @@ def project_points(points: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tens Returns: Projected 3D coordinates of points. Shape is (P, 3) or (N, P, 3). 
""" + # clone the inputs to avoid in-place operations modifying the original data points_batch = points.clone() intrinsics_batch = intrinsics.clone() + # check if inputs are batched is_batched = points_batch.dim() == 2 # make sure inputs are batched @@ -1205,12 +1209,14 @@ def project_points(points: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tens raise ValueError(f"Expected points to have dim = 3: got shape {points.shape}.") if intrinsics_batch.dim() != 3: raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}.") + # project points into 2D image plane points_2d = torch.matmul(intrinsics_batch, points_batch.transpose(1, 2)) points_2d = points_2d / points_2d[:, -1, :].unsqueeze(1) # normalize by last coordinate points_2d = points_2d.transpose_(1, 2) # (N, 3, P) -> (N, P, 3) # replace last coordinate with depth points_2d[:, :, -1] = points_batch[:, :, -1] + # return points in same shape as input if not is_batched: points_2d = points_2d.squeeze(0) # (1, 3, P) -> (3, P) diff --git a/source/extensions/omni.isaac.lab/test/sensors/test_frame_transformer.py b/source/extensions/omni.isaac.lab/test/sensors/test_frame_transformer.py index f379bc86b2..aeab3d9e72 100644 --- a/source/extensions/omni.isaac.lab/test/sensors/test_frame_transformer.py +++ b/source/extensions/omni.isaac.lab/test/sensors/test_frame_transformer.py @@ -3,10 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -""" -This script checks the FrameTransformer sensor by visualizing the frames that it creates. -""" - """Launch Isaac Sim Simulator first.""" from omni.isaac.lab.app import AppLauncher, run_tests @@ -26,6 +22,7 @@ import omni.isaac.lab.sim as sim_utils import omni.isaac.lab.utils.math as math_utils +from omni.isaac.lab.assets import RigidObjectCfg from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg from omni.isaac.lab.sensors import FrameTransformerCfg, OffsetCfg from omni.isaac.lab.terrains import TerrainImporterCfg @@ -62,6 +59,19 @@ class MySceneCfg(InteractiveSceneCfg): # sensors - frame transformer (filled inside unit test) frame_transformer: FrameTransformerCfg = None + # block + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(2.0, 0.0, 5)), + ) + class TestFrameTransformer(unittest.TestCase): """Test for frame transformer sensor.""" @@ -71,7 +81,7 @@ def setUp(self): # Create a new stage stage_utils.create_new_stage() # Load kit helper - self.sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + self.sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.005, device="cpu")) # Set main camera self.sim.set_camera_view(eye=[5, 5, 5], target=[0.0, 0.0, 0.0]) @@ -90,8 +100,7 @@ def tearDown(self): def test_frame_transformer_feet_wrt_base(self): """Test feet transformations w.r.t. base source frame. - In this test, the source frame is the robot base. This frame is at index 0, when - the frame bodies are sorted in the order of the regex matching in the frame transformer. + In this test, the source frame is the robot base. 
""" # Spawn things into stage scene_cfg = MySceneCfg(num_envs=32, env_spacing=5.0, lazy_sensor_update=False) @@ -141,9 +150,15 @@ def test_frame_transformer_feet_wrt_base(self): feet_indices, feet_names = scene.articulations["robot"].find_bodies( ["LF_FOOT", "RF_FOOT", "LH_FOOT", "RH_FOOT"] ) - # Check names are parsed the same order - user_feet_names = [f"{name}_USER" for name in feet_names] - self.assertListEqual(scene.sensors["frame_transformer"].data.target_frame_names, user_feet_names) + + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + + # Reorder the feet indices to match the order of the target frames with _USER suffix removed + target_frame_names = [name.split("_USER")[0] for name in target_frame_names] + + # Find the indices of the feet in the order of the target frames + reordering_indices = [feet_names.index(name) for name in target_frame_names] + feet_indices = [feet_indices[i] for i in reordering_indices] # default joint targets default_actions = scene.articulations["robot"].data.default_joint_pos.clone() @@ -185,11 +200,12 @@ def test_frame_transformer_feet_wrt_base(self): source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + # check if they are same - torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) # check if relative transforms are same feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source @@ -200,8 +216,8 @@ def test_frame_transformer_feet_wrt_base(self): root_pose_w[:, :3], root_pose_w[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] ) # check if they are same - torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b) def test_frame_transformer_feet_wrt_thigh(self): """Test feet transformation w.r.t. thigh source frame. 
@@ -285,10 +301,10 @@ def test_frame_transformer_feet_wrt_thigh(self): feet_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w feet_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w # check if they are same - torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(source_pose_w_gt[:, :3], source_pos_w_tf) + torch.testing.assert_close(source_pose_w_gt[:, 3:], source_quat_w_tf) + torch.testing.assert_close(feet_pos_w_gt, feet_pos_w_tf) + torch.testing.assert_close(feet_quat_w_gt, feet_quat_w_tf) # check if relative transforms are same feet_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source @@ -299,8 +315,269 @@ def test_frame_transformer_feet_wrt_thigh(self): source_pose_w_gt[:, :3], source_pose_w_gt[:, 3:], feet_pos_w_tf[:, index], feet_quat_w_tf[:, index] ) # check if they are same - torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b, rtol=1e-3, atol=1e-3) - torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b, rtol=1e-3, atol=1e-3) + torch.testing.assert_close(feet_pos_source_tf[:, index], foot_pos_b) + torch.testing.assert_close(feet_quat_source_tf[:, index], foot_quat_b) + + def test_frame_transformer_robot_body_to_external_cube(self): + """Test transformation from robot body to a cube in the scene. + + In this test, the source frame is the robot base. + + The target_frame is a cube in the scene, external to the robot. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="CUBE_USER", + prim_path="{ENV_REGEX_NS}/cube", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + self.sim.reset() + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + # Define simulation stepping + sim_dt = self.sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos + joint_vel = scene.articulations["robot"].data.default_joint_vel + # -- set root state + # -- robot + scene.articulations["robot"].write_root_state_to_sim(root_state) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + self.sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_state_w[:, :7] + cube_pos_w_gt = scene.rigid_objects["cube"].data.root_state_w[:, :3] + cube_quat_w_gt = scene.rigid_objects["cube"].data.root_state_w[:, 3:7] + # -- frame transformer + source_pos_w_tf = 
scene.sensors["frame_transformer"].data.source_pos_w + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w + cube_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.squeeze() + cube_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.squeeze() + + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(cube_pos_w_gt, cube_pos_w_tf) + torch.testing.assert_close(cube_quat_w_gt, cube_quat_w_tf) + + # check if relative transforms are same + cube_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source + cube_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + # ground-truth + cube_pos_b, cube_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], cube_pos_w_tf, cube_quat_w_tf + ) + # check if they are same + torch.testing.assert_close(cube_pos_source_tf[:, 0], cube_pos_b) + torch.testing.assert_close(cube_quat_source_tf[:, 0], cube_quat_b) + + def test_frame_transformer_offset_frames(self): + """Test body transformation w.r.t. base source frame. + + In this test, the source frame is the cube frame. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/cube", + target_frames=[ + FrameTransformerCfg.FrameCfg( + name="CUBE_CENTER", + prim_path="{ENV_REGEX_NS}/cube", + ), + FrameTransformerCfg.FrameCfg( + name="CUBE_TOP", + prim_path="{ENV_REGEX_NS}/cube", + offset=OffsetCfg( + pos=(0.0, 0.0, 0.1), + rot=(1.0, 0.0, 0.0, 0.0), + ), + ), + FrameTransformerCfg.FrameCfg( + name="CUBE_BOTTOM", + prim_path="{ENV_REGEX_NS}/cube", + offset=OffsetCfg( + pos=(0.0, 0.0, -0.1), + rot=(1.0, 0.0, 0.0, 0.0), + ), + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + self.sim.reset() + + # Define simulation stepping + sim_dt = self.sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene["cube"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + # -- set root state + # -- cube + scene["cube"].write_root_state_to_sim(root_state) + # reset buffers + scene.reset() + + # write data to sim + scene.write_data_to_sim() + # perform step + self.sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + cube_pos_w_gt = scene["cube"].data.root_state_w[:, :3] + cube_quat_w_gt = scene["cube"].data.root_state_w[:, 3:7] + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w + target_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w.squeeze() + target_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w.squeeze() + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + + cube_center_idx = target_frame_names.index("CUBE_CENTER") + cube_bottom_idx = target_frame_names.index("CUBE_BOTTOM") + cube_top_idx = target_frame_names.index("CUBE_TOP") + + # check if they are same + torch.testing.assert_close(cube_pos_w_gt, source_pos_w_tf) + torch.testing.assert_close(cube_quat_w_gt, source_quat_w_tf) + torch.testing.assert_close(cube_pos_w_gt, target_pos_w_tf[:, 
cube_center_idx]) + torch.testing.assert_close(cube_quat_w_gt, target_quat_w_tf[:, cube_center_idx]) + + # test offsets are applied correctly + # -- cube top + cube_pos_top = target_pos_w_tf[:, cube_top_idx] + cube_quat_top = target_quat_w_tf[:, cube_top_idx] + torch.testing.assert_close(cube_pos_top, cube_pos_w_gt + torch.tensor([0.0, 0.0, 0.1])) + torch.testing.assert_close(cube_quat_top, cube_quat_w_gt) + + # -- cube bottom + cube_pos_bottom = target_pos_w_tf[:, cube_bottom_idx] + cube_quat_bottom = target_quat_w_tf[:, cube_bottom_idx] + torch.testing.assert_close(cube_pos_bottom, cube_pos_w_gt + torch.tensor([0.0, 0.0, -0.1])) + torch.testing.assert_close(cube_quat_bottom, cube_quat_w_gt) + + def test_frame_transformer_all_bodies(self): + """Test transformation of all bodies w.r.t. base source frame. + + In this test, the source frame is the robot base. + + The target_frames are all bodies in the robot, implemented using .* pattern. + """ + # Spawn things into stage + scene_cfg = MySceneCfg(num_envs=2, env_spacing=5.0, lazy_sensor_update=False) + scene_cfg.frame_transformer = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*", + ), + ], + ) + scene = InteractiveScene(scene_cfg) + + # Play the simulator + self.sim.reset() + + target_frame_names = scene.sensors["frame_transformer"].data.target_frame_names + articulation_body_names = scene.articulations["robot"].data.body_names + + reordering_indices = [target_frame_names.index(name) for name in articulation_body_names] + + # default joint targets + default_actions = scene.articulations["robot"].data.default_joint_pos.clone() + # Define simulation stepping + sim_dt = self.sim.get_physics_dt() + # Simulate physics + for count in range(100): + # # reset + if count % 25 == 0: + # reset root state + root_state = scene.articulations["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + joint_pos = scene.articulations["robot"].data.default_joint_pos + joint_vel = scene.articulations["robot"].data.default_joint_vel + # -- set root state + # -- robot + scene.articulations["robot"].write_root_state_to_sim(root_state) + scene.articulations["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # reset buffers + scene.reset() + + # set joint targets + robot_actions = default_actions + 0.5 * torch.randn_like(default_actions) + scene.articulations["robot"].set_joint_position_target(robot_actions) + # write data to sim + scene.write_data_to_sim() + # perform step + self.sim.step() + # read data from sim + scene.update(sim_dt) + + # check absolute frame transforms in world frame + # -- ground-truth + root_pose_w = scene.articulations["robot"].data.root_state_w[:, :7] + bodies_pos_w_gt = scene.articulations["robot"].data.body_pos_w + bodies_quat_w_gt = scene.articulations["robot"].data.body_quat_w + + # -- frame transformer + source_pos_w_tf = scene.sensors["frame_transformer"].data.source_pos_w + source_quat_w_tf = scene.sensors["frame_transformer"].data.source_quat_w + bodies_pos_w_tf = scene.sensors["frame_transformer"].data.target_pos_w + bodies_quat_w_tf = scene.sensors["frame_transformer"].data.target_quat_w + + # check if they are same + torch.testing.assert_close(root_pose_w[:, :3], source_pos_w_tf) + torch.testing.assert_close(root_pose_w[:, 3:], source_quat_w_tf) + torch.testing.assert_close(bodies_pos_w_gt, bodies_pos_w_tf[:, reordering_indices]) + torch.testing.assert_close(bodies_quat_w_gt, bodies_quat_w_tf[:, 
reordering_indices]) + + bodies_pos_source_tf = scene.sensors["frame_transformer"].data.target_pos_source + bodies_quat_source_tf = scene.sensors["frame_transformer"].data.target_quat_source + + # Go through each body and check if relative transforms are same + for index in range(len(articulation_body_names)): + body_pos_b, body_quat_b = math_utils.subtract_frame_transforms( + root_pose_w[:, :3], root_pose_w[:, 3:], bodies_pos_w_tf[:, index], bodies_quat_w_tf[:, index] + ) + + torch.testing.assert_close(bodies_pos_source_tf[:, index], body_pos_b) + torch.testing.assert_close(bodies_quat_source_tf[:, index], body_quat_b) if __name__ == "__main__": diff --git a/source/extensions/omni.isaac.lab/test/sim/test_spawn_wrappers.py b/source/extensions/omni.isaac.lab/test/sim/test_spawn_wrappers.py new file mode 100644 index 0000000000..1260facf58 --- /dev/null +++ b/source/extensions/omni.isaac.lab/test/sim/test_spawn_wrappers.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +from omni.isaac.lab.app import AppLauncher, run_tests + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Rest everything follows.""" + +import unittest + +import omni.isaac.core.utils.prims as prim_utils +import omni.isaac.core.utils.stage as stage_utils +from omni.isaac.core.simulation_context import SimulationContext + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR + + +class TestSpawningWrappers(unittest.TestCase): + """Test fixture for checking spawning of multiple assets wrappers.""" + + def setUp(self) -> None: + """Create a blank new stage for each test.""" + # Create a new stage + stage_utils.create_new_stage() + # Simulation time-step + self.dt = 0.1 + # Load kit helper + self.sim = SimulationContext(physics_dt=self.dt, rendering_dt=self.dt, backend="numpy") + # Wait for spawning + stage_utils.update_stage() + + def tearDown(self) -> None: + """Stops simulator after each test.""" + # stop simulation + self.sim.stop() + self.sim.clear() + self.sim.clear_all_callbacks() + self.sim.clear_instance() + + """ + Tests - Multiple assets. 
+ """ + + def test_spawn_multiple_shapes_with_global_settings(self): + """Test spawning of shapes randomly with global rigid body settings.""" + # Define prim parents + num_clones = 10 + for i in range(num_clones): + prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + + # Spawn shapes + cfg = sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), # this one should get overridden + ), + sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2), + ), + sim_utils.SphereCfg( + radius=0.3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), + ), + ], + random_choice=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ) + prim = cfg.func("/World/env_.*/Cone", cfg) + + # Check validity + self.assertTrue(prim.IsValid()) + self.assertEqual(prim_utils.get_prim_path(prim), "/World/env_0/Cone") + # Find matching prims + prim_paths = prim_utils.find_matching_prim_paths("/World/env_*/Cone") + self.assertEqual(len(prim_paths), num_clones) + + # Check all prims have correct settings + for prim_path in prim_paths: + prim = prim_utils.get_prim_at_path(prim_path) + self.assertEqual(prim.GetAttribute("physics:mass").Get(), cfg.mass_props.mass) + + def test_spawn_multiple_shapes_with_individual_settings(self): + """Test spawning of shapes randomly with individual rigid object settings""" + # Define prim parents + num_clones = 10 + for i in range(num_clones): + prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + + # Make a list of masses + mass_variations = [2.0, 3.0, 4.0] + # Spawn shapes + cfg = sim_utils.MultiAssetSpawnerCfg( + assets_cfg=[ + sim_utils.ConeCfg( + radius=0.3, + height=0.6, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_variations[0]), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + sim_utils.CuboidCfg( + size=(0.3, 0.3, 0.3), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_variations[1]), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + sim_utils.SphereCfg( + radius=0.3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=mass_variations[2]), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + ], + random_choice=True, + ) + prim = cfg.func("/World/env_.*/Cone", cfg) + + # Check validity + self.assertTrue(prim.IsValid()) + self.assertEqual(prim_utils.get_prim_path(prim), "/World/env_0/Cone") + # Find matching prims + prim_paths = prim_utils.find_matching_prim_paths("/World/env_*/Cone") + self.assertEqual(len(prim_paths), num_clones) + + # Check all prims have correct settings + for prim_path in prim_paths: + prim = prim_utils.get_prim_at_path(prim_path) + self.assertTrue(prim.GetAttribute("physics:mass").Get() in 
mass_variations) + + """ + Tests - Multiple USDs. + """ + + def test_spawn_multiple_files_with_global_settings(self): + """Test spawning of files randomly with global articulation settings.""" + # Define prim parents + num_clones = 10 + for i in range(num_clones): + prim_utils.create_prim(f"/World/env_{i}", "Xform", translation=(i, i, 0)) + + # Spawn robots from the USD files + cfg = sim_utils.MultiUsdFileCfg( + usd_path=[ + f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd", + f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd", + ], + random_choice=True, + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + retain_accelerations=False, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0 + ), + activate_contact_sensors=True, + ) + prim = cfg.func("/World/env_.*/Robot", cfg) + + # Check validity + self.assertTrue(prim.IsValid()) + self.assertEqual(prim_utils.get_prim_path(prim), "/World/env_0/Robot") + # Find matching prims + prim_paths = prim_utils.find_matching_prim_paths("/World/env_*/Robot") + self.assertEqual(len(prim_paths), num_clones) + + +if __name__ == "__main__": + run_tests() diff --git a/source/extensions/omni.isaac.lab/test/utils/test_math.py b/source/extensions/omni.isaac.lab/test/utils/test_math.py index 06f1f3f8dc..d6d6086290 100644 --- a/source/extensions/omni.isaac.lab/test/utils/test_math.py +++ b/source/extensions/omni.isaac.lab/test/utils/test_math.py @@ -376,23 +376,27 @@ def iter_old_quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tens iter_old_quat_rotate_inverse(q_rand, v_rand), ) - def test_depth_perspective_conversion(self): - # Create a sample perspective depth image (N, H, W) - perspective_depth = torch.tensor([[[10.0, 0.0, 100.0], [0.0, 3000.0, 0.0], [100.0, 0.0, 100.0]]]) + def test_orthogonalize_perspective_depth(self): + """Test for converting perspective depth to orthogonal depth.""" + for device in ["cpu", "cuda:0"]: + # Create a sample perspective depth image (N, H, W) + perspective_depth = torch.tensor( + [[[10.0, 0.0, 100.0], [0.0, 3000.0, 0.0], [100.0, 0.0, 100.0]]], device=device + ) - # Create sample intrinsic matrix (3, 3) - intrinsics = torch.tensor([[500.0, 0.0, 5.0], [0.0, 500.0, 5.0], [0.0, 0.0, 1.0]]) + # Create sample intrinsic matrix (3, 3) + intrinsics = torch.tensor([[500.0, 0.0, 5.0], [0.0, 500.0, 5.0], [0.0, 0.0, 1.0]], device=device) - # Convert perspective depth to orthogonal depth - orthogonal_depth = math_utils.convert_perspective_depth_to_orthogonal_depth(perspective_depth, intrinsics) + # Convert perspective depth to orthogonal depth + orthogonal_depth = math_utils.orthogonalize_perspective_depth(perspective_depth, intrinsics) - # Manually compute expected orthogonal depth based on the formula for comparison - expected_orthogonal_depth = torch.tensor( - [[[9.9990, 0.0000, 99.9932], [0.0000, 2999.8079, 0.0000], [99.9932, 0.0000, 99.9964]]] - ) + # Manually compute expected orthogonal depth based on the formula for comparison + expected_orthogonal_depth = torch.tensor( + [[[9.9990, 0.0000, 99.9932], [0.0000, 2999.8079, 0.0000], [99.9932, 0.0000, 99.9964]]], device=device + ) - # Assert that the output is
close to the expected result + torch.testing.assert_close(orthogonal_depth, expected_orthogonal_depth) if __name__ == "__main__": diff --git a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_vision_env.py b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_vision_env.py index fd274a187d..b025bfb052 100644 --- a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_vision_env.py +++ b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/direct/shadow_hand/shadow_hand_vision_env.py @@ -9,7 +9,12 @@ import torch import omni.usd -from pxr import Semantics + +# from Isaac Sim 4.2 onwards, pxr.Semantics is deprecated +try: + import Semantics +except ModuleNotFoundError: + from pxr import Semantics import omni.isaac.lab.sim as sim_utils from omni.isaac.lab.assets import Articulation, RigidObject diff --git a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py index 889e844843..ce5a6c90b8 100644 --- a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py +++ b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_camera_env_cfg.py @@ -4,14 +4,15 @@ # SPDX-License-Identifier: BSD-3-Clause import omni.isaac.lab.sim as sim_utils -from omni.isaac.lab.envs.mdp.observations import grab_images from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm from omni.isaac.lab.managers import SceneEntityCfg from omni.isaac.lab.sensors import TiledCameraCfg from omni.isaac.lab.utils import configclass -from omni.isaac.lab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpoleEnvCfg, CartpoleSceneCfg +import omni.isaac.lab_tasks.manager_based.classic.cartpole.mdp as mdp + +from .cartpole_env_cfg import CartpoleEnvCfg, CartpoleSceneCfg ## # Scene definition @@ -20,6 +21,8 @@ @configclass class CartpoleRGBCameraSceneCfg(CartpoleSceneCfg): + + # add camera to the scene tiled_camera: TiledCameraCfg = TiledCameraCfg( prim_path="{ENV_REGEX_NS}/Camera", offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"), @@ -34,6 +37,8 @@ class CartpoleRGBCameraSceneCfg(CartpoleSceneCfg): @configclass class CartpoleDepthCameraSceneCfg(CartpoleSceneCfg): + + # add camera to the scene tiled_camera: TiledCameraCfg = TiledCameraCfg( prim_path="{ENV_REGEX_NS}/Camera", offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"), @@ -46,17 +51,22 @@ class CartpoleDepthCameraSceneCfg(CartpoleSceneCfg): ) +## +# MDP settings +## + + @configclass class RGBObservationsCfg: """Observation specifications for the MDP.""" @configclass class RGBCameraPolicyCfg(ObsGroup): - """Observations for policy group.""" + """Observations for policy group with RGB images.""" - image = ObsTerm(func=grab_images, params={"sensor_cfg": SceneEntityCfg("tiled_camera"), "data_type": "rgb"}) + image = ObsTerm(func=mdp.image, params={"sensor_cfg": SceneEntityCfg("tiled_camera"), "data_type": "rgb"}) - def __post_init__(self) -> None: + def __post_init__(self): self.enable_corruption = False self.concatenate_terms = True @@ -65,22 +75,35 @@ def __post_init__(self) -> 
None:

 @configclass
 class DepthObservationsCfg:
+    """Observation specifications for the MDP."""
+
     @configclass
     class DepthCameraPolicyCfg(RGBObservationsCfg.RGBCameraPolicyCfg):
+        """Observations for policy group with depth images."""
+
         image = ObsTerm(
-            func=grab_images, params={"sensor_cfg": SceneEntityCfg("tiled_camera"), "data_type": "distance_to_camera"}
+            func=mdp.image, params={"sensor_cfg": SceneEntityCfg("tiled_camera"), "data_type": "distance_to_camera"}
         )

     policy: ObsGroup = DepthCameraPolicyCfg()


+##
+# Environment configuration
+##
+
+
 @configclass
 class CartpoleRGBCameraEnvCfg(CartpoleEnvCfg):
+    """Configuration for the cartpole environment with RGB camera."""
+
     scene: CartpoleSceneCfg = CartpoleRGBCameraSceneCfg(num_envs=1024, env_spacing=20)
-    observations = RGBObservationsCfg()
+    observations: RGBObservationsCfg = RGBObservationsCfg()


 @configclass
 class CartpoleDepthCameraEnvCfg(CartpoleEnvCfg):
+    """Configuration for the cartpole environment with depth camera."""
+
     scene: CartpoleSceneCfg = CartpoleDepthCameraSceneCfg(num_envs=1024, env_spacing=20)
-    observations = DepthObservationsCfg()
+    observations: DepthObservationsCfg = DepthObservationsCfg()
diff --git a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py
index e372334602..8c92d3d5ae 100644
--- a/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py
+++ b/source/extensions/omni.isaac.lab_tasks/omni/isaac/lab_tasks/manager_based/classic/cartpole/cartpole_env_cfg.py
@@ -176,7 +176,7 @@ class CurriculumCfg:

 @configclass
 class CartpoleEnvCfg(ManagerBasedRLEnvCfg):
-    """Configuration for the locomotion velocity-tracking environment."""
+    """Configuration for the cartpole environment."""

     # Scene settings
     scene: CartpoleSceneCfg = CartpoleSceneCfg(num_envs=4096, env_spacing=4.0)
diff --git a/source/standalone/tutorials/04_sensors/benchmark_cameras.py b/source/standalone/benchmarks/benchmark_cameras.py
similarity index 97%
rename from source/standalone/tutorials/04_sensors/benchmark_cameras.py
rename to source/standalone/benchmarks/benchmark_cameras.py
index e79ff87810..2ba16c357a 100644
--- a/source/standalone/tutorials/04_sensors/benchmark_cameras.py
+++ b/source/standalone/benchmarks/benchmark_cameras.py
@@ -5,18 +5,19 @@

 """
 This script might help you determine how many cameras your system can realistically run
-at different desired settings. You can supply different task environments
-to inject cameras into, or just test a sample scene. Additionally,
-you can automatically find the maximum amount of cameras you can run a task with through the
-autotune functionality.
+at different desired settings.
+
+You can supply different task environments to inject cameras into, or just test a sample scene.
+Additionally, you can automatically find the maximum number of cameras you can run a task with
+through the auto-tune functionality.

 .. 
code-block:: bash # Usage with GUI - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py -h + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py -h # Usage with headless - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/benchmark_cameras.py -h --headless + ./isaaclab.sh -p source/standalone/benchmarks/benchmark_cameras.py -h --headless """ @@ -260,7 +261,7 @@ TiledCameraCfg, patterns, ) -from omni.isaac.lab.utils.math import convert_perspective_depth_to_orthogonal_depth, unproject_depth +from omni.isaac.lab.utils.math import orthogonalize_perspective_depth, unproject_depth from omni.isaac.lab_tasks.utils import load_cfg_from_registry @@ -676,9 +677,8 @@ def run_simulator( depth = camera.data.output[data_type] depth_images[data_label + "_raw"] = depth if perspective_depth_predicate(data_type) and convert_depth_to_camera_to_image_plane: - depth = convert_perspective_depth_to_orthogonal_depth( - perspective_depth=camera.data.output[data_type], - intrinsics=camera.data.intrinsic_matrices, + depth = orthogonalize_perspective_depth( + camera.data.output[data_type], camera.data.intrinsic_matrices ) depth_images[data_label + "_undistorted"] = depth @@ -753,7 +753,7 @@ def main(): print("[INFO]: Designing the scene") if args_cli.task is None: print("[INFO]: No task environment provided, creating random scene.") - sim_cfg = sim_utils.SimulationCfg(device="cpu" if args_cli.cpu else "cuda") + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) diff --git a/source/standalone/demos/arms.py b/source/standalone/demos/arms.py index 0fdfd68e5c..d50034d4d9 100644 --- a/source/standalone/demos/arms.py +++ b/source/standalone/demos/arms.py @@ -210,7 +210,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articula def main(): """Main function.""" # Initialize the simulation context - sim_cfg = sim_utils.SimulationCfg() + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([3.5, 0.0, 3.2], [0.0, 0.0, 0.5]) diff --git a/source/standalone/demos/bipeds.py b/source/standalone/demos/bipeds.py index da0abaedd3..961af39e19 100644 --- a/source/standalone/demos/bipeds.py +++ b/source/standalone/demos/bipeds.py @@ -47,9 +47,9 @@ def main(): """Main function.""" - # Load kit helper - sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[3.0, 0.0, 2.25], target=[0.0, 0.0, 1.0]) diff --git a/source/standalone/demos/cameras.py b/source/standalone/demos/cameras.py index 25f2953804..6c5583d55a 100644 --- a/source/standalone/demos/cameras.py +++ b/source/standalone/demos/cameras.py @@ -9,10 +9,10 @@ .. 
code-block:: bash # Usage - ./isaaclab.sh -p source/standalone/demos/cameras.py --disable_fabric + ./isaaclab.sh -p source/standalone/demos/cameras.py --enable_cameras # Usage in headless mode - ./isaaclab.sh -p source/standalone/demos/cameras.py --headless --enable_cameras --disable_fabric + ./isaaclab.sh -p source/standalone/demos/cameras.py --headless --enable_cameras """ @@ -274,16 +274,8 @@ def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): def main(): """Main function.""" - - # note: tile rendered cameras doesn't update the camera poses when using the GPU pipeline and Fabric. - # this is a bug which should be fixed in the future releases. - sim_cfg = sim_utils.SimulationCfg(dt=0.005) - # check if fabric is enabled - if args_cli.disable_fabric: - sim_cfg.use_fabric = False - sim_cfg.device = "cpu" - # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device, use_fabric=not args_cli.disable_fabric) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) diff --git a/source/standalone/demos/deformables.py b/source/standalone/demos/deformables.py index 74cfd8d242..91f9c7b6f1 100644 --- a/source/standalone/demos/deformables.py +++ b/source/standalone/demos/deformables.py @@ -177,7 +177,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Deformab def main(): """Main function.""" # Initialize the simulation context - sim_cfg = sim_utils.SimulationCfg(dt=0.01) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([4.0, 4.0, 3.0], [0.5, 0.5, 0.0]) diff --git a/source/standalone/demos/hands.py b/source/standalone/demos/hands.py index e358c3359d..e939d63cf5 100644 --- a/source/standalone/demos/hands.py +++ b/source/standalone/demos/hands.py @@ -143,9 +143,9 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articula def main(): """Main function.""" - # Initialize the simulation context - sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01)) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[0.0, -0.5, 1.5], target=[0.0, -0.2, 0.5]) # design scene diff --git a/source/standalone/demos/markers.py b/source/standalone/demos/markers.py index 1e85c26896..418cc6c39b 100644 --- a/source/standalone/demos/markers.py +++ b/source/standalone/demos/markers.py @@ -94,7 +94,8 @@ def define_markers() -> VisualizationMarkers: def main(): """Main function.""" # Load kit helper - sim = SimulationContext(sim_utils.SimulationCfg(dt=0.01)) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([0.0, 18.0, 12.0], [0.0, 3.0, 0.0]) diff --git a/source/standalone/demos/multi_asset.py b/source/standalone/demos/multi_asset.py new file mode 100644 index 0000000000..6363999949 --- /dev/null +++ b/source/standalone/demos/multi_asset.py @@ -0,0 +1,244 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to spawn multiple objects in multiple environments. + +.. 
code-block:: bash

+    # Usage
+    ./isaaclab.sh -p source/standalone/demos/multi_asset.py --num_envs 2048
+
+"""
+
+from __future__ import annotations
+
+"""Launch Isaac Sim Simulator first."""
+
+
+import argparse
+
+from omni.isaac.lab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Demo on spawning different objects in multiple environments.")
+parser.add_argument("--num_envs", type=int, default=1024, help="Number of environments to spawn.")
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli = parser.parse_args()
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import random
+
+import omni.usd
+from pxr import Gf, Sdf
+
+import omni.isaac.lab.sim as sim_utils
+from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
+from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg
+from omni.isaac.lab.sim import SimulationContext
+from omni.isaac.lab.utils import Timer, configclass
+from omni.isaac.lab.utils.assets import ISAACLAB_NUCLEUS_DIR
+
+##
+# Pre-defined Configuration
+##
+
+from omni.isaac.lab_assets.anymal import ANYDRIVE_3_LSTM_ACTUATOR_CFG  # isort: skip
+
+
+##
+# Randomization events.
+##
+
+
+def randomize_shape_color(prim_path_expr: str):
+    """Randomize the color of the geometry."""
+    # acquire stage
+    stage = omni.usd.get_context().get_stage()
+    # resolve the prim paths matching the expression
+    prim_paths = sim_utils.find_matching_prim_paths(prim_path_expr)
+    # batch all the edits into a single USD change block for efficiency
+    with Sdf.ChangeBlock():
+        for prim_path in prim_paths:
+            # get (or create) the prim spec in the root layer
+            prim_spec = Sdf.CreatePrimInLayer(stage.GetRootLayer(), prim_path)
+
+            # DO YOUR OWN OTHER KIND OF RANDOMIZATION HERE!
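+            # As a sketch, the same pattern can drive other randomizations; the
+            # attribute path below is hypothetical and depends on how the asset
+            # is authored, so adjust it to your own USD layout:
+            #   scale_spec = prim_spec.GetAttributeAtPath(prim_path + "/geometry/mesh.xformOp:scale")
+            #   s = random.uniform(0.5, 1.5)
+            #   scale_spec.default = Gf.Vec3f(s, s, s)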
+            # Note: you just need to acquire the right attribute for the property you want to set.
+            # Here is an example of setting the color randomly.
+            color_spec = prim_spec.GetAttributeAtPath(prim_path + "/geometry/material/Shader.inputs:diffuseColor")
+            color_spec.default = Gf.Vec3f(random.random(), random.random(), random.random())
+
+
+##
+# Scene Configuration
+##
+
+
+@configclass
+class MultiObjectSceneCfg(InteractiveSceneCfg):
+    """Configuration for a multi-object scene."""
+
+    # ground plane
+    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
+
+    # lights
+    dome_light = AssetBaseCfg(
+        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+    )
+
+    # rigid object
+    object: RigidObjectCfg = RigidObjectCfg(
+        prim_path="/World/envs/env_.*/Object",
+        spawn=sim_utils.MultiAssetSpawnerCfg(
+            assets_cfg=[
+                sim_utils.ConeCfg(
+                    radius=0.3,
+                    height=0.6,
+                    visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
+                ),
+                sim_utils.CuboidCfg(
+                    size=(0.3, 0.3, 0.3),
+                    visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), metallic=0.2),
+                ),
+                sim_utils.SphereCfg(
+                    radius=0.3,
+                    visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0), metallic=0.2),
+                ),
+            ],
+            random_choice=True,
+            rigid_props=sim_utils.RigidBodyPropertiesCfg(
+                solver_position_iteration_count=4, solver_velocity_iteration_count=0
+            ),
+            mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
+            collision_props=sim_utils.CollisionPropertiesCfg(),
+        ),
+        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 2.0)),
+    )
+
+    # articulation
+    robot: ArticulationCfg = ArticulationCfg(
+        prim_path="/World/envs/env_.*/Robot",
+        spawn=sim_utils.MultiUsdFileCfg(
+            usd_path=[
+                f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-C/anymal_c.usd",
+                f"{ISAACLAB_NUCLEUS_DIR}/Robots/ANYbotics/ANYmal-D/anymal_d.usd",
+            ],
+            random_choice=True,
+            rigid_props=sim_utils.RigidBodyPropertiesCfg(
+                disable_gravity=False,
+                retain_accelerations=False,
+                linear_damping=0.0,
+                angular_damping=0.0,
+                max_linear_velocity=1000.0,
+                max_angular_velocity=1000.0,
+                max_depenetration_velocity=1.0,
+            ),
+            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
+                enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0
+            ),
+            activate_contact_sensors=True,
+        ),
+        init_state=ArticulationCfg.InitialStateCfg(
+            pos=(0.0, 0.0, 0.6),
+            joint_pos={
+                ".*HAA": 0.0,  # all HAA
+                ".*F_HFE": 0.4,  # both front HFE
+                ".*H_HFE": -0.4,  # both hind HFE
+                ".*F_KFE": -0.8,  # both front KFE
+                ".*H_KFE": 0.8,  # both hind KFE
+            },
+        ),
+        actuators={"legs": ANYDRIVE_3_LSTM_ACTUATOR_CFG},
+    )
+
+
+##
+# Simulation Loop
+##
+
+
+def run_simulator(sim: SimulationContext, scene: InteractiveScene):
+    """Runs the simulation loop."""
+    # Extract scene entities
+    # note: we only do this here for readability.
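+    # note: the keys below are the attribute names defined on MultiObjectSceneCfg,
+    # i.e. scene["object"] resolves to the spawned RigidObject and scene["robot"]
+    # to the Articulation.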
+    rigid_object = scene["object"]
+    robot = scene["robot"]
+    # Define simulation stepping
+    sim_dt = sim.get_physics_dt()
+    count = 0
+    # Simulation loop
+    while simulation_app.is_running():
+        # Reset
+        if count % 500 == 0:
+            # reset counter
+            count = 0
+            # reset the scene entities
+            # object
+            root_state = rigid_object.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+            rigid_object.write_root_state_to_sim(root_state)
+            # robot
+            # -- root state
+            root_state = robot.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+            robot.write_root_state_to_sim(root_state)
+            # -- joint state
+            joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
+            robot.write_joint_state_to_sim(joint_pos, joint_vel)
+            # clear internal buffers
+            scene.reset()
+            print("[INFO]: Resetting scene state...")
+
+        # Apply action to robot
+        robot.set_joint_position_target(robot.data.default_joint_pos)
+        # Write data to sim
+        scene.write_data_to_sim()
+        # Perform step
+        sim.step()
+        # Increment counter
+        count += 1
+        # Update buffers
+        scene.update(sim_dt)
+
+
+def main():
+    """Main function."""
+    # Load kit helper
+    sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device)
+    sim = SimulationContext(sim_cfg)
+    # Set main camera
+    sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
+
+    # Design scene
+    scene_cfg = MultiObjectSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0, replicate_physics=False)
+    with Timer("[INFO] Time to create scene: "):
+        scene = InteractiveScene(scene_cfg)
+
+    with Timer("[INFO] Time to randomize scene: "):
+        # DO YOUR OWN OTHER KIND OF RANDOMIZATION HERE!
+        # Note: you just need to acquire the right attribute for the property you want to set.
+        # Here is an example of setting the color randomly.
+        randomize_shape_color(scene_cfg.object.prim_path)
+
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
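+    # note: reset() starts the simulation and initializes the physics handles;
+    # the entity buffers used in run_simulator() are only valid after this call.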
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main execution + main() + # close sim app + simulation_app.close() diff --git a/source/standalone/demos/procedural_terrain.py b/source/standalone/demos/procedural_terrain.py index 8793004864..e2c123137e 100644 --- a/source/standalone/demos/procedural_terrain.py +++ b/source/standalone/demos/procedural_terrain.py @@ -152,9 +152,9 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, AssetBas def main(): """Main function.""" - # Initialize the simulation context - sim = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01)) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[5.0, 5.0, 5.0], target=[0.0, 0.0, 0.0]) # design scene diff --git a/source/standalone/demos/quadcopter.py b/source/standalone/demos/quadcopter.py index 6d079a2f89..2110ae1219 100644 --- a/source/standalone/demos/quadcopter.py +++ b/source/standalone/demos/quadcopter.py @@ -45,9 +45,9 @@ def main(): """Main function.""" - # Load kit helper - sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[0.5, 0.5, 1.0], target=[0.0, 0.0, 0.5]) diff --git a/source/standalone/demos/quadrupeds.py b/source/standalone/demos/quadrupeds.py index 492ee07af1..3122a4741c 100644 --- a/source/standalone/demos/quadrupeds.py +++ b/source/standalone/demos/quadrupeds.py @@ -49,7 +49,7 @@ def define_origins(num_origins: int, spacing: float) -> list[list[float]]: - """Defines the origins of the the scene.""" + """Defines the origins of the scene.""" # create tensor based on number of environments env_origins = torch.zeros(num_origins, 3) # create a grid of origins diff --git a/source/standalone/tutorials/00_sim/launch_app.py b/source/standalone/tutorials/00_sim/launch_app.py index 116cc18960..d7aea99519 100644 --- a/source/standalone/tutorials/00_sim/launch_app.py +++ b/source/standalone/tutorials/00_sim/launch_app.py @@ -70,7 +70,7 @@ def main(): """Main function.""" # Initialize the simulation context - sim_cfg = sim_utils.SimulationCfg(dt=0.01) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5]) diff --git a/source/standalone/tutorials/00_sim/spawn_prims.py b/source/standalone/tutorials/00_sim/spawn_prims.py index 30f1972c8f..77f9ae9b1b 100644 --- a/source/standalone/tutorials/00_sim/spawn_prims.py +++ b/source/standalone/tutorials/00_sim/spawn_prims.py @@ -92,7 +92,7 @@ def main(): """Main function.""" # Initialize the simulation context - sim_cfg = sim_utils.SimulationCfg(dt=0.01) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5]) diff --git a/source/standalone/tutorials/01_assets/run_articulation.py b/source/standalone/tutorials/01_assets/run_articulation.py index 2f1079969b..d06874ac40 100644 --- a/source/standalone/tutorials/01_assets/run_articulation.py +++ b/source/standalone/tutorials/01_assets/run_articulation.py @@ -120,7 +120,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articula def main(): """Main function.""" # Load kit helper - 
sim_cfg = sim_utils.SimulationCfg(device="cpu") + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) diff --git a/source/standalone/tutorials/01_assets/run_deformable_object.py b/source/standalone/tutorials/01_assets/run_deformable_object.py index 9d20b37cf5..eccbfab06e 100644 --- a/source/standalone/tutorials/01_assets/run_deformable_object.py +++ b/source/standalone/tutorials/01_assets/run_deformable_object.py @@ -146,7 +146,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Deformab def main(): """Main function.""" # Load kit helper - sim_cfg = sim_utils.SimulationCfg() + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[3.0, 0.0, 1.0], target=[0.0, 0.0, 0.5]) diff --git a/source/standalone/tutorials/01_assets/run_rigid_object.py b/source/standalone/tutorials/01_assets/run_rigid_object.py index af0811a912..cede00b2c1 100644 --- a/source/standalone/tutorials/01_assets/run_rigid_object.py +++ b/source/standalone/tutorials/01_assets/run_rigid_object.py @@ -125,7 +125,7 @@ def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, RigidObj def main(): """Main function.""" # Load kit helper - sim_cfg = sim_utils.SimulationCfg() + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[1.5, 0.0, 1.0], target=[0.0, 0.0, 0.0]) diff --git a/source/standalone/tutorials/02_scene/create_scene.py b/source/standalone/tutorials/02_scene/create_scene.py index a4ed8f5f1f..36096d35d5 100644 --- a/source/standalone/tutorials/02_scene/create_scene.py +++ b/source/standalone/tutorials/02_scene/create_scene.py @@ -109,7 +109,7 @@ def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): def main(): """Main function.""" # Load kit helper - sim_cfg = sim_utils.SimulationCfg(device="cpu") + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) diff --git a/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py b/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py index 304e8c2eaa..8ba29c2921 100644 --- a/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py +++ b/source/standalone/tutorials/04_sensors/add_sensors_on_robot.py @@ -15,7 +15,7 @@ .. 
code-block:: bash # Usage - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py + ./isaaclab.sh -p source/standalone/tutorials/04_sensors/add_sensors_on_robot.py --enable_cameras """ @@ -156,7 +156,7 @@ def main(): """Main function.""" # Initialize the simulation context - sim_cfg = sim_utils.SimulationCfg(dt=0.005) + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) diff --git a/source/standalone/tutorials/04_sensors/run_frame_transformer.py b/source/standalone/tutorials/04_sensors/run_frame_transformer.py index 2eb12bd327..e8c702e025 100644 --- a/source/standalone/tutorials/04_sensors/run_frame_transformer.py +++ b/source/standalone/tutorials/04_sensors/run_frame_transformer.py @@ -139,10 +139,10 @@ def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): if count % 50 == 0: # get frame names frame_names = frame_transformer.data.target_frame_names - print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}") # increment frame index frame_index += 1 frame_index = frame_index % len(frame_names) + print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}") # visualize frame source_pos = frame_transformer.data.source_pos_w @@ -164,7 +164,8 @@ def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): def main(): """Main function.""" # Load kit helper - sim = SimulationContext(sim_utils.SimulationCfg(dt=0.005)) + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = SimulationContext(sim_cfg) # Set main camera sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0]) # Design the scene diff --git a/source/standalone/tutorials/04_sensors/run_ray_caster.py b/source/standalone/tutorials/04_sensors/run_ray_caster.py index 7346b35fa3..921eebad53 100644 --- a/source/standalone/tutorials/04_sensors/run_ray_caster.py +++ b/source/standalone/tutorials/04_sensors/run_ray_caster.py @@ -129,7 +129,7 @@ def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): def main(): """Main function.""" # Load simulation context - sim_cfg = sim_utils.SimulationCfg() + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([0.0, 15.0, 15.0], [0.0, 0.0, -2.5]) diff --git a/source/standalone/tutorials/04_sensors/run_usd_camera.py b/source/standalone/tutorials/04_sensors/run_usd_camera.py index ca4ad1d4c1..129a76f566 100644 --- a/source/standalone/tutorials/04_sensors/run_usd_camera.py +++ b/source/standalone/tutorials/04_sensors/run_usd_camera.py @@ -12,7 +12,7 @@ .. 
code-block:: bash # Usage with GUI - ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py + ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --enable_cameras # Usage with headless ./isaaclab.sh -p source/standalone/tutorials/04_sensors/run_usd_camera.py --headless --enable_cameras @@ -53,7 +53,7 @@ AppLauncher.add_app_launcher_args(parser) # parse the arguments args_cli = parser.parse_args() -args_cli.enable_cameras = True + # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app diff --git a/source/standalone/tutorials/05_controllers/run_diff_ik.py b/source/standalone/tutorials/05_controllers/run_diff_ik.py index 1c621a9abc..51c2102070 100644 --- a/source/standalone/tutorials/05_controllers/run_diff_ik.py +++ b/source/standalone/tutorials/05_controllers/run_diff_ik.py @@ -190,7 +190,7 @@ def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): def main(): """Main function.""" # Load kit helper - sim_cfg = sim_utils.SimulationCfg(dt=0.01) + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) sim = sim_utils.SimulationContext(sim_cfg) # Set main camera sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) diff --git a/source/standalone/workflows/rl_games/train.py b/source/standalone/workflows/rl_games/train.py index 79241df823..a925be8575 100644 --- a/source/standalone/workflows/rl_games/train.py +++ b/source/standalone/workflows/rl_games/train.py @@ -47,6 +47,7 @@ import gymnasium as gym import math import os +import random from datetime import datetime from rl_games.common import env_configurations, vecenv @@ -76,6 +77,10 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + # randomly sample a seed if seed = -1 + if args_cli.seed == -1: + args_cli.seed = random.randint(0, 10000) + agent_cfg["params"]["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["params"]["seed"] agent_cfg["params"]["config"]["max_epochs"] = ( args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg["params"]["config"]["max_epochs"] diff --git a/source/standalone/workflows/rsl_rl/cli_args.py b/source/standalone/workflows/rsl_rl/cli_args.py index e4b647f727..cf5d38b7bb 100644 --- a/source/standalone/workflows/rsl_rl/cli_args.py +++ b/source/standalone/workflows/rsl_rl/cli_args.py @@ -6,6 +6,7 @@ from __future__ import annotations import argparse +import random from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -68,6 +69,9 @@ def update_rsl_rl_cfg(agent_cfg: RslRlOnPolicyRunnerCfg, args_cli: argparse.Name """ # override the default configuration with CLI arguments if hasattr(args_cli, "seed") and args_cli.seed is not None: + # randomly sample a seed if seed = -1 + if args_cli.seed == -1: + args_cli.seed = random.randint(0, 10000) agent_cfg.seed = args_cli.seed if args_cli.resume is not None: agent_cfg.resume = args_cli.resume diff --git a/source/standalone/workflows/sb3/train.py b/source/standalone/workflows/sb3/train.py index 4c894a7355..1ce8062961 100644 --- a/source/standalone/workflows/sb3/train.py +++ b/source/standalone/workflows/sb3/train.py @@ -46,6 +46,7 @@ import gymnasium as gym import numpy as np import os +import random from datetime import datetime from stable_baselines3 import PPO @@ -71,6 +72,10 @@ 
@hydra_task_config(args_cli.task, "sb3_cfg_entry_point") def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict): """Train with stable-baselines agent.""" + # randomly sample a seed if seed = -1 + if args_cli.seed == -1: + args_cli.seed = random.randint(0, 10000) + # override configurations with non-hydra CLI arguments env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs agent_cfg["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"] diff --git a/source/standalone/workflows/skrl/train.py b/source/standalone/workflows/skrl/train.py index 4b7f6b9942..bbbdabf6a1 100644 --- a/source/standalone/workflows/skrl/train.py +++ b/source/standalone/workflows/skrl/train.py @@ -63,6 +63,7 @@ import gymnasium as gym import os +import random from datetime import datetime import skrl @@ -119,9 +120,14 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen if args_cli.ml_framework.startswith("jax"): skrl.config.jax.backend = "jax" if args_cli.ml_framework == "jax" else "numpy" - # set the environment seed + # randomly sample a seed if seed = -1 + if args_cli.seed == -1: + args_cli.seed = random.randint(0, 10000) + + # set the agent and environment seed from command line # note: certain randomization occur in the environment initialization so we set the seed here - env_cfg.seed = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"] + agent_cfg["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["seed"] + env_cfg.seed = agent_cfg["seed"] # specify directory for logging experiments log_root_path = os.path.join("logs", "skrl", agent_cfg["agent"]["experiment"]["directory"]) diff --git a/tools/run_all_tests.py b/tools/run_all_tests.py index e3a73c71ef..733af27c02 100644 --- a/tools/run_all_tests.py +++ b/tools/run_all_tests.py @@ -231,14 +231,14 @@ def test_all( else: stdout_str = stdout.decode("utf-8") else: - stdout = "" + stdout_str = "" if stderr is not None: if isinstance(stderr, str): stderr_str = stderr else: stderr_str = stderr.decode("utf-8") else: - stderr = "" + stderr_str = "" # Write to log file logging.info(stdout_str)