Rostok - J Moved #136

Merged · 1 commit · Jan 13, 2025
45 changes: 45 additions & 0 deletions jmoves/Dockerfile
@@ -0,0 +1,45 @@
FROM ubuntu:22.04

RUN apt-get update

# Install base utilities
RUN apt-get update && \
apt-get install -y wget git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

# Install miniconda
ENV CONDA_DIR /opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda


# Create an unprivileged user for the notebook server
RUN useradd -m jovyan
# Put conda in PATH so we can use conda activate
ENV PATH=$CONDA_DIR/bin:$PATH
COPY environment_jmoves.yml .

# RUN conda env create -f environment_jmoves.yml
RUN conda env update -n base --file environment_jmoves.yml
RUN conda init

SHELL ["conda", "run", "-n", "base", "/bin/bash", "-c"]

# Install Jupyter and Voila
RUN conda run -n base pip install jupyter notebook voila
RUN conda run -n base jupyter server extension enable voila --sys-prefix --enable_nbextensions=True

# Install our package
WORKDIR /home/jovyan

COPY . ./jmoves_env
RUN conda run -n base pip install -e ./jmoves_env
RUN conda run -n base pip install ./jmoves_env/meshcat-0.3.2.tar.gz


USER jovyan
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda init

CMD ["/bin/bash", "-c", "if [ -d /usr/local/bin/before-notebook.d ]; then for file in /usr/local/bin/before-notebook.d/*; do $file ; done; fi && jupyter notebook --no-browser --NotebookApp.allow_origin='*' --NotebookApp.token='' --ip=0.0.0.0 --NotebookApp.allow_remote_access=True"]
121 changes: 121 additions & 0 deletions jmoves/apps/article/create_reward_manager.py
@@ -0,0 +1,121 @@
import multiprocessing
from networkx import Graph
import numpy as np
import matplotlib.pyplot as plt

from pymoo.algorithms.soo.nonconvex.pso import PSO
from pymoo.core.problem import StarmapParallelization
from auto_robot_design.generator.restricted_generator.two_link_generator import TwoLinkGenerator, visualize_constrains

from auto_robot_design.optimization.saver import (
ProblemSaver, )
from auto_robot_design.description.builder import jps_graph2pinocchio_robot
from auto_robot_design.description.utils import draw_joint_point
from auto_robot_design.optimization.problems import CalculateCriteriaProblemByWeigths, get_optimizing_joints
from auto_robot_design.optimization.optimizer import PymooOptimizer
from auto_robot_design.pinokla.calc_criterion import ActuatedMass, EffectiveInertiaCompute, ImfCompute, ManipCompute, MovmentSurface, NeutralPoseMass, TranslationErrorMSE, ManipJacobian
from auto_robot_design.pinokla.criterion_agregator import CriteriaAggregator
from auto_robot_design.pinokla.criterion_math import ImfProjections
from auto_robot_design.pinokla.default_traj import convert_x_y_to_6d_traj_xz, get_horizontal_trajectory, get_simple_spline, get_vertical_trajectory, create_simple_step_trajectory, get_workspace_trajectory
from auto_robot_design.optimization.rewards.reward_base import PositioningReward, PositioningConstrain, PositioningErrorCalculator, RewardManager
from auto_robot_design.optimization.rewards.jacobian_and_inertia_rewards import HeavyLiftingReward, AccelerationCapability, MeanHeavyLiftingReward, MinAccelerationCapability
from auto_robot_design.optimization.rewards.pure_jacobian_rewards import EndPointZRRReward, VelocityReward, ForceEllipsoidReward
from auto_robot_design.optimization.rewards.inertia_rewards import MassReward
from auto_robot_design.description.builder import ParametrizedBuilder, DetailedURDFCreatorFixedEE, jps_graph2pinocchio_robot, MIT_CHEETAH_PARAMS_DICT


def get_manager_preset_2_stair_climber(graph: Graph, optimizing_joints: dict, workspace_traj: np.ndarray, step_trajs: list[np.ndarray], squat_trajs: list[np.ndarray]):
dict_trajectory_criteria = {
"MASS": NeutralPoseMass()
}
# criteria calculated for each point on the trajectory
dict_point_criteria = {
"Effective_Inertia": EffectiveInertiaCompute(),
"Actuated_Mass": ActuatedMass(),
"Manip_Jacobian": ManipJacobian(MovmentSurface.XZ)
}

crag = CriteriaAggregator(dict_point_criteria, dict_trajectory_criteria)
error_calculator = PositioningErrorCalculator(
error_key='error', jacobian_key="Manip_Jacobian")
soft_constrain = PositioningConstrain(
error_calculator=error_calculator, points=[workspace_traj])
reward_manager = RewardManager(crag=crag)
acceleration_capability = MinAccelerationCapability(manipulability_key='Manip_Jacobian',
trajectory_key="traj_6d", error_key="error", actuated_mass_key="Actuated_Mass")

heavy_lifting = HeavyLiftingReward(
manipulability_key='Manip_Jacobian', trajectory_key="traj_6d", error_key="error", mass_key="MASS")
# reward_manager.agg_list =
reward_manager.add_trajectory(step_trajs[0], 0)
reward_manager.add_trajectory(step_trajs[1], 1)
reward_manager.add_trajectory(step_trajs[2], 2)

reward_manager.add_trajectory(squat_trajs[0], 10)
reward_manager.add_trajectory(squat_trajs[1], 11)
reward_manager.add_trajectory(squat_trajs[2], 12)

reward_manager.add_reward(acceleration_capability, 0, weight=1)
reward_manager.add_reward(acceleration_capability, 1, weight=1)
reward_manager.add_reward(acceleration_capability, 2, weight=1)

reward_manager.add_reward(heavy_lifting, 10, weight=1)
reward_manager.add_reward(heavy_lifting, 11, weight=1)
reward_manager.add_reward(heavy_lifting, 12, weight=1)

reward_manager.add_trajectory_aggregator([0, 1, 2], 'mean')
reward_manager.add_trajectory_aggregator([10, 11, 12], 'mean')

reward_manager.close_trajectories()

return reward_manager, crag, soft_constrain



def get_manager_preset_2_stair_single(graph: Graph, optimizing_joints: dict, workspace_traj: np.ndarray, step_trajs: list[np.ndarray], squat_trajs: list[np.ndarray]):
dict_trajectory_criteria = {
"MASS": NeutralPoseMass()
}
# criteria calculated for each point on the trajectory
dict_point_criteria = {
"Effective_Inertia": EffectiveInertiaCompute(),
"Actuated_Mass": ActuatedMass(),
"Manip_Jacobian": ManipJacobian(MovmentSurface.XZ)
}

crag = CriteriaAggregator(dict_point_criteria, dict_trajectory_criteria)
error_calculator = PositioningErrorCalculator(
error_key='error', jacobian_key="Manip_Jacobian")
soft_constrain = PositioningConstrain(
error_calculator=error_calculator, points=[workspace_traj])
reward_manager = RewardManager(crag=crag)
acceleration_capability = MinAccelerationCapability(manipulability_key='Manip_Jacobian',
trajectory_key="traj_6d", error_key="error", actuated_mass_key="Actuated_Mass")

heavy_lifting = HeavyLiftingReward(
manipulability_key='Manip_Jacobian', trajectory_key="traj_6d", error_key="error", mass_key="MASS")
# reward_manager.agg_list =
reward_manager.add_trajectory(step_trajs[0], 0)
reward_manager.add_trajectory(step_trajs[1], 1)
reward_manager.add_trajectory(step_trajs[2], 2)

reward_manager.add_trajectory(squat_trajs[0], 10)
reward_manager.add_trajectory(squat_trajs[1], 11)
reward_manager.add_trajectory(squat_trajs[2], 12)

reward_manager.add_reward(acceleration_capability, 0, weight=1)
reward_manager.add_reward(acceleration_capability, 1, weight=1)
reward_manager.add_reward(acceleration_capability, 2, weight=1)

reward_manager.add_reward(heavy_lifting, 10, weight=1)
reward_manager.add_reward(heavy_lifting, 11, weight=1)
reward_manager.add_reward(heavy_lifting, 12, weight=1)

reward_manager.add_trajectory_aggregator([0, 1, 2], 'mean')
reward_manager.add_trajectory_aggregator([10, 11, 12], 'mean')

reward_manager.close_trajectories()

return reward_manager, crag, soft_constrain
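
The two presets above share one calling convention, so a short usage sketch may help reviewers exercise them outside the optimization pipeline. Everything passed into the call below is a placeholder: the empty graph, the empty joint dictionary, and the zero-filled arrays stand in for objects normally produced by TwoLinkGenerator, get_optimizing_joints, and the trajectory helpers imported at the top of the file; only the signature and the returned triple come from the code itself.

import numpy as np
from networkx import Graph

# Placeholder inputs (hypothetical values, not produced by the real generators).
graph = Graph()
optimizing_joints = {}
workspace_traj = np.zeros((50, 6))                    # 6d trajectory, 50 samples
step_trajs = [np.zeros((50, 6)) for _ in range(3)]    # three step trajectories
squat_trajs = [np.zeros((50, 6)) for _ in range(3)]   # three squat trajectories

reward_manager, crag, soft_constrain = get_manager_preset_2_stair_climber(
    graph, optimizing_joints, workspace_traj, step_trajs, squat_trajs)

# The manager now holds trajectories 0-2 (steps) and 10-12 (squats), with the
# acceleration reward averaged over the first group and the heavy-lifting
# reward averaged over the second.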
140 changes: 140 additions & 0 deletions jmoves/apps/article/crerate_squat_trq_plots.py
@@ -0,0 +1,140 @@
from auto_robot_design.pinokla.criterion_agregator import load_criterion_traj
import multiprocessing
from networkx import Graph
import numpy as np
import matplotlib.pyplot as plt

from pymoo.algorithms.soo.nonconvex.pso import PSO
from pymoo.core.problem import StarmapParallelization
from auto_robot_design.generator.restricted_generator.two_link_generator import TwoLinkGenerator, visualize_constrains

from auto_robot_design.optimization.saver import (
ProblemSaver, )
from auto_robot_design.description.builder import jps_graph2pinocchio_robot
from auto_robot_design.description.utils import draw_joint_point
from auto_robot_design.optimization.problems import CalculateCriteriaProblemByWeigths, CalculateMultiCriteriaProblem, get_optimizing_joints
from auto_robot_design.optimization.optimizer import PymooOptimizer
from auto_robot_design.pinokla.analyze_squat_history import get_sample_torque_traj_from_sample_multi
from auto_robot_design.pinokla.calc_criterion import ActuatedMass, EffectiveInertiaCompute, ImfCompute, ManipCompute, MovmentSurface, NeutralPoseMass, TranslationErrorMSE, ManipJacobian
from auto_robot_design.pinokla.criterion_agregator import CriteriaAggregator
from auto_robot_design.pinokla.criterion_math import ImfProjections
from auto_robot_design.pinokla.default_traj import convert_x_y_to_6d_traj_xz, get_horizontal_trajectory, get_simple_spline, get_vertical_trajectory, create_simple_step_trajectory, get_workspace_trajectory
from auto_robot_design.optimization.rewards.reward_base import PositioningReward, PositioningConstrain, PositioningErrorCalculator, RewardManager
from auto_robot_design.optimization.rewards.jacobian_and_inertia_rewards import HeavyLiftingReward, AccelerationCapability, MeanHeavyLiftingReward, MinAccelerationCapability
from auto_robot_design.optimization.rewards.pure_jacobian_rewards import EndPointZRRReward, VelocityReward, ForceEllipsoidReward
from auto_robot_design.optimization.rewards.inertia_rewards import MassReward
from auto_robot_design.description.builder import ParametrizedBuilder, DetailedURDFCreatorFixedEE, jps_graph2pinocchio_robot, MIT_CHEETAH_PARAMS_DICT
from auto_robot_design.description.builder import DetailedURDFCreatorFixedEE, ParametrizedBuilder, jps_graph2urdf_by_bulder
import numpy as np

import matplotlib.pyplot as plt

from auto_robot_design.description.actuators import t_motor_actuators

from auto_robot_design.description.utils import (
all_combinations_active_joints_n_actuator, )

from auto_robot_design.generator.restricted_generator.two_link_generator import TwoLinkGenerator
from auto_robot_design.pinokla.squat import SquatHopParameters, SimulateSquatHop
from auto_robot_design.optimization.analyze import get_optimizer_and_problem, get_pareto_sample_linspace, get_pareto_sample_histogram, get_urdf_from_problem
from pathlib import Path


def get_all_files_in_dir(directory):
try:
# Create a Path object for the directory
path = Path(directory)

# Use the glob method to match all files in the directory
files = [str(file) for file in path.glob('*') if file.is_file()]

return files
except Exception as e:
print(f"An error occurred: {e}")
return []


def get_metrics(loaded_dict: dict):
trq_1 = loaded_dict["tau"][:, 0]
trq_2 = loaded_dict["tau"][:, 1]
max_1 = np.max(np.abs(trq_1))
max_2 = np.max(np.abs(trq_2))

agr_max = max([max_1, max_2])

mean_1 = np.mean(np.abs(trq_1))
mean_2 = np.mean(np.abs(trq_2))

agr_mean = mean_1 + mean_2

max_diff_1 = np.max(np.abs(np.diff(trq_1)))
max_diff_2 = np.max(np.abs(np.diff(trq_2)))

agr_diff = max([max_diff_1, max_diff_2])
return agr_mean, agr_max, agr_diff, loaded_dict["Reward"], loaded_dict["X"]


def get_all_vector_metrics(directory):
sim_res_files = get_all_files_in_dir(directory)
sim_res = list(map(load_criterion_traj, sim_res_files))
agr_mean_list = []
agr_max_list = []
agr_diff_list = []
reword_list = []
param_x_list = []
for sim_res_i in sim_res:
agr_mean, agr_max, agr_diff, reword, param_x = get_metrics(sim_res_i)
agr_mean_list.append(agr_mean)
agr_max_list.append(agr_max)
agr_diff_list.append(agr_diff)
reword_list.append(reword)
param_x_list.append(param_x)
return agr_mean_list, agr_max_list, agr_diff_list, reword_list, param_x_list


PATH_CS = "results\\multi_opti_preset2\\topology_8_2024-05-30_10-40-12\\squat_compare"
agr_mean_list, agr_max_list, agr_diff_list, reword_list, param_x_list = get_all_vector_metrics(
PATH_CS)
save_p = Path(PATH_CS + "/" + "plots" + "/")
save_p.mkdir(parents=True, exist_ok=True)

plt.figure()
plt.scatter(np.array(reword_list)[:, 0], np.array(
reword_list)[:, 1], c=agr_mean_list, cmap="rainbow")
plt.colorbar()
plt.title("Mean torque in squat_sim on Pareto front")
plt.xlabel("ACC Capability")
plt.ylabel("HeavyLifting")

save_current1 = save_p / "Mean_torque_in_squat_sim_on_Pareto_front.svg"
save_current2 = save_p / "Mean_torque_in_squat_sim_on_Pareto_front.png"
plt.savefig(save_current1)
plt.savefig(save_current2)

plt.figure()
plt.scatter(np.array(reword_list)[:, 0], np.array(
reword_list)[:, 1], c=agr_max_list, cmap="rainbow")
plt.colorbar()
plt.title("Max torque in squat_sim on Pareto front")
plt.xlabel("ACC Capability")
plt.ylabel("HeavyLifting")

save_current1 = save_p / "Max_torque_in_squat_sim_on_Pareto_front.svg"
save_current2 = save_p / "Max_torque_in_squat_sim_on_Pareto_front.png"
plt.savefig(save_current1)
plt.savefig(save_current2)


plt.figure()
plt.scatter(np.array(reword_list)[:, 0], np.array(
reword_list)[:, 1], c=agr_diff_list, cmap="rainbow")
plt.colorbar()
plt.title("Max torque diff in squat_sim on Pareto front")
plt.xlabel("ACC Capability")
plt.ylabel("HeavyLifting")
plt.savefig("Max torque diff in squat_sim on Pareto front")
save_current1 = save_p / "Max_torque_diff_in_squat_sim_on_Pareto_front.svg"
save_current2 = save_p / "Max_torque_diff_in_squat_sim_on_Pareto_front.png"
plt.savefig(save_current1)
plt.savefig(save_current2)
plt.show()
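
As a quick, self-contained check of get_metrics, the sketch below feeds it a synthetic result dictionary instead of a file loaded with load_criterion_traj. The key names ("tau", "Reward", "X") and the two-column torque array follow the accesses inside get_metrics and the plotting code above; the shapes and values themselves are made up for illustration.

import numpy as np

# Hypothetical stand-in for one saved squat simulation result.
fake_result = {
    "tau": np.random.uniform(-5.0, 5.0, size=(100, 2)),  # torques of two actuated joints
    "Reward": np.array([0.3, 0.7]),                       # (acceleration capability, heavy lifting)
    "X": np.zeros(12),                                     # design parameter vector (length arbitrary here)
}

agr_mean, agr_max, agr_diff, reward, x = get_metrics(fake_result)
print(f"mean |tau| sum: {agr_mean:.2f}, max |tau|: {agr_max:.2f}, max |dtau|: {agr_diff:.2f}")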
24 changes: 24 additions & 0 deletions jmoves/apps/article/history_mupti_open.py
@@ -0,0 +1,24 @@
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from auto_robot_design.optimization.analyze import get_optimizer_and_problem, get_pareto_sample_linspace, get_pareto_sample_histogram

PATH_CS = "results\\multi_opti_preset2\\topology_0_2024-05-29_18-48-58"
optimizer, problem, res = get_optimizer_and_problem(PATH_CS)
sample_x, sample_F = get_pareto_sample_linspace(res, 10)
sample_x2, sample_F2 = get_pareto_sample_histogram(res, 10)

save_p = Path(PATH_CS) / "plots"
save_p.mkdir(parents=True, exist_ok=True)

history_mean = np.array(optimizer.history["Mean"])

plt.figure()
plt.scatter(sample_F2[:, 0], sample_F2[:, 1])
plt.title("from res2")
plt.show()