From ffe6d3ba207d5e79dcf88af84241912070b8a90c Mon Sep 17 00:00:00 2001 From: j96w Date: Sun, 3 Sep 2023 15:51:14 -0700 Subject: [PATCH 01/44] setup for LIBERO envs --- robomimic/envs/env_robosuite.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 1dfa900c..659f34ae 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -136,14 +136,15 @@ def reset_to(self, state): should_ret = False if "model" in state: self.reset() - robosuite_version_id = int(robosuite.__version__.split(".")[1]) - if robosuite_version_id <= 3: - from robosuite.utils.mjcf_utils import postprocess_model_xml - xml = postprocess_model_xml(state["model"]) - else: - # v1.4 and above use the class-based edit_model_xml function - xml = self.env.edit_model_xml(state["model"]) - self.env.reset_from_xml_string(xml) + # ----- LIBERO does not require xml edit ---- + # robosuite_version_id = int(robosuite.__version__.split(".")[1]) + # if robosuite_version_id <= 3: + # from robosuite.utils.mjcf_utils import postprocess_model_xml + # xml = postprocess_model_xml(state["model"]) + # else: + # # v1.4 and above use the class-based edit_model_xml function + # xml = self.env.edit_model_xml(state["model"]) + # self.env.reset_from_xml_string(xml) self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model From 57fd17c5ed2541c562c689e4c834f403c8b00e51 Mon Sep 17 00:00:00 2001 From: j96w Date: Mon, 11 Sep 2023 21:47:29 -0700 Subject: [PATCH 02/44] libero reset from xml --- robomimic/envs/env_robosuite.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 659f34ae..1ccac4f9 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -11,6 +11,7 @@ import robomimic.utils.obs_utils as ObsUtils import robomimic.envs.env_base as EB +from libero.libero.utils.utils import postprocess_model_xml # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: @@ -136,15 +137,10 @@ def reset_to(self, state): should_ret = False if "model" in state: self.reset() - # ----- LIBERO does not require xml edit ---- - # robosuite_version_id = int(robosuite.__version__.split(".")[1]) - # if robosuite_version_id <= 3: - # from robosuite.utils.mjcf_utils import postprocess_model_xml - # xml = postprocess_model_xml(state["model"]) - # else: - # # v1.4 and above use the class-based edit_model_xml function - # xml = self.env.edit_model_xml(state["model"]) - # self.env.reset_from_xml_string(xml) + # ----- loading LIBERO model xml ---- + model_xml = state["model"] + model_xml = postprocess_model_xml(model_xml, {}) + self.env.reset_from_xml_string(model_xml) self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model From 784f6a0e26d4f292d9e7ac02f6d7f84fb6eb59fe Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Fri, 12 Jan 2024 19:09:18 -0500 Subject: [PATCH 03/44] crop rand, wandb utils, keep just best ckpts --- robomimic/models/obs_core.py | 96 +++++++++++++++++++++++++++++++++- robomimic/utils/log_utils.py | 5 +- robomimic/utils/train_utils.py | 28 ++++++++++ 3 files changed, 126 insertions(+), 3 deletions(-) diff --git a/robomimic/models/obs_core.py b/robomimic/models/obs_core.py index c784fa27..786ae2d4 100644 --- a/robomimic/models/obs_core.py +++ b/robomimic/models/obs_core.py @@ -11,7 +11,7 @@ 
 import torch
 import torch.nn as nn

-from torchvision.transforms import Lambda, Compose
+from torchvision.transforms import Lambda, Compose, RandomResizedCrop
 import torchvision.transforms.functional as TVF

 import robomimic.models.base_nets as BaseNets
@@ -577,6 +577,100 @@ def __repr__(self):
         return msg


+class CropResizeRandomizer(Randomizer):
+    """
+    Randomly sample a crop, then resize it to the specified size.
+    """
+    def __init__(
+        self,
+        input_shape,
+        size,
+        scale,
+        ratio,
+        num_crops=1,
+        pos_enc=False,
+    ):
+        """
+        Args:
+            input_shape (tuple, list): shape of input (not including batch dimension)
+            size (tuple): (height, width) to resize to after cropping
+            scale (tuple): lower and upper bounds of the sampled crop area, as a fraction of the source image area
+            ratio (tuple): lower and upper bounds of the sampled crop aspect ratio
+            num_crops (int): number of random crops to take
+            pos_enc (bool): if True, add 2 channels to the output to encode the spatial
+                location of the cropped pixels in the source image
+        """
+        super(CropResizeRandomizer, self).__init__()
+
+        assert len(input_shape) == 3 # (C, H, W)
+
+        self.input_shape = input_shape
+        self.size = size
+        self.scale = scale
+        self.ratio = ratio
+        self.num_crops = num_crops
+        self.pos_enc = pos_enc
+
+        self.resize_crop = RandomResizedCrop(size=size, scale=scale, ratio=ratio, interpolation=TVF.InterpolationMode.BILINEAR)
+
+    def output_shape_in(self, input_shape=None):
+        out_c = self.input_shape[0] + 2 if self.pos_enc else self.input_shape[0]
+        return [out_c, self.size[0], self.size[1]]
+
+    def output_shape_out(self, input_shape=None):
+        return list(input_shape)
+
+    def _forward_in(self, inputs):
+        """
+        Samples a random resized crop for each input in the batch.
+        """
+        # previous crop-randomizer logic, kept for reference:
+        # assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions
+        # out, _ = ObsUtils.sample_random_image_crops(
+        #     images=inputs,
+        #     crop_height=self.crop_height,
+        #     crop_width=self.crop_width,
+        #     num_crops=self.num_crops,
+        #     pos_enc=self.pos_enc,
+        # )
+        # # [B, N, ...] -> [B * N, ...]
+        # out = TensorUtils.join_dimensions(out, 0, 1)
+        out = self.resize_crop(inputs)
+
+        return out
+
+    def _forward_in_eval(self, inputs):
+        """
+        No cropping at eval time - just resize to @self.size.
+        """
+        # previous center-crop logic, kept for reference:
+        # assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions
+        # inputs = inputs.permute(*range(inputs.dim()-3), inputs.dim()-2, inputs.dim()-1, inputs.dim()-3)
+        # out = ObsUtils.center_crop(inputs, self.crop_height, self.crop_width)
+        # out = out.permute(*range(out.dim()-3), out.dim()-1, out.dim()-3, out.dim()-2)
+        # return out
+
+        # just resize
+        return TVF.resize(inputs, size=self.size, interpolation=TVF.InterpolationMode.BILINEAR)
+
+    def _forward_out(self, inputs):
+        """
+        Splits the outputs from shape [B * N, ...] -> [B, N, ...] and then averages across N
+        to result in shape [B, ...], so that the network output is consistent with
+        what would have happened if there were no randomization.
+        """
+        batch_size = (inputs.shape[0] // self.num_crops)
+        out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0,
+            target_dims=(batch_size, self.num_crops))
+        return out.mean(dim=1)
+
+
 class ColorRandomizer(Randomizer):
     """
     Randomly sample color jitter at input, and then average across color jitters at output.
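For orientation, here is a hedged usage sketch of the randomizer added above. The image shape, target size, and the scale/ratio ranges are illustrative assumptions rather than defaults from this patch, and `forward_in` is the public wrapper that robomimic's `Randomizer` base class routes to `_forward_in` (train mode) or `_forward_in_eval` (eval mode):

    # Illustrative only -- not part of the diff above.
    import torch
    from robomimic.models.obs_core import CropResizeRandomizer

    randomizer = CropResizeRandomizer(
        input_shape=[3, 84, 84],   # (C, H, W), batch dimension excluded
        size=(76, 76),             # target (height, width) after crop + resize
        scale=(0.9, 1.0),          # crop area as a fraction of the source image
        ratio=(0.75, 1.333),       # aspect-ratio range of the sampled crop
    )
    imgs = torch.rand(8, 3, 84, 84)     # batch of images in [0, 1]
    out = randomizer.forward_in(imgs)   # train: RandomResizedCrop; eval: plain resize
    assert out.shape == (8, 3, 76, 76)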
diff --git a/robomimic/utils/log_utils.py b/robomimic/utils/log_utils.py index 1e1be989..ca1a3c6d 100644 --- a/robomimic/utils/log_utils.py +++ b/robomimic/utils/log_utils.py @@ -43,7 +43,7 @@ class DataLogger(object): """ Logging class to log metrics to tensorboard and/or retrieve running statistics about logged data. """ - def __init__(self, log_dir, config, log_tb=True, log_wandb=False): + def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): """ Args: log_dir (str): base path to store logs @@ -56,6 +56,7 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False): if log_tb: from tensorboardX import SummaryWriter self._tb_logger = SummaryWriter(os.path.join(log_dir, 'tb')) + if log_wandb: import wandb @@ -79,7 +80,7 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False): self._wandb_logger.init( entity=Macros.WANDB_ENTITY, project=config.experiment.logging.wandb_proj_name, - name=config.experiment.name, + name=uid if uid else config.experiment.name, dir=log_dir, mode=("offline" if attempt == num_attempts - 1 else "online"), ) diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index b5fb1e48..51ea3762 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -496,6 +496,34 @@ def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) +def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): + """ + Delete checkpoints in a directory, keeping top @top_n checkpoints based on lowest validation loss. Where checkpoints are saved in the form "model_epoch_{n}_best_validation_{validation loss}.pth + """ + # get all checkpoints + all_checkpoints = [] + for filename in os.listdir(ckpt_dir): + if filename.endswith(".pth"): + all_checkpoints.append(filename) + all_checkpoints = sorted(all_checkpoints) + + # get validation losses + validation_losses = [] + for ckpt in all_checkpoints: + val_loss = float(ckpt.split("best_validation_")[1].split(".pth")[0]) + + validation_losses.append((val_loss, ckpt)) + # validation_losses = np.array(validation_losses) + validation_losses = sorted(validation_losses, key=lambda x: x[0]) + + # delete checkpoints + if smallest: + for ckpt in all_checkpoints[top_n:]: + os.remove(os.path.join(ckpt_dir, ckpt)) + else: + for ckpt in all_checkpoints[:-top_n]: + os.remove(os.path.join(ckpt_dir, ckpt)) + def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None): """ From c420dc73a98b98c01d5935da396a58d2cf43b616 Mon Sep 17 00:00:00 2001 From: matnay Date: Wed, 21 Feb 2024 15:37:37 -0500 Subject: [PATCH 04/44] added dinov2 --- robomimic/models/base_nets.py | 86 ++++++++++++++++++++++++++++++++++- robomimic/models/obs_nets.py | 28 ++++++------ 2 files changed, 100 insertions(+), 14 deletions(-) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index 0a4927e0..1dcc7912 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -486,7 +486,6 @@ def forward(self, inputs): ) return x - class ResNet18Conv(ConvBase): """ A ResNet18 block that can be used to process input images. 
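Before continuing with this diff, a caveat about the `delete_checkpoints` helper introduced in the previous commit: its docstring promises to keep the @top_n checkpoints with the lowest validation loss, but both deletion loops iterate over `all_checkpoints`, which is sorted alphabetically, while the loss-sorted `validation_losses` list is built and never used. A hedged sketch of the presumably intended behavior for the smallest=True case (`keep_best_checkpoints` is a hypothetical name):

    # Sketch only -- assumes filenames of the form model_epoch_{n}_best_validation_{loss}.pth
    import os

    def keep_best_checkpoints(ckpt_dir, top_n=3):
        ckpts = [f for f in os.listdir(ckpt_dir) if f.endswith(".pth")]
        # sort by parsed validation loss, ascending, so the best checkpoints come first
        by_loss = sorted(ckpts, key=lambda f: float(f.split("best_validation_")[1][:-len(".pth")]))
        for f in by_loss[top_n:]:   # remove everything beyond the @top_n best
            os.remove(os.path.join(ckpt_dir, f))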
@@ -541,6 +540,91 @@ def __repr__(self): header = '{}'.format(str(self.__class__.__name__)) return header + '(input_channel={}, input_coord_conv={})'.format(self._input_channel, self._input_coord_conv) +class Vit(ConvBase): + """ + Vision transformer + """ + def __init__( + self, + input_channel=3, + vit_model_class = 'vit_b', + freeze = True + ): + """ + Using pretrained observation encoder network proposed in Vision Transformers + git clone https://github.com/facebookresearch/dinov2 + pip install -r requirements.txt + Args: + input_channel (int): number of input channels for input images to the network. + If not equal to 3, modifies first conv layer to handle the number + of input channels. + vit_model_class (str): select one of the vit pretrained model "vit_b", "vit_l", "vit_s" or "vit_g" + freeze (bool): if True, use a frozen ViT pretrained model. + """ + super(Vit, self).__init__() + + assert input_channel == 3 + assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist + + # cut the last fc layer + self._input_channel = input_channel + self._vit_model_class = vit_model_class + self._freeze = freeze + self._input_coord_conv = False + self._pretrained = False + + self.preprocess = nn.Sequential( + transforms.Resize((294,294)), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ) + + try: + if self._vit_model_class=="vit_s": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_lc') + if self._vit_model_class=="vit_l": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_lc') + if self._vit_model_class=="vit_g": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_lc') + if self._vit_model_class=="vit_b": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc') + except ImportError: + print("WARNING: could not load Vit") + + if freeze: + for param in self.nets.parameters(): + param.requires_grad = False + + if self._freeze: + self.nets.eval() + + def forward(self, inputs): + x = self.preprocess(inputs) + x = self.nets(x) + return x + + def output_shape(self, input_shape): + """ + Function to compute output shape from inputs to this module. + Args: + input_shape (iterable of int): shape of input. Does not include batch dimension. + Some modules may not need this argument, if their output does not depend + on the size of the input, or if they assume fixed size input. 
+ Returns: + out_shape ([int]): list of integers corresponding to output shape + """ + assert(len(input_shape) == 3) + + out_dim = 1000 + + return [out_dim, 1, 1] + + def __repr__(self): + """Pretty print network.""" + print("**Number of learnable params:",sum(p.numel() for p in self.nets.parameters() if p.requires_grad)," Freeze:",self._freeze) + print("**Number of params:",sum(p.numel() for p in self.nets.parameters())) + + header = '{}'.format(str(self.__class__.__name__)) + return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) class R3MConv(ConvBase): """ diff --git a/robomimic/models/obs_nets.py b/robomimic/models/obs_nets.py index b3284185..4a0b9483 100644 --- a/robomimic/models/obs_nets.py +++ b/robomimic/models/obs_nets.py @@ -25,6 +25,7 @@ FeatureAggregator from robomimic.models.obs_core import VisualCore, Randomizer from robomimic.models.transformers import PositionalEncoding, GPT_Backbone +from robomimic.models.base_nets import Vit def obs_encoder_factory( @@ -101,13 +102,13 @@ class ObservationEncoder(Module): Module that processes inputs by observation key and then concatenates the processed observation keys together. Each key is processed with an encoder head network. Call @register_obs_key to register observation keys with the encoder and then - finally call @make to create the encoder networks. + finally call @make to create the encoder networks. """ def __init__(self, feature_activation=nn.ReLU): """ Args: feature_activation: non-linearity to apply after each obs net - defaults to ReLU. Pass - None to apply no activation. + None to apply no activation. """ super(ObservationEncoder, self).__init__() self.obs_shapes = OrderedDict() @@ -120,12 +121,12 @@ def __init__(self, feature_activation=nn.ReLU): self._locked = False def register_obs_key( - self, + self, name, - shape, - net_class=None, - net_kwargs=None, - net=None, + shape, + net_class=None, + net_kwargs=None, + net=None, randomizer=None, share_net_from=None, ): @@ -143,7 +144,7 @@ def register_obs_key( instead of creating a different net randomizer (Randomizer instance): if provided, use this Module to augment observation keys coming in to the encoder, and possibly augment the processed output as well - share_net_from (str): if provided, use the same instance of @net_class + share_net_from (str): if provided, use the same instance of @net_class as another observation key. This observation key must already exist in this encoder. Warning: Note that this does not share the observation key randomizer """ @@ -362,7 +363,7 @@ class ObservationGroupEncoder(Module): The class takes a dictionary of dictionaries, @observation_group_shapes. Each key corresponds to a observation group (e.g. 'obs', 'subgoal', 'goal') - and each OrderedDict should be a map between modalities and + and each OrderedDict should be a map between modalities and expected input shapes (e.g. { 'image' : (3, 120, 160) }). 
""" def __init__( @@ -403,7 +404,7 @@ def __init__( # type checking assert isinstance(observation_group_shapes, OrderedDict) assert np.all([isinstance(observation_group_shapes[k], OrderedDict) for k in observation_group_shapes]) - + self.observation_group_shapes = observation_group_shapes # create an observation encoder per observation group @@ -421,7 +422,7 @@ def forward(self, **inputs): Args: inputs (dict): dictionary that maps observation groups to observation - dictionaries of torch.Tensor batches that agree with + dictionaries of torch.Tensor batches that agree with @self.observation_group_shapes. All observation groups in @self.observation_group_shapes must be present, but additional observation groups can also be present. Note that these are specified @@ -567,7 +568,7 @@ def output_shape(self, input_shape=None): """ return { k : list(self.output_shapes[k]) for k in self.output_shapes } - def forward(self, **inputs): + def forward(self, return_latent=False, **inputs): """ Process each set of inputs in its own observation group. @@ -583,6 +584,8 @@ def forward(self, **inputs): """ enc_outputs = self.nets["encoder"](**inputs) mlp_out = self.nets["mlp"](enc_outputs) + if return_latent: + return self.nets["decoder"](mlp_out), enc_outputs.detach(), mlp_out.detach() return self.nets["decoder"](mlp_out) def _to_string(self): @@ -604,7 +607,6 @@ def __repr__(self): msg = header + '(' + msg + '\n)' return msg - class RNN_MIMO_MLP(Module): """ A wrapper class for a multi-step RNN and a per-step MLP and a decoder. From dbd84538c467fcb946a1448c86ab263aa92eff7e Mon Sep 17 00:00:00 2001 From: matnay Date: Sat, 9 Mar 2024 09:33:18 -0500 Subject: [PATCH 05/44] Added DINOv2 to base nets --- robomimic/models/base_nets.py | 85 ++++++++++++++++++++++++++++++++++ robomimic/utils/train_utils.py | 12 +++++ 2 files changed, 97 insertions(+) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index 1dcc7912..f05dc96d 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -539,6 +539,91 @@ def __repr__(self): """Pretty print network.""" header = '{}'.format(str(self.__class__.__name__)) return header + '(input_channel={}, input_coord_conv={})'.format(self._input_channel, self._input_coord_conv) +class ViT_Rein(ConvBase): + """ + ViT LoRA using Rein method + """ + def __init__( + self, + input_channel=3, + vit_model_class = 'vit_b', + freeze = True): + """ + Using pretrained observation encoder network proposed in Vision Transformers + git clone https://github.com/facebookresearch/dinov2 + pip install -r requirements.txt + Args: + input_channel (int): number of input channels for input images to the network. + If not equal to 3, modifies first conv layer to handle the number + of input channels. + vit_model_class (str): select one of the vit pretrained model "vit_b", "vit_l", "vit_s" or "vit_g" + freeze (bool): if True, use a frozen ViT pretrained model. 
+ """ + super(ViT_Rein, self).__init__() + + assert input_channel == 3 + assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist + + # cut the last fc layer + self._input_channel = input_channel + self._vit_model_class = vit_model_class + self._freeze = freeze + self._input_coord_conv = False + self._pretrained = False + + self.preprocess = nn.Sequential( + transforms.Resize((294,294)), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ) + + try: + if self._vit_model_class=="vit_s": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_lc') + if self._vit_model_class=="vit_l": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_lc') + if self._vit_model_class=="vit_g": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_lc') + if self._vit_model_class=="vit_b": + self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc') + except ImportError: + print("WARNING: could not load Vit") + + if freeze: + for param in self.nets.parameters(): + param.requires_grad = False + + if self._freeze: + self.nets.eval() + + def forward(self, inputs): + x = self.preprocess(inputs) + x = self.nets(x) + return x + + def output_shape(self, input_shape): + """ + Function to compute output shape from inputs to this module. + Args: + input_shape (iterable of int): shape of input. Does not include batch dimension. + Some modules may not need this argument, if their output does not depend + on the size of the input, or if they assume fixed size input. + Returns: + out_shape ([int]): list of integers corresponding to output shape + """ + assert(len(input_shape) == 3) + + out_dim = 1000 + + return [out_dim, 1, 1] + + def __repr__(self): + """Pretty print network.""" + print("**Number of learnable params:",sum(p.numel() for p in self.nets.parameters() if p.requires_grad)," Freeze:",self._freeze) + print("**Number of params:",sum(p.numel() for p in self.nets.parameters())) + + header = '{}'.format(str(self.__class__.__name__)) + return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) + class Vit(ConvBase): """ diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index 51ea3762..bed628f3 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -524,6 +524,15 @@ def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): for ckpt in all_checkpoints[:-top_n]: os.remove(os.path.join(ckpt_dir, ckpt)) +def get_gpu_usage_mb(index): + """Returns the GPU usage in B.""" + h = nvmlDeviceGetHandleByIndex(index) + info = nvmlDeviceGetMemoryInfo(h) + print(f'total : {info.total}') + print(f'free : {info.free}') + print(f'used : {info.used}') + + return info.used / 1024 / 1024 def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None): """ @@ -550,6 +559,9 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor Returns: step_log_all (dict): dictionary of logged training metrics averaged across all batches """ + + #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) + epoch_timestamp = time.time() if validate: model.set_eval() 
From c199541852a3ab771a8f16d73c52d60b5eed30b1 Mon Sep 17 00:00:00 2001 From: matnay Date: Mon, 1 Apr 2024 15:15:47 -0400 Subject: [PATCH 06/44] added dino with lora adaptation --- robomimic/algo/bc.py | 3 +- robomimic/models/base_nets.py | 40 +++++++-- robomimic/models/vit_rein.py | 162 ++++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+), 6 deletions(-) create mode 100644 robomimic/models/vit_rein.py diff --git a/robomimic/algo/bc.py b/robomimic/algo/bc.py index 091be78e..02c7dfe8 100644 --- a/robomimic/algo/bc.py +++ b/robomimic/algo/bc.py @@ -107,7 +107,8 @@ def process_batch_for_training(self, batch): will be used for training """ input_batch = dict() - input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} + #input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} + input_batch["obs"] = {k: v[:, 0, :] if v.ndim != 1 else v for k, v in batch['obs'].items()} input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # we move to device first before float conversion because image observation modalities will be uint8 - diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index f05dc96d..666ba339 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -16,7 +16,7 @@ from torchvision import models as vision_models import robomimic.utils.tensor_utils as TensorUtils - +from robomimic.models.vit_rein import Reins, LoRAReins, MLPhead CONV_ACTIVATIONS = { "relu": nn.ReLU, @@ -539,6 +539,7 @@ def __repr__(self): """Pretty print network.""" header = '{}'.format(str(self.__class__.__name__)) return header + '(input_channel={}, input_coord_conv={})'.format(self._input_channel, self._input_coord_conv) + class ViT_Rein(ConvBase): """ ViT LoRA using Rein method @@ -547,6 +548,8 @@ def __init__( self, input_channel=3, vit_model_class = 'vit_b', + lora_dim = 16, + patch_size = 16, freeze = True): """ Using pretrained observation encoder network proposed in Vision Transformers @@ -570,6 +573,9 @@ def __init__( self._freeze = freeze self._input_coord_conv = False self._pretrained = False + self._lora_dim = lora_dim + self._patch_size = patch_size + self._out_indices = [7, 11, 15, 23], self.preprocess = nn.Sequential( transforms.Resize((294,294)), @@ -587,17 +593,41 @@ def __init__( self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc') except ImportError: print("WARNING: could not load Vit") + + try : + self._rein_layers = LoRAReins(lora_dim=self._lora_dim, num_layers=len(self.nets.backbone.blocks),embed_dims = self.nets.backbone.patch_embed.proj.out_channels,patch_size=self._patch_size) + self._mlp_lora_head = MLPhead(in_dim=3*self.nets.backbone.patch_embed.proj.out_channels, out_dim = 5*self.nets.backbone.patch_embed.proj.out_channels) + except ImportError: + print("WARNING: could not load rein layer") - if freeze: + + if self._freeze: for param in self.nets.parameters(): param.requires_grad = False - - if self._freeze: self.nets.eval() def forward(self, inputs): x = self.preprocess(inputs) - x = self.nets(x) + x = self.nets.backbone.patch_embed(x) + for idx, blk in enumerate(self.nets.backbone.blocks): + x = blk(x) + x = self._rein_layers.forward( + x, + idx, + batch_first=True, + has_cls_token=True, + ) + + q_avg = x.mean(dim=1).unsqueeze(1) + q_max = torch.max(x,1)[0].unsqueeze(1) + q_N = x[:,x.shape[1]-1,:].unsqueeze(1) + + _q = torch.cat((q_avg, q_max, q_N), dim=1) + + x = 
self.nets.backbone.norm(_q) + x = x.flatten(-2,-1) + x = self._mlp_lora_head(x) + x = self.nets.linear_head(x) return x def output_shape(self, input_shape): diff --git a/robomimic/models/vit_rein.py b/robomimic/models/vit_rein.py new file mode 100644 index 00000000..e73f9327 --- /dev/null +++ b/robomimic/models/vit_rein.py @@ -0,0 +1,162 @@ +""" +Contains torch Modules for implementation of rein method +for domain adaptation of DINOv2 +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +import math +from functools import reduce +from operator import mul +from torch import Tensor + +class MLPhead(nn.Module): + def __init__(self, + in_dim: int, + out_dim: int, + **kwargs) -> None: + super().__init__(**kwargs) + self._in_dim = in_dim + self._out_dim = out_dim + + self._mlp = nn.Linear(self._in_dim, self._out_dim) + + def forward(self, x: Tensor) -> Tensor: + x = self._mlp.forward(x) + return x + +class Reins(nn.Module): + def __init__( + self, + num_layers: int, + embed_dims: int, + patch_size: int, + query_dims: int = 256, + token_length: int = 100, + use_softmax: bool = True, + link_token_to_query: bool = True, + scale_init: float = 0.001, + zero_mlp_delta_f: bool = False, + ) -> None: + super().__init__() + self.num_layers = num_layers + self.embed_dims = embed_dims + self.patch_size = patch_size + self.query_dims = query_dims + self.token_length = token_length + self.link_token_to_query = link_token_to_query + self.scale_init = scale_init + self.use_softmax = use_softmax + self.zero_mlp_delta_f = zero_mlp_delta_f + self.create_model() + + def create_model(self): + self.learnable_tokens = nn.Parameter( + torch.empty([self.num_layers, self.token_length, self.embed_dims]) + ) + self.scale = nn.Parameter(torch.tensor(self.scale_init)) + self.mlp_token2feat = nn.Linear(self.embed_dims, self.embed_dims) + self.mlp_delta_f = nn.Linear(self.embed_dims, self.embed_dims) + val = math.sqrt( + 6.0 + / float( + 3 * reduce(mul, (self.patch_size, self.patch_size), 1) + self.embed_dims + ) + ) + nn.init.uniform_(self.learnable_tokens.data, -val, val) + nn.init.kaiming_uniform_(self.mlp_delta_f.weight, a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.mlp_token2feat.weight, a=math.sqrt(5)) + self.transform = nn.Linear(self.embed_dims, self.query_dims) + self.merge = nn.Linear(self.query_dims * 3, self.query_dims) + if self.zero_mlp_delta_f: + del self.scale + self.scale = 1.0 + nn.init.zeros_(self.mlp_delta_f.weight) + nn.init.zeros_(self.mlp_delta_f.bias) + + def return_auto(self, feats): + if self.link_token_to_query: + tokens = self.transform(self.get_tokens(-1)).permute(1, 2, 0) + tokens = torch.cat( + [ + F.max_pool1d(tokens, kernel_size=self.num_layers), + F.avg_pool1d(tokens, kernel_size=self.num_layers), + tokens[:, :, -1].unsqueeze(-1), + ], + dim=-1, + ) + querys = self.merge(tokens.flatten(-2, -1)) + return feats, querys + else: + return feats + + def get_tokens(self, layer: int) -> Tensor: + if layer == -1: + # return all + return self.learnable_tokens + else: + return self.learnable_tokens[layer] + + def forward( + self, feats: Tensor, layer: int, batch_first=False, has_cls_token=True + ) -> Tensor: + if batch_first: + feats = feats.permute(1, 0, 2) + if has_cls_token: + cls_token, feats = torch.tensor_split(feats, [1], dim=0) + tokens = self.get_tokens(layer) + delta_feat = self.forward_delta_feat( + feats, + tokens, + layer, + ) + delta_feat = delta_feat * self.scale + feats = feats + delta_feat + if has_cls_token: + feats = torch.cat([cls_token, feats], dim=0) + if 
batch_first: + feats = feats.permute(1, 0, 2) + return feats + + def forward_delta_feat(self, feats: Tensor, tokens: Tensor, layers: int) -> Tensor: + attn = torch.einsum("nbc,mc->nbm", feats, tokens) + if self.use_softmax: + attn = attn * (self.embed_dims**-0.5) + attn = F.softmax(attn, dim=-1) + delta_f = torch.einsum( + "nbm,mc->nbc", + attn[:, :, 1:], + self.mlp_token2feat(tokens[1:, :]), + ) + delta_f = self.mlp_delta_f(delta_f + feats) + return delta_f + +class LoRAReins(Reins): + def __init__(self, lora_dim=16, **kwargs): + self.lora_dim = lora_dim + super().__init__(**kwargs) + + def create_model(self): + super().create_model() + del self.learnable_tokens + self.learnable_tokens_a = nn.Parameter( + torch.empty([self.num_layers, self.token_length, self.lora_dim]) + ) + self.learnable_tokens_b = nn.Parameter( + torch.empty([self.num_layers, self.lora_dim, self.embed_dims]) + ) + val = math.sqrt( + 6.0 + / float( + 3 * reduce(mul, (self.patch_size, self.patch_size), 1) + + (self.embed_dims * self.lora_dim) ** 0.5 + ) + ) + nn.init.uniform_(self.learnable_tokens_a.data, -val, val) + nn.init.uniform_(self.learnable_tokens_b.data, -val, val) + + def get_tokens(self, layer): + if layer == -1: + return self.learnable_tokens_a @ self.learnable_tokens_b + else: + return self.learnable_tokens_a[layer] @ self.learnable_tokens_b[layer] \ No newline at end of file From 643c68d8bea78a061301a4644749ad31b16142a5 Mon Sep 17 00:00:00 2001 From: matnay Date: Wed, 3 Apr 2024 13:07:57 -0400 Subject: [PATCH 07/44] reverting change in env_robosuite --- robomimic/envs/env_robosuite.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 1ccac4f9..9c83cd45 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -11,7 +11,6 @@ import robomimic.utils.obs_utils as ObsUtils import robomimic.envs.env_base as EB -from libero.libero.utils.utils import postprocess_model_xml # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: @@ -137,6 +136,14 @@ def reset_to(self, state): should_ret = False if "model" in state: self.reset() + robosuite_version_id = int(robosuite.__version__.split(".")[1]) + if robosuite_version_id <= 3: + from robosuite.utils.mjcf_utils import postprocess_model_xml + xml = postprocess_model_xml(state["model"]) + else: + # v1.4 and above use the class-based edit_model_xml function + xml = self.env.edit_model_xml(state["model"]) + self.env.reset_from_xml_string(xml) # ----- loading LIBERO model xml ---- model_xml = state["model"] model_xml = postprocess_model_xml(model_xml, {}) From b9c83955a3e6790021dd93896d51f183d52c0af2 Mon Sep 17 00:00:00 2001 From: matnay Date: Wed, 3 Apr 2024 13:11:12 -0400 Subject: [PATCH 08/44] reverting change in env_robosuite --- robomimic/envs/env_robosuite.py | 146 +++++++++++++++++++++++++++++--- 1 file changed, 133 insertions(+), 13 deletions(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 9c83cd45..7d398ff2 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -8,6 +8,12 @@ from copy import deepcopy import robosuite +import robosuite.utils.transform_utils as T +try: + # this is needed for ensuring robosuite can find the additional mimicgen environments (see https://mimicgen.github.io) + import mimicgen_envs +except ImportError: + pass import robomimic.utils.obs_utils as ObsUtils import robomimic.envs.env_base as EB @@ -28,6 
+34,7 @@ def __init__( render=False, render_offscreen=False, use_image_obs=False, + use_depth_obs=False, postprocess_visual_obs=True, **kwargs, ): @@ -45,11 +52,16 @@ def __init__( on every env.step call. Set this to False for efficiency reasons, if image observations are not required. + use_depth_obs (bool): if True, environment is expected to render depth image observations + on every env.step call. Set this to False for efficiency reasons, if depth + observations are not required. + postprocess_visual_obs (bool): if True, postprocess image observations to prepare for learning. This should only be False when extracting observations for saving to a dataset (to save space on RGB images for example). """ self.postprocess_visual_obs = postprocess_visual_obs + self.use_depth_obs = use_depth_obs # robosuite version check self._is_v1 = (robosuite.__version__.split(".")[0] == "1") @@ -65,7 +77,7 @@ def __init__( ignore_done=True, use_object_obs=True, use_camera_obs=use_image_obs, - camera_depths=False, + camera_depths=use_depth_obs, ) kwargs.update(update_kwargs) @@ -81,7 +93,7 @@ def __init__( # make sure gripper visualization is turned off (we almost always want this for learning) kwargs["gripper_visualization"] = False del kwargs["camera_depths"] - kwargs["camera_depth"] = False # rename kwarg + kwargs["camera_depth"] = use_depth_obs # rename kwarg self._env_name = env_name self._init_kwargs = deepcopy(kwargs) @@ -144,10 +156,6 @@ def reset_to(self, state): # v1.4 and above use the class-based edit_model_xml function xml = self.env.edit_model_xml(state["model"]) self.env.reset_from_xml_string(xml) - # ----- loading LIBERO model xml ---- - model_xml = state["model"] - model_xml = postprocess_model_xml(model_xml, {}) - self.env.reset_from_xml_string(model_xml) self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model @@ -180,7 +188,11 @@ def render(self, mode="human", height=None, width=None, camera_name="agentview") self.env.viewer.set_camera(cam_id) return self.env.render() elif mode == "rgb_array": - return self.env.sim.render(height=height, width=width, camera_name=camera_name)[::-1] + im = self.env.sim.render(height=height, width=width, camera_name=camera_name) + if self.use_depth_obs: + # render() returns a tuple when self.use_depth_obs=True + return im[0][::-1] + return im[::-1] else: raise NotImplementedError("mode={} is not implemented".format(mode)) @@ -197,7 +209,18 @@ def get_observation(self, di=None): ret = {} for k in di: if (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="rgb"): + # by default images from mujoco are flipped in height + ret[k] = di[k][::-1] + if self.postprocess_visual_obs: + ret[k] = ObsUtils.process_obs(obs=ret[k], obs_key=k) + elif (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="depth"): + # by default depth images from mujoco are flipped in height ret[k] = di[k][::-1] + if len(ret[k].shape) == 2: + ret[k] = ret[k][..., None] # (H, W, 1) + assert len(ret[k].shape) == 3 + # scale entries in depth map to correspond to real distance. 
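+                # (MuJoCo depth buffers are normalized to [0, 1]; get_real_depth_map converts
+                # them to metric units via near / (1 - d * (1 - near / far)), defined below)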
+ ret[k] = self.get_real_depth_map(ret[k]) if self.postprocess_visual_obs: ret[k] = ObsUtils.process_obs(obs=ret[k], obs_key=k) @@ -221,6 +244,80 @@ def get_observation(self, di=None): ret["gripper_qpos"] = np.array(di["gripper_qpos"]) return ret + def get_real_depth_map(self, depth_map): + """ + Reproduced from https://github.com/ARISE-Initiative/robosuite/blob/c57e282553a4f42378f2635b9a3cbc4afba270fd/robosuite/utils/camera_utils.py#L106 + since older versions of robosuite do not have this conversion from normalized depth values returned by MuJoCo + to real depth values. + """ + # Make sure that depth values are normalized + assert np.all(depth_map >= 0.0) and np.all(depth_map <= 1.0) + extent = self.env.sim.model.stat.extent + far = self.env.sim.model.vis.map.zfar * extent + near = self.env.sim.model.vis.map.znear * extent + return near / (1.0 - depth_map * (1.0 - near / far)) + + def get_camera_intrinsic_matrix(self, camera_name, camera_height, camera_width): + """ + Obtains camera intrinsic matrix. + Args: + camera_name (str): name of camera + camera_height (int): height of camera images in pixels + camera_width (int): width of camera images in pixels + Return: + K (np.array): 3x3 camera matrix + """ + cam_id = self.env.sim.model.camera_name2id(camera_name) + fovy = self.env.sim.model.cam_fovy[cam_id] + f = 0.5 * camera_height / np.tan(fovy * np.pi / 360) + K = np.array([[f, 0, camera_width / 2], [0, f, camera_height / 2], [0, 0, 1]]) + return K + + def get_camera_extrinsic_matrix(self, camera_name): + """ + Returns a 4x4 homogenous matrix corresponding to the camera pose in the + world frame. MuJoCo has a weird convention for how it sets up the + camera body axis, so we also apply a correction so that the x and y + axis are along the camera view and the z axis points along the + viewpoint. + Normal camera convention: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html + Args: + camera_name (str): name of camera + Return: + R (np.array): 4x4 camera extrinsic matrix + """ + cam_id = self.env.sim.model.camera_name2id(camera_name) + camera_pos = self.env.sim.data.cam_xpos[cam_id] + camera_rot = self.env.sim.data.cam_xmat[cam_id].reshape(3, 3) + R = T.make_pose(camera_pos, camera_rot) + + # IMPORTANT! This is a correction so that the camera axis is set up along the viewpoint correctly. + camera_axis_correction = np.array( + [[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + ) + R = R @ camera_axis_correction + return R + + def get_camera_transform_matrix(self, camera_name, camera_height, camera_width): + """ + Camera transform matrix to project from world coordinates to pixel coordinates. + Args: + camera_name (str): name of camera + camera_height (int): height of camera images in pixels + camera_width (int): width of camera images in pixels + Return: + K (np.array): 4x4 camera matrix to project from world coordinates to pixel coordinates + """ + R = self.get_camera_extrinsic_matrix(camera_name=camera_name) + K = self.get_camera_intrinsic_matrix( + camera_name=camera_name, camera_height=camera_height, camera_width=camera_width + ) + K_exp = np.eye(4) + K_exp[:3, :3] = K + + # Takes a point in world, transforms to camera frame, and then projects onto image plane. + return K_exp @ T.pose_inv(R) + def get_state(self): """ Get current environment simulator state as a dictionary. Should be compatible with @reset_to. 
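Stepping out of the diff briefly: the camera utilities added above compose into a world-to-pixel projection. A hedged usage sketch, where `env` is assumed to be an EnvRobosuite instance and the camera name, resolution, and query point are illustrative:

    # Illustrative only -- not part of the diff.
    import numpy as np

    world_to_pix = env.get_camera_transform_matrix("agentview", camera_height=84, camera_width=84)
    p_world = np.array([0.1, 0.0, 0.9, 1.0])   # homogeneous world-frame point
    p = world_to_pix @ p_world                 # applies K_exp @ pose_inv(R) from above
    u, v = p[0] / p[2], p[1] / p[2]            # perspective divide -> pixel coordinates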
@@ -317,6 +414,10 @@ def create_for_data_processing( camera_height, camera_width, reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ @@ -330,6 +431,12 @@ def create_for_data_processing( camera_height (int): camera height for all cameras camera_width (int): camera width for all cameras reward_shaping (bool): if True, use shaped environment rewards, else use sparse task completion rewards + render (bool or None): optionally override rendering behavior. Defaults to False. + render_offscreen (bool or None): optionally override rendering behavior. The default value is True if + @camera_names is non-empty, False otherwise. + use_image_obs (bool or None): optionally override rendering behavior. The default value is True if + @camera_names is non-empty, False otherwise. + use_depth_obs (bool): if True, use depth observations """ is_v1 = (robosuite.__version__.split(".")[0] == "1") has_camera = (len(camera_names) > 0) @@ -354,26 +461,32 @@ def create_for_data_processing( # also initialize obs utils so it knows which modalities are image modalities image_modalities = list(camera_names) + depth_modalities = list(camera_names) if is_v1: image_modalities = ["{}_image".format(cn) for cn in camera_names] + depth_modalities = ["{}_depth".format(cn) for cn in camera_names] elif has_camera: - # v0.3 only had support for one image, and it was named "rgb" + # v0.3 only had support for one image, and it was named "image" assert len(image_modalities) == 1 - image_modalities = ["rgb"] + image_modalities = ["image"] + depth_modalities = ["depth"] obs_modality_specs = { "obs": { "low_dim": [], # technically unused, so we don't have to specify all of them "rgb": image_modalities, } } + if use_depth_obs: + obs_modality_specs["obs"]["depth"] = depth_modalities ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs) # note that @postprocess_visual_obs is False since this env's images will be written to a dataset return cls( env_name=env_name, - render=False, - render_offscreen=has_camera, - use_image_obs=has_camera, + render=(False if render is None else render), + render_offscreen=(has_camera if render_offscreen is None else render_offscreen), + use_image_obs=(has_camera if use_image_obs is None else use_image_obs), + use_depth_obs=use_depth_obs, postprocess_visual_obs=False, **kwargs, ) @@ -387,8 +500,15 @@ def rollout_exceptions(self): """ return tuple(MUJOCO_EXCEPTIONS) + @property + def base_env(self): + """ + Grabs base simulation environment. + """ + return self.env + def __repr__(self): """ Pretty-print env description. 
""" - return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) \ No newline at end of file From 1c1977d27d15f67eba17fd548263cb03c8e0be9c Mon Sep 17 00:00:00 2001 From: matnay Date: Wed, 3 Apr 2024 13:16:04 -0400 Subject: [PATCH 09/44] reverting change in env_robosuite --- robomimic/envs/env_robosuite.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 7d398ff2..942cb623 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -17,6 +17,7 @@ import robomimic.utils.obs_utils as ObsUtils import robomimic.envs.env_base as EB +from libero.libero.utils.utils import postprocess_model_xml # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: @@ -148,14 +149,10 @@ def reset_to(self, state): should_ret = False if "model" in state: self.reset() - robosuite_version_id = int(robosuite.__version__.split(".")[1]) - if robosuite_version_id <= 3: - from robosuite.utils.mjcf_utils import postprocess_model_xml - xml = postprocess_model_xml(state["model"]) - else: - # v1.4 and above use the class-based edit_model_xml function - xml = self.env.edit_model_xml(state["model"]) - self.env.reset_from_xml_string(xml) + # ----- loading LIBERO model xml ---- + model_xml = state["model"] + model_xml = postprocess_model_xml(model_xml, {}) + self.env.reset_from_xml_string(model_xml) self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model From aae48410887e5f34932334593efed2a10241c291 Mon Sep 17 00:00:00 2001 From: shuocheng Date: Thu, 4 Apr 2024 16:02:21 -0400 Subject: [PATCH 10/44] rm env type check --- robomimic/utils/env_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/robomimic/utils/env_utils.py b/robomimic/utils/env_utils.py index 465b5091..b656ea64 100644 --- a/robomimic/utils/env_utils.py +++ b/robomimic/utils/env_utils.py @@ -134,6 +134,7 @@ def is_robosuite_env(env_meta=None, env_type=None, env=None): Determines whether the environment is a robosuite environment. Accepts either env_meta, env_type, or env. """ + return False return check_env_type(type_to_check=EB.EnvType.ROBOSUITE_TYPE, env_meta=env_meta, env_type=env_type, env=env) From 6a654302dffba64f0b8d75fb75ee39e53d23208b Mon Sep 17 00:00:00 2001 From: Dhruv Rajendra Patel Date: Sun, 14 Apr 2024 14:12:34 -0400 Subject: [PATCH 11/44] dev 2 dataloaders --- robomimic/utils/train_utils.py | 99 ++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index bed628f3..3eeefba3 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -622,6 +622,105 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor return step_log_all +def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None): + """ + Run an epoch of training or validation. + + Args: + model (Algo instance): model to train + + data_loader (DataLoader instance): data loader that will be used to serve batches of data + to the model + + epoch (int): epoch number + + validate (bool): whether this is a training epoch or validation epoch. This tells the model + whether to do gradient steps or purely do forward passes. 
+ + num_steps (int): if provided, this epoch lasts for a fixed number of batches (gradient steps), + otherwise the epoch is a complete pass through the training dataset + + obs_normalization_stats (dict or None): if provided, this should map observation keys to dicts + with a "mean" and "std" of shape (1, ...) where ... is the default + shape for the observation. + + Returns: + step_log_all (dict): dictionary of logged training metrics averaged across all batches + """ + + #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) + # breakpoint() + epoch_timestamp = time.time() + if validate: + model.set_eval() + else: + model.set_train() + if num_steps is None: + num_steps = len(data_loader) + + step_log_all = [] + timing_stats = dict(Data_Loading=[], Process_Batch=[], Train_Batch=[], Log_Info=[]) + start_time = time.time() + + data_loader_iter = iter(data_loader) + data_loader_2_iter = iter(data_loader_2) + # breakpoint() + for _ in LogUtils.custom_tqdm(range(num_steps)): + + # load next batch from data loader + try: + t = time.time() + batch = next(data_loader_iter) + batch_2 = next(data_loader_2_iter) + except StopIteration: + # reset for next dataset pass + data_loader_iter = iter(data_loader) + data_loader_2_iter = iter(data_loader_2) + t = time.time() + batch = next(data_loader_iter) + batch_2 = next(data_loader_2_iter) + timing_stats["Data_Loading"].append(time.time() - t) + + # process batch for training + t = time.time() + # breakpoint() + input_batch = model.process_batch_for_training(batch) + input_batch_2 = model.process_batch_for_training(batch_2) + + # breakpoint() + input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) + input_batch_2 = model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats) + + timing_stats["Process_Batch"].append(time.time() - t) + + # forward and backward pass + t = time.time() + # breakpoint() + info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate) + timing_stats["Train_Batch"].append(time.time() - t) + + # tensorboard logging + t = time.time() + step_log = model.log_info(info) + step_log_all.append(step_log) + timing_stats["Log_Info"].append(time.time() - t) + + # flatten and take the mean of the metrics + step_log_dict = {} + for i in range(len(step_log_all)): + for k in step_log_all[i]: + if k not in step_log_dict: + step_log_dict[k] = [] + step_log_dict[k].append(step_log_all[i][k]) + step_log_all = dict((k, float(np.mean(v))) for k, v in step_log_dict.items()) + + # add in timing stats + for k in timing_stats: + # sum across all training steps, and convert from seconds to minutes + step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60. + step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60. 
+ + return step_log_all def is_every_n_steps(interval, current_step, skip_zero=False): """ From 943856d526ef886fb9681679abb7b1f9c7e9d055 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 18 Apr 2024 11:43:38 -0400 Subject: [PATCH 12/44] increased rdcc nbytes --- robomimic/utils/dataset.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index 075c4d59..c98fbf2f 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -212,7 +212,8 @@ def hdf5_file(self): This property allows for a lazy hdf5 file open. """ if self._hdf5_file is None: - self._hdf5_file = h5py.File(self.hdf5_path, 'r', swmr=self.hdf5_use_swmr, libver='latest') + print("opening hdf5") + self._hdf5_file = h5py.File(self.hdf5_path, 'r', swmr=self.hdf5_use_swmr, libver='latest', rdcc_nbytes=1e10) return self._hdf5_file def close_and_delete_hdf5_handle(self): From 8e5e11a99bcdbf432b2254689cc531a6a908e3ae Mon Sep 17 00:00:00 2001 From: Tony Zhao Date: Sat, 16 Sep 2023 21:27:22 -0700 Subject: [PATCH 13/44] cherry pick initial act commit --- .gitmodules | 4 + robomimic/algo/__init__.py | 2 + robomimic/algo/act.py | 249 ++++++++++++++++++++++++ robomimic/config/__init__.py | 4 +- robomimic/config/act_config.py | 48 +++++ robomimic/exps/templates/act.json | 160 +++++++++++++++ robomimic/scripts/config_gen/act_gen.py | 131 +++++++++++++ robomimic/utils/train_utils.py | 2 +- 8 files changed, 598 insertions(+), 2 deletions(-) create mode 100644 .gitmodules create mode 100644 robomimic/algo/act.py create mode 100644 robomimic/config/act_config.py create mode 100644 robomimic/exps/templates/act.json create mode 100644 robomimic/scripts/config_gen/act_gen.py diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..9ebb5fa8 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "act"] + path = act + url = git@github.com:tonyzhaozh/act.git + branch = robomimic diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index 68d70a57..6f668be1 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -9,3 +9,5 @@ from robomimic.algo.hbc import HBC from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC +# from robomimic.algo.diffusion_policy import DiffusionPolicyUNet +from robomimic.algo.act import ACT diff --git a/robomimic/algo/act.py b/robomimic/algo/act.py new file mode 100644 index 00000000..a0eb31f3 --- /dev/null +++ b/robomimic/algo/act.py @@ -0,0 +1,249 @@ +""" +Implementation of Action Chunking with Transformers (ACT). +""" +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.transforms as transforms + +import robomimic.utils.tensor_utils as TensorUtils + +from robomimic.algo import register_algo_factory_func, PolicyAlgo +from robomimic.algo.bc import BC_VAE + + +@register_algo_factory_func("act") +def algo_config_to_class(algo_config): + """ + Maps algo config to the BC algo class to instantiate, along with additional algo kwargs. + + Args: + algo_config (Config instance): algo config + + Returns: + algo_class: subclass of Algo + algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm + """ + act_enabled = algo_config.ACT.enabled + assert act_enabled + algo_class, algo_kwargs = ACT, {} + + return algo_class, algo_kwargs + + +class ACT(BC_VAE): + """ + BC training with a VAE policy. 
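+
+    Rollout sketch (illustrative, not part of this patch; obs_dict keys come from
+    the trained config, and env_step is a hypothetical environment helper):
+
+        policy.reset()                         # zeroes the internal step counter
+        for t in range(horizon):
+            act = policy.get_action(obs_dict)  # queries the network once every
+                                               # query_frequency (= chunk_size) steps,
+                                               # otherwise replays the cached action chunk
+            obs_dict = env_step(act)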
+ """ + def _create_networks(self): + """ + Creates networks and places them into @self.nets. + """ + + self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + self.nets = nn.ModuleDict() + self.chunk_size = self.global_config["train"]["seq_length"] + self.camera_keys = self.obs_config['modalities']['obs']['rgb'].copy() + self.proprio_keys = self.obs_config['modalities']['obs']['low_dim'].copy() + self.obs_keys = self.proprio_keys + self.camera_keys + + self.proprio_dim = 0 + for k in self.proprio_keys: + self.proprio_dim += self.obs_key_shapes[k][0] + + from act.detr.main import build_ACT_model_and_optimizer + policy_config = {'num_queries': self.chunk_size, + 'hidden_dim': self.algo_config['ACT']['hidden_dim'], + 'dim_feedforward': self.algo_config['ACT']['dim_feedforward'], + 'backbone': self.algo_config['ACT']['backbone'], + 'enc_layers': self.algo_config['ACT']['enc_layers'], + 'dec_layers': self.algo_config['ACT']['dec_layers'], + 'nheads': self.algo_config['ACT']['nheads'], + 'latent_dim': self.algo_config['ACT']['latent_dim'], + 'a_dim': self.ac_dim, + 'state_dim': self.proprio_dim, + 'camera_names': self.camera_keys + } + self.kl_weight = self.algo_config['ACT']['kl_weight'] + model, optimizer = build_ACT_model_and_optimizer(policy_config) + self.nets["policy"] = model + self.nets = self.nets.float().to(self.device) + + self.temporal_agg = False + self.query_frequency = self.chunk_size # TODO maybe tune + + self._step_counter = 0 + self.a_hat_store = None + + + def process_batch_for_training(self, batch): + """ + Processes input batch from a data loader to filter out + relevant information and prepare the batch for training. + Args: + batch (dict): dictionary with torch.Tensors sampled + from a data loader + Returns: + input_batch (dict): processed and filtered batch that + will be used for training + """ + + input_batch = dict() + input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"] if k != 'pad_mask'} + input_batch["obs"]['pad_mask'] = batch["obs"]['pad_mask'] + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["actions"] = batch["actions"][:, :, :] + # we move to device first before float conversion because image observation modalities will be uint8 - + # this minimizes the amount of data transferred to GPU + return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) + + def train_on_batch(self, batch, epoch, validate=False): + """ + Update from superclass to set categorical temperature, for categorcal VAEs. + """ + + return super(BC_VAE, self).train_on_batch(batch, epoch, validate=validate) + + def _forward_training(self, batch): + """ + Internal helper function for BC algo class. Compute forward pass + and return network outputs in @predictions dict. 
+ Args: + batch (dict): dictionary with torch.Tensors sampled + from a data loader and filtered by @process_batch_for_training + Returns: + predictions (dict): dictionary containing network outputs + """ + + proprio = [batch["obs"][k] for k in self.proprio_keys] + proprio = torch.cat(proprio, axis=1) + qpos = proprio + + images = [] + for cam_name in self.camera_keys: + image = batch['obs'][cam_name] + image = self.normalize(image) + image = image.unsqueeze(axis=1) + images.append(image) + images = torch.cat(images, axis=1) + + env_state = torch.zeros([qpos.shape[0], 10]).cuda() # this is not used + + actions = batch['actions'] + is_pad = batch['obs']['pad_mask'] == 0 # from 1.0 or 0 to False and True + is_pad = is_pad.squeeze(dim=-1) + + a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state, actions, is_pad) + total_kld, dim_wise_kld, mean_kld = self.kl_divergence(mu, logvar) + loss_dict = dict() + all_l1 = F.l1_loss(actions, a_hat, reduction='none') + l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean() + loss_dict['l1'] = l1 + loss_dict['kl'] = total_kld[0] + + + predictions = OrderedDict( + actions=actions, + kl_loss=loss_dict['kl'], + reconstruction_loss=loss_dict['l1'], + ) + + return predictions + + def get_action(self, obs_dict, goal_dict=None): + """ + Get policy action outputs. + Args: + obs_dict (dict): current observation + goal_dict (dict): (optional) goal + Returns: + action (torch.Tensor): action tensor + """ + assert not self.nets.training + + proprio = [obs_dict[k] for k in self.proprio_keys] + proprio = torch.cat(proprio, axis=1) + qpos = proprio + + images = [] + for cam_name in self.camera_keys: + image = obs_dict[cam_name] + image = self.normalize(image) + image = image.unsqueeze(axis=1) + images.append(image) + images = torch.cat(images, axis=1) + + env_state = torch.zeros([qpos.shape[0], 10]).cuda() # not used + + if self._step_counter % self.query_frequency == 0: + a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state) + self.a_hat_store = a_hat + + action = self.a_hat_store[:, self._step_counter % self.query_frequency, :] + self._step_counter += 1 + return action + + + def reset(self): + """ + Reset algo state to prepare for environment rollouts. + """ + self._step_counter = 0 + + def _compute_losses(self, predictions, batch): + """ + Internal helper function for BC algo class. Compute losses based on + network outputs in @predictions dict, using reference labels in @batch. + Args: + predictions (dict): dictionary containing network outputs, from @_forward_training + batch (dict): dictionary with torch.Tensors sampled + from a data loader and filtered by @process_batch_for_training + Returns: + losses (dict): dictionary of losses computed over the batch + """ + + # total loss is sum of reconstruction and KL, weighted by beta + kl_loss = predictions["kl_loss"] + recons_loss = predictions["reconstruction_loss"] + action_loss = recons_loss + self.kl_weight * kl_loss + return OrderedDict( + recons_loss=recons_loss, + kl_loss=kl_loss, + action_loss=action_loss, + ) + + def log_info(self, info): + """ + Process info dictionary from @train_on_batch to summarize + information to pass to tensorboard for logging. 
+        Args:
+            info (dict): dictionary of info
+        Returns:
+            loss_log (dict): name -> summary statistic
+        """
+        log = PolicyAlgo.log_info(self, info)
+        log["Loss"] = info["losses"]["action_loss"].item()
+        log["KL_Loss"] = info["losses"]["kl_loss"].item()
+        log["Reconstruction_Loss"] = info["losses"]["recons_loss"].item()
+        if "policy_grad_norms" in info:
+            log["Policy_Grad_Norms"] = info["policy_grad_norms"]
+        return log
+
+    def kl_divergence(self, mu, logvar):
+        # KL( N(mu, exp(logvar)) || N(0, I) ), computed per latent dimension as
+        # -0.5 * (1 + logvar - mu^2 - exp(logvar)), then reduced three ways
+        batch_size = mu.size(0)
+        assert batch_size != 0
+        if mu.data.ndimension() == 4:
+            mu = mu.view(mu.size(0), mu.size(1))
+        if logvar.data.ndimension() == 4:
+            logvar = logvar.view(logvar.size(0), logvar.size(1))
+
+        klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
+        total_kld = klds.sum(1).mean(0, True)
+        dimension_wise_kld = klds.mean(0)
+        mean_kld = klds.mean(1).mean(0, True)
+
+        return total_kld, dimension_wise_kld, mean_kld
+
diff --git a/robomimic/config/__init__.py b/robomimic/config/__init__.py
index fa60a2f5..574c4bb4 100644
--- a/robomimic/config/__init__.py
+++ b/robomimic/config/__init__.py
@@ -9,4 +9,6 @@
 from robomimic.config.gl_config import GLConfig
 from robomimic.config.hbc_config import HBCConfig
 from robomimic.config.iris_config import IRISConfig
-from robomimic.config.td3_bc_config import TD3_BCConfig
\ No newline at end of file
+from robomimic.config.td3_bc_config import TD3_BCConfig
+# from robomimic.config.diffusion_policy_config import DiffusionPolicyConfig
+from robomimic.config.act_config import ACTConfig
diff --git a/robomimic/config/act_config.py b/robomimic/config/act_config.py
new file mode 100644
index 00000000..4dcdd82a
--- /dev/null
+++ b/robomimic/config/act_config.py
@@ -0,0 +1,48 @@
+"""
+Config for the ACT (Action Chunking with Transformers) algorithm.
+"""
+
+from robomimic.config.base_config import BaseConfig
+
+
+class ACTConfig(BaseConfig):
+    ALGO_NAME = "act"
+
+    def train_config(self):
+        """
+        BC algorithms don't need "next_obs" from hdf5 - so save on storage and compute by disabling it.
+        """
+        super(ACTConfig, self).train_config()
+        self.train.hdf5_load_next_obs = False
+
+    def algo_config(self):
+        """
+        This function populates the `config.algo` attribute of the config, and is given to the
+        `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config`
+        argument to the constructor. Any parameter that an algorithm needs to determine its
+        training and test-time behavior should be populated here.
+        """
+
+        # optimization parameters
+        self.algo.optim_params.policy.optimizer_type = "adamw"
+        self.algo.optim_params.policy.learning_rate.initial = 5e-5      # policy learning rate
+        self.algo.optim_params.policy.learning_rate.decay_factor = 1    # factor to decay LR by (if epoch schedule non-empty)
+        self.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
+        self.algo.optim_params.policy.learning_rate.scheduler_type = "linear" # learning rate scheduler ("multistep", "linear", etc)
+        self.algo.optim_params.policy.regularization.L2 = 0.0001        # L2 regularization strength
+
+        # loss weights
+        self.algo.loss.l2_weight = 0.0      # L2 loss weight
+        self.algo.loss.l1_weight = 1.0      # L1 loss weight
+        self.algo.loss.cos_weight = 0.0     # cosine loss weight
+
+        # ACT policy settings
+        self.algo.ACT.enabled = False           # whether to train transformer policy
+        self.algo.ACT.hidden_dim = 512          # transformer hidden (embedding) dimension
+        self.algo.ACT.dim_feedforward = 3200    # dimension of transformer feedforward layers
+        self.algo.ACT.backbone = "resnet18"     # vision backbone architecture for image observations
+        self.algo.ACT.enc_layers = 4            # number of transformer encoder layers
+        self.algo.ACT.dec_layers = 7            # number of transformer decoder layers
+        self.algo.ACT.nheads = 8                # number of attention heads per transformer layer
+        self.algo.ACT.latent_dim = 32           # latent dim of VAE
+        self.algo.ACT.kl_weight = 20            # KL weight of VAE
diff --git a/robomimic/exps/templates/act.json b/robomimic/exps/templates/act.json
new file mode 100644
index 00000000..1d37f123
--- /dev/null
+++ b/robomimic/exps/templates/act.json
@@ -0,0 +1,160 @@
+{
+    "algo_name": "act",
+    "experiment": {
+        "name": "test",
+        "validate": false,
+        "logging": {
+            "terminal_output_to_txt": true,
+            "log_tb": true,
+            "log_wandb": false,
+            "wandb_proj_name": "debug"
+        },
+        "save": {
+            "enabled": true,
+            "every_n_seconds": null,
+            "every_n_epochs": 40,
+            "epochs": [],
+            "on_best_validation": false,
+            "on_best_rollout_return": false,
+            "on_best_rollout_success_rate": true
+        },
+        "epoch_every_n_steps": 500,
+        "validation_epoch_every_n_steps": 10,
+        "env": null,
+        "additional_envs": null,
+        "render": false,
+        "render_video": true,
+        "keep_all_videos": false,
+        "video_skip": 5,
+        "rollout": {
+            "enabled": true,
+            "n": 50,
+            "horizon": 400,
+            "rate": 40,
+            "warmstart": 0,
+            "terminate_on_success": true
+        }
+    },
+    "train": {
+        "data": null,
+        "output_dir":"../act_trained_models",
+        "num_data_workers": 4,
+        "hdf5_cache_mode": "low_dim",
+        "hdf5_use_swmr": true,
+        "hdf5_load_next_obs": false,
+        "hdf5_normalize_obs": false,
+        "hdf5_filter_key": null,
+        "seq_length": 10,
+        "pad_seq_length": true,
+        "frame_stack": 1,
+        "pad_frame_stack": true,
+        "dataset_keys": [
+            "actions"
+        ],
+        "goal_mode": null,
+        "cuda": true,
+        "batch_size": 128,
+        "num_epochs": 10000,
+        "seed": 1
+    },
+    "algo": {
+        "optim_params": {
+            "policy": {
+                "optimizer_type": "adamw",
+                "learning_rate": {
+                    "initial": 0.00005,
+                    "decay_factor": 1,
+                    "epoch_schedule": [
+                        100
+                    ],
+                    "scheduler_type": "linear"
+                },
+                "regularization": {
+                    "L2": 0.0001
+                }
+            }
+        },
+        "loss": {
+            "l2_weight": 0.0,
+            "l1_weight": 1.0,
+            "cos_weight": 0.0
+        },
+        "ACT": {
+            "enabled": true,
+            "hidden_dim": 512,
+            "dim_feedforward": 3200,
+            "backbone": "resnet18",
+            "enc_layers": 4,
+            "dec_layers": 7,
+            "nheads": 8,
+            "latent_dim": 32,
+            "kl_weight": 20
+        }
+    },
+    "observation": {
+        
"modalities": { + "obs": { + "low_dim": [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object" + ], + "rgb": [], + "depth": [], + "scan": [] + }, + "goal": { + "low_dim": [], + "rgb": [], + "depth": [], + "scan": [] + } + }, + "encoder": { + "low_dim": { + "core_class": null, + "core_kwargs": {}, + "obs_randomizer_class": null, + "obs_randomizer_kwargs": {} + }, + "rgb": { + "core_class": "VisualCore", + "core_kwargs": { + "feature_dimension": 64, + "backbone_class": "ResNet18Conv", + "backbone_kwargs": { + "pretrained": false, + "input_coord_conv": false + }, + "pool_class": "SpatialSoftmax", + "pool_kwargs": { + "num_kp": 32, + "learnable_temperature": false, + "temperature": 1.0, + "noise_std": 0.0 + } + }, + "obs_randomizer_class": "CropRandomizer", + "obs_randomizer_kwargs": { + "crop_height": 76, + "crop_width": 76, + "num_crops": 1, + "pos_enc": false + } + }, + "depth": { + "core_class": "VisualCore", + "core_kwargs": {}, + "obs_randomizer_class": null, + "obs_randomizer_kwargs": {} + }, + "scan": { + "core_class": "ScanCore", + "core_kwargs": {}, + "obs_randomizer_class": null, + "obs_randomizer_kwargs": {} + } + } + } +} \ No newline at end of file diff --git a/robomimic/scripts/config_gen/act_gen.py b/robomimic/scripts/config_gen/act_gen.py new file mode 100644 index 00000000..d83fce8e --- /dev/null +++ b/robomimic/scripts/config_gen/act_gen.py @@ -0,0 +1,131 @@ +from robomimic.scripts.config_gen.helper import * + +def make_generator_helper(args): + algo_name_short = "act" + generator = get_generator( + algo_name="act", + config_file=os.path.join(base_path, 'robomimic/exps/templates/act.json'), + args=args, + algo_name_short=algo_name_short, + pt=True, + ) + if args.ckpt_mode is None: + args.ckpt_mode = "off" + + + generator.add_param( + key="train.num_epochs", + name="", + group=-1, + values=[10000], + ) + + generator.add_param( + key="train.batch_size", + name="", + group=-1, + values=[64], + ) + + generator.add_param( + key="train.max_grad_norm", + name="", + group=-1, + values=[100.0], + ) + + if args.env == "r2d2": + generator.add_param( + key="train.data", + name="ds", + group=2, + values=[ + [{"path": p} for p in scan_datasets("~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5")], + ], + value_names=[ + "pen-in-cup", + ], + ) + generator.add_param( + key="train.action_keys", + name="ac_keys", + group=-1, + values=[ + [ + "action/abs_pos", + "action/abs_rot_6d", + "action/gripper_velocity", + ], + ], + value_names=[ + "abs", + ], + ) + elif args.env == "kitchen": + raise NotImplementedError + elif args.env == "square": + generator.add_param( + key="train.data", + name="ds", + group=2, + values=[ + [ + {"path": "TODO.hdf5"}, # replace with your own path + ], + ], + value_names=[ + "square", + ], + ) + + # update env config to use absolute action control + generator.add_param( + key="experiment.env_meta_update_dict", + name="", + group=-1, + values=[ + {"env_kwargs": {"controller_configs": {"control_delta": False}}} + ], + ) + + generator.add_param( + key="train.action_keys", + name="ac_keys", + group=-1, + values=[ + [ + "action_dict/abs_pos", + "action_dict/abs_rot_6d", + "action_dict/gripper", + # "actions", + ], + ], + value_names=[ + "abs", + ], + ) + + + else: + raise ValueError + + generator.add_param( + key="train.output_dir", + name="", + group=-1, + values=[ + "~/expdata/{env}/{mod}/{algo_name_short}".format( + env=args.env, + mod=args.mod, + algo_name_short=algo_name_short, + ) + ], + ) + + return generator + +if __name__ == 
"__main__": + parser = get_argparser() + + args = parser.parse_args() + make_generator(args, make_generator_helper) diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index bed628f3..fdd917ed 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -156,7 +156,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non seq_length=config.train.seq_length, pad_frame_stack=config.train.pad_frame_stack, pad_seq_length=config.train.pad_seq_length, - get_pad_mask=False, + get_pad_mask=True, goal_mode=config.train.goal_mode, hdf5_cache_mode=config.train.hdf5_cache_mode, hdf5_use_swmr=config.train.hdf5_use_swmr, From d2f27e61ce685092858111dd6c9beb15bc3eed2f Mon Sep 17 00:00:00 2001 From: snasiriany Date: Sun, 17 Sep 2023 12:28:04 -0500 Subject: [PATCH 14/44] merged second cherrypick --- .gitmodules | 2 +- README.md | 5 + act | 1 + robomimic/algo/act.py | 18 +- robomimic/config/act_config.py | 17 +- robomimic/exps/templates/act.json | 4 +- robomimic/scripts/config_gen/act_gen.py | 4 +- robomimic/scripts/config_gen/helper.py | 954 ++++++++++++++++++++++++ 8 files changed, 981 insertions(+), 24 deletions(-) create mode 160000 act create mode 100644 robomimic/scripts/config_gen/helper.py diff --git a/.gitmodules b/.gitmodules index 9ebb5fa8..578fadc7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,4 +1,4 @@ [submodule "act"] path = act - url = git@github.com:tonyzhaozh/act.git + url = https://github.com/tonyzhaozh/act branch = robomimic diff --git a/README.md b/README.md index bdc3556f..063eac13 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,11 @@ - [05/23/2022] **v0.2.1**: Updated website and documentation to feature more tutorials :notebook_with_decorative_cover: - [12/16/2021] **v0.2.0**: Modular observation modalities and encoders :wrench:, support for [MOMART](https://sites.google.com/view/il-for-mm/home) datasets :open_file_folder: [[release notes]](https://github.com/ARISE-Initiative/robomimic/releases/tag/v0.2.0) [[documentation]](https://robomimic.github.io/docs/v0.2/introduction/overview.html) - [08/09/2021] **v0.1.0**: Initial code and paper release +## Installation +1. Clone the repo with the `--recurse-submodules` flag. +2. (if applicable) switch to `r2d2` branch +3. Run `pip install -e .` in `robomimic` +4. 
Run `pip install -e .` in `robomimic/act/detr`
 
 -------
diff --git a/act b/act
new file mode 160000
index 00000000..73071e16
--- /dev/null
+++ b/act
@@ -0,0 +1 @@
+Subproject commit 73071e16a6595662d753415b90c0abb64815009c
diff --git a/robomimic/algo/act.py b/robomimic/algo/act.py
index a0eb31f3..8f35271f 100644
--- a/robomimic/algo/act.py
+++ b/robomimic/algo/act.py
@@ -26,8 +26,6 @@ def algo_config_to_class(algo_config):
         algo_class: subclass of Algo
         algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm
     """
-    act_enabled = algo_config.ACT.enabled
-    assert act_enabled
     algo_class, algo_kwargs = ACT, {}
 
     return algo_class, algo_kwargs
@@ -56,18 +54,18 @@ def _create_networks(self):
 
         from act.detr.main import build_ACT_model_and_optimizer
         policy_config = {'num_queries': self.chunk_size,
-                         'hidden_dim': self.algo_config['ACT']['hidden_dim'],
-                         'dim_feedforward': self.algo_config['ACT']['dim_feedforward'],
-                         'backbone': self.algo_config['ACT']['backbone'],
-                         'enc_layers': self.algo_config['ACT']['enc_layers'],
-                         'dec_layers': self.algo_config['ACT']['dec_layers'],
-                         'nheads': self.algo_config['ACT']['nheads'],
-                         'latent_dim': self.algo_config['ACT']['latent_dim'],
+                         'hidden_dim': self.algo_config.act.hidden_dim,
+                         'dim_feedforward': self.algo_config.act.dim_feedforward,
+                         'backbone': self.algo_config.act.backbone,
+                         'enc_layers': self.algo_config.act.enc_layers,
+                         'dec_layers': self.algo_config.act.dec_layers,
+                         'nheads': self.algo_config.act.nheads,
+                         'latent_dim': self.algo_config.act.latent_dim,
                          'a_dim': self.ac_dim,
                          'state_dim': self.proprio_dim,
                          'camera_names': self.camera_keys
                          }
-        self.kl_weight = self.algo_config['ACT']['kl_weight']
+        self.kl_weight = self.algo_config.act.kl_weight
         model, optimizer = build_ACT_model_and_optimizer(policy_config)
         self.nets["policy"] = model
         self.nets = self.nets.float().to(self.device)
diff --git a/robomimic/config/act_config.py b/robomimic/config/act_config.py
index 4dcdd82a..9be3926b 100644
--- a/robomimic/config/act_config.py
+++ b/robomimic/config/act_config.py
@@ -37,12 +37,11 @@ def algo_config(self):
         self.algo.loss.cos_weight = 0.0     # cosine loss weight
 
         # ACT policy settings
-        self.algo.ACT.enabled = False           # whether to train transformer policy
-        self.algo.ACT.hidden_dim = 512          # transformer hidden (embedding) dimension
-        self.algo.ACT.dim_feedforward = 3200    # dimension of transformer feedforward layers
-        self.algo.ACT.backbone = "resnet18"     # vision backbone architecture for image observations
-        self.algo.ACT.enc_layers = 4            # number of transformer encoder layers
-        self.algo.ACT.dec_layers = 7            # number of transformer decoder layers
-        self.algo.ACT.nheads = 8                # number of attention heads per transformer layer
-        self.algo.ACT.latent_dim = 32           # latent dim of VAE
-        self.algo.ACT.kl_weight = 20            # KL weight of VAE
+        self.algo.act.hidden_dim = 512          # transformer hidden (embedding) dimension
+        self.algo.act.dim_feedforward = 3200    # dimension of transformer feedforward layers
+        self.algo.act.backbone = "resnet18"     # vision backbone architecture for image observations
+        self.algo.act.enc_layers = 4            # number of transformer encoder layers
+        self.algo.act.dec_layers = 7            # number of transformer decoder layers
+        self.algo.act.nheads = 8                # number of attention heads per transformer layer
+        
self.algo.act.latent_dim = 32 # latent dim of VAE + self.algo.act.kl_weight = 20 # KL weight of VAE diff --git a/robomimic/exps/templates/act.json b/robomimic/exps/templates/act.json index 1d37f123..4512ecdf 100644 --- a/robomimic/exps/templates/act.json +++ b/robomimic/exps/templates/act.json @@ -9,6 +9,7 @@ "log_wandb": false, "wandb_proj_name": "debug" }, + "mse":{}, "save": { "enabled": true, "every_n_seconds": null, @@ -79,8 +80,7 @@ "l1_weight": 1.0, "cos_weight": 0.0 }, - "ACT": { - "enabled": true, + "act": { "hidden_dim": 512, "dim_feedforward": 3200, "backbone": "resnet18", diff --git a/robomimic/scripts/config_gen/act_gen.py b/robomimic/scripts/config_gen/act_gen.py index d83fce8e..8962941d 100644 --- a/robomimic/scripts/config_gen/act_gen.py +++ b/robomimic/scripts/config_gen/act_gen.py @@ -17,7 +17,7 @@ def make_generator_helper(args): key="train.num_epochs", name="", group=-1, - values=[10000], + values=[1000], ) generator.add_param( @@ -54,7 +54,7 @@ def make_generator_helper(args): [ "action/abs_pos", "action/abs_rot_6d", - "action/gripper_velocity", + "action/gripper_position", ], ], value_names=[ diff --git a/robomimic/scripts/config_gen/helper.py b/robomimic/scripts/config_gen/helper.py new file mode 100644 index 00000000..48a3af07 --- /dev/null +++ b/robomimic/scripts/config_gen/helper.py @@ -0,0 +1,954 @@ +import argparse +import os +import time +import datetime + +import robomimic +import robomimic.utils.hyperparam_utils as HyperparamUtils + +base_path = os.path.abspath(os.path.join(os.path.dirname(robomimic.__file__), os.pardir)) + +def scan_datasets(folder, postfix=".h5"): + dataset_paths = [] + for root, dirs, files in os.walk(os.path.expanduser(folder)): + for f in files: + if f.endswith(postfix): + dataset_paths.append(os.path.join(root, f)) + return dataset_paths + + +def get_generator(algo_name, config_file, args, algo_name_short=None, pt=False): + if args.wandb_proj_name is None: + strings = [ + algo_name_short if (algo_name_short is not None) else algo_name, + args.name, + args.env, + args.mod, + ] + args.wandb_proj_name = '_'.join([str(s) for s in strings if s is not None]) + + if args.script is not None: + generated_config_dir = os.path.join(os.path.dirname(args.script), "json") + else: + curr_time = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-%y-%H-%M-%S') + generated_config_dir=os.path.join( + '~/', 'tmp/autogen_configs/ril', algo_name, args.env, args.mod, args.name, curr_time, "json", + ) + + generator = HyperparamUtils.ConfigGenerator( + base_config_file=config_file, + generated_config_dir=generated_config_dir, + wandb_proj_name=args.wandb_proj_name, + script_file=args.script, + ) + + args.algo_name = algo_name + args.pt = pt + + return generator + + +def set_env_settings(generator, args): + if args.env in ["r2d2"]: + assert args.mod == "im" + generator.add_param( + key="experiment.rollout.enabled", + name="", + group=-1, + values=[ + False + ], + ) + generator.add_param( + key="experiment.save.every_n_epochs", + name="", + group=-1, + values=[50], + ) + generator.add_param( + key="experiment.mse.enabled", + name="", + group=-1, + values=[True], + ), + generator.add_param( + key="experiment.mse.every_n_epochs", + name="", + group=-1, + values=[50], + ), + generator.add_param( + key="experiment.mse.on_save_ckpt", + name="", + group=-1, + values=[True], + ), + generator.add_param( + key="experiment.mse.num_samples", + name="", + group=-1, + values=[20], + ), + generator.add_param( + key="experiment.mse.visualize", + name="", + group=-1, + 
values=[True], + ), + if "observation.modalities.obs.low_dim" not in generator.parameters: + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot_state/cartesian_position", "robot_state/gripper_position"] + ], + ) + if "observation.modalities.obs.rgb" not in generator.parameters: + generator.add_param( + key="observation.modalities.obs.rgb", + name="", + group=-1, + values=[ + [ + "camera/image/hand_camera_left_image", + "camera/image/varied_camera_1_left_image", "camera/image/varied_camera_2_left_image" # uncomment to use all 3 cameras + ] + ], + ) + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_class", + name="obsrand", + group=-1, + values=[ + # "CropRandomizer", # crop only + # "ColorRandomizer", # jitter only + ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop + ], + hidename=True, + ) + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_kwargs", + name="obsrandargs", + group=-1, + values=[ + # {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}, # crop only + # {}, # jitter only + [{}, {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}], # jitter, followed by crop + ], + hidename=True, + ) + if ("observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters) and \ + ("observation.encoder.rgb.obs_randomizer_kwargs.crop_height" not in generator.parameters): + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", + name="", + group=-1, + values=[ + 116 + ], + ) + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", + name="", + group=-1, + values=[ + 116 + ], + ) + # remove spatial softmax by default for r2d2 dataset + generator.add_param( + key="observation.encoder.rgb.core_kwargs.pool_class", + name="", + group=-1, + values=[ + None + ], + ) + generator.add_param( + key="observation.encoder.rgb.core_kwargs.pool_kwargs", + name="", + group=-1, + values=[ + None + ], + ) + + # specify dataset type is r2d2 rather than default robomimic + generator.add_param( + key="train.data_format", + name="", + group=-1, + values=[ + "r2d2" + ], + ) + + # here, we list how each action key should be treated (normalized etc) + generator.add_param( + key="train.action_config", + name="", + group=-1, + values=[ + { + "action/cartesian_position":{ + "normalization": "min_max", + }, + "action/abs_pos":{ + "normalization": "min_max", + }, + "action/abs_rot_6d":{ + "normalization": "min_max", + "format": "rot_6d", + "convert_at_runtime": "rot_euler", + }, + "action/abs_rot_euler":{ + "normalization": "min_max", + "format": "rot_euler", + }, + "action/gripper_position":{ + "normalization": "min_max", + }, + "action/cartesian_velocity":{ + "normalization": None, + }, + "action/rel_pos":{ + "normalization": None, + }, + "action/rel_rot_6d":{ + "format": "rot_6d", + "normalization": None, + "convert_at_runtime": "rot_euler", + }, + "action/rel_rot_euler":{ + "format": "rot_euler", + "normalization": None, + }, + "action/gripper_velocity":{ + "normalization": None, + }, + } + ], + ) + generator.add_param( + key="train.dataset_keys", + name="", + group=-1, + values=[[]], + ) + if "train.action_keys" not in generator.parameters: + generator.add_param( + key="train.action_keys", + name="ac_keys", + group=-1, + values=[ + [ + "action/rel_pos", + "action/rel_rot_euler", + "action/gripper_velocity", + ], + ], + value_names=[ + "rel", + ], + ) + # observation key groups to swap + 
generator.add_param( + key="train.shuffled_obs_key_groups", + name="", + group=-1, + values=[[[ + ( + "camera/image/varied_camera_1_left_image", + "camera/image/varied_camera_1_right_image", + "camera/extrinsics/varied_camera_1_left", + "camera/extrinsics/varied_camera_1_right", + ), + ( + "camera/image/varied_camera_2_left_image", + "camera/image/varied_camera_2_right_image", + "camera/extrinsics/varied_camera_2_left", + "camera/extrinsics/varied_camera_2_right", + ), + ]]], + ) + elif args.env == "kitchen": + generator.add_param( + key="train.action_config", + name="", + group=-1, + values=[ + { + "actions":{ + "normalization": None, + }, + "action_dict/abs_pos": { + "normalization": "min_max" + }, + "action_dict/abs_rot_axis_angle": { + "normalization": "min_max", + "format": "rot_axis_angle" + }, + "action_dict/abs_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/rel_pos": { + "normalization": None, + }, + "action_dict/rel_rot_axis_angle": { + "normalization": None, + "format": "rot_axis_angle" + }, + "action_dict/rel_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/gripper": { + "normalization": None, + }, + "action_dict/base_mode": { + "normalization": None, + } + } + ], + ) + + if args.mod == 'im': + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_base_pos", + "robot0_gripper_qpos"] + ], + ) + generator.add_param( + key="observation.modalities.obs.rgb", + name="", + group=-1, + values=[ + ["robot0_agentview_left_image", + "robot0_agentview_right_image", + "robot0_eye_in_hand_image"] + ], + ) + else: + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot0_base_pos", + "object", + ] + ], + ) + elif args.env in ['square', 'lift', 'place_close']: + # # set videos off + # args.no_video = True + + generator.add_param( + key="train.action_config", + name="", + group=-1, + values=[ + { + "actions":{ + "normalization": None, + }, + "action_dict/abs_pos": { + "normalization": "min_max" + }, + "action_dict/abs_rot_axis_angle": { + "normalization": "min_max", + "format": "rot_axis_angle" + }, + "action_dict/abs_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/rel_pos": { + "normalization": None, + }, + "action_dict/rel_rot_axis_angle": { + "normalization": None, + "format": "rot_axis_angle" + }, + "action_dict/rel_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/gripper": { + "normalization": None, + } + } + ], + ) + + if args.mod == 'im': + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos"] + ], + ) + generator.add_param( + key="observation.modalities.obs.rgb", + name="", + group=-1, + values=[ + ["agentview_image", + "robot0_eye_in_hand_image"] + ], + ) + else: + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object"] + ], + ) + elif args.env == 'transport': + # set videos off + args.no_video = True + + # TODO: fix 2 robot case + generator.add_param( + key="train.action_config", + name="", + group=-1, + values=[ + { + "actions":{ + "normalization": None, + }, + "action_dict/abs_pos": { + "normalization": "min_max" + }, + 
"action_dict/abs_rot_axis_angle": { + "normalization": "min_max", + "format": "rot_axis_angle" + }, + "action_dict/abs_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/rel_pos": { + "normalization": None, + }, + "action_dict/rel_rot_axis_angle": { + "normalization": None, + "format": "rot_axis_angle" + }, + "action_dict/rel_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/gripper": { + "normalization": None, + } + } + ], + ) + + if args.mod == 'im': + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos"] + ], + ) + generator.add_param( + key="observation.modalities.obs.rgb", + name="", + group=-1, + values=[ + ["shouldercamera0_image", + "robot0_eye_in_hand_image", + "shouldercamera1_image", + "robot1_eye_in_hand_image"] + ], + ) + else: + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", + "object"] + ], + ) + + generator.add_param( + key="experiment.rollout.horizon", + name="", + group=-1, + values=[700], + ) + elif args.env == 'tool_hang': + # set videos off + args.no_video = True + + generator.add_param( + key="train.action_config", + name="", + group=-1, + values=[ + { + "actions":{ + "normalization": None, + }, + "action_dict/abs_pos": { + "normalization": "min_max" + }, + "action_dict/abs_rot_axis_angle": { + "normalization": "min_max", + "format": "rot_axis_angle" + }, + "action_dict/abs_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/rel_pos": { + "normalization": None, + }, + "action_dict/rel_rot_axis_angle": { + "normalization": None, + "format": "rot_axis_angle" + }, + "action_dict/rel_rot_6d": { + "normalization": None, + "format": "rot_6d" + }, + "action_dict/gripper": { + "normalization": None, + } + } + ], + ) + + if args.mod == 'im': + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos"] + ], + ) + generator.add_param( + key="observation.modalities.obs.rgb", + name="", + group=-1, + values=[ + ["sideview_image", + "robot0_eye_in_hand_image"] + ], + ) + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", + name="", + group=-1, + values=[ + 216 + ], + ) + generator.add_param( + key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", + name="", + group=-1, + values=[ + 216 + ], + ) + generator.add_param( + key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_height", + name="", + group=-1, + values=[ + 216 + ], + ) + generator.add_param( + key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_width", + name="", + group=-1, + values=[ + 216 + ], + ) + else: + generator.add_param( + key="observation.modalities.obs.low_dim", + name="", + group=-1, + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object"] + ], + ) + + generator.add_param( + key="experiment.rollout.horizon", + name="", + group=-1, + values=[700], + ) + else: + raise ValueError + + +def set_mod_settings(generator, args): + if args.mod == 'ld': + if "experiment.save.epochs" not in generator.parameters: + generator.add_param( + key="experiment.save.epochs", + name="", + group=-1, + 
values=[ + [2000] + ], + ) + elif args.mod == 'im': + if "experiment.save.every_n_epochs" not in generator.parameters: + generator.add_param( + key="experiment.save.every_n_epochs", + name="", + group=-1, + values=[40], + ) + + generator.add_param( + key="experiment.epoch_every_n_steps", + name="", + group=-1, + values=[500], + ) + if "train.num_data_workers" not in generator.parameters: + generator.add_param( + key="train.num_data_workers", + name="", + group=-1, + values=[4], + ) + generator.add_param( + key="train.hdf5_cache_mode", + name="", + group=-1, + values=["low_dim"], + ) + if "train.batch_size" not in generator.parameters: + generator.add_param( + key="train.batch_size", + name="", + group=-1, + values=[16], + ) + if "train.num_epochs" not in generator.parameters: + generator.add_param( + key="train.num_epochs", + name="", + group=-1, + values=[600], + ) + if "experiment.rollout.rate" not in generator.parameters: + generator.add_param( + key="experiment.rollout.rate", + name="", + group=-1, + values=[40], + ) + + +def set_debug_mode(generator, args): + if not args.debug: + return + + generator.add_param( + key="experiment.mse.every_n_epochs", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="experiment.mse.visualize", + name="", + group=-1, + values=[True], + value_names=[""], + ) + generator.add_param( + key="experiment.rollout.n", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="experiment.rollout.horizon", + name="", + group=-1, + values=[30], + value_names=[""], + ) + generator.add_param( + key="experiment.rollout.rate", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="experiment.epoch_every_n_steps", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="experiment.save.every_n_epochs", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="experiment.validation_epoch_every_n_steps", + name="", + group=-1, + values=[2], + value_names=[""], + ) + generator.add_param( + key="train.num_epochs", + name="", + group=-1, + values=[2], + value_names=[""], + ) + if args.name is None: + generator.add_param( + key="experiment.name", + name="", + group=-1, + values=["debug"], + value_names=[""], + ) + generator.add_param( + key="experiment.save.enabled", + name="", + group=-1, + values=[False], + value_names=[""], + ) + generator.add_param( + key="train.hdf5_cache_mode", + name="", + group=-1, + values=["low_dim"], + value_names=[""], + ) + generator.add_param( + key="train.num_data_workers", + name="", + group=-1, + values=[3], + ) + + +def set_output_dir(generator, args): + assert args.name is not None + + vals = generator.parameters["train.output_dir"].values + + for i in range(len(vals)): + vals[i] = os.path.join(vals[i], args.name) + + +def set_wandb_mode(generator, args): + generator.add_param( + key="experiment.logging.log_wandb", + name="", + group=-1, + values=[not args.no_wandb], + ) + + +def set_num_seeds(generator, args): + if args.n_seeds is not None and "train.seed" not in generator.parameters: + generator.add_param( + key="train.seed", + name="seed", + group=-10, + values=[i + 1 for i in range(args.n_seeds)], + prepend=True, + ) + + +def get_argparser(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--name", + type=str, + ) + + parser.add_argument( + "--env", + type=str, + default='r2d2', + ) + + parser.add_argument( + '--mod', + type=str, + choices=['ld', 
'im'], + default='im', + ) + + parser.add_argument( + "--ckpt_mode", + type=str, + choices=["off", "all", "best_only"], + default=None, + ) + + parser.add_argument( + "--script", + type=str, + default=None + ) + + parser.add_argument( + "--wandb_proj_name", + type=str, + default=None + ) + + parser.add_argument( + "--debug", + action="store_true", + ) + + parser.add_argument( + '--no_video', + action='store_true' + ) + + parser.add_argument( + "--tmplog", + action="store_true", + ) + + parser.add_argument( + "--nr", + type=int, + default=-1 + ) + + parser.add_argument( + "--no_wandb", + action="store_true", + ) + + parser.add_argument( + "--n_seeds", + type=int, + default=None + ) + + parser.add_argument( + "--num_cmd_groups", + type=int, + default=None + ) + + return parser + + +def make_generator(args, make_generator_helper): + if args.tmplog or args.debug and args.name is None: + args.name = "debug" + else: + time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') + args.name = time_str + str(args.name) + + if args.debug or args.tmplog: + args.no_wandb = True + + if args.wandb_proj_name is not None: + # prepend data to wandb name + # time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') + # args.wandb_proj_name = time_str + args.wandb_proj_name + pass + + if (args.debug or args.tmplog) and (args.wandb_proj_name is None): + args.wandb_proj_name = 'debug' + + if not args.debug: + assert args.name is not None + + # make config generator + generator = make_generator_helper(args) + + if args.ckpt_mode is None: + if args.pt: + args.ckpt_mode = "all" + else: + args.ckpt_mode = "best_only" + + set_env_settings(generator, args) + set_mod_settings(generator, args) + set_output_dir(generator, args) + set_num_seeds(generator, args) + set_wandb_mode(generator, args) + + # set the debug settings last, to override previous setting changes + set_debug_mode(generator, args) + + """ misc settings """ + generator.add_param( + key="experiment.validate", + name="", + group=-1, + values=[ + False, + ], + ) + + # generate jsons and script + generator.generate(override_base_name=True) From f219b6657702516594fc3f1002872c7c87093b4f Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 2 May 2024 15:46:26 -0400 Subject: [PATCH 15/44] training ACT in Egoplay --- act | 2 +- robomimic/algo/__init__.py | 1 - robomimic/algo/act.py | 247 --------------------------------- robomimic/config/__init__.py | 1 - robomimic/config/act_config.py | 47 ------- robomimic/utils/dataset.py | 18 ++- robomimic/utils/file_utils.py | 5 +- 7 files changed, 17 insertions(+), 304 deletions(-) delete mode 100644 robomimic/algo/act.py delete mode 100644 robomimic/config/act_config.py diff --git a/act b/act index 73071e16..ad18b09b 160000 --- a/act +++ b/act @@ -1 +1 @@ -Subproject commit 73071e16a6595662d753415b90c0abb64815009c +Subproject commit ad18b09b1bf0831ddf5aa06c8e95ee11d9fb0677 diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index 6f668be1..dbe2ea4d 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -10,4 +10,3 @@ from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC # from robomimic.algo.diffusion_policy import DiffusionPolicyUNet -from robomimic.algo.act import ACT diff --git a/robomimic/algo/act.py b/robomimic/algo/act.py deleted file mode 100644 index 8f35271f..00000000 --- a/robomimic/algo/act.py +++ /dev/null @@ -1,247 +0,0 @@ -""" -Implementation of Action Chunking with Transformers (ACT). 
-"""
-from collections import OrderedDict
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision.transforms as transforms
-
-import robomimic.utils.tensor_utils as TensorUtils
-
-from robomimic.algo import register_algo_factory_func, PolicyAlgo
-from robomimic.algo.bc import BC_VAE
-
-
-@register_algo_factory_func("act")
-def algo_config_to_class(algo_config):
-    """
-    Maps algo config to the BC algo class to instantiate, along with additional algo kwargs.
-
-    Args:
-        algo_config (Config instance): algo config
-
-    Returns:
-        algo_class: subclass of Algo
-        algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm
-    """
-    algo_class, algo_kwargs = ACT, {}
-
-    return algo_class, algo_kwargs
-
-
-class ACT(BC_VAE):
-    """
-    BC training with a VAE policy.
-    """
-    def _create_networks(self):
-        """
-        Creates networks and places them into @self.nets.
-        """
-
-        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-
-        self.nets = nn.ModuleDict()
-        self.chunk_size = self.global_config["train"]["seq_length"]
-        self.camera_keys = self.obs_config['modalities']['obs']['rgb'].copy()
-        self.proprio_keys = self.obs_config['modalities']['obs']['low_dim'].copy()
-        self.obs_keys = self.proprio_keys + self.camera_keys
-
-        self.proprio_dim = 0
-        for k in self.proprio_keys:
-            self.proprio_dim += self.obs_key_shapes[k][0]
-
-        from act.detr.main import build_ACT_model_and_optimizer
-        policy_config = {'num_queries': self.chunk_size,
-                         'hidden_dim': self.algo_config.act.hidden_dim,
-                         'dim_feedforward': self.algo_config.act.dim_feedforward,
-                         'backbone': self.algo_config.act.backbone,
-                         'enc_layers': self.algo_config.act.enc_layers,
-                         'dec_layers': self.algo_config.act.dec_layers,
-                         'nheads': self.algo_config.act.nheads,
-                         'latent_dim': self.algo_config.act.latent_dim,
-                         'a_dim': self.ac_dim,
-                         'state_dim': self.proprio_dim,
-                         'camera_names': self.camera_keys
-                         }
-        self.kl_weight = self.algo_config.act.kl_weight
-        model, optimizer = build_ACT_model_and_optimizer(policy_config)
-        self.nets["policy"] = model
-        self.nets = self.nets.float().to(self.device)
-
-        self.temporal_agg = False
-        self.query_frequency = self.chunk_size # TODO maybe tune
-
-        self._step_counter = 0
-        self.a_hat_store = None
-
-
-    def process_batch_for_training(self, batch):
-        """
-        Processes input batch from a data loader to filter out
-        relevant information and prepare the batch for training.
-        Args:
-            batch (dict): dictionary with torch.Tensors sampled
-                from a data loader
-        Returns:
-            input_batch (dict): processed and filtered batch that
-                will be used for training
-        """
-
-        input_batch = dict()
-        input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"] if k != 'pad_mask'}
-        input_batch["obs"]['pad_mask'] = batch["obs"]['pad_mask']
-        input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present
-        input_batch["actions"] = batch["actions"][:, :, :]
-        # we move to device first before float conversion because image observation modalities will be uint8 -
-        # this minimizes the amount of data transferred to GPU
-        return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device))
-
-    def train_on_batch(self, batch, epoch, validate=False):
-        """
-        Override from BC_VAE: skip its categorical-temperature update (ACT uses a
-        Gaussian latent) and run the base BC training step directly.
-        """
-
-        return super(BC_VAE, self).train_on_batch(batch, epoch, validate=validate)
-
-    def _forward_training(self, batch):
-        """
-        Internal helper function for BC algo class.
Compute forward pass - and return network outputs in @predictions dict. - Args: - batch (dict): dictionary with torch.Tensors sampled - from a data loader and filtered by @process_batch_for_training - Returns: - predictions (dict): dictionary containing network outputs - """ - - proprio = [batch["obs"][k] for k in self.proprio_keys] - proprio = torch.cat(proprio, axis=1) - qpos = proprio - - images = [] - for cam_name in self.camera_keys: - image = batch['obs'][cam_name] - image = self.normalize(image) - image = image.unsqueeze(axis=1) - images.append(image) - images = torch.cat(images, axis=1) - - env_state = torch.zeros([qpos.shape[0], 10]).cuda() # this is not used - - actions = batch['actions'] - is_pad = batch['obs']['pad_mask'] == 0 # from 1.0 or 0 to False and True - is_pad = is_pad.squeeze(dim=-1) - - a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state, actions, is_pad) - total_kld, dim_wise_kld, mean_kld = self.kl_divergence(mu, logvar) - loss_dict = dict() - all_l1 = F.l1_loss(actions, a_hat, reduction='none') - l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean() - loss_dict['l1'] = l1 - loss_dict['kl'] = total_kld[0] - - - predictions = OrderedDict( - actions=actions, - kl_loss=loss_dict['kl'], - reconstruction_loss=loss_dict['l1'], - ) - - return predictions - - def get_action(self, obs_dict, goal_dict=None): - """ - Get policy action outputs. - Args: - obs_dict (dict): current observation - goal_dict (dict): (optional) goal - Returns: - action (torch.Tensor): action tensor - """ - assert not self.nets.training - - proprio = [obs_dict[k] for k in self.proprio_keys] - proprio = torch.cat(proprio, axis=1) - qpos = proprio - - images = [] - for cam_name in self.camera_keys: - image = obs_dict[cam_name] - image = self.normalize(image) - image = image.unsqueeze(axis=1) - images.append(image) - images = torch.cat(images, axis=1) - - env_state = torch.zeros([qpos.shape[0], 10]).cuda() # not used - - if self._step_counter % self.query_frequency == 0: - a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state) - self.a_hat_store = a_hat - - action = self.a_hat_store[:, self._step_counter % self.query_frequency, :] - self._step_counter += 1 - return action - - - def reset(self): - """ - Reset algo state to prepare for environment rollouts. - """ - self._step_counter = 0 - - def _compute_losses(self, predictions, batch): - """ - Internal helper function for BC algo class. Compute losses based on - network outputs in @predictions dict, using reference labels in @batch. - Args: - predictions (dict): dictionary containing network outputs, from @_forward_training - batch (dict): dictionary with torch.Tensors sampled - from a data loader and filtered by @process_batch_for_training - Returns: - losses (dict): dictionary of losses computed over the batch - """ - - # total loss is sum of reconstruction and KL, weighted by beta - kl_loss = predictions["kl_loss"] - recons_loss = predictions["reconstruction_loss"] - action_loss = recons_loss + self.kl_weight * kl_loss - return OrderedDict( - recons_loss=recons_loss, - kl_loss=kl_loss, - action_loss=action_loss, - ) - - def log_info(self, info): - """ - Process info dictionary from @train_on_batch to summarize - information to pass to tensorboard for logging. 
-        Args:
-            info (dict): dictionary of info
-        Returns:
-            loss_log (dict): name -> summary statistic
-        """
-        log = PolicyAlgo.log_info(self, info)
-        log["Loss"] = info["losses"]["action_loss"].item()
-        log["KL_Loss"] = info["losses"]["kl_loss"].item()
-        log["Reconstruction_Loss"] = info["losses"]["recons_loss"].item()
-        if "policy_grad_norms" in info:
-            log["Policy_Grad_Norms"] = info["policy_grad_norms"]
-        return log
-
-    def kl_divergence(self, mu, logvar):
-        # KL( N(mu, exp(logvar)) || N(0, I) ), computed per latent dimension as
-        # -0.5 * (1 + logvar - mu^2 - exp(logvar)), then reduced three ways
-        batch_size = mu.size(0)
-        assert batch_size != 0
-        if mu.data.ndimension() == 4:
-            mu = mu.view(mu.size(0), mu.size(1))
-        if logvar.data.ndimension() == 4:
-            logvar = logvar.view(logvar.size(0), logvar.size(1))
-
-        klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
-        total_kld = klds.sum(1).mean(0, True)
-        dimension_wise_kld = klds.mean(0)
-        mean_kld = klds.mean(1).mean(0, True)
-
-        return total_kld, dimension_wise_kld, mean_kld
-
diff --git a/robomimic/config/__init__.py b/robomimic/config/__init__.py
index 574c4bb4..b4f857f1 100644
--- a/robomimic/config/__init__.py
+++ b/robomimic/config/__init__.py
@@ -11,4 +11,3 @@
 from robomimic.config.iris_config import IRISConfig
 from robomimic.config.td3_bc_config import TD3_BCConfig
 # from robomimic.config.diffusion_policy_config import DiffusionPolicyConfig
-from robomimic.config.act_config import ACTConfig
diff --git a/robomimic/config/act_config.py b/robomimic/config/act_config.py
deleted file mode 100644
index 9be3926b..00000000
--- a/robomimic/config/act_config.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-Config for the ACT (Action Chunking with Transformers) algorithm.
-"""
-
-from robomimic.config.base_config import BaseConfig
-
-
-class ACTConfig(BaseConfig):
-    ALGO_NAME = "act"
-
-    def train_config(self):
-        """
-        BC algorithms don't need "next_obs" from hdf5 - so save on storage and compute by disabling it.
-        """
-        super(ACTConfig, self).train_config()
-        self.train.hdf5_load_next_obs = False
-
-    def algo_config(self):
-        """
-        This function populates the `config.algo` attribute of the config, and is given to the
-        `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config`
-        argument to the constructor. Any parameter that an algorithm needs to determine its
-        training and test-time behavior should be populated here.
-        """
-
-        # optimization parameters
-        self.algo.optim_params.policy.optimizer_type = "adamw"
-        self.algo.optim_params.policy.learning_rate.initial = 5e-5      # policy learning rate
-        self.algo.optim_params.policy.learning_rate.decay_factor = 1    # factor to decay LR by (if epoch schedule non-empty)
-        self.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs
-        self.algo.optim_params.policy.learning_rate.scheduler_type = "linear" # learning rate scheduler ("multistep", "linear", etc)
-        self.algo.optim_params.policy.regularization.L2 = 0.0001        # L2 regularization strength
-
-        # loss weights
-        self.algo.loss.l2_weight = 0.0      # L2 loss weight
-        self.algo.loss.l1_weight = 1.0      # L1 loss weight
-        self.algo.loss.cos_weight = 0.0     # cosine loss weight
-
-        # ACT policy settings
-        self.algo.act.hidden_dim = 512          # transformer hidden (embedding) dimension
-        self.algo.act.dim_feedforward = 3200    # dimension of transformer feedforward layers
-        self.algo.act.backbone = "resnet18"     # vision backbone architecture for image observations
-        self.algo.act.enc_layers = 4            # number of transformer encoder layers
-        self.algo.act.dec_layers = 7            # number of transformer decoder layers
-        self.algo.act.nheads = 8                # number of attention heads per transformer layer
-        self.algo.act.latent_dim = 32           # latent dim of VAE
-        self.algo.act.kl_weight = 20            # KL weight of VAE
diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index c98fbf2f..4ecbe268 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -13,7 +13,7 @@
 import robomimic.utils.tensor_utils as TensorUtils
 import robomimic.utils.obs_utils as ObsUtils
 import robomimic.utils.log_utils as LogUtils
-
+import time
 
 class SequenceDataset(torch.utils.data.Dataset):
     def __init__(
@@ -467,7 +467,7 @@ def get_item(self, index):
 
         return meta
 
-    def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1):
+    def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, dont_load_fut=None):
         """
         Extract a (sub)sequence of data items from a demo given the @keys of the items.
 
@@ -477,10 +477,13 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta
             keys (tuple): list of keys to extract
             num_frames_to_stack (int): numbers of frame to stack. Seq gets prepended with repeated items if out of range
             seq_length (int): sequence length to extract. Seq gets post-pended with repeated items if out of range
+            dont_load_fut (list): list of keys to not load future items for
 
         Returns:
             a dictionary of extracted items.
""" + if dont_load_fut is None: + dont_load_fut = [] assert num_frames_to_stack >= 0 assert seq_length >= 1 @@ -504,16 +507,20 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta # fetch observation from the dataset file seq = dict() for k in keys: + t = time.time() data = self.get_dataset_for_ep(demo_id, k) - seq[k] = data[seq_begin_index: seq_end_index] + true_end_index = seq_begin_index + 1 if k.split("/")[-1] in dont_load_fut else seq_end_index + seq[k] = data[seq_begin_index: true_end_index] - seq = TensorUtils.pad_sequence(seq, padding=(seq_begin_pad, seq_end_pad), pad_same=True) + for k in seq: + if k.split("/")[-1] not in dont_load_fut: + seq[k] = TensorUtils.pad_sequence(seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True) pad_mask = np.array([0] * seq_begin_pad + [1] * (seq_end_index - seq_begin_index) + [0] * seq_end_pad) pad_mask = pad_mask[:, None].astype(bool) return seq, pad_mask - def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs"): + def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False): """ Extract a (sub)sequence of observation items from a demo given the @keys of the items. @@ -534,6 +541,7 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to keys=tuple('{}/{}'.format(prefix, k) for k in keys), num_frames_to_stack=num_frames_to_stack, seq_length=seq_length, + dont_load_fut=dont_load_fut ) obs = {k.split('/')[1]: obs[k] for k in obs} # strip the prefix if self.get_pad_mask: diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index 65db00fd..c3d74be8 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -111,7 +111,7 @@ def get_env_metadata_from_dataset(dataset_path, set_env_specific_obs_processors= return env_meta -def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False): +def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False, ac_key="actions"): """ Retrieves shape metadata from dataset. @@ -120,6 +120,7 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal all_obs_keys (list): list of all modalities used by the model. If not provided, all modalities present in the file are used. verbose (bool): if True, include print statements + ac_dim (bool): whether to pull ac_dim Returns: shape_meta (dict): shape metadata. 
Contains the following keys: @@ -140,7 +141,7 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal demo = f["data/{}".format(demo_id)] # action dimension - shape_meta['ac_dim'] = f["data/{}/actions".format(demo_id)].shape[1] + shape_meta['ac_dim'] = f[f"data/{demo_id}/{ac_key}"].shape[1] # observation dimensions all_shapes = OrderedDict() From 09f4951cb35018dd68d951baf24663d7a3542dca Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 2 May 2024 16:17:10 -0400 Subject: [PATCH 16/44] moved act into repo instead of submodule --- .gitignore | 2 + .gitmodules | 4 - act | 1 - act/.gitignore | 140 ++++++ act/LICENSE | 21 + act/README.md | 89 ++++ act/assets/bimanual_viperx_ee_insertion.xml | 59 +++ .../bimanual_viperx_ee_transfer_cube.xml | 48 ++ act/assets/bimanual_viperx_insertion.xml | 53 ++ act/assets/bimanual_viperx_transfer_cube.xml | 42 ++ act/assets/scene.xml | 38 ++ act/assets/tabletop.stl | Bin 0 -> 684 bytes act/assets/vx300s_10_custom_finger_left.stl | Bin 0 -> 83384 bytes act/assets/vx300s_10_custom_finger_right.stl | Bin 0 -> 83384 bytes act/assets/vx300s_10_gripper_finger.stl | Bin 0 -> 42884 bytes act/assets/vx300s_11_ar_tag.stl | Bin 0 -> 3884 bytes act/assets/vx300s_1_base.stl | Bin 0 -> 99984 bytes act/assets/vx300s_2_shoulder.stl | Bin 0 -> 63884 bytes act/assets/vx300s_3_upper_arm.stl | Bin 0 -> 102984 bytes act/assets/vx300s_4_upper_forearm.stl | Bin 0 -> 49584 bytes act/assets/vx300s_5_lower_forearm.stl | Bin 0 -> 99884 bytes act/assets/vx300s_6_wrist.stl | Bin 0 -> 70784 bytes act/assets/vx300s_7_gripper.stl | Bin 0 -> 450084 bytes act/assets/vx300s_8_gripper_prop.stl | Bin 0 -> 31684 bytes act/assets/vx300s_9_gripper_bar.stl | Bin 0 -> 379484 bytes act/assets/vx300s_dependencies.xml | 17 + act/assets/vx300s_left.xml | 59 +++ act/assets/vx300s_right.xml | 59 +++ act/conda_env.yaml | 23 + act/constants.py | 76 +++ act/detr/LICENSE | 201 ++++++++ act/detr/README.md | 9 + act/detr/main.py | 110 ++++ act/detr/models/__init__.py | 9 + act/detr/models/backbone.py | 122 +++++ act/detr/models/detr_vae.py | 283 +++++++++++ act/detr/models/position_encoding.py | 93 ++++ act/detr/models/transformer.py | 314 ++++++++++++ act/detr/setup.py | 10 + act/detr/util/__init__.py | 1 + act/detr/util/box_ops.py | 88 ++++ act/detr/util/misc.py | 468 ++++++++++++++++++ act/detr/util/plot_utils.py | 107 ++++ act/ee_sim_env.py | 267 ++++++++++ act/imitate_episodes.py | 435 ++++++++++++++++ act/policy.py | 84 ++++ act/record_sim_episodes.py | 189 +++++++ act/scripted_policy.py | 194 ++++++++ act/sim_env.py | 278 +++++++++++ act/utils.py | 189 +++++++ act/visualize_episodes.py | 147 ++++++ 51 files changed, 4324 insertions(+), 5 deletions(-) delete mode 100644 .gitmodules delete mode 160000 act create mode 100644 act/.gitignore create mode 100644 act/LICENSE create mode 100644 act/README.md create mode 100644 act/assets/bimanual_viperx_ee_insertion.xml create mode 100644 act/assets/bimanual_viperx_ee_transfer_cube.xml create mode 100644 act/assets/bimanual_viperx_insertion.xml create mode 100644 act/assets/bimanual_viperx_transfer_cube.xml create mode 100644 act/assets/scene.xml create mode 100644 act/assets/tabletop.stl create mode 100644 act/assets/vx300s_10_custom_finger_left.stl create mode 100644 act/assets/vx300s_10_custom_finger_right.stl create mode 100644 act/assets/vx300s_10_gripper_finger.stl create mode 100644 act/assets/vx300s_11_ar_tag.stl create mode 100644 act/assets/vx300s_1_base.stl create mode 100644 act/assets/vx300s_2_shoulder.stl create mode 
100644 act/assets/vx300s_3_upper_arm.stl create mode 100644 act/assets/vx300s_4_upper_forearm.stl create mode 100644 act/assets/vx300s_5_lower_forearm.stl create mode 100644 act/assets/vx300s_6_wrist.stl create mode 100644 act/assets/vx300s_7_gripper.stl create mode 100644 act/assets/vx300s_8_gripper_prop.stl create mode 100644 act/assets/vx300s_9_gripper_bar.stl create mode 100644 act/assets/vx300s_dependencies.xml create mode 100644 act/assets/vx300s_left.xml create mode 100644 act/assets/vx300s_right.xml create mode 100644 act/conda_env.yaml create mode 100644 act/constants.py create mode 100644 act/detr/LICENSE create mode 100644 act/detr/README.md create mode 100644 act/detr/main.py create mode 100644 act/detr/models/__init__.py create mode 100644 act/detr/models/backbone.py create mode 100644 act/detr/models/detr_vae.py create mode 100644 act/detr/models/position_encoding.py create mode 100644 act/detr/models/transformer.py create mode 100644 act/detr/setup.py create mode 100644 act/detr/util/__init__.py create mode 100644 act/detr/util/box_ops.py create mode 100644 act/detr/util/misc.py create mode 100644 act/detr/util/plot_utils.py create mode 100644 act/ee_sim_env.py create mode 100644 act/imitate_episodes.py create mode 100644 act/policy.py create mode 100644 act/record_sim_episodes.py create mode 100644 act/scripted_policy.py create mode 100644 act/sim_env.py create mode 100644 act/utils.py create mode 100644 act/visualize_episodes.py diff --git a/.gitignore b/.gitignore index d53318df..c2d62341 100644 --- a/.gitignore +++ b/.gitignore @@ -123,3 +123,5 @@ venv.bak/ # private macros macros_private.py +*.pyc +act/detr/models/__pycache__ \ No newline at end of file diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 578fadc7..00000000 --- a/.gitmodules +++ /dev/null @@ -1,4 +0,0 @@ -[submodule "act"] - path = act - url = https://github.com/tonyzhaozh/act - branch = robomimic diff --git a/act b/act deleted file mode 160000 index 73071e16..00000000 --- a/act +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 73071e16a6595662d753415b90c0abb64815009c diff --git a/act/.gitignore b/act/.gitignore new file mode 100644 index 00000000..d1e4c693 --- /dev/null +++ b/act/.gitignore @@ -0,0 +1,140 @@ +bin +logs +wandb +outputs +data +data_local +.vscode +_wandb + +**/.DS_Store + +fuse.cfg + +*.ai + +# Generation results +results/ + +ray/auth.json + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ \ No newline at end of file diff --git a/act/LICENSE b/act/LICENSE new file mode 100644 index 00000000..35e5f5e2 --- /dev/null +++ b/act/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Tony Z. Zhao + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/act/README.md b/act/README.md new file mode 100644 index 00000000..2a345157 --- /dev/null +++ b/act/README.md @@ -0,0 +1,89 @@ +# ACT: Action Chunking with Transformers + +### *New*: [ACT tuning tips](https://docs.google.com/document/d/1FVIZfoALXg_ZkYKaYVh-qOlaXveq5CtvJHXkY25eYhs/edit?usp=sharing) +TL;DR: if your ACT policy is jerky or pauses in the middle of an episode, just train for longer! Success rate and smoothness can improve way after loss plateaus. + +#### Project Website: https://tonyzhaozh.github.io/aloha/ + +This repo contains the implementation of ACT, together with 2 simulated environments: +Transfer Cube and Bimanual Insertion. You can train and evaluate ACT in sim or real. +For real, you would also need to install [ALOHA](https://github.com/tonyzhaozh/aloha). 
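The "action chunking" in the title is ACT's central design choice: rather than predicting a single action per observation, the policy predicts the next k actions (the "chunk") in one forward pass and executes them open-loop before querying the model again. A minimal sketch of that rollout loop under gym-style ``env.step`` semantics; ``rollout_with_chunking`` and ``query_policy`` are illustrative names, not this repo's API:

    def rollout_with_chunking(env, query_policy, chunk_size=100, max_steps=400):
        # Execute each predicted chunk open-loop, re-querying the policy
        # only once every `chunk_size` environment steps.
        obs = env.reset()
        chunk, idx = None, chunk_size
        for _ in range(max_steps):
            if idx >= chunk_size:
                # One forward pass predicts the next `chunk_size` actions,
                # returned here as an array of shape (chunk_size, action_dim).
                chunk = query_policy(obs)
                idx = 0
            obs, reward, done, info = env.step(chunk[idx])
            idx += 1
            if done:
                break

Open-loop execution is what makes chunking cheap at test time; the trade-off is that the policy cannot react to new observations mid-chunk, which is what the temporal ensembling option described later in this README addresses.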
+
+### Updates:
+You can find all scripted/human demos for simulated environments [here](https://drive.google.com/drive/folders/1gPR03v05S1xiInoVJn7G7VJ9pDCnxq9O?usp=share_link).
+
+
+### Repo Structure
+- ``imitate_episodes.py`` Train and Evaluate ACT
+- ``policy.py`` An adaptor for ACT policy
+- ``detr`` Model definitions of ACT, modified from DETR
+- ``sim_env.py`` Mujoco + DM_Control environments with joint space control
+- ``ee_sim_env.py`` Mujoco + DM_Control environments with EE space control
+- ``scripted_policy.py`` Scripted policies for sim environments
+- ``constants.py`` Constants shared across files
+- ``utils.py`` Utils such as data loading and helper functions
+- ``visualize_episodes.py`` Save videos from a .hdf5 dataset
+
+
+### Installation
+
+    conda create -n aloha python=3.8
+    conda activate aloha
+    pip install torchvision
+    pip install torch
+    pip install pyquaternion
+    pip install pyyaml
+    pip install rospkg
+    pip install pexpect
+    pip install mujoco
+    pip install dm_control
+    pip install opencv-python
+    pip install matplotlib
+    pip install einops
+    pip install packaging
+    pip install h5py
+    pip install ipython
+    cd act/detr && pip install -e .
+
+### Example Usages
+
+To set up a new terminal, run:
+
+    conda activate aloha
+    cd <path to act repo>
+
+### Simulated experiments
+
+We use the ``sim_transfer_cube_scripted`` task in the examples below. Another option is ``sim_insertion_scripted``.
+To generate 50 episodes of scripted data, run:
+
+    python3 record_sim_episodes.py \
+    --task_name sim_transfer_cube_scripted \
+    --dataset_dir <data save dir> \
+    --num_episodes 50
+
+You can add the flag ``--onscreen_render`` to see real-time rendering.
+To visualize an episode after it is collected, run:
+
+    python3 visualize_episodes.py --dataset_dir <data save dir> --episode_idx 0
+
+To train ACT:
+
+    # Transfer Cube task
+    python3 imitate_episodes.py \
+    --task_name sim_transfer_cube_scripted \
+    --ckpt_dir <ckpt dir> \
+    --policy_class ACT --kl_weight 10 --chunk_size 100 --hidden_dim 512 --batch_size 8 --dim_feedforward 3200 \
+    --num_epochs 2000 --lr 1e-5 \
+    --seed 0
+
+
+To evaluate the policy, run the same command but add ``--eval``. This loads the best validation checkpoint.
+The success rate should be around 90% for transfer cube, and around 50% for insertion.
+To enable temporal ensembling, add the flag ``--temporal_agg``.
+Videos will be saved to ``<ckpt dir>`` for each rollout.
+You can also add ``--onscreen_render`` to see real-time rendering during evaluation.
+
+For real-world data, where things can be harder to model, train for at least 5000 epochs, or 3-4 times as long after the loss has plateaued.
+Please refer to [tuning tips](https://docs.google.com/document/d/1FVIZfoALXg_ZkYKaYVh-qOlaXveq5CtvJHXkY25eYhs/edit?usp=sharing) for more info.
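The ``--temporal_agg`` flag above enables temporal ensembling. When it is on, the policy is queried at every step rather than once per chunk, so several past chunks each contain a prediction for the current timestep; those overlapping predictions are combined with exponentially decaying weights before the action is executed. A minimal sketch of the weighting, assuming oldest-first ordering (``temporal_ensemble`` is an illustrative name; the actual logic lives in ``act/imitate_episodes.py``, and 0.01 appears to be the decay constant used there):

    import numpy as np

    def temporal_ensemble(preds_for_step, m=0.01):
        # `preds_for_step`: every action that past chunks predicted for the
        # *current* timestep, ordered oldest first; stacks to (n, action_dim).
        actions = np.stack(preds_for_step)
        # Weights exp(-m * i) give the oldest prediction the largest weight;
        # smaller `m` spreads weight more evenly across predictions.
        weights = np.exp(-m * np.arange(len(actions)))
        weights = weights / weights.sum()
        return (weights[:, None] * actions).sum(axis=0)

Executing this weighted average smooths the discontinuities that otherwise appear at chunk boundaries, at the cost of one policy query per environment step.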
+ diff --git a/act/assets/bimanual_viperx_ee_insertion.xml b/act/assets/bimanual_viperx_ee_insertion.xml new file mode 100644 index 00000000..700aaac5 --- /dev/null +++ b/act/assets/bimanual_viperx_ee_insertion.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/bimanual_viperx_ee_transfer_cube.xml b/act/assets/bimanual_viperx_ee_transfer_cube.xml new file mode 100644 index 00000000..25893842 --- /dev/null +++ b/act/assets/bimanual_viperx_ee_transfer_cube.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/bimanual_viperx_insertion.xml b/act/assets/bimanual_viperx_insertion.xml new file mode 100644 index 00000000..f701d70a --- /dev/null +++ b/act/assets/bimanual_viperx_insertion.xml @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/bimanual_viperx_transfer_cube.xml b/act/assets/bimanual_viperx_transfer_cube.xml new file mode 100644 index 00000000..bdc9e644 --- /dev/null +++ b/act/assets/bimanual_viperx_transfer_cube.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/scene.xml b/act/assets/scene.xml new file mode 100644 index 00000000..ae594505 --- /dev/null +++ b/act/assets/scene.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/tabletop.stl b/act/assets/tabletop.stl new file mode 100644 index 0000000000000000000000000000000000000000..ab35cdf76426b2dddc433afa11af96ebe0e07c17 GIT binary patch literal 684 zcmb7>F%E)I42EBvJ%Hnz90^Ri7@`m-B8hSaF5m@v6c#rJPXHl$Aa0=Tqpy?~7CSY4 z{rdmyyH&->9;>rWl4uvjQ4;TMRu<*;&@|20{vB&W0qJY?d{9=`o7_N~sr;>StPlPD zvP=v;)0)71Bn%M6vp#_a&UM;cX98;>AsU`wx9qCJJ+^9=ccS$u7gw-AIE|1%J+((i z)`D$$uG~;G$PW-)hwO_26uOtWpDy4uE8Zh1PGoW!C7Fdr*IGML4r$w z;Qqh2n;M>;S-$V@<(!=K+2{H6Z7b=zRXz0^)U1>vyg{`b#flX!Q@Cj1Vnxg5s9d{d z?RsI07A^Xe{{?iy&FN7XnZ1rqwG*!5t`IqUq& z&09<{kU+(k;21VhwOUsz#A|v`v)C*uaW0s#$Ua z3EtMO@jK60F$#LtQ4A)Q)K0UE2~>OurfnkY$+AY)%4L?_%~i}nFs)r5>z}q_Twc7H zVtgionLx#tVA>|Gm&;&0e$pUVovh^`nAWaTf1b2r6dN2xF_1vTmtfi^GLO71Mi1{1 zd}wY50}0;Nt`k@PvSLK$j;0t)@YBu&D!v5MHu3kvITq1me?JGow013gbj*rzWUNUs zkU+(kVA>{Lrig9DDEerafdp@B*MYl7tQg~`dME}GsQ400+r;U!A-nj@pY0i8Ai>+( zRr1;)D@N+l9D|7!t})A)K*g6}+9q5(8%KZFZ{)J%;X@n*)7o|6#sMqF+u@R8Fu_N~ z1S-A+(>9UhPP9ejp48JpFs)r}KJBw&T+(RkpvfV$2-!2gN`F6<>mBo47cvwG|`p#y0{9-qx=(mVIp~1`??F5*))OK9|}^D(4aj zU82~f%YVVRjVJ~ZsQ3~b!zMo0K9N*kNhEa5z%E^ntr${^VjzKvFTpWv;&Uw;N%gKo zLf3HY(lvPBF4ZXp5~%nR9K$9)w+4~a8b~B`P0cRd5{195Ofir^#h2h1Hu1R~jHFgg zBB5IxcIo!9$u}GW2~>Ouj$sqfI!h#U%gHX?&L3@7m0}=)iZ8)2Yyx^25((V{V3+RU zbjtoM#XtfTUxH)U1oW0961peDF5T~n*RL+cKmrwCf@9bO_xiYYqkf`9LifnnrF)!f zay6$Ic0ciS5EH2Q5*))O?B46vAnLt32;H+}m+m>IyxECjKo41>;!AK0n}A-pgU~%> zcIp27Pi^~A4Cv`gRD21JVH5B+5sifI>9b3}m%Lp#l42l%iZ8)2Yy#ejqLI*VEbP+n zSNxvnc#n!k#h2h1HZgbOny_3QCx)I#JwndxdCXI>Ua;CUyNXrr*u~|eQ>P=Ine_Tl zq6C-0qtq9D*Lq^qeJZu6VEMrE=yp+zT?2aCF_1u4`PPTLvE8of)EE7w-^8f+RQsTU z1lB8V*Z%k;qqi=Z7fy_lJq0v8#4<5a=o~WuGTi#b6byLv4!jzHEG%F#V*c1JoLzf&|v{Zr7ET$>p5k zlR|H$Yv3TzHR|#n&(5O3s@SL|6yrJdUW+fD6qS%#HB^wm_R8&=kf)dwxh99sJyzC1 zpsP`b-JU~PgH`{1?I}im>b-WWH90C4wa%y@f$gr_CC=88_is)P&0H#jgFx5F!pgHR 
zL9jY?r4Pk8Nxj#l>8C_xrCtUqNMQfL?b?Ouoj#3Xy!M89(T7OwW-t!igALTt7frh@^d9nK|()c_Ip&Lw@u}i4KqV$iueW+=o)J(Pp;wFmm zjLP|GIV&mwy``dpgf8>;du_4fIpm!xvqL);t?nSum2d4{Pn(b+)u7dOit&!>t8Njq zqxRF=HY!Nyy32k)cmMGw`lk!CL)VOG?I6&Vxc`1nyc|Jlf1_O#BM#NOJ>Soc%1EOI zs34*1V0)aQ$&}U6DXPo~HQGiv2z1qNbI@};U69HY$m2d$sr`7EM&N26+dt!mMz z8Y)QWKBGMj*L2EiVWhF|2NLMo-S`CD^OQ8!$M@W%u|8Cg(0xXGJaNDuZ$#qRvqS%& z=L!jQEzNccp505-d*x?$JB@Opf`slf+T)@nj^{8EKAaWWp2`Oj=t{KdG?c+~G+N7L z@EVQ1qJo6(Guq?3#q(7(66cx~TARu_66mU)^eogRAvA`}b;_?|{!l^LP=soq5bUGnNV zsM~8(7J%#aij)OF1qmGAce@4{LyV5b^w5gbejtIav9-=a8}&D3Ot_8Ow02DxDo9|S z!tJ7V&^TUtT4*e42a!P6lJ@7Jt>qZp)>4dJs34)^*?sEt{|+-g6`LA5f!cE<9Il+_ zq0hkAiu(+7t)ktaAfd0D{ch0iU_Ya5*&jlyQNIKUbX}Wp4*EWP&$;hI_dFUEB=r5a z-*L9h?_dPinG#xu`d3JxtL86fp%2H;F8ASvHA^m0K|(*r_Pbj7v$c#ZBPNGtqkbY1 z=o;AN4D?00402!eI=w-nf`l%Q_B-deJjIM;M<#{-Nc~+T(DmJuQ_!crOmzwOsiWw< z78N9Py<)#RUuv1$=u>V|=xyq^BZ02@6;HyuL29ZK`Q1Rx?I2M>Lf4P>7(kBuCxvm? ze*ZuMUB^291@AcLscz?YoZ9>QNmP)~^}Icnu`m8e@qD!X9)$$D@IBG(dP;2+zpLea zG|ce^iG*&i>@gY4TF#qkhU+{hZ|gVHPMz$(`(-V6q2fz0?MG0a^UJLR3_!3Ni$#XKnc}^sF zTf3q%@3Ufj$#ZG~6<>nsFL_QTIBSUnZ)?~4hx@Gperv3ArNbt6HRZMZlis7HY~#<^M~bv+ap} zOF1)&iZ8)2zT`Q#j=#Q?^PEWNnt@%q9`nz0qT)+%4F5bQ61s+Cm#&Zf^PH&o5*))n z&xwStsoAC575_XZD!v5A@XvE1p<5hw>Gsj?A!D8s6<>m5*aT)RkA zWBBJekE&_(ag-q_AOCn`u_z2eMsB7rV?Zx4?- z&xr~WSU)=RoJgRH#vj6C&U2!I1lIG;JSP(9qH&b)nDd;dAc5_bGtY?xx@bHoJmx$n zDo9|v>&$Z^fi4=?3XeI@i3$?fe{kkGkw6!X&xOaF=R^ew><2mXoJcraInRg3oadww zNUwqf_UD{=P9)Gp-uUM^kw6!X!-vP5=R^ew{fznNIgvmY<~dP80`nBkJSP(9qIp2j)?%I$6(n>#|2!uW=%N`=-kyQm zg?Ua?kkHr7KhKE-x@aaB^nEbTi3$?>{`==Skw6#CNP|8c<~dP8LO;j;c}^tIMKkN5 zFN%3iRFKf+(Lc|L1iEO3AoQs*&xr~Wx?b_mb0UE*n&}Ad2AJnW1qoe0`sX>3Ko`x} zgm)awbE1NTuIK&poJgPx-xHmAPE?T4?Ug+y6K6zjYmG_Q4^?Ph^-d4xIXA99XY%&Z zBM&{ieW!S|_4gqk{^q$+L?nw&y_^YDd>!xdt|s$MYr9ji zTNHzd76$@?eF=2&HXp4`G~aVAdi39Uf2$p*lYt5nKNr|-#duNbKE+_-(yTmrdMYi#bDw?Ncv^I1iE;eW7x#4waufK7N4># zec~YoDoFgZ;(!%neUB#;gNa52P(?A7@C-cu*F#<;1r5H>spLsCI zmp~V9a}1k!ar(KK_AdL<(P!Eis36fL%LOaOz6AeJ3?@>{iL>07Ko@Uw44WufHI4DI zSjFWx*4HsmK|=PqWZ#djmnjAld)mei@+Hv4+t&TCh;#7@84Z^GyY$xFFas4NMoztA z#W)ysnqn|f{qIn}qi`eDn{mso9?5HPw)D zw>gGQ+`e1iNHlfAvKxz&8>k@BE8%tfek@TGg9)k={RnjNwsk)&V&;(cM$-0+msP)X zU!a1-`^Gn{7{3qUb*W6$YqLJsmp~V9a}1lTAw>gGQ3@I|) z*f@1d(7#=tg`tAP$mVv86?J&-ITKX32m2D};%$y$6I8@`?NTKD7l4W{!ENG{8e9VfM*l^l zf`q;|LoRjud_M;6ZQw_sOW#?W;HP9&)KAOkxr#;w3H^+vjA%$Pn1~;;(T_lvenM@6 zpLTlExqL*Uf`l$B7juMDjL+r6k3g3$X*R*7_Eg_!Nl^J-vs*- z=+ZTYO>phA;^mCoRQpI&kkED4=_gqz1{1%`S`+L`pi9?$Ho>)M)@+S}sos^SAffBv zrn3Vn1`~8Ya{ChK(lxkEaBJ{QqeRQ7HIS$vq1%#6jRGhJ6NTO-$>U3)OSeQe!L8b| zu0IA-t0qxFLbr*>{<<&icCh;E*2{bebm=C zsV{*p-7~O>&;636)XR{lAffvxe`YyGF_`GJuk12k0$sX?V-wt4dXP?V|4O2Qgzjst zTC<;GFmbd^F797B2z2S5noV%8Z^qBdxSuFdK|=Q#r~kf>Vtno=2Kf@`(mhU_;NI(y zQPq}F?^U9Lgzoz$&vck#Fu`@MFM%%IbG8ZYg)>3DaES^Mx(}cH-f4)6<>l|XPe;rty^fG1>LZmKmuL54YuFtkw66r-J;s#40u$iAi=8@_^9mc62?FRUAjg6Ji=lHvEM;Z zL4sF7a13vjEK)zaNT5r%sGrAuAO#x@~sxJPF*>*N+MbURlnxp1Dj>K0mv-FLdc`J#*PhM8-t0 zOaI-!mw^iIS0AB051AwQ9u@BkUHXdHvxBY`g7=A-qFWmvg89?L)l397-Z7~b)c7zuRoHplRe$yfxB$)JJ+ z)%jKo@3>El1iE;eV|d4wEP}_DP(gy~d^?6U;zgrTF%szFZ7YTzBMaku&SPY#AVD>_ z6~j9Y7bAf#-sTve+mB_Nc5)0J>q7+zs==)o-toj333Tx`$MB9>Mso}vvqS|6s`ITF z-f__w33Tx`$MBB5S_F^1qJjj~`Bn_?_->2@x_Fyoc*l?}g2#|iL4sNdD~5NRIz|Fr zyv;GZW8oITW8tVEL9K)p!#my{BY`g7<`~{FeX2tO0(eXx6(p#Yuwr=g1~C%o;%$!M z%}rPY=O$1=f?5eHhByBaBY`g7<`~`_i$!pb1r;QyU9n<#^EfdQ=;CdT;ms9U1m}uS zL4w*9D~2~86(fNz-sTwIoR>v#mJAgns9mvQc=Kv866oS>j^WMSSp;YMP(gxPA}fYB zKNur{F5c!C-W;SwaMltPBxr2Wis8+3#z>%xw>gG4mueB5OGO0<8nd)wc=NR}66oS> zj-f|{zkC%K6<>neL_IG0zX^8fJ=p|hk@Zo9p`v53OP`ZBe;y-&E`3G3IeLrWtUfA8 
z=zHUx2M{BHE`4Xca}6wlXD6V7gnq`n^BH0!(50VH@05^s>loPfHo-Kk361vQL=an-Z?B5!E;zpK|zZAbm{|6(n?9;+?k?BY`g65_#uF=~j(rI$5QY6(n?< z=$(HRBY`g6qI&0eSp>D`JY&m>!3q+(ZTHS2i;+N=?iqOJs#yfjRYL^{-AD1xhl`Ow zm+s+s=iFHY&$&Yd3EkK7&g+YjK$q^RdFKx5ULVf{w5}B^Na#MJcYb1w1iEyO(>n)} zd#{l^2ND$|bl=xI&r|D_!8*SZ!+GwwN?zCTZ;-3`kl%xqW8u8UfLnUb zj3UkJ8~E=rWs$!qNa$_9F=Waj`xETavHh~hk*MH)d9?x`mEXujBq~Vo+5;y1#!Dhm zL4sEjFyS}u6Nw5Eye@zV|IsK^kkI$sf7BTXbm`~Ke+*e)E53vJDsX08$Me7Es34)Q zh`p|epIaEyx6Y9Fg)V(Xyg6Ynfr>sS&e39QXQmnzB=lAA=87WqQK5o_zE&}2u&nc7 z1qoht!FSL*BzQ$OAC+H*HxdbS;d_ZQ1}aGK%5sk3%?ax|F%k)M;ro>%MkFdo z;9H-A2tWl1UQ^FUWv|tzL-HOK66nIWS4WIURFJ@TY6lU33KIM^0X{0f+;$`q=)$o9 zXAD%3(A)mm@R$g8>A&mI5AU@KKn3@!kJcWOu`cfC96c&X=&RtLKSu&x`l`p6F|n>9 zD@f=&8)H_+B3MB}KQVqedhc_^1iJJS8e?|JiopsJx)j8i^Rft5kkF;e{#F=XUECMC zbZL$;6KBO>1qoeC_|F``eW6R&N-<^>tr)Bzp=-Vv^PCpJ3KF_T^`C2i`$Ctl!DGzV zS}|BbLbnY5a}97`=+Z4wj9FwW1}jMD7S4a>0PYK2xuF* zew$buXK@~S|M2iTGQXM9n}xN_?`Qh&yH=5?(3Q4a zeAjSKUq31&=vlC7E0>D3DDDeg^z2%M-?JNu3O)Iji^~e`>FY;@1eHyT_AlqSFLY6j zVG(|{Pb4Z-cUdm3?Qlla)djGNZeOIz!#VNIP)_R;6tE-1pUI|XX1d9 z2yhZ#T(7U3HTel`m*O`k5s3=!H*dKW1{3|}4X)%kj{sDVpx@%KV$lDcM5L4W;v#27 zj^RfH;1#9z$~s$a;qWRrj|vqesI9eV|F#whbWy8q5i#}vybARntZnXb;GVvIR7g+fabM_yzk9wK!-E9%gRL0; zJ!IS$y7acr4+hXbdzKz5ItJ(Ka8Li=N5zD`3U(%rBj^~oFLdeq;lI)X6@3ReGlqNm z`cWaFpBVp@7Pv2T>1WrwZo+#WsOYDib5OXauOAf>x^(%kg1~*DOPA)D*ECo)0Og8w z9n9Gt+|$>O3JG1$`>#;IeWB}1eVoJZ*|pnqs>3}<=+?(?g#umWF9f=DYv)~W5UHOl zRCLS9c@^A~@0ovzfrM_U{nrBEzR;z62Hu(d-lIZA_cD0I6ZiD>qe4RWYV5Jsi$9#V zdP`|iWnF>$LYMw~Ory?U+6lUw;dYymBOKcm=TV`8#Kc4^EqZd%cy>GJJ#$o$priU6 z&$kcfB)+)x-va`C+VcQZaKG4UJI@0ZB=oiN>siwIc&`uFyVkLDeGY$j_5kQGy+k+? zRNGm!|EK{H=;BcWzBm4D)R$woN|fc|k_vnBD}&yn;xgzZsLWg2-nC0~{B~=A`$8Ah zJ{DouKJ+B!W8(9ZXes=}(UWLx^Ye;F>%1SRAVE*QMf>LskU$reG>h<-LGO8>LZ#Mn zaoNT_ef_AApjyJB{Z}I4zR*Rrl10Q=UwIX(;jC@02XRkdKPn{fZdyd7zJqwDsQs{F za4Um%);GqNrvwRVuPoYsH6ZQ_T|8F9&#t%S^d1!|)Ye)qZjo_MUq31&sBO2;$(mUc zVEyxdcG0Cvl((1RJt|am8RXu}|Lv%d&}H79>GZXuLKpSBtn={p$h=2|ir(g)-v8~W zkkIk``l6Bg%(<4)*NXem5aWN;a7gHT<3FB=`$Cs~7X01~tSkUOKdk8Iir+bKPhWjX z!jaI=vHw^f?h9SI-1v{&A%U(hrPjAqLj?(4qx!u&2OxniUC#aA&r#7e!{;#o>$&o4 z)f_R9(DjwyID>;gm#*z%9wXyB$cnB-KaZFEAC3wMU4#3L$vBP*UAkrPANN5;w>Y22 zmf)ypG|Fn#K95Fum6!-U%i_yCOMM4f;cd>V=xx3t`hNHmtoRZE0r}{ct?OGOUKSBa zr%nI9^pJR4yX@^I$}dJFDlrjPhQ$67!;0YR!tr$;Nk zguV(kak|dcWy7}x6Xj2Qeo81N+;cvUNF75fI))}3&y{uNHnr2cFDA5$w*vy;DbWN$ zUsh*o;(x}l2)!@8Uw)SCZG9g53r-M^BKyj$uS<)JGRi3VCQxm;Kd{o74AI83bAc+m zdr&2Q%P4X>P}yT-C*RK#|K{y26VCrupn}AMrK5~HO#;=gvxm_b*}~nUME;|FWpuHM z4gy_cDn=Sh4klA=Iu9UXbKyzi*Ao#^R5S!CNc=P_jgh`*ay9a&{zM$^G+8{j5h3?i z4|NdeN>ur*$h|G48vc7XB1T6q6yY;^%k!N|3sjKk8#-?1%KoWTHM1)bhf4n}Zp`T| z)7~oTAkY=-b-w7O%hRZz9=9Um$69}g9Kk(hib^2@6(lOZ$|B1Rr{9md*^-FZm)D5u znS08Ohx0lJbdCMlkim1(tL-J~6Y<1FI8puh=26_DN^C;C5z#3KA*9@5tuUGOIZSs}WJ? 
z%3-nTaA#TThjb1CU85o{${`)Is0NP>B9cBiDOQ#2BzrAPCs09Rd2mwm{>ZGV_EbT{ z{Y9t6$^@O{e>GD&2y|5}9?x`D&!$dxDoaH6?=FcN{W{3s-6;hsNbFf1WDf3^T_t>4 znusg`SHv#{{nBR9qz(dIVZo)$mx*(!qYd*DVcxwjiau&1=T1!^P(dQr=7#3Hrn%M7 zEqRH^x#Xc}vAT`C*gm#{K-bP}wah~gbE|KU<|JZDiT_0N{H^89zhVnikjNg|%gkFd zSe49}gNVV8o{0q4TFE;d--RK8uFwn}&0mfMs{>Us5wWS@8}YnTOL?LD`!H0H_@(t| zbBoBU9u3VvgmLSw7?ZxG4E^>$2Z65bDTkST*XLDLccvmD=b29;){5q`?vQ6;s2~xt zWSUvESbnwQ?-WGr+!tV+soPwRbl-Cj==y2wB(vJg{7RKbLd4OGag28No60AL?}wp+ z#Lk-w&C|IHsy5+?h=~0nj&WgHQ`z>~H3xyN=A-7Cnfet}6@QOIMEl_Q#<^lmWS!PG z!caltySyvS4(UQv#-*`{sQ)RxQTt?L*<#vx2Z63kBbJ!|HVjcitG=e5ewyTojQGPF z$%n-+hM|JQ#jcyoX9|E2=V_x=BRgbxDm^of^ojYYsRFbj=?`Kj59QnCg@JA`$M< z$&6p~)R&EC9|}VSi8ucqF;o6qTuuJ*3=!#@Bsa1})sy`)cpL<}n)Kgic6(P`bx(De zh-xWQ8W%^`m7Z+7!%#tDLa__x;%g<<$Cmqu*!NFLV@2w^GP&IDAkdYe?@3c!DXBWe z+DSy@=+wr*JGJGC8#}^KK_Y#>TV~NyrRmJK60xm*8soCNw#@bG1_yzzbG`mHQ_0e* z?UO%<7=J0P@n>C^tnv4zFjSCOzU8SY_m)-v++9IL>Otv^0&Qza)o+P|Ko{r2hEFW5 z>?|1)s2~wKBDSZ;kx6dWdg5n-GbR`Z;?uoa#f|@pQB@vl3rZhSftt;OrJQs$F zgV>nRQ)POn+FWrP5i=GgGn%S;^25x(odmdYrA*}+(!YYLK4l{jzn=^=Hq*1ab>y8e zR1yPGzC#kvMVHGksc~nCxLPxg zvHDIE`K)1}Kn01%r&D@5jw`D&-n>mj#pVIVh9S*lot-Hi1iCt}$mCg@FH{{pbAyPe z%%4Q)?q+gc?sNhbB(C*J?WwxHjQXj?Ga|OWdM%FEZz22rnaM$*D>L0u>}0o=EFi@@py8<+u10o)^F6Z7*w133CwW znw%xQCn!+~wN#`bBH^%eqH($QvgtZQpn`-kJAr4=%n&uJdoU5hiysgRb99oKzx&of zpsUX4B%Z+hh1I(CK}4+Sw_hwQ*h#+bSyP~b#FjO2JWV?mR1@lk@KLppV*ISmvc%oG z4gy{ECnxaCcveuwUtEZYE=!`tnFXC?mV^xiDo9MK@yQ&KH@~V>vlPd;u}8d~&`C}> zTH8UOtN4Jlp1YNcsH$W5Jc^v!BnE!$B2%7fDNsRT?DnVTjWofk(Xz@!jA*}H>~M9L zi!Zkqs32jizHR0{5~SW9sq{YxbUjaU%-q)}huT=H4iU$OtrTlEv#OTNLl;AkbBI;x@B$NOqOy68S!$eU%%&fP9yYJ5tzNF?3}QfgFsjPb+b)(s;uh8&;~^On|iIdLeEu- zM|A`$NbElQlldrh4mDwA4I=i;UnK74=_#LgZ|ESXg1X>;5#C=bpA~oE!Ib5Qj3VU-z*n@lL(3SdCMsv#L4C>(K zrbN{EezUlIxtpwzp`1VkiK+4Pm|L4?R%yq$h`6Ixh#hnuvlE6n2y~5|@m9Wyn?W`8 zG$UeR{H@~Nv2OB2ijo2qBz6x^Xr}6xNqt;bn}|%|tHe&aANQ`7aS-UbS<)lhg``*a zk2EJD)!c33kLYgl?9;*m6(k-%IwN}x%Bb#K;u>!Gp4DPjp`P;b&&3@Cx+P1GA0sLrO0 zB;vvIu42%1w@luszCZerGin|lt@^+;L4gy`nr;dw`y(xwA z#OL$KpMIRUHYq|*-B(+ng2W#si^zeEQ>yFb-9*G`KVG!`EkYKW@vVbE*XZ-@ zsbWX;r5Jbzkw6z-1-C0*nz>?Ox8Cwo%kKm#NGy=oV67#MgKM-iHcu$EFD#atK-G(_`PKGkD~-CNK}6t z6+JCe67^r7aYS@j94acd?I*_?5e@=f8Gg?o-@Hnq9w*@#NT7nmpGStt+MScCbNj{+ zQL$90DA%x`Y|^l=KmuKH?k|yde@d>V59Un6PqkW!MrL1`;M<-86(shCoRh9RDO954 z-xHC3NE`9v{l4<>ur3Y)T||owt4f#p5RozSNbxCCggiC7y+8$tAEVNnwQi+W z?Ncx@r`{-Wxom{&yuX!$K-b_z`OJN{)2SB~IuJ1_$xJc6d~dlgNi%^861D%XY<{;i zz1mf`JrO^*nk8h--m;I;&_SRJ+j(cd1QjH(_d%na848J$&HKq)dA=8T*OsQNFXv}V ztd6{yPzkTJ+ZFG6X>t6STUNO^M4*C1(7-?Cva?B*Ig;;r>&9h8NcMg*XSRV30$q5e z-L8fOT8Juh`pWblA_OW(?A{j7+_Wx*8X3g*9G?;-(1lmp?Rxju2od*vA9?eeE&>%K zo^LL0stIY-=&HRb20kT7pbM`wjn?j(F7D>*Eti zC5)U*#ElhMM6R;^WKdEkfv$+M+hx$oM5^ofNkr_WY)qwyezM1haRL=2YNWd_yM0Kc zhSy`F@~b4`ai@OrQ={hjXoL?kXcRUG)#OBTA_*+HNS-xHnhwWuJ0Z?A6G_pVr?PEbF2?(k%RPsY`r zf6E7_5~!(hrdGncNpD`RM56zFx9l}=vOooi1(jl%f%y`vgk|};x-l)W*cseUt{6Mf zL7)rorrY&h!=hq$9=9C7b*w-IiPvR|ne|#EQ~yr$eG-vC7v4>JuYKM?1iy)p6D~vw zRFG&?q^&t`dMdT@IIn!cClLvB;oWq*l58I+ZjA3Ee@!|_pn}B3nG?(_d(x?XBlx+( zClLvB;oWq*#@3r6&R^*zk5A|&P(dQm)J0~lHC z)pwl<>-WShcb=Fb&W0y7PmGUG<89L`y}KK1zFLu3t++d}5Kv(i1f#&*piPg-kOgt)_NKC!ymfx?OEKotB;FU1*!>nYgT~B_l$_ykc(PWK6Ac26Oq?_PU;^d^ke(`ABs;>hE(D`$=wFHk`ub+Iw# z`c|pcmE(ixJdS>-D}pyg$RpIABY`fw3eKyG3KHW}%r)zENv~#a>P|5h4(l%tcIYF= zH2+>8fiAoX&Z~wy(4irGq2nhL0l!DoC_C{*xIPm0kta z=4(}|eLwM{ULP4Sbhv{+7hVPD)kOt~euLJUE4E}-39In6ik)e)xUsjFJbkgRKmuKO z6`WTW6(n#}-8n*!1iGA8_gTrl;y~d(vPi>`0JY#Ffh)H7H_Fgi}`+g1rU3gB;YlR9Dn6q%^KafBd zUIogr)S4(VEb1jk7a1(@*wf`WW48W~UHx6EekF`Y^Ghbp5$^~0lnom95~v`7*&6C0 
zKb$XyJnbP@tmy6_(1k}xxzwdUiu*Tu$dXrj3*0YenvkYnUL1T{6dBrH7Ee_}EKXkB z^KajBszm#Em4;T#?%CV8q^kF8s!EvIa%Ra;L849H7o|OKcT`Xn z%0CUq^K-jq_rEHJ^lU4iWe9VeITG`d74h7ATuxoA!E=hfMxbkA-n^c3xyz_@&v~X( ziL^IGWJDVowym5%1&J*e3V5!RDW|H3#3o|=qbnlOY0B`fs^B2dl{Ic|PyT+TRnB9H ziAZV1gzL}0#eLaU7X2aAL7;2>wQQcDdrGN7 zE0Yj$@8?^haFsT4#Dr1;6(nBv&gI#=zl_THl4D?|8VPh^2AkT!_P52hLT#k%R>F~~ zMgsHdZdcq@??j;!&E$z41;qXrEj$Ca*Pv&2P59B(E>DphRn?g?yTb8VpfWi7rPy4u zg?xXapyMe)!rWcWleIx5Rck8G{rDPzt__!kC+P%3?Y()Gh=$c(iB5@H$a6Ic3RI97 z-K>)5^{8;=p7R$G-`;vE?saS_M?45|5a_x;Ce%|^3YGWVbt1|oc`e#LXf98c&o59x zqWaS?&*?uass%rtBqGu8|B2n-w3N%v6?72j$`w-DGyG+knt1RZA|CzqS{yslTvjZY zSD=DKk_qKJ>#iBf{OvRmOCp|$WFakOMl-*IKv&Z<#XMU=!qkHuw}|L7?~Pc!rn$_R zK3JfF#PMvUJd=_e>h@}$^^&gKb8#SfOL=-}UI&4$8N)(6k2_RQuC;fF*wgB*c=1DX z+4xf~feI3hYZmf^_Y6}dw)0$^9}c|`qffPvvD@c%5a^0^EXdRLRH#Zd>j4oL9={WZ zYBra*_GA;NAaQkYZqMsO(ES&SuaT7932`9 z?;6MG`J}PjbvCCMQK`G6)CwL7;0_Xt-zPtV(M3WS-sjcy4T?-Nq)eRhBFQ6(qiETg78` zsG`y@-9&^tcYrZDu$e43Fq?xwSD;zJ6C4$;My%LNMAEmhjl$EK$V^Ey2~?1X^G$hA zti_d7|9IPp82uo?ko%j;Ayu3Ny4D;j;mMhxqG~|jgQQVsPaGq5)h4p(^|S&NBz6=n z?g_tAk!F(bA|mr}mPaEtRcDEkY7_Erte?5`m8QefF znv}*tpet_mUY`DS>QLR@+lN2}iIjy0c*bR^uihuCMMU1cNsNQ7>&rbIQwk)|m90gD zr)b7{YDl_zL{yER#K^ayzKr`_T7e1@uO5y5yyl0$r1LcbT?Hof@LWh+SG~^K*oQy` ziPGCTdq$_KqXq=GAfou#q((@R`towWK!F6h;_mC<8Qsi9b07y3@nK+MV`1$EvecmD z0u?02#B1Z3)Vda}f*4N3nRJPbWOP5u4W?@a`$E^>v08dUe*I2ey88nW(N7Z?&s0NM zXI5%~3KFO4HSx^+<6D)g@^m6zOiN(Qyx&lE_%W@6K-c!LhMsf()KG`h{z}CDy77#4 zS&2h$5wkjS~Hj%WUx>ME@HVj>1Sj%&1R-B{i}nbAR@3u{zoornq&SU)=NIX-=O z2l0ua84xW}7(4FQkq_p^68)Ez^T;aIX*Rbzym_Ujp2&o?)Z_Bq!;yBzKn01)x7&F_ zlGj$x3$>*)$0rd9bm9G{87a4t8u6~xlOG2r5vU-6caui$#-%fgtf?heU6f%slN{%W z4?_isM9~>MFINjS|9AeLkFG(8%SvyQrdCdh{0} zs*g%;bpKFSmig&!7%I5m(B7Fn>zfI6@f`OuUdIbGYD}mv?>2biAkbC!XprZQQBn2C zy_$$&p-GLLck0WFN#BH_g2aN0Sv}A1hN*&$qlnnOIFXUp-B2c(6d;g5*P*m|J-v4s zs_K!=M8rOu&=_{Op}cq_u0RC|Y#E&G2NLMQ_R8(*G&H^OTdP`f%aA3GrzB0%{pN+h zlIr;Wt>L&jf@Ws!N^5Ky=91gpYaOc~kiaz?Zr7rmX^om6T{7+BRbl8l5WLm=W@<6@ za{iHUC-Hl$RK`+ON6u=x&9RyT30!+Zs|SEU1qoacLvP#VQWz~N){_e|ZgZ@BLE>4S zdFIP8g;ZeVOS&IB9w#=Q4sIxSE;#BS(1j~|oCGRJ;Cdq`feI413X0}(-b`Sm=-61U z-+$b(+6jrr|MoOj_Mx?8^-|FLdAotJjlCV3$+GuuItX;(YAz>%3KF=+%t@ev1g?B@ zyF#|U7wxvTkUI|EbF6SfVo|dYb5w&|>f3?AbRKgTJr$e!ww4Q9e{>M&!u5Dg0u>~1 zU7_3ctMRW`+PaOrQ}#m`x^4}=AXk*gsU5L6|H&-S^`XJ9qS=#Fs`#!Bm2ed;^P1S&}27@*s= zE9$J6^lf_?dMw;QplfvD%%1-1OQ?6fQq!5^2qY><;D{cL@4|`^B+!K`Nt`2)s374S z9sJrj9FDNz$St*MIi8BqBU{Qf?}`XikifAsTD=_kKqy+nTYXz`2Z63S>xz43yeqFR zW#H9LIC6&y5;%57wNKTDBH#X2veDwA0ts}z?^4*4w|sdu_$9A0#gRKykifAsx9e$= zMgT%mvp61Wb* z?Rvhhop_bnE!WR$FYs(}?kFCib1o+;NZ_nc=ha05U3i4fxtypVfwMxLR~HF%InNwd zjG%%9&I)y2E1Xq~^M&ymQlC0=1+isPKY1gfr{n4(f%AHuvvF}>=)x=Qoc)QjF_&J< zZe}c(QoS8Kv=Yvsbh{e&A0|Fs>LbhD=^;=-qV}J)%rb@2s^5n6q9+k&e@CcnV0Z~B$=M*}xE)wW+o;j{pLj?((Q|NXb zA6Z)bc9+&092qKb-Yd>%#plg=bx}bAulm>La-s`op3)o3rjp|KKi#r!u}H@ZQzS6n z*XN|73un+e=MLhWBAnNS^O?Rr(+LTjA?BQUhWkPn&a(RYj4dQ^rW@VC$b{mLd4UAFaBY@z^%*Ki;L0AF5&w505frbV z9QWfS$4o$+!2RMCajp}_wZXU+ zx!n3!=CZ~4RJ;Fp750MiTg0YUUF6nZnhI2qz}3T0EbCZSw33QcM^vTS1u7FAsyD-JT zHSVY&;XIE$=}d8Ga%XvDcWr?Lx^PXsbGWu4&Ue z;?ZPN6a()EDoEgcqVEArzbM*1ZYOspDkG3U*Qa{PJStuZ)g(o7ih*BMK?MotJr6l_ zU1aLkR`&j{m_Pzu_!SrDmk{vl2l&K%9IMQAuY>5!L+PmSo}+>UJ_|HDn0Tjna=44U zPBSx+K-Z-TC(P3$^QtVt#VH2fb5xMPXMx^?r|uE!>UWkyCR7qgplj>1tLE#(1=Q0c z{PjJ&=cpio&jQU=+j&IvIo(mN%^W6>Kv$o(AI$KqMbv^U87T(db5xMPXMsj*b6ge$ z_qUV5v{oMpbmd7J-_vP%G1Y4i=RfeCqk;rJ3)FtJ_(xnVPhabq5+aa5*X+)Lo=HVY zs>(eQQVjft5GqJGpQ}->`y%Q>=P-& zI?Kjy!UPiN>R;wB^VpSqG-InE#lYtZ6(sO^bGxe4I3}KM=_r4kQC1*WUb6a$|tRFJ^u&FzZ6>5AC!Q#<)GTVa6&x)x4)XC|s%RE-(L-)O|=3Kb;qd2_ow 
z|J@YzleCqK(&QINpzHL9xE{AGuCkWmIl}l{p@IZHZ?t;(_5-nPP;2>T{vd$_x|)?v z>bYE|l-m51XGh~#xllpE`Rq=b@t?SRtEEiRD!V`eUHBC+dMko2x8T=W@X6mEwA8GA zJr~VL38SOJXBQPD@R_E0uYpqhySj^(DkzR2J=OYV3qeyNs58bE-FaiGfm%s z_;tT%8q`@%ZB$MmfvyrmP4hSUlINIhg(wC-yQm<6&os?7IDcG(&+91rJ})7VK-V89 zE}Nf56r#B}SttfRyQm<6&or%%s(Dr1=-p0UpPFAFfv$V+@0)Ax6jlogrJ)%3?4p7M zKGQTWW6EvO_Iw+8b#jnE0$q9Xy)m2DE~Z|6;MwZ8mEurck=lSjU?4p7MKGXE&uaM_r&We`un_HO#66mTU6MMF1D^1@?eMK1) z{FWXnNI1(!*I92xg%mAhweWNT33TDN`<&k?!fz8{=}Px}wmIuePL;GBe-#+Z2P#Nl zxuNgk%r!-`d0k||%%K7abdA2S#2i;BNQKnkw{0vRs33vmhTgo29260WI?JeEO9~{= z^(1<|IcRhqtW2Ks_>$`C`Z4BVA&|(=x1qm#hZr9nju?+J% ztx%8&1rq2w`YFCA@q{vJ*h^mXgWvc?1qo+4Uw1#Q(UjIrWa=4LAb~FYrm*vSefW(& ztR)r&_cRMeXQwu*BHckO=cpio<)7A{Z`mzocI_fpwJR!+K-Vu1N1H32@(ARDx}6XNl7uVoTGvSmVdYF zQn&v^=5ej$)aaA~33NpyIBiZZSwt1?!fRi#oTGvSmVdYF?{sg(qsuL1#q@y!33M54 zubYA0iqX0We&@t;jtUZ3{^{FoLq3TOrJBpm(-R6L&=t7jUvuKGB~(Cip5uk(92F$6 z{L?)@8rvu~qp2K`KdwLmT^G~5H{+ctrRo*twU$`UQ9%OBKh1&M5zoj-Yy0l){uG7; zx*BYa>lvB2tXf*@6ve==n4^M(vo7f}IiV3yzmcrc_f;4Y=)$k2Q>FpFk?FuaMUrbyso{XQ0*>qW5$}D zI)=Vd@b;)!y}FZ3{3%!E9AdP=aqLInw|$K0;o_xFjK z#kyG#$90l{&vOVQ&~(;+LInw|$7m(ezvqPKbO-r#Yi5B2x~g=e@7%o%ruICDVqkrR3KCe4(HG1{ z-4QFgwv}&|rxZw_t8dvK&8vy?sf2&>C@0oes33v$nA(;+LInw|$LN0Sej)n5ZzQr&X%m<-les33v$ z7=44T*L(4}dJFmU4{-z%==!)M+I(8AF#VbbkI7(tg$fc_kI{_yF0qXGbDPPDp&!GL zKv$uN!{*CoMb-R*JSKzn6)H$zJ?3_07!cQpq+dSqEO-%y1iG3T=gs%MimUfO^Yo(?f%O=@s})IXgv@Iw zBQo3yLjqk}FFiFA&MU3VQAa2S)>o(?f%TZ%b@ED5qwCB1@=5V4VMw6s_NtF&;O}Ks zt{42fOZcljs37626PNc+ZscrUPex2Q6NUu3@K=JIzp016KZoBv$FHV4zpai6*1^YO zr81K*q~E){T7{lntVK~l0&7>AFFN^vIGd`A%#kmrKmuJ;QWZ3t{*q076T;agtVK~l z0&7?LCV7<;!qcjg{BSa}KmuLgad@A+EbbLRc7DWXKtX-)@^?xNQY-}ly zmWU&eK-cgci_8og3aCV}cvKB*QB;t?+LhKK=b&$kylF0@T6_pY0$o=suQA(gE=1p% z;88WKMNvTlYgfvKSBz~$hc}a>W8VZR(!hsdNT6#$gMH?!9YxiNGd!w>wJ0h`VC_nCYs)4yQe15$H}3l<3<-3-xqQ;B zM7ix95!WaN)}p8&fwimKRVhvq<9x}6GH>*yFeK2`q~ld{iYiIJ^ml?{U@eLY5?H%Z z%eg+#NHD#=?DXHMFeK2m_0a?K+}_e^`YQe!3D%;hAc3_j<+hC!#)l_$<)OAm!jM2$ z+5xZ4oCnL&ZyfR$Xz*9xP(i|3Q`gy)%1F|rj{Gh0-Y_K4g}*XKUkZ71Mu_I^<(s>e z9V-rTWgvdX&G~Cks376|+8eHFL>I0lbP`{y6Y*C=u|9VGUMea`;P0A#y>3Srwkyux zeMJQcY#+bgMxhJaN9S*{;<01PfWK?%tlLpR0_%L|Z?YnRF8p0nXWfnp5?JRuf0Gpn zbm8xsI_q{+kia_M`J1drpbLN3)LFNqf&|w2)NhC1WJLm9_`9agx*ZiHu+De>CMy!? 
z!rwJ@*6pYufpxy~H(8ND7yhoPvu;NP39R#J%@6!0D-!6!-!*mC?WiDub-wdAS&={& z{;sLBZbt2z!calN*?#=6N#L(kqJo6;E1S5| z8(p}*+DY6j@<9}z*-UQSkjL@8KKv3NehrgqIQX6=DoEhBEuG&4L>GRE&q?6-EKxxM zzisLKCLp@-OMFfOzh{XG68LRP=Qjb-g->4>k{Y3rt%LU^aap=2)VVANBRFJ?g|IvE2AL1H= z$26Af&S!QI=(@kY?f>iR+~cA+vOPX(Pyqq)!37hanE|4UO4KA*WO^Do)Cob#=7PE~hRPo17q`r%chjZMqnBO8;CwpQYv%GG+TjdP*|39J=Jt3ls3 z%DU)WZS3#=O(IZh$y+b$uZ|l-Z`RBu8+qeGm6Rhn+N1y3*@+S)u<{@MHhfp8a`TfM z?eT(+5`kJ+`A@EAi8U;-CR}mL(fadInZ}<>S#>e2T89!Ou(}-W?^c8;zv(z$TUOoK zi3DmL+L*2XWA6xK#K)`223Frg2@+T(k7~+hw^g*?kJCE57wtp>wL*qIr>~wintFE; z*}y7-C_zH54>$K*nDU_`N9#$I3Xwo9tS%>4^2F+$*wYgarRm=c9%_8phn*XaeT5Pv zun#@%+6AqZ?w{vsZT~yki3Dnu4;`u3oXj*Tvr5PY_7zHyz&@m^DsiF8>fz(GR&yep zNT628fh@gr_DIT2yhS#oz59&b#aONP zyig}fkdVg%tDa(|QyiP#OaEP;J|o>ItYl~2WACB_3G8+HO?6ru<>k4#TECf8&l1`~ ztu*CBzqU%H_riCOS2!LhK?29g<1Ptrt31^+S38>&>O=yyii)4q zv%@lsRaK>A1IGg;NZ>em+|I3G%Jq+Ow3wI>ClaXD#&GJ@i-#G;09IcV#{(ru;5gBD zwZsVJ+#}iAv@R{2NTAlTdqeg3`@@Z^6Bm+={ry)lBI-iPFda+)k#~pREweoJO@!DtGA9A7u39Q%H{922ZT=B_3uB?g@ zB=C8q`DX^y!snIdE5zdS16HS%*K?F0flmhXc0zNNZsExStL)NymX3DHfkD~Yq1jPV z{atKVer9Oy+I_iCU%jg%&1jgtS;hNCuFHuMB(VBs^YKYJfr=i%VnvuDr#XJSot1A2@>)>h<%08;!M;=9i)G`G{e{v z{ULcJ%_Sg_2x-2$cPm2KsEyTLI(Rn~+rpV>OYRVTe*G|GPQ?x_eiqF<|FS?qF5+ty)u3WJf3DgQpjnYGQ4KY>}v**Nz6+nz~92@+T# zn6fej9TnH6G1{k<$7KTQ6Jy~X#JW`=RgTdd;9%J&CK zkiaXS-brodDnTv0Lgg{Xc5(dWF>kKsGTtB9Qu8*DKo2SJ+^25aFKb@cLOPzVPMLf} zb&Yix6(3~O%$QNDuFi270|unm*lhb$n~mO%9+2(wxUXFJA(0a(L1Oi+W1=Pc^}zBk z5`qz^b!__os1rvyjPUZ|H6Hiv@xS_%6DV2h`daPaau_Y9jS29o)%US>PM}vZQ4euU z3%>|)SP~>g{r!ZxufM}cUOa)knpFQ^qOWW=i$JY{mrkiix;l*fgHwsP+-1ZT?iET# zuRE0PAe5sdVqmvP+(rxlsVS~lN zB}lya^jWoZo!yxJSqXX7;pYm;1}9KUyL3i9|AyT-wsk!bx7Hvu)eoF@qrp`{MB(#G z#dx4(Q@0c93rTh(XVTsPucpi|32Fn0{o}tCtwEOW| z%wtyMmIR3fzd5dc7h^ZNMAVRt@fRNzabR1hb>o$z>aY%WW85sA2%F7T#BHGDn_qt^ zw6GyZgGw->qS6!7q>os4)i;@kjEFo{gCRe}-hJQgxQ|B5v8 zf2%D{FsAQ`zEpMBTBYm#m7^c_o>IzuD9OB6wDT6krNjYk+e5YdywSt=092HyS{0$a&L~=dU~bEN0fK>?sCcmYRQq8 zla25{_V%tM8|c-kSf6s|OoY+hLnj-9J$eIzh#&6s@y-}f=0gb*H@>Y_Vn1zXJeSYT zMb4=aa>SMPaaWoX*)K+l$%d_K ziZ{AbMgua)RJeo{`>N@ip4{{8S!O4lpqm% zdA6FeErGl`OtX)J`l|1@gS;oGcacb1AFci^y&s*nYEMJkGJz5#@QR@t*wj}o6Z?8w zQePnzy{+JUwP|hQCknRNV&+>uZkHn)W>Izx8-_8z)eL1fI`J zzkFG!rM-~MoP)j~xmCfzk z2Cg!A?iPCMaes0uatkL=f&_Z#aj&U=M|g$S@qAx7@<+x_-)h;gB1ZyGCZ^x3U0x$> zU|Xn#qe(knljFpg<4EDT*my0_JCHe694J8o&n#!x)n7y$NT3#;I`46ZO#eyrE=q6} z#g@$Voc9$gg`DO6tksV?RDewos$ zhD7;jukx#P;YL;rTcf5EF>AtKQ$`Mu3Dny1Qk9awIo#-Rn|ZZ_h}>W5QU)ex`cQ%d z+N1YgiSP~kE~WERvJIqh6{Wq`xI(4s{C6pDr)T<5g2eNKtCZ=p!;HB9V_qc@F{|6P zl!TEofm*nVQXQ|@Lgnh`*HSLzX8KTq#HjEpW!#gYMz4F!2D?g5Qq_;0Ic_JekhpTn z#L`dd5;=hqB+xd0ZfBN?6!!h&XzdMJ$!scnhUcI0dr4D*e@j6EzX349|J|dA8R0hA zw<7+T+2c0&_bC2t#b!fEFoMx^&d;w0E35X$J9fO@Ph;l~GT*1P->BklHhO3hrxz~S z#0Zoif$NG%kTqK%fm(9p%qvcyS9n4q+Vi+y$uCrt_rBeHImG2-XE?Dq=9bJ)!xOHs zC6hR{q}ss zVwuF$+>L&(Fmlwwm`vj0)Rytg2KtL!mfxU}bc!r-z zJXg7G6C+T9XAw5_iuW!`0*Tj)pP;ir*?VO4WJ$TGuq~^Vav#4Z*qwv31oojxtoZ&5 zKY7S6%4R~R|Y5*U+7 z#EjUomDxZE67pKhdN=FKzx(4r>$u7^jXbBSY+GO)NZ`6F&7%Ij40eA|XS>*54}7M- z@f|OAg_!@?+-UBR1R+$5*bsJ0?7dQ+ONYFg4tX`Q!S_oh#?;|npO8>qO%SwmCt9L6 z`ycERyPT$1W?QI5`+mBWS97J?#114%Xg^Vhyqd60nYf;zm^Tm-Bxo;Fx3XmCAKxG% zN3T$e_DFRrOEzgw3z5e`3GKb=R+h}#bE2%wEgg3`g;z+>zOQa&$%?CDgT{ecw4bb7 zS+dmQowoD6aFozKwQgm}(5o9mJ_frOQnslES(u)+F@OOT*_c-_j9&AS;RT_v1A zE!yALtt{EBqsv5Jp@i}VI%FDzZTg+?oFBz_pjSvx&O*1cWE<`;k;a^Rg<6yg(XA|5 zxAD4&10|GW(XA|*)gvDF%qw39wSffXcXTUDM!6zk1LHs~${Xoc&MPXcr65p3`6!*| zxv(X3F5y=RN|2yjn4tZ6wV-jJ7UkH$s~wS@#9fOL%H4@*(Y8N77<2?>q8`Saa*!f& z_WhiXAQF_16k|i*yw1N1aaaUuQT`Lg{5eloVFM+UOBL4=+NM2#Wp6)VtgU z67-$}%ucvl94MjpGGL8Duh`0rW6r&j37n}tQ~N>hS`sAa 
zy%ShDAG>o2M$jTq3)lH3PZ^C*7CmT;>*2GNF>Olldr&5I$6V}<4L07yOLHZ0=2|Y{u{}q{9;wouEu#u$|=b=G)J&vyej=yi6BsdgzS}nBy0_p%mxyug*&XK4O=6D L5+uyeJY(`Nju%Ka literal 0 HcmV?d00001 diff --git a/act/assets/vx300s_10_custom_finger_right.stl b/act/assets/vx300s_10_custom_finger_right.stl new file mode 100644 index 0000000000000000000000000000000000000000..d6a492c2046d3850ce7c7d650e951701b93bb87d GIT binary patch literal 83384 zcmb511#}fjw6%i=2@oW>C%8L-R3(KD5+qn~_WB^EWd&o^lM=j-XM?M|KSzb}|S z!*GNmfr>jpv_^Qz7<-YRZRJw2AOAezM+}QV#hoD9Cc^g)p!4vmnqWi>66&bP^?3V5 z>-;`093f*Mfr>jpF>Io0wXRl-op;W}AVJ&8Rp;gfE5@&F`^p$dpyEytZ4+fOHMC-k zY?jK21Z^wVxIO2s7`}Hp$rwZ|shci<2vpn&qHQAEnesea<#GWJa+lBuqLu4w*f}f4 z`rOTB44c@oj0jZR38HP{R)viG`HOSQ_MNGt5kxCjnm^B2F%tEyFJllvcaR8F+zFy> zBFl(-jEJLiJ7@&a%60n2-&TxHIcmxnNTA|Q5N#9xJegy~XnMG>6A9W@u7yueS}{&E zt14q4fr>jpv`xHC9p8#k{OM39611&cM;;uvVl@4!vW$TQD((c)HgWDkk-c=SUL6?j zM1r=JtMtvIR*aC36axuV+zFy>B6Lra7>bc9WUvzn+E%X1w~tsc`gEx-V<3TwJ3+Kf zB)=bH#mF-;!ifZJD_5IuhpZSY2iK7?kU+(qAlfG4RY>fl^Z5HqxDyH5R<2Ez4_Yw_ zC2TBXAc2ZIL9|VL%2mLMk*aX86A9W@t|@!=TQRa8Y9nJHfr>jpv`sWCQr(J?;>yt0R*ZaGKExnF+sc(?fN903ziNn#fdne<1kpCp zy-lPQqt1*qF-Xw1a*h4Vuwv92F-FEf0u^_HXqyPD|FaciO3ETJNYJ)&4UDv7j2JgX z#y|oUcYa6BNTH;A!Vb zs3(zJ>b(j0d$Wvz1S;+X#jpu@YdI3?jUtzN|Ff6aDPtgkiaS9uYywIjj)W>1$fe4$ z8~ygk7)YSvPEZV+fKrqrp-MP%sq*;aFsF=x1S;+X#jpve4LB02q$ZcDSJIX>Weg%} z^&Jr)YaEV>J3%pQf@-yup`!z2J*W|?#vzxgAID9l7*G##RNM)QVH4kL=O|g9YlNyf z$))P~<6{gN0|`{z35sD8-&+|`vR%TFP%Qv*srE`#o){Se2~^w(ieVGqTT4;0eZ`Sb zEg5pDmRgBH(J}@RqlWcZP6R6M1jVokyPeo@xoq`mgldtIOSL$!E#4+$Ac2ZIK{0IN zdwX}eY`t9{$wVSp%AirS(l zHceG<5fv%YmCta}}p)X}C{P(^S0`tjMI4Ve} zcg1eEA4)uey&g3o`pTXG8iB5p9sf4NYXykp8*9k(xFmZMew!x*)|I^pRFF{birxRn z`REMe$0kI_ui8~3&^15!jF~1lK*Z}%SH?&y-$CyT69f0jUJ)utsCUKgM_p~1iubNC zG5VfgLybUJtrus_J;eh=iIGiZjBB#5R&w#gz-ZZ5Lj?)-uGsy-v3X1Il*cDV|8lat zMxbjzm-FV)YyqO*q4qL{pX{Y}t1~I^mh5w)f`ock?0#*<3w8MR;gh1Xm&vFR=&Jqu z1@lmn0CDzuZyBSA>^m>ZFgb9j>^q}^gnC!({`t=N9e6;!$75Q{kO~Z@Q>mAcBProQ(0n;1iGrH zxM1cE^A~;nH^>>htQGw^3wf1rYddXDWns$~9Po-FsQ=(>3eXau^FZarttX8z(^ z@Kzav-obOuS%EiY&4~&U>V359+ES--a_=XzqT3g*t`X>3mi?^Rrij02*lL%IaaEQj z-6CfP?vgD4RFF{Rirr@DzwQG|HhXsTy5X%g0$ux?oHi5X^cRO4@0BrpWtkZ9d3K;7 z+a;(Vp~^?Q?GrwE4ddzNMDwRr1{EzcvSf0TV5NT4gM&0lb>s>*gdU8_X0 z-Hr+p*oJq6Hl4gCCUq72ejtIaWc?1qJx?G7-9o*lhj zK37PfEB}Ur@a%pnpV&#y?%^-t92F$6Pvr+g` z?_iR`!5kGNuy5!HEtWsnnRn8x=mzpVM*>}AjD1j+G>{`jRF-(1&B;+g0{gg*(8eFb zo!d9hjGigWDXrl>b;%PZjL}g;OEgu*vNMK*x5!z>5g!Aye8PTO>xr+q4Qa&}H zY;P+^il}U_He(G#1qmE;aD>Wt)tPGl^yp@?{y+j@gsjhFBgoZk ztpRNYIuB|y$n%In1^27Y$sQx?@K;~wk}XrB17*7e33NTJY(m?I?gzDf_Q#qqdSwLv&eQL z66k8&VL!A*!{qxxZP9pgBo!4T)VpGjy^haY!pU+^ik>UmyGWpGKVH2b8%62;v=qfe&5cCZ`%kql)21n$`HY!M{vdbO|&;963 z%>FNS{{snhed}}t`Zx(?xl4VV%yNVt6(m#{Y>(+5PCO!J#}d0Ag#@~=pXdm!F6$5K zt0k7122_wxb%~vuz^vuGnMR1pbJDiznRe=A|6R*kTA5K)+zFyJ;(MO+$G(VGE*0A| z&xwjVL9}O{6A5)xL{Qce3EEb!Qv(lJF@EGZ6@iL7LG+J2ClQpjM1r=J zD>};|D~4yD6BT!YXwN(+611&cpPwAIV*JQ+jvDr48D&OMaVLoWk>~8lzAU3WCla)+ zTqSP&WySE!bE4u-5bc@gM1r=JE76@}Rt(QPCo1j)(VlruBxqZ?=07=M#jr~T7~gfG z;!Y546EIG#5kxE3>xZYT7@m1fRNM)oJ@cGM(6(|#oH%X8@XT|f;!Y6lndd};wv{W} zgtJzRA9>Cl<8Jv=W)u~7g6JQ4&bFry`BR<~3EEb!;MC`>7@m1fRNM)oJ@cGM(6(~T zX>!4e;hE<|#hoD9GtY?xZ7bL2Mi;FZo_S7G+zFyR^PEV~wsKWZaLJ0{ndd~sogmsX z&xr(WD_5iDb_~xvCo1j)(VlruBxqZ?)ZabxoT#`HM0@5rVJE}iJ#U_QPE_0pis6~(L_$4@jE>(_s<~dPuCn$zzo)ZaG!jVgr$DVmkRNM)Q z;hE<|LY36yQZRsit!`Qsj`+xsFn=5RJ-d(o>MmZtjs7X z?gYj7k>@0W@|;Mh78$uz`|(GfQxT}R6BOe|o|6d5b0VQymgG|H;2(KTMWEtNP>dgW zP9i94iG*q)lS{SdJ@cHXxDyn^GtY^HYUz_p^-Js#ddzd8;!aQuo4`CL5~{~SF4e#C z%yXjRPEZWbJg3YkhNwKJdZVZ{DRYw{appN)3KHsFi8asZBG4s!$gbFWo)Z-$)Vt!D 
z=R^WsvfmyOd!7>&B-Fd&ndd|TU2^;(B=$TfDoChz#WT-|1iIunN=WQ^PE?Rk?}}%h z6A5(5@t}~{^PH$4q23kGJSP(9lH*z-vFABaK|;MNo_S6r&?U#`LSoNzqJo5aS3L8a zNT5rOGls;T=R^ew9M{nEoJgQcj+chSp65gb2^^o%^PEVaOOE@7#GdCw1ql_;GtY?x zy5#tENbGq|RFF{D%`?x51iIund`RqhPE?Rk_un(mi3GZ2J|QIbJSQqhsOQ)-&xr)O zWL_jB_BM-ClctA^MIhP#XKh}NT_YkJZEf# z<;ryl+6X+r?6F1Qa zqLr)Ze8buFCI$E1(e&A+IntiEO)J@RIi3$?G72I#d`1tx? z8H0!>voq#(C(uRP6vHM~4qp}X_Q9@z@V${vRFLR!{h$>i)x&c#1`*L8wk>xj&_&x6 z!zS)-XdW|T-0tNWk_~pEg2cZokJvFXACobNkay6_oj@0DTQMwR@W?~7wlAvNv0+YB zka$1*m=$CC+Cwr15&s>m7T`{xi?%6-O?WNeTFGm9>1C;340ob}ME&L`tr)A3KD((J8Q+Ld+V}{LBxZWZ~ff~bkR1&u!*lL_Og;kpZb?P8sS6* zi5K%PSTRoi{Yb_jB156>0qz94Xq#f##G7-k*{Jhv0_1;ARFLSB^|BS?YGW_?o)ht< z$h_t51iEOOV%S9Ks_A%4q7*A`Z>r}+1qo;Gt5%Hg**-D`5rsb`4{#^YMcWj^CN3r_ z%y(Bfu=MWSASWtFjF@`eigE5mRvCkc0S6l{cPG$A+Z4kliZ>47KRwyB{KoNoPE?Ti z7IDLhF}ZY68H0$#MYb$;C(uRP6vHO&JqY8uYW=kQ_Tp4dRFLTDeanh*C1;3?K}4aM z7X#c0bkR1&u!)(2+w*?@n*yp|eH4QV5}%vgwqp23Hj*)j$WZ992Z1iyrWiJ{Or83wkd{93_UQAHysof5WF!m1{EZxCA?$B zC^{iZ#vmfui{@_WVOWj$Upr_Yaa@@wkjZ#K58xJP35D62m5_^qElexW6oYI4Ve}vg@ztb20`I@>&JB6X;SU zpG{CH+Hm)x21l7)`CmYLT z?aWa@Le=dzqMyhZL{O>VPM}M*3~Yj08AQld21f-6)kaCO?zxQdyo{Z;*?j6p=jqSL8;r4i^-Ej63?-cF=eA4dfV)n=?ao?;NuW>#qr z0$r-bX%p0X9Xztya@l(2s34)*zMFcykTHm;{=T)pJAp3Ma<&O-g>MZDqINq+1qs!L zfBycdj6p=)c@e9AffsqZ^}H9F^G_D2I_xk1iDmD#wMs& zG_GPK-@kWhW9P9^Tj7(~dnPi}VtU8+ZB6V$t_TkF#@*}LPYAfft(gTMSEV-Vr^ zm@}_CfiBe-wF&B_ri@9OSN2jlDoCjQ?v_m#WsL7VVJ{+3aVMyDwh4OwWDQOIXD?a} zMm0Unr&aYX)%vs&js9-W_$9j{P(ea%s}=z5-HH(v8$m7=+s^!83{-HxSZh-&gZ}B~ zLHj}%*1;ALr3hN1MrTMlI_08vn~H7EL$*S=#UKR<70+&w(IKfAxG!|6b5iZA9|Wyx zqobniq^ftRRg6cgpE)W>sFs1<-o^7k0$q4cdJNnz9-*qW?Y;rVKm`d}QAg)zXL#cx z(4}f^yN`neDoCh0*zS2DfeI3|29J))&TYpRFI%Gcof6V(Z@xgOVz=4 zPZ$YQkf1eq6vLit5Ep?iRR`NWdL&RmLe;4DI0F)>AVI74=&0;j7I6{iQZ?%L5f%%v z%Mw(OpjCSm!=5dIF_1u)s!_j>`#=m-kf4 z(M2n`h_EvzNT5P%$;d^y8{AXeM}-8fb|YHNT&5`Y8-@Eqm-@SBn*kLh)V7-I?1~{b z>@%l*p-Ua1Jr9}u>ZnjrSApgTVJpP<`W*L#E_FBU8NYN$sy;^o zUFzwv+i(~I6(rPCY0vn@7)YQ?y#;n#6l0)*gnGN|8NV0<33REq*=|!KfeI3;#IRfX zcvPq$p-LiqjxfeR0$r-)vwJKU0~I7xiE7Vhz!*rNOO@c?`)XDYyJd+A5~^nSF)u?s zB}kx4)kNQWkPrhEBvcJ&&o99}{4z_#3NT7m*s;TXHGI0^;QY{0!M~?(5NT~LT zJi+B3G|BG9E; zwf6WG5~v`d+S>N08WN}=p<3kjT(!6ebg3SIJvxX4DoChahCSylE&^SuFJg~!B7q7L zs>foFy&{1M5~@dL_c>+X*&5mY-gge68j|V{StOUAfn_{@ekS&77kWoQG))H0>*En^o1iEOO zVz|b_ErQ0vQ9(l15>^b?czdh_x@enXxW@FIbU$cJ9~C5IEn(jeE7Kse0I?G2qHXJb zsN94_P;LShBxEgN#c<_6VkOW;+Z4l;V0tr(QmM+FIWZ(Q>LtQhVDy40O@%{8!M(Ch?MkWkN2o)q$UE-RzWW{hN z(4}f3*W4(o)uL!_6e>ulI?*-%O4WlJfi6{}y5@LUF=&n#DoCih-8GNQis4S6OSKGK zbJeUEG*=B3Bvc#4H6PB3;ZC4SwQyW>?yMLz=MEJlR9nk6ug{9%PM}M*)Le51snr)n za|cmDLbVxP^AlA&Q6tc$TAZ#qkgD}cGa9WDnG_^c+t(hamRaOj33REJvrWjHFq7wD z&E-S|3Dt&o%@>W8K$q$PxaOo<1kFiB1qsy`am{;;l|Yy3$++g$S_I9lMFk1fr*h5T zjg>%`>XEtT2y?ntG)EW}BvjwfH4iyf0$r+S>6+_o5#Q%JTlbt4B&f$qwX;344LxC5 zruv2`ZyvyN25?dLfv!EQD-F3 zrJgs>F=TbE=nkr@K$&qB&vnn$)s_GM@suE;u86(9R@&r0)sFi?pi5m5&zvwS>YOM? 
zi?Kbj0M=Ec<3&PUg;;Y%7C{OU>RQE`!LkTakf7CvbO${%ytprPVHu;J2P#O=>O+d* znc+nOU07D?F;GE*Rv%If&wMQs=)y8zkAVsjwEB=@c;;)7Ko{0sdJI&Mz#3I2P(gy$ zn9xyqWO$>HKo_QO(0$tcI z(PN;31g&tyuTLYqP(gy$`O#6?>j$l({d--D1iG;Is>O&x z1qtj^YlIgnNYDyLIx5fHHWKKmbc6e_V1YAoYNt)>W4kWkyMY*P&KvZ%mLgNx>U;`)~vo2gA^oGi^DZ@z;(}YU+7XT zoLFZkSTRUJLbak|oikw(q#&VMXP$Ena9`+Bt-)C5epoR`K|-}MJ?9$WzR;ywwXPW{ zUaAyD1qsz!_MAC@1iDn~+_P7N3KFW-Z}-)%Ou4j*jw)T6Y%68Y#H2nl^-N`-!`i0) znfkkzS58x_apd{f1Sv?!*cR=1R7jvpo}Wc{T&pNlA^aUT^D@>#HG&*uvF zg)aH*T7>J_jrx8Eqfn7gzU88K1@{#9Q6VAUO^fz?&v9Sq^749Z5wYEm9|{t(jIjvM z@(TBbE?M%~F|74C@wO(*f zaUT^DvQ1>sp6x5#7rJCi%_3Z_KG#v9qP8i!gnNqnsE|poY{~2LYI1*J?H5ofvz8=QdF?}QK{M>Di{e>u6WMV$92pmCAh~Ndii|)I1hBGnj!WX@pM$As2Ybx{&7!n9~Ba+w)2>+t{oM+ zR4p3oSf5n_kfLg58Y$F|s_yWcRtsQ5g2j1JiH(SDB-K??ham0zI@5|R4_klFqfkLY zZimn{lqHDUbLB?3ToaP6`o8TJ!wVJMuloC>;)(R5auK8;A>V@UF>?No#1B`#3M)CC zN8fpagnrLa!Tri#e+stGe9`|%{BTLa_(8}!n4iks?|0BTUb<8A4qDsa@A>!haKE~E zwn$L>kz#m0S4f~sK9v^X`Rt-1pLWaj{r&hq92FAOo~NVodrfi77aSTU&1 zz^76tP(ec0J{Ik{UJVI!(P#qQ!C31-mm+IUYn$p<+*8~~g@mlBE!v*RAY9SbvYT&EQgGE5q8Rw#fhOsF0AY8jJR5Ek)tJ&;@@t<2-W{3E4WcV%T$v z=)7Y&a}!-^+tp4~gf*T>cZw7hgW8|}e@8`xx(cq5I~7Cz-}Q`|=u-E?)uvWN6e{Wt z(%2jBDeh~9gnD8;$Kh~a=u*$FtN-CTDpb_dPW>9(Q`|>|gnGL?$H;JB=u&U9$B0)H zDym$eJ|ONX?xR9Nm0hv*Qh(e*bp0rmVi`5Co>A(*BB4rD&(SE{7rIml?$JN@LPgaE z)bGbVxu3aef8;cgP_>WeND=M}U8=Tojb%iscMug-bJ7?I?kVo0LPFKlcK%$B;#e(! z@1r=btBWob!*g~5wkEv12AcHFdwCrR!S;%NRHz{F{`v*0UD7V~QoFU}I&)Nzz_y(p z0~I9Xv-|xB?G~9T*S+LF`wmjuM{Ub1A57bnV^OVFoluH9;pLTI{<3wL)fSDLP%fgU zcgZc(n2b)?SJXZdFR!N3rMBaWf&0~orsDeG`2WjMsUuNmuI{<(sG?B8W2bHVXzjPw zBBY!C*;kSFg)X)2iowb2KAtFD>aLlzO;fKLFXa=MY$8oMSoX!) zgK*t*>+0Hw9)phL`;%{7UHi-r7aGr=M)l#@-j`*WoYB1W2OqKh(EvejaIteff_f?E zGsf^&7kxwy$3WTtIP-ZPdzP;k_nu#qp@PKYWh43hrat12*+V7q>cklKPu)Je(UuT~ z3KGB0O2;!qq!J^3?I($CMJKX9PDgSU%rydCyMD>dCZ|p<7N71biK1J_vs%%S+&8!? 
zLj{Q@$?nG#DV0{FJl{nUM`|r#!zT9P`;J%C2y~6Q)Xus2MOtzGVP{E<{^u9AYeX;p zbA}Lx3KDlpo6dGcGKfdVn@b{d$ZEEyLTLOPxoQT3t>OJNF-D;z=hy@LVyCK-Z|tON_CF{Y8-)WhF7V z*k3F%X=fh&M`?x%5-(yl83RY<6`@6oN+KrWH2bMvM}DJnF^xc1sUe23bW=VtdS_ut z?7wu1h0N>7`@Am2P(fnd>1)Qf;e|y|^Q@9cUi${S-K!nHHMM|7plfQTr^cd_Ma1LX znI(~U>vgvI*LM7E_M!|GBo{Avxvlai^xiCWoi5WwSn9n;@l277&N%&QG&5opM$4EycJg?09$Y9NLj{S5ZB@;J5tYT7Uh?-ELa*%l%Hq{;#_ui6 ztr6(TR=lRUu38o040B52=ht7@t;l9P@oImD3KBaKH89&Gt1eQc*&vByr{nQdADi-n zC39*7x~|S>Yz9B6E?VAND~UtB;_*pY!}*)?xfv=*JQ&!@Y_PMYxI1xyBz%W?@nj3a zdDR_xGy+|R-?T6X?5ZJ3wO=oZCveO2cLmxDdFszF z9Mq66nwXBEf<(gVJdEJ-*Z)AE%eQEMb8OZy@i}E3N%U@%lvh03kmu=>o}q%o z=&@bRpBvQ`x90biM3R6M{6Lv7ez<2UjX+oS7LjK0Obx_fzXp={v@I!ry0{@PIUp@V z1&M@*I+&xHg^G&<21%mim=wH7@-Tj_ua8EctKh^Q=H>MD#rtecByrv^8BZy%RfR#m z3>73sCu(C(Y+Xm3Xf#X`EAFP?iEcLFUk4=D2y~U*+1VVGrk>~@&_WVl1|;JP>o(+N z2Bu=DAd#qQb8}{g+VWbBkVJ=SKD@@(2E5?Fq#A*)%J_MJS0bGNS94kSM|sx~&ntG9M9trl^Zlp7 z_|A4o87jEn#FcB98wS>p*J_d^iXTtOyML?CA2*Aq5$NjsdtLKvyjtRM&3=+-)Gh^| zFd&Rysgj7Hf<%&i%-lP&n%Gxrx+L~bPsN)quFnJeehfkaT~j_*HS>O|A?}9{m&Ek^ zKKydlFy0|^JcbGqNkYn)$0k=1JALO!BC>I6zCXA=PoDX|AdLXmipG`9L9eTc$gQI# z@$tD2U+QeYyBzuugbME0+bCpSo-4$L=kp~|I-4*5<9a=wbmc>hKv&G>lIHQJRmAh% zR5DE6nUY^_-GJ9+FN07)!kgzdA1n$M%TD|*3GchU{Mvwe{9Wc-IsvYWDGQi6?h6sI zpUSK1BUAD2U+VL6zdi^;1^1gYCZqXw4HNU%Qs4RE`ZPQuRXskp-zAMe*NbjB%sw}R z#p9#1B$201YW~wN_4&0MH-b<>B3V!>Gtst6V%mgNk~lLUEe~E+m#^M_N+Zyf^l*Cf z&}AmNp7})*Su*+Z4#n&9&)ye)Vk$6HGzQEzJ+p1f&2zBJjvAXJdZ(D$xU{A^j#JI@YDjJuki|5-nj*Z60v zMxblO_Lqk9V0rQHgO!qq8kLq0xL=pAyuCXJ6(lnDIb*QvrA4QBdnD0xwI6?dr4GNg zeXT~I>tfG;j5N-&qV0=yk_g+Io^Kr*%6B<71fhaNx)g_v%RZ&Wsl#+0orYxKOIy|9 z+XpYv2z1d{&9Dh&g*}RM=*$<~ z%fe7WVnsj-DUFEzT*}kTQ}m7hGmfQaE?J0ZyUu(X9o3BQo>wMTQ6q~4c|M~kD-FZ()EXobl(b#F-?6XakbljEX&wdd}@rZMxe`o#6_cUucBh@v{aHv zcHjlueyA1i>F{HyAh9p!U8DWB;$r&5B$6o9`yDHsrzOv`&sQVRl{WCLQMPPJ@!z^c zl1P5y4cpwQCHHON$527SNEP2KFr~Bz$oq-P#FC#`+M~^R3DZ|2&{fZw%-oT^tY{PR zP7+;bePoqVx8T)6{1_@oJU{1ab{tz?WV&-t62Z;A_~yaQc)dNo8iB6PD>IuL@&}3& z=Wk0QI?Fc}xW5^nm&cExg2dAsxy+1U+ zK!%f&$g)lLqlSm`!3q2rDoE@uUeXM?9xTpu+A9hFi3xf4_f7c8RlXX5t^_|-G~+E6 z^8MH)iPmNU9=~c+9)2r5Lj{TO!@|57SXGt`)B-p@I5E$0urc>u=&KRvnh?LX*&;yuzf;ZydeXQ&`?cxjmVZr4xZr{88u;&PZbcP?ne(@gT!2y~ro(A1o{ zuBJ#+ncAW;FO%?B`x^0jv(hqDkib^I-Zwx3UDy|qy`p_5*n@GMd9lrD7?!nn2VHiq zER{{{{9U$SphR_qj#z$`c}?oT&m{6;s30*i@``hC$E>2^a~dOiwCEgLm828@uePs7 zpbN`Kz1&3wi34l=jX`~L2=A9PMwX-2RaT>K2fo(f%aA}9mXCV5iwY74s`NK1tn!!7 zZeAH>g6sfNaVh<&G<64kZ7GNt&9s36hW_|I7Qsib(A?3;W)KA!u+LPj*_ABy`h zB+!NBqh9W!g2aIMNzJTx%8E|^J(4ka!FYUHv1WWjRUd`~y0CoI%Ux8EnDQv2nf6(r zI6L-&jB%rO0>0*cQ~s)v4?_Z7SU&3IE-Fa$%~-%3k%^09M`%>7S;IuU`RXRTb~_)2 z1iG+%)XQB|kSI90g4wZCWiezswZcb>B)n_i#=KZ}ABF_Fuzb|ZT~v_RShc2UoU1Oj z*Pkb2B+ZnR&#cjiCy4Z6NT3VLN4?xd1qmG2&__RzKo^#%j?h2&Gq$XC8-Bn1m!L(> ziWnms<`y*v1c;!3GRE6vx#jpp0fFVCUW%fEL~zMOMrifyvK}li-zY41kw6!gkB-o+ zUf0?0yaS(KJOx7q2`ud#p+$ClX6<&g;Jg2N81(#Egt4l3KJm0cY7v;Rqw)L60CA*B zW`XDD2%Wp=CEMDkHDB2Jt9Ir{?8#ooIQk@ysCgonJdd~tbm3Ky{ivs}SdyEq`2CKb zf>1#sM_^AQU+n-&z+itA%U)V+Zq}3!t=;yH=iUXm->%2FVLFb z`8z&C1qpM_IAhe>ydrzY>=yB(Hb4ShSk5~_XAFJ8CiiO1^IT2BP(cDqZCU!yt}=)%G$>TO8#k#mjAeG7@;wFv~?Cr4n*(MWAb!?@*(U%oSDLLs^;lZxZm!)53Y%n>T|{L1NFHg~qu&g=FiM zYJ*(ozp;2LoAdgEUuguoiY%FCR4q|Jto(=aI43eC;O!oT^B2b+1)+k(^~&pwcH0We zwkWmRtLKz?gAdJlbc-(K2E&UfgKeOl6TkzkeBw$FO>(seFjmISmi*9**BvG?VJf3)NGd>~kYY-|(WRL&1 zG5k_7QK4G`N#rs=vYWG7@B+=f84~Eq`D~vNv!#eQUX|`(!kJ#Y-u-5r-AKStL4vov zW%zU}A^U^zB!RUv66nHu*AaSl@He)(Omp6Ox;H}w39LosNKu81%?!?iPpaF!IiBX3F~b9rx` z`bJ~E<WGhyjWJ} ztR&9wPQ)|GEZM{T-!uYUSJHhp5}hw28WjCk5?v;FbFZ+*yh@*UL8u_{>nR^I; z6b~mJl|+&(3HjxDO?lFX-Wq|fB5zWeDJxbGt75K5BGuExd}4woeDj)w3>72_%ejPR 
zqaZQ4z#&Os%MuB6VVhBoE#*ke%_dFwN}r?*6(q0)=m=euD?P9MHI%1cygCR+PjT!O z-+xEw+E!`!vVHaVtZ=<#K%!~CLq<2*((j(;m^_cT2y|g7A@jBW`tp@&>+@93T|uZI zF}}oQWAV+>;%iH~R*q3A`5$@1izE2HXjGyDD^ zRFK&5^s#aAU|BJJHMPPMZt~$tribxP|DDwcbT#dG!?5^U4ZiyJhk~rTp84t+Vh>xlEUL(-e{f~6!pmdc)>f14rc%Mk- zQpbn!2Mu2Yp@KyH6aMCX9xQs~StE)4i<9zvjz&EBL@$N}x)xl?X1;z9BnmZ&mc-D& z6g<~`nYB#uAqW*Dx)v&GW(yGFThHZ^h=0MG4?Wh1U%8!-A%U)=>GPSr_Ht46_%=zf z-pP4`$_@FGb6yM;B%&siGsBWq5i_C}Nn&1dZ(crEV}7YfQjI{@$9l!hM>9fXtM5;W zu`(Hd{A)w*=a-0~g2XTPgUyN0s|x?bze)mokVv2l`-ZYde+scAoI(ATS1_Gbs>=+IBLs77dgdqXzzn=#+d6z--z7n)Ac5I4M`&CG zx-b{!2<_7%HQ)WH9)CPH9((t6l$`fSb0BkuU}hYTR?qOFf&^x;<0jCB`DjOI)x^np z{>@=LVXgGq+z*_ifk&$os33u}FXAT9g>xz5o>_x)W=^aLHH+n}D$bXqG311+Ke586 zoAJ}T3o=xYNWD7Le7(4`s5+X;#B{UYvTdbX@Xx0UX#~2A{ngBD4TY#PmD&tQpn}Ba zYs^eBo{NKbZb+h2&X;V|h?ac)r(z5VbUhj!XjXMHk?-OyNz}ail0EF$k`I4egrR~& zy^ST!8J{YOs~KNOB1hC?womq*tM4qS5$O8drKp*&Vns3NEwx_fR(-ajfh?Jg@?r41iGzKlko>us;x=E1||M6S0)tZ9Fb?JV4eJG+%) zs31{gZ)Wp&x-z2N$P|)D+5ZNsy0$G})1$mbpzG3nKQnvB(n7SLp4X9zH(7?cawcGn zvJ4d@7Q9bsj*6E3lI!Uuu`1zZ)-`>5p0r~{jX>AxbBWE{-HVGFLo-MseEJpE_IW$L zCuuo`3KAbTJ~#4|FC_9c$uEforH`?5&pYx|vqCfiT{~XgFy1FCC|({fAc+cljm62DD7z&3_;=7YxzjX+nIL#EN8V_uPaY$-{+O?Qa- z26g65J_IpTkZ93pi}6X0;!OCfq9pE2*voqE>B5^0udEU1+U38@sD3NAIF_C=qfdRD z?Ae+weC|cYP(k8)nyE&O+&M(#xt}Dl^wTDmXBW-9YDwZs z|IMtyf8BV?+k&Bj#MZDdW80+6V&(5mB(c5oQg%YlT<-FsmPVkf!}795&aIh5`87=? zQT4(ynG=rSRZ~}Is35VlbP6M+fS)LmzoR4$ZlBEtw&=x^H?OA==vv^s>D<#jz37;> zlO+7o&1DO__2S=J)?%n2;WJ~3^T|hF5kInzB*y(cn*G~5l0U!MP$ST_u5>ZyfF{1; zRz-&-3S=0|ZcdEkQxDZ;s339eV&%O%yi$t1gN93DX8F!+dwU1}JUCn<(3NcVPS#+6 zkGS9)C5gwcyRw1P9XwUzFop^eCwHXek9VdJDZ@udV#U$EEI5mUZ|h%IBhZC&>-BOM z%Rwx6m%S-#e%xJ2RI2z=mT>9IJY>xaw&tVmm1n3Ru{c#p^I4w?a%Lut`^@flgAI;o z%U@*-(g<|TOIgf(_`HI+QiDcVkU#~A3HkDw7xR=8ey?eaY-hpiZ2A4RJjXu9kU&?q zgn7&YeanhmCzDBH+|%nU={Z>@uCByTL1I+VEM~t=rNpP6l=;~keSuA^*`5cU4ABU5 zP0E_V^iNt!EMw^;;XU*sYf_;-58uccDoF6zNz8#Wi-=j>10*r576bEAUaiB+BP>t)PW)`18VnUAKHPa{JgQMpOx;ZL+ZW2|LJLQA=GPX~ z(Fk-I1zs4NqYDV%_QfPIZJNp64erbvMbu)bAW`N1O(R3ZkP>DoBKFoNYMLWD{?OG?WAn{Db-A zh~S;Qn`s2P3N;;T{1cc}j2+iV5(nllVh{30@YmfNF;tKU*wWhgyKP1>x>Gwz4B9)L zJ;>jSFELta1iI?}Q`xAsEQ8ovzr7?TCZEa1RqVwNC2z)1LE@L_3`U*1X+`_gy(N(; z%Lw)@b0j}IyS+xBYw)lS&Wj;wg}6$@oCYJ=wepd?^Wjzu6(kZoTH<{0Ybr5)5Ut?J zFt`o-|WxYJm!~?3>74*e~gZqmN~ij zulHC)#r=yCCK-ZtghdS$aP9ZKH8ZC*|P0F(( zIr{Qk*#|IGkT_W6qBAsaYLWER&yx7HPAk^f=);rLjL-;l?cbTm*t#*b7~xMuqk=71 zl{tNQhA)u}6(nA7D{1T-pH7Ub+Dj6j{vOT}e(ufh{M1Dw&{g-(I!3v|>BZ8)Jtfhk z-%$4LYHwcQeh-EU5}(EmF&gE_Bx(<7OGnjr8he+n7oXEgK8dg|bamMv=U-jRB+^Z9 zBZ*|ir?Mm8dh){eIx|#|7?OCY;r%F^=-4|<5({tq#(D?F2Pgc>UM7p+h2ynns36fd_g}`92f4*@|EiK0%-6CDvh=BSD_kSc)p_*a#=5q7 z#HFcKBvF6L8a77~=Y3l+RFIe}zm)ZIPCi+GP${~+%}zGyQ5Sx`NSH>T>wbn8#<48< z#i(CsCP&n&9jxi|E<8)qMhq1sN-g?k*@CW79yl)?1@5guI<4@Gp2y~U~ zpWb{>xtQ#w(ox+TbdD7p(w>)0Q-h&`1hzQzb_ps-VEe%lTI}prHsEU)?t8W+8?)=B zaXVdrXuQ0#m{{YRF+5)ZA!?Tq7+XINRFDXpl*F9zs*p&$m{yfKPgR!HETb%gd@_6vJ-y9Y0Qy%+P#dERLKC5QN@Oqh6`{G@THcTTaTPCbF= z=Lj9%eg)ed+MO@H)?PbvB>0+pMxNvT;^T24&m%4ZU3e87p(lr~Vsm7DUVUskh6)n3 zYuqsEh53u;?JCO{Ltk!Z(XYDj9YdRF1iC70dS@(OoL{v2k7k$-Exeljm#{mp+Mx|Y z1&L<&E*itGr7x77xm<$iVb3@AhB}nPUG6$%%bxlYL_&fK8mgE z-HXrk)d_U9I`ymJ6P-c$*X=HeSEc)~BSm}jVvR;HRFEkDbfFP`GQD`-n0j8-uD4-j z10wn6uR4LQv?WFxn_8t6*H6*P;?ybYv3X-6`IuZ|87fH3Tsh3RusyZd^NRXWYeadr zWqcn#bM!ckK-Y|bCPr9-RH9aXYBRJ9D9qX}=)=R_PGqPcv9)hiBkYgl!fVfXNxTa4 zVQ(fn_#dgJXau@GJqR$~txP6XKA0ehjl&bO?(G~r!SQJf6(lZ%q%cm8ODxN)>5{0v z_k2*_7Y@GX^bCza*NupOoR7~Y5mOUPmBjfA@!7h|U9^NeB3e8ZgcvnLbBKa3ZV<####S0xo)$4wM?4P~1_ z&I}TfeR+>BW3{V`L=C@3&Te0lieU}t%;O@^g;!d(`u<7E{1f%%V}F^*P(k8-`4UEh 
z7AeKEX>_e>H7d^b=XLOLJH}`Px(ZznGQP}8DcVI4QEo^vb|jgDFZ*jOLj{Ry!99(d z=hKJ|gXw!@Ti=ASDz_tfi>D(r0$pY64>7)EN-J^&5V7-p16KK1B=7xoBtr#>ajEAT z4Z36yv$s-NvT$fW_E(4Ae01}lH3D5&#yCRDwC~H_H0aH}h78k6A0)8klPf1v7h}5$ z_T_=;hH5Q+{{eqGmtROBj1i*+J`0Y}M7PSaQ?DGn%9X*|Q-Z|HBzK&js``jkm8hl9 z)1_xq6McvGkVNub1K90xz4;$025JPl{;WUE znCF*4B+N`i*#-Ss&;Gr+v+Q7o3KDG8O5;wGEMnO%T9317+a&h&bWh%WYj2G}SKomf zjFsE72=6LH#Lqm5-9FfppS#kBp@Kx4n}>|g33G}eXX{Dg-x&*7z4tx%q%mDJ0$o^Z z%lq+UJ{$b92Vc3eyH=kgfvp<(?Dpu$o>X!0x5Ha%Jz?zQ#+&#sCfmf6;`fu(+kRBE z73(m=!5jYGnxTROjuh%6kVv2lMRNSjTbLesA87fHN+%HFH#{LO;l$@<@E_f4!1iA`G9y8uHD=y|2qSa0~^9&Uv zaPF6!y|X9*KM@qpeUp3(LIPb`{jM9qUrLBN*=T(f&OAc}37p9#bHXR$^Aa<{`S1b> z84~F7+5OCz@JA`(m5OqyIP(k@ByjGRBlN<@c-*)pS35bq84~FFTFlEl6H-QO@uGQs zIP(k@Byc8|tes(n0ut!LwFmmlGgOekxnHs#?B-zJRUQ2A1fg111~W1EH6poYG}%Np z>Xhu`oZ;GPSzHI(CvfbZRsGV4YDQQ2v>z?|8@oNH7f*k;I70;qT;b{n9Tl^Xh0N&1 zFLf%b5$LM?E~~S`u=FDN9qK{GzqX!L&l1769LvX0K>}B}IzoS`vySBqh~TM35sg4s z_&*DrN5^IqeIBQ5`dF3i@%oyS2UQT*OU&GuPDhFOhe zjEY|uvtmUec>i7Xv^);FFzXa|J_?Eb7k)LKrp+nFuc{$qgpb+CE`RIBb2qH5<)hGr z*|WIwYDjz?v(MQ0-e1oB2$V5qC)>kb9P7ew$+1`57rHPD7P^ zmod(z*w1Qp>CBrYtght;(S_N}xbvJyRO<1~sME2q{IU^cUt9nA7b~8$6Zd;xRm*dt z3$wCu=WCIenJle&u5C$CCne3SX&iisEzQ@S*Pa}tg1m7ZjYxR-=^JnDoEg%gCjIso*^v3 z?%updL?pu_NpNn2k=x-X?%e4ikk&^gP(cF69P~3s0$p(*6=p3lABjh+XDv}d;_NT; zjn)e?i)+hT%d4C7%0QNTes7-kdOz*V(S_$E>+=_rS+9pZ`O79f7%E64ow~@#{UNgm zE=DnMgarw7;n;~>h23B>yL7!LKQ+E5Lj?&O3DM63V<3%JS|3luks=(?!I46J+y@mT zaNJKH#lcZPbm8cZJ_3mf5*SaeyZcg~1#FGv$5V{fMj&yd8Am_k9v4Ld$E@|SaNHNV za1=N0@m(Zv>|M?_ESi)}z2o3NubQNd@1hGwqxDf|98bi%iQ~!o=pZUc;GNY=AAAR~ z?812tanFE20!MCTZem1Pw)BCVrFVRYHUk1(IA24b`+*7)IEzG%k!>x_*8b_>^-DxC zbm389St(b=mkDGQ8ujH(8}(s03kB;doH?S;#X$uLth@C7Abz0a+l6oGScoBkF8qeA z{%u@Tkof({C}ZWTT%vq3nh&?8#c{UzcqiWGbwP#%y6_ve`nPdWL1IJv#YSl30FhxC z&Fia}5>%g2cnmkBs&Ai;4w>X-^N7+i3I!8nHzVO6&ccb$ryz`ykKUoR($TE zJPZkR;Wun$=>y-!MFoj?C;iQ~X9Go=S+p*7%F#D$)Y%q1e)~KO33TB%Z1r#BqJqS! z>}AY}skpeehQ7&hpw&nAW=eD3UHA=K{oA;x zAW{EnxS8(cPonAHv}$YUl!ScH=q7yQr7R2ybm13i9ii6>CFBX@*GPh{W@D%zf!~*P z8;64uSu3fvu|6F4!TIet2846#USHAwVs`^5;! zR!l6&nn}jUmGc@abhsT4kSze*7rJmQP~5A8koeXhrMWLrDbX}_DjB25(OWEYx3;|3 zeOZO>wVwLc*_Nc5_#%a$@S0gfd3+g7?{ubZxlzPbIX~PUyn5S#hu0LSj|! zf@aL%K#}3xTUj!^O#YO`UoUHeu|>62Tj;_yW^u1RLn3#PvgWY2L2@M$t?o~{_CL1& zr3%NP}szh`Y9H|J+77SLAFp$k{=$##3S zcdS#=7W`t3LfYCsByc67{)-4$8er+8k5l8?B^(RIQ6pTHq>t~Sf`mS{gsTT|CNYiy z;kp2Qb~GwT;CPTe27qG=IExv_1N6Dhs31{(!cL=eksKn$jr#Iw$1wmT(1qgxa{krL zg)C*F2;QP(M}`U#$G19-67zG2D&Z6Z=Vc&)E`6L~effp#;@2L$f2+<66(n%}gq*WI zd?FjWt0&L;xvw_M0>=Zer?2-|u+M?{ujfzCJ9`eyB<^3OxgVI1LInxTY{~kgU^Gj% zt{XobU4S8hu9NZ77^xP@Zvx(^B4gmUSW!Vj&v~tzbc9_<(}m~EpNk=ZF8t!E{`((T z_u<=xzYC(*=cpi|zk|3&5pxCjKH};_J^z6U68LWFtAuc-J-&~){zqRufC>`$Zt71v zzC}1^7oU86EdVM=;Ot*HhaR4GB+!LVzCNoM6(n#Lvp#PL33Tai6wZ!D1qprLQe01B zy7tA5{VTG`UP@(wZ-L&UM+FJ&tH=FDp$p$;M`*7{hgj{BU3lXLc^SGe2KLqUF#uGM zX#Av$QU0R*R&jUAc{Tlbf~{H8i6{FOz>q)}_SN+<0926ZQEi5ir+~k>CBF(K=aDTx z#{vp=d^Ab~j}^ez06lh6K8>uda^) zpn^pCURR9ztg_@Adxr!2cvo265`z#%6aV? 
zA-}oITl45uIT#Y?!oIpb27n3@&B~@Quazq!w!NfTNE2uL#~$2m$&ItfiCQ;>tg_@AmL+FG6SMR#PF2|Wei6iFFwhq8Lu!PJ3|6p*jLxb z08l}q*V7v2qH0w|R|kFN|M}ebyxo?jyj9k$3<-2$UtJ#qKn00iL5<9d|JIOe_5YAD z`ae&|Tefb(@14oSkU$sq)%7s|RFHVIsjYcrMs2ZdD*c*ARM8~-ci+am!Ga7733TaW z0Ig3X=9$Vj<~iqQVyGa2zZj#JiCC&(`yuk%PsWkx9HR1Z`mO@x5*Etyy=)$kG>pgmGmtczne@jgNl`$mHg*|V*N3SXH8)f)gV*0O)A%QOJdFwrT zRFJ^m64QTW3<-2$&s*=&qk;tfmYDu4V@RM2d)|7F9u*|;x5VU13;2~WB+!LDZ@ov4 z3KIBRV*0O)A%QOJdFwrTRFJ^m64QTW3<-2$&s*=&qk;tfmYDu4V@RM2d)|7F9u*|; zx5VUV6#U8<66nI7x89>i1qu8uG5uG@kU*E-qrX$^3oALZ8Q-!wul5ZgB=EPy^m$9z zo4|eoelJD;4hs_K!udt|yd_kS!0*23-;+TCT{yo;pSOex68QZZ{ku3wpbO_0>GPIQ zK?1+SqkrEA33TE7B7NQxDoEh>g!JzeA%QNOU!>1lLInx@E|UJeB_z;=^NaL(OQ;}$ z-*?i#8-)bA^m$8Dc0XdLKeXb#j+NHFk%9z%pG9APj{REfvEsKg^lx$?fi7GpuCG5w z1qu8Hi0nJVw?&XZ7p@bR?L=6AjtUa^Eff74DM+9T*NN-v&rv}FzsaJ1>jepP;i_-> zOMf5EGuFI4|L~wPLj?(aZTNzU+u7E4axL=j;S5(^<4R^+KP>0VEq~LyLN3{+PFUm)n{=fu&)Pc+LjqmckJkI=s30-!)?dc!{DsA#*%@SvpH4hv z@4B_&<*WX`zRo;6iYn{F?ZOfkK@f~6vLz6KATca6wCPG$q7je|ghf6?9Cp+Xo;WST zrqUwLz@VVw4DdV(C`u4#!k}y>B%J~zhzJgd5JgcO5EX=h=qP@W@7%6b)q9&}_(S{2 zId7f2RNcCDPu2b193Vk2K1b`%=d_S`W5~&hoTIm!|Jfeh z;&ZhAd`=6Ax8JKh4qaSYbSc3MbK+f7$}BS9~Y!TQ)v z3khnw$x0Da-$>AlW3WE9(?WvUZo29l33_o1*2i{QNKo5NSA8QvFOI?b*iH)xYP;#G zZzSl&F<2klX(2(SHeL0N1id&0>tj1DBy`m`>Qs}U7e{T$#MRU_^7r3oj9heB`+AvQ z8B_m7S8$<)1l3&hFQn%@)2n;5s2 z6c~>sTbfP-6&R@I5~s%k{TLF|X3@1E=tTtvouD2IEhMPTBCQK82zpV0K_{rkLJJ9M zv*=n7^r8ZTPEe19782BE(X}AxMFj?(pdJe?B&f}zYeCS9dI~y0Jr-I>P@6^9f}j`m z6m)`mEVPiIHjAzWK`$yW=mhmxXdyvu7F`R1Uer_232FnjkHUCVHkW4{Xp_@Ig0Xe26zZce9u?Yk zjdfZ`Ft)ChLVXm*qtd*t6;F#sKxtmrN})aq<0X|RTR*K0n9JT;B~Or=<~=MV7+cp$ zp*{-Z)i>H(Cs$^huYb5)VsseL)abi(ka6Vb(JWd>=%u1o3iVMKuYN@rtLm?&S$t-h z#K@b|#CYW5AfxxoC$nhL2(47qN})aq6>D78sLu{*pzDMrfs?Rtoh|7_V;5Jy!Ab zdqlBP<>;SJYiv~AGT0c`>Z$-OBp6%Q=cMim?`ZBP=n>rlrJipMSGc}&nhj`9<8r8R(w9t zNGkd^>+HjAtlxi`ZGOLdaWJW}g|)LPU_P{eX^^TbIzbBwD#qxjFsLb->mLBI!G-TouGvTb!@IoP#K1?!=Yn~8U(hcK4ug>Ghf?-j^B22 z4?=xMIa)~YJI^cc9LYT)xie-sl=X4FK;f#e@p1GyZ3j#e`aY1AIj7SuE`8Fd)BKaN z7bouxW*4NI8(zw-m@r{a@WmmiW@ctih0nJ&=<~^Xd*8dC?wMT?T)8~O9RKzb zDb*)y?$pi_hoIMWrz?V=J)L6iEm|SOfK{vAwxVU|I?=X!IK|x1vnW;%=Sbm@PyRVI zXQ@a$Q?V`BUUoNrETvdtgx5#3K6j2quN}!df-eP9%$n>CLUcZ0x%HqWt@yp*@6uDu zA*0@nE7c>%_a}@&qQTV4V4b!pX63Lli81Pz58W872fYUW?)~72mMLbd#uY-mKDWM0 z(DLQz4}y0$OEHH|w&F^4rM4n*>fsMvTHXr#yhFlz&}-)Z?n9~Wo7eI-r$1=fe&0Xc z+OmGKE;{Q4_llxLC#v1^O}-7EvBa&p&*xc4yngEew{OCsZ5hv^9v(rjqzxaV&%17H z=Ek5UuhA#&xy0C#+n)Fi>fyy8F}<~DnAV=hlDlh^v)XAZ9*bVlqla*PG<#~jdwtOI z%z(r0b;l!#rgugG#uBeoBwibP*c~Ng2Jm$~HwKSIFZFltZje21fEL5CkO;RsjPWY_ ziKSY9I0U`EoOcLgVru`0J6_Rp@A6MD?y^+rCl+X}>%<^2ZPG!E?W3yy;PyFdNU#29 z58(dsOYcwJdeE{gdq3_`EY&sNCa18^ol@xp@3j{?bVciW77}~Me}p?{!-emo2R(vb zywAr*(TL9%F(SD~dZ=qqX#;*f^XhEpx7g&js)zmea44r?$vcYZ(6}m-mPCY78MH@g zMfhU>>|-eliTO!uVll+!zYz3telO@=CysC|>R5~sz4W4GYahnPpoN6|t|cPbdOhL@ z*GpQY_P46Jw3W1x+g7fH$8`wR>hb+yAFH-M5W_B&PSm!p8-sISmR74r#Az$liW57n z3Od@5PS3hM7*TUXIe(*0*}YQPbHmH(;q!?2dXR8tmj8OUF#5y!F8&Fb#pS2IE;Ewf zY-HX)b-H;lx!gE^sIl25Ec@Fp`O%o$uc=x6H>EPRPKbto>+FBNM{zkVB)E&DyeTWh zrSw$))ZRKlFYewe3dCCqqXRp(^)K#IT+W?0Db@QK>HZs}9wam`t>)E2T%Fds~Yfhqi$?k0PmDVzc%aOJ^F2uQT zx_?9*L9evzGGoClN#>}>m6E{x(!!{(wcmez=i+i&NG!~~COCFXvUz(zX{C^$g+%gK zQ-c0AoupK|mQR=TuuuKo|WNHYDrxSSRe`cCOhg%~)arGI#iPS8u= zPd{fqH}SXku|fS`T8_jD z%a<1O3yr%@&FQt)SQofhvm$%%9(4~{X{Pqw<(^Cb7yJ9qvA@eqUkG2DnVE})poN4! 
diff --git a/act/assets/vx300s_10_gripper_finger.stl b/act/assets/vx300s_10_gripper_finger.stl
new file mode 100644
index 0000000000000000000000000000000000000000..d6df86bef815342723e35f4aac53a0f276877bfb
Binary files /dev/null and b/act/assets/vx300s_10_gripper_finger.stl differ
diff --git a/act/assets/vx300s_11_ar_tag.stl b/act/assets/vx300s_11_ar_tag.stl
new file mode 100644
index 0000000000000000000000000000000000000000..193014b60c1cc7547a828d8549304acb1729ac8d
Binary files /dev/null and b/act/assets/vx300s_11_ar_tag.stl differ
diff --git a/act/assets/vx300s_1_base.stl b/act/assets/vx300s_1_base.stl
new file mode 100644
index 0000000000000000000000000000000000000000..5a7efda2fe330aeadad63ee4ce29e996ebdf2195
Binary files /dev/null and b/act/assets/vx300s_1_base.stl differ
diff --git a/act/assets/vx300s_2_shoulder.stl b/act/assets/vx300s_2_shoulder.stl
new file mode 100644
index 0000000000000000000000000000000000000000..dc22aa7e51355a0a8d0439f85be356c6aa9e1c3c
Binary files /dev/null and b/act/assets/vx300s_2_shoulder.stl differ
diff --git a/act/assets/vx300s_3_upper_arm.stl b/act/assets/vx300s_3_upper_arm.stl
new file mode 100644
index 0000000000000000000000000000000000000000..111c586e18fb0f44cee7e4593d24c71382a6e9b4
Binary files /dev/null and b/act/assets/vx300s_3_upper_arm.stl differ
z=KI=fG95pBt6MljcJ~?dt6Vs(4_`(}6s8Q=T_s9zh4bgTB;Nh_iJ3~MmVU>LOEvQO zk(DY@B6#ZX6`3~v@m3$ZV;g^8Rzi0Jv5i$1{5oAFO6blYCjPla&q|djp}T>YxOv@K zl`2t!X}@x^9>sI2D%l@6?GIn&viK^;=l)Ee#7A#!6A_Rop?wpV>iGMg?WU4&#_n#@ zuvESWZCqu}EN=2>*OeE{(=$D!-ln?#5@h59#iah8vA2)g>!BRNnkr-!dY;-RN!j zn@%9+Z=Y)%dgMvALyjd#E5Un**pAY(u_C# zy*@oXFgcobt+26=T55`}6SQ0$v7aC9e@wqP>?Wa;D>o2c`Psb7wb>dH%%E=lUf^ zzahYbcd@fVX)BEFdkf;T8# zi-eo9<)!jt)Z0@6I}X+(S#u@4Rw!+_0|^`2x)x3DB+!cQDwdY&-pwa!4~9Fbr3CLM z!p7MfjN1S63)=7Rt@|6lNb?H@()2NrPt0$8$h@xlJv;b8_lB)stz+WB564Vbi4wuP zy89C`vEL746NGB%7wnkW?5QgsP>B+HUn3?qy?*`aN~o58D~^f9A8**GH4m$(yE&kB ziiwXuIw!3XCG?#tCRQBsaDq@ReQ%42rsXNFweB5I-woAvOpM>6XLMeI)vGlkvDW6_ z+^f({^U`n4+S0L&zJtdnzPPhRvF>WCNB3khxF@4`l|sJm)>F7YV>1djYr2%gtYrgs-^V= z-X==xp+v9|VgY7vbYCuB+H!?>9G%Pj*NSt=z|tLpP73wHO*mDcXV9eE3EB-B zdom9VY7-GEQ6iMqYLEG`&Hs(($#zBw)e7}+ci3FSNFI?jh%p&4Vqgo-?tk8E%b7^W z@1KX-^XK~BcHCAoONm%gO2oT9jc+ZY+TfC9tc#?bU7Ku^SPTBndYQ zmi&BnM}3w0wjU>c^c^l2C~fNHeqZ16N#f8JzKcXX=%Sr8FA&u7wmA|}5BrG-j?pCH$1%pVy94P)`z(u(($R-> zJO}-l_9M}5W<~_9L!!hjBYU?b;-gw6RO^>N;CF@j$fxpBu_POHf|ie>Qrg|}Ud$L1 zdmm`wdAr>tZefgx-mNzq9S>g2=mC`|vHrpz3vr}{-SS?{xCtdx>)?|gEyR%)cFTJ) zV>48u#3rZzs}RS3SQNx!MnNc{THHDLb2z)@y_m5wT6S#(yP4gRh?QB)SQ(Wlfixpx zT&&Du#>$iu)UxwZ^B586l=o1yz!-S4Vx_s-djmCZ$S z_?o*?DWUeFS?gjmLW{a@ccC|3Y(^NB?zxL;z-V_d4JuJ0jJG(Z0i)f;G$^53VYIt5 zhhoZN8WuCAK_yDqiA75yreQH-8dRc$me9pCEM`oD5~>x(aftu0$S{m{7t^2;CBkSA zQ7OUuxEK>9R4a_#5S0=oNrLdvnV~-{GHkI$hPjvqCA2p+TERtHEM}wy-=pBk zQnMc^Fm@$LSfs^bMp~#u3AJ5JS**-r#>%LcwnCB!tAUI8(H7NuXe&f(l#4eAE2q0d z9o9s5J0+}%F213hu$9b7l+d}kc0#q1vo@^Gu2h7m)%m5;5R|P58}x-Y{dI3x6^ZX{$V2~urJ{^NDz!^(7b%9 zczet3=j@F>L@1OH{wzF6z=r?+>D!7C3f_hiUdun{>3JxX`}?&j@is`n2Jgd#nxppa zz1GgDqnc9-ImPGgr35A32EX>Yy{OeZL_x^AL7W$e_=~dJCl;n$y`Ak7VS{z?^%&+P zNy6ihZMGwF_zj=lCPZvpm=lrte;tle5ytBqdL8wFteS7u9-lMm7Q4p;V zjEHE7zXG6CTONIF6=E9f`#>bod_!sD-xPv(XBX1|3BQabTOqDr^d=k;tho|M(<}XV zf?C=Nc02mR>niaL#P0P~+KmLuYk2m~$JcQuUPz-4_}rg`?|a|Pj#&N1*V9!Jdd(5Z zcZ21pJ&OB@7NPKYS0z-7X>ViP4eowo#_kSAgq8|#@oKM_I{3a>(XB~JyjHSKe1|Kh zR$efW^xwFc*j8I1Z;BBq^Z2)~mLAscl{L|;rZFSUxk|fbPI=L;T+NOVB zE1~DROxte?d8#_Pbq^bqz=o!Ia%*3(u}!;Om7pX^xcRZ<&c`>k`LXWXXC%C+5=cV}Hf%nIon?q6 zb2s8u0vj>mW^f=hKB|;pzP=vZSstHn3ttj>YVJ83Gjr zOto3mCgFGE-DI3E>cQv5)WEN55?+)N)$%RkcYx+aKkbqr;@xDNuWzg2g*zYL!p4>D z8FsThgT*wAZseN~_6AIcXa%dezXM%N&7bO`GcBfJ-(8qjIf2$?OoQ!x8es!54N9== zviAw4+HRjKT}*>TfO7BSBuT>C@O$bX&q?jZoBhFtyN9g%9lICa62~;O$zQ&|#56<_ zB^Gz<-NM)d^CkOV(JG-@S8w0DC5~yBGCL+zVz(?>7%P!1l|@>#YD0-|@)GV@S~N!= zMssLhs)aPqeT%7&C)+P{$Pda4ebC*Ak7}-3$Z7Zfne1jay=}1o_OZR9>WM*_^XGbk z5+%^;2^)*l1_{;D(%N4QZP&PnkrpaZLVMQy?yYV2U-I0be8qj8@}Uo0>;lIwMkR9E z;pig^fA0IK)#LbAC+tQ-B}xPvF6zQ|0Kf0N=!w4%${#=7*PMB&7II<)hKuG{#Aptc zD1o%KRX8acwPCkKOYQf5d=&bFd8rn1x@dlFVa6WpkM*9g-xL!1gAye|-^4chs0|XT zrKK&VEYhM6BP~>-1k#Mau-~_|XxBaqrL9;G)k02eL;IcNKdl$_ph}boBR@op>~d}E zgldJE=58lhB+^DPp%NW2YTMrgysqK<+~xD?(xtryuLjSlE$xZS2FG`O?%^XH!M-I; zf;$dR{JVB)y3_0RWdyZA$GauG+1WI#!w)378gN}wd)|g=yMHS2X|7T63irPVT?e@X z2&IAzSE{w|0J!1=q7F!DsgipJzpL@3dUoZ+33vbUZqy%aQQgVJJ2KeV@sdvIX&sii z6^9bo)7E8gdK=?c|2X}xn|5|K*zO?W9kR|^*m&rd1-D$$bFZ=N~Le{HoNRzmx~y(5|)&zuC-Dn32oi4uB}ghTXQbQZq!z2X{=FbD~<(s zqj53fKdyZEt#08|-o<(77c>3d78CPtI4bcQFqKH&BeMtvZ{yi92|~4K!`~w#ap1TC zyKBB$LhhWR7!zmXi@g@jREZM$1v9S4(*ECdRYJAG$`HqxI2)t>x*1|jqVKZ0Thg3j z8-sSKtyGB;x>JjZiKncYu7qmo4lpKWow25?N=VCYIh$;&y}#MG8?_(_-y>Z7hqk(# zgL%Y%q<#EHbS{b$Pu}a&6IM@Hyg}s~PxZ{Hq?BM(fhR1cp{vC-lo8Yd%@bBng!m6f zPy#I_;u~8w2lLgnmD6`U{>UNdS~tzsEhTv0i?=C#sgBrhbkw?( z&<2V4E+=f=%Yu{72tVDlaLw!JAI7foKu?351uF61!Ad{9D2fk;qz%7iOEuV zNjZVt8S9a(xe{K>)BFsHq2JYG8+)A9K0&CK-Xe*Kp(nO)REZM2ACOp`^LKx@ 
zXQN75BmCVcw$Gp%N`&@}?7rhi-XtS42T5p<2ls5FpmM?g>YzM2X~A zOAuo#?{Lu^N~l)y%cw=ZZ&=ACPPR$^@2SapC^6Nv zJzY%AoY}1@Q1+f4=Vdk`t&q;bb}==5=1d{te3OwD?zs@(Tw6O4^$2Bm5+y=95TWLw z9&ui(rF|3C+_jb3P_1M=Q1c(wy?V1ys)DtNvz{cLvgiC=#c?|!EKM*GkP zo=2s^2?z*hV{L?QUHfm|Yvar$K}ZRw7OJb~)mBto>ue+mN=QqoEC=*puwnV*Uz}GU zoK~EMjdicSA7AzJLzaKkIIHRT!oBrp6)Nhk>u>2S(>HF=sRikg>;J`w1%I9Dh*MJk zYE(%H(Jv+r+vMWhkM`~S3;R*R(ven&&D+~rJE1Ug+)ZX<%}I3;p^_59;+;vH_s<)0 z$RBp<>t8E0)Yn*TukGJZ|5}!5tzR+q>U|Rr$q^excd8sL*iP zRI{fMamFRB5~_8W*>FARY*-KC)_wkh_n)s+8>+=t@TDSA+E%R*zO6oA z?;p+n=ZK8j(0sK;iYX;jqQnPFhg*LmP)-mmWH|`xjTvvrruaYDYkI=-}&9m1=j=R}LyVrup;W)!TedVe2xD(e8a>yD?Yf*zcN`+R*wHQ`bJ- z5D_X-f@8tkI3~ZaRYJA26^g0ZGcJh;l_20tMKioC0QMI(@trHmSDp5ky@kso9 zy(6qY{CtLWt@+vsl_F$hZ$w1iY2f?!mo)t zxfRtDTfg^`i?LNnIpNpfXhzwLtw`7?r#ly#=Al%ki8U$^D2bOSG4APCZRPA-sOW;7 zO!t4B!06??N|b1*e^7|i)@+2o&*|1el_ZH7MzH31>jKgAL!GU)ajBSYxC|?G+=lLN zXF8(o%4>{>B}pP{^&4p34toaL81u%s5+c@0juH}=54+Os0OCw6E@K`ucSc#G9vP0uf6}3GV0YKJ|O+WV&tp6aJQom1?zF@9Ii* z=+Ud9e0A+Aram!ZXCwY&gi4e^D{zG?rcQnLrQEb9-ThjdAAVe6G&IyNbK@21rshvE z60?4k?69V(>CiXNNMrA=5+#t{ti$hCbM$%r`MsL((;7jo)NN0Sjh)}Uxa%2Bua()T zsL1xK>an96uUc9+YvH8rAKqNzdL)T>RC-@B&JkLQ`C7kzZsD}sWYWubER;q&Q^Ud!=I?lRzmIM4m81#xoCj*tF0XKWw_w@SqC1|V+w z*Ye7Z?))+G49E-lQj2N68`Nd4JLs^=FE?B52x$2=Dqfe8r5d|UugsrT?31~3n_f-P z{4g6)`bTfi?d|M&F?GEWM;q}&hvO18S3+AMYTXLV-sD&cdq!8M(sA|m@#kTlJA(ez zm)*B?vL0(})>ip>o{vi6@{MiiO!J;0ZG`#X#2if8i04W%HDi}aGcdMLs*!W%XD>c= zhwOVNP3iwehq-Qyp>KxXwIJK~xh~nwFF;%U?UQYHBcT!{lCu`GkAzB;(E8bp_$eKu zbx;Y_O4j_{x~^^q4m%C?fAeMk5q+*cIQErxYS5{J5V0gl#Qxa+n&5S9)QSwA z*IQhJpFh2PRX$md*bizO7E?|8_o(eP;jr3uUb#5067`%7_0MK^+V#7>=qI}&bkkpI z@pIyJqf3ZbOMBLSbNyqV*5c=??G8-zx#oMq8*{TCZMj{xZBN+P^TnTQ_wKfCrc3W0 zd6g)!N6)`!JHPWq--b>g#(vT{^K5pz%%Jls5`=1vx%rXoi}_C30YBes#0Eq5$c!4( zJ@e5m!JI~IhZre8N2}D`^6 zy~(H7-S*!8)z#h2Yj4tm=cPm}QR3=*{qx7Vq6Y`wwXL(U#fX+#l_=5o#%gySG;+{? zBUEd~GL&$Wmc32|F?zxBX4GThPg`bGqQs3G_H;I$IOo3+s?|{6!`Ya0GYIR!JbrGR zdt{nzb$OSG);@8&v90htCg!|zk`ca5)<*c&m3rh^l7lBr=+Ao8&zoT5*q7?u3nnCM zZhan0lwgazb}tC&k7ku9!M^$RqyI*z7JK%vw?N3aYF3F7Y&YLlYa>)k+c0k3HO24K z{JZmbN7hpcK5ygx9zA>ThhxV_1SCo@?TOp^zg4qf`)uZ**SqF*B~xFUy6s`N5*<4U zK5_T<>(%~t?LnFJ-aFW85KEL$?^;Zqow>iZ>#V_4$6GL zXwznuD3SEL)n-w~9MCc#zP6S>PDtlX|wIYBLL zb=#3`Fe`mg|7Q0k&ewc^oguO2KCLa{iGybUDulL`(*C){zHa@X>yJHlezH=(zUUi0 zpRe5guhzP)5s)asbduO<-SyI%muhMK>~DIne882;{$HsQC46uCI=R-(f4}*Mji>+n zB^y`MtZxStS})a3Szjva^Af^KNXKUv!VNTpI&(wS|Q%U$HX?P zUN9n-koE*h%Tfj3o;i5M>m@|2&PM8piFTafZGcd%>8l=egkL$gGEJ1~10z*p zE499n-@Dstc_oMg2He!75+z>mFxS!kyrk>EEuD?Ob@(Y!D%Gm5U+oDiE&e*! 
z0@u28E?h8CC9M+mAg=rRE=Roa*Y^|sp?$!$*7y02gI74h{%>nke72M%;`vxi*|@6W zxKcvr3ZMJ+-1lFh%u*{C1;ip^SID&$2X^kx<-j_>miCiZni&&fBY zb(f)p?um-2s>wIH{`g{I-zJqPp*uDEi??H6al|q6uV_+<61p4Y38arqQHc`KxrXa= zoR^@j@ZTh%rxYRW`n=B(@8o(O^c+f-*AwieORp^Od!61|D5jQfSukzn=>v7cwvvN}Ns1?y}{N|PH!5toqzolxvrS{l2 z#Z)Oli4sWj`F|&<_1@CqHpU*aQG&0?4?lm6JK@B7s1nUrzfIb2?7jQl9DdMNDp_jz zZXfd1oQkRA&acYh2ZTzLICSRAHm)|cafMQ?jZm#W8G(BsB$~`dlk0Q+X3BR!XlZF- zBPLX$1m8Kmjn}N!_Ty|Qp;}re8|~jVR-)FQEj%-=lGX_Smiqq4I|A`(B?(BBVA>NX z5lUuuGL+yp1xtuDbMm(=toPFR0ihBl*iWADp2vBg1fg2&4^M2jV#z+wEMKw@Y_LTk zVc#IFrM2I%@!&6M`zfo35?aDy>h0xA9$C-qz=m4qD1i;FUomyn1Mz(yExYDRZ|CpH z{P@jl?yC>pzEq+F=gNO4REzW9e^D&8!TU){AkF7?TD#l%w!dP3c(0GVG+(`6<~&^0 zt3NMrcOX@wMAF8;MzyV)e(|C*Lbdc3S~2z0th-Iymr5l{APrCCYd-wbw!1xX-nq_3 zDM2l+44$wxDt-51{hH9{D$#uD=RI-CjO)_-PJ6=bG?YM^-%r)H{nok*wyb*Q+#YTu zs-%>NzX>mDe{9vUt9NoU3IrueB)>Eaduadk(u>Eqw^a0p5G^c~_ERx6bk3Tn9x5># z5rH&TJ74p`-`&}C`hZi)dQi1ErrEly(^x&dXNbp@65IiJ0=>QG?I*gLYdr=Yy{gqZ zaew7TiS06`T3h+Atk^R$jpzOzvcJU&ZT!Q&B$g=QcbvRsvD1dvrn_(2A2lx{_>ETY z9axO}rv00aTm3HXA=}w&eB)E|_0MTz;=oUH*Wc0J?HQENyQ5lKSM#R3c3$NOm6Q;+ z4&nxOD>aWKO89pwmI{r8mI^f&0`G=i3j|v_*&oS#aR)Xgm@XyaysS=9zDURQ2ygWH zD~HtDNI}FZ@1*wggo{@DoGM?3EiDkcOMmZQgy|-$o3+?N!iZ3b5=qa16%)Pq_V63_RxPc0>jc_L zB}y>uy^q}&t>v9mwV^d9;a6uRRHB6L4BSmXX9IUqRieb7!d?{P&D}}O%biqvAK;zT zcr2j}>F52~(hGA=bYH7gV)p=Kw5xBoCgRw|om4m4u}kGR_H!kEhf9oh^NFz}N$8B? z?Q7gkWo>Z}m9>REj!?ft4x=3%7>#zkDYB)Rj^EYvPO9dmbyC~KRQp%{n4@=4i4q(O z{w?+G?Yc&UN|ewR!JX6}aBAmTSP9kQ2=z8N+Am(Ob6N@2(pGSN?&e2nTdkdtv71*L zny3_Nld6Pj{VDP0g?9Sos~fuF|JptnyYW53_^sHF_FX1D z;QR>)zAq<<{q4EjFwJ-S>nh9-XCo$JEx&eg9fandcNg6^`qEOl^8j~iHZGwrmGwm( zNX<*vL@jMJqcTQWsp67&Eq~iob4n5_QDW@VuiCoHJE`w3-l@q-g#b9>a{c{ zn;-AJ!C!v8*ukUG14=OdpvGyE&9Wky|p^|n}E2l1mfq8EfE1plE_+41Kl0J z*v8S%&NCvGBnj;g+NhW{r-X>LIAZ*1E$*Vqozy~W(MQ^w?uKbZsKoB4wlMlA+2?9Q zB}ydEeb7!@oIc0(iU0EBzgpDS-)}v*i@S%)FGBwG97NUfG4NRyB^1Z}B#HQ)!TXZ) z_qqn7y@ZIh^qs-oJj?S=s-9EZtvT2KD2eXj-JR4t@1&|k3A9MOs$n+SkNvB|Z7S?0 zvlu1y^~(ypw_>-#qUUaYc)$piD1o$FQ7xzF-m5#aEG4MLG4020FyKm+9EsT$xs&Q! z!PyA6SaYa{<^`IB+P1if6?efO2LB%Kigo85cF0*D4S4A#>FAc8N|f+B{jOR66=2t1 zE8GY%J65VV8c?^Tcko8Ml6@BuT{YC7!tI-JzHtW+UkbSt{QO zx_;OYalT#K2kDq7Sb9xg+~xH)Vj`B5608$OdtJUwMMaZ+TEZ<>KjP}$T~|vFww7>r zd87I03EtmL&O7fDK_Dl02JoYQ$EnL-S^I9|#b3N(UNn}d=j3y~N7d!+{%&La15ZUn zxW5~finJ%jZqqBz`@4}ZiR$5R%0|y!sqFr4p7(cE;`8-{*3Y-@mj7Og@oS?!o_EO^ z9Jiq-zHK`{5V543@GWgIqwlSg=l$KNR8|k2+gd{JMR|W$B}pPKZ85dZ19$LFYJS9= z`32tJwY#GQ-r3b}h30ps?-tELl_;U!wV1l)u&<+eu7ql7{k-4htW}8;TBl;lqIcYi z2C81Z*G9DUv#wY6cV!-YlBF_2*S`4ssh%JuN@awuWHF&Dk-NXUSnlseD~q<3t|jjN zZZq%isw7D~TZr#++TGn|-rudSk0nZI&$|1&&Ah*>glgG+uR`MfZZq%iDxq4t_xyVy zzQte}e>czjyGp1QJgPgHn=G#6su@U!o zLGX?b?77vs6Gn;JzeEUQFchB6mMOFTZ|5w$Mv4qp*Q2@=mt(N=iq&wPIO+u;q}hCk(aA1`}3 zK%mz5tF)~ac}%=A;&^z<%9h@{P7N0&NKC7mBWXxnoi9M z?Vs5OA|XM7PUZ^1C%)y3un>G^8zo2-?PbDFVAHAMixQ~iPH!uD=qYbH>m0T-+bBUI z*jB$rpcb|?ow>`}9UfZkChyHhQ(PPEpAVYdwfg7W&uP(aO=fqU*dr%8<{<3}%iY(z zMjZ;*$jbI!YhKeu2@($s(LO&^iEqnpyrf;Y@-=O|^lRD#2-HeR(YDe&%9pkdANab3 zH+2r*_ew~Rxb+D4W?J;oFL-bJ)XtufW#49dkxDHB1Ztfh-a=bf>apd{bocHNExq({ zN5fclY=xRN^c`GxE7#+rstw%}H(u{m`|9a1N|3O3GjlZ)jY}K3$8PTAH9RvlK%mxD z?`R%(&t>BH(hhE)rmeh!ovU+Eg2cVQDABQ;i5E9I?*ERv!Fy@qUtdQF688S@I>*GU z?(N*>xvjnaC%!2|0<}KKWa>g8tw1Zw?drH;x!j^;cTOxhpbzs~g?eo~`)Lm@G**j(b@X#hg! 
zj9(-CI}P@p(>kbgAD7qObH0@jEET-x9!ij~8G~;`1m>##f zUdX#;e?NECkD1X*G|OB%dUn^F=4Q(L@i~2sV)a%hy78sd=*Lg-8+B+vw5&-%mN#ca zu8R^RZofw9m5(s-X#aDsr_g*w{{;xt;wSM8m;ERce8a+S518-FCjrqm#B-f;L&1X^ z3S_J489^k_%9L9Wp521LXQ39J3YD7{5xZ%D5+u-CmfIW=yUl?FYT?;YxiJ!f8zWK= zK8K0~TKzOL&k+iu6LpG( zc7HXW<9zNb^V0))SWJ%ZO<-y1)Ju~kLUc^26Q~CgmbU0%fj9l-;cMqM*-S*Dw#jW3 zjN%MDxNvRHjjsq{l4Qa>8I9uH`c(Sb)YmpvAWVyCPHSw5YkltW;Z3$@omgivWIkhV zX^z%fq*%|MS0wc8`NfDt)R||XEi-%gc_f~zXY4GyXUVE+R}JuBNob7dQ5h&n0!91X_x$bKDju%E2nwZZc8)gnypTk#7w>8OqDFs zmVFkKBom2wmsW!?tOoL2UJa0@dA}N|2y=UI>3RxZYL+lpw+BM5)Mb zkC@#~R(Kb)&&iS%Ew|K-~kzS*N(7wIvnZ1hyYLS&GdHD9O zXZ9{ikRYp8^6>3l&+J_!P>XC-A=KU_|081l2bLXMVdRh;A$f!qz z5%WKgK&?SX_`WLmA7S%9P=W;cA3|szWVd@}w`0w*wqz|!seHTLGrJunNRV|ddH8m_ zXLdUhs72Pf5Wd~+nca>AYLR^{rSk1|&+K-TAhBzuj!NHdkC@$#1Zv?;-B^h1cIk7r z+fjl9-Ug0^o~oDPwHjR28~J(*?}W#eGLj-i zRlYbIp;(VELBiewKT_mbqzDPrdSj3FhaV~OEK-CLBy24Bks{9`MM$6)MRcUJex%5= zND)equ%7iJMV>{9kU*_xr)Z1#ks{9`MJPeSde)B=MJ!T;1Zr6i`jMiDMT$^@gpCD_ z6!G5n>yMvp%JX}Eeyga=mhaE*y0={Y{uHwM`S)Gj=0vmUcilgKaNrrAFbNWzE?hh( z%CuS;-_7jR_R9Mwzsk@0IdxvNZL@{)+`Z-VqdBE>BwcvLe9E8ijFQdZ{o%%wTW&vJ zuZRhga2g3t|Ft*AmYDdK@;E(Wa_fr`tYvxs?Sn;e;y(-eJD+@*C3$eESYmnj#1FHg ztEMiF6FtuNaDLbGIwj(gWa9RQ)1nKG7siQKU+?75e?pifncz0u+nnDhu1D_VD$eA? z11k`w#dP5plcTGSFNqUXM;~rF@sHz#FbQiRv3c>>XlM+7ZGrQsbLWhvAD-VVgh`T# zlJa~YPSr-xMlqq z=hZ14{ZiQ*WzS`T>o@F)1##l$YxA9MBYOy85-XME!EHFE&ipv>VPV+0rq4nlOp;7+ z4-Ts{FHWp~x}h`nyZ41KNixBs=i7PwEejrrS3P%~vvEYR5GF|`Y>rWa$L`jpll*bT z&tk%~IL%`^k;j+APc^N5-AWxLaY-`4?@gi}OD0u!?m4he2$Lie{5B`rYWx1VP1`kH zAcRSh37fmL9t-P#*K}Fv`wE0)cTbYEl0z`av)tUOS9yMf_ zm$mRMvYEl3wTJQS!>d8Ys#GCRV)fv6+n?KoxO+~c;{DIm%;KIzyAAEcpbZz>S*!T# z3rEwLKnW6@W?w|DzQK99?t(XJtUrHqa}Om*M9wKOzCN!w`3*X`-{2s;}Jo}SF^@yOB)kF6n)q_kBeRd;}CSR07!rDr|8^x{r$+kldwXTN}B&=`rD^N^q z`k*3#TGr=E@VJ_Ae33)r%0mefHm=kPXJYR0MMX@Yq#}_TRU-a=6wzq+y*rlJ`@tRr zK8bd)67NSK4e8&f`rYe>ZQU=9UxGP(>%8;(J=f@_`adl%5KRW>EZqd?ri+L z&xtbe>65&E9C%egwxhO_$0A1JhL*e z=45e5AFwS#qeR-!w=z7lGEjoVnJMjr_N@%>KmVLr%vJ^xsKvi?oiq=!mOQYQ{Qlq) zS|4D3GBbFCQtB7;%87fa^qx!EdEM3DZRMo7#9Q!HJs?zqgr;phrAvs8iv_7nX@XlImnaKVfU1Zo9)kk&yjz7Ep&6BGOAv_?tU*-p^kZFOd% z&yvTA*aAQa5}G!ffmSu|hi0oY*~&l(61dMD3o-F^wOCOyfm)h2+lN*)kJdq6)m)Sy z5gb=c%srk_%mhj*5~*<;j%OXRONz)Y;jbY|`@}5^NheMbsAZs@7h5tN-#r#5RAMv| zoQ{RGRJuCb`@w6im5Nt=E3Lntds_y)DC;>&kkE8m{9BXUx@4Dd>v||b!urOyWIVD< zDiEk;J*z}~T=6L3?+)#7!#P>RYpsoHULkF?bK2f0 ze|7faJ`S(7QYw~IBu2bs2a-nTM5p4-{VKmxVw{r4>yA=vgo2@*DU z`PR}!3DmNgPYIqyxm0Y!p#%vWZ#q*i_-dJ0eMq1djyL)~btA_s?0Q?cv)j?d(|>p> z@azU%8TOy#7{HP${k^`mo+-O&O>Gw?NNma173Z09CK^;5=5;x`J9pfc(_xe#fxSs{ z$+`QzcAJkEHGluT0D)SZ=JiVLDvNeZj0se|Pl*&y;<%y0(kYLalQfG|r%H)$v-#eXrJS;kq|=bWwr??xfHu z)>}B|k!+Dk6pwad)gnW}BuH@Depg?O#})q%t(Ro0hTD*Ldv#7EvzGf`J*`C1Abtl ztsZPElpw)%N)VT3u9olnRH;>I9ZHa}zR^-ScSVIbvGl}jBv8xJ+Jl*o^pbB(T=MgC z>#ocarjB+a zj_w#CHMjQw2}|o;0D)Tf<+qZNs5^@7Q#Z)_G5?ovDN2wa??pqO^}f6_13JyEO7K?xFgwuS!oz^0+{4$iFmQh-1$d(-p` z46r%(QJVg=Lm45Eo|dc2I(Z^`@>-|M%i= zr5+0&`B#8It)N8~-f*wXS9!m!oP!c1Y>v^r?b7n8LR|Rh<7^~Q%jOvM2fx@iT8JNd zmHKlDOOU`wF@4)`!rMhW+Oz6D;h+Qw^4nzWs&|)qEWenEK}T9RC_#dJH6hfyd+JYj zuV>;fLobQP^wo>I7@?l=WA>)d{N{IxQ6t2gdlbTNXpthBoS z(fhfFq|dMDmf_cfCHP!x5#3|4);-s&v;-wcSbykhuzr5lI^OMdaS(v?EryVR$9Foh6GBi?D|(p zv>sZjmkwPkc1gdpGqQ1|H~*94t-*11!-9W^ebu=0WjQE8g8X)AE8XWztuf|>_PWp&~>4CWULLCs50f`rvvV@vh& zGq&+N*ze97IhZ@n+E_ojGdiY@^auaz+q>lV*<8Xa0Im#j%A37;Z}_~`P0Z5gwFti{ zYQL*j6!+%6Y4ZlJLkSX=R>HpCI&MeV|CogHusb-IUyw+-a@1TSSb~J5l^~B^#x8sG z=IgWP?%)3*+TsE1>BmA>y)ap9(ey6qb5VkXrFC^SpL5H`^mP{_Sc_UW%3i7x%{RA_WCMB4FUvek+&p-MhzCt%$L!=iJ}JfT+SV} zX!J-uG-^!QQ{B9Tf;pCr-R@|e9dLV%%K?3uOg<5@AS5`Ip zm*)7yHyX(Tl;`rFifm@!QrUQA 
z?-xr;_qLy2mMQPxl-~yGfdqN4l843_4z}fQ)f9Nwy*RWOpM_d@TRIl{>B2#o6TjSX zEO6J-O2{!F%r6%D@E=i7jpDQ(5M# z6%<=S0<~GJ<^PsrShHkw&p4 zlpujEEfJOu+#m4>i{!yEE7YRciS(e}EBeEuo21XT{^^xslpsN|6Cw0o(Y(!TWF&r) z-YN?vNZ5R(5f+=_HoQ(z{jzjyAo9wkWVzN+oG-}Ljl;t8MVGATfymaPWr*FHF= zj?`lm*`k(*oxtT$!j^^2N@^{Uz1yBk#r7_)OSq3`?WUGK+3nJUY`3EX39{j(KXhM> zd;{r0_6?9gEo*h%yCb_@Y#;JiN-FNYrbXkHzT}|<`3Ba5rT8rCH72Y#buZP8j}rEA zkU%XgZ7fuK#=oRL4!t)!%i5iH@~uBOs)DUf-|X-6tn_)^0pTo^AYuKl9%OfCn2alq zupoh2mevT1*$kC>mzG$mcr;kq`AxMa=}3guWq;5g)z;GN16qsd8+GcQuHtRK_F9i( zT)ps1AvUgbH)Hi3F9|WA{vCmJ5D8q{W1$|EPRlI1>C_;Oa!RRq-KO}Zr1@TA&}O)2 z{|jPejQ`V3G-D@hA0*JKp})tnsa)3D^LZ{uapbvdAyIrr(tJA)ts1&zI^qMec?!G> zZ*^ab5+v|DJap5$^ghX>bX$+LC_w_hqd}u&+ZM6N>KtegAW(~998yB<^V%=JDXqIF zqh=;bkiaii#6lFskyVYOI7pxtez77L#X$*va{`}4@!cI&F^c1t*R*(qV~YgyKnW67 zLXG0s%E|wt1PLpxMsbip2@+N({pt`zor}<}Mc=^2o7&XmpUc-so-EnB&U|E^U(LSa zwEeC=rxDZU9r6j5AYp0qQuVJi<>lpN^1ce617OdT)8=j2ev9s(S1oT-WRU|Tcu$A# zYO^nkwDt!Q_FN=vM@aoaIxlKxApN`HRw}kKIgPZOF!c$PVA=WigttxSUWK4@uPzd( zMV6%y>TSP~`%dKHA=S!8MKwtTy464MN_Uef>Pt!kyzFaqsV= zxi{0-NFE-M>!Ab*@>nH5_HpSincaNO?wpn}pS{C^&-PGAgKuhzkme5Znl7y8S=h9p`(#Jdf|3_g4K*oqexnz`&yX zrtZM*O*^;ia6!BF?JjCs(66Z9WjPBMF8s6U|Ht1)CuU^q+qqca~_yqo{^DT zaLdBud6{3;%NyOHOF>3PM#%*ork=*%DeTgJ8IceZ)jzL@?%K0H%93i&FUV_{xh;jb z?x9_I(N0h0`NXxQ-;_6-dxFPA{TdCNSkFIGh~G}n&WrXrVu&r>@^i~4{vA%sb93SshR2vb&+vfKhwCMkp|LqnMQ z5F4i4QwUQ}hA?%W!#1b{q;8Y*VcLQrZv5_{=wA0TDP66pWJ245pL0VTiq51QP z1GWB}`$eRo*wAuMCbVXK;^o(u7-KJnELjdOCQSCy8i?I)Sg7U~n- zo;VsE`O0vyp{+WZ(0*ePl5;J^WJ2r7N|n^P)@?GOt;Z)!&c%kdpJYN?s85*E6++u+ zGNHX75~5a-QMLK~-sJgbQOLMst3+e?%#+o+6CEw-UyKUKY#W_vrDw4?e+NHAUwYn%RSLbW(14f_No%-gf> zDzTvwjF-cS{=xa6zqEvEu?-FTHXhx7ee}T0abiOy7%zvlz5O+zT5LnZK2g8M?3`H7 ziDE+~7%zttZI?4q8KGKiL&Ls}-%j6>8|^bnY^Vg|<*@dGza~_RZD`mhsy*MtX_(nl zY^Vg|<#3`$aXu;|REuqB*tc=rLj#;>r&?k|B^WPTzub!3u zp%RRj!*)Nfj8HAMp<&+!l@H~C{}K|67s51i`kF!%S3*dM7#oH#sfq$YT0z<+Z1R_9 zL=aVzW0mqkgLvZO)zG9o_dMtt@*M zbZMU--+QN%_v_V#L72bOfAJ6J?Jp4<8AzxE2@dObKUIdH9ZMoE)Z*{Pb}SO2R4qY5 zB}jlhB6i2l&g^p4ZQj-=H%RFsp<%W$xPFIt>A_7ztoiMO#Ym_Gg%>@knUU=o6bGp%NrGtluM{*xC~^Z>nhJ-8kP78!AC9 zwz0d*!PwSW&B+GUWib*eL4w2j-6t;i?%Sxd@;b4h64YWFkMEfp`*%jRv4Mn2kl?U> z_iePUIVQSf+E}rn64YWFyY|JrwU2iv8?#$BEI~pgNN`xciw*7~^j9o4RDxQ@hU42{ zRn`YWBuJRBAx53Ns|BShSMxzOkdUw`t31D~9($&@MNrN~g7I=#zehrkt$nQpm2R%q zhe}Y3ZBRYs1$CZ*gi4U$uzvT;idu=I?Mfx6#WtwDxqjQd#=WKl36&thVf`KnolyGo z5^C*^_79by7TchH=VDxtsyzN~%8&@o2NEhlg2VdVCrk}G7#C{scheSJX(c?0 zaw`!+B}i~szehsU5_wGIwwpqz7TYjwnrv`BkWdN6%VGWQ6Q)Or4V9o4+c15TY*4RV zf`m$t;IMut;+ft{%vd5eRDxP;!;CRxgGL{VPze$o*6)5kIQKNdi4B#Yma*Z{m}tr> zi4X}ACTs{AsWZ(;tu~O5uqi8^Ka33|DCZ)~3gAU*?Yw3X8!AC9wn06L z*Co^kX_djFXh5h02@dObS;=re=e3XYbCsYL+n|v_)^@U5DnUXeNN`xcb6M$LklRYI z=8TzmWvt~e34<}wCRCD2Bt}soNRj^%art3i3uqayY{HCvG1fBcT?tc(1T&6Js3e&% ztMgcpb50Wd1*2t*!>Yw$s#!C0nz9molq3^omyyPX#>IqcahO_YumZ62p^{|6?3U8l zKti>cHltEJHRmc}EhNksV-v-!rIKXA>=VZM0REudd zuGluD&Q-!%NKj4lEr)GGa*l**F>S_}G&WSiT1c4lhcq^jP%Wm-7-QRzRH=lukT4^L zZ9{Udl4Qc1w%CN&Kti>cHe*SGAZ`9jNH~lHo?iv6y*QarE&fjb1-n!sNSpr>5)LDQ z=U0J^$_N?3D<2b80yc!uld04mg@kG`ZOSk&l~4(5A)%*K0a48P#<)1FS{ycQ!A&Jp zl1!NPmLPl+6qibp3Db@f`6%`gn@}yLO}|NFLnW+*gq~}q<^u`UVw%PnS)&9*kPnrx z77}`{6%au_kWejS!_oDX-LA|CN3&!wUgdFE&$R*@m@0`2wKz=qXY^a2_kkFH`R2xXB7Sq%Vcs@_GtDvk@!dgh^=~t=^ zBvgxO>YK8^N+ndnT1ZeL5^Eoi{=Z^Q)~{E)1+5$2=+2paX`I8KJrs7ooikR#HK$dT z?_z@q{wroJl_V3x8@`1KPMeTasU(>=dF*NK_9;_rLQ;i< zYBAk*-i4(Z8DniiY^a2_keJzHd0vAm&)S68Kti>crf@8U5F08nS`sgZKdth)_w2yA zwhbXvl7?{76KI3-5$Ez@T2mX;M=@6+N;r%Jm0_&X`VdJnK`kcEEhg25#>IqcahTdd z%xWb;s#KCpP}@v;_x7~;SO}Hac`7XE-*ra(?SJegV)uUzZaLEHZm;v= zDJg_%vF%re&W@k{*bPLi`LT7_z2Y9P=8*@5Pzl>e5QPPAd^9_L`9pn(X#Q&R$eEu{ 
z@XqcNDaW{&P%ZwRkufiR{LHqcbe_0kNO=07iQcp8rU{`Enxa#X-+$%9M0|Vr z{>Y~VcYALyn39WeF`-)g-IO{J83Sj9f7x)SS2lW{5Gt{W!h-WIpB6vZ_eUZ=ZZaeC zW#3_5y*m!35URyCOsk}}TmPPA;j1TH>wWkBe}qs8+ei>p()A|ATeUl%h$dYZN6wyI z)O4vq%C@gs5r{l4saP-c{?n@z5 zi(^WR?ejVu2p2B@uKa>?CJ3PtwviwT3-0{(ljzp}@cc1*&wCM`3dVdnF@;bqW5dii zO`OldvqsI#S=}WdM8Y-_L}9@nH}ucDL>_h4QMO zh-sI97he2q1E*qnk=RfP#>?S1n_cEUF@HD_eJ(i^nYX*G^Ycc(e;}b+Y=e3l?6OlaZ>x$8m0-La9z6UBcmAK%hRl-NXJ!99~_y5(!4L|oadBI0$q+o>_fpU+J`VoazO$7IToh{xBz z9p2UK7H9A&Z;1_+*hFE$bGsjNi{Gysr*+A?4UvEL8s?mQtG_NmLbce2X$wRg%Z`Ts zcw>O`?b@0iO4vq%C@i>c$Y^)hDM0{I&Tx8w7n{uz2IVy!vEw*9CN+MpERvaESc2I6ePr}|Q!~&1J(W-`W5e@z^xa!u6yEh~>*z<kR&hH9}5s#!U6h=hiJ5z9V*!bx7;+pZKF zDq$N5g6{4Lv*Wv;7)LfP_%d(H{mrU*w~X}L6%wk&HmHS~bB3|qyJsJ{rLDKLOODu3 z3EM~z_8up3$|+j#jx4d>wO@tV1f8bI**()p=!#rU4d^`G|6?sARExjsJ|Q5AnMsaU z&))gFu6+YSjG%*G2|c+Fh#*x+s20<@k`0K!hDulq2|c+F zh`QUfWCJ3wp%T_YLeKjDCn4wiOzTQEl~9SX5fJHb0WdBOs}_e7D`YyDuzHkA zk_lbOrlv|I$%Nhx{57FkOzTQEl~4(5A)&Vee@&)?x-^wYtXP8*6~fd_8XHKc7N<&AnW;8Z!dgh^Dl?T(Nit#D7|qVf zIY&aZnATNhDxnhALc;WwG&Yb>Ev9wNm})~Mtc8RbSJK!(LbaIIHDjs`m9Q2PW)w|h z0}0h)TGx!JHdMk|NazaCB8pi{CCLQQUdoFUS!E; zQiX(SF|8}WR2wQ`EhMNm`2}4X8%U@Y)4BpowV@K$LV{W)zo1KF0}0h)ns#J~+a(=+ zB`wfrM%?ow9ljRv9W` zEhKauoSdrQR5s~u!Y$abeSK=_QcFxoI81wTbDI&IQ``ASA!v6mw+l?uX@a>0ut&Ir zgtZc<^$FXl`Ox%MLb#O@ZZcs~mG{?#YB6o{WE0d%5)uxZvJ!hHwTV=jm1U4a(L%zs z=`@5nnNUK`-q;@5FlT+qrK=K?s(>&jFLpk(1|=?&Cv(DvXJp1kkPmZGl*fdga{a%j zDh*+e44A5TvJGQ9eM{tg2%%b#r!++3Gzw3Rg3|pTsnRnmwqfeYCp_u0@_(B0UA(;5 z$jFee+{<&=+>9p(DP5GLArigTvY~MqTC_Nvk+G20a7`07Ou`b9G(-|c@3H1|QFO+6(DA4nop zi)nq^VC94Cq|HYhz9;g*Hjof4%`FqA+=R#|9@FT)B?QPh9BBo8Tf=7bF(bpXMJFyf zRBKubC3LmK@hV|#7=n{?&o$#R(-D*s(PA4K=A82%``qzg5U-S|+F-(jrF2+|`L9?Al|XJWg+Z#)6Pm)5d=$UG_w(GRoEsbyugf=X z9M)cIQY+v6d?IEoBUFp?p<&;K=}}@sC2Rx3rf(V>l@Y4NHZ<&)l^GethDz85hRI&AS_;OJ6hgI3 zK7cSIoaRF$CLa=Jg33+S+J35#Pze$o*6%)HMryI464c_bwo02o38zH0>{PAz?qGTK z!xuXI-|_FBX?^4H-uW3B+hWhp;BYSr$6_yC^pnHi&$@{3=$~5mO?l^fY=e=P#FC~mPqF%mI)!;$D6O)qi2DB6~b5+pwDULrO! 
zGV+KRytK3TOZR%tvGXDpfm&2XVuMP?i-gY3uIY_f*~j_rk^Z?TL84pTas=)**G zp3=`d-Z|vFF#be~KrN~tvGK^u$ClD-kxT1)+wbq|Gz{0K6;ncj1cd3e2-u)srbtUT zkXj-jA5oNGypX3ntE?;nwV>3Afcl7{1PQ1cBB0JaBv1=l4-wF=Jd_{-ZHx$LyB2|3 z&$}ydi2jjJ)Jij{v1UDwI*NJ ziHP3$dsqBGM2n2pUfnbP=G@cnU=$@t6f_woHfnS~nR323<5cg5#(kXFR}WbPYJJ`A zI4A z+ct7hf`r;TzNwR)4~sx8Es>e6@`-@5%0&qhT5cEa=|u$8hee>4*3+}@Wkf)oJ19Xy z>;K&zR}ca1$|6t;`;FChtsaF0wtN~(7Jrs|&10uYAH@EFeFA&1O`rscu@kH3KY3%< z{1x91p?shXBv5P7jdk-+ZP3+>B}ibr?blY%|D!W&4Rw)@gpOBPp1Gjbd(C<8ykX37 z_vhPRq_ysNHzQ+YzJ^b_^SJw5KmU6qbXL3NnJe$8o5ci5kl^sIx4%fSjHi4=b}h^N zVS0K(wK%3({gg1NdQokt1moq@D$OghVsHLE+p@Bz&(ECn%x<^mn8ohglD;m4;~efW zW^tUq`-FG->seL2$AwTyB}CjO!UYv+2}6sGjEt@o9d~v#Hct3xYu4l$eyUUgDM7;6 z;JsQp!q7qtAC5MVPzl>eLl_&|X8k+4tR!A6H=^??ABOm0-v?P0T{cKrsRYW(HTIY= zIpW|E5pezSo|S|InkCC8cW0 z;s)M@%L;O~-dH4rro|}43H_&a zeXr(Q;hZf6eJujDe(rtDt#QLqw`lBWva!Rd=e@tMTTa!_`#LB=;>I4Iy45!Q;@<-ZSdeq`ew#q8c6AQ9J+EEwHh%p&BHn$vjyL7HZaE{)?(d)k39Kjb7#Ul~ zd*Q@_oaP;D0=4Xs;g#KWyuC$TbDW?0$>^ga1C9o0I}+OUb{(&IW_Hfr-}_mqLIO*K z=JQ5%y=&_C$Z4{%pM&Ltslq%(LgzL-kh?lm*BN&0`A(-nH|Mw6yxYBP#$9g1d86}3 zjQ-HQzr%o}aTf_3k&&>+_I4xdcsqu!&1&;Ze{0;u^5GQfI2H*par=dz1e6sKO^&y8 zKKyY~?(wRfyei+1%y0co#Qi0EY#i;;jFEpr#%t7b+Zy6yB4|xb=ROGu5@_2ReWa{( zOoSF=m7CSBgfQhEM*`Xvr3!kKMW7b6b|RqHdMH5xT00Sw_tf!nuFuYKkN2|()T-a$ zxEorv+Wo!Jy+pt$>S5Zs&b1U^>>>h2Q4b|Zz*tGdOEij3ydx*4lIIO>kv z)-nIS6;~0l===uWJ8of4v#3p=7N#~5%4<{K%YGxA^GHEoD<4Q;DOjVZMW7azbR;z5 zi8|h(V_kBx+w}Kf#IgE>UAnvX*YS!T>ymTMv3?#(px4H+X01`wLoMusHbLvF5?b}~ z`btt|R(e+7hV>kkZin-hm*n0RB42;L%L&$o7J;_O!)^Dnk_DHiCv-G`^&HtSsp7S$ z*f6U*tE{jLtrfBdtME9imgBGvCL6Fq_E3TZjw_WDs0HimBpVnnj!{V4`wu+d!;?bY zi_ktgX76!Of`smtXzxq}?xT=EE!|Vm-kAv8tDyu5-SyGlnF!nuB7s`EH>ABYl@;ur zb5VkX?p|r{Oa$zmEdsS*k4uE^oxfU@O(%E3t~!o{hH39?+n|%XRD!iMwX}D(^ASY} z#;bXvy)zL|Ru+L;T86ZDCIadsiV`HWZfNgJ1k|~Q1ZrtppuIB@uy^)Qg2bq6r?)mI zVMO4$4-%-==+>u*kdq=J_Pu+#o-KK(1t(K+n|Pq_bpe4AB(VQSLe+Z>O-rB__J8v2 zyL4sl`=5?>eyG{56wce?dWyGWd&f9@cN0X-ePJYm6G0|2%<16p;THi>{a=$>VqB=D z_WbiiP1S=lPbmW;NEK-XVYX+TKt_?kkqB%g5hy_d%gwfd1ZrX3L_$+TD>EN?th$^U z;7LlKhR?O;9UY%cKz&eI-P3M;W~1NV=hq1iN|3m8%@NrT`X`Vp&wiv?t?##|5U2$s zHQAV3cVy=K*ROBIHc)~DhxP0v5~_H;M|9GzJ|)pQ_gSZ$J6cXR`H1CdFW?hTpJ=t= zY#~sROmHdq#0Oo@@XCMrF|)(Ifk7W+3Dc^@y_v)QDJK(GT)H%cKrQY6KEZL9#l~bY zi4r7u9Ag{)DJK)BcNvjFpq7q&KEY*l;@(wRTxKXif=5@j;h%CcQU9dXDFkZ4cuY0O z#Ny`fGC{w98I07l4o$?X_WV;$Ce9lBUJ8L)np&UWoQE&k$vJmWf`pcvf6B?k{O7i( z5U8c4?h{-OC$}xjBGN$#5?cTMDJK&-RbNgaP)pmJPjEfV+g}s@6DP(7d=#jJgWesgO-ocg3j{cLif5hO1hSm&Bd~oX!AyAS`a4RwQob+!3 z9C-1?EdKXckl?Ut!A%p{pmU{wKuI#ew13Ze|Jj9Jjg4<)ZJFBKL4w1o1vh47gNYM| z>=Xhe$pq8>J?GkKqrEv_9m*<~4yzX2{L!r?6JNI7F9b@G38wvfPF_pDxqW-q zIUB}gBEezRg70((Sqb4J6NV;&Q}x>`FLJ7)7#9!@Oq}fk|- zgg)>ITwfspBL=M*c=Y)s=Ur>Y;T0qjFm@SY;~_7FKrI;gh~QCl{m+xE8HZQGNWd6u zh$_F{pF*G(jNnA@Y_NBD#F}wB-Eb2E8clpuj~iLClc-oL)%QXc7Gbr*+KUmRA0 zRD;7t{Y)610_hn8kGoIy_U3Jc!m%tK?3`xtdL8}mbFeHPz(EJBs8F`f!BUz z$(FS-`)&fx$lPtkpSl-b*foFY2EKj$WKjdJR?G1lpW9mGpahA!n~%888C~bCnfE`2sBAE*T-O~jQ{R>w{+$zDZeg%Um^+q z`6byHL^jTUy(D{Oj7}gE@mlTb*H=H0`sg*MXPizTUufXXJ-hbOep`z?lt2oFHv11l zG(7&$#{Ovt)q-*}M7aIKLZAfdCXTlKRNXw%Cs2X})W5N@uq>58EofszTt;Q}$B@!3 ztEsF|0wtQ1s)4Kg`j{B8%L)lB^+@RF*~_Ijy}k5MDV8W|Veg_dnO!@C$S!>{3nfUv z+#aJ}tvmNKA+iq7vIx|InVN`xZ@wi&MVC%l5U&esAFK7yuUUQYt`IXS&O#!N1gvVv zM#&|ol=0}cY-Pt%un|WsOkpJSQ$<88wvferCf;lH&2v1VeLgo zfDKyv6pVRUh(Q%MwZOPg3sOkLtrMC^KB}MdP^pgRyqD1#hxblsFA^%6xJ8J04Gy+M z2@*Od`n$V>M_-gwjmUm56A9FUnVNEb!HA(kjJxLi5|k*xyI+in=BR@o3vqSU`)O@3 zp(&*Kk#c^Xw@!%TCr+~n)Y1~+vw=ux(^=yr=Tkd$&q4{V4eeCTdh;|sEAf~>2@=qD z$;NeEiX>Ie>QgKNwXilLp`IgZORBQl)oh7%3vJiIwh#%uwREP~;IS7aNI;l&MU)S! 
z2R|Q>k{HhV*qiOLLJ1ODhPjNc7Xx;D{)vj}tAfYXWSDbdcLJ1Pk-iW}qixMO> zCV!pi)f+GM(d3**TcQLBO|8G`t6ADxh>c&=Yl#vhv_$+h+=g-cTAy*|^Ep4}PLwxA z`br1xQeC()b7?;RqFWm!P=W-kyNFn`X!Ejk1Zv@13L4=`KFw4DB}l+bL^eiO3@lXw zB}l*wN5t0d4+b_+f&|Q>L>#=YG$2rd1kBn*tn0EfAW(t?kKlKr9-&*fqYX|COW=Q`@O&P{txTd!xxa$*iIrSV*x z?hSrFB*dek>1$DfZ>#auG~JjS-oKu0-21nyJ0XEua86A*&wH>|OC}D#8Y)8x68Hw1 zemU~2m$I0c{_g4}NT3$JfsTZhzPCb%QH@WXt~uv(e4vxg^k;8=M~J(=-cq6jmlYC_ zLK+j#yMMP3%jZ905vYabM(^8q#e~RuE2|W5WgzV_v=<4T`PwcaF8FC+2}+QFQi#!R z;+j{4sBS9cE+xO#I_WpD@A1Hy>@i8osNa&(%VX@(Kd(p}{5?GswRWTJ>YZ&( zD```fj$kcKq5lH=;w3Xos!k7D2}`sT_zgA26bXHEXPt|=tVUJ4`C{mU!L23VJK|Uw z30>Ui+ZJXX^zdyAjOTHfwd0Y{<|_u2D1j0rV3r`_zwZrBOQ06MhKYpsG;bFWC_w_| zE3z^2_6yPys0H&Q5d&^%9}t*xSPdkd6pfp6LzdkDZj3KrL7q5Ya2v zDj-k-tAV6cJz9Iil60v;0@e&vK?o1k@%GZR$01atC&EcH|!CJ%YaY!+7zvArJ-gx|d?A zfJTDKsvX$~lZ|8o=s3LnAsb3u*|T9ON-811Mlu03zD@w5%J-{V;X42%@P^@8Bkw)`d%r+1`=_+!sWW&Bl7NRWUq?G3;Ny_rZ$c#sbwj9)6}BNrtYFXSm^m6b)H7Ssn3#xIrY zL%e=XeZ-J}x*-DU+`+g|3)%t^#xIrIm4gx_pp6j$ZPy}D3;G8U#xIrohl3I%CJZmp z-bDoTL5o1GH)oe>-z38LrF!g_YR1|M_6_B+_O)r;Uc<7U<1+N$ks-l5NR=SLueX?H zuU6Xz5~#)NM~*2YV-OLwF1}7i9v(&eh3A;`4iaBf)%tJG*(T?FS2TKBkAOf4#*6mo zy#1Ymv;=DLeVadau?_P+(PO_<9HVfw^Utq(Rrn{V<_)CBeyJ!yg4fraC(AF@W4}}+ zP|H6%>s8^O@0z!q9{Z)D1PNa0vkl8H)nmU@Bv8vg(d$*g=gGm_T95rwQGx{TKiG!l zm+G-!DiWyWpEGhB1HV*H-`4u&-q=M#!~Pz}yx(OT@?tBQU@f)npU|5(!kmv>lwiD? zC(AFD%gQ29OUuxIe+BiCixMQXZY;l4u5)>@#rfd4P)pl_|NaWvmAt=Vf+a{`zp>h` zMW7b;o8Yy&$9)j{1om^gf0!5XLZAc*UiDACv1^6pmzqML7VjB;@7$GI3HL#?f${R1 z@stLv6&C-fgbm}{SFXN&tz0iTfUI z#=EAR{nd~_E%uw}aqarJ@8M>=Ys%Rt4kbwNe)qH+j>dhzJL7#;uHI)F7i#eiNmrSH zcTG9_C}P?f6_P^X@qN_F-%1ecR2WKa8`VpG}|y32gZmAwF@&hU_y!LkSYl+KGVC$0AS*+Aa|=!ijI68GYhNK+7k>c;uFo zN3KPn7W#Px9&Y98;ikJ(KI^HqYIU4_25rK4xRtYq8%mI{^I?1x%h^W}wXoc50%imC z)Z;lrTA#K`ECp+h5??a25*Gh4e~mI!{L0Mwn#u}hH4kPri@?5FIe}VsuSFX$JInc= znO~De23YSpxP#EO2;b4#y9r#EpacnBqgcLVvi3m<61s}=Z;^3*g#>EpTFde!lQkSl zkkHkjf47b6L?lp4*Nm1gnXE-of&{F0sm_frnfKkRHRN|#%-WeoQT8{GRk8S$@poFu z5&`R7`K=d~AffjBcL0zNi$E>SpXEy?djpgpq2=bkPJsH5->l(Oaa^dS^y$sPwK zNND~0ZziB!$*=XW4UP-7`n6u6`$Q@$XuBRtkofPE<+_6;f_@80_k$LJTCkfW;-5dx z2?&%R0V`)BmY%gBAW(t?_RYXE%&RCqwG8&odP2*4b-Cf?67jvZjGvd+zyIM(C0GKx zdknh?8_&P`d1Zv&<5XDQXI|*<541ElJR$)bm7f|!v8=EZG_`z#ZJioKQGx`Hm6a2y z1t}yO>>0LZ{KInZ)qCJnY7PEA1M0vvZ`jQKWBNN6ytrZEV?v+=346!VvDTW*PkUEe zMr$2?FDh}N7RQv>RRFN=$TOe zC?rq|#xAnKFXEs4)A#d22@?K@%58y9CYX4;k?-e)1Zu$;Y>1;%w#dtVlpvw@{1XsFhF;VlhM^XsX(lYc3u5BxE%i0Km5+vU0@wDm9u{_`NCCmJldO zC8Rcq;8s%QjS}%uL;`A1{2EghvQ6;H_v`VtUzGUau_I)j(ADDQ3E!+Y4 z#Gdh;#n%!g&~{^QHJPh@kZf>2pOK#@GY%4{1tWtY246Nn2$ZA}U3tVHf=8cuzn54u z4ohHUP#+^cJvYRJ6^l~{7~u>d=j}xBDEjo+x5e8DMmQ^87=zc0f&|PZhB#-)KT`@;ZoY zFmdVXV=08T64ui8<`dj@$2LB&gzE(*Nc28)k**)f1`}5{ea)KHI4;z}RX?40)mp>8 zWc&*P;!CD`a#$y-{|~Pp$;O%wuP=G}SkF==%<9$J(Zf2C2=*mwx}kdtfm)bC+m{R_ zNZ9#ckE%{z)NalmRk%XNHLmf3sz~^fRq^h<7<|b@f&{-WDsSDjf_=VxU$Xp*?@J+2 zi{CR%XxOzP;Y&u|XF{OFyp5`0AGOp}k%wD~4J6CKK;_OckQ_&n8goj4Aur&#R(MfkQU#SnK;PwcK3fpacmHb6fC#KhfCO!Zwl#)xy*U z9&TIM!wn@EFV-y0Q41P)mrbvFaco}L7sljFsq!OqFJh9yOXgGMZu1|2Cu2@<#tq7m-d55&fagF0ITYT@dPe&_Cm6+D7E z;a}b_!x;eQD6~hVTRv56T(W;~DN2yQS(MgSH~u6<JE)@CHs zVCWk{9+V)Bf}>N&|Hvie%eeg|T5 zBcbv8d>bW;Rw3bHypYE!MeeiBB8sB-;?_2@XJ-1s0H(&Yq!MJKYYM%w#(V?4sOQeJ+rdQw@(O^Ac3z4 z=oa~C^&nM9pcc%DWW$-#o_+ho21;NabTKCKEN+&|@7q&Sg#^rxWFtIfb=p*+7R;hV z92|XDK%fNHS4pWVh&K=F9McOoidOwcLM?s2JCFVD@D>1Hg6he=e>c&6^M~APz0<}$ zVcj$!fv-a8O#k|0LU@}xiT9b_bD$R9Cx~a55abz_g%Tw2ji-2@2|?axnMj}(zVQq^ zUp)4F(H!#YNT8E$OpO1acVEJPP;N|+fE3dFvETPm^uR|^2-L!IqhF19?oG+ZfSbZu z_^u1mo`?3t<4_3lIBbCuB%l;%Z14WSQ6Wy-y&Ox|MJ-Gr`TQ)IAq4x9!EK*-A8~E+ 
zJ3CA*&1yT_mT@aNqgR`jD1rOP;8mdi60lpmSZ!E2*PB?@!Z(+6a@TLK5R=!PToWZo zXxM+z*zJjNLY#EwO{Kf|S*pUz}>?90{yV@w*cn$C)F!K_V0t6z==1WJ&=H#(8fN%tN|OQ064AIQeE8J`6NN|1n61`*`r z9uO!&0#-OgT)%Cv&YafkHkdiBGdbFS{QPrZ17_qn%+yIl$0pH$z?wn=Rs)o(fjb^$ z-#%VtST%@Ru;L)1`;KY>ff88NB&CX90+Qbyzv~mfJAJuGqh7xHNNo~>9-d%K}&q?RLg!% zr8aI__IxHvAne+24U~8|b7m{h!nYVnufjCvLoa=WcVI!zA&fbvvQiuQz1^1Z>H$+y zNzQMZJr46>5q3Vvx^q?Ecwt=XHJuz1l?eK0X+_TbWwKAgnA zl$MkaBo-2B{-trJ-pGHH?bu$#CltwqXc>DPrkt~lm$xhjA`!3t+YE+q6U3qwOUr(J zAlKLsf~0ny3~g*2t`jM3Id+rDM+zadw){lSYu6quWo638(n`(8{q=g6g+F~#Qx)g* z`dY71xGjdMbeAvd&8rM5;RZCj+5d*LT_1jGIaNJ{7(P6c=R>VEiZ`IiPVk*hm z%pKdebBf5TG0-w8(Nd7_L~?yxetC_>v$xRB6bh9T2@O=}MTkRnDpM-rh+{r`8{!lV zZ)HDwD@7SnDV63q(m-U&|Gw8+_kO<5vycDvzy9xa`L%w}eSg+>jrY3OKIc69`Wt$8 ztslOjNBs`XTeWW9vU&UZS6|orx&d9fJsY`D|NrBEdrJF!$4j)ci~D@`zZWjVeZKmk z!Gp4P?AM9vg(ZQDZd{=c{n%7kI32187t#;iMffgjV z%sn49YCI8#?97W2;#KDHj$Lp4vek~<-1~`Gcu&I=fvp-fszX;9UDS*CN89yN1lF~8 zPkSQth+=D9^N|gfFzDgv(pbTqXT#{a2v`e-n7F$}kVp&b)#c7%S|#)^wR#M3_*+e& z#Y^0LQ^zo^HzKaDR6WzopatXOX;o@AiTHF-i4*~&=pt}LBL0~>XK^2RHCjlZ7tA?n zyxKR%MVNUGBLVY7#D6bwG|++s%q9`sn)6p2EAY08TO#9wBch3)KL17$Xh|pZ+(i67 zUygO@3SX?@nk^aFbL=hBnizlbeDvh;o1$p(5=-+2hAA>oKUiben)A_Xs@711SiFSn z6-^XgaV~oEnYtGvl1!VLlA8EC_gwV*cNP_(1vC_4#?27ibND(U_1 z_W_(iC@T$F3l8ztzdr|{-ta{TYE`x0-^{-C;W75>4`c>$H9$EDG>xthk(o_{Xt@YM z8o4+257M02Uo5`HrE!s!NQ8gT4T@NItQQhVB%n+Lui29R8ZjO(?zxO2tV+*(Ffs4s z_SG-19uGkQ^JLPZDBD66uJ4$@{sng^ddpftGYaG`RiCx_pp4(L#|E>tMq!XgS?Z5eSe(dbWkQo8E`J*jlYIa>40VUH>Si2$NsA2-e_UlTKI@9X_wv z`nqwOUc|c4i_6r91hvu-jbCcCHG}>!A#vAuh1d&Qt2I9EwClwh+s38!gU5ymqsP5B z8f)&H|Dx4vVH!bgS6)oeDDaFL;^{_ZHynSg1;xZvtJIF7brtp{^4G(2>G=-d%L{gN zZ-W*t3&9#(KkMjzv<#it8lV5G8Y}F%crn3!;8tpa$0_hqBc&mGo4riWRSR7wjy>_t z@>{Q+J_1{nM1sq-qtwdO8W(PDw6(#otSi+j zX|2|{r`@fs4^M2_md8gdNO0NbTe5+7u_nIk_U{YHqr203E?$OKdr}kEPguG2=#dgz z*&?IJWnLjXUN}#h7=3X3*5doCZbb_cT&5iy=8@3E$9Hzy`hy)vBgBgdGgfAm@aQ&w zAf#F)7WOiut5&Rivgl=ZLIgE$t|d=S9TbUeV>5y%d z*dFG3k>FLxXNlJM}50(T&8u!(OH#EY)@)pv~?tKJh%Wgl3v2}BcX|p zX>>Pjn%0`V%=jpR>t`LFLD^}VXisr>$hb|d(ZaM4TsD!~v}z2+_KtrPQ#(rPLN6}M z4%Qk46h*uATG+-|)EF5;jRdSKqwx_%>e}1JDGl7`ChjJgHfvW1+EG8-0i|K~hw?Hz zD#04=2zM9l+W+jYsx-vH^$Njdv**p|4%Cq$jSw#;OvErmGm6wZ=KreZLoDoNB6Sh1 z>OiN#{2kr5Vtc)WoD4b__4#~sv-8$`<^&={I$=LX@|E&Uc|zCXd#mptD*90?M=s z*C%|w`!|dlm0?!N-n==%>yCB{pe3CMG7-`m7bAQ=?PX|-wTo<(vS__D!f2d5Gp+#o zks_#1Tvq*U_%{^{pxiVzDDTsEtcMt5dzmYp@Ff;F>$u{1)wSo^lY zmxY$(CWu(}t7TuhyO`B&-=}f3V7*+HT@dkiAD&?kXxiHPr0=pcLcF-9-pjK>!#-$A z8sistwcoj}j@AC@5sDBC*2`tt1@sBzX1~4aTsN!QqFLLqF7)E(bw;%a)thrCt&)3R z?q=WqQA6w4ifdwMK?2G|l*_GW`)YKxK3Uo=`yzyTUaeV!(87`PtcZW!;2!q!%WGKq zV~4sdSTC1l&qw_G&eXC0=$UCvZTCeQAzrNg&a`5o)}xP+#!I=^*zwo@&i*cbUO(G`Ot!*cop2@!H<@y%ZgBpni)4OX?+#S?AVpkZuBHLfNoFc@61eeXK zB;txQHSId@KaxG^zHk~LUaax1{ZsagykS}kFYxu=-f4t*8I8D0V>@Ul3)XA&RIE((BQE`*R;38)hw?IGWxK{I4r8S(STC1N zv~%VoF7rX7nq+#N&Epb=sLNC^!2yVM#dmLiBvLL}_ zvnuH{Fy}{H&JU>zy;y@zICJ8db8tJKgJ?m5%W~R9{P`a~8k;sJnB8pFG$GV!*{029 zsgsjyk`s<*@Z6IwLM%vd+2^~_oSZdQOkg4|8c2v2wl?BF+h(SVkRD>aM$hzoX8z#V zur0^qtr`!Pekcoiv4)Ic#9#GTz(t4!2`BWFf?lj4Ya!xq zx?qEg5DOAqmgn@d)K0@P>fL|0U25arIM#(;{9M*_#Q#nA@h(CvNN`!6NBj*cd>zQF zKf}JI-g-G})s8|h){wnPKNIfYCd7gSm*siH|HI5~!ThYr_S|7p)u|>H^kNN(F%f?~ z`WYt@VnKq-@;u^SaA0U~PQO9+dnGDZSQmQnbBUGdgjkT^vOJIYw+(qTSiVbLyKc#z zN<%E@#TpXx>2&_ZO^5{vF3aZ%bb!{CCF@Eo z=*1ee5>1{M7+TInhy@8Q%kzl;>7zMOTJ4tX53!&ZYtX(i`R@PzyS@Mku^_=^c^>iC zzV)jp?Q=`^xmeJPH7GKK^jW)nwwn+O5?q$&5&z9IvttymEQwcQK`+*z2xsmeUP3HL za9N&5{L_|9j#1pTB<_j@y;y_dvAIXpeD47lAr>UKEYBnUrK6wV(;!>U53!&ZYshVq z&Zv7IauH%dg3Ize;^&`1{`vjsK<|0e3fR)+?S-Ky-dkdG8OLpDUS7!O>tH_LjSW2+ 
z*w?S2BE;e)WM;L-po-;!iwB(!uon{I1=^Ioa;yHc?VG@a`4(LHcyW1GUT!G*?`3p1V2!2QItK6VksU>gmyn&NiDwqH4E~eov(8J17e62S z$8DiAJqH+#Wi3_)wya93R*8kZknlg(D0F?{Ohe2FFA4O_e^U`+@e&eabgQnKc6;!H zPS2?MKtjBjZhCmTRjJz3M#E?RhZZg)A+b_x5Rk)=mk=+0KD^)3?Bc!i+!{G(;W83b zj_Z3A&xe2In&66uepB-ybs0i=ak+AMlwJFqXN^W;_?>~+p4Kt6c!_m;iYKhIvkmc6 z=mrFQzLUxH_?F>lk;>)mVvzy^x^!H#aZt`GG6C z2KQ%vuI3yG@nU+*W0Mp8o@#3}+Eot*7xW*a2(hpi614Ko&5Jcgp1UXbSN(6(2=QXN z&-?Qe=bCCoSr-W|Rz{%x;Z-2N# z5n{o5xlA$N+|^j4^pG2a1%KS3`hkRaF=1AZ(WuaMWAL-3w?)x{^>W#)X+!W*=l`L_ zOUT{FwLj7b@nYKSTBEU~$ECq1-u-3`TDXja+`_a*nd8$NAMW;$>bca#gm`h;L=2-b zeoL|7g16QvLM&dweGc~eymzpBpNVONcrk6FXc1#27WP8I#N#3ev3LnN=XK8)wqF$N zdAz(@B}j-D)8p31>C@qKDXAt7E&o0HRM9NO}2 zApB*IC|bCTgyaueqrtIX0(mYZTe@G@9=ULu+Y!HN+p_{UK+YN zbA>wFC6`Di#DWBueZG|;Urq*PGN7@YHINW5(5A@XC8WJrFX$QK_kV&`x4n=7z4$tVQ#|#H*z)v%0Q5s5FoeFV>KmO((>H^>VpR_q#$hD-JOl z6}J48eOu`YDsmzrUaTQ2RBIq17Oa=c$0jxjZMyt1qw!mEX?Dx|8z~JW#EUg#r)do& z#Dev5`No|)tsYI5h(`M|*?&FUT4^95UaTPz!)O2@7Oa=cBjYQxn{8QPG3GreL=fq(RB*cRCa{2Aky%OUWOf(vEr>*bw|M&EEfyHl+#N&^Y;VhuU9wFVMm!Fsv8?#+dX$MTMt zeLlQYOS|2!wn_sD@nQ`pli{&KLM&J>mrHKXPqZFe-)NM0qn$lqR=2(hq+5L`B^(hz2~TUZx*@pH3piXg;-1eeY3GKAT+N&^Y;VhywZ ziy*{;^>W!n2}79Zqco5ZFV-+|tO!CZSTC1Nv@^tSI*KX{B*cp~Ogt`v5DV7JWfQf9 z(C3HJKtjA&!<;Kc5Msf4xol1!Lzq)dX&@n9tYOZ_A_%cyy<9e@s3FXGuGTIR;>8-~ zoG*e93)ahJlMNWcWEn~W3GreL`5Z;>K_tY2^>SIVGy18_rk@Hroqfm}GT4`m7W$p8 zEb-#!l50i$PhGz`iiB8@piiN))N|jiK*Zncf3;$Uh+|#o1=@DRf9zNd7hzgLf8XHL z>+^-Ag+#pC=epRzlJ{C4OnF0TIE3TH8j@>8{BQLeSIBNBk9z&WvrZeVtX~ z!Rj{FgtV8pL){szlr6CsdVhzc)B7S?qy)Hs5 zNYGEkv*dZi->K=WSp34L+4kzuN<%E@#Tt@pMf~?Yy~0I^1qq+8MwUE}_#dyeF!tTo zR(7*VQ>|X11nbFj+O-c> ziT%~2to?Q48K79yrXtCw5ZpL9w70Ye=pY@wa$zYZM8wAi-sM z9`O%}k1eSD_au8kY`W4A3wp7JWKI!(yE9z^NQeaqF3aq0Mh zPTA7EpSM?kwRK6WJS6CMN1b|k+>8bhx4$qcXj+wxb)gqOH~k~x;i*$ygjkT^vOJIY zTL+c|&FCr(v7i@gn0cZTXM2Y(E|79KR4@*G?GW=x(Kl# z!FyhwNBpxkE)JUgp)|yTUaVpE4QZ4c{FsXn3ldzG=Mn#g$xjB&K35uIK`+*jT#G(0 z*^=)f#DWCJM|n=aRlIC|(8MdHAr|yv4HL(xA8Yo{b`fGhf_}?TVj>aWKR+U9;;zzg z2*-;xOgtvyK;}IzLM&J>pC$4<;xBuBR?wUuN<%E@#Tw>ZA&r}!sN*8Uf&`c4IsFD* zwSs^-2bG3c&`ZsS{`7-1Mt)r@h=e&gLrCzsE6*eTKmU3wV9s+rAEqw!Vhzc)BL3mW zDr6ub79=yfPf*Yph2-xyQRbF%jC=}o_bUYY*R%gLM^;_fpY3((>wa-^n&CdMuOJ#eV;(WsX& zdI=r{$-6XhcW$ZRj8i43RgQ%<9Aa`-;_a^C1&tpv8WlSp49xr_NE%2uUi{pQp&=fh z|DWK9-wKq5SbRPt?#voJrG0qFiT@d*)Z=plJvuj48V=!j`FwX~y}PAxIFbCr5dAlo z2!>udmVxT%#Umn_n(q18jF!Rvf941Hi00jByig@O_~H3aJ%r=MSq-meS~Z5a zdiFbk`)U_@2*-;F$p&@L#|@ks$UQKW2*-l;a&|^HmJPIDjK)=M_5}tXx!OZGUYO5m zjb;^R2EOm|cVo0*y?nw^KN3zq3Lae&GJQQ^kju+=zl5y(! 
zICA@$z>Gg%r5SWASg+4FER*}-?DKik-wJ&9!Tr=KBpfd$c$}PA^5lcN0*{ZmC5;d- zuF2$GbW7!ZzO`%l;Gmb=(pWhbte5sTWWUs7Y2X+V6 z-MUo~VsR70GILq>J<;(2oN zU2fI>H!23lzBzze+E_QY%J}l_C1p`!Z0e{`WMxs6i{y zc=2<|gSEz;BQ6OJ-j%O3#DewmUNN(2G`hBaGEieYzc0h73%&TclOc1@N8UU)a8n6B zaU6@AD3q0GG@k0)B6xPhd#Y7Z7ZYZ^hIrJ?{s`-S93J;@aLCwZG#{wpcyT*Y9R z;)>DeS@mq-nf<@hN<_l(;^!tN8sg(V*96ZF$yWUkb@bx1OLA@9syilM6`Wr(lL*J+ zCR$3QHX1kWnG_hE+(oTI!tvr0j{4=}d`oNis=phUyljn!aJ(?T)BsQOxOVG&;1nJaA29A70Euh!T@I9{A<(|+VU z(d^oNuMZC%`JLZ-f*Ot&=J2}b_r>1|G;c7DG@M%l*2`rcSsr0clOd0Wu`P|rj z7s30-E!*4L^v_zmctjWUqTQ1%Z;jyj;N8Txmx$j@I2I(hO!{G8PD3}b9qU3belB+r zmxdz5f&}O#G;vAKmR99nne^^9r!MruyNH)?ZYfA`S^hd(6EkK6tl2welZKlRFWxI= z{!ObK!m(hzT$aBR*BZ4J)v^vhu$?sAgn03Dv)YXY5@Nx6xh#LZt~GidE^GZZ?TFGq zLcDnO(5Yr_MLg#Y;aIR9$J8ux+p3{71(=4i=c}QL8#V3)8)JDT0919X$mNypYp3}O` zru9o2ZbH29hU6uj+ZNW#W%I5M(<*X?ybb@xVA$X7LId>D{ z#Tv90%w5f_k`y5pte4C3?hM@z-s5`hI(4BJ-om_ubE89o%kp*)P4FJqd(cgY7sX1+ zUQN#(!m(hzTsH4ZF$CAIV~Lv(FTPn&j4`)69xI1%ELbm>DH5r-fa!klIO&+^Cd7+1 zDDIm3AZs8Y7Oa=c6j9Y1Rhsbct~8nOHBE%}5&!Gv{f9c;VepYdC~+8^wCL zY}S?0Fe{NX+=O`XbF)8+AjE?8a@nkWLzo?{N4yiMwUID~UE#d^7Hc5qk|CNfx9 z7kY7QcW$Y?N*uzmAi-r5iHwGciKO8s#LIn4WetaLZl+i-mrdL?8YZ@rhMN#CynpJR zJA`xN#d^7HP6ngFdz{uUX}AgT;+v&82h#}0g7tFQoN$IPCnssR3Gw3GhsM#|gk_J@ ze~EA`STC2EpuO&BaL$lMh!^Kw&P~{87{ak&y+%*H=}3>R$$dz}O_=P3?j6n@*Go7S zte49)((0W{sTt%Pjx^kac=2d`C2}sRG>{N4yyNOtIfQdx z#(KF-D_^~NN)xoBIHx8JHz8hp_c3|9X_Z4b7Oa=cCL=cl*UuEzgLV_oRQ&*dJiHINVs5?nSnBqz4>T}^2qAztqLIk$>eh$fsHHrC5! zIvLFS6Pe&W&g&QJLNB~i>wY+db8|<6%X0IM_|KKji=J*b!^+ySlK#>=Wuf=tI5%v5 zBN*>kev4$pfB%IjCr=L5o>K~xD0$cp9U#ttg__@qxIw2M$xGc}<4Fb#F2(0XJ zvo&c)Nek;jFMjT<-6~Z!y9lu$!DV?K@i#0y9XPeVt94b^45c9!^kNNX4;I`~EQo|y zkl?aBkN5|C*(k{MGlg}b7eALMkxqyO2`m|g31efL9jri}V{3L5^ z7Y!uD%V_i6nKT~ycCL${_9_e3YxE3p<*s?$^SF!^5>jRj8eP6SlSbw>3tfa*uwE|9 z^N8R2coENFocB7`gmtO01efJ`#Q*GtfgBlZi6vq| zFV>(w@ZFi>?qk6m7aTr9 z(hUCitBVi|5?q$&5r6dYya4S#oA)}_gz zb)gq%CnElD|1ReuOnW6~P`7x{BVtq43 zhy@8Q%X9iY{my}yiHS->Ea=4=CRUOLo#!q>EJ$!!o~!dbW@5Y25DR*-hB*yLqs_EI zEZ3&D5W75^kNNjUXjMYcXM2XSdiedJdgMbA9*}x&RV4*7W85bd3U%v z&s~IAkl?a>F0RgVCOBtM8c2wj>4%D0=>)Y(S+HKCX9zl-x#yhwNIz19&}Kf=UZ-~g z+PN-*Mps#|UM|b?h=0}D$9V?TH+o=Q=*7=zEvUU7@waa|$3=(*2`UQZ{)f&`c4xjN5zM{!QAG{k~ltU-Izyt$sva~B~NB)BZk)p;J{e1Z2m z)`edDoMH^$ohe>X2H+yZf&`c4IT25HjB(z>dmZaSFMdu@l;2`c8g!n!2(ci+WqGd7 z^BCuEyw|ZV^y23f=lSkT8g!n!2(ci+WqGd7bGdWM`Jw7UFV>*bF3kHgoe&EWT$bk) zea0=3o3J`-#e!aVr&jN$wkg}D&trTRya}2 z!SAQu_|xGiYCt)Ov_ow2eHcZ{MF@(C{O)Vh5BhJa?dC1k`n}iCsv-z^FF0!Gvg#kl z-GK+GH8~c22aoIR-}ugLplPg-K+8o4((wN~pTDDu-{noeNzUIarbibD^PO1wIlbp@ z;*S9#puMhlgrhcn`mw5BCVdAF{Z2-X(NNzF=XwvhCiHVRffj5bYSWul=_L56Rc4HD zcUM*3L;ZLwIqu29VO=g%WuI@CdS0U5%`{d_%-(#JBG8gf6ed%|_EO8U(g;m!ueXoA znbKAoDFQ9&gl=KP&nI))8>`3o28o0&Yp)ikY^CwlAJti5I|e@SVSZBv&tkU&d1QJB;OpJq4y zbX$N=HY9Xed$lWyaZ`5@q_=0)-#s7Y6B-F!)?O127AlQ4 zwGM8QYi%w4@V-Nloy*;E$Y=0nRj#&}Hqlv-=jMQR}^hq`}1gm#0UWKubDN znA8MkNqNI-M>%6cLYKAIU;Q^LjT_fwkp>f`%QpxzftGZlFsTX7_Bsx=_;Z;qNa(Wm z^7*tzhd&z7U!O4%=%Wd=q!WcnhiF&!Cf1;z2|IP^vi1_~$%D4iZ%4C6&3AqXFoBkI zqA;lm?s<4)S?+lp327C3$r#qizmZO7CQ2MFqX@L56NO1l@C>d$SUbqO7YP|%_L8;G zI6p=jOf0*9iXzaGP823J!7H(2otf&hA0%Xzu$QdqM31jY0|~UG6NO1l@Q#XnFf+)z z7YSMI>?M1%_}^ucdJkUl=FhxqZM38lg-K1Y=Q~%IQJ?)FA-k5nB*x?ht{@F2b}adY zKdZLUl1>yRHNg?C*{R~{vmYcR`mmS8%0BH{kOmVOr^hM+E$KvIQWHGa6(hH(&wh}Q zD9T~9rs4f2XaLQVtrlC$gX{#TO*6Fa*$Qv_Pl ziNd5NcqcW^Y!Kv~g@l}H>?No6xv%-RkeHzJToGtVCkm4(q6?kpX@s24>?OHGs#RUS ztXNhNXh|oe9~{}}bXK4JAR$=>*L$GM`zj`;MmI&EC7qD@;0R6UIsf#>Mnadh*PDAk zR2r#OlD#E!=x5~X&L#4e0qm6XA}TJB{&&s~J3wb$&duarhAiduA@uVVr&>4d~xnrn;Bb3UEpNa(Wm z>h#*zN+Wd|Sf}nfRlo#V(g`^~Xn$L~ejLx|xr@-W_Nut~8>Nvt)lvjn(g`^SX@6U< 
zRD3bYyB7&v)?Pl}w@M>*I$Hz3>Yc#^TG9zP&zUG7f_}p5)TPVXOSDs2hILoRo7Cq^ zXh|m|FX6Fz_S{(Y*$)!ZDy~;XJ(Vq4b*7I|1X|Jw$**|M>2y|~{U9Nu%k|1yNM(K2 z=f6InayYc46Ot$L+O5_ms&Y{zWR-BevI|n#t2O-V>FV<(w4@V~@A4j;HEo9a><0;1 z?OdYjg7H-@8I6rnSKH(q+XNJ41#!nHs-b%O(nSU91yXae#ZheIQS}Vuyd2`(u zT8bic1~BvYcT}sqgwX7TXE42?Y29AcDvS)gi)5ekPQ_aW{bo(yI!fbA&)PAx6h-Jg zxT{NvA_<|{3vWyA9fg*n2p!vl7ZR!;yrYniGJD~zFXAtK>sj6NrnO^eDT>f%)Y->$ zKNffhq1g*>ukIa%mZAv#i5KoDB&5t0yrYniGJD~t2@!v- zp?cSz>|8g7mZAuq`wWQb2{dzkAQDn$FZ`4$;xA6$ValJ-|9jy=eEs^7lsgxO_wJcR_vMjY_wLz5KWoXQpHtkb zp3}Fr)7`lk5hsW^PXtt2aCB*` zV9v8)bX^3j1tMy0ZqF@JKl#CWeK}vrDha!Z7JIHt5okf8NfW-;s`W-1Jae7=Th+J4 z!uYsa1!HB1<&_pF0xhl%)l~`#xG!$P88?_sqcQhPFGZjQM;hkI5HnV-R0LX( z;If%bB6_`XBFeq@w#tkTuU#31B&`@ChEDn`Mc`Pu2t7B3n78xMAV-Gm3tz9-XCMBY zOaD9y>4?8c*+uFc?76Q-CR*h4E6%)iUMhKeop;_>kvgyZ`2r-+3$y-+|Myvo6tVE! zTT!&=JW=0kuqJv}*y-7d_-fLJQM4e@W-H&@3zHPDXslZOvQiP{r_Ya~1qq!q%3ZC0 z+o6iMuI{%k0=@92N8f&0ZjK^)b>AIDi_YQnT^6+?{*xP5sch-T*?+qKp#=&2T!Y?o zm-S30YutRMu8Tk~s3{ro|5U50>c@_sJ0d~rRL>fJawB>=PrT}_;)?iiyPH5S>7UMC z>)n*0h+A^6GWo9l1WLw=Gh}zGmX#f#i1?Duu2vx-bE7l;gRS0F#GwcJxCr!;wV>}G z=c}$!M3Z4%&5cEWhA8Wb?>O#OO~`mn5l3$C=h8qz*0H{;70mcZ5gSI_<08;YcACCH z9=&0sB5uAk=pxVyb{frKyu_D^czR*~7+R2!J+E)Vm3n-xiM#u{2=tP;qVMN-9r;2L zg;x)tD8x5<{YkvUF0L1~BmUPOKcwf>wMwIlwweJ7Mg_kiWnKtgBB z@;UWQZxvVl*!^b>7lB?n_mxkb^Ec@G`P!^jE&{#aw4<^5_0VzEs*GA4=^MILMy9eN zBy{d8pE~7TwEjI5zsRcb=DRL|KWW5xFmcdNQzr$1+Sy|ArI0|{7H)Q=xd{;S5SQ?w$^ zxw$*(Q$}V}-@Mw^)Ss7(8QaW7pqH#MeIM*O*Qdtnu0iEvXpyzccTRV!F8NZ=`R*I5 zx-^iG9i{KiUryKe+KHu_x(M`={itvBRjPp_~Mo zH29}{Og#0%k5RN-gdhz*%QNRanK5VO@H7p59~?s~Kc#(fg3fa%r-m{S*^66C_G-iE z5~1#G^xs@hY^VE2ihyx<5$0y0=!pN_DmMl?&aWNh{=j{J{w_atV{y?%5~?m2;pEiR zkB{>I4sg!Sa}ReMX?xl|I^vJ*b_i^h>60d)h4c-QryFmjK3Gj!^=xY*K$!30W)G_0 zTt#jAUgWErw`QX8$5mUS1qrh^&F|KZ+1E%BOHPJd1bUhM80O!|r{B4I?dJe%oOrfo z5Ly*BF){2LK5G*^C*t;Drxj7`#nMR72&*Wn2ooR8ZD$o~}KJ3QD$pqGiF=J#G@ ztg>45+y*^2pFkC=zmSDig(LogGf}0{;A~+AT41as2J`Rl@^2w^E6r=l8n?#PDv6@X z3tC0QeWm_X8l$uR-U{<+XSyGA~MWaWxmv?4UCETxr!Pq z$=lW5jQA(sSzc-MoIV0uMBFf8 zrRw=FM@nqPQ8f1`wLfswBmPkb$1B3Ozp85`B4O@SY6nwFq)JU2PSrO2WgfqHZH%)5|<70l$PVaNvY4q1MChiUyx5;&K!rahF z&A)+N$T5*xHOj>Hu74DB?OODLIVU1!qG)!ng>7J6ncoD{^}^~Q;zJXu>uww8+O-gO zU6F`-Zg$i!cAyRH5A)Mdb#lt*B__h%Wp-`J{Z(D@3ihD+Ij5c*Co&v6Qo=>RJ~uzL zEHvl2MCzUMe{Bu1#QfZ`P~{1*YpLgQ8qD3%9d~UKVtWEl20n3oDu1K@+P<9c_|t5P zfny||Cpp)0XO?b4Ws^kYx(G8L>ZcCsZlE;o`E#M`lRMLPqmj}MJH)0o3!=~}iBVpH zdJe6!KphEsV`Qqeq=5ulpjAY8Tea={mJH5!!QvVp^PHld`N@xU%fkndP!`Y=nx5xU zKN^+|sK~JUbn)P}4sUlIHR@P+^w2s9D2F&c^7B!nGHBP%n{qA#NUA_Sf1 zPhYxMX?O{sL%rYGozjl@bN>5Xs#R#Y2tgWObY2|glau>_1e8OY2fwE@p7`Tm`n~Y0 zAJzHpZ5!$b(Q*-jep~(N2Y%+ycP%dgwDnoe9y3-2q*3+osu&a39epc+mWvRiu{W)tt(nuITrmNA6hO#WV^=-XAR1U0kOAatf*BuR%p2hK|RN@ zLITQm#*8;*tVp9>mDXwo`8y}jauI?wwvOtr#>z_oZI}9Hhl~~VoX1GU3N05QXmra= zJlW-|c^#}`uQs<~Wh9#I-rcoLo8Q9mi5Ur4m9#%1=YQ|Ad1o7o{?Sd?*&kklX?ok0 zCbkzvFfCpmwtqXNM?W9@_=W6`OSZ7??c2!4da;G?4|`7$k9=2-G(M{Ne0I6#I$J;e zT+K!c60j<1bhD;Zux93FSu^_=a}ntE`qDj0V@Ym;h)Z`DvzGm8S>5)18b=Efuqufd z(6qJHeRziTN#A8I0=<5{VXx8{_CZr3-np)hHGXkdtNqg>;%GqvcLDtk@40T)rqdL@3+D}P2_Ga~x#JKW{q^AYQ^$Lm;VK>~If z5!HtL5y)Cn)B1XEZx?}Hp&IX~exx+Eiw0VdfM}=t5tn`-fnHw@-{~H!xQrEAkbr2X z=OZrjfdqQhI`O8`NX>a%<{T|ZK(y2A%9eG71bQt#xkG8B)~+pU7cEF&bXEJqmi>VQ zdhLHiYozv|&3ka_vUV}FAOU5H)U?lS+2xDkqDqh(VuaH2m z?j!Ylq++5iF%d0Dz}%1q#a&zCE)wX~a>ZL}K2otgF0magNWdCPsPiK(=LZt#ocUs;MkiXR{VDWITjLJ7B4)tBYq^%;w6Mu zt;(MDNvjz50|_pR7iJ%t@D;yK5oqxeOzW|t(Y3xBad(VI7YQzl7v@B2KCD5JyA**I zFTu2)b6O?VH%sq{@w!5S%i`tE6Pd`ov%VtG;w6~YYnN8Lb!g3*DDMv>xGY|nqpJO3 z^}p_h7!zpm5=`qoNW0d$tW8FY_c;<=7B9@T)jqdkO*DZPFTu2qB@}(E&R@S5<#>ez 
zm&FTj87f{`3+;Cmffg^pw2p}sMXhNY_7-s5MS{!Xg|{UYcdb%iA65ifyadxaw$u5M zJ-Oz!0X{#F;Ieq(9ZsDe*=sI&O%Z7E5=`^`GvYs6y@vX{{A+NdJZ6sKGmOyGV$2&Q-Wwg;?qWYRbF2>lZY8%Js&;uuMVOG z35*gEf64B1)eQdcSR#M~dW}1!BT=PlPm{)b*Vj}1xZ{$a186}4qaFP%KtWu6epT}G z(=GzN`YqKFwRrD5()jI;OH`|Vn_4o679=og)3+Qyzg!U=|2H>)1bR(xqcf-4-#kki z{E3WwCW96vp8G|giPqWKL_B>usx+$nxF(1eB=EeVcOiYk-G|ug*uI9YZ4hPVJ1fIKes=e7$X|&q-mWx2IH#;SiM!%=pl18j+an+Aeh0}s) zK?3s!ilY7ID2+BVd%FnqI$On78t0mROM7tTb5AJZ4qwk8T9A0>!MGyE7OzFblBsX%qilZyP>Wk8oYIbi$E`^iS9Vj36Ch^z|hN(u#vzFFyjC0!YoCM>~lVV zb)gsZk2D6(y-JPlU5%cKq6G=eWF!97&t&Lz6?i3p1bV?dB_jUbgFC5KJ=psmoR0(& zn2|^P^-sO7_TZ1FmbnP@g7rokbIUJOt(tRcHm<7#60i%1xN5-Ns#RmA9}1ua3A~F$ z{I_+|GdTUkpDqHuU~iH}{RbPWRvl>hX8i7o=YAXbvbP4Aykt@^(0T|u-U0r8lKdw2d?z_H}> zpKruStv(T!6B$ox`V4vBJxb$_?zW3SFF037qfBtIB7VK{?*Lkmz>Fi}-?puc>iMf1 z8{!$2Krc8SN#n?8Unz~^&wYUBpo_q4fW9lfWfP_GT*U@KtP8#1oTojQ*{7|=|H^lG zxeHjVXSN)mpZt9kzBzLRy=l(LCENsBkns6FP_(ZGXELC%9SQUTZFj4-qXh}jBjWde zf~p_g_C6X%0=;lV+yq*Xxbe;RRX?uItVQ$jYN^YVM%T3m<4B+vt_3%N79=uz?pGQ$ zD-I!z3R`~4=JC3%bOjp;^m4CuB+!DyD?|1wjZK$7MjF2*mnw~x_cyYUKrh^BZUQYx zRJ;5=rO~6w64q#6Mrr)@aBCY0^m4}%B+!CHeB~~s(QL~K(&(|U{dOL!_QTrQNT3(S zF*ku0B>L8WTWL(oT22~=Ru7A_#y=l7wUIzCjNonpEl3oLzo|4{*|~r;RwZ{RjofiH zZ6wgkec~X279?7)Oe&4>3nr4r+$kk&?#GV(|HP3%FZaob1X_?d^0TcpZoRxSY4i=( zQyQIWZHXg+UU+J|3A7;b_SU%4cr5P-?epQKS}KipyV}N)KrhTb+yq*XXjxTjv>scZ zG)lbDPHB|+tePwLK`*GuatPmRmm*;y0c9efAK6$JdO`m@1X_@QJ`n-)k&Og;!90cB z1X_@Qxgi2pNj4Jb1?$a2palt7V?@Aew~#!R{`U}QK?3$X z5fHB|B+v`un1?_M5)iwHfGBDqfnE@gJp@{ifEdh#bADJzpckAg9s(^$z*#~BoN5*l z=mqDahd>Jwa3&G~=Xo{~=mqDzhd>JwaJCZxc}X@B=mq%-?OG(zf&}C!M07g)kahId zaIMFTu3$_8FAJu|~Za??y=@js%y*3$$(1s?JYl zFo6~?VOmJU!IJk{Lx&6qd@$t=7XhuZVH6UYIDY$WQ6|vhC0Ik}aNBENXZ3CH*}57J zR(I9Kw0Oa6k_Hn?JFE&Yffg^pw9er=G;e5i8T?^jNT|DuU|PIjRgwl1zdzU_$^=@x z1k*Z)i(mLOyYJ{vGwju)T?EtO1-pwhnE2}JvOy-$;w6~YIox+&TiH);t`=-IX{w80 zTD%}ikOmVQpFba90xe#GX`RFMUy`%^@;B-QH&378BA6C0h<2pG#N4Vi6oD2m!L-id zrd*jHzxrsU;JC>XY$UiWUJ$iOgNbuD^i~8~yadxahx@BZS-bN!1A|{TzR^a4%i;y6 z4{0#5>GINwK#P}PTIX?Y4L)rhBTO{ z^HP09pv6ltt#dfK|Jz;m9}nF3*p5m_a9OrTyU|U?qlN-*^bLcnQ{^ zJxB!a+U6I&kJ3K3kboT(g8grZb`Kp_1X{cVYfvm9f}>B@)%9W&uPh`WGK3)V8G_=K zBGBR`Sc76B5hm`&D2iH0K!giH1UJOAth$OoiUOhu|bK z#IxVER|HzT1Z&V4MFgK}r?b1n=v1?ifDz0iNGA(J0%}Uoopa6( z^?72s<=yD>QHMY;%$y?rokKrW#OJ3D1ki%Sp&QkEQp5Zm=n?wdf1aL0=-}q zh&jzAcCrB>L?CQble4rt^sZ#_s}(7+S4c015Ou{nBAY@HYh0-+%mgjT+rH z58ma{KrfhoB6@t@M-hoDCj`-gM2%YNZMtFl22}bz{UvjMQLTEtpstHRuR0afJAT9b zoyie@^`$ix5nlRT05#AHRz9_A!K3=8GU0utgJ?mb#|rhX;xK;;Ih|@9?o<7^_sZ*C z1bV>^CXLaHD=FgUS=EDRL89p|I+IPj-IX-TRMCI!G-gg|7lB^wim7jv2=lje)88~~ zTBdrw^0rbg0=*!1@qCPHqgwU&<~l*NAi+O%;2lNZpF$eXj~%3lhkDd@5$FXmm^4~! 
zUZh%e+l%FbXhGt=e|0{m`*Gc0!<5GH5=R3_pw~y$6M7}8u^Q2(OqBQex(&l!8t4V5 z54Gxz^J`SA?my8zh!!MX8>YT*!p!;V*81lL-_>dFBGBvm;Ks=T_L~J?6cp#_FE6eO&~4ZEg92B6KW?&iGIf|2*H^MW7dCPB#7ZK(W%QRekoH z4xj~zU{rkj6nB7t6z0g}c&^Zrt;>ei}$5G_ble?@(_0-q8zR)3fJ zUJ;G$h>Jk4QKK~BTRf9)^pk$oe@?E&zMqa)#h?E)fEFZRKa$4bRr=@7l?T1$BG3!=KWQxe{H&UfmZR%qED0e2afLME zZ?9HF*Qd(32=s#3MZ|L(auo6U^b#%ty&$?$tNvT4-eX~Ho|J`AG=v1iVA8l@uAaf= z_3O9@^ny4~#P>57sreW){XE9@5PHE$M6KFz|Fw#!U+{MTEl9w5MH=so(7#`DO-?yH zqeAEfXE153J*pi$`=pFaFEoQKNRKcT3=1#7&?D3I1DTrhV@|L}wJ&Z#3vP2k<83 zc!74pO`rwq1wA4z>Df|gRPL1-M*_WI+&lzYkl?@0=6>k!ADl5GpfqOhm>ov~y0I=yk>Wn>Bdt zB7t6zosk9-XhDMC?!g**fAIch4c>!DpciDBq=5ulkl^>Fu!i2}TsLbli3EBa0x>XhDMCS;HE> z=lRYF8eNb;FVGIT3A7*qdPG3avynhA7()+%79?QYh=4iIMgqNHEqDmDAOZ7F1gza` zB+v`iw1+?o60nYmfE|^M1bV^V47&-mAOZW42#5?866ggn#zUY535Y91K!mf9Kre`u z9s(^$Kzt+uBDIACdO^(h5NJUH;ye*>Mp;Op7o1%l0xd|uc|`=AwH6ZS1!u5_K#P}9 z``p)06OuE~TTfD-%F=0{I?uz9OLz#hAOZOU5s=APNT3(wL>>YyNI)J$1kHIi$6ea* z782+Mxt)hV3lfme5kYHLY0!SRkU%fUQ9T4&kbu0D2-<^6gZ8_H1bRWP?IF;D1mxF5 zaNRsstc3)6!JR=zpA>->B;cN)Bb*|5%#c7Yxchhrv>*ZZ9U^$HxgR|DNT3(o;XDLd zkbrv|5p+f=4PL`YpcmXlJp@{ifcqyA{M~?;Kir?cg-?Aqpq%u06L!D(i-}qOyVdvh zp#_O_jR)7K6X?Zde%qUV*Cc<_U*`UY;G6!m1qtpG6Z&0~NT9__aQ`&H-}KjO{G;$q zf4VLlE1nzH(C;us0xe#GH8io3zUl9St&gkk?Lz|RoYxp@P%cXI!NlU6*6>Y#+TtZx zLlgW>f7j3L4Bzyp30%9pS6G8`YTAQL{9Gxl2()+!*3blh)8DIIu7Pj*({(W|UcBd7 zgYE{T!Nj?$trdY5FTu1X_?!Md+0_!h=}*^%v4mq6Yv^~eGSP3~O!%fhZSfMUp$Yz` zzpeM(2jBFk352LCNgA{=lFTom`;BVO+@bi^1-it`!8KqCuLcTkb1`~U;>L>y&UV=4J#P;OmzoM*x z1fI2gwmbdc7?Sf+U8R8*FX8lqzx6bDOBvO3Brs>-9L1RrB+%j|ocZAQc<1*0D$2VT z3Cw*s*J6#-Dv5WxVzwgC;w4xkwG!=?H@si4l6Ef=n8R_-$Qr5DZr2aNy-48QfbSx#k&2=z0xe#GHByn<{^ib$0PkKT@Q%ZGD%MDy z1}Oq9UV=4JCyqTTIWNHb9SOXv@!gO$Qm2~T@<<#0cKK|ycnQ|f1noV0z}o&n-n~fR zozwlkS0vElC0Ik>wwXB9=5E%Y@A`G#D(<~In|>rz&S1|feJa2Nw2J#p4w}w zUh#sxnbgF>f14`;EwDclupf!wy6JQdvKA8P1u@1Dy>FeO2()+!ZXv}hB6y6x_~Y&% zj~NmWeOzw{)&yk$ihwAZ^b)K=ahEiB=IC?|^4ucR2soXSkV|NdEEstB}r38x<%+1^eZ4)9nZ0a=FY{mVM{DgNAN zih#^ZwM6QLJcwFFGnifR`;7sfb0p9U@d9DbwcnQ|i>x$1NI?pqB?IHnLpZVrq zb)J(35|Hnz_eD#+ke3p{CuZ``xdprjkwCARPTuYz(BdUnkIr)naIo}}r*KWgi&Nve21-Dc~v=3HM zXB1kz1ncQf&pDTF^ia19-n~e`EtO7D=Q|tt?+w&@w)w5V7Q7=`-cQYM6_@u@(>~|# zICK-KZ*6fA@ZNJG%y+R^|BkL1L=7k>;eFviG|H%r-!~!ey{49=-UQAqq?HJ*vY{?| zi?w=Fwj!Wa9s(^$nD=3G&lQnlO)dLdbWHbt0ppdVmPo6(jC#}${vFHt-~AI^e|EeO z+>(nCq`_k~{LlaCZ`*|Tg1hycQP;$`hq@xc?L`eKD{b|SI??fU{7at01u1O#hHOSpwn zW}?Tn=cBAc{h)V9k1%yI@s$x@qd1LqwGpy|4*_oYIb?JKb#tl5W zG+vIL^PDw=SUn7hd>7%+OnfzeeEi#&CtDkz$;n)rSB&cKtTdc@`TKxqrpnmVw>~sF9x1%9t-t&$|(S+6z!fCB(l}&HPkJ7HqbjJ^=7vsvs2^~vB!`-T- zdCS*-`ULOs9Qy7vC(59gJ2E`jbNTufueCRX6EnPov(HC9vwZ!YUmZd$NVt1`VD9qu zE51%A(96BgpIfqg{X+*dffgj(yY|V6%h&%?@TEF&^FA?#tJ|eq~XFtVG#e zvTI)-w0!;ie|4*zXn<=M_X=xVcjxl;SwE%|xQld65=++JzkL0U(z;ddw#(Xe>+v33 zo=I~~J2r>Dbz0^ePXN8HWW`MAOLLw{bDko^;;an!$uOZW&3Puxd69&kf82w))N`8i zOqz2=IQ?+f>&z?@H0PN#=V=6bIWx-y&3Puxc^ZLU&YUwrbDl|aZU~v@T-Ryf%q$Z$ z=b1F;X#{#Xv&jU_c_z)dBGBR@)ET9t_JqDP=b1F;X&O9k618y*C-jxPgw79)C1IFB zD$82P4?B4YX8>q%6RG`A1g|H~OLV*vORmx|a}Ii}!35_el*y2W5cz3@i5NscUb3DE zsTbxPXx1p2$PXjol$nq$gYps@;Z!aXhE)PI5|oz!k?$fLnhD8EKL2OxddHF*hB*fs ziAXF5hc`=1HOOI!qcIXjIB$V)7=xQNsl#RTLfE)8ep<2q&n@)Dcp z7FUL|j^&idB?9siA{>jeGTf(+D=$%moF7OyGs_x~m$(S@a%PhW$V+UrAmPj=6OfnK zXhFi6StcMa@enfq66e)@mk7v9Y-iRu8UT@syQ~3uiHmUNUo`Z|;L1y==dwR^#G#Q+ z(rwbEr!#;G!}0@n+&W`TG2b7d@OK^QTk7(Yrc8fLtjkODhEh2&*I3N(K! 
z(wM!(;w64OHaI-#--U*_e(tKk@fS}t@)F|3&nx~mAv|a26NVVxe`O%M=N{E6v9K4l zBD{IZ6xBk})~#w2Ul!>6!K}1aAz{j4O&okfS}HCAR!lot|aW{t)j-SPt` z{#k=P7lP+sG^m_wh-wYj2Bx&?EQGR{R;7p&HF_lXR(j45iw;Es?fPz%u}Tr*#q^)W z1|;noiw!aB;9>d{$LB)}m#I(57C$~+M60+T-?Sgp(0GNZy-6XYENfcN`Jy_7f#2(X zuKFPsFJW4k)WpL*?+6U~p;ZBUiN#Bpei&l^hC**Y(pqKuM8wj(JpOFS;(B1CxmBzh}A!R@W5&SxF=l9ge~sTq{#Zi0I&mLdp6tFf}j zUb!w>cG4T`-rF-KLGL0;l-!;l=JFLan19<;w69Iy>b#O##TjzhQ8#L(>57m_3;(aUN7V;LM-fs1idpV zvF^=c?dBR{_tmSS z*7Ra(K9CSErY|h)le~V}5<^sJy*k=H+fsyB*b51I*HUuOxS597^h16$chu!+gm^Js zxy${@{ln)NqQce{(fi{!D?%*ng#^75Ct0WaU50q}^X1WHeRQjk5HF^G-0@g)&mEHu zv2lKW^y;a(N<%E{g#^9NDf#y4UWS-K8e>}Kq!Hr9^zAbiB?mq+$PhnocrIF{%qxx1 z!eu1r{XxlN6Pp;~$Je6KyiFNOL+WBeyttfCZwdWwLn}i(dSxuyZ~H<;h{a3LJEM}H zSY@PD9ck_Mo2ym{65_@5yR$YVPxUEhh1 zCd7-&iL);yj}N@b5Px3S7~NTStm=nYyac_MDmgN~($f7HxN=!^-e-AA0}1hBy5Esk zl8vV}H^j;D`4p{vKD2Nd2~)PU#-PvgqW#Z|R6UovfUvpzd-qq84PPpr(8Psa&qm)| z-A%PhEMCHlf*}q)wjw(I(QMUoB*cs9-!9pf+?co65FJ(&M0bz*NUajFuon_$HVv_6 z$(m@bkr&izM?$=qKHPV6^3*Fw3{hcHUbOj@bycgx!d^(2HI~#pzjEQT(dTOwsGcJs zUQFLnw;;Lb>MIPffB5QXy-K^(=!%8CkTAQ;5brLGMqfJ`R~kr&7t<%6eJ*+5_-=;S zK5uRGowlV_Kg7abNDTjGezN~pqYcsc>bcQSzmAF!iDH&}}t)At7G; zd~jAya$?Cx46(Ptv(d_ntEw3k3wt49qMadf$37LkcKQ(2b0oxz>3jR!mi#dEv?1G1ACq7Q}JF~?Qjjqj)ess?SMTo^qm~6lhRSx7vW7p15twKV)nEt!vtVHXYFBu~D z?)>PO0Y583EbN7Z$!ZMo`Wpq&#=k93Bg9L!aP(%o$MdfkBJfN>^xuD$DdI6>VJ{@U z>rjcaK10-vEs1uvi>cL)gm@VZ%h4FrVoCJQ;yxjig}sm$bLX`Cx4YRJ&2=QWqR%keF?KVaW*2`sD$6?C)jE31CX@qz&K|3w% z?7_uVR!5&2*Il(rELbm>X+MT3>oXeHPg)!;R-#uLAzn;S#0WdFWMqwH(O)jxstB=Q zyn zzFMJ9H6+A~35v&IC$^t|KQDUljqg;e#Dev5nNEfXG+VY!?gG%Vj#d!p=FE_wBl9&spbHKg8lC=u8Y#o@iQC?z)Z9f0F%G zKadbFrs)h0JLmb5+*tI1pHC@5EbN5@o$X=D6Wuw3>IV|y#Wdv-;k3L&^+PP|g#_g& zVagMY#@mCV(T$C+NF&6HY08PhPJXre=(6a_<<-=Dh=sk7pd2ntd7?XqQ$0sQyqKok zE_~5EQCZjv2~#$iW&5V9qCLLmx`jWS5(NIH~Zah2vIsJ6opEXYf=kkBT46!%01h@ioH-ST`2o) z?RW3WEh;&5(dowNf;wH!DJkhU##rOK_gL%I?~naFh@S z9Pn_ICG;9g1jq4p@QfqJVF^SrP0v1w%I7HWmOSsw|Gv`5df2vv-jAwL^T=Y)X?&|( zC8&!DM6pfJaEZ$2!G;$t^uAp?UI=)igpMn!q0gL6Y+Y<4ifwv&O{6{73IR`)(D6|< z^w~L$KoryTOr1!3J{JNWjB{Y9fjlaF)c^i)Ik>jugqL`+)R9Thpub!)4>@}~mO9*&4$`YCf zsm3W?XL+qIX)XO>2}ChX@6OCywN*26ytOTZ{(y(0EP*$A!96(kmK^WuyYE~6!j9gh zUpOabJ+}C|a<-9?v3;z;X{%bdzP>yl9{MfcTeNVY5b#8a4@aG5GRw8i3Xe-5N@;#G zsTvQq$noAgHE5M>iESbBxu8{TU(59#9X(rG1rOJ2iCbqql=bt!dZ@;PF3Y?_E%MR` zL@|wgF3?EL%JrJ=Yb^vk9A$}Pucxv)HNQeN+Eg#_ivQX*jX)IB$mar$s+*U4CsrvI z0v?XC#M|9}$?ExWGu8NhK%w_s`zO)}L`e%nS>HP^FZNnrTSW+DdK_g5WM@IEZay{F zbNk&Z1UylqXaCv>`a`SsKka?Aq(K^iD5jB}1sXTpy~L~k!XhEy;V4VcQ75RCnltRX za;bOQ6Mv)=8pSlSvq0mb?FHWab=|j zRp8A#I$sEQq6FRh3A$gjRd+61;w{L$xd}&E0#RI!JSfl@Iw8kv_0K(WFCebHEzf&)|2f6BF18V6$~C`wFfZTxdvc2$2QsfHp?X#5XCg|pg?2OSBt#H-@YaUJRD^S zWHkYCN?wuIXz8a#90gC5&}&Rv^-4*;w=a8yXjlSKOd}5pG>XnC@VdMbJZCH6SnK8-*W)5wDYjUmk!crU#Cs}S&TlqK|@PXxryqD5YZ?_NqLf)OXdG_sn2 zICik^@csR-u$Wa1f z?)&-Ps#XsQ0Z)|Bab85Q1KmXSj1kZtd-v=6zOs0ZB6w`D~o3ECUOh!B$WeJ|k`MwV{BAHADfhcKV=&NNU zlacS49Ayc*o6R@KNG2nmD8X3_k3d1I>h8|-ipI>59zw#SI+l+pz-zkBJc8F-VzPe z#RQ_*rdh&#gRIe=zSn+nD>51JLn=viSR@Tnw`xT#YiS2o+v^0UpbEyL8~+aXv9&di)}=)O}T{mqWJeumw0C`Un|D} zPn6&%qTSW&t_U;+x6buC?%ke7Aj)Xy+o%COa=o1GZ&)J5QI^mamIWG`k);ucVp@++ ziT{1K#9QvP6#^b}9A#{4-zafnm0Yjg#9?U!qL|QrQsVP7bG`R(87u@mwqCaNS||^W zBa+GRbtTW7Rzt5!)re#=6$GMKL$7Joh-5N+e~5-9^d7`Oa*}`J`T!Pc&t<+lF5jNqb#9g zyAs=KFY>;tS15fBPn6JcUWu*C^SrknjinKYVp``YB|bW#$QyDr7)#*cC`;&kr9>o? 
[remainder of GIT binary patch data omitted]

literal 0
HcmV?d00001

diff --git a/act/assets/vx300s_6_wrist.stl b/act/assets/vx300s_6_wrist.stl
new file mode 100644
index 0000000000000000000000000000000000000000..ab8423e99da86cb9e13c197f8137b670f789ea2a
GIT binary patch
literal 70784
[binary patch data omitted: new STL mesh, 70784 bytes]

literal 0
HcmV?d00001

diff --git a/act/assets/vx300s_7_gripper.stl b/act/assets/vx300s_7_gripper.stl
new file mode 100644
index 0000000000000000000000000000000000000000..043db9caeaa59dcb0a6bf570956b0904eb60ba63
GIT binary patch
literal 450084
[binary patch data omitted: new STL mesh, 450084 bytes]
zy8V#ck1{fxE^*>n!i0U3`H-Jr<2(6U^DROQl5fSc)~Hw9_noVHHL3QAP~xfGBUf;z zZ+44V!i0UB{N+vXKEv!+x5*(I^yDSGMXYt}1vtIhw$y!elWL!sDskanlZbB}ByK-T zm^kW^?%GxzHiwNy9cBzH3DHdm*0OJtUo@^swNL!K;pl;N61i@kd`*BQOxRb>TegLb zFV>D4_^c3L3c*@O{HeRvzFBG#(g!r8z* z=o1^tw-bJlyLOD+=PY5O>bo7a9&Rl8`I-}pcMGwF5UjQD1~}30#uA@+MWTexlhNmL z8LwEv#GQ|KRU2+hyhI|AwviETjS#GrI}(uz+?ePSkDooghzNYes_i0{Fmam12EezM z-Pqpfg6YK-GEyHP<1TA`_DL7DfwA2uiW0RIzUX-}8?c0lC9`)>8*av#D1^k=&R-z} zYkjn+liI)>gX<759c#HBLqBvwnDJh?v0%d_zXU=oqP%56{-2o;$^Uei8Whx zRvXUunIrLo=L)fx5Uh2@fgQCT@O^yZsJk0S^>WRdoV23I5+?rqNhh`8e7GTZG>*O& zV(=L&imY{WQ)dG{oKMV`Zzp^qUf&_&C$fZz$9~veZ8%@_Mv3QKD?}^#-WF>ueG`#X z+Lqvp`o!%LWqO2oug%5ZWeF2!ws1C_Pd!Wsc*qY5!CLp-(_U@Br}l|6j2cnsdAm9fVkO-ozqneR!F(fitBlJW~kVA1#Dntw+DyTI+$evri0>sQ6#WYI{RjpRk{RcS{C*K43Scq}5 z&%j!9r#Ktfned6NKJ67@EP4FPcC{>F;`ZB|4e?0ChQ!8=5Z&)@U$?7et>ce#HlXDb zn+oxk5F@^5H;^SvnC+(Re=EqhAcXk05O;muZXj!!y*BgL6?{T=SfY2O=Iw5JXCOv=?$LVZz4zC6B_!UuA4RPKYM*9R=31asJW~s|!AHh0Hj| z%8c`~M2Tey6E=6<)f6_y3NccM7rMWhV=bGn=1%;j;1jYZ6UBQnIhHVCbMUK^a87s+ zd~HGI+Q%h6E^FDmd&JCD1)nI$mk{QN2k^Xn6@ev8m@jekVX!e&e1`uDG4hNRIo2}& z;r<&}6?~#re4k}<1^;zPRh}hGm`}86-7jK8_GF6jo=l##%ny3y-zy6~arF4b#UbL6 z9WPOoS;B<*c5j{q8*PNh3306utY!Y(^rKc5d_wkQit(OIo+V6}kNVf$VdF9JMMsD) zdfV=8@~mZk=|5VoEcnDNugxlA2Xd}NwPpzu=4&5RkMqvA3xPXoju5P6{`HUvD+)fb zw|qNcwRrlwNjz_sFkx$kkN*i9ZDnonkr2m7Ja5*rbwZ1SRup`q=& z5+-cz^Vw0b(Op(WKMFBL2-dRo&h;yo7kpy0tks^DmDlwWEuAGy*c$HpcCgX+iW7@( z2{BX%*0Ob5@s8yMpIEtSaB-fD3{&K*2rOa3)}jY)1sjdt9$b7%l4VjRzC5%?2+Niq5DYdvV;lSJ9F2(gY1#v+m;|$%l6A~&3$68 zm$u2lr+!6x2}_u;JwVrYAAfn9+-xCU5`wjC-w%D)C;I<5IQOXZ_Rd3g$+LtB+skzK z;Ma2o=Ux-ybh$rR%l0pE5BkJR*?XNWckQWi*Rq5O+mm%;$w60~n42NQQ9`hm?ZaX$ z@rjj-Zpz`iFR#kz!xAQJZ`qBBgJsuwp%AwV!CJPTj4{zCnq4$Kw?IbG`()f@2@|$^ z?#A|}7fjE6C&XwOcUjB!#WA+~#LCxZ=@pD*Hed-8w%70Gs1ZWo`QS%Fu$JxbV~+BP zq4I3vaGBMb_Gy!62^03r!p*gZ%Cm_&e_Phl?gHQY-&x-I(ui4_4u!ISFhU9#oN90)%R`k8(I|{62&yC>w z_(X4cg0)DzrRm~dv4jbG7Uq1obA(tZLQ%btJ1hx3UK<=Ndw;`JT#!IwFfFk#R1 zoG&^-p5475#Gl1aWG#D+2Vc}DY6fqfhxdA~c&{vB!k$e!pZbqN93@0Y`SJp5+4D&F z)IK45GWujx_GEG_VZxriy0yXd`049WLa>%SSH;@EC$^R+yomJQPgWBwVZxpwyERS= zA>J3_5FuF0o)2S<;}hG+lj<#HRn%VAQ7mD?o_V{qnykF?ZG>1L-=kwKd(MrunooQz zPuw?`)!p&39%Km<_UzuRIhzgGDc?+pk0(ydv6elr$C}e8@a6z!gSGNi1eP#i?;yCf z^K>EbZRCAqUCUbb-T~ImK5<_B2E-S#K4%FN_AZ6n1GroWtmu0S!CLnI1oi-YV!3mM~%Ogt)zo=JM_bc5%*?eFoOD_du|h;S<}+vk#0VV`aaDB}^Q5oO{0n zyDdSmPP8%Z4*IU$K-RkbHuvraw0vR%v4Jm||3mhDSi*$ac6&>$guwIK5#P0|Wi7LZ zy(OQ3wyZT@|D+pHfxG5TOy+VKFW28X^7dOB-YfF=X{9TW5kx>z4|vZRGpn(x$q% zfb>OYZIrc>n;uH3J%nlLS_kdWST*H-IUDkiu^{8GWQM80Src*{tm-71zU zWG(pc|G!c(!7Y+1)t&t{--Xhb;LpJZ`dH;su@<~c5bJ*hncx=jONH4P`QpBV$AL8N z$I7K*E%;g>*1xwi0dEjATEs6E<|s%oMqy4w8e>=GQn41i0nK;4M+PRiMYL4%A06RB zjN!-^V=ZO|q(%5UrD83k*MAgcf?LEdm6d(({yz?~R`_hn@=U8LO1)ka=Es$>pbQ3uLoI=nY6>wtc}oGm$mE~{I^8MX7Am{%ii@L7bCQw^Ff|je`vmf@Rx^32qU! 
zkq|a7;R<3m68GTm);we_n==}0-NXjhkO^)Pzf?Adqqmzk@VmVvWG$Ot8|;YF(jl0P>j_AKj<#!FG@%O3m-=XUMfnY79O)EtJkazUu8NzK%>wEP4M)C@(`@iGT z&o#wc*SjTuhaP?$vD+P+sN{zK{!%#Q!i_b3)W>VYR*DFXU@fz8<5E1SOc99ogxw&EMbCM1SP_c zE7hu5v+As)wW6;-gH@(NNjManBW#ciSXk})qArZns3#+OAE8V z>6WB9C#P-%Yu)wQj!In9{M+>`6%*VdY9lGtKX%V)zG$DX_w27}v|FZBMue<&^zOU= zKTE{~w}@Y=)(iL1e9@PlxOz`bqmNZC6>Hu1Ye9+izk*C~i}*fH)JiN*MAgcf?LEdm6iRQKQykkR(P%z-ZxK;iD7&XhOA}z z4sV4M94SH!x%-?YOjrrs?BmL%L}=G}%iA}swjQ(h97K^z*+@#oT6PWo@DttzPZ154 ziV1EJS0<~u-4i3v-Kg5`uI+yOL)J!8D%P_8y5)fl5Dk`!32qU;R5pI}pVYM4MxrKz zn*L6ySj+D1=HG0TB^oRh6Wk(Ns?e^vjkPG1joQE6@_VIXEgJ_LBpNIg6Wk(xscc@t z6|@}wi9+jQqn!-G_RoOreFh`Jv?#rwp51mcw~N!GgtvTsV&^!{UA;uDHR!RgPPruE2IKkA(_wWnEl zTiGY3O1!=UMx5R6>5FQWKzWfb6K;p4N6EcUeZ}g3lW*l>F8&IeY`_}iE>ypycb}oSbG-_xpCvkdM*{Mh!%yG5pM5Q#$7#^8WW)rtc5hRe4^9c ztx&39>r>CybE&XlT1K>5jnkk#No1kbbpOEWO z{9<9ZnYbP-VZ!Zf+clT#A#r_z5Z5EEW!J_hq?dpQ&`Vg7zJpN@cP@3eQ|~Sw-}jqY zlhE7gKIi+JwZ1>b_lbm5l8yLD~qnzi;zLOoT?T7Sfm* zd_v}^2-gF16ibjcqUmFJ&%x$FnWLf>e_Z_><|x*(H}EkJ`h?82QM2nu_rqMv5+>YJ z4x7&>x9W&eh40E-o7O_9FjM=4cmP@tcmOOx+6ecg!Tge#P@f^K1p*$2Pl)fMHsJfP z1ZgAAnu(LMJxb2M65l6kI;c}$_&%&Z!6Y~?rhtpEQ zhf8arRPYmh;^0*ywRJ~|X9edWVHbg zIjseI@O^#afObb}J-W7Bqo*^`il${mI2mv9n5Tc7d}CzT^z&zaEq*)lGA%s2K^m(G zpODpr+Q4dpB}}*{Kel2S|J-TOch4Q)e-Bv|*%>UXgz&5gHn85gtxZQQm0PR*W!R)> z+Rj5}-65+xmN4O-Q`rjg#OdRsJ=-+dM3nfI_ zdg!#KIL|428Bx5K;hoO3o`|<&eS&&ohJXzaOzU2T_7d64@J?si9gMepePWc{A8SrH zyI&vqo)=4)u#v&NhrIH>@oMAWZC>?GXWHEbEuXmSW;gomdD&t;Yl*fpEz|>P8SpmIg$>&U&HGli%%}X9*KF&%5&m=U=vmUh|%Ex+bj!d(iTU>zg~jWSo4rk0s^} zfG~f>otub>&JlJL)JLnSxZ~*;B1#q$gV+g-_HhM*C5XlCTtz$PM&wPYjN6glC!VNpVYmKkx~!N%k~_QukDJsw!-em zZ7sU=lid&RtflRjpsjpDu1BuTm~Jz1Jy^no?fbZEj$ItoTy_n-vzE}ZYvU8rOO!w_ zVF?qqf93jaOaw+)zP5h_EuZ+{8r>rcW=U^nE!!`{9pw|)hkI^|fqF-=gbCZ9b9ZeZ zdwnBX=w6?9a?*AOai9Byj0~u`jtnee!gf+Y%h%Y~=x%NJu8dbKVZ!!z-6$&KRqp$} zPV4uMj908>d#@N1ed5wJRr!(o-8}P?L&xf=M)Wx3#f0rsyV;;)vn9D%lke>RUm15< z3;E*Ofc6R4KrdMw+h8rEk*`n49Hj*2D3%~?gzdw-c@X>UW7@95UWRva(st-E5Bdb2 z8w`6}_cHP+IQBkcK(^RLAB$$xNJ=f3cLSj(R2z`ybdJdcX^GV&~8!k$w(KM~KPaLr{e zBcIknsi5T(c&-LQo~z|qV$Zxl*poPSMiI}|F5C3`zVcj63Cq`>vBBT<33}R>Piuij zzCKYa&md8%+ncY^Q;oPkOv?y+2I)NgcDo#%pI&?Z%su8U^Uguq^GIDA#Lqb$A*%`4 z2(X&qyqG{5wm}DhtZ|g+D{CCqvZuOONBP9^`^M)l8ZfWli?YUH!k+MA9mTZw6xllm z2`$tdHhcolw$WA}cf3wdHKM$xWrRK3b}L9c+wRk~_e|ND@XkTnb8f6TeS)62=UBpo zJ);LL(H0i!Q~dxdVb-$u0I=Hj3A{OAt8JDrVQ&<;JpftJYt6BuPivv((DDh{HBbWk z3@ky~2zQdi_A=50YauNyucsQ(>PYMEhxQWeesEq$8)3HHsYcl^(NY<~T1Z37CuA2# zZD1FNB~}j*)*>$Qmy|93!P{r$2;+FSx*VUprts#_bEO`=q}P{!K-5UtO;S@|CWcTt zO}O{8k(a%*jr0)d9eDdYwh;;?|I~&^@MgCWrj;UMGpNMeo{Bt2^z4pnUnN@pXa$$zOYly9QgxwcSiw zphjx$&cXT-=3_#xcM!-|heflkeNR}^_~xj^R6zVpr|;?z1oF*V)r!ABo1`sD38KX+ z5gH+1pjEAl?_6Fm8flI>;uG?HUnTGrSBr#!sHgI6Vg0-N?yE3nBV;X$Yk_#k6^Vc) zOvpEq)rR}-E8Ac#i)(?n&KUxbwk}JU$h5B2Bj8rF_CbqUySZ=ua;aF$BCH^qZbibo zf=tL4kG1CR>%eS-wJgF4A}CiR0+ukrS5_?06=W@ou!3m183K^DE=!o;zMwV|!mjxf zV^$PQ3z1tar|GMg7d#^ASI{yWh$mB-U-ry`N`66D2%^*2Ny@@xd$t1Vs%V&n1{qeK{A!H~5q`UxW4RwTUfiV68byY^l8 zm1D7z*kCP-eu7B16^Vc)Ovsn!)rR}ZvDPH+iSpfOm0(=5=vf%A|M)R77Gh^ZH7o}u!IR4!Ly?eYgsHTj6NA6vB451c;pjh z{c$yyFJ@~EF(O-xHH^F5>Q3GI6>C|Acf*pE0 zm)0*8Ygs%x%&!?DDHThY;QINc;=9vY6nCn%dNMMERu9&)>w%FWLnNhQ2@}?Pk`c~q zgsf#f1|wXCNNli#3A+n2BXwYNZS9;D1sgvOt65pFF$NK#BzygF*SyuUgY^o2_Q!zUx56!oB%)2r7s&PR$;c2|Ph>5N35gik8A1%X zQn7>y`QEzP$c}KVWicT!!exlW21}UWKCLxR>cLtT6A~kJhDdC%gbBV2)JBHDRkNOt zt7$zSal?F~>2-s&OFoiJoaPO{d(DiZNzGZy=05no86qha zOPH`36~4A8OVe6!XDyp+5!*0B08-zFB~0)r0TX2#tYxz$VjE@%vk|g{3GO{=!w88r zIzW3L`jYv+7^8S5@`#YNY(0olRV2JwjR{`K_%>L}R;*~<$^=W8;1!Z@SYMQ+?dD`CTx^IL_x1q0nghU$BpNCj|f?dqsvt$Si;0V 
z7gcMEYp|^)DVS-y-^vDKlakRe-0Z8vCmN3EBNo^zqk2vOW zAaZ18^a;%dYndN}tCk@W8!TbM{H~;5CBzBcw$*kY_GqJOi%*X4W*CvoA6BZ6wJh2= zzUGl35*sXG!b*r4PL%b>weAy-ZKI`vH+fC`)dfWO`^!l8_b!XDdoDkrwH0geHv%#Q zAiaVtVWL-0XG0WI`*5tq-#W+;i4B%8VYa`$E@dNREwhJseHkLL!4f8{4KcTia=EQS z*0OfPH!U(mVuK}2RQ4In%fQ-zze8X-;mUfY3RuhX#R?)rWJ-m+n6MI}$Eb~juvH4$ z#3DSR=6`#{tzNR%gS9w*Vnrfg2@`L;;cQ4@QrCmEI9_9Af+b9t?aciVvXh zuOzctX!T$%%NLP{Gely8B}`ap;i+k<5*w^#^+4p|43XGi2@}@pS>K1XtfdioI71{h zSi*$$9*oCcJwn#99)rlk86vU45+YTSj)U+j3pT& zX)Bg6!DD3*{3P-EVM%_==zyo8cv@V3GV0Pp;=kj0bWDV0!aYm%pPx2B#5T&3G*Nz* zu61|MYeUrI_~X{qV|+z48l(}?0d0u1cRf%Sjag$_i0^_l;#y<~C@aAdCeRl^d;LlY z)5 z5+<fcBozYpGZZv3H<_m^B$9vB451@Qf9- z_gqbFu-4am_E#-LxycZT4VEy0XRNT{Jy%m3tc3_Y&_Z;e43XGi2@`nw3fgHV|&0_h_R$L zWNfcYu!ISC&Y&}MRKQw@=>#pzQI!dnFaggQbY`v%SPN07poO`%GQkoi;5mcN_zVGS zA@UQn;4@SvSi%H6XVBhyQ2Q=xA<`7I;K@`bSi%JMOF(-oVYR_ph=m0$M9iv8u!ITh zIf3@}OVkEyAwrkdqv-~^f7LL-c`<>#GZ1oP`*Ur;T8Ij3T9pZwFk!a6{Swqe_Djn7 znmt5Z%n(r4nzMunYeR3pL~XE^wHqQ;W{AWFOPH{}kgV>E2wBUnKO$gOBm$N&VSPGT zr6x95%la{5W>zEumM~%WX0qB&Y_OKykBBu|kqB7AgpD!Du0djhwQO8L4AY84z!D~G ztn{8J>NRIA8y^w%v?39(gb5q-lP9A|Jy^@ec|?n?NCYfl!sf2z32$P9wQRmZ6xxbJ zz!D~G4#t}blD+;oFPyb(-bLKnibTK?Cd`*eo~4QvXM?rOe?Yw7ibTK?Cd?=D-mKAU z&RXUNA&ziGB47y<=G!H22qpDkE%WCP54j?d_=(7i3G-2tH=Pn2tYv;FA~shf5`PzY zF=4)TVk5Nc!CK~D!$yX%Qt5k^m;sO%6SihZ+A6WZTDDF=TV;sE21}T*wNG-*6C12$ z>m6M443XGi2@|%4OZskNgSBkkhQ6C25*sXG!q%e6J($>FEn9!$9?TGl4VExrYwBby zNo=r|t)nrPWQfEDOPH{|fn-cfY_OK?2VhLh5QzLT0TNvX)(ctkp6^VxvdjmeS_ti}0OBm$N&(e2dHYOhQ2%Q|Pk>cLtcUVe~jUHaOxx(p$htH<7v`;OlI zkIkrCw*t>LCzfvM@j~6&{_ZPk&-HoQ*+_^kE%8iZVyX9mv+LR&(OT0#JvR4u2-do~ zsk=58-~ON9Azpr~spk8iCadb6mbD@5b?6rSPV2JPb^me1>@kfi6JF~w!BVe z&)9pUy9T)C-0Dh%c0E|@_zCWMpvPngvk|a_iPQf$Mq2@O@@=qWsEj4nR-BVhuvY)> zjzCLihz3i=1ottukq~FCaQ7Xq^E=nO`vd)z`?OE6R+qEg-G#nenP3SM+IUG%C^;p{3lx~Cc?IEOiaeB#0G2eU0aa|xP}XE#}`&*l(@L=ck01f zRZa0uq8okIuO3Wr8~Qcpdc3%K54F*2#8Y(xm$>gwBJC3_Y5dZTN?;u1oP2_{{{E!< z9w@G0Wr8J4@ObRo;L&-9W4BIPmviz7*1Bx7EtNp;t4y$j2_Ex(8(R<9ynr*#TXp)O zu-W--H9ddsvcmooaL-SzzvLr2$D%W*Jlj?Y&Q$;U=pQxx>8bYJt*P?=@oZauLhJ3U z_3*uj#W}V9*3}IVfXqh0PF^$7(r7~z3O`vw>LF5Bb7>JxyLN**5t^6#lG$#QF1I@JXQHe% zZSJ3xXx2_!H>Tt30m$l+NSNU3q&5++#7oCwP`)ts`*_%idL%TMue>O z@V)S_rq*AX`#DR%j9o#Ix?atMf1RjQA!~g(7IOyZ72Z8B`ugK+a9&JQ9vMWUrNWr8 z%X0W|Q|sTG`wP*Ez4S4@9X+-F{P)*n3Bg>c*oKr!3DkpgafKetYShxrk&asdY4_vWit8b*t2A1d zTUzx)yB@5yRbNNU{c3HN5X{-&yqJ)*mI}0QgSE=_2&ywgV#AkM&9xQcny1(2oUh+L z)>bhQJURU2{F068Bzhgj*Cc*LlE&Y?x2ZxS{4cz79F7!$dPLd=al{k%wHuDAF(i)3 z5nsVDCgeA3aZHXBK{i<85pf)rlno(S%cbM*@%ThyL!M1YySpeJ@(e-Kc)vi-E@?U& z#Y6VIRf0E9WN$i2%lTnVXQOzS7WS!)Mq17fE0M$lkT!CyD^CGbf;Tqg%$KHdDqfyI zXxe+r0@ovCEjhuZTJgEfD@e|gw_QQjl5<{4<1~4O0HnQyB}`;m*K7pb ziq<}8QENBveIKnkYsqP8EmeGuUeg%@&n6NH6Y|7F|L(nuqc&JeP6n!091oyj0?$|y z2@`x}rJ1O8Sxe3mDjml)&~)jkQWoyhRJiB#0G0wyTzyC)kc=!yqK`Q zkUWn{Y_OJH|M+yj+Q<+|zk-Ab>!HbgZZ`1LQuD&o7aNK2ltfO^>ff^`@8uc2O0efG z?*wQ%J_V!c?8$q13ZN3ar6X@+YdStztZDBJA?@v~CFhb=D?YvK6G=VfX+|PpLY}1P z-@P}T)COzGX=&ApV~hAiVuSNyg8Q*3(;dZHa&BAmjbo%}I(f${vB7yUQThJhYinZ{ zt}~DMUa6814q6f&MoSXMlhJgxUvXYc$g@c$lHMN1BSXkq62C^Z;)ih>jYpZ0WPHN6t60=B&I6{-A6C$y}c`;$v zDLY58mR-R((v;drh{Oix#f0^@%*+|Gmi4hXVwT!Sh{Oix#f04}iMNyxe6Pu=Z_O88 z7|&gzO!ql!$r)}X;^<1dEs$H z2Q6*#?uRhG-wp{A>;yRhs(Hcll5=pH#yL3l)cks|mYkwf zB0hJg>EwNeq*R<26Y@5t65d-DT65NtbAC?Cogegx#0KZZ1h=}@BeeU2wdA~_<{O`b z)O7NGiP^xLnTdo6zD{Z*A>_%r5_lFb&+9cEpU%~EHfD|aySRepmtwRtuQq!H`ChZx z2jd6dn_8;S?hn>7KQWFWq@~J^SDY6U=G7*nsM!cv%RJdQ7LwXXh@`DJFDAGZyn2MJ zWga=^j|`F6U!oc7L#z`44f-8nuxbcSFvL3GSh417>iaBXW!8 zh5JCxaB3Ro&-uPlBD7MmmYe`pB0e*!>Ex{!!JG}wiwSuzO9}7XgpNL}C1+GsD?a<` z6NwGZiwSwZOo`@WL&pjNcxxmX#2meul`D3VAVMEt2@6 
zR`Y;{O8M5~WYuRcdjzFU} zGDNcefP@KK`(!;C*0S|Z97#uQWC)yaOC(J2sN|Z~+KRPo-3E^=LnLj*5+-;YQyU=i zNbT|-;{T5;jUUpc{)T6JYZ~t)U)Qy7lCDg!*0KNXr^K+~cpt?5I2)`b-m7YTeo&kG z3=zhq3R!FPkp4PQ8_h0g=4>RyKepRJCAhY-qoZkD!FfA&)iipKDC>{b1N%OS*6Hv6QPb#G z6^Vc)Oq{mwP6_SV2+rGfTb1lNb>n)u=QX|jh)wFRJfef9lX`@9Jy>hg8IC@Ad9y4b zhTQewyqL(p-BE31uLo;Q>f^L!o-KW zcUC%kf3Vh-MZD2Cu7vv|LnJm>!UXpoQI@8)>%m(8dLHkjjw|6l&k%t0{$L3c+}~gV zKaP0*sckgh!An-ui5I5nMJ-p=O&jh0e#0Sa>YTa}taZi}j+ond&F>JL7ZW*+967F3 zdiS@K4c3})oU`%d8^6_6CRoD6%Hgga&v$Jkb(5bku1CmPOD=IX{?@-yeTD#}t;-T7 zCOqV9+&ivu$_8tl`-!u$^W}}}Gely8B}_bAz`LsBO6Od)Vaf(;wYb2QB=_ou^%)|u z!4f9kZR19X7w_39WrMZm9_&ie=I4#-Gely8B~0+BB+7I>SgZd@t|Z-?Y*L>g5*sXG zg2yq>M)1wIZp3LZ`qMh`&a_3({(N!W^mE*pxVrd7ok!qVQKGf+`)*7;_{GmF69MPN z#H?P99yVb~$_8uQF~!yVjyu1ut4IVaVPaVqNB=zjyOa&qI^bk?1s|UEZCynoU$8~4N(Kh{+w0+ukrqmp+$0@j+m)ZI}hUA3aFGQkoicpOt3@*j7f zpR;Hez2@Ifc(3k?4&9UVc{{oE(uME(H4j>x>1N@-ef55wJc&_a%^C02Ej!S8087Sv z;1hV?C(+vY4^Hc`58kg#1e_NWb$dHs;*q`|rfjg*w_myWEMel7L)|<$ z=yLh)8-BvLtwPrN<|{W3Uea@6T}2{b2@|_r@80x4~NfI@I0shxS=qmmyFvoefyR1dkYMBOwmGY>-}aj54yi*EB|-%{O&vj7my` z*1D`Udb-m>sWODw2wB3!+oujxd)d}yt;Rn&EwpZiNNli#iSg6-P#f7^!djbeIY710 zOEN@agC$H1n&DPR+1}1tn{C-&ZJ@Vjh{Og3VHdw+$>jl+nBRjUU)>AjSl3;Al5QzMxT%+Obi;^Q%kt& zhxh74k8H5kFXuX~dk&vpmmvVD4VEy$bE0p9wdB1eEy?ON?|QSS5@sXdyqL($wF$v% z^6&O`y#%Y&%=$dEda#!4Uuvna63!5b4VEzR*H4^1tf@trT9>t6y2;sq_nILR8!TaB z=cSI$df}|K>p$In4lg`IBsN&W#6d+zXIB%f_2WhHcqOZe43XGi2@_Wx4C}|w}23DydYh8Jpvw>A=hA4;JHhbsqeCuJmJ&(1A71&!fXUAVS;Bn&ql!a z^IzvX{~CTY``SLiTJm0aGL}>(Si(fzeLE|i9TQpW4}W!97!xZJ0ZW+R6@XWdkhQ)a z;Jki}?HM9zU6wGxYlb9tbR0u^NWn#$#@U`Bg#*qXSPBB9YvRA-8@Uv1TE=%@%Ms9G z;&b;6>EN;A;R7YYU*U~6PLq6tYNT<}8)=Ekr+-I;l^}?T&LZb4i4Me;Nf|v-6U@fGf z#?{+&{>v6z4%W^D1+6altr#29Q zCd0OoT?T7Sho2iKfkWQyYsG^vkmZX(J>mpR*y6t%4vXLL*p9)1u`Q z&n?|rZ4}?wKhF}RjgY8(Y6B6vf*>YBBUlS*X!*n+*Ho#EZN`qxvjk})Br2cUK%}uC zh>6e$)7$KCPE`v3u$QiMBk2wsf~qIlk+S=+6altr#2A5E(l^GG=jB|hL%q} zf6_+-(N^zWctM^eNE;zh`P2p?@dZIlghsFy($Mk=$v%V)c(5!%+6altM>e49{yBoR zIE@iJDOIfyS`W?(X(J>mAGMX#1BBKitpx&YdAq5UeY1p-<=A-!E~ptrLGX(J>mpW1NuU`*)! 
zk=6nMEuWCPHbPtBu4M_*Mo3gXwc*B+n9%z?tpx&FJ|Uxz+Q8_;5~PigsC;Sz(f)M2 z0-@toS_=fUd_qQ1wSiHTB}f|~QTdz=9ou6<$KAA+5~Af3G8?E3%myq$+6altr#9Ri z6%#stq_sdm%O_-3QyZAoSc0?>5|vMFxVbhabRJA=fq<4z$n2~(FgvpZX(J>mpW1Lf zLrmy=p4I{ZEuRoCLv6szUNYOfVadFq>Yd$aB9Q( za516&Ray%Kw0uImKD7a_k0nSOAu;dNhVw;ZLjAa~Hl!69j2OZAiX0UqQb1=S$5d>)hHjuB)yU_B9Ue0F-*NizKVhI!GOW?~M600ahzy|U){{dP) zL7q&+5+=+i!WT-^r-mm3-$x19K)&V&LCYtqwpSl6e6ebB#1baVx5JlKf*?)62J$t3 z4q84zo@K-mCd^00S83FzHeXZ;*g(GKmqN=YP;2)uj99{i`P$A#nt%;R%nP?Pd}>F~ z+92Y*n6Nd2Yb#nCC}B2iH32Q3kk-vf$pTytmM~#!A9u}TLa$(2%dU-2#4E1=y@VyU zjzYe+hI4&4CbVCrwO|8UJ|Vqb>w)`&CAJ;}VQW!$55|PvQE4sMfR<0lU7JT+;XY@H zt!qKpn%a#eF`*+vS_?Ly1sl-v37MVM2Ig~?*!~p=+dFeULrka#kk*0?X!%6Ea~HtNV2SO+ zfv`P5=ljG2d}<{uU)vpomQTbxkl3>3_p?+doOYMm->J#F<>h*xX%M#n)MZUbZtp2Xn!wAclcb$C#_qh8B^IuI~ z6ZPo%P5m2hw5`AWwm;Wd8h;OhRr+_;4duV`FU+$R(zoBXK?!t&1R%9xd8G&`c_kZ` zuMmSQEjF@**+?X5!s;Z7T8E?_NxmRV3pOmR1R%*9KQ;QSAdW{CKR1B?$xjE$+8}z# zDrH3J%{Shc-@OW#UOT+B%?B=xzX!o0`Fl)+NI|R9@B>Sa_y3O)_k1#tqo>-vi%B5nhIdeyr)@@ROXs}dFlv^ZSszZJn zRXX9VkF+E$=8P(BuyW!5C>3jsxqDP;M%M~NgQa4k+#+$Q5~5Yx(WU!OM~~}OK>E;Y zKmH%3Vy%}q8C^Q&U;k~GNNkkby;p&Wa*L!(g_{4i(Wja(wEo_Gal{3 zj(TpxFSR7-eLG(E#s4T3YoYH}{p{*gww<<7mh>txQErhmfzhX9qp!3ixKl^Gfv!ile?(;A44nOgGs4atLdVl$sN3YrDCGoBI#1~|9Ssp#QA5l zp_Rw0q*Sc+<8DKf(Qf@p#YDM9s8k_(2})(7^ABI{zaC>dYawkj!}^tqiE@jiONH{< zOon>=?|G25&~7%1u3xE`D7Q$uRJgV_>*JdLZ=Zp+(8tWPAuj2k=xkS9*Dq#?`G>ch~e!t+AWU#8>a#oa=w; z*l56`9~76&-a)mBHC5G3A9Lxqj!fIw@0?O@-;>8iTXcG|xMs`FO1wR{QT16fJ88OV z2A(dkjbU$8<<&-nntZi~5=S4lx^VSg12p~1 zb*odvM~AE}o_zG!=%XoZa#OE#*LmhqYYTSGv-NmlP^0KPX{+bvKP2xQl&xEjK`Xp| zfa^U?+y5r^q;(^-?#B0hHsHA9oFv_%J|V3eS?h91*aq7Uf<{*^Ek5^`vC-<96J-al zT&nO=JPps4ChYcW;qb*SedD#i%HQ7j?DOKrCytG-IQXL6Db1av?xi&aCOD@c_-K#i zMbu-vlLqHFFK#!V`0|1U#S4V^r1QO;9lty(%XyH+gV~G1A4oS7`O|9p!3+y#pjM38?AU{b*}p{ zSkdQ72X3{hzy#+M1kD~lym*n6>V{jEE_x$%IGuo>PE8HQ0^r*`fRgs zWx+W%%vHzO`E)KQ`LrxT^fnPw}ONTtlCEqvMMeU(fPq*7Ou^j|bkAYT?mk+`FH@OnZ%LL~ncboj6C(d76 zmFK*;PCg;^7-;pFbH|_5hF%Y{=M!=Tb9M!JRAbsFWQ5Dv2*(l|+cCn~n2(V!2&C`k ztnXGGjW?)VZ)bvY3WD<=KRk!NJ4t%UX$N*y0)3aqV4qmr_VOHR-nqx(99w5PMVwX2 z*|^IRn{hDi+DwG;I0)oE&)I$c*bm!lsW37y!8yqYchxgFyU#f;?$Hd8ayUmS>H~r|+82NM*&Dkh=Z+j=fXc-dW+t}s81zM_5MmWxk>*NzMisoz- z9XG+vmY8waUWzzmZO-N>o?n^v37MnvHb>cfj=6|uYM(F<0CN=2`b_(TcuRTnmRMpX zgtuh&;6(+&5~;_DGS_}SrA^Vw-f8#-)l6_sLGZNHW3u$b7v?`y%XwL;;G_7&=2tAu zJuUO#S0|iUWQkqDm%rSg+FE+X47|y4y7ZC_q?gnja#7K)V8`~2s+r)NY zRDb`=;3DV6b@GWJ(n}@?ab4$|i`Fx`pWCRK?fJxxa({dz^Wd73A1K;A0l$rDpE&i( zXS82UzdtJ4J+ZPwRkhtK@Bn>cnT#bfWggt|thq%SKj2L=?GvwTb$M>0)T6fh;-ZZP zWA3i1wow9psZWT9oHGxZB{n|7L$;9!o^ufNkWusrnFlv{b#>83&O?5xs%C<73WAxB z9-cc}O4ayp%Zi*Am(VA~3m<4+I7@8)fERA_3cT|mn15*Bfh}c@>Tz6EWb;SMwvDTq z;GBZs{?&^I;+j`|6-1mD*U2YjiV9B?V(Hqd$mS*3V0%6>X>8wGlxjCAmCeW~JJUWP zrK+`3*^I2MOKTCIkn2&j>tVhKt{T%mAuE=mtyoxMz83nJc}}j!NKY(UPc$zKeU}N& zDG2s`^zh<2(pK%{nw!6ao@joM>q})1CkUEseR&bRJJKE}d8Lv1mu9Hv5m{_zi(bfYP+u5E^$ZSxw*}&E~ zm@}C637HKdn+#QLoOGDmt>2gkKg0+tN>A=#~n>rgAB7`y2gSC#Id0?r{ zfC_{F_;Jr`WrUHtsHVWg&O07h+VSd!>QR=|6dJD^QJQsU1skClkbl0`*|&^{+lOtZ zYhJElO@RsQk-3&m6Ru~t>-_e^`<0#<^j~f1wX^ptjcJD`jcMY}_M=Omt;O>p{zg?z zq1*7$rJXLu-(w;~BCXZqoYAElTRJ*J2;hjTpFg01>s(W~tJ$#lYNl;)&2QUlSaJm$ zB*aWjfo%-!b3oF^$~mQNu-4$N2b3P&<$v|Wa$YqBCQf*MXsPLKcy2^Cv{ZaYA${Oq zKGv2lU#Er$(+cn2uk=Mwfq*gjvHRRzb0*5yKepijA@1qFf9Z$s@jP3u`DOc;PWk?W zxKx!!2DH_R;(MxvmVTj!OP9yWv<=)7dmTML&NmrRwI81_7=NdlhprxFE%fl7Jspjn z?-5Bo9(i_D>7}lCTcDyPFVdyz+xVc;VZY&ttKZwFbw8*y^=CX$ zPM4~jS52X+*FmL+K803IAwwt%3EP-7e{|`nw?9kSOWRyPCqU8w_8M*?_g)d2d)e z%dGEgz(jd`OqZ%WF4YtsJ8Woj1NUib1<*g|&am?cV|Btda zf!Axg{{N2*h#?7zASx+QQ$s3|n{b~;gr+S@32lX%qG-%R3^hE4im`?mT2oM|npKf| 
zL!Rg6qD71`6GaTESq(A7@L!*`&u8DyKIcB)pMPGzZ|&b*>%G@L`|Pv#+H0TZtbAP& z8%~_I-q!J%cyQ6rM7@_m|+}pP9 zeCGHjb9*fy6UV3)?iy#&=6+(AWa1blaR0-6Tx=D_>$8W4`5Ys^yfVl=_bXQ|JY8W0 zK#y>hDB9?fOdOLETuX`s{KPlk3$22e`Tt(kbj8s*81vXey|5A$ZD2LHHk;>TNeRsP z(>BB#X>#Q?+;i2!+|HG{L;$saVa1^htV??=4Np&ND%wyjtZKnG?1l&?lDfh=NMhLd zopLX;c;#J^iDORMW2flJbX{MuRjRf7#GRtIR3f@%f{pEvzM<{SI%Kc0&|eg}Nq=Nr}Q!TMJ041)q9{C;nfar9S6Mbor>o zD_4&dI|x4WT{RW2T(z*izJG6+B}#-`Ij?Zy7$v%9zGAC%#(`%+|GK#i)q-F0|F5r< zfKL?eX0cUWlHkMf{x@$`*W9P`$9J8nyPoYpy!!CKbR^nHN$UmC75xagrEwdI@e)b` zO`>^(MRyR`%W~!HYN3o0Zvc=<-+cUoU6&YYu!v zX!gFp`vy;q{Whb|Z2T$#W&16N|NVuvd)fbAe(&srYM~6Rl(-@PZTN=a8$z)zXpyjA ztN7omW0o`Mb;{Z=&_`VEMp76@pi zME-kc{O%6l5Q=r7Ou~MH=YOBDzUr>){(hu`Xzl zuwQNZ-#58wO|y{`H4>_YGPF|S{QO%ldxvib#k!zH!hU_~e_t^hVm5N3MnbhvhE_^^ ze{lF_&12ykLa{Drk+5IU`rqGt9X4{JMnbhvhE_^ka*%&XCwnY>Lnzh-EfV&dVgLJA zzgio86^I%M)j}CsDG^p0th4?_pja2QNZ4^JppBaGc5p;{5n)j}CsDPdlQ+kls$5|l~UH(A_9@O_GeYM~6R zlrV3}ZNOVn3Cbkw+cIt=_;5u+wNQpuN|@K@HsJNC1Z5KTjU2ZTe9t2WCV1$_dp%8Cofk-@(l8Ilc(wHmF5{V`1E}3(SVxb0<^_ zWoV_uKKXY!Uk+acavRhl!7)9(<7Kmv6Eza5g)+2KB7dSXdj|0hA-6#-5`1bO+4niK zAo*-`hKjhv{FP%V_9l@izH-{g2W zd_%}>P>TfDt}hPiFdI2hBcWO-Ln|fnl`}IB8Q&0c8`L7fHTbY0_m~Y?Yn@Onl%bUp z59HrSc`kfI$Zb%I1bvCA*R`3AoT!mdEtH{^61gXnneT%y0=W%pk)TiX%(r)%4e@=P zP%V_9l@hr}mYENSZwR>!YLTGVxA7)-m<{paoKP*4p_LNX9~i#N`Q(Ww;EO#%|Qp%U&tP%n2y;hyINyl^L2FLy=IN(mb!ZUf^= zB}#C27shT*V6Wx`Y@lB5?x2+tHrm|=M!QOs;4U>hgE`^PM^Ov*pp_E#)Hcvpc+OSA zeJu#?w!>VK6Y#>FV7=UJLn|e0_Hi4SeN>_ZV*_DM%n6^bidwJ-t(34?)NNoERSDw^ zAQxW7hF9E?=O<1FH0^j9yg7q>!1Fe*>Rn2W+9aM=D zj5&q1HYf01PA6ak^)k)`t(36U*==AwSBVmgodusEC-7ZPCtw5hGF}F)lrS&DZNSS= zi4u$s2Hz(q+%G9=dB2(OlM?1FxefSNDq%bk^)kvCe7KxI%+d+g%P1$bQo_7Gw*fy< zB}y<_8+_56KK7(<5lsuCqMw%ip%CIK6$SEJ4;u^|8U=eGCk)@oV6 z&FdauXE)gvDCaS2WHa}_hb-VZQ6r&RC_^hHZpr&<&h^D$YL6Eza5g)+2KLgx0WN>C>8 z$mu_D8*`V34Vl}WP%V_9l@e>d7uKi)?Ay^QL7BuuJFew6cAbddbLK>igleG-t(5re z%dpnASe#lVD3f?%4C0{!I%hq$&}`&Hjf85U46T&t{3`eiS6UoSB`A|vY1?(&#y@t3 zjhv{FP%V_9l@co+8+@O?T0BuDD3dtq;Pu?b^e;a(8#z%Up;{NZZk0yc7@MnbhvhE__r zZ-tnp#i><-GKrrK8YVW1glcISqa?POBX`@_ZG^tE>qrRILK#{qVZB?i zmU-{01Z5IuoHxR4gnM2jR10NjrG$+Vw}DZj5|l~2eLJ$64d@JGw@9cK%Fs#)8|`ib zqg^E^li096%58*aut=yD%Fs#)duprbD?GI-L7Bven>KYDVJ;~Ws)aJNQsTW31E^hO z^OZ_aCNcb|R<{x6#3G?uC_^hHei>pJwae{%P$ejncwm*P+X!=ekx(s^p_LN08hBq} zHBbr4B>J4Yx!VY9RFP0El%bUpwyL=etZFJjnZzY?$GDBK))ooXLK#{qVXL#-!0N0L zlu7*kl`Y&x@EMAPYM~6RlrS&DZNSS=3Cbk;-!;~41mCAfs20l5N(u9p+y=ZQm7q*w zyY07h8^MPw5~_tVv{J&nzWja+si*{H5*ObbYy@AlNT?Rd&`JsOUfl+~SCybl;$hEy z7X08WD-x=OvOUpTqy+OvJu+*zSt>!91Z}@}E`DFl{81!S3uS1fM1JL&dB}B@@H#-S z57+-betjr?J+G@o366!n*TaU~b0=T}^>UOzD|e>xPuwakec3D`ir&_WqnDe=er+qjdT`k<~7CHQRq;uhG*i5dyk%cmAvDe+kT zjo*p3+g6DZoMSE@f?r|hM2!S&pkB^C&`OCb!)Ku8Wi5dymK)swr zp_LN(Or6=>-cX4WobxYP2pclDI{_Q0m#YD^QX*e*GFzh>Dp7)K*W7LK+i+QON>`vfBQ^!+-tE+x2U2z`|k*c&v8TD+!|u->hsWw;+I;r;{la_^Yra zz1*cjD<$lyZJ@94oU23$?y19Ek`wU4oq!G0%iT7#QbJ+?4V5Ut*g%*QB?jOGY@l97 z4WN}063b|)LY;gn1cm z173zo7>5JF7+~;ya>D(Rq84mGD<#ZZavSikRKj>72*xsl50?{&SvtXb8RdjlN|@K@ zHsB|!LpAjT4<#N^GCsZRf!TBTkgsqMFKWZuST7_C76>n`q2{E z#uxbg?=dGUuADhp`G$yVAugWhWECRMABDUC$jKTdjJJbetUu)Ql8jSMuwI@5Kr1CQ zXP8Qq;F(3t$?DgEEMrA2*voUWdTWzRX2{b}(Ykp40WF@r#GI^dg9Pj4c}t#?)ommh z+3@ZMa{{V7f;FZ zoUC3`k|7W8ejq1nlqkV7y_l2LZIFNs)Wy@jJSVHy6o~HLr=}KYlzBE8@(*_RJ~g$R z2sv4U4b51X)0BM^E1DCT61 z5+(R9MUwrLIa%EX>f-woc}`ZZDG>0j>|f3PUsdZ;f^Um-PoS?L;d>w~W6lqRWP_?| zT}trXmU!hJr$%nsq898yDK zHHstAZIDncl%bUpnt@m)D3jpZRPl*&8+jJjL<a;0KeS$Gfq+&@$QzBw$?7(!MS|}}#+B1;U>($Y zp#=h3DIsrWA}6ccpcVxC8wXr+W?gNpA{Q;P)OVvU}R+kju9_44g# zXr)A+KZ+Sv{W_=x8z}Rg+u*|$3DrUwS}7sfpyK<~)FQ!mhl4L#BvcD!Xr%-r!g)?s 
zzYc1Vkhh~PUQ#4f3uSvgk(1SHLOH%2jo&CDCu@|*`_#y=>VFS;u;iU>Cs;4EP=;1Y z-XQ=?vJ!3MNaLUV?xLK5^>SQ6D3Tp9BAX->VO7HmK(CGuascy2(Ia7G2eIX~ndl;6rAC#&BB zYPpS&lhsFilCdz$bFxMW*AHlMRSS6(HOFC53pSvY5}K<~B`A~N8XWRLYOcnj76@pi zgyx)73DB2J-aQEJCZLrP*1OoTFej@&AJk$maPJfD zc}_4VtB-IH+!aA9C2W+q4fBxW`_$Be4V1Zu3u8AYn3L6QPzwaKQo=^N+sJdW`gKr? z1oyS!8O#ahWOW-LxJ!jrO4w7|KwrHPaNcoFf-$GC*5(9rvbqfrj7C8# zC2Vze8+lGvw?Qq~K$)?#;4|a|bF#V(YJq@ON|=}7Hu4OsZi8AR7y}HxPfjo=tJ?s< z=peLG!n`H7k!M(S8`Odglo`tmK3q;PC#&0_76@pign4~#BhSg|HmF5{G1=gY<^*%H zx(yJF)

+Hgr>qo{?*1^}%qZ;2qh z#dx}!fEFaKKhO>K*tts9<@;i|BCQ|@AtZwCr@cRo|7WBtk*7X}(OvXWIv^W9PMZ$=(uc*(rN#inH`eA{2zZwZ%ky zMt^R0UVFIgEwPr}3204mzo3!`1<|ISFu^wpLO5)Wo!4%(--tSE+1&f@o7unBW^XA?&o8o!551zgL~L z?Cwu%ihD$rL@0@CDExW_in&LiGB@qguO+8_PZ$QZni0t%r zt7kv$U@g0g)tcg7S0xb&qD?(vf^Tw#uu^vV`uo;d9js+{%4Ek=-X*FeLP4~tCrt2- zv=G+KPU5=FIlF_k>~36ZihFjIL@0^3nJPyUnabE?G|hH%<%-9S<9b4?$a)aXiJe76W&Vf zHf}F#yWy9uvzl4U+cEA1FNkPMkrxx*+U-{HUDgM~bI*R-%v#Mpnn=goHOOY27K8o6H^+RlI z4;Q@LRAVh4=W%a-K}1`MyqNIWz;4}Va};Kr8f*D{h5P>tBHB{q#e~ml_Vz$F*J9?Z zv6j!fc#mO0L|clynDE)z-h#+t21Ecg*7EoP?_(^8XiJe76CP#Q+ZvS{?`C zy^;kHZ7K3%!lNa7E9JxNy!Ij4TVgGb=kR{af{3;hc`@NppS@kP~?u2{?7U2D}A zQ%Pt`WC;`e=35AN{HW)($qjM`lC|s|yjE@bl|(2AYr9PF8+;m5>kbS0IcwRwgss|x zD+wJLSi%IqiKsEP+Q4|lTJ}z4tBypKgpP15VS?YN3_-?SwSjS$wd~!_Rvl3*2_30f z!UVq=sxh_N!2H2l_Ks?+&J2}=&NwV#g5Pk}m|AUM9%LxJp82PL?piZ|cf7 z`!})Q;K6*(TK3LxtIpJwghl`?VS?W{mRm1&UYj5;VJ&<2xm6>MNQ66UYneAd#`5JvUkxfvaBRRL0AmO1i#6y@vfEw z@h)rGJM|V*R}vZ_vxEtLBR{e+i?!Sy)&`Y?#_cR&!fo4|NK{uHtmXBFHBKdgy2=P! z8!+LuYi}Y+OzqFSgSGtmW35(6=sJoeOn6(cH<4sLs5Y>=V=ZsTSaVhqx*lW+6W*rn zO(a>@stv4CSufwSm<(Yx%f>J%CC=*XJx@!p9hU6G>uf zwSip&*7ETYdl{94?lZ822_Gx%O(fYbQ5)F(U@af#u_seW=za-HnD8;*-b9j^T5Vt# zhqZjZ!roFPq5D28VZ!GwdlN}wYPEsgDAw|M7kgxtgzjIlgbAO6?M)<#8S1R%@dNhy zDhb348DV>5On8)GZz56bQ)exYgRo~=N$7qeOPKJO$lgSHZcV*&mpm)`qO9fd9Ns0W zBtk*ho+T3=_1T+9Pmi=%^i|p4Wi5|Ou?t*Dgo3cWS0+4qwLRn|pRqTQzL#BR*5V!L zm_Yn}ak}n|x$~T`J!B?43O5@d>>s|^Tbu&m?3Pai`~*UE6Zv2HZz6l)g_fn`a|%0~ zz?mI5Im3$xK5t>#&IO%dEu_tpi#lTh+VBYfYbQ$B$H#e{pIVpC?yL+(u$HIoES=8> z?OT2LmR@XwPq3V}Q<<)WpNv6^)8?)Gj{DRP??L`%qsYt3VZ@|xiJT~Q$Qj$MdMaaQ zoG87W*V##opR4)#ho7XNyfG1|H}y0LRv1EWlyj;!BWR+@NQB#ZxEjI5yMXJ{Pcx3O~3v7(>wj|k3Dgel^n_- zf=fVn8d@uEXgPs~9{w5e=xXO{zJ1O+t#i(krghXer$w~sXGFqU4~{e9qXDtKh{(Ny zCrmWprI*|%nU7bjwe1Ka+QNLw5qZS~*GOEBKTf_t>*|1wPwssG zFiU^G*QxXIinW&8)QIl8k8eywUNONn5_@&wCzkKUUmx4K$4i#}pJKD!pQYYb5sSk;~83eupPmd&kn1CC|NLttGa!GF8@YrB_UFjl^DkIr|*-3ax#w zp5|3$E6KfLtuuaWL}g2?^oj|tk=UyVJDOJ*8IEhQbY+jqy<)A^uQsBx*H(JP1lLIH z)j~hBd@*L6w2Y-IN1xm)*1B&4BPvI@O0SsU8d0yDNZbZy9QUwtq|UvHwalye&IZxg zJ}DH0=TsU+6Mr6ojD_~nBW?T>&n|MA{l?ql@UPh6>E7bnQxR4 zdBp_RNbHsOb3}dKCn}?*+$+}d9yQ;HEb@v8u94U)9}^L!`uI^9S>|4`mXAL3jb0$%@-ecAKgp5m@5q zUs%OA_R3F2iyJ$3Lx^*Vz4CL;;vSLRTWUx|Uik@Tans3eRz)_l@B8rG7`uzp&_?1X z)y0h+yCKw&h`jPM=;BV1-C>Ho^4kf;Efl-;($GfYcMXcWF?JWHArX1yw+M>cJ60pH zS9~vn?}0S5A-4_BPO;|XyC2w_Zb(F4xxM0^jNQwLy>j1+n-zANQL>SEkMg=I?t9qT zM2QeXaUVoE{Mi&YQtZ?r_R61gafiXqqe?b%ue@CqH!JLPsYFzI#dkEgMq;nLPZYNW z>_o9-BlpVtU~z}R&Ie0GrB~kPi(3|U@)vvM<418WZ)d_K8@X3LUKO_m?1Z;ORC?v( zV{xCs&TiGK+=lN!!Yd!CapGRG;Y7k(J`PqAm0mHyH4=N}d&qc#zSoO#H1>+MeCHPR zRw63BVuEWV_R3EI(As^s9?!gFBguMH!dkxfjy6^zD!pQYYb5r{&v7s^_<0L%B$aIB zUa^*+o}fRLh)S=R;2Md&@{>5+d-D@Bj3p%-xmT>^=U*7RN<^hsOmK~;S572uqc{_^ z`-vqRxmR9};-u2X`4ZtaqOrZ|%+qcHnqNBZCjLB%)3*k*TJDvXus9Q3z|0wW<#kn@ zT2{ZA+-mZ%C!HU^A@qS*MC^d+da<9DS6z5zEh{z(Z zybl&9aTSqFJ|-4tHw_}o+$$etiW8*;M6Z!oK3)}PUKLTFzPsdm_qN|&?C<*<7yeEH5T3-Ijuu-*(ErD7Ts1nP4s7buLa%x)7ltOu~ev+fvX{ zj_jR=R=NAZTE3f&eVwLDxvL~XL0Dc)Sh_6*oxg98;0*=V zDxc|eA@pt(=f#Aj+fwWl>K8WjjB~^3LkF;y+bd3*8WSdA!fo4rVpdluc{6Kyy0y+OxX{``xRCh2Jn2$L}3Z2@%NcF_`>SiBV3KO ze0(fU<=WClgh`n2Q519tw=bsetA!2!-Be>OALnuYu^^%?MP5w!YyeuG`N#U5)Nq2_ z5oRr)uZmN-wzLsp5+;1^0xfq}=IHlc!05emX2Ojz2=7lOPetnVAhTfa5d@>2jjgGvJL31@`m z#e}8(3DUj38f*FK0@_j~p)HXmOjz1mqP+8?<-j{XtmP*(#c6LBLf;bNyqK`G^&9Qy zxI>nppR<;q5;`)lgb7Rg$ROjD+Q4|lT7Eu+F{+Z#5soEHSlUN8y7yXREk9!` z&Y-&xxc8b7CSk(THXiHzq2<8*!CHP2hdHB?&>4p%Ojz1yoLlW1bjh-}_N=j%pYUPs zt0Y1}SYAw6+GkFQskI!K&sob)NsE*5E`&w^oEH<8ws~H@4?o-Pg>N=-Rx@k)St??Y zN+J}5<;8@hJ(8jO?ai#^XS9f^DhZ9qSi*#*JtC7hQR@nEB5U#KbS0sYB}gU3IXQ*IRLm 
zz}E&Dq3aLMiwUnoTjS6(?_e!|{#b2Q61t9J2@~EHY^_FZw}Z939b?s4Nucd!gstM3 z@YZf?PU?dltmXZ&xZB{X)Qr$|E$79A_f1b_csaf#XQsZ(?0*|CB8Q6-?PNmZLyP%`vE0F zzHC%{MUKDa&EF1>Yy|d%zbel;MMT0{{H^RZif$WBxl0 z_@=D?o`QW-mdn8se+Ky8t9#{NFw4(z0&Dpb#MfI(1l4Zwehd@ddhF}1Q1|D-->~Fw zX8LpTuZtCgw-V&#eZ{=8{!tQX5Vrti|8cZ%E{=1fDR#qeNsQf9CiOwYM02FS?)%|qy6Q4gCs}LH zejCuff$c33HO(!?eDeOm5+=M&+c(Ln90`wP{xw~Et)6pI{oE_o@^9nf8~P(AfmTWkYKXYh#i$Uuv5$=_YCB;ZqMdT4cZiBV>E9Rb4PPh%}A4MBw z!gI3kw~M+H2|rsOwegD)oRg_%PcUIE|Bf(zZ=yuFjmRq|O5<)$@XX>-jXm>XCW^eu zV;^Yw{A%s05y7Jj6Woqt8?5E?IoffF$jiYJCK~q-e;yd4ybq#xdF___2WxqsN53l( zxmPSyJd9 z2X+p!x0mfL+=B2g#M)P(&4NEcKh?tyMCWY0aoCoAvC!%Hxti@77^BZ4VXbS%8S#fJ zCp9L5pV1*NCj2DMb`97DYyE6BvoU#*GrHX5AR=&HO!!Hh?HUk~u-3|VBIr zm-as=KZz?5f%9Ub;fg+CtzT?sHV&P9PJR+s-UHygnBd(6Ek)upCu_}}g|`9QQrIXF zg4qbi5+>pu7O1-o*1E6BY@n`6L~es6OvL*>Y=gBPxexCTwxxLHB_g-M5+-<;N|dP_ ztaWFT*+APZ5xEVPFu}WRu?^Px*}~&lP){-a4Rc$;JldN(*aZWx?-)JZZaDuel#vWiL3ESYjnBdbI)z59P*0Fn>4^SUJSTD+ zoEH;5#@Jmow!vCHuH13qKKXc+6S)n}iwPeq?RFd6U@adXk3M{_eB8~6+y>{xgpc`l zSB-73mXGr-XYY~EA32fR;Jlddxy$aVu?^Pp`D*Y&yXEs>PUJQ?FD85rw%cuNgSC9# zJ^l5a^Z7g{avPi%6CO+0T{X7BS{^_AVa=WLxFjcX8=Myt9uwJJHMYT89tTZ(V#hqb z%8A?t=f#A_c6PgsZLpTdb8o*lIFA!^BDcYLG2t<4e!DI4@rt!PE`4&$;5^>ViQER~ z#e~P&Ig#67Esw8ZqahJk!i29G@{;E^Sj*Q5sH=uVzWzX7O!(R-e}cIU*7EfZo_Rwe zUq>M?CVUN-w?wxQSj*RKXuFLGmN4OK(Y#0HHdxEopXh^)36?P7YwCPt$ZfEeucI-R zG$vTWgzpXHBV2BSwR}GSV`5{1B~17pM?O;LHdxE|H88d}CRoCR@73fpPHuy>e18UW zRAYiAOvE?ILlKt*)*7|?MH*9Mu5C=Pgo*fmJKJEbjv;0PF+*d5B}~LO``HF-%{|X- zAogiYu!M>D4F$HrT9@8sHW0%#CRoBm{N4oHV6D|2F&l_Q8xt&HB7S3pZLrp!Pnr#j zsclcr-Z5c<^J2nn+wC?Y64r8iSR0fGKsdV#EMday&~CS}4c78{!y2bV#6!>wSCsUZd-^P~v=P2q!Y-ctdPIp}t=1G;G4W*f?(530 zG}TyQX(N2qhh4l7$^>h*rqGHB`ITe6FCoADUSo-+jquegda(4t62V%ndb$)7@@vg% zLw@|J5=$H5y9pRkLnsrh)tW*pCXUR0 z&+GBiXEn3L(nk2M2xf*5$^>h*rqGHB`4w<22Yv;dC6+eAcXu$ug-|9~OWLt$#f1Et zx!RCles5-prH$}iD$LX&lnK_7-(eQ5nDF28!mqNk#L`CaZd>uTY(cPAtG@3V6Y}fw zD2M(YJxeTYgxf~sl)o*jzeiuzYE7XP6IscF)Kv#dyc{6B4iN!Hb(Im8uh%ZLVj_Ej zL7sUBOPKJsfJioa<{8nNB42MM(29wyB?f7`9V}tO+cYBbsO@G%Yl?imwL>c=vK|$r z4|cGG3GbU&Swwv>BU)4B>%A6QF_Dc7LB^6gOPKI61}mg!EXfG@R+QxHqYt!VA{*g? 
zjEQxYFyUh*R(jExm=Uch^7T;^S}~E0)IrAfI!l=FF&`_|Xl&1j))e{rYyhp8$Yz`% zb5xxrO!(Y|m3K5pWkhR=e0^4fR!p?mTpK<+a7djcO!yp(9fS}vA}NU06#4q>46T^B z%wmS{j(jhJB}{lMft`>LG9oF6))e`AlmV@nxY#0@aF~4egC$IOOoSaBxp$t$J_RB3 zpycb(60~CC(>3hf*Ei&zK1-PJ*bX~WA!I~S5b}jD$=9PkXvM@&E%tj}Psu%fmN4Nl zDt6f9twtk~f@n>VuSc)YiU}Qa^?#B&OPKH|+-zh-B0+LSw5E`F3}2+%^zZ5w-hB=5 zit}QEciVD8%Td;{GR4HCwe;QB@VGn=miSr(wZOY=@+EmAl7g^&P0OB5Oh_xid#T#4 zSi%JFwrRW4H(AlH%39XOVnSNG+CXb(iKUI;-8StXYNH@n%lb-8NUyD<9O$(yv9uAq z+ot^-g!c2Ymi76V7;nFo5<42^KSCL{bo}WpjH>NL-?Ig}8(zmNvq7 zMJ)Eoh@>D`%VLz6koZb%AiiRWrH%029gE>IA}I*gvREr7Bu-Qth!a_2X(N1>%3{%s zND6|rEM|-eiFegTm_BqCODt^!@3s|h%N7J{S%e%D*}Ja+Q8-I1ZG_vlwSm7aTM(>e zF?>u=T{W}B%Yl5o4sDG?b(Im8uh%ZLVuGG|GfSB8wqR>Ddgd8nYaDMS(25CayUi?N z!rQd1IjQYtgss)QwL>c=s1G)?gbDAPwsxjIm=U(-^j-_Cn4qzw#u6rcjIlic8cQ<5 z*3LfqKr1F_Osuhl2_GwMFN4O!jIccbA4Q=R6EwEhSi*#l`L-vM5ear!%38K}5)(8> z)mY;52lDl~%l4LNj>-tj*Jm|o#f0oeX+OvQ6-$`#IoS5dG9oDm+gtK^7g{kPyJ{LU zU>}YpOn59|dwm&^6om8-$=9O{XvKu=4yq08C$fYIkBMy0G9!|Lu)RKygP;`?Bi6PU zZkFtevV;kb?QHKgBa(u!Jxh=JpcND4eQLY8%7n+Lwuk)fLcMFxTs^6Ilj}E--(8w4 zzsNL2ev#=u{lzJ~v3l6BYvfljE*-Si`q!r+{6(Vh9zW|{dsc`wGJ>^cJ^`05;;wrok+2r8Ya0^5cM32L`idUol@oY2sMGw$R$wh(pJVhX z5r9x~y{8XwkjU!_c`@P7$)Z>G%1ekofwt>4?d8a8H}P_?#7mAR z$T_J6Clc23nno)r5rSD=v4jbKvNk)bm8^ewEP|NRV+O>yo>N}CxmT>^5db3Z5>fIB zc`@N$m1~!G3V3It;S)?)i}yXd_`Q-sNmaL|!q$`>?TB zEThV)5f?d2)Yxi;_tmXM)Pp(AdAu6Fh=PHiD02=m$Q+p+|Y`VwQ-AgtdG`MXxOpxmPS< z!bfmhr9$0%6iawr%Q?kfu@ahJ8+9`+gBMo#1xPvUQYN$({#~$Uaj;i^~^z6CcO+39z$=?(k4@~`V+QOL)uSF)f z*0WM%Z!epcKLfn`Y-#^{URPRbu=a*@Oh79pmI$Xz6J@19ptRl8cYD7YX%JouA#^2B zi>`&V{{nGAd@rkQc=U zdrr^~Vf(dbG{1IgOZKbNzWagk!*)No4i!k4U@cC2t%vZ2cyr^UCpCXG?^jW4M++Z) z4}Le=eg(XEf*-?wA>Pag)`AUulOb*;*(j~<;TiBgjIB9sPsZ2OP_(rm5>*7+F8=N< zrc8KV#WSA|p|i8+)s^rz1{+0Og8F%K*Hd+8!qf2T;cvI=#NRjHa&c+>M93FUjDVJ; z2SKZn0Hl3{^Xftjth6EhyvWznm}MlV5~0r3L`%z&c_kj%*VmlJ-=T?;Tj@ct7N@;T zAq;!wSlzQ6nVwWfIiQ6)aXRyg>WT^0LK^v&iOeINaYSnndBPrQI+|;p$R%Ao%ySY8 zR%#FlYoSJ|OfdmvCD^Mf!oAYxod~8+kjkO?LJK(+wJZ78U=oPda#+dbnS)5OR>EH8`NjnNhYkD_N&W;q z?d=#|nGGUPS4yxJr=39VJ_2p8p2%D$&*E@R@37=zd75jtF=1NUcfu>n2?jD7^8c2v zX&s(vq2F~O*elA52}_%YqMrRjogJ>V>o&ZN#kNH~vHvO|9{9HyBO6)^Uh=#p3TQUG zeu!4Wy~+u(6?sMZa!pId^3R|9;p$apgFM_mOXE2~JtO4*jmSzqu<)u20Vr!*EMX6~ z&*&~Tgk&vCn}=Np(ZoO00NY@#F3&uHdPb0SZj~|NWe_edxz>-@o3%t6+jBzfSY5Fe zr>zc0J!@n8F6l{94koxv+!8Y)F@m+&_b!Bv3>iTsk88K+QDTD#*5bCr^@bYCo_W%C zW=-m6bo#4RzUds$bD*X#nEOrVeQU0*=_7ahTK?buS$*wv@eV4v?(Dgp^NzRlv}r%h zhv@$|v+>Nt?&&Xo8ld#Z_qwMIZ6vI9?V8&tamAn>jfpUL)at4={Gx8@8^2r4Y%JET zOe9&!18co>;;KqqKQWXEDZG`O{beE?xu(*nL#-*dk+9YtZ~RiVP`f3~ ziTA(ywsGwytabTmh&V>4yUqQsF%h^NOuT*=TIlHX+GXd+|Jy$+IX{Dgf7xHPcD`sn z&m&>2qv!Tj;t&6v4-t9A#DIr-D?M#q51B*QE0&yj@|LQ#+L{YBeu4>W^j%e9TP0s^qF3& zRrv&SuUPBjr`AzozI`xZub4Rc^R<*lpO3v_$#0vj7h#m(oRn~{0&8{5upYI@5*37C zr;P&0uRbu89Uv=*I@z!~| zVL1}kdhRJ36EW5{B%+qc1drhA6&%BSo^XF2xrmL_@V#UsCs^w@M;n2XHzXpjnBZ9= z@+$D0@zC;SW7Mnj8rw)%YwC$MM*Z(2H50d9W#i*WnHgfQ{{Am}w(#VW1wqVXddOvbf}b@8?W#^pQE0J&y~&JWQVHh!nkc ziD3)p>4(4lwWe3O{6AfuIcs@(<;y>peTFWCm%hd$gAt=k_S`Frcguu%$RkEfxF;wF zm#LBvG^w$K2}_%YA#@Kx;@l8km^!r1T3p&nA_2i#mL8p=Mq|QCf^|WN#zfY-@be~l z4x`g&dwf+q4<%SDveA`5_F5L!y7ZS80c`WnFFIcxVe!LtpL}69a$?pB8>sas&-t?R zsKGWnFR{s&*)z8?St&9b3Fo!OUoCo>Kfzl6e959E*kf-a8-XQEG_ETy2h%^!?G@K< zUUDw&oo8AkgZg1wy-K{~to8NN79pW_OGIviB~0+MQ5!koy$Joz(`fmelM;!0#ai6E zNXgez4@Xop+yr*Z*IHgd06 zt9dA9%dzR^J-;s#m0mHyH4=Mu{Y!{H$ENrE`K!(!e?T-jHhtruugf-auUKpMoe)`$ zO&{3sn=(=96%$+|u~)}!hS6Yby4+@8cJ}-pk@wj2mJ7Yt(lgJ!Vy(aa3;k|vdhin7 zc8RF;iV3a}^(wcq&N>*?#->B0-;H6I>&)R~LSX$lKb|2PSZF?8v6lDk?VqkFN2OOxaE(M>1s`kSm5Nm$F{w#pH%(kmvoMnd>u-nWtR-h`Y6H9SF>#yx2I#`mzwI|zeyy1$ 
zOvo&%HZY>fuYe!1YW=9Kf13Wx=v`{8CFe$J1Lp=YaiP3RU4SQAE zz1XqVako!jed7@|){^rmwSjY$m{|757dj?=a&Eszz@m?iH)9yn*)enaFvNR}`m(TCc=EE>YQ?;X^!*VBWizx>_F zHP({zFSUVFvY5C*egpKa!5{X0LEeL82@?_ls0~CPA?&yHYqM_J{GRm+SMofXkH77czIYz2 zC1-bP1J67rc3$(K=AZT1dHO5zJXpen#6fDqp83a%+}QlVj@$P;OWGA{$;qGEK--N8 zX;;nZiswv6yJ86w5~-;TYrDryd$IYkH!kS+3+W%MC1-_d1AQN#~f`Ugvx zkZ4J5SRa&rUb}4Nd;6iEvzDAFstt@KF>%M79<>X9dVTs~($87KghXd*!^V>Jj##y} z)LyUj`%cCy){^r`wSh4)CJy>&mD)pBF4W%R$z5tJVM5|ZwP9nTjJvgkH}BqrahJ8^ z1XFEbY>x>UcWdKktk8~emnBR{45v11Y=8RN?P{0ov2xQLGJmj^oO7xT%uz8R^GEI0 zZ#Qbk{J|0?BxY0_Hb*UY*xt2^|Fuce_c9N%mYkoe4a~JMaoF3xt6lu>f$iO89%M;G zoB&#O6LhYfeZ!%({#y)c!hFtJaz{gLAZCy_n25JGIkfiLnftac`|PL9EMY<-OC>C3 zkhrAw&(8gt5SOr)oMx#F#6B@0aY^mob4Rr!E@25163MC!#J(Xs{=?YXaXTE{^!S}; zHnWzT%Bc;+a4{kARc+MUNju^zmM|f)w%O1aZlUML)h2(LG#zpIn$4^w=aFgyv1m+8 zmGAY99@^H9IFThx$O=GhSS%{>ZtcZ+ZB2-GSxe3%)dnKun0QCNrPp)SOWP-R3?0A{ zCS=!HZCFg*eD1_r+p?E64Q@GM7Hi3gyV}6Hb4!6#2|U*dycuUeKq->3`>~swt&-t5YAoy=-T#I-q-ZOQx9~omYm(H z4V+HL#6Hq?d+w07Z#ZdI2TPdnHjUkSsjHoTUpuPzgH0!_^JNEX$qBF8z?pDNOqD+P z!WmDtKltpY9V}tO`zFp1XwW$M3#=owel7mD<29EgzMP_)5maJGXkLy{~+4DUuN3 zVdLiMul=lPwT*VGvzDAksSTWY#l#Sqqk8`^r@i}=yVP02gwI_#Ns(_fZq&Ut`@C

mev-9LM@Y2(F?th1J! zN2(2+bH>Cn5;I(R@Kx=P$oI%t!i2{XIByBzkmaT{|KInQH*FG*ud|k%V5$wAzQzP% zpV~@8+9md>vxEtciEzFX!jR|pZ$9CZeVg9c{FFLt$(g6xz{zM#>?ASVXG?Z#ml&?j z5+*#h!+A>xyDa_kfHBL?>(|mYq0U-zGO9LkmKqZ+^7Xxzz8g+|TE26~5+*!G#feZJ zQ}>gYy3Sg1;+oqi5s0akU%e1^Y>y|^l z?e~n7gC$J(n!)Pop6B=PIQG#mrvF=>2W!b$y4t`qj|r@C7F~DeCRyXuS;B;`ee9X9 zvD}o7TVL6+y`Quz){+xY@6rXF7!zaS0@(xDd+-xYugLfMSi*$w4cM3{ z<8FQMv?tn6l5v-{G(BZa!g}F8+ zPLRE&J8!?c>Ax}$vV;lWbF#Ts=JWdHPfl&ee9l^O<5#^x%n%cA$oKlbo^(soYBHa* zgbCj}vzS5RlKNX8-O`S@gtg@6q1r&~6B81b)R#SLN)zG|mN4OafEN2md{sZ?{Oj8h zU$K_lRa6^@;bLMt*|VH8>WU`BS1e(|_cASplQ^+HcG4B?h!a^$ZZxV5#G)}Fabo?s zVHY(aPGkuaz9(z3sKmSV<6gO_9q}$}@qThl{3yRUfbshKi+|n!lS3C4PwXbxe}8Z3 zKVS8Er~Tb;Y%#%FmL8p!iFv((+Iaiwq4o1V{k7UyUT$ewdUT33=M+LlBn81*W+Ntg zOtU@x2Ok?+w>L(t9F{iU*|vPmszk7s*^3FC*Rnfayx;CMigk^g0!aC)?lW*J;ip+5 zlI#pJu$J4vb1o6$n4VPQWHh(V1bb_xaDw|e_eai2iQFsJ;@(vvD!pQYYb5rHM`|8X zOEz+^Sc}KU5>e?D6I>&)S3KA9oLI7vd&OEj!@( ziKz6739gaYD~@+LE-l$`uM*bcD6>RVdc_3SNbD7_Sa@YovXOhmTD(pu5tUvs!8Hu!8Kw&W#ve`PhcnBdslV?BDRrx#aiB8vs)H1 zQRx*ETqChpK7QaV#Ydv-wn%Iv_lmW=Z)Z1BVxrP3Cb&i-uY!-Y@XAN+>}E}DBln86 zd>pJKD!pQYYb5r{=OsKrpW)_P@`SZ~&Zw;2O0SsU8i~E~IUKFsqk;Lhl7zKou$c98<>%3o9A`-7%*5X~f5+SbH9K{kQxO{Orcx}lmNZu({BKL~5*j|aq zyO68UxuXjZ| z`-f*zu#Si<*mI44v0a(~CRmH_Ni`-|!UWeyWFz5wGJG$G zbBYPp;@d(EiNF#j_{qjLEb1FkaUM}bQA#AsKsd-~VL7(>EM2v6E^2r44+Ov-arJ z52vygz9_fcpY~77#5eYZO+CxuyqNIxNf$h4<&eVI+Tg!mOnLxu?W}7b>0DyF<2C)C zA0NxVijbGQn#jJL0IiSyexA~2b$c+cE3>hB$5GqDM(g{JckZ&rtpBVWg9mT@07|>@?w5BiJ!el%ud(`-`SZZ@ zH+{FkPwlODhA6Rb&nI&te}aheR%1_X}N?7Z${eM#8>S@oFiQFr0iA->N6J`BpwafnQy!fi>`xVn( zESEg-lC#!R?N`h1U5-c}`>BEuE9EE11bb^PWkkYSzqtK9`O@5o^vCC4{2xos1lM|G zBdovdliMO+v}*SR>D@2*lm5Nm!0BC{AZvLVExbgCAuBmcn3%fh#bY-#wvn*b zSsz@a#2RZg3_P=(it0iu|N+tVUesAZQuU(<(d9&Md8~NBCyd0d@ zFMesnaj)H$6Xll3T6^wn_KsZsuEs=Ejw&MeD)F&|wPvmRCoRXIeW&I&aw0E>w{}P( zf@(yR^`EtNuAfgHyz~LIv76`KU$&8WU9nc{VUv}3v*!b4LadZWQ6}Qp=Z8Ho%Y<;p z&HHxn=z}z3IE-pM^2xieMkG$KmPdUtvG{evq#ww=(dO5hxkn-G1oxW|G9qz;wU9<1 zjEOtWIzVl#_1(fXmLTl}_bw^15s4G5g*19?OdLIDC$;g&&a2m0g0vHHug!?W3D!ay zV{l9?+^e73nEk@$HI^Xl1dqodWJKZwYaxxXJthv=dR?`V9I}0lB}hBLqjm@xkvPFx zNMkmLiC4!isWuL|d+!=ckai-T4KgBeg0+xFlo=DBUifhDS4Q5^vEp5$YY)BlT5Z|? z9*{0E@p+AwdcS;NifD;f8FC|j@=sg-dej{qiwJ>e$+g~D^P1YRHxEhANV^hfx62E$ zgb-W2(@lNH%4EjZ+jYV#q?iBK2lb~;KOnuO_wzC)CWrj_?#(hAti`LXm>B*1SH1rr z#5Z@2uCas(#EeVtb)ZI>@@pP@d~we#Dfw#R6>Gh;{@>~&wi=SYb<7K$F@bV;$$5n( zWzv3*a>RAD>q2RtWyHodR`Og6abo+^`=^L3W8%;!|8mb5DaQ{|4wf*%&q;opW6n8! z9y;=lj-AB@qOgnB9;$I$|AlXCo>Uu}e)GYLU2GiF@0LC-LL49jYdtq<+vW@Q8k(lx zbtOJLarnLCj=H1cAL12j?RMy418yHOG(C7;R|0j_U&^tt*x+(7!L=^mtX}e_-ggM` zx!7PW#HFtvI8@`^nAqq~ci#IuDaXNLgSED3-M3@iJ%^?}zJ0OFihhf$_q}%se;%YY zru&^8c!3n`}M6Y@{fbk^DpgMSD$?GZJ)TL|G7j~(BqMZ9`S2=*5%i-}(ylw$qC>iw`h!Bxd8*20>j_pS%&N{`!{ys^Ghk9+nP8$k$` zFu~6$gi}_Wc<;($qrVXB?``8I*0z87`q}N{C0(R)JD4b5p`a_^17CD3gNL`?pETn?~kdob@qzs5dO2qpPN^Be$31_ zzdu$9pFen1i-~bxb*PPF&N^Dlfk+P{XIH{U)DR9jW6kCt{(JR|8SK0MzcBttZyIYWaK>6FvX9jMnbz zL&w)yg0vG{>mmH#jV}!tcHL()&$wWM60XH#VodCE{KCz9AJH;%tG|zro*)z4dO|q( z_CL>hXvaU#_-LJpbsj}|HsHOQ5FXv;@L7jVA2;)FS4^z47SB;Ju|xOsW^KQ1x0&Y+ zn^BcGFFsR<@UMAsE8$s`+d>E%-f}<(h`rZ4 zrOx|TJYMn0C%=($TdiZUUtclf`jbzsvxG-f&MAbiKk3ymd4=IKTdp2gXDyyTVxn{U z<{dLTzMirB>f<7>nBY1L;ihqmq8!OpcZ{!h)$(~TCieK{tFqq~!h6Yt=y^cP32qA^ zJbQhjHr{&dMD@yN0QNl|+xKrjzhlt5J!h^o?}VrvyvrFA7u-;B^k0(YMD@ zb(S!}D}WGQfB88r$GXprsI!C#q%lWfKUu!%w9*$!%$WT9I%{>^fo@E=gbAeaciY*1 zc)(J+@_K*5@9M18buYXz;Swg0#^15`F46JRz0}64i)~kDt**QEjR}`9fi(V(69M@m z&>2l?Uz@Dm~aUbNaOD~rwZZ3 z2?wf;O^5%i&RV<=Gk?PKVghOW-A=GL_TdqcfGKmMoY-cPnVF! 
z=~5*ToIsE2O7N*u2#d{FLv4J$pd^RP~QrJpuEWb#f zI&0z73N~<#S4jjXU;~~o5ubVW9ynNS{Qc{->a2y6KiI(eU?mZpfDJ!Ubb`+fL-=CH zebvTSOD|sMd~rSq0w;=I6DH!*jEnX?QEhy(WNQa&@t$QR5uAVx zc)|qle#$Rk{)F9rS=o2rr^dTZNTVEn=7}`UJR1`@*>EjB?$=O3_v6O&3JH~|~*gb6-93E}RucT^iU z{(j9GYvG&%HgK*|NdzZg1D-I!ryQ~~@yxzzW2M?+HP*t(25jK`qml?tzy>^Fg3m?d z+aj+WrZ%qH`~7Ct!ubbm;KZbo2u{ETJYj-QPeRz}!WOmBuioCwS~v@V4V^Fg3n&$mq|yBQyX_&vtcu9;q(MH za4J(t1Senvo-n~@FCkod{6w|!*xWM)uoli(U<0Qzl|*m?HsA>pd_oh#nCHyK+Pf@2 zi?wi?1sgccsw9FFu%YKeMDSUd{ND2U^Fg3qQx_-upWYUBLw|L$Ndj&~}F-~?>I6DBxv3gNGlHmYH4zkBiK+F}=eKJfab zcSzqH^iU_#xE+u5WhZ^1>6@2-SjP571ZyFE@yH!gBbL}?Mc7ErynOc>Yn?RqOU?JF ze?F`>NR&df4up)8f#5{ z>KnC@?)_+IB@vwPyqG}xqZxx!(9%B+|GnCnKKKkI6m$ryb>4wpgJjN96KkyX*zG^4jqbNR+F3~i zCp<4EkpBIHJEovRxO|5iR>H~T38&Ur>&3TzR2#Kp9__3of)kz>6G+#;+A#$!^SRtM z{BQP)<||xrv3&WiCB1d_#hoi$f!~=On~s)W?)AS1{Uv20l32?KXvGA6&0NY6hxSNd*Nht|XA^-hmm{gO^6kly>t z*#nQhpm)0L>to_&_C2Pk&Zt2@@}#{LH|6Z|j}j zbOnfH>d@MWk1X4CsSFaVwbCZ$)eq}k-We0;et(GCXxa0t4wf*1^vW}Or=UZa(Q~*G zhadIt4wf+S%6m@_ob_1mwAXYHTfBB;ZS4(~YdWg-v=XQ*b>4(4J7eOdF(;^vPmj8|gC$HL{m9Syq@ZP&_GA2RZ3ykZIHrRo zOblD=>4D#`-Y5NRm8*pKX80+!#lK#zY0v-ctpvP6UMntSUM<^xRcB26<=#`(#>H3m z?_dcNNUz_oPYODOxtC7>ku0_B3$s}3)VKaQ5cXdE?oVp3LX!UWR4KCX|_D6`(& zNs#EO1skLKU(;Dh01Yg0+KB;E`zS5n#mUMsa+wzfu$Gsp&nefcjS>-@a0wGgUp}W# z3fi6r${^39nYH{0-aqX+t*a6doNx&fNT0l8-xRd9MA(p)*vwkqcF%ivN@pbzobbGu zK>8O~^i4sBu(b3+*a+J`-OO6vYj<1x2JMd}A~@j^CXhbtlfEfvi9u!bfsJkDSL|5J zN1{j8zOl2C2u^rjOdx&P;C?A+xu-AVE^NrSTZ^=^+2DiqZqhNhLX;$yoKLf>&+ zzZ7YkabRQRT{o(+md{a`)$sQc5u9)d6G;0kD!JoX=zo(s-yP#SWq#u(zdINaSQ|j% zH};T@35iP(Cu)=-(L|?56G)>EW^sw8^GK$Wkmx1XLfYdMjj!xPQQ|Asl9);fL}8VL z##bz10%?!CG)~lWHAH=^C9$E}Kr~rNXq?CrCXn{{QR7{W6A`_#mc+Pf15t4$q46$D zm_XX&V2#@~PDB*WS`w?P4Mg{qgvRYGVFGE7=XL#|Hn5stEm?D@4Xjcs30;4%gbAd5 zU83tKwSiR;YsuP4ZD6%kN$5I?B}^dg>nmLk*3mz(x??R_1F8+I8Y>B153+;_q0CXn`( zehAx*I=6Y;J15jmz3|KdmpydSz}?0VNbh=Lk#ycerw+o}VD|C%bk6?V{@(Y8MP$8@ zj5OkhtIiz2T1Px>#HG)4CBB#S$5qR(()$yM_)lH@(Danu4v>7d>Rj*bQ|13#(tU3~ zzjKi+#1=xZ){d(l zrCO7(xDD$lB2a3n;~M^Vi7}6LKKVp*+H=j*I%n@WQ`4{Qd0OZGGalFUFLpXjypn%O zMkK5?@sfWmn22mFwAJ60Xn8J)?8P=%Yw{{ay!b=~0mo2|$SWrP@x%2P`R+03bhS|; zf)g%b0_hK4Jv;>+!e&byto5`+{ihDrx@)f47R_!cYooM&?m45ivyuo-cwS5(z25mpq@d*n_Up&1jY%`E>R_!Yqs_+tGtcO( zB!Uy37ZXSy_WBVi=nx*7srU4gt8Uo6gSC#j*lgVO{h6JWL~z3MVgl*MHy)OPmfLxc z*&65jr5>HdTK%S&jb9HqyR(uAPIz8SAU$!+uoSf1_nE6-3r()>yVy3Y_2RW=W2@88 z>8vDz6P_0nNKd_ISPEM1u>8)-k+#1-fVFlx-)yY@@VT9pL~z3MVgl*gJ{^{VmK*WU zoUG6M=sW(e`5#AGzPofkuM_Eq)*7B7&2$J~{pO_Fz{S7pf64fhm2j;a$B#-o79F16 zba-1A;)dS3N=;taXoqIbt1D4>HG2FAcolBi`-SHFdz)8V_Bp?E+UM4z4jp=drZ+lj zc#3ogy{;puf(TTNrM zLru$XhFv~JZTx1n%cAGemGD{*VYx?lRbuNYE7vgA`uNJ@?g`h}-_IU%q1gECEx)N< z@5S;4pE}E zR*xD>kamKfW(c<&xPR^GF=6Y&URp#6*ZTOek?Ha$4^Myfp*?ey&z||bf&ZsAzCU}8 zK696JCA=+2UCroI`^Vq9Z+-GETbAZ1ueZKW9hp9R$?)`~VHZnXtv`HgvoUPbsCJx{J(@dp!pm=|}6={&LUh{m1klsD$U+_B*q2$JUcO zW8&P-wbjPbSME?_2@^=~bJ6e=bO_royOI*Se7akWB}{Cv%#rCA=L}B={t3kJ$;;H{ zY;=16)!+WD67ULn?f%pVE%_r~PwtF~J-=OCZS3>6eQGRW0_hD-8=iueJ>)TZ@)O>A z`Jft0nAqg(5$W6$hNp9P{DTlv*8R5muWu&(kKFPQCEykE`o3mf-8%UXoiXu?EkD$C z?YoB^R$~bhNS`udcnUg%%buRC#O$4qtg(cNcfTH<{(1l5>7SklardE*G(UXLg#N?A zQA)roQ7i5T@>tC2NhJXJ-?K>1l?gak8O=+lWpxstAkp zq3$+V%hS+uuZ+0+jN`QA$K7{QjrU8qw7hpB&zuO>;<}0n@+xe6|GwHXV7Tu~ z@qi8AVL{r?)ubHq&z?EAK8{N{4mN!!xEFChk7rI8$cThvAC9%yw%W)E*5U{-CN`bE z!Bp5-e9$-3cyEdKvbaX%R@gdImH7F&A5LQ}e$Fwm#Z8Ca0~=dj@|USBVS=AcTn^Ua zc2yz-v**DQCgNuf^^8bZi|eOEan@@an=PkL=6)C0uX3 z6B5}7tR?%cSv$T2rQ|cx_o% z;`Nqw?qRKk&iqR98?84IVq%F|=PB{t%vGX3$OP}U#pU2QilYpUyP~>ESc_xE5|P&x z_f(D<8uwb(VtXYbx4{x7;wT*I*%OS)!L`ts;L(TeaSIg-WP`PM^->}LX}e+x6FiPZ 
zHUf`Pyej6LA|hceUaOagO0SsUQ9FdyyA9TsIHBLN_i&5j{p7gq{_xkiTcYF_oU!;l zti`+BF|pYarztV;(1oMc&IIqnYsvF+$mxsrXPno_$(W|IQ&LUaIj1r9Jh(r5I=e-X z=cM|Hx2q~GJ>e(YX2G91d*$iuPJr65Q+@VIPOx%`oQ!E2C*w}rd6W?DRbVYS$5R65 z=OqG=wnUaN!S!Z7IU(nqnlEZm&Q&#yGhxn2iQFsJ@-)tCOGKqt5s}r1o%TUJ^9rZP z>Iw374{=88b(j;m4c79~;>5K?k8+mB_g-M5+?jP*~zG==Y;I(4#bWyc7bKD zH&4rsu%;__gmW#~Pu4W{lN%C2c4buyyRx!do2S{Hx#Z8BwPbf#iEIy9)9_Y_yd0bt z6YQa^1g6ZI^^fN#)SC96em|d0@OcFOK61i<^qaXjo8UeDFX#QCv)`6`$zQkXM7rnC zSJ!mB>kQ(WZBE}DrvR*lo%D?#*;s2jCirZEB}^dg&j}^hFCivKtiSn$K{y|PSDwCq z%E&D3Ck<5uYjGMm!G@id;KXFqQvX!zc!CoziPKZwYECzqg8g>;XZ>8FKJm)d0}{_^ z8tXF|cL(C{i02~$QD3elQKo9)Ot>L|_$tqf35kRC?{oz5@x@%0Zgb9AKk&T44B!*WTIAbXhd0nxD z35hb4ww(#6d+oB8ME9zNlb8~b+h7S3vQALi&R9e}C%DDQx=o(}R;HzAo_H%^Er~3Y zz`C|XVBKL|VOmKfRpQ_5izDvZZ<*;)cDn}xbzDly)K}};%PNGar+vyS!h-B1v zzj=gyj=5ImdHp*g4%JWGEADqP*Q!<)`)In<2NTwEds%!H6JjWjmU2l%$SovzSdQVi z zJtuOnSQ3>fn+@z%G1R?wBQ0u6{`Z{7ZE#=XKFH5Wl=Yvs-GsGdMXyf{r|b=hz!D}( zZ8!09cpvn8kEkm|;oj5iwxKwc6W&UoYC;VTij4VEw=dl^dOJW_s}eCdtq zf`ns_Z`PYNeouyP*$g>)Op0f7&u_;UMB)UZSKPlbtrpdaiC^BoyxKVP#yx5*VWRyD zvoUP;1hJ73NkLe?rqz-{D<-xcGPeWecDaU(;$yp%oJ=&%Io2e6ZV?8cUei=^nFj z?B}qN5lKNy*C`TL%z<+x&Vtkp6iDG1Bgv|3VV#l*L5 z_u>v@SoensHI^{(yE$g#{oP?BBa(u!d`+t*g;q>VT5qzx^E3JDi8YomG2@z(v>c1+ z-Ka8Q`I=Ts3ayxUqIsU)NnNbp*eDa0uW7ZU(29w(j@e#q9P+DmYb;^n(Vxu5-91hg8)d@s zHLaEuS}}3j%X_PhH~TMHV+j*$A2?p?>iIXuij6X1`I=Ts3aywJ_2Qvwqu(AMH?xF^ z{cbiJi(ffLY?KMh*R)zvXvIXYCi_0aQ)6c~vxJHB-ZC5A`y3}W%7o=>S}iHGVq)qW zW3?P_op4q&OPE-E$q8Cl_s={=Y?KMh*R)zvXvM^Dr;Sq^vmafjnI%jduz}e~wm4dB zlnKk%v|3VV#l(+O?5^6Fy@m{62@|rHW#y2)tPskCCy*-UoQF z0y2ZrO_u^5pn!lU-nW43Eg;Jm5m{0F%dQ^%fAi=N&t$wASy@?`kx^AqSb_w`gIL%4 z>@0D7Ph)!du|AGK+F0L!b=QdpFd!I_;b$1ubiOg*=Opxb2A1(z3S-VAV$`ea77rp~ z#j_)`*H^X=p8kHHfO4j0Cu~jzEzYbPxI`ao!)F_)#WLHA6Mcy|)S}&s{N3(~*g#15sD-^yBVjc1&!bOU$g=*O$Fa0ZZ5Y&2ncv}# z5!^88VJty{W^FkRyBe^%9tUdC`XO4p;?zhO@+?G)eG&2iL}s4>Ya$C^1GQ*97lJ*2 z8VQ41v^opH>v@fYk6N_W3c;&$jNpbzUttLn*b8zT5rLzG_Jd-BcaS(1gz&X@QHyqW zLhybtMnpES1PR)=32pa-VkJBdETNs3Xz^YRYjRtKTC|T6f_GjqqF}3#pdFIXcIUzQXg48Ryi1AMh+2hOv^Nlf zcM~-eSb_wOH)>O^uYA;^y@A-^-9(IFC^mMHz-v=%gui?bpIlTAfM*|FYgu*)@;?Hd zk@3mHnu#l~`+51U*UuxWWp>6M|6Ts-PAjVq=h*Q1SZKbAY*@KWXgYHG*UE0I)~w#J zoZj9SEwWc!yXB=KlfB}iJ*^xOVuRXS!oEm&v?ctsDa-mcm(wajeUFd!l6Bogi`J#+ z-8Jk5u|f6Q5sz9d7wR`5!e7>gY9Uducgc<~Z_z1%{Z^3RhIv?m1dfHMRbs={%AJFP zgxJ9Iyy~UzmOs??eCfB?Ij8z4i{P{^4uz z_7WY{)YFk18;R5++bYzuvh`kLL}*B^bE?;la3t_52$kg@xdw@__0mX;X(~6M3TqiJot6 zT>jQ3ONYu~?{4_;H=6V8k+ZCOHJ7(OlvaNB{fYlK<_U(s>a3 z=aN->rF5HHbC2 ztwOCPBMwdJH5enJR-yG9o~f78c{fJTQQIR&t@>vioYHwWMnpES1PMI8P+9)*y$(;}Ti>8}(^^@TaUOFC)S_!J+oGGLYbLM+32LnqnaGVR)S_!UtDa1Zz$1_9LW1U) zY|Qr|%J=9=x$N@pN64t=di8lY8ZlWNWA2f^wa4~iopGq&iXXVg&OZFioz~xq{W2JM7a705&jb2C(=R!=M|SgEt(S}x@H1L_{B#YmeMr? 
z$D7#j={NoFd#KfXbEvfYtHP$Xx+8GZ;Xg+US4+L3Vnk@jJq{#z-PLP8RJNwXlhHn5udh(QSyjvV z9(%~zDr^b+!FqfV;pg`i`@7m#F~XXl^MN*a7gpnuQ~mtod#FX%{Bk{7Y@yY)zumq< z0%zBV@Yx2=GIXA^x(?#3Bm`;O(T-X?tEuM{BLrg`Sb_x4kB)!EgwF(CgDmTu$md*V zzOZ^AA}yE)YbLM+XHHsIQaYoG4Qi2X6>8zU8zaJ|i4PauT_Eo)r5-EJcCHQ7Vp-?i z7~$AZEhK85MNtdKQq2UGAVG5Eox8_@T66}ZH8Dm+$AKmJgzjEKWoycr6WK?3`uX_$ z7`2K7-$O0-gYIi&XEkiCYXeJ=!1>W7aHZ#KMduGZrl?guw+glRdepfjMudjs`iOcI z32I?hk11-E*q~WItDYsTC0X#{B9Fr*@am*%C9BsRUd=*K{o(ZiTI`+aRTLvaL+;Fg z1kYDWhsyGg@8Jr`Yx`-ZTohXqt*Yg$2cTN)Ka^J2K!k?eSriFeMMGt4%EMKLqZEHV z5>bmIA$rzuyty{81PNSyTmnY}jr^>(DmJcs;^=?5J|Zn1CAxCPh|rKb5|QBXrfYDh zEdTf(wYOOPS{xa4OvTQwh*}iQyOD>CBO^wHh9tHW)rAD^6K!+U25RxjsiQqcL^iMl z35>9Y%HeU~k&~ZL{MTzOYVppy#Oz8gA=;OSF4fOJzQ_A;^=ql8i*P(;(*Q>ryj1Y_oEJ1=>sJ>RHEdTf( zM+$Ui;Am8hGXu4Vf5>|UEslPvPmR~6)wRFfxdaK0wiUiU@XDbXC#yDM*Pzc$;#Q#+ z%~x4Hju>GZMFf^0K{IMrdn8nre`FSoN@xzw>es znsndh+Q5~JXE=yuxT_kq$o3U#@%*SW4!27E$H)dl(pOr71bWU9;p>rWd$~tpmm00w zE`eHnJ|Kz{C!$uVSCmg^YsCh&$R0Ur@rZ%g(*NPekw9NUY=pmPW`I2zd&PR4*L;1T z7GCo;6Ig--Ud_@VpMKN-?p%U1QH&6b2zgFMpN+|TW+tV750$Nr{3mNt`YaC7E6Xx<=vzr~oU zt5?dCHG?V34ZdT7@0GBe%cS%0!|T&a*Z$FVQ#Z+^6O~ZLn-a~QgEi5&o!3>%vp8X1 z97juY;kd-ses@T#_P1GXBs#?JrLmS0?$bN18$OUe^84S~wmwBN=|ozT@uozx=ePvr z#St5v7Y9r734PXxo@ktLi1h9WHhYq^irr*T9?&+r8)1OI( zrbvr2DV1neCYPXmOJalbEn!JMksBp-em^bq>Fs;8`>^^f&SvF|MBcG-zwuisxvX5P zwwxqKK8iAC;TXktS6t$x>eg~T=5#)jb8JO*AwgLyOEhaG<$5V?mpSG2@oj&i%wMR5 z@5;CYWxy00VFt{oE+kmid~5W)=p+9S;+0*Ji7hHYg7U4EXjV4LDY~k2X3ys1+qSmZ z-sBAOdp`Jvic3)LHn9=rZj0(df@RIOM)`Ly>Mq2sUrHvns00bhu~wp4*^Ie&bC1l* zqsO&vXS2OYtN49Ke5b`FD0iFK;M{Fkf&|OC+-)z-=p)4HX_DbDDnWws_?2k3KbrM! z>X-S)jxlZLQWinf!Z&JMf^q_ijW8!*R2LE~YaTyij!2aY@y=_K;V&vdf^zqjXtqCN z?jJuSQ?g}L+s-zNp!5~L+k@}?xCG?{6dRlq5KE9?IhPagf@1lKlG%ITQJh6EDnWv> zFP3P&Mq@5sH!^eIQ`ficYx6)#tHRumBJ=xEE-l$ zXbBQ5YkpzLZY(x9A2Mp?zf)f`p(RMLteKVB-?P3h3wVzXpD)4ZdRZ?=pq9$~tZR&5 z$YT^skg(6$+UJv5FR~G4*s#xn+b35k2OYiB5F;288(4ybefpDXl6N!2N=RUdeO|O& z-v7XwMBfi>_^4%{Pc4_XQes5GRv}@Zhb@=)W^9}EIPBAZtfhAW3KFDk zTZUR}gX32<5)APS6j?_CTT43j$7h0~QF4!->!lcp+?TJ>h7hPl5f-^eUyx8UPOFf> z9toAhBiB1sTrb71+`_qa6v2u!XeLZwbEp!gsB>gzs$dUCV+td^W%~P>b(u@SV|u1RJ0q zw^c~+oek{~t17>VptyQ5$JM!Bx+_=Acd!cD@Yw*{KrOl}SIl?G3KG^tUaOG69uZ}# zpMT^|LovtIxn8=5P|Okff;N0sX9Bh8zCkhH1t>@mz&~zZA%Q(&b?tAtD^sjBRA~fW-3JGqZ_DHBK|H!lF#e8>x>*W!{_sZ50x{`e={1t>QIY&n)4}FCM_K4dm^jO); z{L|S-T7_Edx9QyVe`^&I*dz2!i_hhoFQ!+o-f4XE0iVFH*mq0uPp@_hGQSsPjd^0n zNUwFQlYB zvM$l}h*92_N$uPIJnzahY90LA+r^!?Ule@(Mc+IkQ+t$m=1mVIS|8UljddZ>>(x!g z*W7{__)W#-mtGXCdw4*ejn{t}>5ZM)FHv+%uQY1S zTfVV)QR9n(Z^sYJBgStV>FxUaw8Wba^>SK;#F`g37T^9!mmo8FAhqgTvN8F+X^DO( z_e!HyQOU;QZ(ivdod3X}tV>+<+eq*GH4_r^YWGTG2@==-v7xxB-!*u%J?EJiRBM!X zRkM=Bn9g+Mn>G|rT6tk`=N*HysHNV(=qH)*ejMqo{@ts(^q;-bsCE528;XZ{U4w?V z4W_)N)_<_XlXXUUbuKASoU^ruW25eG8;Z}JbYZajAFe%W)$WnriUtD`ua);qJ6dfw z7H@2SVesy=gPaypl=J72-mT9*k~q??fK7td}vQ+ zv?FoNluwFJ-*8DV?b)u>yQAo;z3}#4&riO*cN(wG8(;mdI71%tGm{fewhj6=;MXI) zeOs1P4X)oijask&@2ld=Zt4~cJh&u}*hM4p+vmnrpE{?PGczEe&okL99)F|W-FoyM z)q5p-rBQ3ZS)Ug-IKErZbozPGdtR2PMQ8Az$MmV{IihD8>q4UcQ=5zbQ`I>*Y}gfa z!m)G@7qZam$OPsyeC~rsIbkV#zJ=0i%#I-v=C?3A4b8uOm9%N(xeMWh6 z`kbCPdP9#iYT@~CiLZ99N)CTypx5G)&S&BIz*$uHANGx^`hRRnuDYzB*LdIK+h_@| zeYj)6^JL7eXVmt(zSYCqe(aG;umlP0bz|1)Z@J?&;52OmLP#E z5#=Linll8ne&*|G1wQ%p`5>K3dwKrh>_sK<9cYgu3aOdR`TZi=Un!i=s>-PEI z+F=P2xZ2ToU)K%x7EV}}Y*Y8irMMr&^$Kaq(6hVT+iSs7$u$>uZiia94!XoyoBMl@ zby%CM)Az!qSb~Jxs=FQ-?bVy#Bw2seT}yEnhr2smC5#z=#29b+aogKh&%M7Lmf#wN zH5v2Ke{b~edZ1PE@*blTsD*2yOLUz-+^hP1a&pXbtE;gD3G7448&zwhw_)+%t+Zy%rRHSbfL;r*)i6?-fyv)`r>9qY*e;TO{4Im#tMpFwP3iONi9WXjqA z!Y`y1C+^-++q2{6boBBJ|&0KzY%6(?F>zqx(YSb65h 
zU|lLRq0ujE0|>v6R-E85OA#D74y;RMCNw^0ZLoSs_=U9M#6H(=URG_7!^gT*Wjq%1mgaux0|aP__ikJ{j8M$Z=p@Dl?(6%$f<* zLfH~e9nd_}kybVCE38XpCN!Q{Gl5ztTjH0aI%JAyRbv}im�N{IX^OwNSRi8J}H} z=}fB{+rYY1WU*%p03kD+ zJ}sbHapJYQ%`*)te#OV3T5LmQCU~CL_jxG>kRxQK*5?aUD^7%Kl-N)$wxKc;yk6;h z;u5*zRgDQ*arC(i)ru2e@2s79jN*xW9IC}ORAz$LU447q7$E#YT5%%u8N>$Gr7{!j zKWOF+V}KA3K%Wp%tvC_-K4L?)_&8K%g8iV}8=ydlC!^1is8*aH51D;9v7uUQLuDq| zpVN0#DNA#X5RZ(lqZZ0slQD0VZeI4)*fC-Q>r$Bs_Dl7RI)@O?lC7f_%GL&-YZ@S8 z1M5 zwxKc;cn5;s;Q7~g$zLzO#@o6zXp35S55Xnq{!&ZM>CE|^G4J_nH*ZWm@oV#-{&OSv zJLOT95*{JOr@ZPO=U7KA%uM1Etj!n`q!s1pV_7qkP)%92XS14 z1b@fIB@Wrk+Sq7Y71f0V%gugoF1fL}J^JH&_+88VZ*}U|Bt_e&-}Jxlp%%-J?f!f8 zbhF$M_sWW+jZp=@o?uE+~_MeNB$b*an* zc^pxO6=Q&4PeusTLfP6Nzr+i7cLwWHnF;bZqW9g!lTm^_8L^I9C|et}OZCECDtj_f zT`Ds{9!Ky*c2gL@~r7{!baYVU(#FNo@B6~7o9ko!lHYi5cK8&gvtV?Ak z$m58z0~rGZdop4jwNSP;xR0n~#6;MZES3qk~wN%1qGxbI71vh(IlrtqpqCAVbd@NOTbEQke;QqF}lD!NwFK zPzz>F;x>RO@`z;q81i~+*6(=sDb6y=r z2eB@dnc(xEiw*+e7t)Fo5bHBom=2rV2PETk1D!YfLmgIJf!Oz`!Xiw*+e7t)Fo z>u4^yDvSM7ZL}abR64Gr{Xs;pku?tvIocqJ#XjA@>#5r7{z|?&hL{K=2)R zv5s0O+v^-+eFp1NnF;m`auF6F!~@XiplZd5(36qlP%XBuG860v%mha)a&tQn5@*mmCaM)D!kCO4hib76m6_nk zOKyz}(;0RJ~tp!5jK6(dBwcD^b=VjC(m!I8}(&`2@= zUG9c+)LH!9LRxXc&eZgq=69_NTf#CGO8h#MSy_I3k9$;~3*{r%XL+gEC441Ni(3fq z)VhQP?5Y!$Ac3t7mE|AFwh%=*dF;a5fKg3W)pGJSV5G$(G50)e-f@J6WG#&9LIQgv zRJNv~eEQsrJp1SqtyCP>9eNU9v z4|vP8hE|~#uLk;#>i?}(Nbo!lZ-Yh+v)_EtcWT)KP&sD9w~)_oX#nc&FCw-O!Lbu8_dkVyG`jRj7qK?-)UU;2*bDNMMgxT~=oTcXzm(r1BbC zg<81BjS=(*{&8D{1ontkmET0*ZVPvJR9-`?Pz(2*F~XY2YZVgMBckkEB=@BazM^m+ zMddZL3bk-|7bC2RyjCHBJtE4!MRH$V;8x*|h01Ga6>8x=Dn?ind96YMcY31iTO^lN zo?C@`11hhfRj7qKmKb48&=l@tNS?gY3y_D`9SlXD4Ll1cVV0z|f z6A`F2xkEO)$Bik;vgi`rB(*W)$XBAec1&HB^*>Jr<@fp7nhDhUeAmkC;@wHfndlOa zcYLH(Rqa+@vpTIjEJ0#^>B?-&W4&PC6ZptyU3JFuztVC1KopiFH?GJ&)zJ(1%hp(v z^i^~msI_0G71>(lUa+}tKGCdRm)4D`pH`6_tP6=7mc5)k{60_ePSX2><7PkqIB9!C zVac~eFK1s_;|1BfZu*l}q1H>)FJ*tORV=wJ(YAZeu>=Vm3&sptwzBndYSBs5GCa=> z=P%DLu3I8`Ik6^Vjz9Iy)@dS6Ap*4)ov}Py^=OIY{&b1zy&hWp4H3KOe=I@5{X)gx ziqC9w1Jz$n1ZpjOKa)N8><*HJ)FqxisPU39q}`t=)H>qvOm_B{9VF|iOZ-r;PMh9* z)I^~ced{3m>eRCX&c*5y+`l^7u>=X0^&1h!=u>)pozwLk@RNBx&-Jq`YaUs;{^jq{ zT;l0mf?72DMBmbbf9w-Vd@l!*|3hciz z;&9K=TC+~-U3uIi*@A>oLhJe%36k&bSQIdA%ySES@p1UYlwbw5jyT=gxS;OCS^j&B zFiL0%5-eY{txdo*y~F!qf3abT*CtWx!g*`uIBxm!!E8Z-9`Lh-)`bMi`^|0>Fimer zj~FU87PYxQiCRB&v^HK`^gy;CVU*CikYIUar#1o8#{60;Z#4Q{zvz}kt-tMQZL}UW zH(QV}N@!h3usmx=>wsxvcF(d|%m*B@sXc1l`01O{R|DJLpDjojCA2OiSZ=?tb-*-_ zc3Cxj7V@=t>~8-3-fTgFQG+E)EAjUst%auVP1xfw2i4!S47K!_>Q~<*Hfkoc1PPW; zDry}tO?ji9mmKl_?Q`0uu>^^}$F9w8Z`3;Y?~zRK8Laoqp$R%4Lh#XYU3xZe88avA z65Y1iT=&)cU7N-dBv@X#SL=XjW8N8dy^O@~2i=~=5+oiNwl@3V_pO5NpU)N zADW<%C(X)jV&~aem*{%Yb!@{d`f^?xOORlB&ikzbrj5C8!XRnYms?*-V+j)P zf3-Hd?A2C5ld4%noIzJ@lamik4D!|r!L8!D^a|d2;;gJoEUi~6t@`(%57JnI1j`MU zwhEZ091z?8C9Nv!`g0mfka&CKy6p2$wF(}ceK!$4%~vB=)=$hdCWBf!cTG6x?yO79 z>3F5IYJ9L)21}4&`N0QU1xy>$c*n)ks;b=wWv~Q^-%nnj{cu*R;J3<|MEpoIb^5{j z37U6>;J)Izbk^=&cV^ZlhBT13Kh3DO8)mQs36^h|(JEk?Y&=;attu`#F@q&Ybo*(2 zcJQ=TLH!wb5y2}?yPNALj_BM}2yPYErE66GMt5ahqSJHozNmS>P4f(vAi?sATUrH7 z(-}OtsmCMnx}izWW@#>yC*Lt+v(33&7A(H#rX1nt2%D!JGc&uy(Jvk*HYPrDp@$_% z*c{JgVSeh6@Nt$i)`jGH*gw4Q?nb}=p!ih6D!;P8o%UBOfkgz%8%fcM-A>ro;n>Qab$-BhruNE6c7j8{r2@>`j8fCJ(r|+v-!p{-*`v&->4VP$=C=(lR z41XbsB}mw>e3VH%fZn6Fgr6hqH$U)OATF`+=YG?UAzZt0W=e=Lq{H z68u7mOI);QwAfg`>i8sU}8QaL?9}t(Ft<@IFFBS}UF%nZ3TU zeem@6`vfk*?RuPuW5@=6rx?GB8zX!mqHp9fp)H9K(Q#l25_o)+FCo>v&7>}ay=M-T zR$;xV_P4hWe%-ZC;1YZs_D9}ol(^`JUB_kVoybV0HypG2hNI~E^qc;dHzy-4CiLA$ zm!KKWV~%GyitnZPoFBAebe7_KDUJ2A%zy9o$0!;TCtZ8C5bJ4%LoJH+rZgh_=Y-aU 
ze&P#aOGwf7549VdsV!^pYD$yt_Xgh6t-k41;B5sW_4P9;D8`x}7 zeY3Wk5c1^l>TZb|&kod3{sQ;U5-gRi72A;|0AyQO?XXgLh{t0cF~L%v<+ZKg>Mv^S y-(as+WI2_TB?1BDpChvE!#X^|jlwLEp+2|zxOH9sJ4xL)>g#Z{Yb&H3@&5pgFc6pk literal 0 HcmV?d00001 diff --git a/act/assets/vx300s_dependencies.xml b/act/assets/vx300s_dependencies.xml new file mode 100644 index 00000000..c75d3ad5 --- /dev/null +++ b/act/assets/vx300s_dependencies.xml @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/vx300s_left.xml b/act/assets/vx300s_left.xml new file mode 100644 index 00000000..61e6219e --- /dev/null +++ b/act/assets/vx300s_left.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/assets/vx300s_right.xml b/act/assets/vx300s_right.xml new file mode 100644 index 00000000..2c6f007c --- /dev/null +++ b/act/assets/vx300s_right.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/act/conda_env.yaml b/act/conda_env.yaml new file mode 100644 index 00000000..0f44d6b0 --- /dev/null +++ b/act/conda_env.yaml @@ -0,0 +1,23 @@ +name: aloha +channels: + - pytorch + - nvidia + - conda-forge +dependencies: + - python=3.9 + - pip=23.0.1 + - pytorch=2.0.0 + - torchvision=0.15.0 + - pytorch-cuda=11.8 + - pyquaternion=0.9.9 + - pyyaml=6.0 + - rospkg=1.5.0 + - pexpect=4.8.0 + - mujoco=2.3.3 + - dm_control=1.0.9 + - py-opencv=4.7.0 + - matplotlib=3.7.1 + - einops=0.6.0 + - packaging=23.0 + - h5py=3.8.0 + - ipython=8.12.0 diff --git a/act/constants.py b/act/constants.py new file mode 100644 index 00000000..f445350a --- /dev/null +++ b/act/constants.py @@ -0,0 +1,76 @@ +import pathlib + +### Task parameters +DATA_DIR = '' +SIM_TASK_CONFIGS = { + 'sim_transfer_cube_scripted':{ + 'dataset_dir': DATA_DIR + '/sim_transfer_cube_scripted', + 'num_episodes': 50, + 'episode_len': 400, + 'camera_names': ['top'] + }, + + 'sim_transfer_cube_human':{ + 'dataset_dir': DATA_DIR + '/sim_transfer_cube_human', + 'num_episodes': 50, + 'episode_len': 400, + 'camera_names': ['top'] + }, + + 'sim_insertion_scripted': { + 'dataset_dir': DATA_DIR + '/sim_insertion_scripted', + 'num_episodes': 50, + 'episode_len': 400, + 'camera_names': ['top'] + }, + + 'sim_insertion_human': { + 'dataset_dir': DATA_DIR + '/sim_insertion_human', + 'num_episodes': 50, + 'episode_len': 500, + 'camera_names': ['top'] + }, +} + +### Simulation envs fixed constants +DT = 0.02 +JOINT_NAMES = ["waist", "shoulder", "elbow", "forearm_roll", "wrist_angle", "wrist_rotate"] +START_ARM_POSE = [0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239, 0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239] + +XML_DIR = str(pathlib.Path(__file__).parent.resolve()) + '/assets/' # note: absolute path + +# Left finger position limits (qpos[7]), right_finger = -1 * left_finger +MASTER_GRIPPER_POSITION_OPEN = 0.02417 +MASTER_GRIPPER_POSITION_CLOSE = 0.01244 +PUPPET_GRIPPER_POSITION_OPEN = 0.05800 +PUPPET_GRIPPER_POSITION_CLOSE = 0.01844 + +# Gripper joint limits (qpos[6]) +MASTER_GRIPPER_JOINT_OPEN = 0.3083 +MASTER_GRIPPER_JOINT_CLOSE = -0.6842 +PUPPET_GRIPPER_JOINT_OPEN = 1.4910 +PUPPET_GRIPPER_JOINT_CLOSE = -0.6213 + +############################ Helper functions ############################ + +MASTER_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_POSITION_CLOSE) / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) 
+PUPPET_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_POSITION_CLOSE) / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) +MASTER_GRIPPER_POSITION_UNNORMALIZE_FN = lambda x: x * (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) + MASTER_GRIPPER_POSITION_CLOSE +PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN = lambda x: x * (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + PUPPET_GRIPPER_POSITION_CLOSE +MASTER2PUPPET_POSITION_FN = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(MASTER_GRIPPER_POSITION_NORMALIZE_FN(x)) + +MASTER_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) +PUPPET_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) +MASTER_GRIPPER_JOINT_UNNORMALIZE_FN = lambda x: x * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE +PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN = lambda x: x * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE +MASTER2PUPPET_JOINT_FN = lambda x: PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(MASTER_GRIPPER_JOINT_NORMALIZE_FN(x)) + +MASTER_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) +PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + +MASTER_POS2JOINT = lambda x: MASTER_GRIPPER_POSITION_NORMALIZE_FN(x) * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE +MASTER_JOINT2POS = lambda x: MASTER_GRIPPER_POSITION_UNNORMALIZE_FN((x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE)) +PUPPET_POS2JOINT = lambda x: PUPPET_GRIPPER_POSITION_NORMALIZE_FN(x) * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE +PUPPET_JOINT2POS = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN((x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE)) + +MASTER_GRIPPER_JOINT_MID = (MASTER_GRIPPER_JOINT_OPEN + MASTER_GRIPPER_JOINT_CLOSE)/2 diff --git a/act/detr/LICENSE b/act/detr/LICENSE new file mode 100644 index 00000000..b1395e94 --- /dev/null +++ b/act/detr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 - present, Facebook, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/act/detr/README.md b/act/detr/README.md new file mode 100644 index 00000000..500b1b8d --- /dev/null +++ b/act/detr/README.md @@ -0,0 +1,9 @@ +This part of the codebase is modified from DETR https://github.com/facebookresearch/detr under APACHE 2.0. + + @article{Carion2020EndtoEndOD, + title={End-to-End Object Detection with Transformers}, + author={Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko}, + journal={ArXiv}, + year={2020}, + volume={abs/2005.12872} + } \ No newline at end of file diff --git a/act/detr/main.py b/act/detr/main.py new file mode 100644 index 00000000..07ea86c7 --- /dev/null +++ b/act/detr/main.py @@ -0,0 +1,110 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +from pathlib import Path + +import numpy as np +import torch +from .models import build_ACT_model, build_CNNMLP_model + +import IPython +e = IPython.embed + +def get_args_parser(): + parser = argparse.ArgumentParser('Set transformer detector', add_help=False) + parser.add_argument('--lr', default=1e-4, type=float) # will be overridden + parser.add_argument('--lr_backbone', default=1e-5, type=float) # will be overridden + parser.add_argument('--batch_size', default=2, type=int) # not used + parser.add_argument('--weight_decay', default=1e-4, type=float) + parser.add_argument('--epochs', default=300, type=int) # not used + parser.add_argument('--lr_drop', default=200, type=int) # not used + parser.add_argument('--clip_max_norm', default=0.1, type=float, # not used + help='gradient clipping max norm') + + # Model parameters + # * Backbone + parser.add_argument('--backbone', default='resnet18', type=str, # will be overridden + help="Name of the convolutional backbone to use") + parser.add_argument('--dilation', action='store_true', + help="If true, we replace stride with dilation in the last convolutional block (DC5)") + parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), + help="Type of positional embedding to use on top of the image features") + parser.add_argument('--camera_names', default=[], type=list, # will be overridden + help="A list of camera names") + + # * Transformer + parser.add_argument('--enc_layers', default=4, type=int, # will be overridden + help="Number of encoding layers in the transformer") + parser.add_argument('--dec_layers', default=6, type=int, # will be overridden + help="Number of decoding layers in the transformer") + parser.add_argument('--dim_feedforward', default=2048, type=int, # will be overridden + help="Intermediate size of the feedforward layers in the transformer blocks") + parser.add_argument('--hidden_dim', default=256, type=int, # will be overridden + help="Size of the embeddings (dimension of the transformer)") + parser.add_argument('--dropout', default=0.1, type=float, + help="Dropout applied in the transformer") + parser.add_argument('--nheads', default=8, type=int, # will be overridden + help="Number of attention heads inside the transformer's attentions") + parser.add_argument('--num_queries', default=400, type=int, # will be overridden + help="Number of query slots") + parser.add_argument('--pre_norm', action='store_true') + + # * Segmentation + parser.add_argument('--masks', action='store_true', + help="Train segmentation head if the flag is provided") + + parser.add_argument('--a_dim', default=-1, type=float, + help="Action dim") + parser.add_argument('--latent_dim', default=-1, type=float, + help="Latent dim") + parser.add_argument('--state_dim', default=-1, type=float, + help="State dim") + + return parser + + +def build_ACT_model_and_optimizer(args_override): + parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) + args, _ = parser.parse_known_args() + + for k, v in args_override.items(): + setattr(args, k, v) + + model = build_ACT_model(args) + model.cuda() + + param_dicts = [ + {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, + { + "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad], + "lr": args.lr_backbone, + }, + ] + # optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, + # weight_decay=args.weight_decay) + optimizer = 
None + + return model, optimizer + + +def build_CNNMLP_model_and_optimizer(args_override): + parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) + args = parser.parse_args() + + for k, v in args_override.items(): + setattr(args, k, v) + + model = build_CNNMLP_model(args) + model.cuda() + + param_dicts = [ + {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, + { + "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad], + "lr": args.lr_backbone, + }, + ] + optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, + weight_decay=args.weight_decay) + + return model, optimizer + diff --git a/act/detr/models/__init__.py b/act/detr/models/__init__.py new file mode 100644 index 00000000..cc78db10 --- /dev/null +++ b/act/detr/models/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .detr_vae import build as build_vae +from .detr_vae import build_cnnmlp as build_cnnmlp + +def build_ACT_model(args): + return build_vae(args) + +def build_CNNMLP_model(args): + return build_cnnmlp(args) \ No newline at end of file diff --git a/act/detr/models/backbone.py b/act/detr/models/backbone.py new file mode 100644 index 00000000..f28637ea --- /dev/null +++ b/act/detr/models/backbone.py @@ -0,0 +1,122 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Backbone modules. +""" +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn +from torchvision.models._utils import IntermediateLayerGetter +from typing import Dict, List + +from util.misc import NestedTensor, is_main_process + +from .position_encoding import build_position_encoding + +import IPython +e = IPython.embed + +class FrozenBatchNorm2d(torch.nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, + without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101] + produce nans. + """ + + def __init__(self, n): + super(FrozenBatchNorm2d, self).__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super(FrozenBatchNorm2d, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + def forward(self, x): + # move reshapes to the beginning + # to make it fuser-friendly + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + rv = self.running_var.reshape(1, -1, 1, 1) + rm = self.running_mean.reshape(1, -1, 1, 1) + eps = 1e-5 + scale = w * (rv + eps).rsqrt() + bias = b - rm * scale + return x * scale + bias + + +class BackboneBase(nn.Module): + + def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): + super().__init__() + # for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this? 
+
+class BackboneBase(nn.Module):
+
+ def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
+ super().__init__()
+ # for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this?
+ # if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
+ # parameter.requires_grad_(False)
+ if return_interm_layers:
+ return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
+ else:
+ return_layers = {'layer4': "0"}
+ self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
+ self.num_channels = num_channels
+
+ def forward(self, tensor):
+ xs = self.body(tensor)
+ return xs
+ # out: Dict[str, NestedTensor] = {}
+ # for name, x in xs.items():
+ # m = tensor_list.mask
+ # assert m is not None
+ # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
+ # out[name] = NestedTensor(x, mask)
+ # return out
+
+
+class Backbone(BackboneBase):
+ """ResNet backbone with frozen BatchNorm."""
+ def __init__(self, name: str,
+ train_backbone: bool,
+ return_interm_layers: bool,
+ dilation: bool):
+ backbone = getattr(torchvision.models, name)(
+ replace_stride_with_dilation=[False, False, dilation],
+ pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) # pretrained # TODO do we want frozen batch_norm??
+ num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
+ super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
+
+
+class Joiner(nn.Sequential):
+ def __init__(self, backbone, position_embedding):
+ super().__init__(backbone, position_embedding)
+
+ def forward(self, tensor_list: NestedTensor):
+ xs = self[0](tensor_list)
+ out: List[NestedTensor] = []
+ pos = []
+ for name, x in xs.items():
+ out.append(x)
+ # position encoding
+ pos.append(self[1](x).to(x.dtype))
+
+ return out, pos
+
+
+def build_backbone(args):
+ position_embedding = build_position_encoding(args)
+ train_backbone = args.lr_backbone > 0
+ return_interm_layers = args.masks
+ backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
+ model = Joiner(backbone, position_embedding)
+ model.num_channels = backbone.num_channels
+ return model
diff --git a/act/detr/models/detr_vae.py b/act/detr/models/detr_vae.py
new file mode 100644
index 00000000..59925029
--- /dev/null
+++ b/act/detr/models/detr_vae.py
@@ -0,0 +1,283 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DETR-style model classes, adapted here into a CVAE action-chunking policy (ACT).
+"""
+import torch
+from torch import nn
+from torch.autograd import Variable
+from .backbone import build_backbone
+from .transformer import build_transformer, TransformerEncoder, TransformerEncoderLayer
+
+import numpy as np
+
+import IPython
+e = IPython.embed
+
+
+def reparametrize(mu, logvar):
+ # standard VAE reparameterization trick: z = mu + sigma * eps
+ std = logvar.div(2).exp()
+ eps = Variable(std.data.new(std.size()).normal_())
+ return mu + std * eps
+
+
+def get_sinusoid_encoding_table(n_position, d_hid):
+ def get_position_angle_vec(position):
+ return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
+
+ sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
+ sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
+ sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
+
+ return torch.FloatTensor(sinusoid_table).unsqueeze(0)
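+
+# Shape note (illustrative): get_sinusoid_encoding_table(3, 256) returns a float
+# tensor of shape (1, 3, 256); row 0 alternates sin(0)=0 and cos(0)=1.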
+
+
+class DETRVAE(nn.Module):
+ """ DETR-style CVAE that predicts a chunk of future actions (the ACT policy) """
+ def __init__(self, backbones, transformer, encoder, latent_dim, a_dim, state_dim, num_queries, camera_names):
+ """ Initializes the model.
+ Parameters:
+ backbones: torch module of the backbone to be used. See backbone.py
+ transformer: torch module of the transformer architecture. See transformer.py
+ encoder: transformer encoder used as the CVAE posterior over the latent z
+ latent_dim: size of the latent z
+ a_dim: action dimension
+ state_dim: robot state dimension of the environment
+ num_queries: length of the predicted action chunk (number of decoder query slots)
+ camera_names: list of camera names; one image stream per camera
+ """
+ super().__init__()
+ self.action_dim = a_dim
+ self.latent_dim = latent_dim
+ self.state_dim = state_dim
+
+ self.num_queries = num_queries
+ self.camera_names = camera_names
+ self.transformer = transformer
+ self.encoder = encoder
+ hidden_dim = transformer.d_model
+ self.action_head = nn.Linear(hidden_dim, self.action_dim)
+ self.is_pad_head = nn.Linear(hidden_dim, 1)
+ self.query_embed = nn.Embedding(num_queries, hidden_dim)
+ if backbones is not None:
+ self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1)
+ self.backbones = nn.ModuleList(backbones)
+ self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
+ else:
+ # input_dim = 14 + 7 # robot_state + env_state
+ self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim)
+ self.input_proj_env_state = nn.Linear(10, hidden_dim)
+ self.pos = torch.nn.Embedding(2, hidden_dim)
+ self.backbones = None
+
+ # encoder extra parameters
+ self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding
+ self.encoder_action_proj = nn.Linear(self.action_dim, hidden_dim) # project action to embedding
+ self.encoder_joint_proj = nn.Linear(state_dim, hidden_dim) # project qpos to embedding
+ self.latent_proj = nn.Linear(hidden_dim, self.latent_dim*2) # project hidden state to latent mean and log-variance
+ self.register_buffer('pos_table', get_sinusoid_encoding_table(1+1+num_queries, hidden_dim)) # [CLS], qpos, a_seq
+
+ # decoder extra parameters
+ self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim) # project latent sample to embedding
+ self.additional_pos_embed = nn.Embedding(2, hidden_dim) # learned position embedding for proprio and latent
+
+ def forward(self, qpos, image, env_state, actions=None, is_pad=None):
+ """
+ qpos: batch, qpos_dim
+ image: batch, num_cam, channel, height, width
+ env_state: None
+ actions: batch, seq, action_dim
+ """
+ is_training = actions is not None # train or val
+ bs, _ = qpos.shape
+ ### Obtain latent z from action sequence
+ if is_training:
+ # project action sequence to embedding dim, and concat with a CLS token
+ action_embed = self.encoder_action_proj(actions) # (bs, seq, hidden_dim)
+ qpos_embed = self.encoder_joint_proj(qpos) # (bs, hidden_dim)
+ qpos_embed = torch.unsqueeze(qpos_embed, axis=1) # (bs, 1, hidden_dim)
+ cls_embed = self.cls_embed.weight # (1, hidden_dim)
+ cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim)
+ encoder_input = torch.cat([cls_embed, qpos_embed, action_embed], axis=1) # (bs, seq+2, hidden_dim)
+ encoder_input = encoder_input.permute(1, 0, 2) # (seq+2, bs, hidden_dim)
+ # do not mask the cls token or qpos
+ cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device) # False: not a padding
+ is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1) # (bs, seq+2)
+ # obtain position embedding
+ pos_embed = self.pos_table.clone().detach()
+ pos_embed = pos_embed.permute(1, 0, 2) # (seq+2, 1, hidden_dim)
+ # query model
+ encoder_output = self.encoder(encoder_input, pos=pos_embed,
src_key_padding_mask=is_pad)
+ encoder_output = encoder_output[0] # take cls output only
+ latent_info = self.latent_proj(encoder_output)
+ mu = latent_info[:, :self.latent_dim]
+ logvar = latent_info[:, self.latent_dim:]
+ latent_sample = reparametrize(mu, logvar)
+ latent_input = self.latent_out_proj(latent_sample)
+ else:
+ mu = logvar = None
+ latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device)
+ latent_input = self.latent_out_proj(latent_sample)
+
+ if self.backbones is not None:
+ # Image observation features and position embeddings
+ all_cam_features = []
+ all_cam_pos = []
+ for cam_id, cam_name in enumerate(self.camera_names):
+ features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED: one shared backbone for all cameras
+ features = features[0] # take the last layer feature
+ pos = pos[0]
+ all_cam_features.append(self.input_proj(features))
+ all_cam_pos.append(pos)
+ # proprioception features
+ proprio_input = self.input_proj_robot_state(qpos)
+ # fold camera dimension into width dimension
+ src = torch.cat(all_cam_features, axis=3)
+ pos = torch.cat(all_cam_pos, axis=3)
+ hs = self.transformer(src, None, self.query_embed.weight, pos, latent_input, proprio_input, self.additional_pos_embed.weight)[0]
+ else:
+ qpos = self.input_proj_robot_state(qpos).unsqueeze(dim=1)
+ env_state = self.input_proj_env_state(env_state).unsqueeze(dim=1)
+ transformer_input = torch.cat([qpos, env_state], axis=1) # seq length = 2
+ hs = self.transformer(transformer_input, None, self.query_embed.weight, self.pos.weight)[0]
+ a_hat = self.action_head(hs)
+ is_pad_hat = self.is_pad_head(hs)
+ return a_hat, is_pad_hat, [mu, logvar]
+
+
+
+class CNNMLP(nn.Module):
+ def __init__(self, backbones, state_dim, camera_names):
+ """ Initializes the model.
+ Parameters:
+ backbones: list of torch backbone modules, one per camera. See backbone.py
+ state_dim: robot state (and output action) dimension of the environment
+ camera_names: list of camera names; one backbone and down-projection per camera
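+
+ Shape sketch (illustrative): qpos (B, 14) plus one 768-dim flattened feature
+ per camera are concatenated into an MLP input of size 768*len(camera_names)+14,
+ which the MLP maps to a_hat of shape (B, 14).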
+ """ + super().__init__() + self.camera_names = camera_names + self.action_head = nn.Linear(1000, state_dim) # TODO add more + if backbones is not None: + self.backbones = nn.ModuleList(backbones) + backbone_down_projs = [] + for backbone in backbones: + down_proj = nn.Sequential( + nn.Conv2d(backbone.num_channels, 128, kernel_size=5), + nn.Conv2d(128, 64, kernel_size=5), + nn.Conv2d(64, 32, kernel_size=5) + ) + backbone_down_projs.append(down_proj) + self.backbone_down_projs = nn.ModuleList(backbone_down_projs) + + mlp_in_dim = 768 * len(backbones) + 14 + self.mlp = mlp(input_dim=mlp_in_dim, hidden_dim=1024, output_dim=14, hidden_depth=2) + else: + raise NotImplementedError + + def forward(self, qpos, image, env_state, actions=None): + """ + qpos: batch, qpos_dim + image: batch, num_cam, channel, height, width + env_state: None + actions: batch, seq, action_dim + """ + is_training = actions is not None # train or val + bs, _ = qpos.shape + # Image observation features and position embeddings + all_cam_features = [] + for cam_id, cam_name in enumerate(self.camera_names): + features, pos = self.backbones[cam_id](image[:, cam_id]) + features = features[0] # take the last layer feature + pos = pos[0] # not used + all_cam_features.append(self.backbone_down_projs[cam_id](features)) + # flatten everything + flattened_features = [] + for cam_feature in all_cam_features: + flattened_features.append(cam_feature.reshape([bs, -1])) + flattened_features = torch.cat(flattened_features, axis=1) # 768 each + features = torch.cat([flattened_features, qpos], axis=1) # qpos: 14 + a_hat = self.mlp(features) + return a_hat + + +def mlp(input_dim, hidden_dim, output_dim, hidden_depth): + if hidden_depth == 0: + mods = [nn.Linear(input_dim, output_dim)] + else: + mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)] + for i in range(hidden_depth - 1): + mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)] + mods.append(nn.Linear(hidden_dim, output_dim)) + trunk = nn.Sequential(*mods) + return trunk + + +def build_encoder(args): + d_model = args.hidden_dim # 256 + dropout = args.dropout # 0.1 + nhead = args.nheads # 8 + dim_feedforward = args.dim_feedforward # 2048 + num_encoder_layers = args.enc_layers # 4 # TODO shared with VAE decoder + normalize_before = args.pre_norm # False + activation = "relu" + + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + return encoder + + +def build(args): + + # From state + # backbone = None # from state for now, no need for conv nets + # From image + backbones = [] + backbone = build_backbone(args) + backbones.append(backbone) + + transformer = build_transformer(args) + + encoder = build_encoder(args) + + model = DETRVAE( + backbones, + transformer, + encoder, + latent_dim=args.latent_dim, + a_dim=args.a_dim, + state_dim=args.state_dim, + num_queries=args.num_queries, + camera_names=args.camera_names, + ) + + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + print("number of parameters: %.2fM" % (n_parameters/1e6,)) + + return model + +def build_cnnmlp(args): + state_dim = 14 # TODO hardcode + + # From state + # backbone = None # from state for now, no need for conv nets + # From image + backbones = [] + for _ in args.camera_names: + backbone = build_backbone(args) + backbones.append(backbone) + + model = 
CNNMLP( + backbones, + state_dim=state_dim, + camera_names=args.camera_names, + ) + + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + print("number of parameters: %.2fM" % (n_parameters/1e6,)) + + return model + diff --git a/act/detr/models/position_encoding.py b/act/detr/models/position_encoding.py new file mode 100644 index 00000000..209d9171 --- /dev/null +++ b/act/detr/models/position_encoding.py @@ -0,0 +1,93 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Various positional encodings for the transformer. +""" +import math +import torch +from torch import nn + +from util.misc import NestedTensor + +import IPython +e = IPython.embed + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor): + x = tensor + # mask = tensor_list.mask + # assert mask is not None + # not_mask = ~mask + + not_mask = torch.ones_like(x[0, [0]]) + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. 
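+
+ Output shape matches the sine variant, (B, 2*num_pos_feats, H, W); note that
+ the row/col embedding tables below assume feature maps no larger than 50x50.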
+ """ + def __init__(self, num_pos_feats=256): + super().__init__() + self.row_embed = nn.Embedding(50, num_pos_feats) + self.col_embed = nn.Embedding(50, num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.row_embed.weight) + nn.init.uniform_(self.col_embed.weight) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + h, w = x.shape[-2:] + i = torch.arange(w, device=x.device) + j = torch.arange(h, device=x.device) + x_emb = self.col_embed(i) + y_emb = self.row_embed(j) + pos = torch.cat([ + x_emb.unsqueeze(0).repeat(h, 1, 1), + y_emb.unsqueeze(1).repeat(1, w, 1), + ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) + return pos + + +def build_position_encoding(args): + N_steps = args.hidden_dim // 2 + if args.position_embedding in ('v2', 'sine'): + # TODO find a better way of exposing other arguments + position_embedding = PositionEmbeddingSine(N_steps, normalize=True) + elif args.position_embedding in ('v3', 'learned'): + position_embedding = PositionEmbeddingLearned(N_steps) + else: + raise ValueError(f"not supported {args.position_embedding}") + + return position_embedding diff --git a/act/detr/models/transformer.py b/act/detr/models/transformer.py new file mode 100644 index 00000000..f38afd0e --- /dev/null +++ b/act/detr/models/transformer.py @@ -0,0 +1,314 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. + +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import Optional, List + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + +import IPython +e = IPython.embed + +class Transformer(nn.Module): + + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False): + super().__init__() + + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, + return_intermediate=return_intermediate_dec) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed, latent_input=None, proprio_input=None, additional_pos_embed=None): + # TODO flatten only when input has H and W + if len(src.shape) == 4: # has H and W + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + # mask = mask.flatten(1) + + additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim + pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0) + + addition_input = torch.stack([latent_input, 
proprio_input], axis=0) + src = torch.cat([addition_input, src], axis=0) + else: + assert len(src.shape) == 3 + # flatten NxHWxC to HWxNxC + bs, hw, c = src.shape + src = src.permute(1, 0, 2) + pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + + tgt = torch.zeros_like(query_embed) + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, + pos=pos_embed, query_pos=query_embed) + hs = hs.transpose(1, 2) + return hs + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + output = src + + for layer in self.layers: + output = layer(output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src 
= src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = 
self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + ) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff --git a/act/detr/setup.py b/act/detr/setup.py new file mode 100644 index 00000000..55d18c0d --- /dev/null +++ b/act/detr/setup.py @@ -0,0 +1,10 @@ +from distutils.core import setup +from setuptools import find_packages + +setup( + name='detr', + version='0.0.0', + packages=find_packages(), + license='MIT License', + long_description=open('README.md').read(), +) \ No newline at end of file diff --git a/act/detr/util/__init__.py b/act/detr/util/__init__.py new file mode 100644 index 00000000..168f9979 --- /dev/null +++ b/act/detr/util/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/act/detr/util/box_ops.py b/act/detr/util/box_ops.py new file mode 100644 index 00000000..9c088e5b --- /dev/null +++ b/act/detr/util/box_ops.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. 
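+
+Example (illustrative): box_cxcywh_to_xyxy(torch.tensor([0.5, 0.5, 1.0, 1.0]))
+returns tensor([0., 0., 1., 1.]).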
+""" +import torch +from torchvision.ops.boxes import box_area + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), + (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, + (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +# modified from torchvision to also return the union +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + + The boxes should be in [x0, y0, x1, y1] format + + Returns a [N, M] pairwise matrix, where N = len(boxes1) + and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + iou, union = box_iou(boxes1, boxes2) + + lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,M,2] + area = wh[:, :, 0] * wh[:, :, 1] + + return iou - (area - union) / area + + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + + Returns a [N, 4] tensors, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float) + x = torch.arange(0, w, dtype=torch.float) + y, x = torch.meshgrid(y, x) + + x_mask = (masks * x.unsqueeze(0)) + x_max = x_mask.flatten(1).max(-1)[0] + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = (masks * y.unsqueeze(0)) + y_max = y_mask.flatten(1).max(-1)[0] + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/act/detr/util/misc.py b/act/detr/util/misc.py new file mode 100644 index 00000000..dfa9fb5b --- /dev/null +++ b/act/detr/util/misc.py @@ -0,0 +1,468 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. +""" +import os +import subprocess +import time +from collections import defaultdict, deque +import datetime +import pickle +from packaging import version +from typing import Optional, List + +import torch +import torch.distributed as dist +from torch import Tensor + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +if version.parse(torchvision.__version__) < version.parse('0.7'): + from torchvision.ops import _new_empty_tensor + from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. 
+ """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def collate_fn(batch): + batch = 
list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + if torchvision._is_tracing(): + # nested_tensor_from_tensor_list() does not export well to ONNX + # call _onnx_nested_tensor_from_tensor_list() instead + return _onnx_nested_tensor_from_tensor_list(tensor_list) + + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], :img.shape[2]] = False + else: + raise ValueError('not supported') + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
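+# Usage sketch for nested_tensor_from_tensor_list (illustrative sizes): smaller
+# images are zero-padded to the batch max and the mask marks padded pixels True:
+# imgs = [torch.rand(3, 480, 640), torch.rand(3, 240, 320)]
+# nt = nested_tensor_from_tensor_list(imgs) # nt.tensors: (2, 3, 480, 640)
+# assert nt.mask[1, 240:, :].all()
+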
+@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty 
batch sizes. + This will eventually be supported natively by PyTorch, and this + class can go away. + """ + if version.parse(torchvision.__version__) < version.parse('0.7'): + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/act/detr/util/plot_utils.py b/act/detr/util/plot_utils.py new file mode 100644 index 00000000..0f24bed0 --- /dev/null +++ b/act/detr/util/plot_utils.py @@ -0,0 +1,107 @@ +""" +Plotting utilities to visualize training logs. +""" +import torch +import pandas as pd +import numpy as np +import seaborn as sns +import matplotlib.pyplot as plt + +from pathlib import Path, PurePath + + +def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): + ''' + Function to plot specific fields from training log(s). Plots both training and test results. + + :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file + - fields = which results to plot from each log file - plots both training and test for each field. + - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots + - log_name = optional, name of log file if different than default 'log.txt'. + + :: Outputs - matplotlib plots of results in fields, color coded for each log file. + - solid lines are training results, dashed lines are test results. + + ''' + func_name = "plot_utils.py::plot_logs" + + # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, + # convert single Path to list to avoid 'not iterable' error + + if not isinstance(logs, list): + if isinstance(logs, PurePath): + logs = [logs] + print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") + else: + raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ + Expect list[Path] or single Path obj, received {type(logs)}") + + # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir + for i, dir in enumerate(logs): + if not isinstance(dir, PurePath): + raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") + if not dir.exists(): + raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") + # verify log_name exists + fn = Path(dir / log_name) + if not fn.exists(): + print(f"-> missing {log_name}. 
Have you gotten to Epoch 1 in training?") + print(f"--> full path of missing log file: {fn}") + return + + # load log file(s) and plot + dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] + + fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) + + for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): + for j, field in enumerate(fields): + if field == 'mAP': + coco_eval = pd.DataFrame( + np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] + ).ewm(com=ewm_col).mean() + axs[j].plot(coco_eval, c=color) + else: + df.interpolate().ewm(com=ewm_col).mean().plot( + y=[f'train_{field}', f'test_{field}'], + ax=axs[j], + color=[color] * 2, + style=['-', '--'] + ) + for ax, field in zip(axs, fields): + ax.legend([Path(p).name for p in logs]) + ax.set_title(field) + + +def plot_precision_recall(files, naming_scheme='iter'): + if naming_scheme == 'exp_id': + # name becomes exp_id + names = [f.parts[-3] for f in files] + elif naming_scheme == 'iter': + names = [f.stem for f in files] + else: + raise ValueError(f'not supported {naming_scheme}') + fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) + for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): + data = torch.load(f) + # precision is n_iou, n_points, n_cat, n_area, max_det + precision = data['precision'] + recall = data['params'].recThrs + scores = data['scores'] + # take precision for all classes, all areas and 100 detections + precision = precision[0, :, :, 0, -1].mean(1) + scores = scores[0, :, :, 0, -1].mean(1) + prec = precision.mean() + rec = data['recall'][0, :, 0, -1].mean() + print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + + f'score={scores.mean():0.3f}, ' + + f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' + ) + axs[0].plot(recall, precision, c=color) + axs[1].plot(recall, scores, c=color) + + axs[0].set_title('Precision / Recall') + axs[0].legend(names) + axs[1].set_title('Scores / Recall') + axs[1].legend(names) + return fig, axs diff --git a/act/ee_sim_env.py b/act/ee_sim_env.py new file mode 100644 index 00000000..01df2337 --- /dev/null +++ b/act/ee_sim_env.py @@ -0,0 +1,267 @@ +import numpy as np +import collections +import os + +from constants import DT, XML_DIR, START_ARM_POSE +from constants import PUPPET_GRIPPER_POSITION_CLOSE +from constants import PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN +from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN +from constants import PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN + +from utils import sample_box_pose, sample_insertion_pose +from dm_control import mujoco +from dm_control.rl import control +from dm_control.suite import base + +import IPython +e = IPython.embed + + +def make_ee_sim_env(task_name): + """ + Environment for simulated robot bi-manual manipulation, with end-effector control. 
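+
+ Example (illustrative; any task name containing 'sim_transfer_cube' works):
+ env = make_ee_sim_env('sim_transfer_cube_scripted')
+ ts = env.reset() # dm_control TimeStep; ts.observation matches the spec below
+ ts = env.step(action) # action: (16,) array with the layout below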
+ Action space: [left_arm_pose (7), # position and quaternion for end effector + left_gripper_positions (1), # normalized gripper position (0: close, 1: open) + right_arm_pose (7), # position and quaternion for end effector + right_gripper_positions (1),] # normalized gripper position (0: close, 1: open) + + Observation space: {"qpos": Concat[ left_arm_qpos (6), # absolute joint position + left_gripper_position (1), # normalized gripper position (0: close, 1: open) + right_arm_qpos (6), # absolute joint position + right_gripper_qpos (1)] # normalized gripper position (0: close, 1: open) + "qvel": Concat[ left_arm_qvel (6), # absolute joint velocity (rad) + left_gripper_velocity (1), # normalized gripper velocity (pos: opening, neg: closing) + right_arm_qvel (6), # absolute joint velocity (rad) + right_gripper_qvel (1)] # normalized gripper velocity (pos: opening, neg: closing) + "images": {"main": (480x640x3)} # h, w, c, dtype='uint8' + """ + if 'sim_transfer_cube' in task_name: + xml_path = os.path.join(XML_DIR, f'bimanual_viperx_ee_transfer_cube.xml') + physics = mujoco.Physics.from_xml_path(xml_path) + task = TransferCubeEETask(random=False) + env = control.Environment(physics, task, time_limit=20, control_timestep=DT, + n_sub_steps=None, flat_observation=False) + elif 'sim_insertion' in task_name: + xml_path = os.path.join(XML_DIR, f'bimanual_viperx_ee_insertion.xml') + physics = mujoco.Physics.from_xml_path(xml_path) + task = InsertionEETask(random=False) + env = control.Environment(physics, task, time_limit=20, control_timestep=DT, + n_sub_steps=None, flat_observation=False) + else: + raise NotImplementedError + return env + +class BimanualViperXEETask(base.Task): + def __init__(self, random=None): + super().__init__(random=random) + + def before_step(self, action, physics): + a_len = len(action) // 2 + action_left = action[:a_len] + action_right = action[a_len:] + + # set mocap position and quat + # left + np.copyto(physics.data.mocap_pos[0], action_left[:3]) + np.copyto(physics.data.mocap_quat[0], action_left[3:7]) + # right + np.copyto(physics.data.mocap_pos[1], action_right[:3]) + np.copyto(physics.data.mocap_quat[1], action_right[3:7]) + + # set gripper + g_left_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(action_left[7]) + g_right_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(action_right[7]) + np.copyto(physics.data.ctrl, np.array([g_left_ctrl, -g_left_ctrl, g_right_ctrl, -g_right_ctrl])) + + def initialize_robots(self, physics): + # reset joint position + physics.named.data.qpos[:16] = START_ARM_POSE + + # reset mocap to align with end effector + # to obtain these numbers: + # (1) make an ee_sim env and reset to the same start_pose + # (2) get env._physics.named.data.xpos['vx300s_left/gripper_link'] + # get env._physics.named.data.xquat['vx300s_left/gripper_link'] + # repeat the same for right side + np.copyto(physics.data.mocap_pos[0], [-0.31718881, 0.5, 0.29525084]) + np.copyto(physics.data.mocap_quat[0], [1, 0, 0, 0]) + # right + np.copyto(physics.data.mocap_pos[1], np.array([0.31718881, 0.49999888, 0.29525084])) + np.copyto(physics.data.mocap_quat[1], [1, 0, 0, 0]) + + # reset gripper control + close_gripper_control = np.array([ + PUPPET_GRIPPER_POSITION_CLOSE, + -PUPPET_GRIPPER_POSITION_CLOSE, + PUPPET_GRIPPER_POSITION_CLOSE, + -PUPPET_GRIPPER_POSITION_CLOSE, + ]) + np.copyto(physics.data.ctrl, close_gripper_control) + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + super().initialize_episode(physics) + 
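+ # qpos layout assumed by the static helpers below (matches START_ARM_POSE):
+ # physics.data.qpos[:16] = [left arm (6), left gripper fingers (2),
+ # right arm (6), right gripper fingers (2)]; gripper positions are mapped by
+ # the PUPPET_GRIPPER_*_NORMALIZE_FN helpers to the 0 (close) .. 1 (open) range.
+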
+ @staticmethod + def get_qpos(physics): + qpos_raw = physics.data.qpos.copy() + left_qpos_raw = qpos_raw[:8] + right_qpos_raw = qpos_raw[8:16] + left_arm_qpos = left_qpos_raw[:6] + right_arm_qpos = right_qpos_raw[:6] + left_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_qpos_raw[6])] + right_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_qpos_raw[6])] + return np.concatenate([left_arm_qpos, left_gripper_qpos, right_arm_qpos, right_gripper_qpos]) + + @staticmethod + def get_qvel(physics): + qvel_raw = physics.data.qvel.copy() + left_qvel_raw = qvel_raw[:8] + right_qvel_raw = qvel_raw[8:16] + left_arm_qvel = left_qvel_raw[:6] + right_arm_qvel = right_qvel_raw[:6] + left_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_qvel_raw[6])] + right_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_qvel_raw[6])] + return np.concatenate([left_arm_qvel, left_gripper_qvel, right_arm_qvel, right_gripper_qvel]) + + @staticmethod + def get_env_state(physics): + raise NotImplementedError + + def get_observation(self, physics): + # note: it is important to do .copy() + obs = collections.OrderedDict() + obs['qpos'] = self.get_qpos(physics) + obs['qvel'] = self.get_qvel(physics) + obs['env_state'] = self.get_env_state(physics) + obs['images'] = dict() + obs['images']['top'] = physics.render(height=480, width=640, camera_id='top') + obs['images']['angle'] = physics.render(height=480, width=640, camera_id='angle') + obs['images']['vis'] = physics.render(height=480, width=640, camera_id='front_close') + # used in scripted policy to obtain starting pose + obs['mocap_pose_left'] = np.concatenate([physics.data.mocap_pos[0], physics.data.mocap_quat[0]]).copy() + obs['mocap_pose_right'] = np.concatenate([physics.data.mocap_pos[1], physics.data.mocap_quat[1]]).copy() + + # used when replaying joint trajectory + obs['gripper_ctrl'] = physics.data.ctrl.copy() + return obs + + def get_reward(self, physics): + raise NotImplementedError + + +class TransferCubeEETask(BimanualViperXEETask): + def __init__(self, random=None): + super().__init__(random=random) + self.max_reward = 4 + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + self.initialize_robots(physics) + # randomize box position + cube_pose = sample_box_pose() + box_start_idx = physics.model.name2id('red_box_joint', 'joint') + np.copyto(physics.data.qpos[box_start_idx : box_start_idx + 7], cube_pose) + # print(f"randomized cube position to {cube_position}") + + super().initialize_episode(physics) + + @staticmethod + def get_env_state(physics): + env_state = physics.data.qpos.copy()[16:] + return env_state + + def get_reward(self, physics): + # return whether left gripper is holding the box + all_contact_pairs = [] + for i_contact in range(physics.data.ncon): + id_geom_1 = physics.data.contact[i_contact].geom1 + id_geom_2 = physics.data.contact[i_contact].geom2 + name_geom_1 = physics.model.id2name(id_geom_1, 'geom') + name_geom_2 = physics.model.id2name(id_geom_2, 'geom') + contact_pair = (name_geom_1, name_geom_2) + all_contact_pairs.append(contact_pair) + + touch_left_gripper = ("red_box", "vx300s_left/10_left_gripper_finger") in all_contact_pairs + touch_right_gripper = ("red_box", "vx300s_right/10_right_gripper_finger") in all_contact_pairs + touch_table = ("red_box", "table") in all_contact_pairs + + reward = 0 + if touch_right_gripper: + reward = 1 + if touch_right_gripper and not touch_table: # lifted + reward = 2 + if touch_left_gripper: # attempted 
transfer + reward = 3 + if touch_left_gripper and not touch_table: # successful transfer + reward = 4 + return reward + + +class InsertionEETask(BimanualViperXEETask): + def __init__(self, random=None): + super().__init__(random=random) + self.max_reward = 4 + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + self.initialize_robots(physics) + # randomize peg and socket position + peg_pose, socket_pose = sample_insertion_pose() + id2index = lambda j_id: 16 + (j_id - 16) * 7 # first 16 is robot qpos, 7 is pose dim # hacky + + peg_start_id = physics.model.name2id('red_peg_joint', 'joint') + peg_start_idx = id2index(peg_start_id) + np.copyto(physics.data.qpos[peg_start_idx : peg_start_idx + 7], peg_pose) + # print(f"randomized cube position to {cube_position}") + + socket_start_id = physics.model.name2id('blue_socket_joint', 'joint') + socket_start_idx = id2index(socket_start_id) + np.copyto(physics.data.qpos[socket_start_idx : socket_start_idx + 7], socket_pose) + # print(f"randomized cube position to {cube_position}") + + super().initialize_episode(physics) + + @staticmethod + def get_env_state(physics): + env_state = physics.data.qpos.copy()[16:] + return env_state + + def get_reward(self, physics): + # return whether peg touches the pin + all_contact_pairs = [] + for i_contact in range(physics.data.ncon): + id_geom_1 = physics.data.contact[i_contact].geom1 + id_geom_2 = physics.data.contact[i_contact].geom2 + name_geom_1 = physics.model.id2name(id_geom_1, 'geom') + name_geom_2 = physics.model.id2name(id_geom_2, 'geom') + contact_pair = (name_geom_1, name_geom_2) + all_contact_pairs.append(contact_pair) + + touch_right_gripper = ("red_peg", "vx300s_right/10_right_gripper_finger") in all_contact_pairs + touch_left_gripper = ("socket-1", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-2", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-3", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-4", "vx300s_left/10_left_gripper_finger") in all_contact_pairs + + peg_touch_table = ("red_peg", "table") in all_contact_pairs + socket_touch_table = ("socket-1", "table") in all_contact_pairs or \ + ("socket-2", "table") in all_contact_pairs or \ + ("socket-3", "table") in all_contact_pairs or \ + ("socket-4", "table") in all_contact_pairs + peg_touch_socket = ("red_peg", "socket-1") in all_contact_pairs or \ + ("red_peg", "socket-2") in all_contact_pairs or \ + ("red_peg", "socket-3") in all_contact_pairs or \ + ("red_peg", "socket-4") in all_contact_pairs + pin_touched = ("red_peg", "pin") in all_contact_pairs + + reward = 0 + if touch_left_gripper and touch_right_gripper: # touch both + reward = 1 + if touch_left_gripper and touch_right_gripper and (not peg_touch_table) and (not socket_touch_table): # grasp both + reward = 2 + if peg_touch_socket and (not peg_touch_table) and (not socket_touch_table): # peg and socket touching + reward = 3 + if pin_touched: # successful insertion + reward = 4 + return reward diff --git a/act/imitate_episodes.py b/act/imitate_episodes.py new file mode 100644 index 00000000..34f9a372 --- /dev/null +++ b/act/imitate_episodes.py @@ -0,0 +1,435 @@ +import torch +import numpy as np +import os +import pickle +import argparse +import matplotlib.pyplot as plt +from copy import deepcopy +from tqdm import tqdm +from einops import rearrange + +from constants import DT +from constants import PUPPET_GRIPPER_JOINT_OPEN +from utils import 
load_data # data functions +from utils import sample_box_pose, sample_insertion_pose # robot functions +from utils import compute_dict_mean, set_seed, detach_dict # helper functions +from policy import ACTPolicy, CNNMLPPolicy +from visualize_episodes import save_videos + +from sim_env import BOX_POSE + +import IPython +e = IPython.embed + +def main(args): + set_seed(1) + # command line parameters + is_eval = args['eval'] + ckpt_dir = args['ckpt_dir'] + policy_class = args['policy_class'] + onscreen_render = args['onscreen_render'] + task_name = args['task_name'] + batch_size_train = args['batch_size'] + batch_size_val = args['batch_size'] + num_epochs = args['num_epochs'] + + # get task parameters + is_sim = task_name[:4] == 'sim_' + if is_sim: + from constants import SIM_TASK_CONFIGS + task_config = SIM_TASK_CONFIGS[task_name] + else: + from aloha_scripts.constants import TASK_CONFIGS + task_config = TASK_CONFIGS[task_name] + dataset_dir = task_config['dataset_dir'] + num_episodes = task_config['num_episodes'] + episode_len = task_config['episode_len'] + camera_names = task_config['camera_names'] + + # fixed parameters + state_dim = 14 + lr_backbone = 1e-5 + backbone = 'resnet18' + if policy_class == 'ACT': + enc_layers = 4 + dec_layers = 7 + nheads = 8 + policy_config = {'lr': args['lr'], + 'num_queries': args['chunk_size'], + 'kl_weight': args['kl_weight'], + 'hidden_dim': args['hidden_dim'], + 'dim_feedforward': args['dim_feedforward'], + 'lr_backbone': lr_backbone, + 'backbone': backbone, + 'enc_layers': enc_layers, + 'dec_layers': dec_layers, + 'nheads': nheads, + 'camera_names': camera_names, + } + elif policy_class == 'CNNMLP': + policy_config = {'lr': args['lr'], 'lr_backbone': lr_backbone, 'backbone' : backbone, 'num_queries': 1, + 'camera_names': camera_names,} + else: + raise NotImplementedError + + config = { + 'num_epochs': num_epochs, + 'ckpt_dir': ckpt_dir, + 'episode_len': episode_len, + 'state_dim': state_dim, + 'lr': args['lr'], + 'policy_class': policy_class, + 'onscreen_render': onscreen_render, + 'policy_config': policy_config, + 'task_name': task_name, + 'seed': args['seed'], + 'temporal_agg': args['temporal_agg'], + 'camera_names': camera_names, + 'real_robot': not is_sim + } + + if is_eval: + ckpt_names = [f'policy_best.ckpt'] + results = [] + for ckpt_name in ckpt_names: + success_rate, avg_return = eval_bc(config, ckpt_name, save_episode=True) + results.append([ckpt_name, success_rate, avg_return]) + + for ckpt_name, success_rate, avg_return in results: + print(f'{ckpt_name}: {success_rate=} {avg_return=}') + print() + exit() + + train_dataloader, val_dataloader, stats, _ = load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val) + + # save dataset stats + if not os.path.isdir(ckpt_dir): + os.makedirs(ckpt_dir) + stats_path = os.path.join(ckpt_dir, f'dataset_stats.pkl') + with open(stats_path, 'wb') as f: + pickle.dump(stats, f) + + best_ckpt_info = train_bc(train_dataloader, val_dataloader, config) + best_epoch, min_val_loss, best_state_dict = best_ckpt_info + + # save best checkpoint + ckpt_path = os.path.join(ckpt_dir, f'policy_best.ckpt') + torch.save(best_state_dict, ckpt_path) + print(f'Best ckpt, val loss {min_val_loss:.6f} @ epoch{best_epoch}') + + +def make_policy(policy_class, policy_config): + if policy_class == 'ACT': + policy = ACTPolicy(policy_config) + elif policy_class == 'CNNMLP': + policy = CNNMLPPolicy(policy_config) + else: + raise NotImplementedError + return policy + + +def make_optimizer(policy_class, policy): + 
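+    # note: both branches currently return policy.configure_optimizers(); the
+    # split is kept so ACT and CNNMLP could configure different optimizers later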
if policy_class == 'ACT': + optimizer = policy.configure_optimizers() + elif policy_class == 'CNNMLP': + optimizer = policy.configure_optimizers() + else: + raise NotImplementedError + return optimizer + + +def get_image(ts, camera_names): + curr_images = [] + for cam_name in camera_names: + curr_image = rearrange(ts.observation['images'][cam_name], 'h w c -> c h w') + curr_images.append(curr_image) + curr_image = np.stack(curr_images, axis=0) + curr_image = torch.from_numpy(curr_image / 255.0).float().cuda().unsqueeze(0) + return curr_image + + +def eval_bc(config, ckpt_name, save_episode=True): + set_seed(1000) + ckpt_dir = config['ckpt_dir'] + state_dim = config['state_dim'] + real_robot = config['real_robot'] + policy_class = config['policy_class'] + onscreen_render = config['onscreen_render'] + policy_config = config['policy_config'] + camera_names = config['camera_names'] + max_timesteps = config['episode_len'] + task_name = config['task_name'] + temporal_agg = config['temporal_agg'] + onscreen_cam = 'angle' + + # load policy and stats + ckpt_path = os.path.join(ckpt_dir, ckpt_name) + policy = make_policy(policy_class, policy_config) + loading_status = policy.load_state_dict(torch.load(ckpt_path)) + print(loading_status) + policy.cuda() + policy.eval() + print(f'Loaded: {ckpt_path}') + stats_path = os.path.join(ckpt_dir, f'dataset_stats.pkl') + with open(stats_path, 'rb') as f: + stats = pickle.load(f) + + pre_process = lambda s_qpos: (s_qpos - stats['qpos_mean']) / stats['qpos_std'] + post_process = lambda a: a * stats['action_std'] + stats['action_mean'] + + # load environment + if real_robot: + from aloha_scripts.robot_utils import move_grippers # requires aloha + from aloha_scripts.real_env import make_real_env # requires aloha + env = make_real_env(init_node=True) + env_max_reward = 0 + else: + from sim_env import make_sim_env + env = make_sim_env(task_name) + env_max_reward = env.task.max_reward + + query_frequency = policy_config['num_queries'] + if temporal_agg: + query_frequency = 1 + num_queries = policy_config['num_queries'] + + max_timesteps = int(max_timesteps * 1) # may increase for real-world tasks + + num_rollouts = 50 + episode_returns = [] + highest_rewards = [] + for rollout_id in range(num_rollouts): + rollout_id += 0 + ### set task + if 'sim_transfer_cube' in task_name: + BOX_POSE[0] = sample_box_pose() # used in sim reset + elif 'sim_insertion' in task_name: + BOX_POSE[0] = np.concatenate(sample_insertion_pose()) # used in sim reset + + ts = env.reset() + + ### onscreen render + if onscreen_render: + ax = plt.subplot() + plt_img = ax.imshow(env._physics.render(height=480, width=640, camera_id=onscreen_cam)) + plt.ion() + + ### evaluation loop + if temporal_agg: + all_time_actions = torch.zeros([max_timesteps, max_timesteps+num_queries, state_dim]).cuda() + + qpos_history = torch.zeros((1, max_timesteps, state_dim)).cuda() + image_list = [] # for visualization + qpos_list = [] + target_qpos_list = [] + rewards = [] + with torch.inference_mode(): + for t in range(max_timesteps): + ### update onscreen render and wait for DT + if onscreen_render: + image = env._physics.render(height=480, width=640, camera_id=onscreen_cam) + plt_img.set_data(image) + plt.pause(DT) + + ### process previous timestep to get qpos and image_list + obs = ts.observation + if 'images' in obs: + image_list.append(obs['images']) + else: + image_list.append({'main': obs['image']}) + qpos_numpy = np.array(obs['qpos']) + qpos = pre_process(qpos_numpy) + qpos = 
torch.from_numpy(qpos).float().cuda().unsqueeze(0) + qpos_history[:, t] = qpos + curr_image = get_image(ts, camera_names) + + ### query policy + if config['policy_class'] == "ACT": + if t % query_frequency == 0: + all_actions = policy(qpos, curr_image) + if temporal_agg: + all_time_actions[[t], t:t+num_queries] = all_actions + actions_for_curr_step = all_time_actions[:, t] + actions_populated = torch.all(actions_for_curr_step != 0, axis=1) + actions_for_curr_step = actions_for_curr_step[actions_populated] + k = 0.01 + exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step))) + exp_weights = exp_weights / exp_weights.sum() + exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1) + raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True) + else: + raw_action = all_actions[:, t % query_frequency] + elif config['policy_class'] == "CNNMLP": + raw_action = policy(qpos, curr_image) + else: + raise NotImplementedError + + ### post-process actions + raw_action = raw_action.squeeze(0).cpu().numpy() + action = post_process(raw_action) + target_qpos = action + + ### step the environment + ts = env.step(target_qpos) + + ### for visualization + qpos_list.append(qpos_numpy) + target_qpos_list.append(target_qpos) + rewards.append(ts.reward) + + plt.close() + if real_robot: + move_grippers([env.puppet_bot_left, env.puppet_bot_right], [PUPPET_GRIPPER_JOINT_OPEN] * 2, move_time=0.5) # open + pass + + rewards = np.array(rewards) + episode_return = np.sum(rewards[rewards!=None]) + episode_returns.append(episode_return) + episode_highest_reward = np.max(rewards) + highest_rewards.append(episode_highest_reward) + print(f'Rollout {rollout_id}\n{episode_return=}, {episode_highest_reward=}, {env_max_reward=}, Success: {episode_highest_reward==env_max_reward}') + + if save_episode: + save_videos(image_list, DT, video_path=os.path.join(ckpt_dir, f'video{rollout_id}.mp4')) + + success_rate = np.mean(np.array(highest_rewards) == env_max_reward) + avg_return = np.mean(episode_returns) + summary_str = f'\nSuccess rate: {success_rate}\nAverage return: {avg_return}\n\n' + for r in range(env_max_reward+1): + more_or_equal_r = (np.array(highest_rewards) >= r).sum() + more_or_equal_r_rate = more_or_equal_r / num_rollouts + summary_str += f'Reward >= {r}: {more_or_equal_r}/{num_rollouts} = {more_or_equal_r_rate*100}%\n' + + print(summary_str) + + # save success rate to txt + result_file_name = 'result_' + ckpt_name.split('.')[0] + '.txt' + with open(os.path.join(ckpt_dir, result_file_name), 'w') as f: + f.write(summary_str) + f.write(repr(episode_returns)) + f.write('\n\n') + f.write(repr(highest_rewards)) + + return success_rate, avg_return + + +def forward_pass(data, policy): + image_data, qpos_data, action_data, is_pad = data + image_data, qpos_data, action_data, is_pad = image_data.cuda(), qpos_data.cuda(), action_data.cuda(), is_pad.cuda() + return policy(qpos_data, image_data, action_data, is_pad) # TODO remove None + + +def train_bc(train_dataloader, val_dataloader, config): + num_epochs = config['num_epochs'] + ckpt_dir = config['ckpt_dir'] + seed = config['seed'] + policy_class = config['policy_class'] + policy_config = config['policy_config'] + + set_seed(seed) + + policy = make_policy(policy_class, policy_config) + policy.cuda() + optimizer = make_optimizer(policy_class, policy) + + train_history = [] + validation_history = [] + min_val_loss = np.inf + best_ckpt_info = None + for epoch in tqdm(range(num_epochs)): + print(f'\nEpoch {epoch}') + # validation + with 
torch.inference_mode(): + policy.eval() + epoch_dicts = [] + for batch_idx, data in enumerate(val_dataloader): + forward_dict = forward_pass(data, policy) + epoch_dicts.append(forward_dict) + epoch_summary = compute_dict_mean(epoch_dicts) + validation_history.append(epoch_summary) + + epoch_val_loss = epoch_summary['loss'] + if epoch_val_loss < min_val_loss: + min_val_loss = epoch_val_loss + best_ckpt_info = (epoch, min_val_loss, deepcopy(policy.state_dict())) + print(f'Val loss: {epoch_val_loss:.5f}') + summary_string = '' + for k, v in epoch_summary.items(): + summary_string += f'{k}: {v.item():.3f} ' + print(summary_string) + + # training + policy.train() + optimizer.zero_grad() + for batch_idx, data in enumerate(train_dataloader): + forward_dict = forward_pass(data, policy) + # backward + loss = forward_dict['loss'] + loss.backward() + optimizer.step() + optimizer.zero_grad() + train_history.append(detach_dict(forward_dict)) + epoch_summary = compute_dict_mean(train_history[(batch_idx+1)*epoch:(batch_idx+1)*(epoch+1)]) + epoch_train_loss = epoch_summary['loss'] + print(f'Train loss: {epoch_train_loss:.5f}') + summary_string = '' + for k, v in epoch_summary.items(): + summary_string += f'{k}: {v.item():.3f} ' + print(summary_string) + + if epoch % 100 == 0: + ckpt_path = os.path.join(ckpt_dir, f'policy_epoch_{epoch}_seed_{seed}.ckpt') + torch.save(policy.state_dict(), ckpt_path) + plot_history(train_history, validation_history, epoch, ckpt_dir, seed) + + ckpt_path = os.path.join(ckpt_dir, f'policy_last.ckpt') + torch.save(policy.state_dict(), ckpt_path) + + best_epoch, min_val_loss, best_state_dict = best_ckpt_info + ckpt_path = os.path.join(ckpt_dir, f'policy_epoch_{best_epoch}_seed_{seed}.ckpt') + torch.save(best_state_dict, ckpt_path) + print(f'Training finished:\nSeed {seed}, val loss {min_val_loss:.6f} at epoch {best_epoch}') + + # save training curves + plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed) + + return best_ckpt_info + + +def plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed): + # save training curves + for key in train_history[0]: + plot_path = os.path.join(ckpt_dir, f'train_val_{key}_seed_{seed}.png') + plt.figure() + train_values = [summary[key].item() for summary in train_history] + val_values = [summary[key].item() for summary in validation_history] + plt.plot(np.linspace(0, num_epochs-1, len(train_history)), train_values, label='train') + plt.plot(np.linspace(0, num_epochs-1, len(validation_history)), val_values, label='validation') + # plt.ylim([-0.1, 1]) + plt.tight_layout() + plt.legend() + plt.title(key) + plt.savefig(plot_path) + print(f'Saved plots to {ckpt_dir}') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--eval', action='store_true') + parser.add_argument('--onscreen_render', action='store_true') + parser.add_argument('--ckpt_dir', action='store', type=str, help='ckpt_dir', required=True) + parser.add_argument('--policy_class', action='store', type=str, help='policy_class, capitalize', required=True) + parser.add_argument('--task_name', action='store', type=str, help='task_name', required=True) + parser.add_argument('--batch_size', action='store', type=int, help='batch_size', required=True) + parser.add_argument('--seed', action='store', type=int, help='seed', required=True) + parser.add_argument('--num_epochs', action='store', type=int, help='num_epochs', required=True) + parser.add_argument('--lr', action='store', type=float, help='lr', required=True) + + 
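+    # example invocation (illustrative hyperparameters, not a recommendation):
+    #   python3 imitate_episodes.py --task_name sim_transfer_cube_scripted \
+    #       --ckpt_dir <ckpt dir> --policy_class ACT --kl_weight 10 --chunk_size 100 \
+    #       --hidden_dim 512 --batch_size 8 --dim_feedforward 3200 \
+    #       --num_epochs 2000 --lr 1e-5 --seed 0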
# for ACT + parser.add_argument('--kl_weight', action='store', type=int, help='KL Weight', required=False) + parser.add_argument('--chunk_size', action='store', type=int, help='chunk_size', required=False) + parser.add_argument('--hidden_dim', action='store', type=int, help='hidden_dim', required=False) + parser.add_argument('--dim_feedforward', action='store', type=int, help='dim_feedforward', required=False) + parser.add_argument('--temporal_agg', action='store_true') + + main(vars(parser.parse_args())) diff --git a/act/policy.py b/act/policy.py new file mode 100644 index 00000000..7b091e5e --- /dev/null +++ b/act/policy.py @@ -0,0 +1,84 @@ +import torch.nn as nn +from torch.nn import functional as F +import torchvision.transforms as transforms + +from detr.main import build_ACT_model_and_optimizer, build_CNNMLP_model_and_optimizer +import IPython +e = IPython.embed + +class ACTPolicy(nn.Module): + def __init__(self, args_override): + super().__init__() + model, optimizer = build_ACT_model_and_optimizer(args_override) + self.model = model # CVAE decoder + self.optimizer = optimizer + self.kl_weight = args_override['kl_weight'] + print(f'KL Weight {self.kl_weight}') + + def __call__(self, qpos, image, actions=None, is_pad=None): + env_state = None + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + image = normalize(image) + if actions is not None: # training time + actions = actions[:, :self.model.num_queries] + is_pad = is_pad[:, :self.model.num_queries] + + a_hat, is_pad_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad) + total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar) + loss_dict = dict() + all_l1 = F.l1_loss(actions, a_hat, reduction='none') + l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean() + loss_dict['l1'] = l1 + loss_dict['kl'] = total_kld[0] + loss_dict['loss'] = loss_dict['l1'] + loss_dict['kl'] * self.kl_weight + return loss_dict + else: # inference time + a_hat, _, (_, _) = self.model(qpos, image, env_state) # no action, sample from prior + return a_hat + + def configure_optimizers(self): + return self.optimizer + + +class CNNMLPPolicy(nn.Module): + def __init__(self, args_override): + super().__init__() + model, optimizer = build_CNNMLP_model_and_optimizer(args_override) + self.model = model # decoder + self.optimizer = optimizer + + def __call__(self, qpos, image, actions=None, is_pad=None): + env_state = None # TODO + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + image = normalize(image) + if actions is not None: # training time + actions = actions[:, 0] + a_hat = self.model(qpos, image, env_state, actions) + mse = F.mse_loss(actions, a_hat) + loss_dict = dict() + loss_dict['mse'] = mse + loss_dict['loss'] = loss_dict['mse'] + return loss_dict + else: # inference time + a_hat = self.model(qpos, image, env_state) # no action, sample from prior + return a_hat + + def configure_optimizers(self): + return self.optimizer + +def kl_divergence(mu, logvar): + batch_size = mu.size(0) + assert batch_size != 0 + if mu.data.ndimension() == 4: + mu = mu.view(mu.size(0), mu.size(1)) + if logvar.data.ndimension() == 4: + logvar = logvar.view(logvar.size(0), logvar.size(1)) + + klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()) + total_kld = klds.sum(1).mean(0, True) + dimension_wise_kld = klds.mean(0) + mean_kld = klds.mean(1).mean(0, True) + + return total_kld, dimension_wise_kld, mean_kld diff --git a/act/record_sim_episodes.py b/act/record_sim_episodes.py new file 
mode 100644
index 00000000..253fdea1
--- /dev/null
+++ b/act/record_sim_episodes.py
@@ -0,0 +1,189 @@
+import time
+import os
+import numpy as np
+import argparse
+import matplotlib.pyplot as plt
+import h5py
+
+from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN, SIM_TASK_CONFIGS
+from ee_sim_env import make_ee_sim_env
+from sim_env import make_sim_env, BOX_POSE
+from scripted_policy import PickAndTransferPolicy, InsertionPolicy
+
+import IPython
+e = IPython.embed
+
+
+def main(args):
+    """
+    Generate demonstration data in simulation.
+    First roll out the policy (defined in ee space) in ee_sim_env and obtain the joint trajectory.
+    Replace the recorded gripper joint positions with the commanded joint positions.
+    Replay this joint trajectory (as an action sequence) in sim_env, and record all observations.
+    Save this episode of data, and continue to the next episode of data collection.
+    """
+
+    task_name = args['task_name']
+    dataset_dir = args['dataset_dir']
+    num_episodes = args['num_episodes']
+    onscreen_render = args['onscreen_render']
+    inject_noise = False
+    render_cam_name = 'angle'
+
+    if not os.path.isdir(dataset_dir):
+        os.makedirs(dataset_dir, exist_ok=True)
+
+    episode_len = SIM_TASK_CONFIGS[task_name]['episode_len']
+    camera_names = SIM_TASK_CONFIGS[task_name]['camera_names']
+    if task_name == 'sim_transfer_cube_scripted':
+        policy_cls = PickAndTransferPolicy
+    elif task_name == 'sim_insertion_scripted':
+        policy_cls = InsertionPolicy
+    else:
+        raise NotImplementedError
+
+    success = []
+    for episode_idx in range(num_episodes):
+        print(f'{episode_idx=}')
+        print('Rolling out EE space scripted policy')
+        # setup the environment
+        env = make_ee_sim_env(task_name)
+        ts = env.reset()
+        episode = [ts]
+        policy = policy_cls(inject_noise)
+        # setup plotting
+        if onscreen_render:
+            ax = plt.subplot()
+            plt_img = ax.imshow(ts.observation['images'][render_cam_name])
+            plt.ion()
+        for step in range(episode_len):
+            action = policy(ts)
+            ts = env.step(action)
+            episode.append(ts)
+            if onscreen_render:
+                plt_img.set_data(ts.observation['images'][render_cam_name])
+                plt.pause(0.002)
+        plt.close()
+
+        episode_return = np.sum([ts.reward for ts in episode[1:]])
+        episode_max_reward = np.max([ts.reward for ts in episode[1:]])
+        if episode_max_reward == env.task.max_reward:
+            print(f"{episode_idx=} Successful, {episode_return=}")
+        else:
+            print(f"{episode_idx=} Failed")
+
+        joint_traj = [ts.observation['qpos'] for ts in episode]
+        # replace gripper pose with gripper control
+        gripper_ctrl_traj = [ts.observation['gripper_ctrl'] for ts in episode]
+        for joint, ctrl in zip(joint_traj, gripper_ctrl_traj):
+            left_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[0])
+            right_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[2])
+            joint[6] = left_ctrl
+            joint[6+7] = right_ctrl
+
+        subtask_info = episode[0].observation['env_state'].copy() # box pose at step 0
+
+        # clear unused variables
+        del env
+        del episode
+        del policy
+
+        # setup the environment
+        print('Replaying joint commands')
+        env = make_sim_env(task_name)
+        BOX_POSE[0] = subtask_info # make sure the sim_env has the same object configurations as ee_sim_env
+        ts = env.reset()
+
+        episode_replay = [ts]
+        # setup plotting
+        if onscreen_render:
+            ax = plt.subplot()
+            plt_img = ax.imshow(ts.observation['images'][render_cam_name])
+            plt.ion()
+        for t in range(len(joint_traj)): # note: this will increase episode length by 1
+            action = joint_traj[t]
+            ts = env.step(action)
+            episode_replay.append(ts)
+            if onscreen_render:
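+                # refresh the live view while replaying the recorded joint trajectory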
+                plt_img.set_data(ts.observation['images'][render_cam_name])
+                plt.pause(0.02)
+
+        episode_return = np.sum([ts.reward for ts in episode_replay[1:]])
+        episode_max_reward = np.max([ts.reward for ts in episode_replay[1:]])
+        if episode_max_reward == env.task.max_reward:
+            success.append(1)
+            print(f"{episode_idx=} Successful, {episode_return=}")
+        else:
+            success.append(0)
+            print(f"{episode_idx=} Failed")
+
+        plt.close()
+
+        """
+        For each timestep:
+        observations
+        - images
+            - each_cam_name (480, 640, 3) 'uint8'
+        - qpos (14,) 'float64'
+        - qvel (14,) 'float64'
+
+        action (14,) 'float64'
+        """
+
+        data_dict = {
+            '/observations/qpos': [],
+            '/observations/qvel': [],
+            '/action': [],
+        }
+        for cam_name in camera_names:
+            data_dict[f'/observations/images/{cam_name}'] = []
+
+        # because of the replay, there will be eps_len + 1 actions and eps_len + 2 timesteps
+        # truncate here to be consistent
+        joint_traj = joint_traj[:-1]
+        episode_replay = episode_replay[:-1]
+
+        # len(joint_traj) i.e. actions: max_timesteps
+        # len(episode_replay) i.e. time steps: max_timesteps + 1
+        max_timesteps = len(joint_traj)
+        while joint_traj:
+            action = joint_traj.pop(0)
+            ts = episode_replay.pop(0)
+            data_dict['/observations/qpos'].append(ts.observation['qpos'])
+            data_dict['/observations/qvel'].append(ts.observation['qvel'])
+            data_dict['/action'].append(action)
+            for cam_name in camera_names:
+                data_dict[f'/observations/images/{cam_name}'].append(ts.observation['images'][cam_name])
+
+        # HDF5
+        t0 = time.time()
+        dataset_path = os.path.join(dataset_dir, f'episode_{episode_idx}')
+        with h5py.File(dataset_path + '.hdf5', 'w', rdcc_nbytes=1024 ** 2 * 2) as root:
+            root.attrs['sim'] = True
+            obs = root.create_group('observations')
+            image = obs.create_group('images')
+            for cam_name in camera_names:
+                _ = image.create_dataset(cam_name, (max_timesteps, 480, 640, 3), dtype='uint8',
+                                         chunks=(1, 480, 640, 3), )
+                # compression='gzip',compression_opts=2,)
+                # compression=32001, compression_opts=(0, 0, 0, 0, 9, 1, 1), shuffle=False)
+            qpos = obs.create_dataset('qpos', (max_timesteps, 14))
+            qvel = obs.create_dataset('qvel', (max_timesteps, 14))
+            action = root.create_dataset('action', (max_timesteps, 14))
+
+            for name, array in data_dict.items():
+                root[name][...]
= array + print(f'Saving: {time.time() - t0:.1f} secs\n') + + print(f'Saved to {dataset_dir}') + print(f'Success: {np.sum(success)} / {len(success)}') + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--task_name', action='store', type=str, help='task_name', required=True) + parser.add_argument('--dataset_dir', action='store', type=str, help='dataset saving dir', required=True) + parser.add_argument('--num_episodes', action='store', type=int, help='num_episodes', required=False) + parser.add_argument('--onscreen_render', action='store_true') + + main(vars(parser.parse_args())) + diff --git a/act/scripted_policy.py b/act/scripted_policy.py new file mode 100644 index 00000000..4fd8f000 --- /dev/null +++ b/act/scripted_policy.py @@ -0,0 +1,194 @@ +import numpy as np +import matplotlib.pyplot as plt +from pyquaternion import Quaternion + +from constants import SIM_TASK_CONFIGS +from ee_sim_env import make_ee_sim_env + +import IPython +e = IPython.embed + + +class BasePolicy: + def __init__(self, inject_noise=False): + self.inject_noise = inject_noise + self.step_count = 0 + self.left_trajectory = None + self.right_trajectory = None + + def generate_trajectory(self, ts_first): + raise NotImplementedError + + @staticmethod + def interpolate(curr_waypoint, next_waypoint, t): + t_frac = (t - curr_waypoint["t"]) / (next_waypoint["t"] - curr_waypoint["t"]) + curr_xyz = curr_waypoint['xyz'] + curr_quat = curr_waypoint['quat'] + curr_grip = curr_waypoint['gripper'] + next_xyz = next_waypoint['xyz'] + next_quat = next_waypoint['quat'] + next_grip = next_waypoint['gripper'] + xyz = curr_xyz + (next_xyz - curr_xyz) * t_frac + quat = curr_quat + (next_quat - curr_quat) * t_frac + gripper = curr_grip + (next_grip - curr_grip) * t_frac + return xyz, quat, gripper + + def __call__(self, ts): + # generate trajectory at first timestep, then open-loop execution + if self.step_count == 0: + self.generate_trajectory(ts) + + # obtain left and right waypoints + if self.left_trajectory[0]['t'] == self.step_count: + self.curr_left_waypoint = self.left_trajectory.pop(0) + next_left_waypoint = self.left_trajectory[0] + + if self.right_trajectory[0]['t'] == self.step_count: + self.curr_right_waypoint = self.right_trajectory.pop(0) + next_right_waypoint = self.right_trajectory[0] + + # interpolate between waypoints to obtain current pose and gripper command + left_xyz, left_quat, left_gripper = self.interpolate(self.curr_left_waypoint, next_left_waypoint, self.step_count) + right_xyz, right_quat, right_gripper = self.interpolate(self.curr_right_waypoint, next_right_waypoint, self.step_count) + + # Inject noise + if self.inject_noise: + scale = 0.01 + left_xyz = left_xyz + np.random.uniform(-scale, scale, left_xyz.shape) + right_xyz = right_xyz + np.random.uniform(-scale, scale, right_xyz.shape) + + action_left = np.concatenate([left_xyz, left_quat, [left_gripper]]) + action_right = np.concatenate([right_xyz, right_quat, [right_gripper]]) + + self.step_count += 1 + return np.concatenate([action_left, action_right]) + + +class PickAndTransferPolicy(BasePolicy): + + def generate_trajectory(self, ts_first): + init_mocap_pose_right = ts_first.observation['mocap_pose_right'] + init_mocap_pose_left = ts_first.observation['mocap_pose_left'] + + box_info = np.array(ts_first.observation['env_state']) + box_xyz = box_info[:3] + box_quat = box_info[3:] + # print(f"Generate trajectory for {box_xyz=}") + + gripper_pick_quat = Quaternion(init_mocap_pose_right[3:]) + gripper_pick_quat = 
gripper_pick_quat * Quaternion(axis=[0.0, 1.0, 0.0], degrees=-60) + + meet_left_quat = Quaternion(axis=[1.0, 0.0, 0.0], degrees=90) + + meet_xyz = np.array([0, 0.5, 0.25]) + + self.left_trajectory = [ + {"t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0}, # sleep + {"t": 100, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1}, # approach meet position + {"t": 260, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1}, # move to meet position + {"t": 310, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 0}, # close gripper + {"t": 360, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0}, # move left + {"t": 400, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0}, # stay + ] + + self.right_trajectory = [ + {"t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0}, # sleep + {"t": 90, "xyz": box_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat.elements, "gripper": 1}, # approach the cube + {"t": 130, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, "gripper": 1}, # go down + {"t": 170, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, "gripper": 0}, # close gripper + {"t": 200, "xyz": meet_xyz + np.array([0.05, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 0}, # approach meet position + {"t": 220, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 0}, # move to meet position + {"t": 310, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 1}, # open gripper + {"t": 360, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1}, # move to right + {"t": 400, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1}, # stay + ] + + +class InsertionPolicy(BasePolicy): + + def generate_trajectory(self, ts_first): + init_mocap_pose_right = ts_first.observation['mocap_pose_right'] + init_mocap_pose_left = ts_first.observation['mocap_pose_left'] + + peg_info = np.array(ts_first.observation['env_state'])[:7] + peg_xyz = peg_info[:3] + peg_quat = peg_info[3:] + + socket_info = np.array(ts_first.observation['env_state'])[7:] + socket_xyz = socket_info[:3] + socket_quat = socket_info[3:] + + gripper_pick_quat_right = Quaternion(init_mocap_pose_right[3:]) + gripper_pick_quat_right = gripper_pick_quat_right * Quaternion(axis=[0.0, 1.0, 0.0], degrees=-60) + + gripper_pick_quat_left = Quaternion(init_mocap_pose_right[3:]) + gripper_pick_quat_left = gripper_pick_quat_left * Quaternion(axis=[0.0, 1.0, 0.0], degrees=60) + + meet_xyz = np.array([0, 0.5, 0.15]) + lift_right = 0.00715 + + self.left_trajectory = [ + {"t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0}, # sleep + {"t": 120, "xyz": socket_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_left.elements, "gripper": 1}, # approach the cube + {"t": 170, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 1}, # go down + {"t": 220, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # close gripper + {"t": 285, "xyz": meet_xyz + np.array([-0.1, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # approach meet position + {"t": 340, "xyz": meet_xyz + np.array([-0.05, 0, 0]), 
"quat": gripper_pick_quat_left.elements,"gripper": 0}, # insertion + {"t": 400, "xyz": meet_xyz + np.array([-0.05, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # insertion + ] + + self.right_trajectory = [ + {"t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0}, # sleep + {"t": 120, "xyz": peg_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_right.elements, "gripper": 1}, # approach the cube + {"t": 170, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 1}, # go down + {"t": 220, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # close gripper + {"t": 285, "xyz": meet_xyz + np.array([0.1, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # approach meet position + {"t": 340, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # insertion + {"t": 400, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # insertion + + ] + + +def test_policy(task_name): + # example rolling out pick_and_transfer policy + onscreen_render = True + inject_noise = False + + # setup the environment + episode_len = SIM_TASK_CONFIGS[task_name]['episode_len'] + if 'sim_transfer_cube' in task_name: + env = make_ee_sim_env('sim_transfer_cube') + elif 'sim_insertion' in task_name: + env = make_ee_sim_env('sim_insertion') + else: + raise NotImplementedError + + for episode_idx in range(2): + ts = env.reset() + episode = [ts] + if onscreen_render: + ax = plt.subplot() + plt_img = ax.imshow(ts.observation['images']['angle']) + plt.ion() + + policy = PickAndTransferPolicy(inject_noise) + for step in range(episode_len): + action = policy(ts) + ts = env.step(action) + episode.append(ts) + if onscreen_render: + plt_img.set_data(ts.observation['images']['angle']) + plt.pause(0.02) + plt.close() + + episode_return = np.sum([ts.reward for ts in episode[1:]]) + if episode_return > 0: + print(f"{episode_idx=} Successful, {episode_return=}") + else: + print(f"{episode_idx=} Failed") + + +if __name__ == '__main__': + test_task_name = 'sim_transfer_cube_scripted' + test_policy(test_task_name) + diff --git a/act/sim_env.py b/act/sim_env.py new file mode 100644 index 00000000..b79b935b --- /dev/null +++ b/act/sim_env.py @@ -0,0 +1,278 @@ +import numpy as np +import os +import collections +import matplotlib.pyplot as plt +from dm_control import mujoco +from dm_control.rl import control +from dm_control.suite import base + +from constants import DT, XML_DIR, START_ARM_POSE +from constants import PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN +from constants import MASTER_GRIPPER_POSITION_NORMALIZE_FN +from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN +from constants import PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN + +import IPython +e = IPython.embed + +BOX_POSE = [None] # to be changed from outside + +def make_sim_env(task_name): + """ + Environment for simulated robot bi-manual manipulation, with joint position control + Action space: [left_arm_qpos (6), # absolute joint position + left_gripper_positions (1), # normalized gripper position (0: close, 1: open) + right_arm_qpos (6), # absolute joint position + right_gripper_positions (1),] # normalized gripper position (0: close, 1: open) + + Observation space: {"qpos": Concat[ left_arm_qpos (6), # absolute joint position + left_gripper_position (1), # normalized gripper position (0: close, 1: open) + 
right_arm_qpos (6), # absolute joint position + right_gripper_qpos (1)] # normalized gripper position (0: close, 1: open) + "qvel": Concat[ left_arm_qvel (6), # absolute joint velocity (rad) + left_gripper_velocity (1), # normalized gripper velocity (pos: opening, neg: closing) + right_arm_qvel (6), # absolute joint velocity (rad) + right_gripper_qvel (1)] # normalized gripper velocity (pos: opening, neg: closing) + "images": {"main": (480x640x3)} # h, w, c, dtype='uint8' + """ + if 'sim_transfer_cube' in task_name: + xml_path = os.path.join(XML_DIR, f'bimanual_viperx_transfer_cube.xml') + physics = mujoco.Physics.from_xml_path(xml_path) + task = TransferCubeTask(random=False) + env = control.Environment(physics, task, time_limit=20, control_timestep=DT, + n_sub_steps=None, flat_observation=False) + elif 'sim_insertion' in task_name: + xml_path = os.path.join(XML_DIR, f'bimanual_viperx_insertion.xml') + physics = mujoco.Physics.from_xml_path(xml_path) + task = InsertionTask(random=False) + env = control.Environment(physics, task, time_limit=20, control_timestep=DT, + n_sub_steps=None, flat_observation=False) + else: + raise NotImplementedError + return env + +class BimanualViperXTask(base.Task): + def __init__(self, random=None): + super().__init__(random=random) + + def before_step(self, action, physics): + left_arm_action = action[:6] + right_arm_action = action[7:7+6] + normalized_left_gripper_action = action[6] + normalized_right_gripper_action = action[7+6] + + left_gripper_action = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(normalized_left_gripper_action) + right_gripper_action = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(normalized_right_gripper_action) + + full_left_gripper_action = [left_gripper_action, -left_gripper_action] + full_right_gripper_action = [right_gripper_action, -right_gripper_action] + + env_action = np.concatenate([left_arm_action, full_left_gripper_action, right_arm_action, full_right_gripper_action]) + super().before_step(env_action, physics) + return + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + super().initialize_episode(physics) + + @staticmethod + def get_qpos(physics): + qpos_raw = physics.data.qpos.copy() + left_qpos_raw = qpos_raw[:8] + right_qpos_raw = qpos_raw[8:16] + left_arm_qpos = left_qpos_raw[:6] + right_arm_qpos = right_qpos_raw[:6] + left_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_qpos_raw[6])] + right_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_qpos_raw[6])] + return np.concatenate([left_arm_qpos, left_gripper_qpos, right_arm_qpos, right_gripper_qpos]) + + @staticmethod + def get_qvel(physics): + qvel_raw = physics.data.qvel.copy() + left_qvel_raw = qvel_raw[:8] + right_qvel_raw = qvel_raw[8:16] + left_arm_qvel = left_qvel_raw[:6] + right_arm_qvel = right_qvel_raw[:6] + left_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_qvel_raw[6])] + right_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_qvel_raw[6])] + return np.concatenate([left_arm_qvel, left_gripper_qvel, right_arm_qvel, right_gripper_qvel]) + + @staticmethod + def get_env_state(physics): + raise NotImplementedError + + def get_observation(self, physics): + obs = collections.OrderedDict() + obs['qpos'] = self.get_qpos(physics) + obs['qvel'] = self.get_qvel(physics) + obs['env_state'] = self.get_env_state(physics) + obs['images'] = dict() + obs['images']['top'] = physics.render(height=480, width=640, camera_id='top') + obs['images']['angle'] = physics.render(height=480, 
width=640, camera_id='angle') + obs['images']['vis'] = physics.render(height=480, width=640, camera_id='front_close') + + return obs + + def get_reward(self, physics): + # return whether left gripper is holding the box + raise NotImplementedError + + +class TransferCubeTask(BimanualViperXTask): + def __init__(self, random=None): + super().__init__(random=random) + self.max_reward = 4 + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + # TODO Notice: this function does not randomize the env configuration. Instead, set BOX_POSE from outside + # reset qpos, control and box position + with physics.reset_context(): + physics.named.data.qpos[:16] = START_ARM_POSE + np.copyto(physics.data.ctrl, START_ARM_POSE) + assert BOX_POSE[0] is not None + physics.named.data.qpos[-7:] = BOX_POSE[0] + # print(f"{BOX_POSE=}") + super().initialize_episode(physics) + + @staticmethod + def get_env_state(physics): + env_state = physics.data.qpos.copy()[16:] + return env_state + + def get_reward(self, physics): + # return whether left gripper is holding the box + all_contact_pairs = [] + for i_contact in range(physics.data.ncon): + id_geom_1 = physics.data.contact[i_contact].geom1 + id_geom_2 = physics.data.contact[i_contact].geom2 + name_geom_1 = physics.model.id2name(id_geom_1, 'geom') + name_geom_2 = physics.model.id2name(id_geom_2, 'geom') + contact_pair = (name_geom_1, name_geom_2) + all_contact_pairs.append(contact_pair) + + touch_left_gripper = ("red_box", "vx300s_left/10_left_gripper_finger") in all_contact_pairs + touch_right_gripper = ("red_box", "vx300s_right/10_right_gripper_finger") in all_contact_pairs + touch_table = ("red_box", "table") in all_contact_pairs + + reward = 0 + if touch_right_gripper: + reward = 1 + if touch_right_gripper and not touch_table: # lifted + reward = 2 + if touch_left_gripper: # attempted transfer + reward = 3 + if touch_left_gripper and not touch_table: # successful transfer + reward = 4 + return reward + + +class InsertionTask(BimanualViperXTask): + def __init__(self, random=None): + super().__init__(random=random) + self.max_reward = 4 + + def initialize_episode(self, physics): + """Sets the state of the environment at the start of each episode.""" + # TODO Notice: this function does not randomize the env configuration. 
Instead, set BOX_POSE from outside + # reset qpos, control and box position + with physics.reset_context(): + physics.named.data.qpos[:16] = START_ARM_POSE + np.copyto(physics.data.ctrl, START_ARM_POSE) + assert BOX_POSE[0] is not None + physics.named.data.qpos[-7*2:] = BOX_POSE[0] # two objects + # print(f"{BOX_POSE=}") + super().initialize_episode(physics) + + @staticmethod + def get_env_state(physics): + env_state = physics.data.qpos.copy()[16:] + return env_state + + def get_reward(self, physics): + # return whether peg touches the pin + all_contact_pairs = [] + for i_contact in range(physics.data.ncon): + id_geom_1 = physics.data.contact[i_contact].geom1 + id_geom_2 = physics.data.contact[i_contact].geom2 + name_geom_1 = physics.model.id2name(id_geom_1, 'geom') + name_geom_2 = physics.model.id2name(id_geom_2, 'geom') + contact_pair = (name_geom_1, name_geom_2) + all_contact_pairs.append(contact_pair) + + touch_right_gripper = ("red_peg", "vx300s_right/10_right_gripper_finger") in all_contact_pairs + touch_left_gripper = ("socket-1", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-2", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-3", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ + ("socket-4", "vx300s_left/10_left_gripper_finger") in all_contact_pairs + + peg_touch_table = ("red_peg", "table") in all_contact_pairs + socket_touch_table = ("socket-1", "table") in all_contact_pairs or \ + ("socket-2", "table") in all_contact_pairs or \ + ("socket-3", "table") in all_contact_pairs or \ + ("socket-4", "table") in all_contact_pairs + peg_touch_socket = ("red_peg", "socket-1") in all_contact_pairs or \ + ("red_peg", "socket-2") in all_contact_pairs or \ + ("red_peg", "socket-3") in all_contact_pairs or \ + ("red_peg", "socket-4") in all_contact_pairs + pin_touched = ("red_peg", "pin") in all_contact_pairs + + reward = 0 + if touch_left_gripper and touch_right_gripper: # touch both + reward = 1 + if touch_left_gripper and touch_right_gripper and (not peg_touch_table) and (not socket_touch_table): # grasp both + reward = 2 + if peg_touch_socket and (not peg_touch_table) and (not socket_touch_table): # peg and socket touching + reward = 3 + if pin_touched: # successful insertion + reward = 4 + return reward + + +def get_action(master_bot_left, master_bot_right): + action = np.zeros(14) + # arm action + action[:6] = master_bot_left.dxl.joint_states.position[:6] + action[7:7+6] = master_bot_right.dxl.joint_states.position[:6] + # gripper action + left_gripper_pos = master_bot_left.dxl.joint_states.position[7] + right_gripper_pos = master_bot_right.dxl.joint_states.position[7] + normalized_left_pos = MASTER_GRIPPER_POSITION_NORMALIZE_FN(left_gripper_pos) + normalized_right_pos = MASTER_GRIPPER_POSITION_NORMALIZE_FN(right_gripper_pos) + action[6] = normalized_left_pos + action[7+6] = normalized_right_pos + return action + +def test_sim_teleop(): + """ Testing teleoperation in sim with ALOHA. Requires hardware and ALOHA repo to work. 
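+    Reads both master arms via InterbotixManipulatorXS and maps their joint states into
+    the 14-dim sim action space (see get_action above).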
""" + from interbotix_xs_modules.arm import InterbotixManipulatorXS + + BOX_POSE[0] = [0.2, 0.5, 0.05, 1, 0, 0, 0] + + # source of data + master_bot_left = InterbotixManipulatorXS(robot_model="wx250s", group_name="arm", gripper_name="gripper", + robot_name=f'master_left', init_node=True) + master_bot_right = InterbotixManipulatorXS(robot_model="wx250s", group_name="arm", gripper_name="gripper", + robot_name=f'master_right', init_node=False) + + # setup the environment + env = make_sim_env('sim_transfer_cube') + ts = env.reset() + episode = [ts] + # setup plotting + ax = plt.subplot() + plt_img = ax.imshow(ts.observation['images']['angle']) + plt.ion() + + for t in range(1000): + action = get_action(master_bot_left, master_bot_right) + ts = env.step(action) + episode.append(ts) + + plt_img.set_data(ts.observation['images']['angle']) + plt.pause(0.02) + + +if __name__ == '__main__': + test_sim_teleop() + diff --git a/act/utils.py b/act/utils.py new file mode 100644 index 00000000..673cbb10 --- /dev/null +++ b/act/utils.py @@ -0,0 +1,189 @@ +import numpy as np +import torch +import os +import h5py +from torch.utils.data import TensorDataset, DataLoader + +import IPython +e = IPython.embed + +class EpisodicDataset(torch.utils.data.Dataset): + def __init__(self, episode_ids, dataset_dir, camera_names, norm_stats): + super(EpisodicDataset).__init__() + self.episode_ids = episode_ids + self.dataset_dir = dataset_dir + self.camera_names = camera_names + self.norm_stats = norm_stats + self.is_sim = None + self.__getitem__(0) # initialize self.is_sim + + def __len__(self): + return len(self.episode_ids) + + def __getitem__(self, index): + sample_full_episode = False # hardcode + + episode_id = self.episode_ids[index] + dataset_path = os.path.join(self.dataset_dir, f'episode_{episode_id}.hdf5') + with h5py.File(dataset_path, 'r') as root: + is_sim = root.attrs['sim'] + original_action_shape = root['/action'].shape + episode_len = original_action_shape[0] + if sample_full_episode: + start_ts = 0 + else: + start_ts = np.random.choice(episode_len) + # get observation at start_ts only + qpos = root['/observations/qpos'][start_ts] + qvel = root['/observations/qvel'][start_ts] + image_dict = dict() + for cam_name in self.camera_names: + image_dict[cam_name] = root[f'/observations/images/{cam_name}'][start_ts] + # get all actions after and including start_ts + if is_sim: + action = root['/action'][start_ts:] + action_len = episode_len - start_ts + else: + action = root['/action'][max(0, start_ts - 1):] # hack, to make timesteps more aligned + action_len = episode_len - max(0, start_ts - 1) # hack, to make timesteps more aligned + + self.is_sim = is_sim + padded_action = np.zeros(original_action_shape, dtype=np.float32) + padded_action[:action_len] = action + is_pad = np.zeros(episode_len) + is_pad[action_len:] = 1 + + # new axis for different cameras + all_cam_images = [] + for cam_name in self.camera_names: + all_cam_images.append(image_dict[cam_name]) + all_cam_images = np.stack(all_cam_images, axis=0) + + # construct observations + image_data = torch.from_numpy(all_cam_images) + qpos_data = torch.from_numpy(qpos).float() + action_data = torch.from_numpy(padded_action).float() + is_pad = torch.from_numpy(is_pad).bool() + + # channel last + image_data = torch.einsum('k h w c -> k c h w', image_data) + + # normalize image and change dtype to float + image_data = image_data / 255.0 + action_data = (action_data - self.norm_stats["action_mean"]) / self.norm_stats["action_std"] + qpos_data = (qpos_data - 
self.norm_stats["qpos_mean"]) / self.norm_stats["qpos_std"] + + return image_data, qpos_data, action_data, is_pad + + +def get_norm_stats(dataset_dir, num_episodes): + all_qpos_data = [] + all_action_data = [] + for episode_idx in range(num_episodes): + dataset_path = os.path.join(dataset_dir, f'episode_{episode_idx}.hdf5') + with h5py.File(dataset_path, 'r') as root: + qpos = root['/observations/qpos'][()] + qvel = root['/observations/qvel'][()] + action = root['/action'][()] + all_qpos_data.append(torch.from_numpy(qpos)) + all_action_data.append(torch.from_numpy(action)) + all_qpos_data = torch.stack(all_qpos_data) + all_action_data = torch.stack(all_action_data) + all_action_data = all_action_data + + # normalize action data + action_mean = all_action_data.mean(dim=[0, 1], keepdim=True) + action_std = all_action_data.std(dim=[0, 1], keepdim=True) + action_std = torch.clip(action_std, 1e-2, 10) # clipping + + # normalize qpos data + qpos_mean = all_qpos_data.mean(dim=[0, 1], keepdim=True) + qpos_std = all_qpos_data.std(dim=[0, 1], keepdim=True) + qpos_std = torch.clip(qpos_std, 1e-2, 10) # clipping + + stats = {"action_mean": action_mean.numpy().squeeze(), "action_std": action_std.numpy().squeeze(), + "qpos_mean": qpos_mean.numpy().squeeze(), "qpos_std": qpos_std.numpy().squeeze(), + "example_qpos": qpos} + + return stats + + +def load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val): + print(f'\nData from: {dataset_dir}\n') + # obtain train test split + train_ratio = 0.8 + shuffled_indices = np.random.permutation(num_episodes) + train_indices = shuffled_indices[:int(train_ratio * num_episodes)] + val_indices = shuffled_indices[int(train_ratio * num_episodes):] + + # obtain normalization stats for qpos and action + norm_stats = get_norm_stats(dataset_dir, num_episodes) + + # construct dataset and dataloader + train_dataset = EpisodicDataset(train_indices, dataset_dir, camera_names, norm_stats) + val_dataset = EpisodicDataset(val_indices, dataset_dir, camera_names, norm_stats) + train_dataloader = DataLoader(train_dataset, batch_size=batch_size_train, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1) + val_dataloader = DataLoader(val_dataset, batch_size=batch_size_val, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1) + + return train_dataloader, val_dataloader, norm_stats, train_dataset.is_sim + + +### env utils + +def sample_box_pose(): + x_range = [0.0, 0.2] + y_range = [0.4, 0.6] + z_range = [0.05, 0.05] + + ranges = np.vstack([x_range, y_range, z_range]) + cube_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + + cube_quat = np.array([1, 0, 0, 0]) + return np.concatenate([cube_position, cube_quat]) + +def sample_insertion_pose(): + # Peg + x_range = [0.1, 0.2] + y_range = [0.4, 0.6] + z_range = [0.05, 0.05] + + ranges = np.vstack([x_range, y_range, z_range]) + peg_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + + peg_quat = np.array([1, 0, 0, 0]) + peg_pose = np.concatenate([peg_position, peg_quat]) + + # Socket + x_range = [-0.2, -0.1] + y_range = [0.4, 0.6] + z_range = [0.05, 0.05] + + ranges = np.vstack([x_range, y_range, z_range]) + socket_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) + + socket_quat = np.array([1, 0, 0, 0]) + socket_pose = np.concatenate([socket_position, socket_quat]) + + return peg_pose, socket_pose + +### helper functions + +def compute_dict_mean(epoch_dicts): + result = {k: None for k in epoch_dicts[0]} + num_items = len(epoch_dicts) + for k in result: + value_sum = 0 
+        for epoch_dict in epoch_dicts:
+            value_sum += epoch_dict[k]
+        result[k] = value_sum / num_items
+    return result
+
+def detach_dict(d):
+    new_d = dict()
+    for k, v in d.items():
+        new_d[k] = v.detach()
+    return new_d
+
+def set_seed(seed):
+    torch.manual_seed(seed)
+    np.random.seed(seed)
diff --git a/act/visualize_episodes.py b/act/visualize_episodes.py
new file mode 100644
index 00000000..4e55e471
--- /dev/null
+++ b/act/visualize_episodes.py
@@ -0,0 +1,147 @@
+import os
+import numpy as np
+import cv2
+import h5py
+import argparse
+
+import matplotlib.pyplot as plt
+from constants import DT
+
+import IPython
+e = IPython.embed
+
+JOINT_NAMES = ["waist", "shoulder", "elbow", "forearm_roll", "wrist_angle", "wrist_rotate"]
+STATE_NAMES = JOINT_NAMES + ["gripper"]
+
+def load_hdf5(dataset_dir, dataset_name):
+    dataset_path = os.path.join(dataset_dir, dataset_name + '.hdf5')
+    if not os.path.isfile(dataset_path):
+        print(f'Dataset does not exist at \n{dataset_path}\n')
+        exit()
+
+    with h5py.File(dataset_path, 'r') as root:
+        is_sim = root.attrs['sim']
+        qpos = root['/observations/qpos'][()]
+        qvel = root['/observations/qvel'][()]
+        action = root['/action'][()]
+        image_dict = dict()
+        for cam_name in root[f'/observations/images/'].keys():
+            image_dict[cam_name] = root[f'/observations/images/{cam_name}'][()]
+
+    return qpos, qvel, action, image_dict
+
+def main(args):
+    dataset_dir = args['dataset_dir']
+    episode_idx = args['episode_idx']
+    dataset_name = f'episode_{episode_idx}'
+
+    qpos, qvel, action, image_dict = load_hdf5(dataset_dir, dataset_name)
+    save_videos(image_dict, DT, video_path=os.path.join(dataset_dir, dataset_name + '_video.mp4'))
+    visualize_joints(qpos, action, plot_path=os.path.join(dataset_dir, dataset_name + '_qpos.png'))
+    # visualize_timestamp(t_list, dataset_path) # TODO: add timestamp back
+
+
+def save_videos(video, dt, video_path=None):
+    if isinstance(video, list):
+        cam_names = list(video[0].keys())
+        h, w, _ = video[0][cam_names[0]].shape
+        w = w * len(cam_names)
+        fps = int(1/dt)
+        out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+        for ts, image_dict in enumerate(video):
+            images = []
+            for cam_name in cam_names:
+                image = image_dict[cam_name]
+                image = image[:, :, [2, 1, 0]] # swap B and R channel
+                images.append(image)
+            images = np.concatenate(images, axis=1)
+            out.write(images)
+        out.release()
+        print(f'Saved video to: {video_path}')
+    elif isinstance(video, dict):
+        cam_names = list(video.keys())
+        all_cam_videos = []
+        for cam_name in cam_names:
+            all_cam_videos.append(video[cam_name])
+        all_cam_videos = np.concatenate(all_cam_videos, axis=2) # width dimension
+
+        n_frames, h, w, _ = all_cam_videos.shape
+        fps = int(1 / dt)
+        out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+        for t in range(n_frames):
+            image = all_cam_videos[t]
+            image = image[:, :, [2, 1, 0]] # swap B and R channel
+            out.write(image)
+        out.release()
+        print(f'Saved video to: {video_path}')
+
+
+def visualize_joints(qpos_list, command_list, plot_path=None, ylim=None, label_overwrite=None):
+    if label_overwrite:
+        label1, label2 = label_overwrite
+    else:
+        label1, label2 = 'State', 'Command'
+
+    qpos = np.array(qpos_list) # ts, dim
+    command = np.array(command_list)
+    num_ts, num_dim = qpos.shape
+    h, w = 2, num_dim
+    num_figs = num_dim
+    fig, axs = plt.subplots(num_figs, 1, figsize=(w, h * num_figs))
+
+    # plot joint state
+    all_names = [name + '_left' for name in STATE_NAMES] + [name + '_right' for name in STATE_NAMES]
+    for
dim_idx in range(num_dim): + ax = axs[dim_idx] + ax.plot(qpos[:, dim_idx], label=label1) + ax.set_title(f'Joint {dim_idx}: {all_names[dim_idx]}') + ax.legend() + + # plot arm command + for dim_idx in range(num_dim): + ax = axs[dim_idx] + ax.plot(command[:, dim_idx], label=label2) + ax.legend() + + if ylim: + for dim_idx in range(num_dim): + ax = axs[dim_idx] + ax.set_ylim(ylim) + + plt.tight_layout() + plt.savefig(plot_path) + print(f'Saved qpos plot to: {plot_path}') + plt.close() + +def visualize_timestamp(t_list, dataset_path): + plot_path = dataset_path.replace('.pkl', '_timestamp.png') + h, w = 4, 10 + fig, axs = plt.subplots(2, 1, figsize=(w, h*2)) + # process t_list + t_float = [] + for secs, nsecs in t_list: + t_float.append(secs + nsecs * 10E-10) + t_float = np.array(t_float) + + ax = axs[0] + ax.plot(np.arange(len(t_float)), t_float) + ax.set_title(f'Camera frame timestamps') + ax.set_xlabel('timestep') + ax.set_ylabel('time (sec)') + + ax = axs[1] + ax.plot(np.arange(len(t_float)-1), t_float[:-1] - t_float[1:]) + ax.set_title(f'dt') + ax.set_xlabel('timestep') + ax.set_ylabel('time (sec)') + + plt.tight_layout() + plt.savefig(plot_path) + print(f'Saved timestamp plot to: {plot_path}') + plt.close() + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--dataset_dir', action='store', type=str, help='Dataset dir.', required=True) + parser.add_argument('--episode_idx', action='store', type=int, help='Episode index.', required=False) + main(vars(parser.parse_args())) From 14beb00198bc3fec4ed553e9eab1129821b7906c Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 2 May 2024 16:18:50 -0400 Subject: [PATCH 17/44] training act on robomimic --- robomimic/algo/__init__.py | 1 - robomimic/algo/act.py | 247 --------------------------------- robomimic/config/__init__.py | 1 - robomimic/config/act_config.py | 47 ------- robomimic/utils/dataset.py | 18 ++- robomimic/utils/file_utils.py | 5 +- 6 files changed, 16 insertions(+), 303 deletions(-) delete mode 100644 robomimic/algo/act.py delete mode 100644 robomimic/config/act_config.py diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index 6f668be1..dbe2ea4d 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -10,4 +10,3 @@ from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC # from robomimic.algo.diffusion_policy import DiffusionPolicyUNet -from robomimic.algo.act import ACT diff --git a/robomimic/algo/act.py b/robomimic/algo/act.py deleted file mode 100644 index 8f35271f..00000000 --- a/robomimic/algo/act.py +++ /dev/null @@ -1,247 +0,0 @@ -""" -Implementation of Action Chunking with Transformers (ACT). -""" -from collections import OrderedDict - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.transforms as transforms - -import robomimic.utils.tensor_utils as TensorUtils - -from robomimic.algo import register_algo_factory_func, PolicyAlgo -from robomimic.algo.bc import BC_VAE - - -@register_algo_factory_func("act") -def algo_config_to_class(algo_config): - """ - Maps algo config to the BC algo class to instantiate, along with additional algo kwargs. - - Args: - algo_config (Config instance): algo config - - Returns: - algo_class: subclass of Algo - algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm - """ - algo_class, algo_kwargs = ACT, {} - - return algo_class, algo_kwargs - - -class ACT(BC_VAE): - """ - BC training with a VAE policy. 
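-    ACT is trained as a conditional VAE that predicts a chunk of future actions per forward pass.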
- """ - def _create_networks(self): - """ - Creates networks and places them into @self.nets. - """ - - self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - self.nets = nn.ModuleDict() - self.chunk_size = self.global_config["train"]["seq_length"] - self.camera_keys = self.obs_config['modalities']['obs']['rgb'].copy() - self.proprio_keys = self.obs_config['modalities']['obs']['low_dim'].copy() - self.obs_keys = self.proprio_keys + self.camera_keys - - self.proprio_dim = 0 - for k in self.proprio_keys: - self.proprio_dim += self.obs_key_shapes[k][0] - - from act.detr.main import build_ACT_model_and_optimizer - policy_config = {'num_queries': self.chunk_size, - 'hidden_dim': self.algo_config.act.hidden_dim, - 'dim_feedforward': self.algo_config.act.dim_feedforward, - 'backbone': self.algo_config.act.backbone, - 'enc_layers': self.algo_config.act.enc_layers, - 'dec_layers': self.algo_config.act.dec_layers, - 'nheads': self.algo_config.act.nheads, - 'latent_dim': self.algo_config.act.latent_dim, - 'a_dim': self.ac_dim, - 'state_dim': self.proprio_dim, - 'camera_names': self.camera_keys - } - self.kl_weight = self.algo_config.act.kl_weight - model, optimizer = build_ACT_model_and_optimizer(policy_config) - self.nets["policy"] = model - self.nets = self.nets.float().to(self.device) - - self.temporal_agg = False - self.query_frequency = self.chunk_size # TODO maybe tune - - self._step_counter = 0 - self.a_hat_store = None - - - def process_batch_for_training(self, batch): - """ - Processes input batch from a data loader to filter out - relevant information and prepare the batch for training. - Args: - batch (dict): dictionary with torch.Tensors sampled - from a data loader - Returns: - input_batch (dict): processed and filtered batch that - will be used for training - """ - - input_batch = dict() - input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"] if k != 'pad_mask'} - input_batch["obs"]['pad_mask'] = batch["obs"]['pad_mask'] - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present - input_batch["actions"] = batch["actions"][:, :, :] - # we move to device first before float conversion because image observation modalities will be uint8 - - # this minimizes the amount of data transferred to GPU - return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) - - def train_on_batch(self, batch, epoch, validate=False): - """ - Update from superclass to set categorical temperature, for categorcal VAEs. - """ - - return super(BC_VAE, self).train_on_batch(batch, epoch, validate=validate) - - def _forward_training(self, batch): - """ - Internal helper function for BC algo class. Compute forward pass - and return network outputs in @predictions dict. 
- Args: - batch (dict): dictionary with torch.Tensors sampled - from a data loader and filtered by @process_batch_for_training - Returns: - predictions (dict): dictionary containing network outputs - """ - - proprio = [batch["obs"][k] for k in self.proprio_keys] - proprio = torch.cat(proprio, axis=1) - qpos = proprio - - images = [] - for cam_name in self.camera_keys: - image = batch['obs'][cam_name] - image = self.normalize(image) - image = image.unsqueeze(axis=1) - images.append(image) - images = torch.cat(images, axis=1) - - env_state = torch.zeros([qpos.shape[0], 10]).cuda() # this is not used - - actions = batch['actions'] - is_pad = batch['obs']['pad_mask'] == 0 # from 1.0 or 0 to False and True - is_pad = is_pad.squeeze(dim=-1) - - a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state, actions, is_pad) - total_kld, dim_wise_kld, mean_kld = self.kl_divergence(mu, logvar) - loss_dict = dict() - all_l1 = F.l1_loss(actions, a_hat, reduction='none') - l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean() - loss_dict['l1'] = l1 - loss_dict['kl'] = total_kld[0] - - - predictions = OrderedDict( - actions=actions, - kl_loss=loss_dict['kl'], - reconstruction_loss=loss_dict['l1'], - ) - - return predictions - - def get_action(self, obs_dict, goal_dict=None): - """ - Get policy action outputs. - Args: - obs_dict (dict): current observation - goal_dict (dict): (optional) goal - Returns: - action (torch.Tensor): action tensor - """ - assert not self.nets.training - - proprio = [obs_dict[k] for k in self.proprio_keys] - proprio = torch.cat(proprio, axis=1) - qpos = proprio - - images = [] - for cam_name in self.camera_keys: - image = obs_dict[cam_name] - image = self.normalize(image) - image = image.unsqueeze(axis=1) - images.append(image) - images = torch.cat(images, axis=1) - - env_state = torch.zeros([qpos.shape[0], 10]).cuda() # not used - - if self._step_counter % self.query_frequency == 0: - a_hat, is_pad_hat, (mu, logvar) = self.nets["policy"](qpos, images, env_state) - self.a_hat_store = a_hat - - action = self.a_hat_store[:, self._step_counter % self.query_frequency, :] - self._step_counter += 1 - return action - - - def reset(self): - """ - Reset algo state to prepare for environment rollouts. - """ - self._step_counter = 0 - - def _compute_losses(self, predictions, batch): - """ - Internal helper function for BC algo class. Compute losses based on - network outputs in @predictions dict, using reference labels in @batch. - Args: - predictions (dict): dictionary containing network outputs, from @_forward_training - batch (dict): dictionary with torch.Tensors sampled - from a data loader and filtered by @process_batch_for_training - Returns: - losses (dict): dictionary of losses computed over the batch - """ - - # total loss is sum of reconstruction and KL, weighted by beta - kl_loss = predictions["kl_loss"] - recons_loss = predictions["reconstruction_loss"] - action_loss = recons_loss + self.kl_weight * kl_loss - return OrderedDict( - recons_loss=recons_loss, - kl_loss=kl_loss, - action_loss=action_loss, - ) - - def log_info(self, info): - """ - Process info dictionary from @train_on_batch to summarize - information to pass to tensorboard for logging. 
-        Args:
-            info (dict): dictionary of info
-        Returns:
-            loss_log (dict): name -> summary statistic
-        """
-        log = PolicyAlgo.log_info(self, info)
-        log["Loss"] = info["losses"]["action_loss"].item()
-        log["KL_Loss"] = info["losses"]["kl_loss"].item()
-        log["Reconstruction_Loss"] = info["losses"]["recons_loss"].item()
-        if "policy_grad_norms" in info:
-            log["Policy_Grad_Norms"] = info["policy_grad_norms"]
-        return log
-
-    def kl_divergence(self, mu, logvar):
-        batch_size = mu.size(0)
-        assert batch_size != 0
-        if mu.data.ndimension() == 4:
-            mu = mu.view(mu.size(0), mu.size(1))
-        if logvar.data.ndimension() == 4:
-            logvar = logvar.view(logvar.size(0), logvar.size(1))
-
-        klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
-        total_kld = klds.sum(1).mean(0, True)
-        dimension_wise_kld = klds.mean(0)
-        mean_kld = klds.mean(1).mean(0, True)
-
-        return total_kld, dimension_wise_kld, mean_kld
-
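For reference, `kl_divergence` above evaluates the closed-form KL between the encoder posterior and a standard-normal prior. Writing logvar as \log\sigma^2, the `klds` line is exactly the element-wise expression

    D_{\mathrm{KL}}\!\left(\mathcal{N}(\mu,\operatorname{diag}(\sigma^2))\;\big\|\;\mathcal{N}(0,I)\right)
        = -\frac{1}{2}\sum_{j=1}^{d}\left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right),

where `total_kld` sums over the d latent dimensions and averages over the batch; this is the term weighted by `kl_weight` in `_compute_losses`.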
- """ - - # optimization parameters - self.algo.optim_params.policy.optimizer_type = "adamw" - self.algo.optim_params.policy.learning_rate.initial = 5e-5 # policy learning rate - self.algo.optim_params.policy.learning_rate.decay_factor = 1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.policy.learning_rate.scheduler_type = "linear" # learning rate scheduler ("multistep", "linear", etc) - self.algo.optim_params.policy.regularization.L2 = 0.0001 # L2 regularization strength - - # loss weights - self.algo.loss.l2_weight = 0.0 # L2 loss weight - self.algo.loss.l1_weight = 1.0 # L1 loss weight - self.algo.loss.cos_weight = 0.0 # cosine loss weight - - # ACT policy settings - self.algo.act.hidden_dim = 512 # length of (s, a) seqeunces to feed to transformer - should usually match train.frame_stack - self.algo.act.dim_feedforward = 3200 # dimension for embeddings used by transformer - self.algo.act.backbone = "resnet18" # number of transformer blocks to stack - self.algo.act.enc_layers = 4 # number of attention heads for each transformer block (should divide embed_dim evenly) - self.algo.act.dec_layers = 7 # dropout probability for embedding inputs in transformer - self.algo.act.nheads = 8 # dropout probability for attention outputs for each transformer block - self.algo.act.latent_dim = 32 # latent dim of VAE - self.algo.act.kl_weight = 20 # KL weight of VAE diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index c98fbf2f..4ecbe268 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -13,7 +13,7 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils import robomimic.utils.log_utils as LogUtils - +import time class SequenceDataset(torch.utils.data.Dataset): def __init__( @@ -467,7 +467,7 @@ def get_item(self, index): return meta - def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1): + def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, dont_load_fut=None): """ Extract a (sub)sequence of data items from a demo given the @keys of the items. @@ -477,10 +477,13 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta keys (tuple): list of keys to extract num_frames_to_stack (int): numbers of frame to stack. Seq gets prepended with repeated items if out of range seq_length (int): sequence length to extract. Seq gets post-pended with repeated items if out of range + dont_load_fut (list): list of keys to not load future items for Returns: a dictionary of extracted items. 
""" + if dont_load_fut is None: + dont_load_fut = [] assert num_frames_to_stack >= 0 assert seq_length >= 1 @@ -504,16 +507,20 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta # fetch observation from the dataset file seq = dict() for k in keys: + t = time.time() data = self.get_dataset_for_ep(demo_id, k) - seq[k] = data[seq_begin_index: seq_end_index] + true_end_index = seq_begin_index + 1 if k.split("/")[-1] in dont_load_fut else seq_end_index + seq[k] = data[seq_begin_index: true_end_index] - seq = TensorUtils.pad_sequence(seq, padding=(seq_begin_pad, seq_end_pad), pad_same=True) + for k in seq: + if k.split("/")[-1] not in dont_load_fut: + seq[k] = TensorUtils.pad_sequence(seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True) pad_mask = np.array([0] * seq_begin_pad + [1] * (seq_end_index - seq_begin_index) + [0] * seq_end_pad) pad_mask = pad_mask[:, None].astype(bool) return seq, pad_mask - def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs"): + def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False): """ Extract a (sub)sequence of observation items from a demo given the @keys of the items. @@ -534,6 +541,7 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to keys=tuple('{}/{}'.format(prefix, k) for k in keys), num_frames_to_stack=num_frames_to_stack, seq_length=seq_length, + dont_load_fut=dont_load_fut ) obs = {k.split('/')[1]: obs[k] for k in obs} # strip the prefix if self.get_pad_mask: diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index 65db00fd..c3d74be8 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -111,7 +111,7 @@ def get_env_metadata_from_dataset(dataset_path, set_env_specific_obs_processors= return env_meta -def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False): +def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False, ac_key="actions"): """ Retrieves shape metadata from dataset. @@ -120,6 +120,7 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal all_obs_keys (list): list of all modalities used by the model. If not provided, all modalities present in the file are used. verbose (bool): if True, include print statements + ac_dim (bool): whether to pull ac_dim Returns: shape_meta (dict): shape metadata. 
From 0ae9c4b281a7e3de9b1e282c03ca525c8d5e184a Mon Sep 17 00:00:00 2001
From: dhruv2012
Date: Tue, 7 May 2024 13:56:16 -0400
Subject: [PATCH 18/44] dev 2 dataloaders - single train script

---
 robomimic/utils/train_utils.py | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py
index 3eeefba3..5295a332 100644
--- a/robomimic/utils/train_utils.py
+++ b/robomimic/utils/train_utils.py
@@ -663,7 +663,7 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F
     start_time = time.time()
 
     data_loader_iter = iter(data_loader)
-    data_loader_2_iter = iter(data_loader_2)
+    data_loader_2_iter = None if data_loader_2 is None else iter(data_loader_2)
 
     # breakpoint()
     for _ in LogUtils.custom_tqdm(range(num_steps)):
 
         # load next batch from data loader
         try:
             t = time.time()
             batch = next(data_loader_iter)
-            batch_2 = next(data_loader_2_iter)
+            batch_2 = None if data_loader_2_iter is None else next(data_loader_2_iter)
         except StopIteration:
             # reset for next dataset pass
             data_loader_iter = iter(data_loader)
-            data_loader_2_iter = iter(data_loader_2)
+            data_loader_2_iter = None if data_loader_2 is None else iter(data_loader_2)
             t = time.time()
             batch = next(data_loader_iter)
-            batch_2 = next(data_loader_2_iter)
+            batch_2 = None if data_loader_2_iter is None else next(data_loader_2_iter)
         timing_stats["Data_Loading"].append(time.time() - t)
 
         # process batch for training
         t = time.time()
         # breakpoint()
         input_batch = model.process_batch_for_training(batch)
-        input_batch_2 = model.process_batch_for_training(batch_2)
+        input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2)
         # breakpoint()
         input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats)
-        input_batch_2 = model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats)
+        input_batch_2 = None if input_batch_2 is None else model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats)
         timing_stats["Process_Batch"].append(time.time() - t)
 
         # forward and backward pass
         t = time.time()
         # breakpoint()
-        info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate)
+        if input_batch_2 is not None:
+            info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate)
+        else:
+            info = model.train_on_batch(input_batch, epoch, validate=validate)
         timing_stats["Train_Batch"].append(time.time() - t)
 
         # tensorboard logging

From 9c7533d31c4b6773469ea4a7ddb0d500014c9ab0 Mon Sep 17 00:00:00 2001
From: Simar Kareer
Date: Fri, 10 May 2024 12:52:14 -0400
Subject: [PATCH 19/44] added ac_key to model object

---
 robomimic/algo/algo.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/robomimic/algo/algo.py b/robomimic/algo/algo.py
index 321db01d..9211f1d9 100644
--- a/robomimic/algo/algo.py
+++ b/robomimic/algo/algo.py
@@ -118,6 +118,7 @@ def __init__(
         self.global_config = global_config
 
         self.ac_dim = ac_dim
+        self.ac_key = global_config.train.ac_key
self.device = device self.obs_key_shapes = obs_key_shapes From b6d61f973c69bc0566a58ca7da8ca14099b4990a Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 16 May 2024 18:45:07 -0400 Subject: [PATCH 20/44] moved act from robomimic to eplay --- act/.gitignore | 140 ------ act/LICENSE | 21 - act/README.md | 89 ---- act/assets/bimanual_viperx_ee_insertion.xml | 59 --- .../bimanual_viperx_ee_transfer_cube.xml | 48 -- act/assets/bimanual_viperx_insertion.xml | 53 -- act/assets/bimanual_viperx_transfer_cube.xml | 42 -- act/assets/scene.xml | 38 -- act/assets/tabletop.stl | Bin 684 -> 0 bytes act/assets/vx300s_10_custom_finger_left.stl | Bin 83384 -> 0 bytes act/assets/vx300s_10_custom_finger_right.stl | Bin 83384 -> 0 bytes act/assets/vx300s_10_gripper_finger.stl | Bin 42884 -> 0 bytes act/assets/vx300s_11_ar_tag.stl | Bin 3884 -> 0 bytes act/assets/vx300s_1_base.stl | Bin 99984 -> 0 bytes act/assets/vx300s_2_shoulder.stl | Bin 63884 -> 0 bytes act/assets/vx300s_3_upper_arm.stl | Bin 102984 -> 0 bytes act/assets/vx300s_4_upper_forearm.stl | Bin 49584 -> 0 bytes act/assets/vx300s_5_lower_forearm.stl | Bin 99884 -> 0 bytes act/assets/vx300s_6_wrist.stl | Bin 70784 -> 0 bytes act/assets/vx300s_7_gripper.stl | Bin 450084 -> 0 bytes act/assets/vx300s_8_gripper_prop.stl | Bin 31684 -> 0 bytes act/assets/vx300s_9_gripper_bar.stl | Bin 379484 -> 0 bytes act/assets/vx300s_dependencies.xml | 17 - act/assets/vx300s_left.xml | 59 --- act/assets/vx300s_right.xml | 59 --- act/conda_env.yaml | 23 - act/constants.py | 76 --- act/detr/LICENSE | 201 -------- act/detr/README.md | 9 - act/detr/main.py | 110 ---- act/detr/models/__init__.py | 9 - act/detr/models/backbone.py | 122 ----- act/detr/models/detr_vae.py | 283 ----------- act/detr/models/position_encoding.py | 93 ---- act/detr/models/transformer.py | 314 ------------ act/detr/setup.py | 10 - act/detr/util/__init__.py | 1 - act/detr/util/box_ops.py | 88 ---- act/detr/util/misc.py | 468 ------------------ act/detr/util/plot_utils.py | 107 ---- act/ee_sim_env.py | 267 ---------- act/imitate_episodes.py | 435 ---------------- act/policy.py | 84 ---- act/record_sim_episodes.py | 189 ------- act/scripted_policy.py | 194 -------- act/sim_env.py | 278 ----------- act/utils.py | 189 ------- act/visualize_episodes.py | 147 ------ requirements.txt | 6 + 49 files changed, 6 insertions(+), 4322 deletions(-) delete mode 100644 act/.gitignore delete mode 100644 act/LICENSE delete mode 100644 act/README.md delete mode 100644 act/assets/bimanual_viperx_ee_insertion.xml delete mode 100644 act/assets/bimanual_viperx_ee_transfer_cube.xml delete mode 100644 act/assets/bimanual_viperx_insertion.xml delete mode 100644 act/assets/bimanual_viperx_transfer_cube.xml delete mode 100644 act/assets/scene.xml delete mode 100644 act/assets/tabletop.stl delete mode 100644 act/assets/vx300s_10_custom_finger_left.stl delete mode 100644 act/assets/vx300s_10_custom_finger_right.stl delete mode 100644 act/assets/vx300s_10_gripper_finger.stl delete mode 100644 act/assets/vx300s_11_ar_tag.stl delete mode 100644 act/assets/vx300s_1_base.stl delete mode 100644 act/assets/vx300s_2_shoulder.stl delete mode 100644 act/assets/vx300s_3_upper_arm.stl delete mode 100644 act/assets/vx300s_4_upper_forearm.stl delete mode 100644 act/assets/vx300s_5_lower_forearm.stl delete mode 100644 act/assets/vx300s_6_wrist.stl delete mode 100644 act/assets/vx300s_7_gripper.stl delete mode 100644 act/assets/vx300s_8_gripper_prop.stl delete mode 100644 act/assets/vx300s_9_gripper_bar.stl delete mode 100644 
act/assets/vx300s_dependencies.xml delete mode 100644 act/assets/vx300s_left.xml delete mode 100644 act/assets/vx300s_right.xml delete mode 100644 act/conda_env.yaml delete mode 100644 act/constants.py delete mode 100644 act/detr/LICENSE delete mode 100644 act/detr/README.md delete mode 100644 act/detr/main.py delete mode 100644 act/detr/models/__init__.py delete mode 100644 act/detr/models/backbone.py delete mode 100644 act/detr/models/detr_vae.py delete mode 100644 act/detr/models/position_encoding.py delete mode 100644 act/detr/models/transformer.py delete mode 100644 act/detr/setup.py delete mode 100644 act/detr/util/__init__.py delete mode 100644 act/detr/util/box_ops.py delete mode 100644 act/detr/util/misc.py delete mode 100644 act/detr/util/plot_utils.py delete mode 100644 act/ee_sim_env.py delete mode 100644 act/imitate_episodes.py delete mode 100644 act/policy.py delete mode 100644 act/record_sim_episodes.py delete mode 100644 act/scripted_policy.py delete mode 100644 act/sim_env.py delete mode 100644 act/utils.py delete mode 100644 act/visualize_episodes.py diff --git a/act/.gitignore b/act/.gitignore deleted file mode 100644 index d1e4c693..00000000 --- a/act/.gitignore +++ /dev/null @@ -1,140 +0,0 @@ -bin -logs -wandb -outputs -data -data_local -.vscode -_wandb - -**/.DS_Store - -fuse.cfg - -*.ai - -# Generation results -results/ - -ray/auth.json - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -pip-wheel-metadata/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ \ No newline at end of file diff --git a/act/LICENSE b/act/LICENSE deleted file mode 100644 index 35e5f5e2..00000000 --- a/act/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 Tony Z. 
Zhao
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/act/README.md b/act/README.md
deleted file mode 100644
index 2a345157..00000000
--- a/act/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# ACT: Action Chunking with Transformers
-
-### *New*: [ACT tuning tips](https://docs.google.com/document/d/1FVIZfoALXg_ZkYKaYVh-qOlaXveq5CtvJHXkY25eYhs/edit?usp=sharing)
-TL;DR: if your ACT policy is jerky or pauses in the middle of an episode, just train for longer! Success rate and smoothness can improve way after loss plateaus.
-
-#### Project Website: https://tonyzhaozh.github.io/aloha/
-
-This repo contains the implementation of ACT, together with 2 simulated environments:
-Transfer Cube and Bimanual Insertion. You can train and evaluate ACT in sim or real.
-For real, you would also need to install [ALOHA](https://github.com/tonyzhaozh/aloha).
-
-### Updates:
-You can find all scripted/human demo for simulated environments [here](https://drive.google.com/drive/folders/1gPR03v05S1xiInoVJn7G7VJ9pDCnxq9O?usp=share_link).
-
-
-### Repo Structure
-- ``imitate_episodes.py`` Train and Evaluate ACT
-- ``policy.py`` An adaptor for ACT policy
-- ``detr`` Model definitions of ACT, modified from DETR
-- ``sim_env.py`` Mujoco + DM_Control environments with joint space control
-- ``ee_sim_env.py`` Mujoco + DM_Control environments with EE space control
-- ``scripted_policy.py`` Scripted policies for sim environments
-- ``constants.py`` Constants shared across files
-- ``utils.py`` Utils such as data loading and helper functions
-- ``visualize_episodes.py`` Save videos from a .hdf5 dataset
-
-
-### Installation
-
-    conda create -n aloha python=3.8
-    conda activate aloha
-    pip install torchvision
-    pip install torch
-    pip install pyquaternion
-    pip install pyyaml
-    pip install rospkg
-    pip install pexpect
-    pip install mujoco
-    pip install dm_control
-    pip install opencv-python
-    pip install matplotlib
-    pip install einops
-    pip install packaging
-    pip install h5py
-    pip install ipython
-    cd act/detr && pip install -e .
-
-### Example Usages
-
-To set up a new terminal, run:
-
-    conda activate aloha
-    cd <path to act repo>
-
-### Simulated experiments
-
-We use ``sim_transfer_cube_scripted`` task in the examples below. Another option is ``sim_insertion_scripted``.
-To generate 50 episodes of scripted data, run:
-
-    python3 record_sim_episodes.py \
-    --task_name sim_transfer_cube_scripted \
-    --dataset_dir <data save dir> \
-    --num_episodes 50
-
-You can add the flag ``--onscreen_render`` to see real-time rendering.
-To visualize the episode after it is collected, run
-
-    python3 visualize_episodes.py --dataset_dir <data save dir> --episode_idx 0
-
-To train ACT:
-
-    # Transfer Cube task
-    python3 imitate_episodes.py \
-    --task_name sim_transfer_cube_scripted \
-    --ckpt_dir <ckpt dir> \
-    --policy_class ACT --kl_weight 10 --chunk_size 100 --hidden_dim 512 --batch_size 8 --dim_feedforward 3200 \
-    --num_epochs 2000 --lr 1e-5 \
-    --seed 0
-
-
-To evaluate the policy, run the same command but add ``--eval``. This loads the best validation checkpoint.
-The success rate should be around 90% for transfer cube, and around 50% for insertion.
-To enable temporal ensembling, add flag ``--temporal_agg``.
-Videos will be saved to ``<ckpt dir>`` for each rollout.
-You can also add ``--onscreen_render`` to see real-time rendering during evaluation.
-
-For real-world data where things can be harder to model, train for at least 5000 epochs or 3-4 times the length after the loss has plateaued.
-Please refer to [tuning tips](https://docs.google.com/document/d/1FVIZfoALXg_ZkYKaYVh-qOlaXveq5CtvJHXkY25eYhs/edit?usp=sharing) for more info.
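The chunked inference this README refers to (and which `get_action` in the act.py removed by PATCH 17 implements via `query_frequency`) queries the policy once per chunk and replays the stored actions in between; without ``--temporal_agg`` no ensembling is applied. A schematic sketch with a stand-in policy (the 14-D bimanual action size is an assumption):

    import numpy as np

    CHUNK_SIZE = 100  # matches --chunk_size in the training command above

    def rollout(policy, obs_fn, num_steps, action_dim):
        """Query the policy once per chunk, then replay the stored chunk of actions."""
        a_hat_store = None
        actions = []
        for t in range(num_steps):
            if t % CHUNK_SIZE == 0:
                a_hat_store = policy(obs_fn())      # shape: (CHUNK_SIZE, action_dim)
            actions.append(a_hat_store[t % CHUNK_SIZE])
        return np.stack(actions)

    # stand-in policy and observation source, for illustration only
    dummy_policy = lambda obs: np.zeros((CHUNK_SIZE, 14))
    acts = rollout(dummy_policy, obs_fn=lambda: None, num_steps=250, action_dim=14)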
-To visualize the episode after it is collected, run - - python3 visualize_episodes.py --dataset_dir --episode_idx 0 - -To train ACT: - - # Transfer Cube task - python3 imitate_episodes.py \ - --task_name sim_transfer_cube_scripted \ - --ckpt_dir \ - --policy_class ACT --kl_weight 10 --chunk_size 100 --hidden_dim 512 --batch_size 8 --dim_feedforward 3200 \ - --num_epochs 2000 --lr 1e-5 \ - --seed 0 - - -To evaluate the policy, run the same command but add ``--eval``. This loads the best validation checkpoint. -The success rate should be around 90% for transfer cube, and around 50% for insertion. -To enable temporal ensembling, add flag ``--temporal_agg``. -Videos will be saved to ```` for each rollout. -You can also add ``--onscreen_render`` to see real-time rendering during evaluation. - -For real-world data where things can be harder to model, train for at least 5000 epochs or 3-4 times the length after the loss has plateaued. -Please refer to [tuning tips](https://docs.google.com/document/d/1FVIZfoALXg_ZkYKaYVh-qOlaXveq5CtvJHXkY25eYhs/edit?usp=sharing) for more info. - diff --git a/act/assets/bimanual_viperx_ee_insertion.xml b/act/assets/bimanual_viperx_ee_insertion.xml deleted file mode 100644 index 700aaac5..00000000 --- a/act/assets/bimanual_viperx_ee_insertion.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/bimanual_viperx_ee_transfer_cube.xml b/act/assets/bimanual_viperx_ee_transfer_cube.xml deleted file mode 100644 index 25893842..00000000 --- a/act/assets/bimanual_viperx_ee_transfer_cube.xml +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/bimanual_viperx_insertion.xml b/act/assets/bimanual_viperx_insertion.xml deleted file mode 100644 index f701d70a..00000000 --- a/act/assets/bimanual_viperx_insertion.xml +++ /dev/null @@ -1,53 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/bimanual_viperx_transfer_cube.xml b/act/assets/bimanual_viperx_transfer_cube.xml deleted file mode 100644 index bdc9e644..00000000 --- a/act/assets/bimanual_viperx_transfer_cube.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/scene.xml b/act/assets/scene.xml deleted file mode 100644 index ae594505..00000000 --- a/act/assets/scene.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/tabletop.stl b/act/assets/tabletop.stl deleted file mode 100644 index ab35cdf76426b2dddc433afa11af96ebe0e07c17..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 684 zcmb7>F%E)I42EBvJ%Hnz90^Ri7@`m-B8hSaF5m@v6c#rJPXHl$Aa0=Tqpy?~7CSY4 z{rdmyyH&->9;>rWl4uvjQ4;TMRu<*;&@|20{vB&W0qJY?d{9=`o7_N~sr;>StPlPD zvP=v;)0)71Bn%M6vp#_a&UM;cX98;>AsU`wx9qCJJ+^9=ccS$u7gw-AIE|1%J+((i z)`D$$uG~;G$PW-)hwO_26uOtWpDy4uE8Zh1PGoW!C7Fdr*IGML4r$w z;Qqh2n;M>;S-$V@<(!=K+2{H6Z7b=zRXz0^)U1>vyg{`b#flX!Q@Cj1Vnxg5s9d{d z?RsI07A^Xe{{?iy&FN7XnZ1rqwG*!5t`IqUq& z&09<{kU+(k;21VhwOUsz#A|v`v)C*uaW0s#$Ua z3EtMO@jK60F$#LtQ4A)Q)K0UE2~>OurfnkY$+AY)%4L?_%~i}nFs)r5>z}q_Twc7H 
zVtgionLx#tVA>|Gm&;&0e$pUVovh^`nAWaTf1b2r6dN2xF_1vTmtfi^GLO71Mi1{1 zd}wY50}0;Nt`k@PvSLK$j;0t)@YBu&D!v5MHu3kvITq1me?JGow013gbj*rzWUNUs zkU+(kVA>{Lrig9DDEerafdp@B*MYl7tQg~`dME}GsQ400+r;U!A-nj@pY0i8Ai>+( zRr1;)D@N+l9D|7!t})A)K*g6}+9q5(8%KZFZ{)J%;X@n*)7o|6#sMqF+u@R8Fu_N~ z1S-A+(>9UhPP9ejp48JpFs)r}KJBw&T+(RkpvfV$2-!2gN`F6<>mBo47cvwG|`p#y0{9-qx=(mVIp~1`??F5*))OK9|}^D(4aj zU82~f%YVVRjVJ~ZsQ3~b!zMo0K9N*kNhEa5z%E^ntr${^VjzKvFTpWv;&Uw;N%gKo zLf3HY(lvPBF4ZXp5~%nR9K$9)w+4~a8b~B`P0cRd5{195Ofir^#h2h1Hu1R~jHFgg zBB5IxcIo!9$u}GW2~>Ouj$sqfI!h#U%gHX?&L3@7m0}=)iZ8)2Yyx^25((V{V3+RU zbjtoM#XtfTUxH)U1oW0961peDF5T~n*RL+cKmrwCf@9bO_xiYYqkf`9LifnnrF)!f zay6$Ic0ciS5EH2Q5*))O?B46vAnLt32;H+}m+m>IyxECjKo41>;!AK0n}A-pgU~%> zcIp27Pi^~A4Cv`gRD21JVH5B+5sifI>9b3}m%Lp#l42l%iZ8)2Yy#ejqLI*VEbP+n zSNxvnc#n!k#h2h1HZgbOny_3QCx)I#JwndxdCXI>Ua;CUyNXrr*u~|eQ>P=Ine_Tl zq6C-0qtq9D*Lq^qeJZu6VEMrE=yp+zT?2aCF_1u4`PPTLvE8of)EE7w-^8f+RQsTU z1lB8V*Z%k;qqi=Z7fy_lJq0v8#4<5a=o~WuGTi#b6byLv4!jzHEG%F#V*c1JoLzf&|v{Zr7ET$>p5k zlR|H$Yv3TzHR|#n&(5O3s@SL|6yrJdUW+fD6qS%#HB^wm_R8&=kf)dwxh99sJyzC1 zpsP`b-JU~PgH`{1?I}im>b-WWH90C4wa%y@f$gr_CC=88_is)P&0H#jgFx5F!pgHR zL9jY?r4Pk8Nxj#l>8C_xrCtUqNMQfL?b?Ouoj#3Xy!M89(T7OwW-t!igALTt7frh@^d9nK|()c_Ip&Lw@u}i4KqV$iueW+=o)J(Pp;wFmm zjLP|GIV&mwy``dpgf8>;du_4fIpm!xvqL);t?nSum2d4{Pn(b+)u7dOit&!>t8Njq zqxRF=HY!Nyy32k)cmMGw`lk!CL)VOG?I6&Vxc`1nyc|Jlf1_O#BM#NOJ>Soc%1EOI zs34*1V0)aQ$&}U6DXPo~HQGiv2z1qNbI@};U69HY$m2d$sr`7EM&N26+dt!mMz z8Y)QWKBGMj*L2EiVWhF|2NLMo-S`CD^OQ8!$M@W%u|8Cg(0xXGJaNDuZ$#qRvqS%& z=L!jQEzNccp505-d*x?$JB@Opf`slf+T)@nj^{8EKAaWWp2`Oj=t{KdG?c+~G+N7L z@EVQ1qJo6(Guq?3#q(7(66cx~TARu_66mU)^eogRAvA`}b;_?|{!l^LP=soq5bUGnNV zsM~8(7J%#aij)OF1qmGAce@4{LyV5b^w5gbejtIav9-=a8}&D3Ot_8Ow02DxDo9|S z!tJ7V&^TUtT4*e42a!P6lJ@7Jt>qZp)>4dJs34)^*?sEt{|+-g6`LA5f!cE<9Il+_ zq0hkAiu(+7t)ktaAfd0D{ch0iU_Ya5*&jlyQNIKUbX}Wp4*EWP&$;hI_dFUEB=r5a z-*L9h?_dPinG#xu`d3JxtL86fp%2H;F8ASvHA^m0K|(*r_Pbj7v$c#ZBPNGtqkbY1 z=o;AN4D?00402!eI=w-nf`l%Q_B-deJjIM;M<#{-Nc~+T(DmJuQ_!crOmzwOsiWw< z78N9Py<)#RUuv1$=u>V|=xyq^BZ02@6;HyuL29ZK`Q1Rx?I2M>Lf4P>7(kBuCxvm? 
ze*ZuMUB^291@AcLscz?YoZ9>QNmP)~^}Icnu`m8e@qD!X9)$$D@IBG(dP;2+zpLea zG|ce^iG*&i>@gY4TF#qkhU+{hZ|gVHPMz$(`(-V6q2fz0?MG0a^UJLR3_!3Ni$#XKnc}^sF zTf3q%@3Ufj$#ZG~6<>nsFL_QTIBSUnZ)?~4hx@Gperv3ArNbt6HRZMZlis7HY~#<^M~bv+ap} zOF1)&iZ8)2zT`Q#j=#Q?^PEWNnt@%q9`nz0qT)+%4F5bQ61s+Cm#&Zf^PH&o5*))n z&xwStsoAC575_XZD!v5A@XvE1p<5hw>Gsj?A!D8s6<>m5*aT)RkA zWBBJekE&_(ag-q_AOCn`u_z2eMsB7rV?Zx4?- z&xr~WSU)=RoJgRH#vj6C&U2!I1lIG;JSP(9qH&b)nDd;dAc5_bGtY?xx@bHoJmx$n zDo9|v>&$Z^fi4=?3XeI@i3$?fe{kkGkw6!X&xOaF=R^ew><2mXoJcraInRg3oadww zNUwqf_UD{=P9)Gp-uUM^kw6!X!-vP5=R^ew{fznNIgvmY<~dP80`nBkJSP(9qIp2j)?%I$6(n>#|2!uW=%N`=-kyQm zg?Ua?kkHr7KhKE-x@aaB^nEbTi3$?>{`==Skw6#CNP|8c<~dP8LO;j;c}^tIMKkN5 zFN%3iRFKf+(Lc|L1iEO3AoQs*&xr~Wx?b_mb0UE*n&}Ad2AJnW1qoe0`sX>3Ko`x} zgm)awbE1NTuIK&poJgPx-xHmAPE?T4?Ug+y6K6zjYmG_Q4^?Ph^-d4xIXA99XY%&Z zBM&{ieW!S|_4gqk{^q$+L?nw&y_^YDd>!xdt|s$MYr9ji zTNHzd76$@?eF=2&HXp4`G~aVAdi39Uf2$p*lYt5nKNr|-#duNbKE+_-(yTmrdMYi#bDw?Ncv^I1iE;eW7x#4waufK7N4># zec~YoDoFgZ;(!%neUB#;gNa52P(?A7@C-cu*F#<;1r5H>spLsCI zmp~V9a}1k!ar(KK_AdL<(P!Eis36fL%LOaOz6AeJ3?@>{iL>07Ko@Uw44WufHI4DI zSjFWx*4HsmK|=PqWZ#djmnjAld)mei@+Hv4+t&TCh;#7@84Z^GyY$xFFas4NMoztA z#W)ysnqn|f{qIn}qi`eDn{mso9?5HPw)D zw>gGQ+`e1iNHlfAvKxz&8>k@BE8%tfek@TGg9)k={RnjNwsk)&V&;(cM$-0+msP)X zU!a1-`^Gn{7{3qUb*W6$YqLJsmp~V9a}1lTAw>gGQ3@I|) z*f@1d(7#=tg`tAP$mVv86?J&-ITKX32m2D};%$y$6I8@`?NTKD7l4W{!ENG{8e9VfM*l^l zf`q;|LoRjud_M;6ZQw_sOW#?W;HP9&)KAOkxr#;w3H^+vjA%$Pn1~;;(T_lvenM@6 zpLTlExqL*Uf`l$B7juMDjL+r6k3g3$X*R*7_Eg_!Nl^J-vs*- z=+ZTYO>phA;^mCoRQpI&kkED4=_gqz1{1%`S`+L`pi9?$Ho>)M)@+S}sos^SAffBv zrn3Vn1`~8Ya{ChK(lxkEaBJ{QqeRQ7HIS$vq1%#6jRGhJ6NTO-$>U3)OSeQe!L8b| zu0IA-t0qxFLbr*>{<<&icCh;E*2{bebm=C zsV{*p-7~O>&;636)XR{lAffvxe`YyGF_`GJuk12k0$sX?V-wt4dXP?V|4O2Qgzjst zTC<;GFmbd^F797B2z2S5noV%8Z^qBdxSuFdK|=Q#r~kf>Vtno=2Kf@`(mhU_;NI(y zQPq}F?^U9Lgzoz$&vck#Fu`@MFM%%IbG8ZYg)>3DaES^Mx(}cH-f4)6<>l|XPe;rty^fG1>LZmKmuL54YuFtkw66r-J;s#40u$iAi=8@_^9mc62?FRUAjg6Ji=lHvEM;Z zL4sF7a13vjEK)zaNT5r%sGrAuAO#x@~sxJPF*>*N+MbURlnxp1Dj>K0mv-FLdc`J#*PhM8-t0 zOaI-!mw^iIS0AB051AwQ9u@BkUHXdHvxBY`g7=A-qFWmvg89?L)l397-Z7~b)c7zuRoHplRe$yfxB$)JJ+ z)%jKo@3>El1iE;eV|d4wEP}_DP(gy~d^?6U;zgrTF%szFZ7YTzBMaku&SPY#AVD>_ z6~j9Y7bAf#-sTve+mB_Nc5)0J>q7+zs==)o-toj333Tx`$MB9>Mso}vvqS|6s`ITF z-f__w33Tx`$MBB5S_F^1qJjj~`Bn_?_->2@x_Fyoc*l?}g2#|iL4sNdD~5NRIz|Fr zyv;GZW8oITW8tVEL9K)p!#my{BY`g7<`~{FeX2tO0(eXx6(p#Yuwr=g1~C%o;%$!M z%}rPY=O$1=f?5eHhByBaBY`g7<`~`_i$!pb1r;QyU9n<#^EfdQ=;CdT;ms9U1m}uS zL4w*9D~2~86(fNz-sTwIoR>v#mJAgns9mvQc=Kv866oS>j^WMSSp;YMP(gxPA}fYB zKNur{F5c!C-W;SwaMltPBxr2Wis8+3#z>%xw>gG4mueB5OGO0<8nd)wc=NR}66oS> zj-f|{zkC%K6<>neL_IG0zX^8fJ=p|hk@Zo9p`v53OP`ZBe;y-&E`3G3IeLrWtUfA8 z=zHUx2M{BHE`4Xca}6wlXD6V7gnq`n^BH0!(50VH@05^s>loPfHo-Kk361vQL=an-Z?B5!E;zpK|zZAbm{|6(n?9;+?k?BY`g65_#uF=~j(rI$5QY6(n?< z=$(HRBY`g6qI&0eSp>D`JY&m>!3q+(ZTHS2i;+N=?iqOJs#yfjRYL^{-AD1xhl`Ow zm+s+s=iFHY&$&Yd3EkK7&g+YjK$q^RdFKx5ULVf{w5}B^Na#MJcYb1w1iEyO(>n)} zd#{l^2ND$|bl=xI&r|D_!8*SZ!+GwwN?zCTZ;-3`kl%xqW8u8UfLnUb zj3UkJ8~E=rWs$!qNa$_9F=Waj`xETavHh~hk*MH)d9?x`mEXujBq~Vo+5;y1#!Dhm zL4sEjFyS}u6Nw5Eye@zV|IsK^kkI$sf7BTXbm`~Ke+*e)E53vJDsX08$Me7Es34)Q zh`p|epIaEyx6Y9Fg)V(Xyg6Ynfr>sS&e39QXQmnzB=lAA=87WqQK5o_zE&}2u&nc7 z1qoht!FSL*BzQ$OAC+H*HxdbS;d_ZQ1}aGK%5sk3%?ax|F%k)M;ro>%MkFdo z;9H-A2tWl1UQ^FUWv|tzL-HOK66nIWS4WIURFJ@TY6lU33KIM^0X{0f+;$`q=)$o9 zXAD%3(A)mm@R$g8>A&mI5AU@KKn3@!kJcWOu`cfC96c&X=&RtLKSu&x`l`p6F|n>9 zD@f=&8)H_+B3MB}KQVqedhc_^1iJJS8e?|JiopsJx)j8i^Rft5kkF;e{#F=XUECMC zbZL$;6KBO>1qoeC_|F``eW6R&N-<^>tr)Bzp=-Vv^PCpJ3KF_T^`C2i`$Ctl!DGzV zS}|BbLbnY5a}97`=+Z4wj9FwW1}jMD7S4a>0PYK2xuF* zew$buXK@~S|M2iTGQXM9n}xN_?`Qh&yH=5?(3Q4a 
zeAjSKUq31&=vlC7E0>D3DDDeg^z2%M-?JNu3O)Iji^~e`>FY;@1eHyT_AlqSFLY6j zVG(|{Pb4Z-cUdm3?Qlla)djGNZeOIz!#VNIP)_R;6tE-1pUI|XX1d9 z2yhZ#T(7U3HTel`m*O`k5s3=!H*dKW1{3|}4X)%kj{sDVpx@%KV$lDcM5L4W;v#27 zj^RfH;1#9z$~s$a;qWRrj|vqesI9eV|F#whbWy8q5i#}vybARntZnXb;GVvIR7g+fabM_yzk9wK!-E9%gRL0; zJ!IS$y7acr4+hXbdzKz5ItJ(Ka8Li=N5zD`3U(%rBj^~oFLdeq;lI)X6@3ReGlqNm z`cWaFpBVp@7Pv2T>1WrwZo+#WsOYDib5OXauOAf>x^(%kg1~*DOPA)D*ECo)0Og8w z9n9Gt+|$>O3JG1$`>#;IeWB}1eVoJZ*|pnqs>3}<=+?(?g#umWF9f=DYv)~W5UHOl zRCLS9c@^A~@0ovzfrM_U{nrBEzR;z62Hu(d-lIZA_cD0I6ZiD>qe4RWYV5Jsi$9#V zdP`|iWnF>$LYMw~Ory?U+6lUw;dYymBOKcm=TV`8#Kc4^EqZd%cy>GJJ#$o$priU6 z&$kcfB)+)x-va`C+VcQZaKG4UJI@0ZB=oiN>siwIc&`uFyVkLDeGY$j_5kQGy+k+? zRNGm!|EK{H=;BcWzBm4D)R$woN|fc|k_vnBD}&yn;xgzZsLWg2-nC0~{B~=A`$8Ah zJ{DouKJ+B!W8(9ZXes=}(UWLx^Ye;F>%1SRAVE*QMf>LskU$reG>h<-LGO8>LZ#Mn zaoNT_ef_AApjyJB{Z}I4zR*Rrl10Q=UwIX(;jC@02XRkdKPn{fZdyd7zJqwDsQs{F za4Um%);GqNrvwRVuPoYsH6ZQ_T|8F9&#t%S^d1!|)Ye)qZjo_MUq31&sBO2;$(mUc zVEyxdcG0Cvl((1RJt|am8RXu}|Lv%d&}H79>GZXuLKpSBtn={p$h=2|ir(g)-v8~W zkkIk``l6Bg%(<4)*NXem5aWN;a7gHT<3FB=`$Cs~7X01~tSkUOKdk8Iir+bKPhWjX z!jaI=vHw^f?h9SI-1v{&A%U(hrPjAqLj?(4qx!u&2OxniUC#aA&r#7e!{;#o>$&o4 z)f_R9(DjwyID>;gm#*z%9wXyB$cnB-KaZFEAC3wMU4#3L$vBP*UAkrPANN5;w>Y22 zmf)ypG|Fn#K95Fum6!-U%i_yCOMM4f;cd>V=xx3t`hNHmtoRZE0r}{ct?OGOUKSBa zr%nI9^pJR4yX@^I$}dJFDlrjPhQ$67!;0YR!tr$;Nk zguV(kak|dcWy7}x6Xj2Qeo81N+;cvUNF75fI))}3&y{uNHnr2cFDA5$w*vy;DbWN$ zUsh*o;(x}l2)!@8Uw)SCZG9g53r-M^BKyj$uS<)JGRi3VCQxm;Kd{o74AI83bAc+m zdr&2Q%P4X>P}yT-C*RK#|K{y26VCrupn}AMrK5~HO#;=gvxm_b*}~nUME;|FWpuHM z4gy_cDn=Sh4klA=Iu9UXbKyzi*Ao#^R5S!CNc=P_jgh`*ay9a&{zM$^G+8{j5h3?i z4|NdeN>ur*$h|G48vc7XB1T6q6yY;^%k!N|3sjKk8#-?1%KoWTHM1)bhf4n}Zp`T| z)7~oTAkY=-b-w7O%hRZz9=9Um$69}g9Kk(hib^2@6(lOZ$|B1Rr{9md*^-FZm)D5u znS08Ohx0lJbdCMlkim1(tL-J~6Y<1FI8puh=26_DN^C;C5z#3KA*9@5tuUGOIZSs}WJ? 
z%3-nTaA#TThjb1CU85o{${`)Is0NP>B9cBiDOQ#2BzrAPCs09Rd2mwm{>ZGV_EbT{ z{Y9t6$^@O{e>GD&2y|5}9?x`D&!$dxDoaH6?=FcN{W{3s-6;hsNbFf1WDf3^T_t>4 znusg`SHv#{{nBR9qz(dIVZo)$mx*(!qYd*DVcxwjiau&1=T1!^P(dQr=7#3Hrn%M7 zEqRH^x#Xc}vAT`C*gm#{K-bP}wah~gbE|KU<|JZDiT_0N{H^89zhVnikjNg|%gkFd zSe49}gNVV8o{0q4TFE;d--RK8uFwn}&0mfMs{>Us5wWS@8}YnTOL?LD`!H0H_@(t| zbBoBU9u3VvgmLSw7?ZxG4E^>$2Z65bDTkST*XLDLccvmD=b29;){5q`?vQ6;s2~xt zWSUvESbnwQ?-WGr+!tV+soPwRbl-Cj==y2wB(vJg{7RKbLd4OGag28No60AL?}wp+ z#Lk-w&C|IHsy5+?h=~0nj&WgHQ`z>~H3xyN=A-7Cnfet}6@QOIMEl_Q#<^lmWS!PG z!caltySyvS4(UQv#-*`{sQ)RxQTt?L*<#vx2Z63kBbJ!|HVjcitG=e5ewyTojQGPF z$%n-+hM|JQ#jcyoX9|E2=V_x=BRgbxDm^of^ojYYsRFbj=?`Kj59QnCg@JA`$M< z$&6p~)R&EC9|}VSi8ucqF;o6qTuuJ*3=!#@Bsa1})sy`)cpL<}n)Kgic6(P`bx(De zh-xWQ8W%^`m7Z+7!%#tDLa__x;%g<<$Cmqu*!NFLV@2w^GP&IDAkdYe?@3c!DXBWe z+DSy@=+wr*JGJGC8#}^KK_Y#>TV~NyrRmJK60xm*8soCNw#@bG1_yzzbG`mHQ_0e* z?UO%<7=J0P@n>C^tnv4zFjSCOzU8SY_m)-v++9IL>Otv^0&Qza)o+P|Ko{r2hEFW5 z>?|1)s2~wKBDSZ;kx6dWdg5n-GbR`Z;?uoa#f|@pQB@vl3rZhSftt;OrJQs$F zgV>nRQ)POn+FWrP5i=GgGn%S;^25x(odmdYrA*}+(!YYLK4l{jzn=^=Hq*1ab>y8e zR1yPGzC#kvMVHGksc~nCxLPxg zvHDIE`K)1}Kn01%r&D@5jw`D&-n>mj#pVIVh9S*lot-Hi1iCt}$mCg@FH{{pbAyPe z%%4Q)?q+gc?sNhbB(C*J?WwxHjQXj?Ga|OWdM%FEZz22rnaM$*D>L0u>}0o=EFi@@py8<+u10o)^F6Z7*w133CwW znw%xQCn!+~wN#`bBH^%eqH($QvgtZQpn`-kJAr4=%n&uJdoU5hiysgRb99oKzx&of zpsUX4B%Z+hh1I(CK}4+Sw_hwQ*h#+bSyP~b#FjO2JWV?mR1@lk@KLppV*ISmvc%oG z4gy{ECnxaCcveuwUtEZYE=!`tnFXC?mV^xiDo9MK@yQ&KH@~V>vlPd;u}8d~&`C}> zTH8UOtN4Jlp1YNcsH$W5Jc^v!BnE!$B2%7fDNsRT?DnVTjWofk(Xz@!jA*}H>~M9L zi!Zkqs32jizHR0{5~SW9sq{YxbUjaU%-q)}huT=H4iU$OtrTlEv#OTNLl;AkbBI;x@B$NOqOy68S!$eU%%&fP9yYJ5tzNF?3}QfgFsjPb+b)(s;uh8&;~^On|iIdLeEu- zM|A`$NbElQlldrh4mDwA4I=i;UnK74=_#LgZ|ESXg1X>;5#C=bpA~oE!Ib5Qj3VU-z*n@lL(3SdCMsv#L4C>(K zrbN{EezUlIxtpwzp`1VkiK+4Pm|L4?R%yq$h`6Ixh#hnuvlE6n2y~5|@m9Wyn?W`8 zG$UeR{H@~Nv2OB2ijo2qBz6x^Xr}6xNqt;bn}|%|tHe&aANQ`7aS-UbS<)lhg``*a zk2EJD)!c33kLYgl?9;*m6(k-%IwN}x%Bb#K;u>!Gp4DPjp`P;b&&3@Cx+P1GA0sLrO0 zB;vvIu42%1w@luszCZerGin|lt@^+;L4gy`nr;dw`y(xwA z#OL$KpMIRUHYq|*-B(+ng2W#si^zeEQ>yFb-9*G`KVG!`EkYKW@vVbE*XZ-@ zsbWX;r5Jbzkw6z-1-C0*nz>?Ox8Cwo%kKm#NGy=oV67#MgKM-iHcu$EFD#atK-G(_`PKGkD~-CNK}6t z6+JCe67^r7aYS@j94acd?I*_?5e@=f8Gg?o-@Hnq9w*@#NT7nmpGStt+MScCbNj{+ zQL$90DA%x`Y|^l=KmuKH?k|yde@d>V59Un6PqkW!MrL1`;M<-86(shCoRh9RDO954 z-xHC3NE`9v{l4<>ur3Y)T||owt4f#p5RozSNbxCCggiC7y+8$tAEVNnwQi+W z?Ncx@r`{-Wxom{&yuX!$K-b_z`OJN{)2SB~IuJ1_$xJc6d~dlgNi%^861D%XY<{;i zz1mf`JrO^*nk8h--m;I;&_SRJ+j(cd1QjH(_d%na848J$&HKq)dA=8T*OsQNFXv}V ztd6{yPzkTJ+ZFG6X>t6STUNO^M4*C1(7-?Cva?B*Ig;;r>&9h8NcMg*XSRV30$q5e z-L8fOT8Juh`pWblA_OW(?A{j7+_Wx*8X3g*9G?;-(1lmp?Rxju2od*vA9?eeE&>%K zo^LL0stIY-=&HRb20kT7pbM`wjn?j(F7D>*Eti zC5)U*#ElhMM6R;^WKdEkfv$+M+hx$oM5^ofNkr_WY)qwyezM1haRL=2YNWd_yM0Kc zhSy`F@~b4`ai@OrQ={hjXoL?kXcRUG)#OBTA_*+HNS-xHnhwWuJ0Z?A6G_pVr?PEbF2?(k%RPsY`r zf6E7_5~!(hrdGncNpD`RM56zFx9l}=vOooi1(jl%f%y`vgk|};x-l)W*cseUt{6Mf zL7)rorrY&h!=hq$9=9C7b*w-IiPvR|ne|#EQ~yr$eG-vC7v4>JuYKM?1iy)p6D~vw zRFG&?q^&t`dMdT@IIn!cClLvB;oWq*l58I+ZjA3Ee@!|_pn}B3nG?(_d(x?XBlx+( zClLvB;oWq*#@3r6&R^*zk5A|&P(dQm)J0~lHC z)pwl<>-WShcb=Fb&W0y7PmGUG<89L`y}KK1zFLu3t++d}5Kv(i1f#&*piPg-kOgt)_NKC!ymfx?OEKotB;FU1*!>nYgT~B_l$_ykc(PWK6Ac26Oq?_PU;^d^ke(`ABs;>hE(D`$=wFHk`ub+Iw# z`c|pcmE(ixJdS>-D}pyg$RpIABY`fw3eKyG3KHW}%r)zENv~#a>P|5h4(l%tcIYF= zH2+>8fiAoX&Z~wy(4irGq2nhL0l!DoC_C{*xIPm0kta z=4(}|eLwM{ULP4Sbhv{+7hVPD)kOt~euLJUE4E}-39In6ik)e)xUsjFJbkgRKmuKO z6`WTW6(n#}-8n*!1iGA8_gTrl;y~d(vPi>`0JY#Ffh)H7H_Fgi}`+g1rU3gB;YlR9Dn6q%^KafBd zUIogr)S4(VEb1jk7a1(@*wf`WW48W~UHx6EekF`Y^Ghbp5$^~0lnom95~v`7*&6C0 
zKb$XyJnbP@tmy6_(1k}xxzwdUiu*Tu$dXrj3*0YenvkYnUL1T{6dBrH7Ee_}EKXkB z^KajBszm#Em4;T#?%CV8q^kF8s!EvIa%Ra;L849H7o|OKcT`Xn z%0CUq^K-jq_rEHJ^lU4iWe9VeITG`d74h7ATuxoA!E=hfMxbkA-n^c3xyz_@&v~X( ziL^IGWJDVowym5%1&J*e3V5!RDW|H3#3o|=qbnlOY0B`fs^B2dl{Ic|PyT+TRnB9H ziAZV1gzL}0#eLaU7X2aAL7;2>wQQcDdrGN7 zE0Yj$@8?^haFsT4#Dr1;6(nBv&gI#=zl_THl4D?|8VPh^2AkT!_P52hLT#k%R>F~~ zMgsHdZdcq@??j;!&E$z41;qXrEj$Ca*Pv&2P59B(E>DphRn?g?yTb8VpfWi7rPy4u zg?xXapyMe)!rWcWleIx5Rck8G{rDPzt__!kC+P%3?Y()Gh=$c(iB5@H$a6Ic3RI97 z-K>)5^{8;=p7R$G-`;vE?saS_M?45|5a_x;Ce%|^3YGWVbt1|oc`e#LXf98c&o59x zqWaS?&*?uass%rtBqGu8|B2n-w3N%v6?72j$`w-DGyG+knt1RZA|CzqS{yslTvjZY zSD=DKk_qKJ>#iBf{OvRmOCp|$WFakOMl-*IKv&Z<#XMU=!qkHuw}|L7?~Pc!rn$_R zK3JfF#PMvUJd=_e>h@}$^^&gKb8#SfOL=-}UI&4$8N)(6k2_RQuC;fF*wgB*c=1DX z+4xf~feI3hYZmf^_Y6}dw)0$^9}c|`qffPvvD@c%5a^0^EXdRLRH#Zd>j4oL9={WZ zYBra*_GA;NAaQkYZqMsO(ES&SuaT7932`9 z?;6MG`J}PjbvCCMQK`G6)CwL7;0_Xt-zPtV(M3WS-sjcy4T?-Nq)eRhBFQ6(qiETg78` zsG`y@-9&^tcYrZDu$e43Fq?xwSD;zJ6C4$;My%LNMAEmhjl$EK$V^Ey2~?1X^G$hA zti_d7|9IPp82uo?ko%j;Ayu3Ny4D;j;mMhxqG~|jgQQVsPaGq5)h4p(^|S&NBz6=n z?g_tAk!F(bA|mr}mPaEtRcDEkY7_Erte?5`m8QefF znv}*tpet_mUY`DS>QLR@+lN2}iIjy0c*bR^uihuCMMU1cNsNQ7>&rbIQwk)|m90gD zr)b7{YDl_zL{yER#K^ayzKr`_T7e1@uO5y5yyl0$r1LcbT?Hof@LWh+SG~^K*oQy` ziPGCTdq$_KqXq=GAfou#q((@R`towWK!F6h;_mC<8Qsi9b07y3@nK+MV`1$EvecmD z0u?02#B1Z3)Vda}f*4N3nRJPbWOP5u4W?@a`$E^>v08dUe*I2ey88nW(N7Z?&s0NM zXI5%~3KFO4HSx^+<6D)g@^m6zOiN(Qyx&lE_%W@6K-c!LhMsf()KG`h{z}CDy77#4 zS&2h$5wkjS~Hj%WUx>ME@HVj>1Sj%&1R-B{i}nbAR@3u{zoornq&SU)=NIX-=O z2l0ua84xW}7(4FQkq_p^68)Ez^T;aIX*Rbzym_Ujp2&o?)Z_Bq!;yBzKn01)x7&F_ zlGj$x3$>*)$0rd9bm9G{87a4t8u6~xlOG2r5vU-6caui$#-%fgtf?heU6f%slN{%W z4?_isM9~>MFINjS|9AeLkFG(8%SvyQrdCdh{0} zs*g%;bpKFSmig&!7%I5m(B7Fn>zfI6@f`OuUdIbGYD}mv?>2biAkbC!XprZQQBn2C zy_$$&p-GLLck0WFN#BH_g2aN0Sv}A1hN*&$qlnnOIFXUp-B2c(6d;g5*P*m|J-v4s zs_K!=M8rOu&=_{Op}cq_u0RC|Y#E&G2NLMQ_R8(*G&H^OTdP`f%aA3GrzB0%{pN+h zlIr;Wt>L&jf@Ws!N^5Ky=91gpYaOc~kiaz?Zr7rmX^om6T{7+BRbl8l5WLm=W@<6@ za{iHUC-Hl$RK`+ON6u=x&9RyT30!+Zs|SEU1qoacLvP#VQWz~N){_e|ZgZ@BLE>4S zdFIP8g;ZeVOS&IB9w#=Q4sIxSE;#BS(1j~|oCGRJ;Cdq`feI413X0}(-b`Sm=-61U z-+$b(+6jrr|MoOj_Mx?8^-|FLdAotJjlCV3$+GuuItX;(YAz>%3KF=+%t@ev1g?B@ zyF#|U7wxvTkUI|EbF6SfVo|dYb5w&|>f3?AbRKgTJr$e!ww4Q9e{>M&!u5Dg0u>~1 zU7_3ctMRW`+PaOrQ}#m`x^4}=AXk*gsU5L6|H&-S^`XJ9qS=#Fs`#!Bm2ed;^P1S&}27@*s= zE9$J6^lf_?dMw;QplfvD%%1-1OQ?6fQq!5^2qY><;D{cL@4|`^B+!K`Nt`2)s374S z9sJrj9FDNz$St*MIi8BqBU{Qf?}`XikifAsTD=_kKqy+nTYXz`2Z63S>xz43yeqFR zW#H9LIC6&y5;%57wNKTDBH#X2veDwA0ts}z?^4*4w|sdu_$9A0#gRKykifAsx9e$= zMgT%mvp61Wb* z?Rvhhop_bnE!WR$FYs(}?kFCib1o+;NZ_nc=ha05U3i4fxtypVfwMxLR~HF%InNwd zjG%%9&I)y2E1Xq~^M&ymQlC0=1+isPKY1gfr{n4(f%AHuvvF}>=)x=Qoc)QjF_&J< zZe}c(QoS8Kv=Yvsbh{e&A0|Fs>LbhD=^;=-qV}J)%rb@2s^5n6q9+k&e@CcnV0Z~B$=M*}xE)wW+o;j{pLj?((Q|NXb zA6Z)bc9+&092qKb-Yd>%#plg=bx}bAulm>La-s`op3)o3rjp|KKi#r!u}H@ZQzS6n z*XN|73un+e=MLhWBAnNS^O?Rr(+LTjA?BQUhWkPn&a(RYj4dQ^rW@VC$b{mLd4UAFaBY@z^%*Ki;L0AF5&w505frbV z9QWfS$4o$+!2RMCajp}_wZXU+ zx!n3!=CZ~4RJ;Fp750MiTg0YUUF6nZnhI2qz}3T0EbCZSw33QcM^vTS1u7FAsyD-JT zHSVY&;XIE$=}d8Ga%XvDcWr?Lx^PXsbGWu4&Ue z;?ZPN6a()EDoEgcqVEArzbM*1ZYOspDkG3U*Qa{PJStuZ)g(o7ih*BMK?MotJr6l_ zU1aLkR`&j{m_Pzu_!SrDmk{vl2l&K%9IMQAuY>5!L+PmSo}+>UJ_|HDn0Tjna=44U zPBSx+K-Z-TC(P3$^QtVt#VH2fb5xMPXMx^?r|uE!>UWkyCR7qgplj>1tLE#(1=Q0c z{PjJ&=cpio&jQU=+j&IvIo(mN%^W6>Kv$o(AI$KqMbv^U87T(db5xMPXMsj*b6ge$ z_qUV5v{oMpbmd7J-_vP%G1Y4i=RfeCqk;rJ3)FtJ_(xnVPhabq5+aa5*X+)Lo=HVY zs>(eQQVjft5GqJGpQ}->`y%Q>=P-& zI?Kjy!UPiN>R;wB^VpSqG-InE#lYtZ6(sO^bGxe4I3}KM=_r4kQC1*WUb6a$|tRFJ^u&FzZ6>5AC!Q#<)GTVa6&x)x4)XC|s%RE-(L-)O|=3Kb;qd2_ow 
z|J@YzleCqK(&QINpzHL9xE{AGuCkWmIl}l{p@IZHZ?t;(_5-nPP;2>T{vd$_x|)?v z>bYE|l-m51XGh~#xllpE`Rq=b@t?SRtEEiRD!V`eUHBC+dMko2x8T=W@X6mEwA8GA zJr~VL38SOJXBQPD@R_E0uYpqhySj^(DkzR2J=OYV3qeyNs58bE-FaiGfm%s z_;tT%8q`@%ZB$MmfvyrmP4hSUlINIhg(wC-yQm<6&os?7IDcG(&+91rJ})7VK-V89 zE}Nf56r#B}SttfRyQm<6&or%%s(Dr1=-p0UpPFAFfv$V+@0)Ax6jlogrJ)%3?4p7M zKGQTWW6EvO_Iw+8b#jnE0$q9Xy)m2DE~Z|6;MwZ8mEurck=lSjU?4p7MKGXE&uaM_r&We`un_HO#66mTU6MMF1D^1@?eMK1) z{FWXnNI1(!*I92xg%mAhweWNT33TDN`<&k?!fz8{=}Px}wmIuePL;GBe-#+Z2P#Nl zxuNgk%r!-`d0k||%%K7abdA2S#2i;BNQKnkw{0vRs33vmhTgo29260WI?JeEO9~{= z^(1<|IcRhqtW2Ks_>$`C`Z4BVA&|(=x1qm#hZr9nju?+J% ztx%8&1rq2w`YFCA@q{vJ*h^mXgWvc?1qo+4Uw1#Q(UjIrWa=4LAb~FYrm*vSefW(& ztR)r&_cRMeXQwu*BHckO=cpio<)7A{Z`mzocI_fpwJR!+K-Vu1N1H32@(ARDx}6XNl7uVoTGvSmVdYF zQn&v^=5ej$)aaA~33NpyIBiZZSwt1?!fRi#oTGvSmVdYF?{sg(qsuL1#q@y!33M54 zubYA0iqX0We&@t;jtUZ3{^{FoLq3TOrJBpm(-R6L&=t7jUvuKGB~(Cip5uk(92F$6 z{L?)@8rvu~qp2K`KdwLmT^G~5H{+ctrRo*twU$`UQ9%OBKh1&M5zoj-Yy0l){uG7; zx*BYa>lvB2tXf*@6ve==n4^M(vo7f}IiV3yzmcrc_f;4Y=)$k2Q>FpFk?FuaMUrbyso{XQ0*>qW5$}D zI)=Vd@b;)!y}FZ3{3%!E9AdP=aqLInw|$K0;o_xFjK z#kyG#$90l{&vOVQ&~(;+LInw|$7m(ezvqPKbO-r#Yi5B2x~g=e@7%o%ruICDVqkrR3KCe4(HG1{ z-4QFgwv}&|rxZw_t8dvK&8vy?sf2&>C@0oes33v$nA(;+LInw|$LN0Sej)n5ZzQr&X%m<-les33v$ z7=44T*L(4}dJFmU4{-z%==!)M+I(8AF#VbbkI7(tg$fc_kI{_yF0qXGbDPPDp&!GL zKv$uN!{*CoMb-R*JSKzn6)H$zJ?3_07!cQpq+dSqEO-%y1iG3T=gs%MimUfO^Yo(?f%O=@s})IXgv@Iw zBQo3yLjqk}FFiFA&MU3VQAa2S)>o(?f%TZ%b@ED5qwCB1@=5V4VMw6s_NtF&;O}Ks zt{42fOZcljs37626PNc+ZscrUPex2Q6NUu3@K=JIzp016KZoBv$FHV4zpai6*1^YO zr81K*q~E){T7{lntVK~l0&7>AFFN^vIGd`A%#kmrKmuJ;QWZ3t{*q076T;agtVK~l z0&7?LCV7<;!qcjg{BSa}KmuLgad@A+EbbLRc7DWXKtX-)@^?xNQY-}ly zmWU&eK-cgci_8og3aCV}cvKB*QB;t?+LhKK=b&$kylF0@T6_pY0$o=suQA(gE=1p% z;88WKMNvTlYgfvKSBz~$hc}a>W8VZR(!hsdNT6#$gMH?!9YxiNGd!w>wJ0h`VC_nCYs)4yQe15$H}3l<3<-3-xqQ;B zM7ix95!WaN)}p8&fwimKRVhvq<9x}6GH>*yFeK2`q~ld{iYiIJ^ml?{U@eLY5?H%Z z%eg+#NHD#=?DXHMFeK2m_0a?K+}_e^`YQe!3D%;hAc3_j<+hC!#)l_$<)OAm!jM2$ z+5xZ4oCnL&ZyfR$Xz*9xP(i|3Q`gy)%1F|rj{Gh0-Y_K4g}*XKUkZ71Mu_I^<(s>e z9V-rTWgvdX&G~Cks376|+8eHFL>I0lbP`{y6Y*C=u|9VGUMea`;P0A#y>3Srwkyux zeMJQcY#+bgMxhJaN9S*{;<01PfWK?%tlLpR0_%L|Z?YnRF8p0nXWfnp5?JRuf0Gpn zbm8xsI_q{+kia_M`J1drpbLN3)LFNqf&|w2)NhC1WJLm9_`9agx*ZiHu+De>CMy!? 
z!rwJ@*6pYufpxy~H(8ND7yhoPvu;NP39R#J%@6!0D-!6!-!*mC?WiDub-wdAS&={& z{;sLBZbt2z!calN*?#=6N#L(kqJo6;E1S5| z8(p}*+DY6j@<9}z*-UQSkjL@8KKv3NehrgqIQX6=DoEhBEuG&4L>GRE&q?6-EKxxM zzisLKCLp@-OMFfOzh{XG68LRP=Qjb-g->4>k{Y3rt%LU^aap=2)VVANBRFJ?g|IvE2AL1H= z$26Af&S!QI=(@kY?f>iR+~cA+vOPX(Pyqq)!37hanE|4UO4KA*WO^Do)Cob#=7PE~hRPo17q`r%chjZMqnBO8;CwpQYv%GG+TjdP*|39J=Jt3ls3 z%DU)WZS3#=O(IZh$y+b$uZ|l-Z`RBu8+qeGm6Rhn+N1y3*@+S)u<{@MHhfp8a`TfM z?eT(+5`kJ+`A@EAi8U;-CR}mL(fadInZ}<>S#>e2T89!Ou(}-W?^c8;zv(z$TUOoK zi3DmL+L*2XWA6xK#K)`223Frg2@+T(k7~+hw^g*?kJCE57wtp>wL*qIr>~wintFE; z*}y7-C_zH54>$K*nDU_`N9#$I3Xwo9tS%>4^2F+$*wYgarRm=c9%_8phn*XaeT5Pv zun#@%+6AqZ?w{vsZT~yki3Dnu4;`u3oXj*Tvr5PY_7zHyz&@m^DsiF8>fz(GR&yep zNT628fh@gr_DIT2yhS#oz59&b#aONP zyig}fkdVg%tDa(|QyiP#OaEP;J|o>ItYl~2WACB_3G8+HO?6ru<>k4#TECf8&l1`~ ztu*CBzqU%H_riCOS2!LhK?29g<1Ptrt31^+S38>&>O=yyii)4q zv%@lsRaK>A1IGg;NZ>em+|I3G%Jq+Ow3wI>ClaXD#&GJ@i-#G;09IcV#{(ru;5gBD zwZsVJ+#}iAv@R{2NTAlTdqeg3`@@Z^6Bm+={ry)lBI-iPFda+)k#~pREweoJO@!DtGA9A7u39Q%H{922ZT=B_3uB?g@ zB=C8q`DX^y!snIdE5zdS16HS%*K?F0flmhXc0zNNZsExStL)NymX3DHfkD~Yq1jPV z{atKVer9Oy+I_iCU%jg%&1jgtS;hNCuFHuMB(VBs^YKYJfr=i%VnvuDr#XJSot1A2@>)>h<%08;!M;=9i)G`G{e{v z{ULcJ%_Sg_2x-2$cPm2KsEyTLI(Rn~+rpV>OYRVTe*G|GPQ?x_eiqF<|FS?qF5+ty)u3WJf3DgQpjnYGQ4KY>}v**Nz6+nz~92@+T# zn6fej9TnH6G1{k<$7KTQ6Jy~X#JW`=RgTdd;9%J&CK zkiaXS-brodDnTv0Lgg{Xc5(dWF>kKsGTtB9Qu8*DKo2SJ+^25aFKb@cLOPzVPMLf} zb&Yix6(3~O%$QNDuFi270|unm*lhb$n~mO%9+2(wxUXFJA(0a(L1Oi+W1=Pc^}zBk z5`qz^b!__os1rvyjPUZ|H6Hiv@xS_%6DV2h`daPaau_Y9jS29o)%US>PM}vZQ4euU z3%>|)SP~>g{r!ZxufM}cUOa)knpFQ^qOWW=i$JY{mrkiix;l*fgHwsP+-1ZT?iET# zuRE0PAe5sdVqmvP+(rxlsVS~lN zB}lya^jWoZo!yxJSqXX7;pYm;1}9KUyL3i9|AyT-wsk!bx7Hvu)eoF@qrp`{MB(#G z#dx4(Q@0c93rTh(XVTsPucpi|32Fn0{o}tCtwEOW| z%wtyMmIR3fzd5dc7h^ZNMAVRt@fRNzabR1hb>o$z>aY%WW85sA2%F7T#BHGDn_qt^ zw6GyZgGw->qS6!7q>os4)i;@kjEFo{gCRe}-hJQgxQ|B5v8 zf2%D{FsAQ`zEpMBTBYm#m7^c_o>IzuD9OB6wDT6krNjYk+e5YdywSt=092HyS{0$a&L~=dU~bEN0fK>?sCcmYRQq8 zla25{_V%tM8|c-kSf6s|OoY+hLnj-9J$eIzh#&6s@y-}f=0gb*H@>Y_Vn1zXJeSYT zMb4=aa>SMPaaWoX*)K+l$%d_K ziZ{AbMgua)RJeo{`>N@ip4{{8S!O4lpqm% zdA6FeErGl`OtX)J`l|1@gS;oGcacb1AFci^y&s*nYEMJkGJz5#@QR@t*wj}o6Z?8w zQePnzy{+JUwP|hQCknRNV&+>uZkHn)W>Izx8-_8z)eL1fI`J zzkFG!rM-~MoP)j~xmCfzk z2Cg!A?iPCMaes0uatkL=f&_Z#aj&U=M|g$S@qAx7@<+x_-)h;gB1ZyGCZ^x3U0x$> zU|Xn#qe(knljFpg<4EDT*my0_JCHe694J8o&n#!x)n7y$NT3#;I`46ZO#eyrE=q6} z#g@$Voc9$gg`DO6tksV?RDewos$ zhD7;jukx#P;YL;rTcf5EF>AtKQ$`Mu3Dny1Qk9awIo#-Rn|ZZ_h}>W5QU)ex`cQ%d z+N1YgiSP~kE~WERvJIqh6{Wq`xI(4s{C6pDr)T<5g2eNKtCZ=p!;HB9V_qc@F{|6P zl!TEofm*nVQXQ|@Lgnh`*HSLzX8KTq#HjEpW!#gYMz4F!2D?g5Qq_;0Ic_JekhpTn z#L`dd5;=hqB+xd0ZfBN?6!!h&XzdMJ$!scnhUcI0dr4D*e@j6EzX349|J|dA8R0hA zw<7+T+2c0&_bC2t#b!fEFoMx^&d;w0E35X$J9fO@Ph;l~GT*1P->BklHhO3hrxz~S z#0Zoif$NG%kTqK%fm(9p%qvcyS9n4q+Vi+y$uCrt_rBeHImG2-XE?Dq=9bJ)!xOHs zC6hR{q}ss zVwuF$+>L&(Fmlwwm`vj0)Rytg2KtL!mfxU}bc!r-z zJXg7G6C+T9XAw5_iuW!`0*Tj)pP;ir*?VO4WJ$TGuq~^Vav#4Z*qwv31oojxtoZ&5 zKY7S6%4R~R|Y5*U+7 z#EjUomDxZE67pKhdN=FKzx(4r>$u7^jXbBSY+GO)NZ`6F&7%Ij40eA|XS>*54}7M- z@f|OAg_!@?+-UBR1R+$5*bsJ0?7dQ+ONYFg4tX`Q!S_oh#?;|npO8>qO%SwmCt9L6 z`ycERyPT$1W?QI5`+mBWS97J?#114%Xg^Vhyqd60nYf;zm^Tm-Bxo;Fx3XmCAKxG% zN3T$e_DFRrOEzgw3z5e`3GKb=R+h}#bE2%wEgg3`g;z+>zOQa&$%?CDgT{ecw4bb7 zS+dmQowoD6aFozKwQgm}(5o9mJ_frOQnslES(u)+F@OOT*_c-_j9&AS;RT_v1A zE!yALtt{EBqsv5Jp@i}VI%FDzZTg+?oFBz_pjSvx&O*1cWE<`;k;a^Rg<6yg(XA|5 zxAD4&10|GW(XA|*)gvDF%qw39wSffXcXTUDM!6zk1LHs~${Xoc&MPXcr65p3`6!*| zxv(X3F5y=RN|2yjn4tZ6wV-jJ7UkH$s~wS@#9fOL%H4@*(Y8N77<2?>q8`Saa*!f& z_WhiXAQF_16k|i*yw1N1aaaUuQT`Lg{5eloVFM+UOBL4=+NM2#Wp6)VtgU z67-$}%ucvl94MjpGGL8Duh`0rW6r&j37n}tQ~N>hS`sAa 
zy%ShDAG>o2M$jTq3)lH3PZ^C*7CmT;>*2GNF>Olldr&5I$6V}<4L07yOLHZ0=2|Y{u{}q{9;wouEu#u$|=b=G)J&vyej=yi6BsdgzS}nBy0_p%mxyug*&XK4O=6D L5+uyeJY(`Nju%Ka diff --git a/act/assets/vx300s_10_custom_finger_right.stl b/act/assets/vx300s_10_custom_finger_right.stl deleted file mode 100644 index d6a492c2046d3850ce7c7d650e951701b93bb87d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 83384 zcmb511#}fjw6%i=2@oW>C%8L-R3(KD5+qn~_WB^EWd&o^lM=j-XM?M|KSzb}|S z!*GNmfr>jpv_^Qz7<-YRZRJw2AOAezM+}QV#hoD9Cc^g)p!4vmnqWi>66&bP^?3V5 z>-;`093f*Mfr>jpF>Io0wXRl-op;W}AVJ&8Rp;gfE5@&F`^p$dpyEytZ4+fOHMC-k zY?jK21Z^wVxIO2s7`}Hp$rwZ|shci<2vpn&qHQAEnesea<#GWJa+lBuqLu4w*f}f4 z`rOTB44c@oj0jZR38HP{R)viG`HOSQ_MNGt5kxCjnm^B2F%tEyFJllvcaR8F+zFy> zBFl(-jEJLiJ7@&a%60n2-&TxHIcmxnNTA|Q5N#9xJegy~XnMG>6A9W@u7yueS}{&E zt14q4fr>jpv`xHC9p8#k{OM39611&cM;;uvVl@4!vW$TQD((c)HgWDkk-c=SUL6?j zM1r=JtMtvIR*aC36axuV+zFy>B6Lra7>bc9WUvzn+E%X1w~tsc`gEx-V<3TwJ3+Kf zB)=bH#mF-;!ifZJD_5IuhpZSY2iK7?kU+(qAlfG4RY>fl^Z5HqxDyH5R<2Ez4_Yw_ zC2TBXAc2ZIL9|VL%2mLMk*aX86A9W@t|@!=TQRa8Y9nJHfr>jpv`sWCQr(J?;>yt0R*ZaGKExnF+sc(?fN903ziNn#fdne<1kpCp zy-lPQqt1*qF-Xw1a*h4Vuwv92F-FEf0u^_HXqyPD|FaciO3ETJNYJ)&4UDv7j2JgX z#y|oUcYa6BNTH;A!Vb zs3(zJ>b(j0d$Wvz1S;+X#jpu@YdI3?jUtzN|Ff6aDPtgkiaS9uYywIjj)W>1$fe4$ z8~ygk7)YSvPEZV+fKrqrp-MP%sq*;aFsF=x1S;+X#jpve4LB02q$ZcDSJIX>Weg%} z^&Jr)YaEV>J3%pQf@-yup`!z2J*W|?#vzxgAID9l7*G##RNM)QVH4kL=O|g9YlNyf z$))P~<6{gN0|`{z35sD8-&+|`vR%TFP%Qv*srE`#o){Se2~^w(ieVGqTT4;0eZ`Sb zEg5pDmRgBH(J}@RqlWcZP6R6M1jVokyPeo@xoq`mgldtIOSL$!E#4+$Ac2ZIK{0IN zdwX}eY`t9{$wVSp%AirS(l zHceG<5fv%YmCta}}p)X}C{P(^S0`tjMI4Ve} zcg1eEA4)uey&g3o`pTXG8iB5p9sf4NYXykp8*9k(xFmZMew!x*)|I^pRFF{birxRn z`REMe$0kI_ui8~3&^15!jF~1lK*Z}%SH?&y-$CyT69f0jUJ)utsCUKgM_p~1iubNC zG5VfgLybUJtrus_J;eh=iIGiZjBB#5R&w#gz-ZZ5Lj?)-uGsy-v3X1Il*cDV|8lat zMxbjzm-FV)YyqO*q4qL{pX{Y}t1~I^mh5w)f`ock?0#*<3w8MR;gh1Xm&vFR=&Jqu z1@lmn0CDzuZyBSA>^m>ZFgb9j>^q}^gnC!({`t=N9e6;!$75Q{kO~Z@Q>mAcBProQ(0n;1iGrH zxM1cE^A~;nH^>>htQGw^3wf1rYddXDWns$~9Po-FsQ=(>3eXau^FZarttX8z(^ z@Kzav-obOuS%EiY&4~&U>V359+ES--a_=XzqT3g*t`X>3mi?^Rrij02*lL%IaaEQj z-6CfP?vgD4RFF{Rirr@DzwQG|HhXsTy5X%g0$ux?oHi5X^cRO4@0BrpWtkZ9d3K;7 z+a;(Vp~^?Q?GrwE4ddzNMDwRr1{EzcvSf0TV5NT4gM&0lb>s>*gdU8_X0 z-Hr+p*oJq6Hl4gCCUq72ejtIaWc?1qJx?G7-9o*lhj zK37PfEB}Ur@a%pnpV&#y?%^-t92F$6Pvr+g` z?_iR`!5kGNuy5!HEtWsnnRn8x=mzpVM*>}AjD1j+G>{`jRF-(1&B;+g0{gg*(8eFb zo!d9hjGigWDXrl>b;%PZjL}g;OEgu*vNMK*x5!z>5g!Aye8PTO>xr+q4Qa&}H zY;P+^il}U_He(G#1qmE;aD>Wt)tPGl^yp@?{y+j@gsjhFBgoZk ztpRNYIuB|y$n%In1^27Y$sQx?@K;~wk}XrB17*7e33NTJY(m?I?gzDf_Q#qqdSwLv&eQL z66k8&VL!A*!{qxxZP9pgBo!4T)VpGjy^haY!pU+^ik>UmyGWpGKVH2b8%62;v=qfe&5cCZ`%kql)21n$`HY!M{vdbO|&;963 z%>FNS{{snhed}}t`Zx(?xl4VV%yNVt6(m#{Y>(+5PCO!J#}d0Ag#@~=pXdm!F6$5K zt0k7122_wxb%~vuz^vuGnMR1pbJDiznRe=A|6R*kTA5K)+zFyJ;(MO+$G(VGE*0A| z&xwjVL9}O{6A5)xL{Qce3EEb!Qv(lJF@EGZ6@iL7LG+J2ClQpjM1r=J zD>};|D~4yD6BT!YXwN(+611&cpPwAIV*JQ+jvDr48D&OMaVLoWk>~8lzAU3WCla)+ zTqSP&WySE!bE4u-5bc@gM1r=JE76@}Rt(QPCo1j)(VlruBxqZ?=07=M#jr~T7~gfG z;!Y546EIG#5kxE3>xZYT7@m1fRNM)oJ@cGM(6(|#oH%X8@XT|f;!Y6lndd};wv{W} zgtJzRA9>Cl<8Jv=W)u~7g6JQ4&bFry`BR<~3EEb!;MC`>7@m1fRNM)oJ@cGM(6(~T zX>!4e;hE<|#hoD9GtY?xZ7bL2Mi;FZo_S7G+zFyR^PEV~wsKWZaLJ0{ndd~sogmsX z&xr(WD_5iDb_~xvCo1j)(VlruBxqZ?)ZabxoT#`HM0@5rVJE}iJ#U_QPE_0pis6~(L_$4@jE>(_s<~dPuCn$zzo)ZaG!jVgr$DVmkRNM)Q z;hE<|LY36yQZRsit!`Qsj`+xsFn=5RJ-d(o>MmZtjs7X z?gYj7k>@0W@|;Mh78$uz`|(GfQxT}R6BOe|o|6d5b0VQymgG|H;2(KTMWEtNP>dgW zP9i94iG*q)lS{SdJ@cHXxDyn^GtY^HYUz_p^-Js#ddzd8;!aQuo4`CL5~{~SF4e#C z%yXjRPEZWbJg3YkhNwKJdZVZ{DRYw{appN)3KHsFi8asZBG4s!$gbFWo)Z-$)Vt!D 
diff --git a/act/assets/vx300s_10_gripper_finger.stl b/act/assets/vx300s_10_gripper_finger.stl
deleted file mode 100644
index d6df86bef815342723e35f4aac53a0f276877bfb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 42884
[... base85 binary payload omitted ...]

diff --git a/act/assets/vx300s_11_ar_tag.stl b/act/assets/vx300s_11_ar_tag.stl
deleted file mode 100644
index 193014b60c1cc7547a828d8549304acb1729ac8d..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3884
[... base85 binary payload omitted ...]

diff --git a/act/assets/vx300s_1_base.stl b/act/assets/vx300s_1_base.stl
deleted file mode 100644
index 5a7efda2fe330aeadad63ee4ce29e996ebdf2195..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 99984
[... base85 binary payload omitted ...]

diff --git a/act/assets/vx300s_2_shoulder.stl b/act/assets/vx300s_2_shoulder.stl
deleted file mode 100644
index dc22aa7e51355a0a8d0439f85be356c6aa9e1c3c..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 63884
[... base85 binary payload omitted ...]
zmyEwYtOpg%f(Nb}Jh7(Z*TwcOSAvQXh`HQ+Q#8D-c`CA3t@WUyS@6L1@1AS-d3g5- zK}89~TyEyxd8>Z8Z+)x>70rSN+A;SeT#%_7A*d*Un9I%GdCkv3FI;PB7M8pAc%0c+ z<8C=3`^6jgFE#%+Yjt!@&$myjdTq}!r66!;z?xt=h>jh{)JGFSn<#@B5OHk3ehX!0* zt%nlKg2&R#Q6+s=_4OV~P|>=G*^asGc;t@4RGlr&t%nlKg2%m6N=lmc#cvZ>DxRx43!<##$_Nm*$yeSH6X%?6NG!s325~Ch}J|Lhd!PxcmWvwc;mS$l&wl8y7 z*XVXwf{GG|u@~I40X+QP%_Eov4;-g~1JTpZC8%g!#O`R4@MnqjP=Zt^@+_V*r2P*DOgt}gd%01toFS`Q_d1rJ>Re|Fi| zdniFg>mo)g;hqiP;oB?gp#-zwfp)CRkN9mOm!P6`5u>$pzh;0iZ97^~>!Ad*;DI*y zt(EBM=Mq%3E@Ibe3xWHG^-zLY@W6c~+zx-ou>=*ZihQc$f{NBf%;!Y69p+4|G;O=}P=ZQG$xrMU1o3JsZHopY18FrCC^x z>n7}9DM3XE#JIZLvjIH(Rck$zU=}=Z{fGTTC8%g!#AqelvjIGO>tj8XU=}>s*LHhX z2`X9_F?;0hm$>QwHqZ3lxXhl3Bv2RaF8b<>V`c=JBbL8@SwOhf`GO#e1bfbo=yCY; zvavDzYU%2t7Gh>0K6&n#)bt=2Nh%~!Je}to<6Eq!6l1h<~PW&7Gh>0X3yCX&&C_h zTs&q?o(Ho)JbvlL#TWj1reCYw*WHpCFrm~ERJ1N)_Jf^A@~B_SdbPtB5MeFNLd;&R zBMR@So5@rx&m))x;^B1@i@*A=w_mGSdu=UiwE|ykg^Jci%zm)*c(3lJvbU%1XZICr zX%^z9M@}y8(C0+&apV_;nJ>=7@8_YCOTfdm)Hs4SrMhQ6=+eM?uoekqA!e`EtyTB2 zGs>pT!tb?F4`yL`i@Qe`pY}(oU#o+c6_-u=yQ?LrXkEnYqdJerKL5F_`Kwd&2xfs8 zx~a7I)M4GcNA-K>XTJQ_Z3h*tiUo*~T7e0Y~@BOlbLB9>FXSled-@ebBwL z_t;QzYvzhO@U20pXkEnYaXOFvc5acGc;w4@1hYV}|L6$wPGsi?2xfs`Z_p7p_gtP?-L-oj!7LE7-zcsM zk4N*}=Vo49agHUZXkEnY4LXlD113E^`1fsj1hYV#u;sR@@Ju}Nsa*=D_Ni~r5-M63 zF?)l~W75U9WR6>1Vm*{#76|t395L|G7Ma)o_%V-Q7KoP1%BsR^@V-owO!Lp&-ld{- z5wmaSJkGxE)w2653hZ_$!7LE_FIZR=wmzLp&&ce5$XdHrRJ1N)_6D8D(%F53_DTt6 zk#M~}v{(0RJ3e#es(bT1Kp+dm>~rQ-h3#(Nkv%f=H|=D%gNoKg%$}NCtHsxRTsG?8 zd#r~N%mQ(K`x#Z?9p|Dy7M2~}{~$|H(YlD)w{sp#{=7Bw{T6p#DZwld?5R28&;^fX zhMl{M-3}#~1>(F0S5}30=fie8Dsyh-3wEujXkEnYcR7#KmJKfJ)8mdjf>|Kgi*m#b zCwI=AbLAs>1hYU~eR1QeuwU}foI5kuCEZ$4(YlD)i*gxGQ!jzi{4fMTKqq+wGvDS@2-LD;+;{@$`%mRFpu>%th!8fGt zxNuaZ*3v92XAj5xD${3UgrK4XVlKx>y_IE?PdO{KqTX)iO;A`%v#^}K5A&C^j%;6` z1QjI^bGi8|W6S<2*tJ`$RIBQ35fSo8O8a=$xpNDl6J^ z|7)!W70rT2*pr!e`_FprTpu2>T_u1QjI^bGaF#c=v5s zt4eMM70rSN`x53C-ktA=5LA>v%;n}gnQc=N=6GazJg8_EJlHcx$4j2NB|=b90x_4T zXIl^f{GG|xjY@edghL}8&vZCK}ECR!8>X? 
z-Z!0z5LA>v%;o9$&Z(ob4gURY>Hqz4sQKFP*qTS~Y*O{wF8$2cpJ81r$33xQ$Mek- z%d@X`%%=bOWvThUC8#KYn0L{D=vEuSED*dunsHt&4$Iy$X<_ACpSMkE-P|?g9x+>f zTPmAeJF7CYcT-DHQ3CPLo35-nc2OU*9S=WTkZrrIVYc|Gx_Jb%;LRhOj{i_~Y_@;( z-<8KUf8X8*sc2oqla76`YQ}?S7>`C{+hi}Bd1Q7_*T?e+X2FAJrTNOD`3c!>NAHzA z{gCr4K}G8#E}Af>>gF>~HXi@{)G|A*#Yx#QPfoOVP9>NH4_+(jc$@hrX1C31m2LQH zy>eBwF5}U&PcR-0S~kxf*1%9toMi8utfg6~#1_Nc2Qw#SPpfH??bPWEOHfe)G4{g1 zfhES{+k^MWp1kSs?2+GY$Rn5q54L>fZx-*-E8Bhf50xvnEwcm_t&12(b7_xbj7Rs< zM%lejsgvF9%7ne+D8Vdvumw-YhyL9wd+iPnR5p0Jt|h2wUBtL)y>QK}G8##=G>l|`pDzY6#+zus}1rPQNf_+uQeMLpc@Cf?4oj z4<|V0MLgzIv@YV!Q~H}RQ*vIVcwQ;NEOn(-c%g9K2p3slwcM- z!oH}#229J$U@yv|umvrsAQQ~S0g#kPcs5{R)EBJEX*?UfSDf=Bpl z;M>F^wuw}7V;K3)EplvT=+fGI6BF0&XdslG(DB}G?31-0~yyN)$ zU^mHeGzM_&#?A!2!s`p;y`}V)L^eedPmbrNZvq19>=RB$h zPbv6r?q`;uf-EH(jy|U<7CXsrdA&XlHR^Ov3%lh?FpD1UI>#|L9%bqQ1zAeK@9?9K z_a1+5`Lyh+TMi36NH7aD?{IEA&aA&*`Lk`dwp9hO64=xI2J#L4cW*W>FaKv#u&>be zfnXM5-r=0bF(ui|+$Y-FwW5+s@P6e8b6zF%yuy7Vm%v#$@m=jptfXL6#DmZrZ6hygoLL zx-rA+LkVWl!`-9Y_0cf%-Q4r-c?ALrvXp?IUgzQUp#-x)^X}ueL$41i+z!-5j6EH; zaC&_x!7LEG`#2B1KB#D2#Jra{Laz@hxdhw!bbQq5lQUo3HzIZGx{HfqvEKF=aNW7( zSkC=szC)fqDMC&k^@6JteVk zK{~b5BdtxJ)NKa|(pb(jKON5{sAyfpT%L|sUjI&_PQhnI8)go)9#k|79=w9hn7D&R zM+hoPAm;LP{IIfx!P_XUrCC_cwkw~Yq6A_tH{XSCc?D}#$?c$`S@2-1osPdVWS688 zRFpu><>~lC*KNS@$ntnl(JXlIUXqR%ul+tkP*DOgmz(F>##0jJyvmw$*Pd5YGz%WQ zC#K^^-F9z;prQm~E>Fi>etuqo_psLo70u#yxVB_ik!Rf zuxC3JtxG@GilV;v$RpTtayxL{M7G=#*bZJ%B;di8@4pFV@p#;^Kb}&tM*awzx6bZJ zLD9O1+4B82!7Nn5*;94^p0?rP&t2<5Me8DF%lF>|v*3ZNAoTFpp!J}lbrG}W3$_DS zZ63iac;Gq@_myu;+!6GO)E1T@Sh0PfulbxOu60 zx^uN~9U$hJk2q|1@ifu3;@-^Pr`9n~6QPnz@SJuY+as6-I@}I;47sOoX2S5ZtOpfj zDZz8vd4QO``jv#%;#%R%w{&nk;DHwIura$^4<4yp0&R>tf+SAfJT{MD7HFP5Zmr-! zf(o*f;5i*0kJE}Du-l=>fOj9#u1)l76+3Lqop!C*CL&9T@Qy=*Yo!FUK(hr8wj)M@ z3bK^I_PM)(pYtQz@3qMZGa|1PGimr4vux~VKM7`vNykY%K6Oe0bA7luIqV!;wGfyi z1%%nVIEv;wcKf++*+-Dbb}?} zPDMR*ZPW1qwjt7oOWMAd(1$yTbRFt5HJA#8RUmDDjV!jwJ7u@fG9imiw{#_PkG;-F{d8n;plB96 z!m*aqm!1?Ms3?J$%gvV@+0%7Qb)aY#JUEKd{03Qid4!;%1Y#~X^WtnATrOk714Xmo z!BLcE?_O|dgrK4XVlFphk-N4omoe^vqFL|=$66k~=FkX1MG3@QZhjNqwS_r?<5Nfc zT7I|I(kw1_V}5EQpeVuE5$oFQ!nLZ@TAGFB97So~Up)|u5LA>v%;n~oA3cjB@2v+F z&4LFu!P*DOgmz!B`{#}OoeK-e1plB96!m*a8zt&@>5>%8x%;o9$rUUoa znJEHAv*3X%%8j+W?%r$4WcG_dQ37#DOnq~40a{Q$Cr6-Y7CbnL(#)Q;h(n^$E=TIjErFt0@Zc!QbbOCf3L^v+B@lCYI{stJ;-t)36DXPm500Wt$6pxP zJwi}X0x_4HuO>V6PMRZ#IkvJ|OS7Z4 zY1=W!mTgO@Xcjy;iZUJFcI9~yf{GG|xjY^3Q2+R(Y1=W!mi3^bS?~zQS`Pf{*a$&I z3B+7(+jesV$7fC3-dJmC7MHuRmZojbBcLe3*bz-Wc$+h36=^Na!g6e%8*4d#!#2l49@c}3X2HW9CycfHsrCC_c zp0gbRn(|}y&6Xcs%JG#T*pJ3?5Wzew5rT>mi1R!!R^NIk!7O-id_%BS0YOFUA`Z74 zwPhr0P)oDm!EqYSLkTKMAP$cpJY+;}P)oDm!OmY zTAGFB94nPiP*DOg?gqg)dUMP>WuLsh+`KOdYiSmj2lD|~;-u2o0s@K>jDz_AQhqzk zUCntYfh;V?9tq|(h!9k?E@JGfU>=r~%!Cls(kyu3$OiK>Lp}{;85rT>mh;i-)^RT33W{RMeX2AnjS}w zMG3^=lb3JXF~^qmP=ZlFYl;5Y^IutW$dS{Jc9ntmP@ zo>$gG31-0q=UXrjON5}JbrIvN4CZ0s^^wwAnuX=KZi0DOA_NsB5aa3!=3zlg&AIe~ zTABq9T>rs5ED?f=5{S`C1oN<Sydtg``XT9@#kX`t&d$~uMZ`d1;2276yAw` zT%1U5T=T9as31!T_O+eIb9W3+y#3(`c?7dSbA*y3zMcO_;^t=OT7n9)lwe=m5l`QL zWpcx=ZSB5Nf?1&1b9Th$6{jROzW#rC9?Sy4F-?x>c=Ddf<4f?nTBvAU#Oygc;^7Ic zl0&|mn@2DU1pC2`_+;iG$+NHTXt#sznuVBSnjFz=eVt@_t6M87xdi*cj%fH%o#f{J z6YW|l!7R`m)8vT5_u4ag>G(!=t*9VN3B<04jI*S9wpnu6lRMN(Ad740`nw?Z{IOed z?+c6VTB!&7z92Ym%B@xE)=tUUPd{M^Dq0sYd$o?3`1G%dMdJ^#Yo!FUKyVb4BbJ@i zHo5-uq1J0IFu1icAyd{rd76|rH9ntN#Wb)_lp35Vc1%e~39I?yt1Cwo9 zFR=s_t&5mFPDh-!`qxB@v7hA;%mTrYR*rahL;d8mI&WEmiq=KUKB^-|4mmNoB>hYt z!7LDHV_aVp$NZ=aQ76^{CavsO6sGpqiihJ^+qID6o|LBMfhZQ9I4_jkB zlwcMJ_68ks@`Q&JG@8=S`@o1hYW1Z|8{dUo=kM)zn>sxrCpGrIh2p95MQ=#>s-wF}od9v@X}s 
z_4;tOPwmh)xp?E$e>somc4?a&_WM*zP|>=G*;8}G%mb;K6=ZI{ro3&Jlu&)3IEhGYe!KeV}L-JlKoMC#Wcan9I$3>SOOs$msq+(JXkdhm(#cF4$P01QjI^b9p-c zabi(I<~0Zu&4LGeIOf~)JvT=PDoP;ca`W57;Fd|5pCM2*3m)vRq~l9g?-3!WD1n&E z&3Mjd<4KukB2Y979_+7}5hF)+i4atjK+NUoc)cU~C1t*hK+!CCu!m#5F!!q^-=06DXPm5B4R@%tk{}IxkM3D1kU6_MBXtk=c3zMYG_+ z`@H!}%BRnY5LA>v%;o9$Nqv9I$UH@XqFM0ZT{|5=v$|fn5>%8x%;o9$ny>dQm-&_g zMYG_+yLLW7MG3@Qo{q2E>A-TCH!4sx3m&}Bn_pk+913B(~`#-FFmJS=wnxf!og z%JG9*7t3)^4CY};nR!?^t4g3KftYtuI}Zy8&fF3qm<1xdbDDWrQf3|&j-wCOH+K!g zeP!lhNttJSU5jJgkTmt!fVjX0g*EE zuwV`d^WAHxXkEs^JQFE14-4m+2x@5-Dur!{nTI81=3(Jn7J;G!V(f)rzKoQahlTTH zLpeKvOQ2|7#E;^ z2EI+?+*E<0brIue2J^6>-R1mO5rSFp2%m9$+s-+*0!8a0###9q=V8J9gY$Sr2xh?} zyyN)$pq-CQ6|IXHS646(OX|W;c2Cw@@R%9L61pP}$9d%v7ae+PvgH#)EWuj21V?bW zXXoRGU6=gnoGEz(vq1B%?TA};dn|EYylqky#7b}kmm|!a0|_%HM;^f}(7Y!)V%Hmc zB-dUr*{&58WGTTroFh8DSCSlbcT4L*C70j`E=M$dc1ZH&pXON)C71=8cQ{Afbj%gW zk#oXLkgt0wP$C@l ztM@2uhh87NFTq0z-b>tC>GeS+m*AbjJQeNpCugR>JQMa9aNW7(SkC=sJZ`O%l$j|4 zC3nk1;pq6IAgIBQmzWvp%5rT>mh`HSSmE$ElCSPo~&c2P(TAGFBY>Dy- zDoP;c@^pOA-19IG3+Iyw6wQJM+h8+)Vv{IAMG3@QZr+y+Xvmo<%viy&mS$l&?>_1H zjCvy@1QjI^bGh-jZyRR>vgZ{Q&4LH-iTMN-B@lDDIfBa)<>qYXJVk+`S=F0~|mDeazV&A*d*Un9J>%sI$Waie|xs?PEIL z{KoSm1QjI^bGdnTuCq%*X1NIz&4LHp$8`MEZbKpj6(taJxt&2ECG$*_a)dqVvQ0Fj zzDqd@KSEThq6F8zplw@1>?suCz#7jf7o!lUPqmbDVhf=4*=-mevArpT(IbrFYc z;(rm$LM4v$5B4tS#R(LxiLrqI1@P=NuX$5 z#9^BVkL?l6f(K`;aC_IEyO{ALP_!=MuuX)AKX>y8W}#9z`-NYtNmp{0>UzlE%xI9TGqbUI znh4uI#JnaDhwU!zW)~%XPh6UvZJrH6C6@@#_WvT71v=ag5V>pRXHp5S75cfeoy1FD ze4No*nuVBq-EH~h1G^{J4#v}VsN@pi*$xj7caN;dBbWs`Jg-2!(5+wc`R6uRf(o*f z2+#K1?a*Vu`x5D39u~V++z!-*f-EJ%J5KI)D8VeyVcVX&9aNB|M0m#mvCpKx7M?kF zrTHQyoa+N|`0cq{?q=}-F{f37g+qsbTS$U4wdoIL3D4Zh&arh;9xVDZM{O*Gb*Djy1fF76;B$vP%x-a6f9j(lkcRgu$^KDdE z3o)}8yYKWtRP~D|U%2v+*-$v23}U>8;oKrzTens>zr3+SR{MNJm|e$ObocX@1)#SsIDdG!}Nm}&KcG%?<`Rw9MgccD(=yH zA&($yp)Ru!a}0&^7_@cbLY^g5kfqlkD>>rfey1*|+iGvS<-DHhfsu2FF*{J$mV9}7 z(*>_Rc0k_o2uBI&d>Pn|su6EiT+`?9JR%&sq_bau*sss<3LX#aU5tIhns97a{#wo3 z{LK6T{+M%pa|s@Qx8+}rc(age#bXPC~X}V)0=j9-ETRSTp}ECi0yc;->DtA zca>lkXm>qg93+UgGal-|c9#mWI3hEDt)6MspyPGEZC4K^cs)9g&eIR>_>4aj*#dAo zFxmw%TMS3sSk$EB|M|0>N-lwY=*F>NJ6`;7YX`Q8N-zs_I0_rY$d@-RV7p5NS=v&w IQab+s0B?#a?f?J) diff --git a/act/assets/vx300s_3_upper_arm.stl b/act/assets/vx300s_3_upper_arm.stl deleted file mode 100644 index 111c586e18fb0f44cee7e4593d24c71382a6e9b4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 102984 zcmb5133yaR_V$C~F4`)Hs3yrKBL0$IWYrnnr*=ygP-MjQ3I%w#THB+Zf{k_Zo|G$kltf=_xt?qgI$$W5MZS!j% z4kT%qkO9_=Ei9fn}JTL?V+6ZlhA0?J9 zjkW6QPjNPS992rFq?{N~=usVrTG~*;r@hvQ5u>FZR;q|lNs>5Q>VY+!G3WQ3M#1+m)G_FWAE3t2NtVP=x z({AiWgi4g4J&b%euGm(2B~*)Tn7gx-P)U;TEq%fq5YmHrCD^*q;#gQVtCUblIWeHH zY-u1kcJoSblt7DP+SkKM6%i^)5`N_Sde|t*hjEoxf}YOVy*)Y6$oVpm6~q?7<1=J`RF zWVT;=awA$m38bl|9>Y9?BmP+lLM5dH=xAJ-4M!yW6=-VFc39QS#%nj$VeBS+VkrSS z8dsSef18MLmC?LdDrm6{{i+7dJs%y6ah34xr37qv0;T!<&bu(K5(HZpS{w_0RRghl z-lL9CNhtvv5z%Y}##KfMjuL2bOh=`{Xm^B4N{P507*95%a;Uiy9PP-Lb2Dn){C>;7 z$GA%D8A^$`KQPKW)K|G4)Vw%rk+1rgVrs?rZ#7m_)MY;CF#l22(*8&iA9k2O55xgm zx6DS1+&|Z=_WkQFv5~{^^^Zp7x7xnn* z`Y}0`D8UhvB)}=5TI_%CuUHS1ius}sSVGQ^c$6S-mWuguWU!xU+mA%3g8-+5YOz#) zv{xU}p~(nab-LtQ+p%ILINl8dcI7;aBHN zG8^PtBS^=)4DLl6vY$B{s0V6@_W|ruTO<4~V{HU!XmO9?>!GDmNo$0!ht^yPrlG~X zR(b2Hq&33tGWs?=u?71>38tZ?d&XjF@aHFmK0i0trs9w++6wE$dS@Tz(kKbj-bVEy zAB8?YH>VP&LA`pL{7n8d?qYr#KtF2iv^* zdg${*aw=(!@NX<@BS=GQcKRLWSDOv^ z_I*{NM33rA9PMq8FyH>D5~@YpepO?s;M-NA1nngWcu^%(>!qbfyHZ6y5pCssyGoQ8 zIbw8@r~`pERzkJV|7b(+MOhD&ius~-Swf^qC)W?GI4V(swvz-(rG#p+RDQ+r`^1ye z*aIbJUpoL?ze<7#ZlNxE@NVq?Cy3 
zVKuj1-^Fg9s08~T^}u+84L?6XaA&EKQiAQ~`aEy%A8wzh1jjM*#e8IMlK3aySZY;L zO29_+{$Vy8(X2%2JckWGO0aviQn_~=mI{(m0yd&?g_>8Q1(aZ^pv5-yGs?Hs1CCHh zDFGXvKo8m*WLK0<3AQe@I2NL}R2wBd@cxlkNhtvv5n&^-2i_(U1V;(9IHsdgMTAO9 ziMSqC^GtY~$Sc9oj(j;cqt?Z&#rsEIC8b2%A69;Q=k)KjN^sU9U-cL^uEKdG&X`DG z^y!{K{YsKhNs@?nI0?e}D~@X22hp~#xs<9|B}&kqU(e+PGSOD5rLCSkxl>7!(EXL3 z!zsbN4|ZWX-jXMGDk&#gJBL%kr@fZWg2btFVtynE-Cyacn&!p559*<_IdOWPnCD7x zALRSPdeEKq;gnGGQZ4mLZj?lXN|J=`uf#^eC+aSYw!NoTLM2Mjp5OIJTV<3`Ew-Uw z2bEAslJNcEcdv4ino)wS8}Guoaw@@5;@7@7U-p0bcLOCZ>B}u~14_^D6xyx4-rhMITzXRedOKk` z#-$sczur#IADCC^-f=#=VLN-7b;e}(j9t ziKlPA-MxR@vHR|JmJydkOW3>necr};W6j35hu1sew1qRfsziwo+Fs;{{1zZKd-q;P zY(4LdoD!%Y;|vgw{kX{0khpCa@V@^U)#A!3Dp|axlV|mw%t4b_3HW=75UzI-|hnnMaxV3!1v97f-e<_ zZJz$B3WQ3^iJCxktY6jKcVT_i-h)OY2<>wnW5raThdVWIH~6<1&7H2wYoGh{0N2mu z#9LQ?;|P_M69e4bET&MZcE`Rj1Eo^Jr@fZeub8TQbaKw#ie{qUwNyG{)Lt>wYr+R0 z>>mkVb|rk;KX)~+g+6Mo5+(fTVLhy^YWqA~p97(ia>9EhSM%DA^(Qr<=1SJlQ z>q4n~NwkE14f*S4lZxUNrctK6a|+`&>u3&K31z&W8CC{HwEXUa{{h zCs-=HO+X(zW$&X*VqV|-k^u$yDD_?bD_N58UO!31zRQ2R1A!gC`Y0v5UoqRxUtx!T z@{mW}JEsz=rJl%kOZKMn*+DQ{=XZX4g^h~-D_U8RI->8Nx&8AojK!SPip zQ9`|)^~e1i!86oh{A%lJyZPtb!vz9RAUMyl)AN0k^bGJJP{)kY(XOLXy^@=QJh=;m zpKD6^v^}S1a0GmbN|ews?Z1XwshsywLbZIl#(6vcjUEK*qmm>MkI?@{#9GPLg@;2O z+RvPsuCJeAqrGeP%`K+bmt&ugwC@IsoS>AHY~i?c6}sPY(6Kg5+&;E3x(?H zHGMa~Z@ds!ZvSuz5o?i%Y-CWXtEwm3*Nhp~&%RVAcAlMON%$Nk^sNhG|9fwa2uPAd zA)6?bv(aI|Ek?wW)(BrJ^o0#8+mqnmR6Eb>Vn5gRkJEMiES=@^V#;eeqE02P6LtMS z__KCTy1cSIUKdsEi>dpc9M`n>B~Lp-B}!PjAL(MM>sjAy zw^!>#q;=TLm#x;cG4`ejJo?$fWjx_3)O#a(v3>!0^{?G~#CO7ru5Kg^vuYC?CFD1r3Z zLk?)E-u;e1TzK!2xletu_r9eBwLqM6VZWA>ue>b~+ugorQ}qr%%meZCffHI-sz;ZO zXi1%OPgE*A_oaHH`rx@=H|*vJv}h>-8~fG|YnitC?m&EZ)^&3`T)T8GOGScOAm(-* z)Nth(BMoDKV4+VbL|_qw(^uFDNhOH1oZb;GXfRNpw}(6V}vfQ^Qa$4IFj z7TKHRdH-}HQNN^ z2U@84Ia}kq!9M+1T3@R3Q+=x!jIMM9B}&jnJW8&zQti3H`eno~RuX7A!s-!^M2xGR zJN)1Xj1ra#%j>4ZD@~IkiBrH@ywa+FQmPS`7(%3TYj? z$x(tCRe$z4*SeS!7;$I?&M5XE=BBst;9oXu`lj<{RSoq+t=&hpys+vH`?}!M`*)e_ z($&>xxAd4tFM8S!*XB-5f1idHC9M(5265J|jQ|ZT&?81%;B1$diW1sW0wwgs)=&SM zV?C5Wn)Ud!$0aQd$KDx;ld4kbp`SnH>Ol!hqQuC?y8?04qQ{$BBS1q7bbbA$&fED? 
zUG>;EU0Eu$F7wrvzPkGvj`qZo&5vsAGiNK;ACw^9QX;O$)k`;;(Ha37TA-ILJuQ?9 z-@L-PzI}s?+r6T~dUM_dExWz!`=34QzL+EkN|bPE>&=R(7$w05;sKnE1QE2fPUQq^ ztND6+XSx!40_Xbp4x)Lf7M`;e$_Yx8K>D&-r?*rbj!}You)Q=d)xz^+>ncHs63$+= z_D3LUH80hoZ9fvFRF2>%Q6jSU2*xgm&{i4EOSSNvBea~LLW5tJz5?Oh=Cz?edNXgc%owJh)}F=7IN|W<%-l+`h!ke7Fj4^f+(OynH=8jpwoV z`R@5C34#(OkmmDZ>K}#bDx9vMy)-Y?a<(JyQ%Y0^i4x9UM0|hrvxu^Q4SRdXeD(-V9hS-wuDm8uBC_X*Z*FRKkppNi%}ceM?a2Fl_xuhnI)OxWkSO8oMTCu# zrqJikh7wHs`qBH0xM~x3-ww4`iRSCl=zr(0jKIk`#%?(gw0u42ecrEJh*HIb%>(#X z&DYx_VWR{!cc)&Omulg8?0u|t@dY3vC{Y4wJ};)M2b-)8dGYICnbpL z;Fpwe_9B1P`=0i>V8e}FC76!t;k-{N;Y#H^4(6aQNo3CvHcBv~Q1iUzrCP2Yk@xxL zrrHD%?~p?K&|XB~J!(?nvfL&&_OU%&|9)pwU(+(abLhc&b(BdDcy@;l3Ye=g)IUOvi|O4~|_o3W%&BEvBfq!H4Pat zyo{ihD{Z3Y?;o*!)BXd8IYQe?i5b_O+tOjm?Y3t?%~!N6Y&`FjYs&~~`4-{M62#x0 z{Jrt>`>*qa_*x}0r;ltIU!aZjfg3ih-f5>Yf?B>F^zE>*^3V@*+fUy$lq&Y^O6>Uh zNiBDr$+mj@gx_-ZZ0^>#J)xdj38Xi=o$olqUhSL996L3PtJt?wE2@X{)UeSs zVcpz2qyHS&BlvbD&|S%q103f6yl?fxfxC z_OD@Foq5GpP2ZfADkG@n+lrnVHU?k5b<>Z-@#YoQaP#3vD}gcg`6;xq`;400!f*E} zBdF!;K~D`EPgif;^v4r{Y!tPZF+lyL%1V5-+q>uOvb8y?sx@7WSZY&W5r#@~z;k>1ephWA0G~3N~ufy79#_zwd`QgER z?TdSymulhpwT>p8~m4n0r6*exTd1sj+J-iFK% zM^K{qB8@rd_a!n1%Lr=024=HwE1Bo6ttioak>)-q;Y-R0YQYA)o$qt;iH@K|^F^9| zte6@;_lw*=@3<^Ke&uPk%nSZ%cv#`l-dMAi4sWjx%u5&Uu#^a^4;c- zrjKwoC{Zog;OHqQC{Y4wK6h<}2DX2=M;7O$T6oS`;9FM-N|ZpF&t0i1tsb?k2PLWn z8=RZv1SLu!&F8Ll)7Bpu_6H@Z1sn8AAes{dB}yR8=Wgt#ZCqt+?7C4xiE6<{?01LH zJu5*_q6E@>4jU2S<_9IJ#d=`gBueE7l!`M-X+BTXJfnHBRG{%3ZRqz5Hg?%oj-W&d zr1?D2gJlG@U;|^p?-^{gbCfuO63rKBK2MC@GJ;yLfzj@5_;Kq-yCW#ke39n!#EdE< zs0ACCo4&1N);fX`%@=7tPk4qhf?BY_U8d_n`VvP_qWL1t=V+_COwU8sZECmRr!*u_ z&Y0Np*Ja-CR?nN{_6c}SUl|iG_W0frlt4=f~LGO%jd2ToSwjK~$-f>TE&<-1y5!3>mdU8z5j6L}k`KIT-%xrLX25`=*?r7Kpc2j}W3)$B%RUj@+os z2DLz7uEeF9{r26tv2(o*N;F@j5BhSL)O^yuH|JU=ZsBYwK`juN^KpMnSUxY;_w!90 zL5b#z^ve$plK$vD_Q%|pJ(s(2r3AG=EbP)>M#<0huja0~xpSEfYJs2!_kI4%+xO*; z{Jqu@lxV(4|7*({8STrDosxTctNvvKwLs7ZdmHv0H8*wV?Hxgh=8JSiMW3*5$0z~u z*W3bEb0w%n8nfBk*!G!cb9;_zE+bGX&iQyXfQ`F9`zH722ak^G!SzE4wA%|8@qWo} zeLu*xpM6*vK`mFRX#IeV!R_wLeb(W#s0ZOKp``?Rv)7xv@3Z4)n>3xe?;g$u*9;KU zLKpcV*t)OeKW98`i@Ad+j8&Ou5vU!=Kq`FdP=_KmsDbFOf$s|2+`aP2Cl z26gO_v7daOt}oYUUJdoVqB~c3&K0hlphO9zdDkRCEY`eKi?;pBDW!4**FGg^&)=p3 zF|e*{{-fO{H)~$1h38!1e5sV6L;E5r+?kk*-(O7u)%fL6MO%3YeZ0@`63~FVgWXJ>(y3IDe%CwO9|o z11Kf9hHJjG=kE@J2sN+Oyi^O%xl$+E$`O<(fi&+bdLs0B89^=B;M!hJP@?%FjS=JT z2%{ch>{_IEoR@0hId=ozh7y!0fiyVGcSQN>B?nxEt^`l%PcOMVfa_toiF<$r!+3;Xip_aa#7k><`qZ?qOuhu^SgQy|PneAiV8=H%yj z*B{=djWQx}lRPTb*H^ued*Gcj^9Rp)y%slcsipb4=TTdIebu2&2_i_8aOsE`dq7pw zOP6Nz-#(DG-+*a0ln80w{`K>G*he2F2==q)i!`4XQ+u5ML({n=-{k>gh+2uhSdn$L@=ngJg*_03Fb{?oLJoDE7;3pO~rEOPRlpBj~*LJ= zY1lv;`WTZ^f)dRaX+BT%U|#c5Ej-6qKzu_nwcneIszRSTf)XW==5yy6s>0YUBd7%% z7}Guq;=Ot2rj?*X^F^A^ooA>FbI{qKM73Z8bJIt@m@i2Xlqi8TpC^2Yvq6b!xweX; zYs{BanJ=;5R8Z$@9wbU24X@-&Wxk{;c(^ixTCf3M>Dx+tq9Z8Le39n!V#-=K7kqme zK`q#T&-Z<9z63pp)xZ&yXue4Ex$`9$C0L`%2x`FwBcCu5qgBljlxV(4^ErG;v~s%n zL5XTP8&O1+%@3~5o(K{pTsk6b)^Z0>P6RF8OGNR`c5kr4zBC}x^ESKXd9RDEVeFd5 zUA9I{a8a0y!9f%i(s6v0-Al|Yy7apo{tpS`pfrZFocQp>ZzFp0+|1@0@>Y z%13rC>Lc>dKCKZxb_q7DFVG)(5|ES(64V0C=m<{?{rte3wl4G4mS!vvOEoUCK?(9LC8Ef%yp~D{ z#^!)#ERcOm^|2o=DuuO0WSHGOZo#ct+-}5k_G~#pi4ray-92Zt2%ayAk~xNi;{ zob%-bC7LhNy!Y-2)B&-OnwM(fdF)G|tpp`XID64eSs~omRe});v>nCv(S{>X4@flM z$ez1_4I81YGMbla;W^q6J|@vtj-W&dXD@i7VyYyLMSN8MDD8;I`;-wuQePhvF0F({ z*SIl?C>quBrK0z_;g^N@9m0s(vYh}trAKm0+)_z9*DY3*FBn4Qc5tA!0!`J+j#$~2lqPAcE}iU zaa&Of&l!Q?iK*vytJ<$iy(1`rmJ)Hi0ZMiA;j`2BQ$|qBmr!C%@(WMySylDv%IPXW zS_wuH_)<-teL&U4<2p2!64Y|fBTtP|ZTYuD(m!0mn%kTRo>~b;5_lWFH++A%*%=6G zdE4~Vu<`co7p6D=tYauu?Aw)KB!RcFYT4WA!_P|j9+cI9TCfp&YS`HM(x=luf4!k2 
zD1nv|j3n?jdcO2=`plDmsMNfWrWVrl$DWw;+j`+`B9@dAprbgie6ZnbF20>wk!|Ox zQIGxGevy9gg-hKiQQxja9B%;PmD^{e?Wc^Oma9kP+kJn0YyC0V)m(k85{xA9HJ{cz zH{EX26Uzu{`L?2OhmFRDGt-6J$A+56zE%lF5_lU|zc(v=?8Ilv2x|Fy(6__J%7O2v zpE&kvPXzA_EhQL9;BDM}UXQAQPvdRddQb>z;d$(-VZ&lAD=pSCp0#1^Q-WT}+pzgj zhdH8oAx$l3JMvc+Z;%f$ShjnOwd{6m3-12l?x-Vzw{tOA`4EHU2u30(fpmOB2-=L= z(!5lQw7-FEFi@|CRF<8!q64ZhX-tlxXSosixe>fYID1kJ3)5oP+3>N#`*-(O7u)*7uiE-r!N;F@j zF?M}is>NV6TMQP)ZW%!>*x=pB#QbmsC7LhNn1enpRpwwBK`q$eO-C1N$$9SDiW1Ei zY3_qu3>JM!89^=B;7!M3N_?UtDA9b8rsqqop8?oOliKvPsN+8`T z;pPV=s>OQvI2N(t2$YI5N@+e%v{hd7VyQsmIa=5686*bF5tJx_G@mDWu#BJgtLE!e=U^=&0{&=Hhq zzDV!p(#E7%6rNPYJsM&^u(cEA4@-VLZu@pftC`y_2!9Jj(ayswbUC8z}gb3X2mL*Ff=FS&9b zXM+;W7ir$P^)}kv_fmSzvj>+E)B?eqz@9kjnqs>0gA2)+?nBW{#f?A}bILN%tK_z%c97Mbt zpwD#UFM{*8QqWL0?xe~=e=5-D#K`juNk1?ThPzh>*;9BDQT<0JqnlIAuD{;+r4k|${ z5L`>V4V{COXue3p+r|B%b5IFtfq+MiM~TisC8z}=xkl+6q(t*YnroM@ht5GIs0AWk z)$BfX9zTei(7fvFd$sVl_u@I%M0cM$K~SOu(!431AZj!()uQbvZh|%(!L?5b+Kb{Q z^1S7(d8rnjbA|JzQi2jCTspcPjdtSwcFjw*@SJO+x1j_jN+6AUYf;=pp11BbFV(_x zuA<(C5|k)`H16L;u>koHgO$;|R143!ih3JLP@)9VyaVlt`xg#KTMU-7p#-&HgX?av zv1(iu2}(3yq;WSgikm<^5K-l9C_ydQ;JO>tBO)l#e38Z-&$z9ubrFN*Y$!o3*xQ-cXue4E-m@pdisMF!64ZhX?l{T` zN;F@jc?a4PVdZo-l%N)DaK}+jP@?&|baXqqO5&p^!Ag%bcNTU7J4zQ*&m`|tM-nBN zlb`2e!|D;|rCNB-eURO)P2Td3BxQu{8GPKtH_4miQ7bCp(h)H(d0Rb61TB<`y=i|@ z=YXc1+=!1PnlI8Z@nQ0wek4&X*CJ6Yz=!j`(wqI!s8#}Lj^ko#@H?01B;FvBs1|H+ zcG=&9?0P_gphO9z`Mj7~-=e8P43^EPI4{-0b9#wl>clu|%Mp|)fi$0^9*Jm zL2p-1P@)9Vd|pghe`r)nBvCC_zbICwl%UsF0%<;Xql6JvzO6!Dss$VI8fD`uK~SOu z(tPgb2V=0@jG{!fV1qG64AkF8+)YEYcma{>LYQY9~1I5(q*{&K<6-kspx>cf9 z^HMF=!@qw>sT_e)(O)rN+Vh0@61G(tK^ivDhCcSSl%PcOMVikOeV)<0R144Jy{PoL zBPdY%MiSM64a{2qZeYG7K~SOu z(tMuqC9YJIsFrK1C~jMPiLZH(D1kKml`oa~lE&Z@%Lr=0M*OZ;N>HNtBF*QnKj^8Q z4N6oCHn1A__Yd*yj-W&dr1>1H}!t`%kd?I*)T~7VYGi#NmIvxoSu&gld^>YhBvtp4xi;ZIfQ; z9Bf3AQi7!|rgpw()aJ*XU)2OcOA`H6L@SBX#ncyr`bUIHl!*Q!qpL^1Y;^`dPu;g! 
z^T-j0_N%CPZpn}pJ9is7VxXm$ESdTc(uelD|Ct3#p5L-l-(sqipd?8Q>^J_@IZF=z zX3Icq^h3wYaqVApHk6msp7dVeM3FE#PcWqWK~{`Pc33_U4lRK~M`eUORp}Bc6+iX4;@c^9}a; z#`Qq&wML-*p%v=UH?9Zzf^CI$Jz*Lg;wty_XqN4pW`WhG+(4qy2L1H zm7o@EprnbBnCIA~MDs-&eKGZmbhl{iwn|V7HqaMCj;J!?l{TkjHu-j0ZJ!?3+3)>; zWLWkOGgfDxo@ix1?)dD2XFt#4dC!-6W!HJQ!v213DM5)6NLN%GkbQK{I{$;9*5Q92 z*zf;V_Rb!C0SK%#`04yLep{$7RsUai53_c7cIpvG$EA&lX>Iq22-L&3k0-Q*F>&7o z-P0;jg6VH(e4e$t2Jv5~Ha&*(%{BNdh+T z{L+{}NKX+A$5LagJ^0eP7CLvu?;0u z(h3o5th?dKQ9ZP_+HOYoGaG?8?WfbU9+9LKB8!&BSL8%JA|mALw5+tz^CY2?R)}Du z%P)u3oV9FlrpJPgi9R24@@D<6AAe+a?=#=C5$J3LqIzcknrB81%G~s~PUsIIz zYw6zalW_-U$1GT7_2_xpK{an*Hz+gn@+6^J$mzx{hG*L>{?dq-F0859u{bF6+}56; zM2W3`?qMs#3E9QJfjBC2V9kOj2W4*P-YKsVCA5Ua)aEZ7P_wB2kj!`8IwlC!+Vu2) zWd~h)ZuUQi{@ZLUpE{tX?I%O*uefZTSBVlEEc`J$>ZfC}BM*Mxh~C@xuNk#+Nam3v zl7wmj;%75!$NY*THwb zA5)8W^c}yg%5?ku$?T7PE-kF{_2_<~Z=Ah06;GLW*rQ;+*=(#b0_O}MRHB61Hs9{O zPafVU_!8GXVZ;>ReY6##ah1ooQi&2_TowEr3^OY4W|W^f&W37*(O&R#FwELKX01w; z2&28=JwwfDeQL7b4$a?KjOK^SOSQI~{Ydtg&X*TDcEVa4yl5U?RLicd5PVl5;YIWC zqAF1$c+rCQqSu^XQ`4h3DF3i|(Ne-Zs?!?%T#tTN%sQcPOq+jMsb0<=ShGv>p!~GG zlZ0w%X^W{o&mK@SuK$pH-`zVV>Y;?TLgYm)4r}WKp<3F~#niBW_p0f$`{DUN-`qYQ zJWc^S7M&Bx)*W<1ubQ3yJv9IN%JzAcD4{(YdC@$)XhKUyOp*xutGw;6riXo-E0xY* zKKFaLZ*J}2_EztGSBwbR8@jozwu`CT`ag@EW&V>5e@PIk#TM~4!d^6Qd(r7CX^rr^ zzOX~i%Pzctx8JbyF5q1@?1KybeNYLND4~8O+J!qCN~l)YUl*{yP7<20`Y5GK-#_r4 z5ubAD>6g7tE@|x@$GvAdiGQ!*d6|2!wddvUQAxtyah;a0-)~D=dsjoLyaYC40&nzf z3h{ew{63f@sO4?!V1Bp!ofCwY_63g;TF!LkXXT4W5@pts4=PXue4EdDMgMFTZPEs^#m!bGV&4UcW%sxLn0c$$X0EL$Q7xqT-N=4#?T$;=N>B^+W7>A%+iah<{4-|9*`P%8 zMVjA@>`UB(>9rEnf(@pNsc9GXNn1PBT6;MglxV(4KX=5g;p@_^AN5)*K`q$em!@K> zSic{LX4;@c^9}aG*QK$!gVst|3B%VVSC2p(a#L5-JkPeGmgbAJwPE(7b5PV}%!rSv6)%I7X^_YS8!Mi(rKArDNSn0ji@Ph0a!h6*G zsT)>;xb@|^JCnd0mJ+^H!^7L%i`V?--co-&c%w#@D8V$wG2YeQX$Kp74E{HFGA=LG zVorV+zRyFAi{>sFlsT#?c_JLnFWf1x-PY?jWB$PGb9FdLO;ug__~d5>W!{_CDX$VG zGMn}8mu@#a+u;f9ukM?DMXQ8rU9)}feot*UJiFWDAo}ee6Dn!$*1O-SZHH%T9{n<_ zwu`BC@|U-2Ly6>$tit}?W%YQ!q#mjj+AZt%aQ|-C*k`u2)p6EVDnXiO@5xfZ#xr&n z{{6$rwo)RT8o87EVruWF8~Z%8WKe!su)(hu!JZpeuraXVkit)Yf6v1 z`M&pi81 zD4E*SI-y$0Q?*}@&nyh2Ib?geS?i9GT4RXwWg zrwI~Uo$amhKVb&zuAh5qlxpAwh03?DdNisB-ZMe-+rLlqjRnM{lTT{=V$|WS5c>AQ z_b5-SYMgZPDn!LwzVUmMtyg{H5^AY>Ts3pE4R>nm>Y?qEB$gPB-4blrkgBsEjw`D< z>*2LvoBOVa0N@Bppv6A$rPWf|xN79MV!K1DWF$(TI|H!|H>!QUetiu#Y`=n44fQCg zxs#L<+;@2bEz8!Wq?F*ki-h)|?ys~|+%2J0$x))aB_+7`0ih!?x%Y_)?pKPb&9=I? 
z=KI=fG95pBt6MljcJ~?dt6Vs(4_`(}6s8Q=T_s9zh4bgTB;Nh_iJ3~MmVU>LOEvQO zk(DY@B6#ZX6`3~v@m3$ZV;g^8Rzi0Jv5i$1{5oAFO6blYCjPla&q|djp}T>YxOv@K zl`2t!X}@x^9>sI2D%l@6?GIn&viK^;=l)Ee#7A#!6A_Rop?wpV>iGMg?WU4&#_n#@ zuvESWZCqu}EN=2>*OeE{(=$D!-ln?#5@h59#iah8vA2)g>!BRNnkr-!dY;-RN!j zn@%9+Z=Y)%dgMvALyjd#E5Un**pAY(u_C# zy*@oXFgcobt+26=T55`}6SQ0$v7aC9e@wqP>?Wa;D>o2c`Psb7wb>dH%%E=lUf^ zzahYbcd@fVX)BEFdkf;T8# zi-eo9<)!jt)Z0@6I}X+(S#u@4Rw!+_0|^`2x)x3DB+!cQDwdY&-pwa!4~9Fbr3CLM z!p7MfjN1S63)=7Rt@|6lNb?H@()2NrPt0$8$h@xlJv;b8_lB)stz+WB564Vbi4wuP zy89C`vEL746NGB%7wnkW?5QgsP>B+HUn3?qy?*`aN~o58D~^f9A8**GH4m$(yE&kB ziiwXuIw!3XCG?#tCRQBsaDq@ReQ%42rsXNFweB5I-woAvOpM>6XLMeI)vGlkvDW6_ z+^f({^U`n4+S0L&zJtdnzPPhRvF>WCNB3khxF@4`l|sJm)>F7YV>1djYr2%gtYrgs-^V= z-X==xp+v9|VgY7vbYCuB+H!?>9G%Pj*NSt=z|tLpP73wHO*mDcXV9eE3EB-B zdom9VY7-GEQ6iMqYLEG`&Hs(($#zBw)e7}+ci3FSNFI?jh%p&4Vqgo-?tk8E%b7^W z@1KX-^XK~BcHCAoONm%gO2oT9jc+ZY+TfC9tc#?bU7Ku^SPTBndYQ zmi&BnM}3w0wjU>c^c^l2C~fNHeqZ16N#f8JzKcXX=%Sr8FA&u7wmA|}5BrG-j?pCH$1%pVy94P)`z(u(($R-> zJO}-l_9M}5W<~_9L!!hjBYU?b;-gw6RO^>N;CF@j$fxpBu_POHf|ie>Qrg|}Ud$L1 zdmm`wdAr>tZefgx-mNzq9S>g2=mC`|vHrpz3vr}{-SS?{xCtdx>)?|gEyR%)cFTJ) zV>48u#3rZzs}RS3SQNx!MnNc{THHDLb2z)@y_m5wT6S#(yP4gRh?QB)SQ(Wlfixpx zT&&Du#>$iu)UxwZ^B586l=o1yz!-S4Vx_s-djmCZ$S z_?o*?DWUeFS?gjmLW{a@ccC|3Y(^NB?zxL;z-V_d4JuJ0jJG(Z0i)f;G$^53VYIt5 zhhoZN8WuCAK_yDqiA75yreQH-8dRc$me9pCEM`oD5~>x(aftu0$S{m{7t^2;CBkSA zQ7OUuxEK>9R4a_#5S0=oNrLdvnV~-{GHkI$hPjvqCA2p+TERtHEM}wy-=pBk zQnMc^Fm@$LSfs^bMp~#u3AJ5JS**-r#>%LcwnCB!tAUI8(H7NuXe&f(l#4eAE2q0d z9o9s5J0+}%F213hu$9b7l+d}kc0#q1vo@^Gu2h7m)%m5;5R|P58}x-Y{dI3x6^ZX{$V2~urJ{^NDz!^(7b%9 zczet3=j@F>L@1OH{wzF6z=r?+>D!7C3f_hiUdun{>3JxX`}?&j@is`n2Jgd#nxppa zz1GgDqnc9-ImPGgr35A32EX>Yy{OeZL_x^AL7W$e_=~dJCl;n$y`Ak7VS{z?^%&+P zNy6ihZMGwF_zj=lCPZvpm=lrte;tle5ytBqdL8wFteS7u9-lMm7Q4p;V zjEHE7zXG6CTONIF6=E9f`#>bod_!sD-xPv(XBX1|3BQabTOqDr^d=k;tho|M(<}XV zf?C=Nc02mR>niaL#P0P~+KmLuYk2m~$JcQuUPz-4_}rg`?|a|Pj#&N1*V9!Jdd(5Z zcZ21pJ&OB@7NPKYS0z-7X>ViP4eowo#_kSAgq8|#@oKM_I{3a>(XB~JyjHSKe1|Kh zR$efW^xwFc*j8I1Z;BBq^Z2)~mLAscl{L|;rZFSUxk|fbPI=L;T+NOVB zE1~DROxte?d8#_Pbq^bqz=o!Ia%*3(u}!;Om7pX^xcRZ<&c`>k`LXWXXC%C+5=cV}Hf%nIon?q6 zb2s8u0vj>mW^f=hKB|;pzP=vZSstHn3ttj>YVJ83Gjr zOto3mCgFGE-DI3E>cQv5)WEN55?+)N)$%RkcYx+aKkbqr;@xDNuWzg2g*zYL!p4>D z8FsThgT*wAZseN~_6AIcXa%dezXM%N&7bO`GcBfJ-(8qjIf2$?OoQ!x8es!54N9== zviAw4+HRjKT}*>TfO7BSBuT>C@O$bX&q?jZoBhFtyN9g%9lICa62~;O$zQ&|#56<_ zB^Gz<-NM)d^CkOV(JG-@S8w0DC5~yBGCL+zVz(?>7%P!1l|@>#YD0-|@)GV@S~N!= zMssLhs)aPqeT%7&C)+P{$Pda4ebC*Ak7}-3$Z7Zfne1jay=}1o_OZR9>WM*_^XGbk z5+%^;2^)*l1_{;D(%N4QZP&PnkrpaZLVMQy?yYV2U-I0be8qj8@}Uo0>;lIwMkR9E z;pig^fA0IK)#LbAC+tQ-B}xPvF6zQ|0Kf0N=!w4%${#=7*PMB&7II<)hKuG{#Aptc zD1o%KRX8acwPCkKOYQf5d=&bFd8rn1x@dlFVa6WpkM*9g-xL!1gAye|-^4chs0|XT zrKK&VEYhM6BP~>-1k#Mau-~_|XxBaqrL9;G)k02eL;IcNKdl$_ph}boBR@op>~d}E zgldJE=58lhB+^DPp%NW2YTMrgysqK<+~xD?(xtryuLjSlE$xZS2FG`O?%^XH!M-I; zf;$dR{JVB)y3_0RWdyZA$GauG+1WI#!w)378gN}wd)|g=yMHS2X|7T63irPVT?e@X z2&IAzSE{w|0J!1=q7F!DsgipJzpL@3dUoZ+33vbUZqy%aQQgVJJ2KeV@sdvIX&sii z6^9bo)7E8gdK=?c|2X}xn|5|K*zO?W9kR|^*m&rd1-D$$bFZ=N~Le{HoNRzmx~y(5|)&zuC-Dn32oi4uB}ghTXQbQZq!z2X{=FbD~<(s zqj53fKdyZEt#08|-o<(77c>3d78CPtI4bcQFqKH&BeMtvZ{yi92|~4K!`~w#ap1TC zyKBB$LhhWR7!zmXi@g@jREZM$1v9S4(*ECdRYJAG$`HqxI2)t>x*1|jqVKZ0Thg3j z8-sSKtyGB;x>JjZiKncYu7qmo4lpKWow25?N=VCYIh$;&y}#MG8?_(_-y>Z7hqk(# zgL%Y%q<#EHbS{b$Pu}a&6IM@Hyg}s~PxZ{Hq?BM(fhR1cp{vC-lo8Yd%@bBng!m6f zPy#I_;u~8w2lLgnmD6`U{>UNdS~tzsEhTv0i?=C#sgBrhbkw?( z&<2V4E+=f=%Yu{72tVDlaLw!JAI7foKu?351uF61!Ad{9D2fk;qz%7iOEuV zNjZVt8S9a(xe{K>)BFsHq2JYG8+)A9K0&CK-Xe*Kp(nO)REZM2ACOp`^LKx@ 
zXQN75BmCVcw$Gp%N`&@}?7rhi-XtS42T5p<2ls5FpmM?g>YzM2X~A zOAuo#?{Lu^N~l)y%cw=ZZ&=ACPPR$^@2SapC^6Nv zJzY%AoY}1@Q1+f4=Vdk`t&q;bb}==5=1d{te3OwD?zs@(Tw6O4^$2Bm5+y=95TWLw z9&ui(rF|3C+_jb3P_1M=Q1c(wy?V1ys)DtNvz{cLvgiC=#c?|!EKM*GkP zo=2s^2?z*hV{L?QUHfm|Yvar$K}ZRw7OJb~)mBto>ue+mN=QqoEC=*puwnV*Uz}GU zoK~EMjdicSA7AzJLzaKkIIHRT!oBrp6)Nhk>u>2S(>HF=sRikg>;J`w1%I9Dh*MJk zYE(%H(Jv+r+vMWhkM`~S3;R*R(ven&&D+~rJE1Ug+)ZX<%}I3;p^_59;+;vH_s<)0 z$RBp<>t8E0)Yn*TukGJZ|5}!5tzR+q>U|Rr$q^excd8sL*iP zRI{fMamFRB5~_8W*>FARY*-KC)_wkh_n)s+8>+=t@TDSA+E%R*zO6oA z?;p+n=ZK8j(0sK;iYX;jqQnPFhg*LmP)-mmWH|`xjTvvrruaYDYkI=-}&9m1=j=R}LyVrup;W)!TedVe2xD(e8a>yD?Yf*zcN`+R*wHQ`bJ- z5D_X-f@8tkI3~ZaRYJA26^g0ZGcJh;l_20tMKioC0QMI(@trHmSDp5ky@kso9 zy(6qY{CtLWt@+vsl_F$hZ$w1iY2f?!mo)t zxfRtDTfg^`i?LNnIpNpfXhzwLtw`7?r#ly#=Al%ki8U$^D2bOSG4APCZRPA-sOW;7 zO!t4B!06??N|b1*e^7|i)@+2o&*|1el_ZH7MzH31>jKgAL!GU)ajBSYxC|?G+=lLN zXF8(o%4>{>B}pP{^&4p34toaL81u%s5+c@0juH}=54+Os0OCw6E@K`ucSc#G9vP0uf6}3GV0YKJ|O+WV&tp6aJQom1?zF@9Ii* z=+Ud9e0A+Aram!ZXCwY&gi4e^D{zG?rcQnLrQEb9-ThjdAAVe6G&IyNbK@21rshvE z60?4k?69V(>CiXNNMrA=5+#t{ti$hCbM$%r`MsL((;7jo)NN0Sjh)}Uxa%2Bua()T zsL1xK>an96uUc9+YvH8rAKqNzdL)T>RC-@B&JkLQ`C7kzZsD}sWYWubER;q&Q^Ud!=I?lRzmIM4m81#xoCj*tF0XKWw_w@SqC1|V+w z*Ye7Z?))+G49E-lQj2N68`Nd4JLs^=FE?B52x$2=Dqfe8r5d|UugsrT?31~3n_f-P z{4g6)`bTfi?d|M&F?GEWM;q}&hvO18S3+AMYTXLV-sD&cdq!8M(sA|m@#kTlJA(ez zm)*B?vL0(})>ip>o{vi6@{MiiO!J;0ZG`#X#2if8i04W%HDi}aGcdMLs*!W%XD>c= zhwOVNP3iwehq-Qyp>KxXwIJK~xh~nwFF;%U?UQYHBcT!{lCu`GkAzB;(E8bp_$eKu zbx;Y_O4j_{x~^^q4m%C?fAeMk5q+*cIQErxYS5{J5V0gl#Qxa+n&5S9)QSwA z*IQhJpFh2PRX$md*bizO7E?|8_o(eP;jr3uUb#5067`%7_0MK^+V#7>=qI}&bkkpI z@pIyJqf3ZbOMBLSbNyqV*5c=??G8-zx#oMq8*{TCZMj{xZBN+P^TnTQ_wKfCrc3W0 zd6g)!N6)`!JHPWq--b>g#(vT{^K5pz%%Jls5`=1vx%rXoi}_C30YBes#0Eq5$c!4( zJ@e5m!JI~IhZre8N2}D`^6 zy~(H7-S*!8)z#h2Yj4tm=cPm}QR3=*{qx7Vq6Y`wwXL(U#fX+#l_=5o#%gySG;+{? zBUEd~GL&$Wmc32|F?zxBX4GThPg`bGqQs3G_H;I$IOo3+s?|{6!`Ya0GYIR!JbrGR zdt{nzb$OSG);@8&v90htCg!|zk`ca5)<*c&m3rh^l7lBr=+Ao8&zoT5*q7?u3nnCM zZhan0lwgazb}tC&k7ku9!M^$RqyI*z7JK%vw?N3aYF3F7Y&YLlYa>)k+c0k3HO24K z{JZmbN7hpcK5ygx9zA>ThhxV_1SCo@?TOp^zg4qf`)uZ**SqF*B~xFUy6s`N5*<4U zK5_T<>(%~t?LnFJ-aFW85KEL$?^;Zqow>iZ>#V_4$6GL zXwznuD3SEL)n-w~9MCc#zP6S>PDtlX|wIYBLL zb=#3`Fe`mg|7Q0k&ewc^oguO2KCLa{iGybUDulL`(*C){zHa@X>yJHlezH=(zUUi0 zpRe5guhzP)5s)asbduO<-SyI%muhMK>~DIne882;{$HsQC46uCI=R-(f4}*Mji>+n zB^y`MtZxStS})a3Szjva^Af^KNXKUv!VNTpI&(wS|Q%U$HX?P zUN9n-koE*h%Tfj3o;i5M>m@|2&PM8piFTafZGcd%>8l=egkL$gGEJ1~10z*p zE499n-@Dstc_oMg2He!75+z>mFxS!kyrk>EEuD?Ob@(Y!D%Gm5U+oDiE&e*! 
z0@u28E?h8CC9M+mAg=rRE=Roa*Y^|sp?$!$*7y02gI74h{%>nke72M%;`vxi*|@6W zxKcvr3ZMJ+-1lFh%u*{C1;ip^SID&$2X^kx<-j_>miCiZni&&fBY zb(f)p?um-2s>wIH{`g{I-zJqPp*uDEi??H6al|q6uV_+<61p4Y38arqQHc`KxrXa= zoR^@j@ZTh%rxYRW`n=B(@8o(O^c+f-*AwieORp^Od!61|D5jQfSukzn=>v7cwvvN}Ns1?y}{N|PH!5toqzolxvrS{l2 z#Z)Oli4sWj`F|&<_1@CqHpU*aQG&0?4?lm6JK@B7s1nUrzfIb2?7jQl9DdMNDp_jz zZXfd1oQkRA&acYh2ZTzLICSRAHm)|cafMQ?jZm#W8G(BsB$~`dlk0Q+X3BR!XlZF- zBPLX$1m8Kmjn}N!_Ty|Qp;}re8|~jVR-)FQEj%-=lGX_Smiqq4I|A`(B?(BBVA>NX z5lUuuGL+yp1xtuDbMm(=toPFR0ihBl*iWADp2vBg1fg2&4^M2jV#z+wEMKw@Y_LTk zVc#IFrM2I%@!&6M`zfo35?aDy>h0xA9$C-qz=m4qD1i;FUomyn1Mz(yExYDRZ|CpH z{P@jl?yC>pzEq+F=gNO4REzW9e^D&8!TU){AkF7?TD#l%w!dP3c(0GVG+(`6<~&^0 zt3NMrcOX@wMAF8;MzyV)e(|C*Lbdc3S~2z0th-Iymr5l{APrCCYd-wbw!1xX-nq_3 zDM2l+44$wxDt-51{hH9{D$#uD=RI-CjO)_-PJ6=bG?YM^-%r)H{nok*wyb*Q+#YTu zs-%>NzX>mDe{9vUt9NoU3IrueB)>Eaduadk(u>Eqw^a0p5G^c~_ERx6bk3Tn9x5># z5rH&TJ74p`-`&}C`hZi)dQi1ErrEly(^x&dXNbp@65IiJ0=>QG?I*gLYdr=Yy{gqZ zaew7TiS06`T3h+Atk^R$jpzOzvcJU&ZT!Q&B$g=QcbvRsvD1dvrn_(2A2lx{_>ETY z9axO}rv00aTm3HXA=}w&eB)E|_0MTz;=oUH*Wc0J?HQENyQ5lKSM#R3c3$NOm6Q;+ z4&nxOD>aWKO89pwmI{r8mI^f&0`G=i3j|v_*&oS#aR)Xgm@XyaysS=9zDURQ2ygWH zD~HtDNI}FZ@1*wggo{@DoGM?3EiDkcOMmZQgy|-$o3+?N!iZ3b5=qa16%)Pq_V63_RxPc0>jc_L zB}y>uy^q}&t>v9mwV^d9;a6uRRHB6L4BSmXX9IUqRieb7!d?{P&D}}O%biqvAK;zT zcr2j}>F52~(hGA=bYH7gV)p=Kw5xBoCgRw|om4m4u}kGR_H!kEhf9oh^NFz}N$8B? z?Q7gkWo>Z}m9>REj!?ft4x=3%7>#zkDYB)Rj^EYvPO9dmbyC~KRQp%{n4@=4i4q(O z{w?+G?Yc&UN|ewR!JX6}aBAmTSP9kQ2=z8N+Am(Ob6N@2(pGSN?&e2nTdkdtv71*L zny3_Nld6Pj{VDP0g?9Sos~fuF|JptnyYW53_^sHF_FX1D z;QR>)zAq<<{q4EjFwJ-S>nh9-XCo$JEx&eg9fandcNg6^`qEOl^8j~iHZGwrmGwm( zNX<*vL@jMJqcTQWsp67&Eq~iob4n5_QDW@VuiCoHJE`w3-l@q-g#b9>a{c{ zn;-AJ!C!v8*ukUG14=OdpvGyE&9Wky|p^|n}E2l1mfq8EfE1plE_+41Kl0J z*v8S%&NCvGBnj;g+NhW{r-X>LIAZ*1E$*Vqozy~W(MQ^w?uKbZsKoB4wlMlA+2?9Q zB}ydEeb7!@oIc0(iU0EBzgpDS-)}v*i@S%)FGBwG97NUfG4NRyB^1Z}B#HQ)!TXZ) z_qqn7y@ZIh^qs-oJj?S=s-9EZtvT2KD2eXj-JR4t@1&|k3A9MOs$n+SkNvB|Z7S?0 zvlu1y^~(ypw_>-#qUUaYc)$piD1o$FQ7xzF-m5#aEG4MLG4020FyKm+9EsT$xs&Q! z!PyA6SaYa{<^`IB+P1if6?efO2LB%Kigo85cF0*D4S4A#>FAc8N|f+B{jOR66=2t1 zE8GY%J65VV8c?^Tcko8Ml6@BuT{YC7!tI-JzHtW+UkbSt{QO zx_;OYalT#K2kDq7Sb9xg+~xH)Vj`B5608$OdtJUwMMaZ+TEZ<>KjP}$T~|vFww7>r zd87I03EtmL&O7fDK_Dl02JoYQ$EnL-S^I9|#b3N(UNn}d=j3y~N7d!+{%&La15ZUn zxW5~finJ%jZqqBz`@4}ZiR$5R%0|y!sqFr4p7(cE;`8-{*3Y-@mj7Og@oS?!o_EO^ z9Jiq-zHK`{5V543@GWgIqwlSg=l$KNR8|k2+gd{JMR|W$B}pPKZ85dZ19$LFYJS9= z`32tJwY#GQ-r3b}h30ps?-tELl_;U!wV1l)u&<+eu7ql7{k-4htW}8;TBl;lqIcYi z2C81Z*G9DUv#wY6cV!-YlBF_2*S`4ssh%JuN@awuWHF&Dk-NXUSnlseD~q<3t|jjN zZZq%isw7D~TZr#++TGn|-rudSk0nZI&$|1&&Ah*>glgG+uR`MfZZq%iDxq4t_xyVy zzQte}e>czjyGp1QJgPgHn=G#6su@U!o zLGX?b?77vs6Gn;JzeEUQFchB6mMOFTZ|5w$Mv4qp*Q2@=mt(N=iq&wPIO+u;q}hCk(aA1`}3 zK%mz5tF)~ac}%=A;&^z<%9h@{P7N0&NKC7mBWXxnoi9M z?Vs5OA|XM7PUZ^1C%)y3un>G^8zo2-?PbDFVAHAMixQ~iPH!uD=qYbH>m0T-+bBUI z*jB$rpcb|?ow>`}9UfZkChyHhQ(PPEpAVYdwfg7W&uP(aO=fqU*dr%8<{<3}%iY(z zMjZ;*$jbI!YhKeu2@($s(LO&^iEqnpyrf;Y@-=O|^lRD#2-HeR(YDe&%9pkdANab3 zH+2r*_ew~Rxb+D4W?J;oFL-bJ)XtufW#49dkxDHB1Ztfh-a=bf>apd{bocHNExq({ zN5fclY=xRN^c`GxE7#+rstw%}H(u{m`|9a1N|3O3GjlZ)jY}K3$8PTAH9RvlK%mxD z?`R%(&t>BH(hhE)rmeh!ovU+Eg2cVQDABQ;i5E9I?*ERv!Fy@qUtdQF688S@I>*GU z?(N*>xvjnaC%!2|0<}KKWa>g8tw1Zw?drH;x!j^;cTOxhpbzs~g?eo~`)Lm@G**j(b@X#hg! 
zj9(-CI}P@p(>kbgAD7qObH0@jEET-x9!ij~8G~;`1m>##f zUdX#;e?NECkD1X*G|OB%dUn^F=4Q(L@i~2sV)a%hy78sd=*Lg-8+B+vw5&-%mN#ca zu8R^RZofw9m5(s-X#aDsr_g*w{{;xt;wSM8m;ERce8a+S518-FCjrqm#B-f;L&1X^ z3S_J489^k_%9L9Wp521LXQ39J3YD7{5xZ%D5+u-CmfIW=yUl?FYT?;YxiJ!f8zWK= zK8K0~TKzOL&k+iu6LpG( zc7HXW<9zNb^V0))SWJ%ZO<-y1)Ju~kLUc^26Q~CgmbU0%fj9l-;cMqM*-S*Dw#jW3 zjN%MDxNvRHjjsq{l4Qa>8I9uH`c(Sb)YmpvAWVyCPHSw5YkltW;Z3$@omgivWIkhV zX^z%fq*%|MS0wc8`NfDt)R||XEi-%gc_f~zXY4GyXUVE+R}JuBNob7dQ5h&n0!91X_x$bKDju%E2nwZZc8)gnypTk#7w>8OqDFs zmVFkKBom2wmsW!?tOoL2UJa0@dA}N|2y=UI>3RxZYL+lpw+BM5)Mb zkC@#~R(Kb)&&iS%Ew|K-~kzS*N(7wIvnZ1hyYLS&GdHD9O zXZ9{ikRYp8^6>3l&+J_!P>XC-A=KU_|081l2bLXMVdRh;A$f!qz z5%WKgK&?SX_`WLmA7S%9P=W;cA3|szWVd@}w`0w*wqz|!seHTLGrJunNRV|ddH8m_ zXLdUhs72Pf5Wd~+nca>AYLR^{rSk1|&+K-TAhBzuj!NHdkC@$#1Zv?;-B^h1cIk7r z+fjl9-Ug0^o~oDPwHjR28~J(*?}W#eGLj-i zRlYbIp;(VELBiewKT_mbqzDPrdSj3FhaV~OEK-CLBy24Bks{9`MM$6)MRcUJex%5= zND)equ%7iJMV>{9kU*_xr)Z1#ks{9`MJPeSde)B=MJ!T;1Zr6i`jMiDMT$^@gpCD_ z6!G5n>yMvp%JX}Eeyga=mhaE*y0={Y{uHwM`S)Gj=0vmUcilgKaNrrAFbNWzE?hh( z%CuS;-_7jR_R9Mwzsk@0IdxvNZL@{)+`Z-VqdBE>BwcvLe9E8ijFQdZ{o%%wTW&vJ zuZRhga2g3t|Ft*AmYDdK@;E(Wa_fr`tYvxs?Sn;e;y(-eJD+@*C3$eESYmnj#1FHg ztEMiF6FtuNaDLbGIwj(gWa9RQ)1nKG7siQKU+?75e?pifncz0u+nnDhu1D_VD$eA? z11k`w#dP5plcTGSFNqUXM;~rF@sHz#FbQiRv3c>>XlM+7ZGrQsbLWhvAD-VVgh`T# zlJa~YPSr-xMlqq z=hZ14{ZiQ*WzS`T>o@F)1##l$YxA9MBYOy85-XME!EHFE&ipv>VPV+0rq4nlOp;7+ z4-Ts{FHWp~x}h`nyZ41KNixBs=i7PwEejrrS3P%~vvEYR5GF|`Y>rWa$L`jpll*bT z&tk%~IL%`^k;j+APc^N5-AWxLaY-`4?@gi}OD0u!?m4he2$Lie{5B`rYWx1VP1`kH zAcRSh37fmL9t-P#*K}Fv`wE0)cTbYEl0z`av)tUOS9yMf_ zm$mRMvYEl3wTJQS!>d8Ys#GCRV)fv6+n?KoxO+~c;{DIm%;KIzyAAEcpbZz>S*!T# z3rEwLKnW6@W?w|DzQK99?t(XJtUrHqa}Om*M9wKOzCN!w`3*X`-{2s;}Jo}SF^@yOB)kF6n)q_kBeRd;}CSR07!rDr|8^x{r$+kldwXTN}B&=`rD^N^q z`k*3#TGr=E@VJ_Ae33)r%0mefHm=kPXJYR0MMX@Yq#}_TRU-a=6wzq+y*rlJ`@tRr zK8bd)67NSK4e8&f`rYe>ZQU=9UxGP(>%8;(J=f@_`adl%5KRW>EZqd?ri+L z&xtbe>65&E9C%egwxhO_$0A1JhL*e z=45e5AFwS#qeR-!w=z7lGEjoVnJMjr_N@%>KmVLr%vJ^xsKvi?oiq=!mOQYQ{Qlq) zS|4D3GBbFCQtB7;%87fa^qx!EdEM3DZRMo7#9Q!HJs?zqgr;phrAvs8iv_7nX@XlImnaKVfU1Zo9)kk&yjz7Ep&6BGOAv_?tU*-p^kZFOd% z&yvTA*aAQa5}G!ffmSu|hi0oY*~&l(61dMD3o-F^wOCOyfm)h2+lN*)kJdq6)m)Sy z5gb=c%srk_%mhj*5~*<;j%OXRONz)Y;jbY|`@}5^NheMbsAZs@7h5tN-#r#5RAMv| zoQ{RGRJuCb`@w6im5Nt=E3Lntds_y)DC;>&kkE8m{9BXUx@4Dd>v||b!urOyWIVD< zDiEk;J*z}~T=6L3?+)#7!#P>RYpsoHULkF?bK2f0 ze|7faJ`S(7QYw~IBu2bs2a-nTM5p4-{VKmxVw{r4>yA=vgo2@*DU z`PR}!3DmNgPYIqyxm0Y!p#%vWZ#q*i_-dJ0eMq1djyL)~btA_s?0Q?cv)j?d(|>p> z@azU%8TOy#7{HP${k^`mo+-O&O>Gw?NNma173Z09CK^;5=5;x`J9pfc(_xe#fxSs{ z$+`QzcAJkEHGluT0D)SZ=JiVLDvNeZj0se|Pl*&y;<%y0(kYLalQfG|r%H)$v-#eXrJS;kq|=bWwr??xfHu z)>}B|k!+Dk6pwad)gnW}BuH@Depg?O#})q%t(Ro0hTD*Ldv#7EvzGf`J*`C1Abtl ztsZPElpw)%N)VT3u9olnRH;>I9ZHa}zR^-ScSVIbvGl}jBv8xJ+Jl*o^pbB(T=MgC z>#ocarjB+a zj_w#CHMjQw2}|o;0D)Tf<+qZNs5^@7Q#Z)_G5?ovDN2wa??pqO^}f6_13JyEO7K?xFgwuS!oz^0+{4$iFmQh-1$d(-p` z46r%(QJVg=Lm45Eo|dc2I(Z^`@>-|M%i= zr5+0&`B#8It)N8~-f*wXS9!m!oP!c1Y>v^r?b7n8LR|Rh<7^~Q%jOvM2fx@iT8JNd zmHKlDOOU`wF@4)`!rMhW+Oz6D;h+Qw^4nzWs&|)qEWenEK}T9RC_#dJH6hfyd+JYj zuV>;fLobQP^wo>I7@?l=WA>)d{N{IxQ6t2gdlbTNXpthBoS z(fhfFq|dMDmf_cfCHP!x5#3|4);-s&v;-wcSbykhuzr5lI^OMdaS(v?EryVR$9Foh6GBi?D|(p zv>sZjmkwPkc1gdpGqQ1|H~*94t-*11!-9W^ebu=0WjQE8g8X)AE8XWztuf|>_PWp&~>4CWULLCs50f`rvvV@vh& zGq&+N*ze97IhZ@n+E_ojGdiY@^auaz+q>lV*<8Xa0Im#j%A37;Z}_~`P0Z5gwFti{ zYQL*j6!+%6Y4ZlJLkSX=R>HpCI&MeV|CogHusb-IUyw+-a@1TSSb~J5l^~B^#x8sG z=IgWP?%)3*+TsE1>BmA>y)ap9(ey6qb5VkXrFC^SpL5H`^mP{_Sc_UW%3i7x%{RA_WCMB4FUvek+&p-MhzCt%$L!=iJ}JfT+SV} zX!J-uG-^!QQ{B9Tf;pCr-R@|e9dLV%%K?3uOg<5@AS5`Ip zm*)7yHyX(Tl;`rFifm@!QrUQA 
z?-xr;_qLy2mMQPxl-~yGfdqN4l843_4z}fQ)f9Nwy*RWOpM_d@TRIl{>B2#o6TjSX zEO6J-O2{!F%r6%D@E=i7jpDQ(5M# z6%<=S0<~GJ<^PsrShHkw&p4 zlpujEEfJOu+#m4>i{!yEE7YRciS(e}EBeEuo21XT{^^xslpsN|6Cw0o(Y(!TWF&r) z-YN?vNZ5R(5f+=_HoQ(z{jzjyAo9wkWVzN+oG-}Ljl;t8MVGATfymaPWr*FHF= zj?`lm*`k(*oxtT$!j^^2N@^{Uz1yBk#r7_)OSq3`?WUGK+3nJUY`3EX39{j(KXhM> zd;{r0_6?9gEo*h%yCb_@Y#;JiN-FNYrbXkHzT}|<`3Ba5rT8rCH72Y#buZP8j}rEA zkU%XgZ7fuK#=oRL4!t)!%i5iH@~uBOs)DUf-|X-6tn_)^0pTo^AYuKl9%OfCn2alq zupoh2mevT1*$kC>mzG$mcr;kq`AxMa=}3guWq;5g)z;GN16qsd8+GcQuHtRK_F9i( zT)ps1AvUgbH)Hi3F9|WA{vCmJ5D8q{W1$|EPRlI1>C_;Oa!RRq-KO}Zr1@TA&}O)2 z{|jPejQ`V3G-D@hA0*JKp})tnsa)3D^LZ{uapbvdAyIrr(tJA)ts1&zI^qMec?!G> zZ*^ab5+v|DJap5$^ghX>bX$+LC_w_hqd}u&+ZM6N>KtegAW(~998yB<^V%=JDXqIF zqh=;bkiaii#6lFskyVYOI7pxtez77L#X$*va{`}4@!cI&F^c1t*R*(qV~YgyKnW67 zLXG0s%E|wt1PLpxMsbip2@+N({pt`zor}<}Mc=^2o7&XmpUc-so-EnB&U|E^U(LSa zwEeC=rxDZU9r6j5AYp0qQuVJi<>lpN^1ce617OdT)8=j2ev9s(S1oT-WRU|Tcu$A# zYO^nkwDt!Q_FN=vM@aoaIxlKxApN`HRw}kKIgPZOF!c$PVA=WigttxSUWK4@uPzd( zMV6%y>TSP~`%dKHA=S!8MKwtTy464MN_Uef>Pt!kyzFaqsV= zxi{0-NFE-M>!Ab*@>nH5_HpSincaNO?wpn}pS{C^&-PGAgKuhzkme5Znl7y8S=h9p`(#Jdf|3_g4K*oqexnz`&yX zrtZM*O*^;ia6!BF?JjCs(66Z9WjPBMF8s6U|Ht1)CuU^q+qqca~_yqo{^DT zaLdBud6{3;%NyOHOF>3PM#%*ork=*%DeTgJ8IceZ)jzL@?%K0H%93i&FUV_{xh;jb z?x9_I(N0h0`NXxQ-;_6-dxFPA{TdCNSkFIGh~G}n&WrXrVu&r>@^i~4{vA%sb93SshR2vb&+vfKhwCMkp|LqnMQ z5F4i4QwUQ}hA?%W!#1b{q;8Y*VcLQrZv5_{=wA0TDP66pWJ245pL0VTiq51QP z1GWB}`$eRo*wAuMCbVXK;^o(u7-KJnELjdOCQSCy8i?I)Sg7U~n- zo;VsE`O0vyp{+WZ(0*ePl5;J^WJ2r7N|n^P)@?GOt;Z)!&c%kdpJYN?s85*E6++u+ zGNHX75~5a-QMLK~-sJgbQOLMst3+e?%#+o+6CEw-UyKUKY#W_vrDw4?e+NHAUwYn%RSLbW(14f_No%-gf> zDzTvwjF-cS{=xa6zqEvEu?-FTHXhx7ee}T0abiOy7%zvlz5O+zT5LnZK2g8M?3`H7 ziDE+~7%zttZI?4q8KGKiL&Ls}-%j6>8|^bnY^Vg|<*@dGza~_RZD`mhsy*MtX_(nl zY^Vg|<#3`$aXu;|REuqB*tc=rLj#;>r&?k|B^WPTzub!3u zp%RRj!*)Nfj8HAMp<&+!l@H~C{}K|67s51i`kF!%S3*dM7#oH#sfq$YT0z<+Z1R_9 zL=aVzW0mqkgLvZO)zG9o_dMtt@*M zbZMU--+QN%_v_V#L72bOfAJ6J?Jp4<8AzxE2@dObKUIdH9ZMoE)Z*{Pb}SO2R4qY5 zB}jlhB6i2l&g^p4ZQj-=H%RFsp<%W$xPFIt>A_7ztoiMO#Ym_Gg%>@knUU=o6bGp%NrGtluM{*xC~^Z>nhJ-8kP78!AC9 zwz0d*!PwSW&B+GUWib*eL4w2j-6t;i?%Sxd@;b4h64YWFkMEfp`*%jRv4Mn2kl?U> z_iePUIVQSf+E}rn64YWFyY|JrwU2iv8?#$BEI~pgNN`xciw*7~^j9o4RDxQ@hU42{ zRn`YWBuJRBAx53Ns|BShSMxzOkdUw`t31D~9($&@MNrN~g7I=#zehrkt$nQpm2R%q zhe}Y3ZBRYs1$CZ*gi4U$uzvT;idu=I?Mfx6#WtwDxqjQd#=WKl36&thVf`KnolyGo z5^C*^_79by7TchH=VDxtsyzN~%8&@o2NEhlg2VdVCrk}G7#C{scheSJX(c?0 zaw`!+B}i~szehsU5_wGIwwpqz7TYjwnrv`BkWdN6%VGWQ6Q)Or4V9o4+c15TY*4RV zf`m$t;IMut;+ft{%vd5eRDxP;!;CRxgGL{VPze$o*6)5kIQKNdi4B#Yma*Z{m}tr> zi4X}ACTs{AsWZ(;tu~O5uqi8^Ka33|DCZ)~3gAU*?Yw3X8!AC9wn06L z*Co^kX_djFXh5h02@dObS;=re=e3XYbCsYL+n|v_)^@U5DnUXeNN`xcb6M$LklRYI z=8TzmWvt~e34<}wCRCD2Bt}soNRj^%art3i3uqayY{HCvG1fBcT?tc(1T&6Js3e&% ztMgcpb50Wd1*2t*!>Yw$s#!C0nz9molq3^omyyPX#>IqcahO_YumZ62p^{|6?3U8l zKti>cHltEJHRmc}EhNksV-v-!rIKXA>=VZM0REudd zuGluD&Q-!%NKj4lEr)GGa*l**F>S_}G&WSiT1c4lhcq^jP%Wm-7-QRzRH=lukT4^L zZ9{Udl4Qc1w%CN&Kti>cHe*SGAZ`9jNH~lHo?iv6y*QarE&fjb1-n!sNSpr>5)LDQ z=U0J^$_N?3D<2b80yc!uld04mg@kG`ZOSk&l~4(5A)%*K0a48P#<)1FS{ycQ!A&Jp zl1!NPmLPl+6qibp3Db@f`6%`gn@}yLO}|NFLnW+*gq~}q<^u`UVw%PnS)&9*kPnrx z77}`{6%au_kWejS!_oDX-LA|CN3&!wUgdFE&$R*@m@0`2wKz=qXY^a2_kkFH`R2xXB7Sq%Vcs@_GtDvk@!dgh^=~t=^ zBvgxO>YK8^N+ndnT1ZeL5^Eoi{=Z^Q)~{E)1+5$2=+2paX`I8KJrs7ooikR#HK$dT z?_z@q{wroJl_V3x8@`1KPMeTasU(>=dF*NK_9;_rLQ;i< zYBAk*-i4(Z8DniiY^a2_keJzHd0vAm&)S68Kti>crf@8U5F08nS`sgZKdth)_w2yA zwhbXvl7?{76KI3-5$Ez@T2mX;M=@6+N;r%Jm0_&X`VdJnK`kcEEhg25#>IqcahTdd z%xWb;s#KCpP}@v;_x7~;SO}Hac`7XE-*ra(?SJegV)uUzZaLEHZm;v= zDJg_%vF%re&W@k{*bPLi`LT7_z2Y9P=8*@5Pzl>e5QPPAd^9_L`9pn(X#Q&R$eEu{ 
z@XqcNDaW{&P%ZwRkufiR{LHqcbe_0kNO=07iQcp8rU{`Enxa#X-+$%9M0|Vr z{>Y~VcYALyn39WeF`-)g-IO{J83Sj9f7x)SS2lW{5Gt{W!h-WIpB6vZ_eUZ=ZZaeC zW#3_5y*m!35URyCOsk}}TmPPA;j1TH>wWkBe}qs8+ei>p()A|ATeUl%h$dYZN6wyI z)O4vq%C@gs5r{l4saP-c{?n@z5 zi(^WR?ejVu2p2B@uKa>?CJ3PtwviwT3-0{(ljzp}@cc1*&wCM`3dVdnF@;bqW5dii zO`OldvqsI#S=}WdM8Y-_L}9@nH}ucDL>_h4QMO zh-sI97he2q1E*qnk=RfP#>?S1n_cEUF@HD_eJ(i^nYX*G^Ycc(e;}b+Y=e3l?6OlaZ>x$8m0-La9z6UBcmAK%hRl-NXJ!99~_y5(!4L|oadBI0$q+o>_fpU+J`VoazO$7IToh{xBz z9p2UK7H9A&Z;1_+*hFE$bGsjNi{Gysr*+A?4UvEL8s?mQtG_NmLbce2X$wRg%Z`Ts zcw>O`?b@0iO4vq%C@i>c$Y^)hDM0{I&Tx8w7n{uz2IVy!vEw*9CN+MpERvaESc2I6ePr}|Q!~&1J(W-`W5e@z^xa!u6yEh~>*z<kR&hH9}5s#!U6h=hiJ5z9V*!bx7;+pZKF zDq$N5g6{4Lv*Wv;7)LfP_%d(H{mrU*w~X}L6%wk&HmHS~bB3|qyJsJ{rLDKLOODu3 z3EM~z_8up3$|+j#jx4d>wO@tV1f8bI**()p=!#rU4d^`G|6?sARExjsJ|Q5AnMsaU z&))gFu6+YSjG%*G2|c+Fh#*x+s20<@k`0K!hDulq2|c+F zh`QUfWCJ3wp%T_YLeKjDCn4wiOzTQEl~9SX5fJHb0WdBOs}_e7D`YyDuzHkA zk_lbOrlv|I$%Nhx{57FkOzTQEl~4(5A)&Vee@&)?x-^wYtXP8*6~fd_8XHKc7N<&AnW;8Z!dgh^Dl?T(Nit#D7|qVf zIY&aZnATNhDxnhALc;WwG&Yb>Ev9wNm})~Mtc8RbSJK!(LbaIIHDjs`m9Q2PW)w|h z0}0h)TGx!JHdMk|NazaCB8pi{CCLQQUdoFUS!E; zQiX(SF|8}WR2wQ`EhMNm`2}4X8%U@Y)4BpowV@K$LV{W)zo1KF0}0h)ns#J~+a(=+ zB`wfrM%?ow9ljRv9W` zEhKauoSdrQR5s~u!Y$abeSK=_QcFxoI81wTbDI&IQ``ASA!v6mw+l?uX@a>0ut&Ir zgtZc<^$FXl`Ox%MLb#O@ZZcs~mG{?#YB6o{WE0d%5)uxZvJ!hHwTV=jm1U4a(L%zs z=`@5nnNUK`-q;@5FlT+qrK=K?s(>&jFLpk(1|=?&Cv(DvXJp1kkPmZGl*fdga{a%j zDh*+e44A5TvJGQ9eM{tg2%%b#r!++3Gzw3Rg3|pTsnRnmwqfeYCp_u0@_(B0UA(;5 z$jFee+{<&=+>9p(DP5GLArigTvY~MqTC_Nvk+G20a7`07Ou`b9G(-|c@3H1|QFO+6(DA4nop zi)nq^VC94Cq|HYhz9;g*Hjof4%`FqA+=R#|9@FT)B?QPh9BBo8Tf=7bF(bpXMJFyf zRBKubC3LmK@hV|#7=n{?&o$#R(-D*s(PA4K=A82%``qzg5U-S|+F-(jrF2+|`L9?Al|XJWg+Z#)6Pm)5d=$UG_w(GRoEsbyugf=X z9M)cIQY+v6d?IEoBUFp?p<&;K=}}@sC2Rx3rf(V>l@Y4NHZ<&)l^GethDz85hRI&AS_;OJ6hgI3 zK7cSIoaRF$CLa=Jg33+S+J35#Pze$o*6%)HMryI464c_bwo02o38zH0>{PAz?qGTK z!xuXI-|_FBX?^4H-uW3B+hWhp;BYSr$6_yC^pnHi&$@{3=$~5mO?l^fY=e=P#FC~mPqF%mI)!;$D6O)qi2DB6~b5+pwDULrO! 
zGV+KRytK3TOZR%tvGXDpfm&2XVuMP?i-gY3uIY_f*~j_rk^Z?TL84pTas=)**G zp3=`d-Z|vFF#be~KrN~tvGK^u$ClD-kxT1)+wbq|Gz{0K6;ncj1cd3e2-u)srbtUT zkXj-jA5oNGypX3ntE?;nwV>3Afcl7{1PQ1cBB0JaBv1=l4-wF=Jd_{-ZHx$LyB2|3 z&$}ydi2jjJ)Jij{v1UDwI*NJ ziHP3$dsqBGM2n2pUfnbP=G@cnU=$@t6f_woHfnS~nR323<5cg5#(kXFR}WbPYJJ`A zI4A z+ct7hf`r;TzNwR)4~sx8Es>e6@`-@5%0&qhT5cEa=|u$8hee>4*3+}@Wkf)oJ19Xy z>;K&zR}ca1$|6t;`;FChtsaF0wtN~(7Jrs|&10uYAH@EFeFA&1O`rscu@kH3KY3%< z{1x91p?shXBv5P7jdk-+ZP3+>B}ibr?blY%|D!W&4Rw)@gpOBPp1Gjbd(C<8ykX37 z_vhPRq_ysNHzQ+YzJ^b_^SJw5KmU6qbXL3NnJe$8o5ci5kl^sIx4%fSjHi4=b}h^N zVS0K(wK%3({gg1NdQokt1moq@D$OghVsHLE+p@Bz&(ECn%x<^mn8ohglD;m4;~efW zW^tUq`-FG->seL2$AwTyB}CjO!UYv+2}6sGjEt@o9d~v#Hct3xYu4l$eyUUgDM7;6 z;JsQp!q7qtAC5MVPzl>eLl_&|X8k+4tR!A6H=^??ABOm0-v?P0T{cKrsRYW(HTIY= zIpW|E5pezSo|S|InkCC8cW0 z;s)M@%L;O~-dH4rro|}43H_&a zeXr(Q;hZf6eJujDe(rtDt#QLqw`lBWva!Rd=e@tMTTa!_`#LB=;>I4Iy45!Q;@<-ZSdeq`ew#q8c6AQ9J+EEwHh%p&BHn$vjyL7HZaE{)?(d)k39Kjb7#Ul~ zd*Q@_oaP;D0=4Xs;g#KWyuC$TbDW?0$>^ga1C9o0I}+OUb{(&IW_Hfr-}_mqLIO*K z=JQ5%y=&_C$Z4{%pM&Ltslq%(LgzL-kh?lm*BN&0`A(-nH|Mw6yxYBP#$9g1d86}3 zjQ-HQzr%o}aTf_3k&&>+_I4xdcsqu!&1&;Ze{0;u^5GQfI2H*par=dz1e6sKO^&y8 zKKyY~?(wRfyei+1%y0co#Qi0EY#i;;jFEpr#%t7b+Zy6yB4|xb=ROGu5@_2ReWa{( zOoSF=m7CSBgfQhEM*`Xvr3!kKMW7b6b|RqHdMH5xT00Sw_tf!nuFuYKkN2|()T-a$ zxEorv+Wo!Jy+pt$>S5Zs&b1U^>>>h2Q4b|Zz*tGdOEij3ydx*4lIIO>kv z)-nIS6;~0l===uWJ8of4v#3p=7N#~5%4<{K%YGxA^GHEoD<4Q;DOjVZMW7azbR;z5 zi8|h(V_kBx+w}Kf#IgE>UAnvX*YS!T>ymTMv3?#(px4H+X01`wLoMusHbLvF5?b}~ z`btt|R(e+7hV>kkZin-hm*n0RB42;L%L&$o7J;_O!)^Dnk_DHiCv-G`^&HtSsp7S$ z*f6U*tE{jLtrfBdtME9imgBGvCL6Fq_E3TZjw_WDs0HimBpVnnj!{V4`wu+d!;?bY zi_ktgX76!Of`smtXzxq}?xT=EE!|Vm-kAv8tDyu5-SyGlnF!nuB7s`EH>ABYl@;ur zb5VkX?p|r{Oa$zmEdsS*k4uE^oxfU@O(%E3t~!o{hH39?+n|%XRD!iMwX}D(^ASY} z#;bXvy)zL|Ru+L;T86ZDCIadsiV`HWZfNgJ1k|~Q1ZrtppuIB@uy^)Qg2bq6r?)mI zVMO4$4-%-==+>u*kdq=J_Pu+#o-KK(1t(K+n|Pq_bpe4AB(VQSLe+Z>O-rB__J8v2 zyL4sl`=5?>eyG{56wce?dWyGWd&f9@cN0X-ePJYm6G0|2%<16p;THi>{a=$>VqB=D z_WbiiP1S=lPbmW;NEK-XVYX+TKt_?kkqB%g5hy_d%gwfd1ZrX3L_$+TD>EN?th$^U z;7LlKhR?O;9UY%cKz&eI-P3M;W~1NV=hq1iN|3m8%@NrT`X`Vp&wiv?t?##|5U2$s zHQAV3cVy=K*ROBIHc)~DhxP0v5~_H;M|9GzJ|)pQ_gSZ$J6cXR`H1CdFW?hTpJ=t= zY#~sROmHdq#0Oo@@XCMrF|)(Ifk7W+3Dc^@y_v)QDJK(GT)H%cKrQY6KEZL9#l~bY zi4r7u9Ag{)DJK)BcNvjFpq7q&KEY*l;@(wRTxKXif=5@j;h%CcQU9dXDFkZ4cuY0O z#Ny`fGC{w98I07l4o$?X_WV;$Ce9lBUJ8L)np&UWoQE&k$vJmWf`pcvf6B?k{O7i( z5U8c4?h{-OC$}xjBGN$#5?cTMDJK&-RbNgaP)pmJPjEfV+g}s@6DP(7d=#jJgWesgO-ocg3j{cLif5hO1hSm&Bd~oX!AyAS`a4RwQob+!3 z9C-1?EdKXckl?Ut!A%p{pmU{wKuI#ew13Ze|Jj9Jjg4<)ZJFBKL4w1o1vh47gNYM| z>=Xhe$pq8>J?GkKqrEv_9m*<~4yzX2{L!r?6JNI7F9b@G38wvfPF_pDxqW-q zIUB}gBEezRg70((Sqb4J6NV;&Q}x>`FLJ7)7#9!@Oq}fk|- zgg)>ITwfspBL=M*c=Y)s=Ur>Y;T0qjFm@SY;~_7FKrI;gh~QCl{m+xE8HZQGNWd6u zh$_F{pF*G(jNnA@Y_NBD#F}wB-Eb2E8clpuj~iLClc-oL)%QXc7Gbr*+KUmRA0 zRD;7t{Y)610_hn8kGoIy_U3Jc!m%tK?3`xtdL8}mbFeHPz(EJBs8F`f!BUz z$(FS-`)&fx$lPtkpSl-b*foFY2EKj$WKjdJR?G1lpW9mGpahA!n~%888C~bCnfE`2sBAE*T-O~jQ{R>w{+$zDZeg%Um^+q z`6byHL^jTUy(D{Oj7}gE@mlTb*H=H0`sg*MXPizTUufXXJ-hbOep`z?lt2oFHv11l zG(7&$#{Ovt)q-*}M7aIKLZAfdCXTlKRNXw%Cs2X})W5N@uq>58EofszTt;Q}$B@!3 ztEsF|0wtQ1s)4Kg`j{B8%L)lB^+@RF*~_Ijy}k5MDV8W|Veg_dnO!@C$S!>{3nfUv z+#aJ}tvmNKA+iq7vIx|InVN`xZ@wi&MVC%l5U&esAFK7yuUUQYt`IXS&O#!N1gvVv zM#&|ol=0}cY-Pt%un|WsOkpJSQ$<88wvferCf;lH&2v1VeLgo zfDKyv6pVRUh(Q%MwZOPg3sOkLtrMC^KB}MdP^pgRyqD1#hxblsFA^%6xJ8J04Gy+M z2@*Od`n$V>M_-gwjmUm56A9FUnVNEb!HA(kjJxLi5|k*xyI+in=BR@o3vqSU`)O@3 zp(&*Kk#c^Xw@!%TCr+~n)Y1~+vw=ux(^=yr=Tkd$&q4{V4eeCTdh;|sEAf~>2@=qD z$;NeEiX>Ie>QgKNwXilLp`IgZORBQl)oh7%3vJiIwh#%uwREP~;IS7aNI;l&MU)S! 
z2R|Q>k{HhV*qiOLLJ1ODhPjNc7Xx;D{)vj}tAfYXWSDbdcLJ1Pk-iW}qixMO> zCV!pi)f+GM(d3**TcQLBO|8G`t6ADxh>c&=Yl#vhv_$+h+=g-cTAy*|^Ep4}PLwxA z`br1xQeC()b7?;RqFWm!P=W-kyNFn`X!Ejk1Zv@13L4=`KFw4DB}l+bL^eiO3@lXw zB}l*wN5t0d4+b_+f&|Q>L>#=YG$2rd1kBn*tn0EfAW(t?kKlKr9-&*fqYX|COW=Q`@O&P{txTd!xxa$*iIrSV*x z?hSrFB*dek>1$DfZ>#auG~JjS-oKu0-21nyJ0XEua86A*&wH>|OC}D#8Y)8x68Hw1 zemU~2m$I0c{_g4}NT3$JfsTZhzPCb%QH@WXt~uv(e4vxg^k;8=M~J(=-cq6jmlYC_ zLK+j#yMMP3%jZ905vYabM(^8q#e~RuE2|W5WgzV_v=<4T`PwcaF8FC+2}+QFQi#!R z;+j{4sBS9cE+xO#I_WpD@A1Hy>@i8osNa&(%VX@(Kd(p}{5?GswRWTJ>YZ&( zD```fj$kcKq5lH=;w3Xos!k7D2}`sT_zgA26bXHEXPt|=tVUJ4`C{mU!L23VJK|Uw z30>Ui+ZJXX^zdyAjOTHfwd0Y{<|_u2D1j0rV3r`_zwZrBOQ06MhKYpsG;bFWC_w_| zE3z^2_6yPys0H&Q5d&^%9}t*xSPdkd6pfp6LzdkDZj3KrL7q5Ya2v zDj-k-tAV6cJz9Iil60v;0@e&vK?o1k@%GZR$01atC&EcH|!CJ%YaY!+7zvArJ-gx|d?A zfJTDKsvX$~lZ|8o=s3LnAsb3u*|T9ON-811Mlu03zD@w5%J-{V;X42%@P^@8Bkw)`d%r+1`=_+!sWW&Bl7NRWUq?G3;Ny_rZ$c#sbwj9)6}BNrtYFXSm^m6b)H7Ssn3#xIrY zL%e=XeZ-J}x*-DU+`+g|3)%t^#xIrIm4gx_pp6j$ZPy}D3;G8U#xIrohl3I%CJZmp z-bDoTL5o1GH)oe>-z38LrF!g_YR1|M_6_B+_O)r;Uc<7U<1+N$ks-l5NR=SLueX?H zuU6Xz5~#)NM~*2YV-OLwF1}7i9v(&eh3A;`4iaBf)%tJG*(T?FS2TKBkAOf4#*6mo zy#1Ymv;=DLeVadau?_P+(PO_<9HVfw^Utq(Rrn{V<_)CBeyJ!yg4fraC(AF@W4}}+ zP|H6%>s8^O@0z!q9{Z)D1PNa0vkl8H)nmU@Bv8vg(d$*g=gGm_T95rwQGx{TKiG!l zm+G-!DiWyWpEGhB1HV*H-`4u&-q=M#!~Pz}yx(OT@?tBQU@f)npU|5(!kmv>lwiD? zC(AFD%gQ29OUuxIe+BiCixMQXZY;l4u5)>@#rfd4P)pl_|NaWvmAt=Vf+a{`zp>h` zMW7b;o8Yy&$9)j{1om^gf0!5XLZAc*UiDACv1^6pmzqML7VjB;@7$GI3HL#?f${R1 z@stLv6&C-fgbm}{SFXN&tz0iTfUI z#=EAR{nd~_E%uw}aqarJ@8M>=Ys%Rt4kbwNe)qH+j>dhzJL7#;uHI)F7i#eiNmrSH zcTG9_C}P?f6_P^X@qN_F-%1ecR2WKa8`VpG}|y32gZmAwF@&hU_y!LkSYl+KGVC$0AS*+Aa|=!ijI68GYhNK+7k>c;uFo zN3KPn7W#Px9&Y98;ikJ(KI^HqYIU4_25rK4xRtYq8%mI{^I?1x%h^W}wXoc50%imC z)Z;lrTA#K`ECp+h5??a25*Gh4e~mI!{L0Mwn#u}hH4kPri@?5FIe}VsuSFX$JInc= znO~De23YSpxP#EO2;b4#y9r#EpacnBqgcLVvi3m<61s}=Z;^3*g#>EpTFde!lQkSl zkkHkjf47b6L?lp4*Nm1gnXE-of&{F0sm_frnfKkRHRN|#%-WeoQT8{GRk8S$@poFu z5&`R7`K=d~AffjBcL0zNi$E>SpXEy?djpgpq2=bkPJsH5->l(Oaa^dS^y$sPwK zNND~0ZziB!$*=XW4UP-7`n6u6`$Q@$XuBRtkofPE<+_6;f_@80_k$LJTCkfW;-5dx z2?&%R0V`)BmY%gBAW(t?_RYXE%&RCqwG8&odP2*4b-Cf?67jvZjGvd+zyIM(C0GKx zdknh?8_&P`d1Zv&<5XDQXI|*<541ElJR$)bm7f|!v8=EZG_`z#ZJioKQGx`Hm6a2y z1t}yO>>0LZ{KInZ)qCJnY7PEA1M0vvZ`jQKWBNN6ytrZEV?v+=346!VvDTW*PkUEe zMr$2?FDh}N7RQv>RRFN=$TOe zC?rq|#xAnKFXEs4)A#d22@?K@%58y9CYX4;k?-e)1Zu$;Y>1;%w#dtVlpvw@{1XsFhF;VlhM^XsX(lYc3u5BxE%i0Km5+vU0@wDm9u{_`NCCmJldO zC8Rcq;8s%QjS}%uL;`A1{2EghvQ6;H_v`VtUzGUau_I)j(ADDQ3E!+Y4 z#Gdh;#n%!g&~{^QHJPh@kZf>2pOK#@GY%4{1tWtY246Nn2$ZA}U3tVHf=8cuzn54u z4ohHUP#+^cJvYRJ6^l~{7~u>d=j}xBDEjo+x5e8DMmQ^87=zc0f&|PZhB#-)KT`@;ZoY zFmdVXV=08T64ui8<`dj@$2LB&gzE(*Nc28)k**)f1`}5{ea)KHI4;z}RX?40)mp>8 zWc&*P;!CD`a#$y-{|~Pp$;O%wuP=G}SkF==%<9$J(Zf2C2=*mwx}kdtfm)bC+m{R_ zNZ9#ckE%{z)NalmRk%XNHLmf3sz~^fRq^h<7<|b@f&{-WDsSDjf_=VxU$Xp*?@J+2 zi{CR%XxOzP;Y&u|XF{OFyp5`0AGOp}k%wD~4J6CKK;_OckQ_&n8goj4Aur&#R(MfkQU#SnK;PwcK3fpacmHb6fC#KhfCO!Zwl#)xy*U z9&TIM!wn@EFV-y0Q41P)mrbvFaco}L7sljFsq!OqFJh9yOXgGMZu1|2Cu2@<#tq7m-d55&fagF0ITYT@dPe&_Cm6+D7E z;a}b_!x;eQD6~hVTRv56T(W;~DN2yQS(MgSH~u6<JE)@CHs zVCWk{9+V)Bf}>N&|Hvie%eeg|T5 zBcbv8d>bW;Rw3bHypYE!MeeiBB8sB-;?_2@XJ-1s0H(&Yq!MJKYYM%w#(V?4sOQeJ+rdQw@(O^Ac3z4 z=oa~C^&nM9pcc%DWW$-#o_+ho21;NabTKCKEN+&|@7q&Sg#^rxWFtIfb=p*+7R;hV z92|XDK%fNHS4pWVh&K=F9McOoidOwcLM?s2JCFVD@D>1Hg6he=e>c&6^M~APz0<}$ zVcj$!fv-a8O#k|0LU@}xiT9b_bD$R9Cx~a55abz_g%Tw2ji-2@2|?axnMj}(zVQq^ zUp)4F(H!#YNT8E$OpO1acVEJPP;N|+fE3dFvETPm^uR|^2-L!IqhF19?oG+ZfSbZu z_^u1mo`?3t<4_3lIBbCuB%l;%Z14WSQ6Wy-y&Ox|MJ-Gr`TQ)IAq4x9!EK*-A8~E+ 
zJ3CA*&1yT_mT@aNqgR`jD1rOP;8mdi60lpmSZ!E2*PB?@!Z(+6a@TLK5R=!PToWZo zXxM+z*zJjNLY#EwO{Kf|S*pUz}>?90{yV@w*cn$C)F!K_V0t6z==1WJ&=H#(8fN%tN|OQ064AIQeE8J`6NN|1n61`*`r z9uO!&0#-OgT)%Cv&YafkHkdiBGdbFS{QPrZ17_qn%+yIl$0pH$z?wn=Rs)o(fjb^$ z-#%VtST%@Ru;L)1`;KY>ff88NB&CX90+Qbyzv~mfJAJuGqh7xHNNo~>9-d%K}&q?RLg!% zr8aI__IxHvAne+24U~8|b7m{h!nYVnufjCvLoa=WcVI!zA&fbvvQiuQz1^1Z>H$+y zNzQMZJr46>5q3Vvx^q?Ecwt=XHJuz1l?eK0X+_TbWwKAgnA zl$MkaBo-2B{-trJ-pGHH?bu$#CltwqXc>DPrkt~lm$xhjA`!3t+YE+q6U3qwOUr(J zAlKLsf~0ny3~g*2t`jM3Id+rDM+zadw){lSYu6quWo638(n`(8{q=g6g+F~#Qx)g* z`dY71xGjdMbeAvd&8rM5;RZCj+5d*LT_1jGIaNJ{7(P6c=R>VEiZ`IiPVk*hm z%pKdebBf5TG0-w8(Nd7_L~?yxetC_>v$xRB6bh9T2@O=}MTkRnDpM-rh+{r`8{!lV zZ)HDwD@7SnDV63q(m-U&|Gw8+_kO<5vycDvzy9xa`L%w}eSg+>jrY3OKIc69`Wt$8 ztslOjNBs`XTeWW9vU&UZS6|orx&d9fJsY`D|NrBEdrJF!$4j)ci~D@`zZWjVeZKmk z!Gp4P?AM9vg(ZQDZd{=c{n%7kI32187t#;iMffgjV z%sn49YCI8#?97W2;#KDHj$Lp4vek~<-1~`Gcu&I=fvp-fszX;9UDS*CN89yN1lF~8 zPkSQth+=D9^N|gfFzDgv(pbTqXT#{a2v`e-n7F$}kVp&b)#c7%S|#)^wR#M3_*+e& z#Y^0LQ^zo^HzKaDR6WzopatXOX;o@AiTHF-i4*~&=pt}LBL0~>XK^2RHCjlZ7tA?n zyxKR%MVNUGBLVY7#D6bwG|++s%q9`sn)6p2EAY08TO#9wBch3)KL17$Xh|pZ+(i67 zUygO@3SX?@nk^aFbL=hBnizlbeDvh;o1$p(5=-+2hAA>oKUiben)A_Xs@711SiFSn z6-^XgaV~oEnYtGvl1!VLlA8EC_gwV*cNP_(1vC_4#?27ibND(U_1 z_W_(iC@T$F3l8ztzdr|{-ta{TYE`x0-^{-C;W75>4`c>$H9$EDG>xthk(o_{Xt@YM z8o4+257M02Uo5`HrE!s!NQ8gT4T@NItQQhVB%n+Lui29R8ZjO(?zxO2tV+*(Ffs4s z_SG-19uGkQ^JLPZDBD66uJ4$@{sng^ddpftGYaG`RiCx_pp4(L#|E>tMq!XgS?Z5eSe(dbWkQo8E`J*jlYIa>40VUH>Si2$NsA2-e_UlTKI@9X_wv z`nqwOUc|c4i_6r91hvu-jbCcCHG}>!A#vAuh1d&Qt2I9EwClwh+s38!gU5ymqsP5B z8f)&H|Dx4vVH!bgS6)oeDDaFL;^{_ZHynSg1;xZvtJIF7brtp{^4G(2>G=-d%L{gN zZ-W*t3&9#(KkMjzv<#it8lV5G8Y}F%crn3!;8tpa$0_hqBc&mGo4riWRSR7wjy>_t z@>{Q+J_1{nM1sq-qtwdO8W(PDw6(#otSi+j zX|2|{r`@fs4^M2_md8gdNO0NbTe5+7u_nIk_U{YHqr203E?$OKdr}kEPguG2=#dgz z*&?IJWnLjXUN}#h7=3X3*5doCZbb_cT&5iy=8@3E$9Hzy`hy)vBgBgdGgfAm@aQ&w zAf#F)7WOiut5&Rivgl=ZLIgE$t|d=S9TbUeV>5y%d z*dFG3k>FLxXNlJM}50(T&8u!(OH#EY)@)pv~?tKJh%Wgl3v2}BcX|p zX>>Pjn%0`V%=jpR>t`LFLD^}VXisr>$hb|d(ZaM4TsD!~v}z2+_KtrPQ#(rPLN6}M z4%Qk46h*uATG+-|)EF5;jRdSKqwx_%>e}1JDGl7`ChjJgHfvW1+EG8-0i|K~hw?Hz zD#04=2zM9l+W+jYsx-vH^$Njdv**p|4%Cq$jSw#;OvErmGm6wZ=KreZLoDoNB6Sh1 z>OiN#{2kr5Vtc)WoD4b__4#~sv-8$`<^&={I$=LX@|E&Uc|zCXd#mptD*90?M=s z*C%|w`!|dlm0?!N-n==%>yCB{pe3CMG7-`m7bAQ=?PX|-wTo<(vS__D!f2d5Gp+#o zks_#1Tvq*U_%{^{pxiVzDDTsEtcMt5dzmYp@Ff;F>$u{1)wSo^lY zmxY$(CWu(}t7TuhyO`B&-=}f3V7*+HT@dkiAD&?kXxiHPr0=pcLcF-9-pjK>!#-$A z8sistwcoj}j@AC@5sDBC*2`tt1@sBzX1~4aTsN!QqFLLqF7)E(bw;%a)thrCt&)3R z?q=WqQA6w4ifdwMK?2G|l*_GW`)YKxK3Uo=`yzyTUaeV!(87`PtcZW!;2!q!%WGKq zV~4sdSTC1l&qw_G&eXC0=$UCvZTCeQAzrNg&a`5o)}xP+#!I=^*zwo@&i*cbUO(G`Ot!*cop2@!H<@y%ZgBpni)4OX?+#S?AVpkZuBHLfNoFc@61eeXK zB;txQHSId@KaxG^zHk~LUaax1{ZsagykS}kFYxu=-f4t*8I8D0V>@Ul3)XA&RIE((BQE`*R;38)hw?IGWxK{I4r8S(STC1N zv~%VoF7rX7nq+#N&Epb=sLNC^!2yVM#dmLiBvLL}_ zvnuH{Fy}{H&JU>zy;y@zICJ8db8tJKgJ?m5%W~R9{P`a~8k;sJnB8pFG$GV!*{029 zsgsjyk`s<*@Z6IwLM%vd+2^~_oSZdQOkg4|8c2v2wl?BF+h(SVkRD>aM$hzoX8z#V zur0^qtr`!Pekcoiv4)Ic#9#GTz(t4!2`BWFf?lj4Ya!xq zx?qEg5DOAqmgn@d)K0@P>fL|0U25arIM#(;{9M*_#Q#nA@h(CvNN`!6NBj*cd>zQF zKf}JI-g-G})s8|h){wnPKNIfYCd7gSm*siH|HI5~!ThYr_S|7p)u|>H^kNN(F%f?~ z`WYt@VnKq-@;u^SaA0U~PQO9+dnGDZSQmQnbBUGdgjkT^vOJIYw+(qTSiVbLyKc#z zN<%E@#TpXx>2&_ZO^5{vF3aZ%bb!{CCF@Eo z=*1ee5>1{M7+TInhy@8Q%kzl;>7zMOTJ4tX53!&ZYtX(i`R@PzyS@Mku^_=^c^>iC zzV)jp?Q=`^xmeJPH7GKK^jW)nwwn+O5?q$&5&z9IvttymEQwcQK`+*z2xsmeUP3HL za9N&5{L_|9j#1pTB<_j@y;y_dvAIXpeD47lAr>UKEYBnUrK6wV(;!>U53!&ZYshVq z&Zv7IauH%dg3Ize;^&`1{`vjsK<|0e3fR)+?S-Ky-dkdG8OLpDUS7!O>tH_LjSW2+ 
[... base85 binary patch data omitted ...]

diff --git a/act/assets/vx300s_6_wrist.stl b/act/assets/vx300s_6_wrist.stl
deleted file mode 100644
index ab8423e99da86cb9e13c197f8137b670f789ea2a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 70784
[... base85 binary patch data omitted ...]
diff --git a/act/assets/vx300s_7_gripper.stl b/act/assets/vx300s_7_gripper.stl
deleted file mode 100644
index 043db9caeaa59dcb0a6bf570956b0904eb60ba63..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 450084
[... base85 binary patch data omitted ...]
z_Cgt6AtBkT#z6MU7L<*+?N%Q{`l^iWr38DS46k((Zg4iNo-HUFVX+54`9nif3?tYJ zWq5@|e&$ZDRmv7S4-j?@FS}%w#Gq@Xgw<dV) zKYwt>7A9=&`p=jKiIEeD5r~0$ZB~O#A@4rh7%Hfni4Ag5`AG|{1q0yRIx;H0Wnz4lm%TZe$ z`hOB5ClVtN1NB<=3a^lG#lIwD3lo-W`xrTpNCK+_V!&cqxRsGpdqR#%*9JH$u8Rp< zGx%$j6M7z{UUp6)Ay+rWk>P%@#nvCF*VaD%p63Lza3!o>TNS}8B&3yS474k@Fkx#r z-*$5XYc(Yh1NGYK4qhQ4tzBcFwX=l@TZ{TWm=oGRO1%&dULhg9Hq*N({hTeft_5Lh zYCo3b1lG<VovCIRqBO!@CpeTMKuOSQMTAV0|?vW z@MC*U=(t&fwkZeg~AirXZ?I)sM+spJhTuxxmQVFZqb~)h{5|Z_44CIMy zVZ!!geJ+|4*n3q1F;K7V*1{_!Y!4aPD_fZ0z2&AoWFrs*_42NBNJt+~s62J zqicY(uU{JU)XEEL^zVZPzuy4ACW3n{_pas%wlMMHwy}?K%7-N}*o%8t^8{O%XmxEr zjj{8aAD6^nFYaB<6Kr8(+8cg9@!CJHEQ!Hh+`F15*uunv@9(GQaom(IOJcAW_pas% zwlLA|=>Zz!`dQzU#9%M(UCk40Vd9WO_tzK?w82_U{w3BD*^7Hu^8{O%xcN~Z*aGR1C>}BI4_7j^W99x*MG5`3*ZA)UXmyPq-7j2esY+=IYu0g#wFNwikHeX?X zw^^c?2T>Ojd!~JKyqf%n-;~5)FUyv(Pu(ox*uuohCwps*I}XP)NWSOnWm!M=+nXgE zTbQ`&x+;zFbn7ikVz8I3Uhv$YS;Db}iE+bvX^aOamBwH%TZQ5IN3(=u3lqQV)I($Z z@_cCw_OjI^p4~J{IJPiR_g?ofMv{-B347V97%|ESxru(}9(~=!0cM5rZvEynIAwjZusxMGW?`-7kzKWkeB!Elj*JWhaeMjEO}I z_Oe|(jEQAL5rZvERDaw_V-#b15re&KHxgre8BxSw3ll$_)lp*uJvNEEG!4@WLtQ=IjmyxiSjgQzJEF+2- zY+=I2{IWfnguQH>$1Z0XQN&;i6E=5YKS-RV16m(sFPpEhzgtEC!kRN-3lla6e;X}n z5I>5+UN-My*SU-Uq%qjSgyj-19Jit3CoG5u6(-zA(~>a zm*qj&-!CHoX$-b7VY%JkzFF9S5KS@I%knuq%_t)PX$-b7VL58c^Gjo}m*u5+c2hr zf-Owg8m_GEI`*=48`^I31Y4M}wP;x%bnIp8PxQg&3AQj{tL?I}#IcvHqcN705olK# z{-^uG-u<21X6~r{w`jcn=_)Ndw`JqCU8;++_~}3Y{`u`Y?xL0c9IGM;j2y_}~6kEdyt?ZoK#I z9kjgqgiRW+KB1$QH@!69DN5{GvDa_vJUx2#rsaeL@>iGZVj_OMlg5~|cfM0p#9*(B zdV4RIwP`FPiWqER;-IlRX^iWh&v%N780_`KM?0$54ac`@EF+2-Y++)_-kp`ca_4-f zsEEN{SEoCx*U*q5kjJA47ZL8iT#gxylm@Iu^@ zn{(g#wZR{fod3aYfN*skTbST+41Prn_t_VI#A!41!-lD?eEEWpmNndWp&t`}NFFWk=xLv}684%t$)EZ3J3eh_mT+ugVpWBwzZ*qQ`!oi79X#6K z!H4I3(a|Fs*XL1^7!3-`SW+*Y6yw*|JqNzFsBY1wx53cUbxW{XzgW$VE(As!bG>vJzd^w*=x@S2k4QY*On1Q47Mc4n_XpYLN}b}l1| z7;Ir;;6XkHG6Qkef4(KQd(7w5E00>#(C<^9Q?EH^VZ%4q`0~x4(7IMf2KH+AocH?E zuyqg;#mC^fnCMsAORw&aU(=dXW3bnM{^-5#9`>(>GNOpV7ACmQ`_s;kDq*jxU;JqB z)0($}Q9_9#2G_+z^J58T_l+}rc8~Q(S*D-ZQL)$1{rl?mL7rGf6i3AtCMsUqN9po> zm%T3Nzpr{B-z_7G7;ItU@7L8RU7okISF7zJ^+MiWMiepF!o=J?W2MX2AM7>xMel|6 zM;TGXU<(sG=8Lo347(rf_2nQx17IChMgYI-EP)V349D%~ihud}3iA@rT>+7> z*Sz&SfwtQ$;n>1N!@b?~>X!FG_S)zQ?}a|tEaBL~M4NN{O1KzHaPoSsj+^Y~TI6b@ zzxQ_#Ib8eS_%d?4@XXn3=zZSnxZdj^iWpoM6R(f;Z8wYP?KT~K*lUAty;q+N-fJi$ ziWqERV$^-U5B8fvJE`{3oh-fbu&iWqERqFWwTn6 zx2uG`CT-yfWR_(_5rZvET=#^}mdZ0^_PXP2KfgwjA(s(F47M<_`C^~-m1p|w^?x_` zBSEHLMiepF!o&gd{Mc2#VqvdSF7ii$6-yaW#9#{(+?&OjS_yl7-^CvZR$gU95rZvE zaR1jB1;HcESNr-A2YE@^=#yA0VXua*Jb~P&S;Db}iB*UB{X}F@dQ?RW_G-P6_d@?@ zmT+ugqG6EVzbx~-Bv zN|%qj?6r1N?}c%dujQK_`L?s8J#=)km)ta(5kr2{k`IU9J7adZM~;t zNFDana`cCGjbHA%NB>F3ZZj9V+|G4%w?sR)u8OxG*f~B+S{z~=bxE7X7GLhB<=*?Y zZR8zj=UzIpL)4?XGJgEJ9zhJQ_qFL=l|Jzm?7e>W;3m;S7gWVlck2C-9|)*%6YVz)HUY1nBa4gukW3HQgr1} zRdL@Zy2k9a-JMlZKGi8^FW#*Ui7W4(6V-2A71wUFeQ*bvSUGPOy&pf`u$kPC{Rh=Y zhyJrN9y4vbn7w${IV9@ZFNqG^s48BuZM&E)OtctYp)uZ_faeC^PgokA_F`pRGrn!i zUc4_J603grF`=ch5qk}bKQ8YTv)6|w_UM1o$v#F%UCB`lHbjm)RUm5Sa@PUZEc>gjae)CPo?40%FT1|T>Vha;vHuEuj z9~`n^yKIph)p3tJ6R{VsutVYxS^I3{4pnjQ&F4jIVdAhhHF_R?EIIb__Su^<`gGoQ ze#Bn9q7R9RJz8fI)5`cy&#a8t!bHnY_R$!AO#Jq_*4gr3E92|_^hv~Cyyp-Sw;lhV z^y+6TU-wz$z`z`z_JyfplqHETV*^Bp1LgLtSmZbO2sf^z`s$I+$ zCV1p??&s>I=}YTZ#Xlb0HfAs00}6?K2Gys>$gFmn%xY|5g2zhvHseLzv(LY;jNf~{ ze`+mp`J~kiyb7~k@YOdzHaNHUF+H;L)>g(n7Hpof7q1ON;*vvqX73zb6+iXfr}b=M z;^l*VPpdd@t;G1_#=Wvv%w~W;uUKN@l$1X*u2Vk^-mq@*}}wWr}-E%>`0H= z?D@*9i=6r5S9hytFWwyviN0$qv+HE8eQj+A`37B6T})W)FJ4$rVtl!%GMn&2Wqi}> zHZ|$%hG zVdL0EyL>J&I(^?d+eq$Uhi;#w?8WQzkXUbR`)t5lmGLLT=cR06!p7rYx58Id{&8FT 
zY>1rsE1l-2?8Q3>A#v6Aow76DsEnU^?xB<|OxV0Ks4t##em$pS_S9FE@pq5TOxcTf zLPFxV$92hOOC~dK*GVZ`nBY;#xyygqHEZ`xW!ycQoU#}1=!8U@GrDL0k!!W#wEcq- zjtL&eWUl?ktojY*ejIjtuZ-80{A9y&BCJK78-Doq>5d;%#!J81D`PKy-VzcUHg2DO z^RcX>KHe)>*D_(b-GEcEle&E7fONdfoX7pXd&XY8zZViO>^&lFFZ0LZKX=X8!i43h zH%$IOVm#65^mMZH^Iqe-X6(iLi6L=C#VzT_($5Et>722J2_7-ztN4#kOJDe;GCpN| z=Zw90$1^0lA37%;zO*vFjPr~mH%P%_tDsxr&^uv5VM`<8Ly?+PSFuopi= z2#MZrA5nj_WJ~A%xiVu56Wn^7lY1W7J-78a?k(?^*^Wigt|Dt!?8SR>A<^!!={1LZ zB)xXa%8V^c@D*`x&pl_=ymn1heDp@WGWO!<6d_Ud_7OG5%9)=dXU-NT_!>GV<8EZ* zuB|ySzVg10t#^yDJ+iT#y?CD~BxL@GZ2n*i6Si(I=BUW#DE8w0vXB_uFeh3h{bSo6 z+h=TH!geMwKRUPakU7yQ(srMaw##0;?-vsHYMJ{E?8W<+A#w20BcfZR5B_pr*NiPpa0_+ro0g|XhsiykGO}yNUc3(*5;9&z zHeRuX32xJxi^i6V&Rx?{vu)&@{6@+j#&y!aBS&>k#`f68cJ|^oU_zopb(eVf>dN?U zn@&pE!o)p0_?-zqtDU$_mw1@)m$HQk+j;R>#OkEgs-8ISEdKV>g| zvnnM1wPEY{L7C4x{&8i>7ACki%NH-6Zymo_EhEFC6)Ai1o^D9&cfkhntCC;27r#&0 z!UXq!=S~>gKCY5AXT{I+BHmBpH`?rq_;u~%*SCv5l(zfN$DWDUi{FO}i6vimj4zX; z>Tt_L5nGtBJ6o*JV_ToI7r(I=60!#n+a3U0n6MUF>@&o+&%j>%=3z+4UPf$t8Ej#~ zdYWHN$l4&bwE=ta8msRn&8B&-ize^^NcQVGVon zo1h_aw(LGmSW_9#SkR${Elk{YtB)abxvYv7%liM~1(oq%wrE?!Ui?ODNPH%%#|>mZ z@%lG9$kz~?>SDrT`&DYPqB6ckGUQ#p+O3|w_|4pqcg4=>~-8!5Y z_niLO?3>%JQNp~Ei#_pr<5${ZutV=>&O4``iZgcuCoc=yRVjg*JTb_BdDv|v-#p{o z2W5C6r{cWJxtvIhU@w%B>4n5C`Bxnm%2(3Zg0d0Z#+*Cx>UQyn8%NE#_P`^QFt1jR zd*X)mK57VwXJ6P}&%Dj9gEO|25~#`NyO(v+?^C;XG-cNt=UjK|n2fzZBL;HZkU$KyE5u++DFNDN;c~6~ z7yjhxVkKOhhdfb68CN%4pFeYRJ0(Vpy+qgN=GE>eywTWm?wMzm5^Lp)i#I%b_Ur}n z#YL`*30@~UxBluU>*X|(!&^?sO1%&RE7OpGxBo8*)|~J%f>&hXTSBlGm(gqeO85%; z0w&jmvJu=uojd8fCu_RbK0o^iIS=;wswLi_^VcdQDFNEAg!dYISK6b; zXLBx+mRNcRaqRH=$Ln}&*0)OM&X#+=U#oxieL?Oy-w)mi35gr8nw>uL*3olrl9tF8 zCa}kW7=Gm?JqoQPL60i+!coC1Bu3=lnp`Dqmo2<^g53|q@NG9I5+kf$-XjT#EAwwn zE<3JPcWbRKCa}Ybzx$PNP9#Pk2BPymY)G7Zb-N6WYpRTiY+>SuLwyX4kIu=Mn9c8a zM;*pQwlIM`Ssaxg6J?}Ej07WfsTYnAULheFfF2cc2Db2CFZN^+!|(6rL}G;1%R9Fr zAvuG_K+eDxCayUEZyx&mK{J4Cmrd@N{itLBY+(YsxFGymO|lFgB!OkhrtMiF)R(+pN(YVXKRYG_uS zMz9ykSfzx-C%O92Cv%1cPi5d`1V0gSE+-Nr*b8N8*P>n2^bDzq7&B*BJzG#V!j8(X zKP-k3?1eJCLgKdk%%6C;B4vx62MBvsV;}81Usfz^VS-zXyyrDu~T42>sRy?LkZMnUMRyWBzouHV!crG z4cKBw1;VbnU(t7#Ghgp7qh|Nt@meLUUb_nE=PgQzN9BG<`*f$@F*4{;r37m7G1hGA z$K5YPX~V`VJd3w+7w<#acq&N!Mz6KV;n180KXoYMb|4mk`5b6#ZrL zv9s&vR0K!GgpH5>?Sxwv4b&J1p0`t6>VbH086|w{EsZ39Hve)NStCsDz+#H)y-1 z1ZwgzmgJetTcSi`*$dv9vaAMW%Q)~hrE@uv7=gOX3uSnP1Z6V88(Ws|fUwNKzt=~Z zj1pEayikT$NaR_DlPn{6XMzcC$Ig8jyJ-X&h^EIau z=4BcAjYs-6hJ3}pr8iCTc3j=tZXFY}#8Lt^dE$w&OKVQK`|R2C69X z-fLXyg(HMlNJ!3r`{9sfumxo!_|7_)6NwS*g);7cNXSU7F)&iI1!W_+EjYJKc2akJ z^^)0JiE@_hm|}*sT~5rkw!`W(fP-Zx7451^+ci^>u2uEsh5O9+-?_c|KXiVjUN`5Wj&by^5FKA!o!VH7JwzL8vELpN->;rp z)A`KWc-l4V>rN_;9ebfR_hFwpBu0KXHI2vC#<$m_IY%3Z5!Y!3ie zl-Hagv0ukHT`WZB7gtBT=HRR^Bv!sZHL8&_Kjwi&5zolsc+O}3?6|qn!E)xWRy?V{ zmBE=~UJA!SCf+>e=jhDgwei5oTLhzz-E)kI;Zgmq-}*7G)z%AVN`~1~m$evN1#Y3v z$<^AZzWlXE}pvpTw1&V25u0Xpu2z|3hQs-H8OVmtTmOBY8M3DLG< zs{XDB1ZFax2g4ZuTrn^D;1p>kqaTR)uJNoE5@WA^D#HD^@BS>}S)a3vkXUuwjA-`p zwefD}Er_@+F&z@0Z!<;DqpIh!i1RjXl_4RmJ+ju$78@Cmx7&!}*En5kJ4b&%u~y!! 
z`6=R@nhCDSxoe*oruXBTn^r|!7aw6rZ2#@lnoctMJR-k>V2h1DSVwV2?%dO7RoAp1 zQ5&~Dt%a_Na0jujVS;ONZtmJ;HE1QDt#F#@gTQ)#&nYC1JG;6bFMkVYJnHQ{c=gNiLPG9fYIl%FHKs#CM!3{Q zIJVf>jx~;r`F@QfZ8x>H%lmLla807A_r$Oi{o`I~CA<&AV{k}x-F8ZfGv8ENJ4a_a zBxKx`_u%}v%NCn)Y_AVj-LEF3pQqN(dGC!0uE{y+=XxIK=e+C4S0N-8EdMCoSVp*O zj-D8fiTN`RWcs`(r=w_UqbTnJ^R8ML$MsyBOKSo=M)k$iYkFoly`wSUP#Cs zmD(J|^DEOKA#;?@=a{2xK1WW)Gj&K<27oz=XMLta0vUj1OKh&y#lRv8xvfUbKlAxM32fw4bUeIAW)ax51dm-WYS8;3(=)w zs{Z~11fB_6i}6qR_&i2S|5z^b;H1$Hq}C^pw^_UL&mEb#PWs0s5@Y=RS!#U(Ph739 z_~)lg%$Bj_8JPzUJa0j2;|H=yro;2tMMeh1Xwh?7YNLTYcg8vSr_&rmGUU`UWUkA` zM`Xx068UHJyUHl~w9JER{`pgCBPZUCVS;ONZpP!o^r-4?T9tBLe1st(S$Mr=;cT(_ z!`>FbIr;Y)TAWc`zm3dMZBA>UZ*stb_b-^>nnY#u=VkS{=kKj>!5b-jP9cFg3dB1< zw+PUxPRLQ!*iqSxtXG%bTMCa#?nh+z!*UVa zHKs#CRxFXNSlD8@7TTC)PQJxRON^`~S{8=3%LLaX3dEB8;5_~+_uTRwv_#8;d|Tpc zwL)?@5PL}5<>*X@F=QPT**c0XmY3SwhG+}EcS)~}tk+t`ik`{@*CcOKN%m^J)-qPq z#aAIDV#(VvtKE3a#BhY8_xdcq_T!3-iII(owg$j>WtqNz_RrU9!wHKur+%^X)ZpDg zKBw@gWK2{7W1_7GFb;COkdWCxS6-M6Y>k6CgXxfv*+AD(mbgc-nAMShmy^RSf{ z*0sFyVmge0J}6s#8U7c_a!`{aVah{Oo? zvKU^2zJw@ZaC9d4XmLdN=Z^|XW;iOS&pBzzFIF7pz!r~#DG#0 z*AyNVd*NH|AO?x$%qhf-uNO)Ax1wg&f*TVPvty~r(u{+3K_!>V5C}))s>pUtZ z_=0MO%V~2Fhq-%^wweK@kJQ z|9%IV;42ay71UKwFZvx+P*6sHZ2qX&3z{Y%{`c0-1YeQxsGyI7dND>p#|CALUCkdA zdqI^3#Q#1rFu_+OI4V^JLcLHNf)WwR;_?4-RO|&^ArSxjD9Qw1k?^SO*rDxXSHUYZ zmyFa&-Un3~2uA`htJmu-mk<)jkJNnKnXn_o6$y{Z?lV+$ti^bZ?UES9QL&fZ1Fx7} zLag(snBXfC9+mY8Xk}RM@+#>iF^Z#NFKe$}^Sp#u=TR}iS0p?t8$Y0SVj~e$N%*QO zk=Pw%FYDW0wf=uODkk`f1Tj=A1+4@}WurDUUz$HE_OfwsorDA`Z&yt46$y{Z<|W)g zo8i`Z=80V^_Odx+omY3AN5zEAKDZ*`QP~`h)^6FrI$KF`RP1H*>pENFI**D8z9QjK zS-yjj!7`_H_Nd~h*voQ}b@tkI9u*UOMZ%-9+z?}iWtr<7eTt)EFU#B3Il`^;sF>g@ z;@rLR*8TS_{;%#td56U`c%YGKnm^Fw^g1?M$#XS);qQ(ek5A_x@JjWI@*MC5E9_3xgU(b$NABQz@eA&+Y)Q@WF zkNo1IK2v5-h`BC~fsp|*UYU;=@-I2=xW{Mz`s;+4y-a<|tHUxOF={LZ*Tux!zrk-o z>^$UJ)zrK+V>Yf8dzp$B?ng-6;o3ycd>Qw_wPFhspY4XPDck*+zT@#x_lwV%jeE{s zrUr&q5)%J8WK=ZlylK_D$vtNa6XDgBmKc4v;z@0Z>}9HEXq6$c`v><&Rr}mky=TjF zVzw~Bchh7%jR^InvFH>iOa%)H&ywBq3i$%9o&z?FWXeCUr3XZ%vAiujC z_1GUrlM7qf*4R`ZeXe#rY*HGAJ!celKM#a^bC z2j$j~_-daH@qcDtQ(ZOxz?dyeuqu&r@5viN15W>Q-NsA$#O!6Ne^71>iP|1L;*+XSU`-o&uTOqiJni3S)cslB#bGZ~#e{NeNPK(X znD`%EPpGcy@Jz%OCRm%tx!vR~q;_>D)Gd*>kl4%AJ)zth67T+ee*Bwv538OwV^YKx zCRk(0xd-L_lGZOAUU%%wiio{Tr4-7oA#vqvh zK;5Iy4y$1=R$C2;-j7X)=MC&r{rl?=s$mNgtoh{J3W>4y)V_5GKRc|Ry(}J-TSMaf z>nFqm?%JdJ%=x41*}?>CSNUu8*Wd4fYn8H>T{kGVhQtFGjgL>fRS`{dLL~ zCRq8%k0n!X{b81jB^i6!xB}(YkeEE_i1@Q(m-fE3XS<9oOt6BJ9}`!PSvpI`#EiXc ze1vjqNGz}4FFxqf=X!s*`z{$OX}fsmO|I*GP}_qtwlKk8 z?3Ul9%3NFb_;s_slwW7Em(9CSjSPt?kF}B~J6hupi>uaM9lV|Mx<)+~JN?W)%&UYfCm3DyPCoZ8*^w>O`{+Lpa6 zzebGa3AQl7x;cLRG5Gd{RX7j!vb6}VRY=^l)4*sF#EZh&aFu^)T+OE=WAGGX^cEw({(nH$~329epr{Vj|Lc3xM z6ReJ*{UaSPci-OVAMC|yt05u%BOO2MtXb$EY+-^`KD3`_Uk^C1H~Kkyu?}lUNI%cw z@l$7^pR{WR|jjGmTObiJbud?^LK0XWM6*+ty(|XA2XoJ)(I@_TtF4)yPZOtKHKB zGzM~?kdVA2+whzn>X4VPg$dSB(fle)-rb=Z`4xK|a_IgV136qsNPd-_dR*5!hMG$bTX%&LaGbCg^m~HfNQit^*TbN))M&Acp_L_)3 z7_pc2N35MgLe{m}+Tl~`u&!kb6Ra%h$C5XWoKlVTIeXc-f<1tcko9?X$DnKLus&xC z6RdFN$Hc8WU002L2KKV?5qlXS@p=3Gvq76ps~aZ!3~XV7mD*&lOvd(yE2dSyCBLX* zFB|8vCleAo_vn!wv+tdC7s-AJTbN+QIp-dbIjVL2oz*YPz7KoZe1*NGkdS?!?CaO= ztHZtzTbN+wJl(&_9&USoHTJLA%jR9|k%ffpUuE+?dZZ5fS8QQ|6##V~E6=)}B}lwy?$w*HjQfC4mwRE8nb> z5H!hmQvFet6Sfx)SN~rUDC)5O1MkU-FA?m;>JjTC00#TJOz^!4W3U(NTa*!sAqHE@ z3A*PA4haN%u}(+x_na+Eu#$)#kk*e9UlTq;-c=8XguQrwzl;zp!Ee11eqzE;Hn=8@ zQ4s9K&ncQETxe-}=D|m3G4Ox&el*UXf0jOXdi9@&24&H#$)86xG1> z60?N~oQKgGLypL|ME2ryDkBs_47M;KeY5f6`%BIoVPd`}GWs+=Hs5PJ3-%qmiLggjJJyAOX=EV@C@$a~0I#xI8W8mfj{DjIHCuHmL$`0c#BzpG{ZCW`xtYPhAM;g;nZ 
zZbb~PSKdk1zxyvCh=yA#8g5yx;a0RbzULt!8g8j*xMjJ9ThW%dC5D7(xTT`umgO35 zMRX?koK(Xt6%Ds6*KjM^F1P%U5Dm9fG~BXW!>zbJOoZ1;G~80raLaNHw_?N*bv7+S zds*Il)|&ix4Mf8&6%Ds6*KjMmL~BjU&|VG+(Qr#e!!64-+={xGkoTS99P%+|mA-!6%UTPhlES+3z$)GJzRT88#=NQj18CK_&8uHjbH z#e}?Dt$)YwBUQsK6Aia4*KjN9l`m*(8QRMsAsTL(Xt-s$hFeh=6Y^fT{@s7qKs4MU z(QwOh4L7ymPKn}>#=t$7-%#k^eJc?Sw@5VHvRuQh@Di;xEu&q9glM=$qT!b18g4~h zOvrCG^zZ(6kfPxhiH2L2Yq%Bl@?0AdqTv>ahFg|vxD|CVA-}rNzx$D4@&TiwsxK?E zGY(V@H;sWePk0;*iB@lJA8os!GTXyzxD|CVA-@dNzhmT+U#0BWG1@`e?sHzltr%Z< z+zkoQaH|mww=CCiE9zpx#$b%7s^L~68g5yx;a1E5JTrs@rUKD$%W@4jje)wD;89yO z-1Zd>w=CCiD`qmD;X*<*-0DTcEz33BiWp4rsI3}q`0ja@Yq%+a-%*RIk=~o=#SQUY zHT}E)m9c2J;oIt2uHjaAiPoByxA<=35+eV8dzNds6?HKozYNpA`!8mRh8w;SpXC~E zMZKc6rsWn0DXt-s$hFep;!+ea@Cv1Y> zC=wwWZn0>%Wx0l1VG(sUr9sHA#PsieEIIb__OWQVWx0l1;U!vYTEFWcYq%+a-^z%pk;Z^RxBPNa|L%X&l7AyU%Qf5z zFVR}lGE~4rLNwfB(QwOh4Y#5$CgfL?`gi{;O3`qOMZ+!2HQb7NMQcsV3p%gC?=G2; zukVY7Tb66M6&5Dsx1{=a8Fr)=(Qu1J!!64-+=_ZdYfZ~f&i}O`Bt*k477e#7*KjN9 zV#1Efe~m;m++xvi%W@63qF&Kj)AG%I*T>gFwI)x9hFdHeZdtD3R#=#@tBX-dHQZv+ zaLaNHx1wHAWYcoDDI4Iowpx=Xa-D`O*KjK=Oz>!@8g7wjxMjJ9n-chinIYg&eebV%eHJXx;crZG?#6V{{rH-1IK4LUzruHjbHD_Uz> zhK6)VmuHmLJP!|(6GWcK7iH2J&8g5yx;a1cuT5DRq;G<>uRjbzI3DIziMZ+!2 zHQWjd6E?#6U(Sk#TPzxGS+3z$)GJzRT81inNQj18EE;ZEuHmLJP!|(6Qv2WCiH2J& z8g5yx;a1cuT5DQ{x_n58hFdHeZdtD3rZG?#6E@@cUoVS>TPzxGS+3z$)GJzRT7G!W z7x=xi*5nD%aEnF5Ez33B3JVi9*ZN;Fi-ub)8g5yx;a1cuifmer+UReewI)yGIzL&i z;Z|6f;L%Ps+-gL_Ez33Bv|g+&MV(E{cMtm)zWA;+`CsIUh8wiDvRuQh@M`y*--m(b zeMsbbURkc;R@B9WWqtTftZKMH6Di9z+%yL26|FTbuQ_L7Lr92*TO=B8S+3!xF;Ev1 zmc9DlsEUSLBpPm6uHjbHD_Uz>etgl}_=U9AB#1KWcEZ1;T0y_nwET%C& zfA=kX9l!W{NU)bEvNc19hFgtjxMjJ9TTvGi;m$;^+o$*3R_?eTqGY8pa5v?PCt6d{ zuAmL5?TWobYfTBX#E=jTw|dcV%W@63qAn&ZL&g3iPoCNz^oP$qT!Z`hFg|vxaparE+%Ye!nbzOa7#tQEz33B zih4zBP0MKQAt4%Wsc5)mxrUp@KwV7Oo{aBl4t-7?+u2LB)|9|(5E7!{mWhU2mTR~bbul5o6W71{IZ8C#vRxKd zX1Ru2QLkvNX&JLxNQj18CK_&8uHjbH#f0sC_}N)B+%nN{%W@63qF&Kj(=uk~kPr>G zOf=lGT*FOcpe`nCPsV2%qT!Z_hFg|vxE1w^)|!@)WrT!ixMiZ@mgO358UuAP!M)i* z!z~jHw=CCiE9wTFuZ zn$xb8U)PF;TP7NAS+3z$c!}1Uma%pY3DI!NM8hr1HQe;5P!|(+&x`drG}d)}&R(Lm zrZKPw5E8kLyY2z7g$ZlB#XbYH;&q>ay+mtGV_;_@Byv4^-OFGL6V}s;wE^^y=`2|Fsk zN)-*aOf=lGT*IxXSG3l&j8$q#h=yAx8g5yx;ifTA7ZY}M{c2k@+%nN{%W@63qTWA0 z?_*%K9TK_5x~|XJ!UT_%k^!7|l73_2`m4W*_z53B`I``JrcdIQ9D%*QoJfqYr+V;0 z8D1e#d4MV+xL0~NL~LPV_M@9V+4|@Cu0?&ls;UR{wMNh%HQ9`%Wv3G4H4iq?VjWjIes)g)+QC zV#r@6XpE@q*)?oo;_tWm7#;UOYtMV+4|@Cu2OztDT0?7!{g zlr2oWd7W=p73m6zQA$|7@Io10Au*+5j9#mg7C)1+g^6Sv-#^@1byWc*( zKw^{oCGUAAvU#ug?j+HR>ast%hcF-i%m7hWjCDez3@UAULi5^hPUyKSMp7-kr`W<7?ka( zG46l7PGXc2Rxi9zhF3@&`L$OvyW+}oGPW>r%aauvW4#TYmKddk)eA3_;T00A=c?XB zvizruGPW?W!3(=-j9I<$Tm4eP>V+4|@Cu0&w)nSd>Ro@yr5Rh8s9Cv(#;DYfC9Ga}aT)K)_@ml=i+@9J!rdU}!F4h5bKkw`TIEDy zgw+c#lyR*>V%O)qPQwRsKiI;=$bqe{n;oJfqYdf|mKyh0*9#A^>6CgT-bn7Ht>-)Rg#Cgwz9gw+c#l;IT;GK%VX zVBBR36HC4t?9W5T_MFg>y3|YKiC0L-Y@jhPf3O8*BbNVlh{o`9R8AyDuouek3JICj zGzR8DwxDdpdEfi@@BCbw6NwS*g)+QCLS|=;f%%**C>xQiI9$)e=L|Wa89=EQ2+bJ+ zLb42vfxLw4LfMEt#tzjOKKIFq#0d668D1eF*^qwgPn&6yLr z9xU|&0k4pdRjS6oDwQoL8^Q0{>be$$u4_xZK;XTckifVuK!*R3FKK8S9ksuHxkJk! 
zculSDM&KRc!b{%3FX*xuiN#pbuq!cIgG(x@_Q_eap9jfEEyR%2G_*| zk4j+-_LAR~X^eaRutCWP$1%7rCU|rWW3ZR}K2Bpi(`EgVk(y(0T}<#S5yoII`JJD} znETeR4P}I2d5@ymiwU0Xf*6jyiCc`nfE+#mO3S+RB{0>%Qd^hULhB87he?Qp51ZRU` z4EB=W^J9=V1)?lHXTrj5b3* zEXnja2G_*|uQI|I>?OYw*BDv!e#wf3V{lze@MdR7suebnBY}k z7=yj!cl#RS(ds27D@cyPbuq!K*DwZq$@d2|#>%4>m8^t02G_*|ufoF^>?Pk(&=@y= z@^;CJo?~!bOz>_(7=yj!dk`Aq$FCQb>`ZVBu8RrY6$xS_>?Pl|&=_BhTi8%W2!oV`7V;iz}VhA!4@X?=|C8Rz2y5&8Uu4w^8{O% z;HMm64EB=mRA~&%wapW3VS=C5gfZAlzPF_@G-n8&6?w+5Shy}G_^DGEgT3UtVHyLu zPxAy@nBb>pVGQkty#jcg$aJzAI4xW`Cg;Oz?!pJ!m))3UWJD-*h{`UsWGs2E+Yi<^Eq3X z;5`Rbip8D}v}K_&qHl8Goiuxs2k(xdEV`Vk$K;$S#p*86z{$%}wlEf(3DLgIi$12x8D=k1iSg$Yrm(HLG2bf+B#WMBTibDtLSO(*u^cgI3P z6ruIZU;Uz6#ug?}#(DVn^mml+m*A$qEZ?SL3ln&cRPUf$(I9Vfln^*`)WvU`hQzZk z_@~Hib{(9tg$dCW(imPlRg}iF*&S=EMQL0K#K0S|{2ppZoP2dVJ@cst9+9zy36yaj z{=HYx4c9Z@sJ1p^3ln%J6@-6_wUoe7p)P)NH6-3#vrwNqFO;vOv4sh|lZqJrE!I*3 zF%X^KTn!0P8rK-2G@h}A2~mmC7*L5)1^aZ4yrC!x_DUcIqVxNyAt4I(h~Y%RK4S|L zDC0c*d-|7k@=vPYk?-8Gg$cZ~4Z^?WT}t4nP#3@19TLyn+)mGY#Mn!q+*{PeglKT- z-@S_O<)fF@e|YQOecqHW{IM6m*&PzYzk3o-!`;x96QJi+)WrnKI1m3Oxi}~OzXY0L zg@p;cb>BRJGe=$gZhlCtzxqjiwtcu96R9oH={(?=eC` zu5Kjf;c#`?!UW1qcTjq53V?sOYm)bt zu5O1I3Ho`d7wScS42dh{YoR!*W%3n4wxDc8_)CkNNQ_`Fl+hnUVuXAx6fsswKW7Wd zMufi@$%(`W_Cgu`F(hu6uZ1E8dM#T}HX{7x%S~6!j{0r1bKhw)GO(BE_9=nUAS5pA zbbNIG(KpX|MS3k;nBe>GT<@`WMHLVHt?v~wGO!nD92I(SNJu}|^T5c!mQn(=9~nr& z9x9buFR~2$iV9xHGQwX}<$Cl9vJCbT1xY1x%}LEy{QK>jbsrvgdo=Dd?u}JSpbx^r zMEJ|FJ3E}HEwObQ$ug*3c;Tp!4TMCse3Kk^aKOyA8C#eLf8mxBi4j&Wyii6i5)zU# zXbj{GY+)k&rJm#^@y7A!KFCYhOEfMu26B;*=p^3}PBy=N_Dhm8u!RZk^Ue)_Zr}K} z_x{-TCCN+J3pCCH`9nxZmZ9f?yo4>K1ZY2&{C3{BxX-|ueK+3m+Ok$+cT;ptH6B#$ zoI6pzPyMUAe9i*NUb+47Z|y?j;UO2r5BJ@&?}$4_rfgvXzcR&9`8SvMm2a!dEl-BZ zcOXl>a8&RLiQD8$>1ZWSJY12o#eTgC0>73;4F3jpP9#QHz4n`0c!k6#@)dK$7&B*B zJzJQ-uYM6j+~oh}L}CPDpkDrM@Hz?i$(&&|Y+=G;`#0h(h7pK?=yp`_3W@3YwYvP_ ziij;t*fsQT_UA-m1Y)3GySnfSiAUw0<6515$H<5+Ojuj++Vq{JC02jYqfh@GuZ`Hk zgpFhVHx0e>R&t?yIkMCXM+mQwpfNFGi?uNjHuCx3X5>U7Yc(aTUTgXA3WJS^LC2a!i0@uP||j8n2ftE5AEEi zeoloFh=F=-e8e{yLPFMR8bj7P2A*malBp6zWW?>M9dZ@EZ6d1bjb6>MDj$9 zq4!*l#IiGZg#_hrFOjzdR zzoe7riHYQidQ?`g<#X@~iE$l$Mz%t9e%Qi5nEm@Q1$+Q)x;XScJKVpWv9ESgBAUN}N{g#^6i zKlzu$Y_Sy*2wTJX7;yFfdBW?h~JbunQpOMm81j9ps)_7!{gnJ4GL zUbbSz)eQ-`x~UwM!_{RA6DVUo_jfSg%W$&$5!;Rj`iJd=plrJozDLpShZ3mEyikT$ z39(jpKio6Yqu7G75w=U=d(v2xzk8J)8qpLB0 zu$S%0Va^DN?gl-8*7AL7+Yv_2V0*pDMQrERXBh|k z9pR))TgglE-DUGaz3?g_{Eo2eELjFyP&UH$WPO%#aK49Z1bd+juaJro*?w%=_cL&mQt;e`y@ zeo^UjYRQnbT_HneFVPTJ0$FiL?EUch*_+EZ?t^@nElk++1fL;S%p0dMkW-g>;i%vh z5|Z!gc_80q3(7`t>nWaQXog(s1p;j>B=V;jnjy0VWh1z~6*2UQiFsk4!Qx?G!;ThZ z=ep%*-cHuF>}6LWS7>U@6%w-M z)T@g%CtH}X)>Euf^$r?=>w|jXg)+QCLe_&C18YvUFcJQGIVW_LTIz)uGL$v0lh9Qv zTTnK_Vi&7aJrAr>OT9qADAOO7d~tPZb078)ZT9gy|4e+pdTM>=Gi$SH*Q_7xo?HIlcg&d>`Qg+k9$TB; ze&3?hvT*FTTGsEM-7qnG+}vo$=-O;v#gnP6Sg_Y)>mC1`i-{rgTf~1nwlKIfHpNbJ`!jus2i`Nh={=k1(dhs4VFr=~S>=Epp+ zDB=u>=ctf)cHG?bU^(+wE1ryauH|tzB;Gvc=k(0swb{VQTj+R&v&E=p_uTI-@U{9| zzx6X*tF0H#l)SgeVl4)vGq*AMh4lOuN^E%F@_o7OG940ViFS4QNH_-5&TVw{=jl^t z)Mme)a(cuT9tUmY!-y(-`pZ8`H#)sGt3767#Pcf?T$B6?d!NZelNkNfV= zBF=sI{)fb>+h(M*kFU*kJ8wb6ZHeiS_;fm$yv#RUckEqRBpVlH+M=`-QIX8FhvU;?V&sI3isnJT1JM%e(#Bpa=*C0kK ziNO|LfATufIXRCSI}cvhGQl+=n|2OIwdCg(!J3oTh9M#6QDf)9>vN755^@J4yMsKc zF&z>z!bLX1vBk!A3(k9(z+@D2-)!6DIg+bI#w zd{b%d9G&Tqka0J%ahEMN;~+z}naJn6($6F7=e*a)1lJ@QBUeAA=Yf9Cb@5dQi3Q6) ziZ+%J?wX?~hGSy>+z4wC-pkZcG_p~Y_g;BdHH>ldrHl1ijg=9O>*8|?2^mF|z$nUl z$Q&;uWR8k#j^g>1>5z~)O6POTQ8u3=-{F}$BrF5K9L3KKm<|bK0G2JW#f}h}ti|*B z?mPK;Y(9AleJX>I0ogVaT$6K;=jXBi!kP74mmQVQae2mB|LV`9Suzj)bjs=HkV#~Kh zq?L>pqD#fp)LI6fB67Si#%Spu%Vi#%H2Q(m`ULVerbFU7=^vL!jPdtpsr3mwqqDx^ 
z-#p@@nk{3=Gcpezc;154#t&qZOouUck&yv0TJ&6&+Gv31@-|BNcYHX8WXO?a$Xu6= zkI0a1B=Ya#>?)(^(=rdP`R7lmjhy;M3OzsPn&b=Pj}OzMs=H}b%60J(hJ<9{HI{|5 z#pVxW;Wp0ur{OKmsIJ*Y=BPHOwFut*V1jFM?zNwn)!?4Lx5DWYceE1J#pe_fGDRgI z-ubyj@D7X3ef~`szPcx#UR{r)YAZ)&Gcu0dX0>ucj;h{{%4TGH-^Y$HJSw>#sof9D zMR3=c4hdPYq_$#Vi{)BqW0pDj79%Y&wU%gE7}_oqT$6K~OYVd7_^aG=%XjRJEu4pM zOMIL4{5s`o#`-!tfNv}N3q57QuIg5GJWroUYlC4wTu=0oC&VUIq9{j^;)ir zuR=(~lDA`4yYZNb;Rr|XZ(4rs-?Nl4F|{$#)&TmJ1rcG44JRztochJiQ&X;s&nYBi zOjH76qOAuo4syJZkl8?2UYHGRjbpnixP#@RsLTesj>2qU>m|$pwkq(k;(|gK(M8h2wGwhgS{@B)~&y3BB7>$ z2x6Fp3GM~Xz4z-h)hp^Yurr?5_Q#XB{8_4&v6kgt5b71l+^1goQ%fylZOGS9R6`G$ zUZ^O^EL?cWJg8+n?F)%UQQCO+w#sbS)0IKHVuG)_bF7KPb#Xi9qm>!wuzk{v`idsfQKE^o@uaSJ!dsj<6Oym!-~ArIo2Q?YULl%Ddp*%LW-lHkLV`7s z*uq43KL&^?LXrO62-DM2MnJYKl7C zN|=zGQ3=1U74?YJ)FaqS)FU)Tt~Wu%!=i~aRCHzD+qNCvH7G1hNG7X)_fO(#lBMYg z(M0MrzHQ82qJp6@a@`CfSQClsVnXt3CH%UUHIdj$)I2msuKz&zEL+k7y#TFPcb8y3dQaE+%9xqJ)2TcV64deeV*ro4-6&8H`ul zqj-$bVI(0%9jz;WE?<;G;;xS$u4yT%JHS9!cJ2dFoEjpEQ@3o|8Bg4cdL`f0GWvE%i1t}T+Gj;wOo*zP{vCbZInh3gNc*g) zSMqBuqi=_VXrD!-eOA=Pgs7V7-_hrtYY-)x22m2|Fr;nF7A8caObPU0=UDBAy(FjB zqrzwq60E4h7A8caOkSPO*gv`3kkhz}Kzi-H+~F;#TsV zZKztYm#jrJ2A)lX1S@B;g$elzwZ_2LtwVJ#_LB97#=w)GkYKeNwlE>zs@53zx^<|i z!xkpw%hO8WTh-37Y7cwK8b^-``+gz8%0O&kLcTn$G4QQw=U9D+y<|KxDvg?KwWJs{$6kC{( zFIsC1Xat0+R_tXh2J708V3ioQFd-TN8Uv~V&an~=TbPh9{wo2E0C@^PYB%gcIK(dMY{G(Tkv6Y|A> zjRB1S=X#3zP(M*!IC=Oy>`Z9uL%lY3VYekDzFplqhN8}&yM2|fZm96tZ>9$md{~VH6%opq)<^}3lpMBqA{Sy z;~XnBv6tng*iQ}#R)b;-6QakXF`(cRDo(M52~pQk0(w01G^5}2nk#c%WcIR*9J|gT zA*ww!rrN_6CPcMIV?ZCt=|56Y(z}-bIjI& zjpv^N$ki=0k=RSrxs=G&xrjJ5*F>7Lr6@w>dl{&U2|hySdh9u?e!gfT9k)@hjJ-t9 zNn_w1goLO;)sq@jaebHw&s?ntJi5w7%}9@==2{8O(gabg*+uN z;@Aq=6Re5E7A9=n?wDSv6tv;X^dRyPmjtIcZ(*HX#;XyOxXSd`oD9c=arhC z7ki1Gm&V8yz%+&@juB0yYSBch9NrnfGA%4j*v^Z8>TtAZBFzv@q-j@n&e%(I$uvf; zWTrKFf;Ex2E+)8zhMGw1CHiU_BUfb87--X~;g*_)8`s4Ix9Lz5NuIuHUC59`u})(k z-;rmY`ggxuFPTiCiNs!_cc(eUTNCQO^Y55B?Q}z=5L5-0s7HUnNI8Zc^x{4;!$^GZy zDRN<9!ggMeqsZGkq>02{qNAuWa^*#>$rG%J#C0*jy;)YmqKULnG?Bi3XGO|hq6eul zas^3^;h$iM=457?lUx@Q-2a{HHMV_LDVj+4{5&t>{Up)h)fl;Qug36uOQQ9gnbt3R zi8`hQjL+THS3vsf;Ex2E+(v}`M11f1zBh!v6twjYK&ZARbzO9HIcY3CglAIC9pO` zPTi!5#9pHNsxfkka>HHIfx6N&3$!mgozy2zSH>?L}_8Y5RA))<~(O(d?12_7rudg;GJ-i32c zzU8a+=HGSJGQR7~dw|{_&xuM2_L46ID~)dohlG5kxv=p5zI^Xl|Bl!Jk+2s(U1*kY zY++(uPy76N@UingW!ZTo`I#r|#k-DWgq)0z!4@XuTg-aq_+qo3c@cxXcsH_)C}OaM z3Hd^_#=zID!x-$vyOCu?5rZvE$QP|OM)3{o;yl=kcO%P)A_iNSkZ)6I415n;&!ZsZ zyUuFCH?Me{mv2gkL~&H?#dAp+QN&;i6Y`~NjZuE)?8Q${%7`KcTbPiqjcbhZJIG%A z6sU|SVz7k?`5wB)C~t}E#ZR-!h$04CnBe{|&eA!oN3j<_l`A6v>FC22CV0d^2>eqb zk+1LR8RDycy!$0z{?otXi-O+W2=?OLuQH;D!4@Xu3x*m4Uq%dLuov%sl@Ub@wlE=I zV$>M;!ebbNy?FPlj3{ETg$enhq{hIPDm8`?PQFjA7F=DPapc?B`geTiP>ICuAbar) z*F3=%CgdC68Ux=s3}dht&(zHmY+*vaRjx7cox>nT!d{$llo5jYw#ybK{vXQD1YWP{ z`uj%`GYQpHNHkFiF{K%vkU^f~CWclbh!`rgFVY$+rgBl#F{`4bQA4Y=MN3*#2NAi? 
z$$e1N7)p#qgpgA67{j}MYn^rWZ=dI0f4!f^-Mwpl_u6}(J*+*PedNw{wSjvMV;ii+ zo>L1^+F%J2a(BGiz&(et4c1~0u!SgXu!IS@17B_6p2OG%Yq2NWLXWbjh68|V=dk#Z6Qh|s@h)i#0Z9EsmN3D-9ZcZI2=;e)AFsvt$vjo8#k+DXL}`O1OvtU^TDlll zL|F{D4c6jaxfTKta-Ok-3Ax=|ZQ#~)(IXqI#k+DX1R%A+5+>x9cC~@q-(e!;gSB|S zu7v=kHdw-h+%~T^aEm=m1RJcy`*-4Y2) zn2@i0sEyWV&RV<|(k+p&gbDdtiP~tbyR5}~I^7ZpOPFBaE_#9)Yw_B23sKe*mN3EI zpxP)2u6?{e+)@)WuO+O-yS*($X@ez9$ag5TbWuY^Sq!)h*5cjX76OpAD3&lG-_uYV z_@;>{lMUA5{oxh@khNNhiwSOzu?^PZ{oxkEZ6qvVqWgBv65b8v9wSOs!dkqS+C9M% zCgjU5-IOkC@osOoM8Xm#xF3u1k+Bx<@U;+S>9T|g?)j4BjUK<3H;|HK-_MRpc^#ME zq2P7jlE|F!H!7foFjkslV(0Nss*PJ`AC$6$30`wbl7h&b@OYtxFxJ0gqVaR^Rd-{+v+S~g4b%~&0-@mCp=zgA&l8pOg#NbyV|(qs0X;QSi%IaSR_f6@OYtxFjj73;*BHpnP)fu=G+ELnBbKc=}Rhv#|tflvDOn4XPu?@ zcxAmlY&2NH1g{_^NtN(;p@lHktYYHwMf;=fWg)r6-V`53iM9s$wn{Ax4gbDUI~ZdloFz=~OeRUHgvSdlgfVxDiFxhz{g2tl9-Om;37(N9NtN(;p@lH! zfH5&-9lZfFd*}M2bCxi{UX+~duMi$Dv=D}O9}`zxI0L>U<;M5=GEMbDZ+9atG9xt>I#>_h=5WoG)a+WZ`{=Dc`2#*(99LD?{ z_L5}uQ%_F-_K@U*<6;%sfwO*+WCs+$%)ZLhn+O9UxK3IaV6TI@G?L%!i!CDBT4aS7D+7xAl zcFq!no!}KDZRa2|Cs+$%^d&JNy^q>Jf5j4no#2%)?XT2^6Rd?W`ox%!UQ}(M-(?BH zPVkDJ_Pc7s3D!aweS1vEXrMMQey{{#CwOI2#}BpP1ZyFTF)AixR8t!m2U&u!6TG{u zNuoJx7uYQTz zaDufEhVK&-;w`BS_*X1J*a=?0R{u(EIKf&7!-tCr@%q#T{6v-@>;$jFtDmSgoM0`4 z;fuzEc&};${w_-pc7pd4)ZbMbPOuij@Tp@$yl}MvFPtR^JHh)R>bI*6Cs+$%%nf2f zW)o@yvk8_U>;&)c==?)%IKf&7V~!IOGAmLWm=&=EVJCQ>O6O5(!wJ?x7<09lklCHu z!0e7C2s^?1Z8{%R8&0qm!kBZ$gv?Ua24<-&LD&i2H`IBp+Hiuk5XRg&CS} z3BpeB{-@5*)rJ$Sg)r6tV&ZRow^JLxmURY}AnXL6NK2A}$eds;gt3+p6EjAPP#cqE zy@VwQJHaRGlB6IqCs+$%tjWZLtm3E*tm3c)VJG;6psxF9SvkR42xDz2CS)~AZD2Ku zB?vpgCmnVDN^Lm7S_or}EGA@CO??Kes<8xNC-|(YuEVL%-~?+SjJ3X)nA79;YU5y8 zPh<(gPQ>?@6h!6(Yaxu2vM~XlM4SxGJ>T!bH{P7uIZak;+nZ1AjS-k$=f$B#MhHGJbJf#7qIP!$XH9h+^ocD7v9-Ql&NCt<3{YM#N#V|(3+3T^TAqE zzGuY1Efai`Rq671lr~tx#8I0Veea&diLH`g$w9|j`Rw-t{qAdQ!&8;9){fKcdEjZX zZHZogTAv3KEww}#tBG7oaZ`OdJ?C~3=uKM$73 z7mrm7Pq4*C=J{YP`3|!ZsC_L&m#Jbxz7(xAYJQw5mdH1&RSPYu#YUMb){-x2D{eb zS%0i1F8R$=m7tv;^Pb(~g`D@gb5s5MEnEL}K>-wf32SkQc2BT`iA!%DqBd6O^>?$8 zc`acrPJ6dRGGW=48dq)Czbv@D@5nOz$+)e$$;YkR)>VQs>A5OS&9ygCx}4fL=dAVQ z<#?N;y@@B-J;ABslypz9*1!~*; zTh3)P=$>E+6I|a)8@Yrs-ml~R!LIIAvu|ND!CJg47uiVomNfPmFlxIuphPAK(p0e) zdonEqvY-U-LGhj!r!caSaJ=l1wb&?A#aiqqwh*4Gge6R{C#$J)BIC64{$RKH$XJVa zEn5h;k+6h`mM54MrJHbi+0*CzX+E6DSc~T$Ekv0rmN0?w9JQqMGD;h)#q+2ZqO`#h zCNR#ck1GCyC`-1zUa=O>2U`e0sEO)XdR$DvGr-^BKR_?oU@e~4wh(~S21}TL{{S1M zf0cPYSc~W9EktR9B}~BkfQ`~GDQ&P8uQRj|r45!a0e=NH;MIt7D653Ec)g^B0HkHb z5+>l)z((YSE5Ta4?$bh)Hdw+0d?MH=y`{`k#ag_6)k2guSi%InDA<6nCCXG*ti|ha zEd(IVIZK#;uLT?MQDCC1y9sOYdSVL!NNuo$3HWfZ0iOsaf(_QS5p1v)ukW@HfYb&{n1IKrWhG-fOavRO#p~291R%A+5+>lIYCgmdf{9>*wRpX~ zg#e^BSi%H6S=fMg1{1*sYw^B83js)Nu!ISCnXplM$e9zY#rq#EL}`O1Ou&1EjmY2C zRIwKCzrD-{Yw=D~3sKr&2@~*YVFSLlD2L~fu@>*EwGeRc8WWtaxEp zhErP-nG>vqFv=$;YCG6>M(^KxL7gQCJHe%%Bn6Q9q)+S%H7@7k=+5`>-LR*)nGkvYLy2&3J_#MBGyY;Aq>qb9Hf zVJEmX%NGlc$eds;gwcP*#QT04_o~SqEJ4@_?v>36ZyS%wswb8H*By3 zVJEnEm75AH1ZyFT{yZiQe9g|*Znxgq4VED61dkH(9mNX4S_orYiivZE=(+H0|0i#2 zumoWzc(jwVQ5Ax<5cd5K^d&adc3hz6+q0=N|J&fW5O#t`?IfuZtc5V<biu=gQf&8`rvg)sb`nAr6k{aSN&+BQRSmLTi|dr@)=ZG~VhgfSEz9gvAVJCQXN9z@c%n8;)7)`Mq#!aUSPNma!I+R% zo1(1H&RK%66TI4{?Hokr1ZyFTIeAR{W|#hI;~?p;Sc0$;8k25Kh%a3tc5VfsF;vZO>JNt zWC_Af@M^S;&%)s9&NsoM0`4;rqmdcuQ&n{uN6Qc7k^~)W1?2POuij@Zn-Yygs!7KanK}JHfj( z>L;oVCs+$%_@Xf(-mBX9rue%oLD&i2by9y+I_c;qf=TTKH z5SXjQgv{>L24;6GLD&i2mDTy6=EDirLKt(-n2>p`+Q2N8B?vpgyURMSRU1yQ7Q&bb z$Hey*+PrqrX&v}RuHGYxyOsS$THJw*Fussz_aXm!_IGpi?c1NdSZ_CW2@~@5TP5)I z+ay_Y*a7(#e@e$5`|#`0eaL>ZE^aiAiC+)+4s0Z=^Y6IWuOp>o{Fe(4rU<20qrsJ46`1Wly=MTKLMa~i?5Jo=i2IURcwAsWzpB$UBgbCb-uFol1Dygax 
zNEPDZThn7={+%|P=)KA0oFz=i7k$+RzP2j2{J*+>e(S&)V;;ND2-v_q?R;x`OdNRb zTAK6OJMEscgb9R^54(;1%;olO-{O5{iWKyVa`%QiZtqp7oeGXt}-H_xhd( z>uu~F7ZbQa9e=mG)vE+-z&hWv9ut@R_|}G0_U!V@4$oP_gnYkPZQx6|N%G4Rf7h6o z-#upk%Z^q8Hej9aS&xafBd&yvGK9&S4`%JFjXXUTCrHB-wGfz1z3)j2Tg? zpydRAb)_USCs+$%w8xluWVyYg_?WbFmLTi|e|05E3L{xWUu{Ado&~~a zH>H=MwFF)UYh^z)0-i-oJiO%r>D9NUV|RPx_2_F*Oz;<+O6;4e5ONkL>zc)ZX;7~Vikh|i#91)qT>Oz?M}l0^KHbd_}0 zSokHZC0_xzlD6-LYw4QLFlf!;X}0z+Z@ou+29ApfZu3cU!0p?lmp^;rxO>DeVJ*^}W%=ICr9oBrWBnku9Ob(g=j z7893WWpkXH<_u}Dgb92N4mRw&00ogb0UNN+-;9fim#?(B+Tru|!?(%HxR}6K?O;QC z+s-5@h|CGtfOY=%U6(}i^1S^fu!IS>ZC`e98&1Fmtb3}U6%)TI%IfqvL+UJH!pqRU zH&GCo6R?4Jy>y`!6MvFtE~!Wkx#HkDOPKJw(0H|{l+|FViDOv!eKlJlLVPn`eGpT};}XL+3^On7f+-%435{jL;lw(cXZs|}$CcGcBFAx_*<^*gYUhf~F6%&_ApNMC^(=lI5S;B<(W4)hSO>(}U^t&tW zI(Tfyydg@!2IBSp5nou137M;@4VkN@EMdZXJNsJm@-?haeC^4xDNC4eFUr0lT@aZQ zNEPDp9u-Xy`uQGxq^x!n8$W)2m0R5gQiYVbKLr zXzSO;$jmEc2@~!);akr-yMup~iJusKN!k55XvM^_1I;5_+Vh7gOPFv^AK$D_l7h&b zKt2$!`*YBWiU0oGW)s;FC#Ni7!e<%(+z~cTSbp<_9k-o5HeKB`u022s`1c6xN~&B6EVZ5QbJvU@ZeV zM{Q>b!cO>_gS8Kt6*Y=kkrJ-u>l$dOF>%i(*NY0BuiD`t}?-N8j$Nb?DoD6%S!wVJk-i5IP!EwGc0~Vgl=M zuz{YMB?vp=>t(15Nh0G%e&0p2#$fzlEnkzvm=P22oNX(2bs0ZcQYB!|Yz&YU;eTJ3 zjvfE->vdl@hR@(@z3@eR<<`864qFk9_EIgT7;8iiz4T1$-I`eyc95(jfwG6x{F^}Wh~~LEMdY+-Dauu z1RGT?Y+BU(Lo|G`$w znK<}`+4b~@8Tl`-d7|N7IM!R;>$kldCT=~pS)V#l1i>&NJ_rm^d)4KPklEyWg#|N6-Fs zOe}kDc4LC%eDB{sQD+Z{$EcW?e{8d{v*i4NAvf1~tmS?;CLY=Qlg42O%*eMtdmZhs zkX!U>{>*J{fy?U8+xEy&R_i`?z4*OB;xz`nGuJWMalfyp5-VM^WE-x#OveOjqL(hG zgl#Y_-(EiV)yCfrosoYw^AMSbgj8`q=sh2LRQdk-k{27R95N$6Z?9A9Jiao)G0EwK z@$WW%C^>I_f0FjQC@aL}{iBT;!FH1T;zv&^am0|>bsjm{UTov8rS~?TpDDFu*6-{5 z)R>NmBhLL>1JC1{Yx6q$KK%S+;`Pg~YTUZ-jC|8$7S_2gF&z_|uXm2-V~vrE>+DB! zt&9n&?G3N(Eb*QJ{;u~J=7n!QV{qfI-WpBIVB$xJRdx-WrAaZH=QI%)l;AJjOLs?H;f6% z#{|y@&(GOjOvn?g`xE3|jp>+>9?+V7nD2||pP+RoOQjtS{^>)!9O#77)>$UYL8zboy$?(Lk{`k3ID zl0@2h-P<|G#ibAv2QGQBzOwXiGxk0;?h}iBBg{p3EmM2Zy7!{I_R6cOv5kNK=t(WB zKGMT+T%4zvkX}@u2YOLnLuPw1A!AhC$0#0OnT`n=qjWsS80F(R{2d;tW5PWEj8VLA zz;sN&18{GNC7wcf$ZpU4-N%c3tbO)6x+{a80p2zf98;3qSma~7$FA?-xI9(n1iK?K`LveGF}DslJV*$uB5*hOS}v5i^MK92Z(}+pekSeX zWJ%RY*X9jx6WF8kwqm=8oT^);FS$*|!JUp-*zo=X-Xznpjiu5voFq239J#pRy#e;+ zy_c}l0&GJ(jwNH6+#83&i&^-;rnPCb!A`{x`}lB_K~98&e0 zi(hYWT%5v~5HEa!d*Lkc@dIAC_w%+J{+&a{Oz0A!7<4lzn?6gfK)xZ zG|?S*)DpzSd5Q@cqB0PVf6_Deami1lfVPt^T)I6E9n*nS^_Eomh>Wx|9ovvpb$F_L zME3JOp2C=r=h5)z;l2o-8q+Z$GnR(WSXkn|7V4OLPF7>2CN{h#x)+AJ%LK=iBv;+I zUjwy$t~_)1cl^W_@?mv}%W7ls;Xw41y35v?j%~<1s^Rk}mbhPv_UK-wwJvG34R5vX zv7((b!7(L?v|3FSS}n)Lr4SQ;62E;_>96kC>(sc1qw|~YU)woL=@T2?C;A)!eV=>! 
zcH)!EDi^;UwPfg!*$s}1^Ax8_`b70b(I@(R0R14_iwPMGbmoQ8z~?x=s)8rj+Kb9) zpz|n<20mZH2;j3K8)Y~jKJ&u7mS8A(!Q;YSM6BE*|vqqoM0`tA$e*g5ItY+D;~#745T)kfy2Vl5lh+Hr=Z zm2eyKJCqd@Tq2^RKlTKzcW%cCs%>p7X-8kOEyrZ)WvcWfm}%LF(~h%ht%P8$WyJ)S zh}tL#>(ARU0&HtDAq#h(D%P@*tR17z7fclsTq1F*Y|d#px6#1LttC~SkBqf!p83C| ziU}@}N~(01#Ojju^H#^YPZev~Xz;&0K_<9F;#65LYVFQ?QEQLgr;4>~WcXidI}&sy3JdctY5U+C{x8+USDNrw3X;GRZMV+#HsTB zLsp~OyeE=9;Z_@Es#we0wyf5*5?!ZCcf;GUf8NF=qNyrvcwdWDd9N+!8Cq>Pk+GKd zgIy9`riuwJkvLU8F5wCKIM`**%T%$Jj~QK-ZkMTIf=eV$m5<@5?d}b9SxZ8y^yGUx zcG=r}eC@I(c9|+BxJ2Sqxxa&+!9AxgTU41U)^cB@%U0WEs+iysiBsjiA^HsWGP~@3 z%2ctI`)ytJa9yT~2`-T&xnEX_Mjk#R-Rx@JSL;lYk3c z*khus&uF;k+#VGh#rz{(;ac5SQ>_z^N+yc0u3^h9=lpKj`57%aA9S_utEtv)+kDz4 zYh)V!&8nZa#bvdyU)p$9i0Sv}zM5(++5dw!Sz*!e)_Z(VA)b|0=Sh~|l}0JZIH=)OH~38K6>FC1YFF#Nno6oA8WU2w zsh2LNgl)v7TdXIhvYv=FtKN;)Qb{6yCi)84kPrdnTG@L`*buNublkBJv#J+ZOxjI_V) ztEm?1l8nwJ9TOX_cTPQ%d@LWK`)aDSN9PCd88nPq852_5>t5Sg;ynXuyNuLIqZP=x za9K~h3y!U zeYLWdc<G#`YtX2A$m){rqGg(hu|DYM^>b9?@lII_Mzm195#xtKTo^Xn!>R-06 zR>mbhiiVM=n3!?Mm=4%j!}it6RLPp95=fQ!5*mi>B$0e{cs^tlRteI}L~KLaTY{SS zsO_tjafz2vhGRnV(c$@!QD3$6d62!BkSEyiC&;}T(=j1ET*G@fmU!QeS}9|@+O|3- zb+_SlS3C=qptdu?F(t{#vipj49JFBZ0F<`&DDAxA?Ogl>m7r%}f@4Y&Y3Eph)OOBsaVf;ax9#LW z`Yqd6E9;f)v8X-tiQ?gC*m|O31yXxa@eNdh-ja#f#;{YK)Kq2C!*N`krHJ4^Urh_GaJyiGAlt5hmJdmfD*iLH6u|oXR_SKXaabk~&USnV{CN`D!@r;av%WYpx z3HWVZuRzB{alRy#^Cj3q#QR5h$lepda~7XldeJ}2IJo-Vy04}-_I|tPL?$>U z`Fg+&`!$Y}RQ39W?yD(*xV*nY3S&aN@DBIFS>oddyl@{^;GHMQqx+2M=p$p)NZVIa z8-4oqn#csllqA=Fw73J$yywfhucidz^3ec!iV2KSM+)((?W>hGeC&h0m^fj^mq}QODeK zvKmvI7SWpMUKr{w6C6{LeDj9=>c>l24UlK<{tlkA`&3q!I3Fvne@-2>WVX~@w$5~H zL*`L+pGUF8XA@|T?qyo*l2%*yR_h)sS}GG9Q+ZlMTP?@Mr4SR(iQnE+`m2j=UoG@- zg{P1H%KdBWSENs@d!OiY0Q6Vx>9_W9AB*3PvU)@MD~^lv6sJo1M4T_tKGEj`=m*(e zOvq?Z_tC)TI2bdSjtLnJQXdU`zJw9LXGJ#3@H6+B7v{CH_Nwhe8rejqV;jY(;!Mu( zcGx(GFYhI77B2VLB&H*aJ+@YYwQP0R#w#a^6Tf)Nw80V!8(|~okvLD^mL#nNYqhIZ zOvpQGSgT#NVuDV@VA=0n~9#Tgd05wwi3QS?Z>ry(|43D#;?t(cHEK-Gr40g5;8)du3S zun{)y%2!Ht&AOFft#;LliQ;XJOy1_ed-Q4paaq_18>x@Pn--FfR)V$KRVyZncYg2& z58k|28;HxoM%dUc&qLR&TM5>()ySAA-ucOJiw8?AY=n6O^3`!&NpB@st6jBXLf+2Q z=TY3Sq4_{ure%bU?MLFhGAXN8g0=e%o{io?|exvX=?)B|V%|{?ut6jBXLf)M0K+fgONt_T;8;HxoMwpLsB;Ixu8?6LuwX0T4 zAb$Io;oOqiKwK6!!n_){(Mqruhn6XqAV*58)2T4KXZCpwjfx`o=r^fT`w%Luo315dEG4tt%xvO-nEo3rI3{p| z0LlvE2TLq$gn4H^MiqpPIIUVXmc)d-qo_9I9Yx$Ir}@AbWnm-C8}zZZAn;aWL9mvM zi7_E>XQ~Z(JG1aCEiThC!hA;e844l`1Zzp@iq8-e;$^6f;w?IR9;KEM=4HC?QxJH! 
z5AWRx!CK~{#6)o?M*{zfB^EZq{8IPf=oXKHU@h~tVnVz=%?JEMmRQ&b^JK}h1fiZ~ ztCsnUF(KZo+JL{y5(^t)p0oME;jWiJu$KA0T@s1BcZYZ4^m#}s+bnE^+qTmXMXGS0 zOhK@g+{G#<&tih^#ld^2Y9nYF;bmwi-RNGNf?zE>YZ4R1Z9R#+If?gD)du47l1A9; z0#4Q?3El5g5ak&btz*!N397qz&r59tEj#t%wcSp-QQa*F*0NJCF(G$m>NA%+Gx2_u z+CW@h^AYw|Yv=C@A`67Y>#Y`AF(Ef#stvg_6K_bXji6PYFVer`ykC+OL>36vvQtRj zD;YtzS>b)>GA{40pyhp~oxdxHOzyub2#eSIM`*gEbr387*!A!ua8&IiU}HP@gBX}2wHaX%*S9m z6-Z-kL9mvcJc|jrol<=Uxz7?eI;aiA<>NWR?n~HN#e&EJVez_`0j-!I-v{?IsEwdi zp7GPaPx|h2QdZ>q6a;J8NkI3ajG#N{aHB&RmwQgoa^KF*Di%a0x7HPe#p_-lv|{2D zxp6U(pNHiZ50)_DJ}ORACP_hLfv_`)?!7`QCM5by{>e{skP9WAS% zRi4b%zgrtD2;2i%5Ugcw#b>WZ6f@xjHyg3U=e5xC*|zm1G;RL-XOPKIA4(ra0P+;6AQxP^;J%2#RT0S>{R9U>d>KqfpuQk8@`2F^jaRlq0 zR|$pPP=x1dc)c<9_k}O(xf~<1K(N-_PH4r%7Ejpx6SW6ENsN&W3lFXRp;r?;Cx9zWPxCB2nDaZVBVN^cZv7qkT@YCySZl6o#l*UA@1Qom z+<(KAB^EYf)DB-#8*BdhE%{v#Ss++zZYQ*2;_Y6e)dqgE#KK0bzfE7YvED{+%I|{6 z0>N5yJE0X5NDb;0QpXYt8}a3n`>Bmb7rrjP3nB{yYt8M1R!r<7&jB{Jmgm9}3mcIn z1JuSlfBLWdE{H4;tTngOw3zsh)F;?jto4f}2ph5DZ3ETDHS=DR-vyBcg0<#$LMtYI zd*Ur>W0bTLmRQ(`IlsewzgKl$^3kjEyCAYau-07FiivklKSOQ2BJG_e7B=GJ8#YlJ zFB}0I1(5}UwdQt0D<=LtVvyQcSNbiMSlEc)EZkgetT6yK3L*;xYt8M1R!qG4E%diZ zGD><=mRQ&b>CM!J+;=9w3nB{yYt8L6EhdVSH95{9+Za{XbI5j1rrz{($i@9W(&rba zEOMMfHqWxQfnW#7Y9C9Fv6A(GhA)6mvZ({G?&NI}TehxVh#jbvibI3M>tT(ZH zZ+rChrk_I&M6rLK;~cWho$F2PliO~1z3JzW15xbP=Gd>bRh)VgJG{11q&GB4ZijaU zqBtp=;~cWB!_=EN-DEq;^`@WS4McG=Gsih(n?cGuoN@xw&Nj#`g2g%HKoqC+a-2gp z?^WJ$mJ^?LW~ScsbI5@x&co$6hiragy@_*gcDk+J^mE98kaNg5d-t&p#%Lvz$53A#+_~Iwp#fnK@2o z+KjH=#92hEDU6%NWgMQaXqVgOv-Z#*8ZtKR@Gcdt1={e*&?q@(d z=eW2OVxoA{BFCE+EV1)f*2CEvx%GTvBis&Ctk7F;$ppugB*mK+Io`D3xHwNSQM}EO zqZhTE1{-l~7r=Yx(1T0AYwybF7{%i&)3J@>T^Svtd_2ck#3OY~xCap4m9a63>6j?q z+EQ-s(j&sNym#50lA$vcTB*mG#+|MDq z#GfF}+<8gcIppG8UygHq{si?LvYzV`@2oh7>@g)tapo?^Ib@ED^AzW!IM9BdSC>Zym!ae4m;57~R7@*FbGz2-QF>^&z=$}+(*m1k0OoJr-lIE67$ zybY*cI7@u|fEVuLik-|YPWWPJjH|zQJI`W&T$Ue z$0gWcdofX*j?Qs9+DBxho#~h;PDg91d_>mL)$>I*7m11DT(~|D_eJp3n2w3!%}JfH zu*7{W)G_y*%5%tm<6?1!zTQOLWrAZW&z|Qvd+z=YYNGo=ehxX*#NrHnjx+RZo$1&{ z@jj={qgdj8DcYlZndLcTygi_;);(6Vb0#<@J%_A5HQpZJxZD>-Da1ta-b9Y~CRpO0 zKKd*7ugi1DcsE1)MEBeE9CG35xAt(wdlNa{o8Y)OPi$NJM7*1!eWK3?(6_U_m?+*# z(b2%?I2bdSj)~%}6detGzJw9LXGMMvIXv^??HZle+B&X{YD~vA)KN^b9KZR@gpD{y z>In!CIEO4Mvd&;XN@O+y*y$DrSD)_U9x`O zPce3vD%P^m;D337OmKOJ@^bAaJiD+3NVcPFn?`(Z-W%n)a6FndDl~vnOlqia|H$RzI$wz5} z`zyCs+=QVv?EMc>F9}aOc5=NGY{#`?BlCQ)mY1~cpH_&{2B(S%PoeFMB28|?pL6S; zdBsNNgx9Oq9qR6`eKV5)h`GgJHRFh&JOO{95w3YHiXZ-V3+2L8(SEEPm<^4?_Sdpg4Ra6?AF}%&ZSEH z_W50#gL-@pk%3^X6OY>U3)l$OYn@fXQd>>ch!dwZH$D3$P1T?crZsO_$x;|%B0^Ke zT65>_*8KWWpF?QQLtM2^CPImj4J8^gcWU?Rn+y&2KFF9HJ#vOoS4t zrfRKz)0_KT@T{f;;cJgV?Xw!%QWLeTSnD6FPj6na`}5rsEvaH6lt?vI$oUHIKdtdX z>);2T>Dq>*2tTz>)`E=#|M>5&2?L@W);gI8B~nclYDtejKdJGeMBn-DQ(fE0%t)=1 zweURV4z)aWSrauMK~n2vB9urqRcKM){P1y232NV>2rJym6J#QkNYw`V+Wt>is*sZV z?+f9mwwI}5E%b>@M?{yYVj`4CHC5l-acY^{r*E42`DG<3t97#0x4*heS<)?~+hwYl z2qjWY)res`mo?+TGj{&`dZnpit>+f+T-M4jmMSJfiBwZH=l@M9ThYW{PWk*csHtMD zRiB$uw%{+8DkegSR8uwXoyld7^M^I2e13nWsba0ShEFMbyDye1CPIl6slqc?8%UM+ z&TlTA+f@Pr>ob(8VlC7$_c*>-s+b5RQcV@wfqO`3AKm&_WvW;U?a{rnFP17MLWxvU#U3mA zl5Ty`GF7aFzRNw?FP17MLWv~$qw_(rGgC$yoSOTx{M)Vw!;A61BP>7Jyyku#&+9h= z&BuXXd;fjk7HL-=ITNgfFqc9IgLwYhA&vX=TZh5BuM(j|m|!i0gI0~gApY^uIgM5H zWPFfRiBN7#uol8W%f=OZHXyc__g>G?6aGO`C8Fn160C)A(5g`wHgJQ7-OUjsRU*`B zCRhvMpjD$Vh~He-C%;zTxCoLe5o$gYtc7sU5Vc0nOsj>MH`UT%0sS=@AVuH004q7z|gIId+sS=^@VuH004q7z| zgSc+SjC|ZjxXmL-ss#6mfnY6!gI0~gu(7M$H?Z!fc(W=uwCu^2L{ii%wc%P-0=3d+$gtrA zYatx8tcI3EQnU}X;aXJ!t;=Stu;B!2Asn=V=zLL-zJnU|cAgL1QwQU{^8+zB$8pBi8LO5ty 
z4_*?<$oWI^4F}*hk07ZM7;$WN4;xOf7Q#WxMxv5Pig8eFxK@?GC~B(-u;B!2Asn=9 zL@kM=7|+#)YgGw&12(&d4JTL&;h<%nK}jTqU!pc#t4hGDu{8(SaDufE4q7~?79uJ9 zE4ATTRRZ3bt#QDH6Rd@B&@xZ0B$C2UR2!~UC3t>bdX`$Rmc@=^G^YQwc) z!^5a=@O^D11o?1+wGa+k6(afV9s5mqR&NXmk}45aLSVxQ)FG z^<>y^g0&D1TD$@c8yoL+aC+QFxXmL-ssyi7Cz%thg>cZSQ5ZJFlS##=RvWHWCBj<2 z0Pua3U@e4$7Vi$ghWK!)_|$5{wW>te5r7RRSPS8x#d`>_afRG`G5TcO<`E=Sf_EB{ z%n8;)IB3-<3>%U#sZ-`tyH;0(?dh1N5v=85-}ymICE@wdxq(Zn1WLiy8DXQyM+a*m z9JH)NO2VHwd6NJdXxzAsn>qnU{puE4ATTRRZ7{y`N03wrj5xMN3L8$a7Q#WxMxv7NaZt<3wW>mi6#bLs=Dnyb_yrFT_ zEk`0BZN3UDscKtv#?c#<%2`nh;WT2xyu_a1^OqhyZqqN_-Pqxp@1vHK z@mB4H)nPH=bG66@2-iYP9@a$p3{q_bNlb**;TabV9)JD*E9RG+x{ne;5);QR+PQhx z2RPgE!Mm5=`W0EHeqL|X30g>%(@0ey?l|k*vD132o$vXNjoYb>Ac=`3+w9z&-Us=3c&j;M&kyU=6j!aySDj%a5U?_8yJ7js zXOCAKK@t;ATdI7W8gT_lOoSC^VR&UbB3uhGdDv`x_pTlD)7KfKsS1)>sRiPD^QJV{ zd~=E9{FS9&8uy~CY(Hcx+ocvt*Vn0I0#?2|bl3diUVYU@h$|+Xwp97bHsT7BmJ!~JC?&ALM0g%I-+ICLK6@T5ul!!4He3rth&Mc&B)Ra~)y93}rbF_L*IcOi@VJ;j zxXUMq*@TvEvFfZ(aF@EC-qI%D)Tv!t;O@!g&70IWh_(T}4e)8?IF)!j6CdUU$_76G1CHC(C)!qSS_K!3Kws zb9h91r7pe`97evqs&q;~oT4^8Gtawe!6LbQ!Kc}dtPLXr;?O2J27W&|~ zd+V9IvP3LZMNgfw7Q$X`T!xma?n>7Q&|Zc~a?(+A#*PRp^g424T^(8&6aV;zp1ISp zt6bC1ezN1R{D3pYYpPre1nmM)80qqT1Eklrs)Uz_<=hF@LO5uJGPIl*BWFwLIsrPA z^r0smGZucyHMZ8L{tj0Ap#`sI?8=sNcpv3XLk8bxuMeha&Rq*c&@vyTB$C3D(Nwt> z69{`5CP}Zn_qa)NI^k4XNmVZuYpl?M_j1D2{y zf6}+Uf7{8LbJs$=L90e#VSKKpsdB9<5%xluU@e4$R*k|)m+!Hl++C|m_>+Zxv?~)4 zL915Q1tpUA&gZuXt?xudzd9kEPxASlX~6j1wEsx^T|!r4JW&XkbIU zzEtLe>XEq?h@e%YFbLoIL8@GB@u zJ>lv5&QBZG+u^H2>&I{Iu0A|c>m_2TdVPI? zVgg|=Lpe)&;7a2zmwlX3wvW?Bdn|1O`%W)>^;%ol1wzin(@$mYtf_J>#2d7#M6sKp zsdB9<;U!`@cY?JL4q7z|BUP6k*PhnCf3DhaEhZ55GK_Mrb2Z<)ffn-Qs|LO!WT~3J z%PHvw{eOX-pHd6qpjD$VQYHVB8iJo7>52&RS|?b`!^Nqv@;Pb5S&=H8JM$Cdus^4$ zCN{jrVTT7cP(HqDfHEw1m%MZvtc7sUvgc6}m~&zeQf;_amGDxxd^o{c2nQ{zS7BeR zsEKOBwU|ISJWc9-l<<8gXrWCM>q*+W$~~{FXi=JT*8+joWm?t-OCl*+ZAa_`Ysu4xK@?$60zsu1ZyE2w5)e6 zcl(l}r*0`-CqR1{Mt+Ir93y9OnqU0~><@v!$Z6};NLMk|YR+8(Ev7?Q03Xkla0v*E zOSW!oW;u@7xwOC4LS8SWRDj%f|iYsCBgfHrB;>jx?s$5`o_pcH#<;OiS0bKm@G{!MlB>R+R`l ze|Unl+ZPc*%RJ7oqgZ&anh)244Gw$lv9j`=IOGsMHFkJFco={860zsu1ZyE2v?>Jl zqcEFjDP1Q(dl{Cg(zhTW@jWPn!yXjxN1?2WnXo<&*V0tU-1*o(`c5^`G~3oxbw^-M z6?PPhnU~tY3=8qXM$obuSxNYOP;IzYl?Z!<$cMkj!deIiEt^^Td)v{jWvNvqd^gjc zAnjU4M9{LibI6D9rJ^>t7HoK!Ymcn&ZoK8VO=N!l?Y5J(tXvBO=4bvqLLEc8irJl( zl}ngF*#EBikpH-s;co-2C~xt582hK&$v^Cv+PnItUy{XI!wao02(rO(1tPu)+|s3~ z%J`cG{0)WfQx$8iQltuZ4zO*qAwiZf;o&0Zo{x;v%QvORF+sg3-Hf&P{`MBaZOHGC zDkk`bc~R0IE2~&CKyz-lZ?b}`V@eTYHLNKo-LBa%oBf!$+Hdw-6E8zUa zsbVeuhC&O`WvZCquRO%5VhMk5qQyq$`Cu*n3PuajWvZCqIvuBK(U*ovJNruWmA`$t z^TNN6)-c}M`_HCZX!r+5eOZ3PfTxPJj{N3k-4lRPplriVLwrTfH^=s%7i_TBrmu}q z8`Tsl0Z46ds+jo7W23sxhmhNe#Cw@lx(K5M+mF{ooY(+?9>>GzyZD5M5LuBb`OR8v zuR;JqK7?RNAf)}PZTljEXv&WfQrlHx-{1&@)M@>@eg6aMPH@hp?y45PMAAaIjf8DT z8&tx+#}Vv_vi?}AVyyv66g`86%f4M0FI|o+5YopeVc*Rl8xr)VmQ0jBShegcIvf)b z8EZ+KP{O{6(+weTkT4;wOKJPs4cSOIz0!-SmVJ*S#AN#ZJXlM5Z6)mM8r=|4s+j1u zmL!~B=~vW-eMP4m8yRa!-=&0o6Q>&@N);1a$CBjP&n~HNwA$!&i|q&N_XqEvvL3$t z-H98a5SH&3cm8zf7_rx{Y3~o;v&Vq?h%vSF#-EMQ@Ag4UzMkEQo1YMtuUvP2>G6@B zXMM|wKkV|W3EA|~Y42-C>392F3pZvVEZ?lI5S^1woiP9MTDsrgYbnRY#B&RVcmC+R zZsR|HSiOGTaii1On~sQxKlB*g`9$5ca8H$|Hc8grZG3&@k7{Y-rC}*+J@D7jozq5j zMU30xp!%LtR;!$~d6aV|xZINDkwf;cpRwoYbofo1r>ynj{#$o`ytdhhiC=#C^!mde z*3!rB9;#nnM0)WIn24Xpq-C?~9lMQA7hgX(Wl85%_+n+h=8Jo*(~0}95EGuPd?)Ll z*Vd;>&PV-yP|8}gbCcY1sn3!d5LBGjve*A z@2aKWyJY>8wT^4sMr};LXYJ0InA7*E`t+4Yr_ZjpR>~44a3dIO$d{remI>K2^@HxN zrGrlDld{&T>(|xBMf${Pdyk6=+yaKb%a@`hmOs6^T6)7j zYw5xVU#_#(e|KzC8?)YBqcbMPKiW6#(Q|bA)UbQ&EMWrol)=WS$Dn6DrcZ7Dl~YHj z7yPZ3a?9ZswZT=xI`==&TaA=aGrz~OOs6`kV 
z+{Vz$4r;ve*IN4IW1H)DEj?b`JBTn^a7@hm+Qp5%)*PKqdu4FS5+>^3!FM9910^#@0&j?eaY|p-B)J`6IcIs1GVw) zx6xl+koV0G-e7ck!&>+1S9?5O-2I6#dV`oa`=Ryo7n0HGT{qlTX9*MI4l^4gR)meG z-W-^pCZ#*z((Cl=Lmn^g6h&CRF5ejwm;P>0{>pu|beqS1UuOvu3-^MT*{`|IJLs>r z9yT;zRZ{g}ue0=fMIJBiWkvXcZC31ziA9Z}`AQGe(#yu2QD+Ggy?@kSZPf2xNq+Y{ zdRTter?vEr`EC0BD32Gnw<0X}$acoWaf^rL8$Vb}dw;E7X9*K0KEJ-&*y5aC@_Wd? zM&?UCsHMMK`GpBAVPe_oM*L!24>#bTktbTTbB~0u*%i71Bd9Mla)qRHMx5}t??d zwe+1oUEg2{6Tdpk+WD`pctvdN+J8`f(!;fMhdZuou$K1>as%pun3#8Y-+bTKYiY-z z`x-1^VzouqC+>Coi(+Hs$9?l{rK}D-;9mV&0cs!O^&U=cR$UMiZ@;@*zLK=4^S5}p z!4f8BuWEh!aW5_w8_zzxYW~lfj0Rs@)?h8~spUr61u-%4xL)}KQWKNA-)pdhi9haR zHul*XHjZES!$u#;`Q|6~%vsAv9J%RsK}@Xts}CBlKUqtMF6^1Jgo%&W_?q@5V|zU- zHa@OB)i_%GlH-T1m9v(QoN_nrf|%H7{*#R@p01_u9ko`@5+?rhvW?ECk9t~cOkOar z@x*^>>3Spj=d9)4fZQp(ASRaWGOsc7nOb_&f&FuqF!9>U=4D)Q{}W=P?@gC9?tHnH z?s?MSoFz=Gw(?NT`HfF35@O+yOB+ihA6p$hIA<;QmgIKh1u=2cphFsGOFl-Ow0X`F zCKh}J>t+3#%a49cY}~okL5;rRmppm?<~eJ**C+QOFNldH*H3DkB%|{_-yD{+go!t{ zF&jVG4>nfn?B96nC7Ip5G(2Z5_g>{5=>;)yz_k924WF;2&%8A}X9*K4pMllEe$C(R z_o&#o^dGl$pndH2%TYN?n7DTZtTXm&ezg2wLgd^2s^iQ9N2lvvGD^Q|i8hFoxZf`K zQ!j{#(TjHP_?C>$N1tEIS;E8_kD85FpN0+j$*z5&mX6zaw0<84HW07RKM-yq5+_)~ z#7UcBHL_pRY)I+Wy>xx9kEgcQ?N~|e*TggMRSM)UNu+e^Ub?LH;fYwMv=WVp@80mM z3A?;lOXq#Lre6l~vSNZ$m?R^zg;k|{*<)ZPlor;S;B;`WuPaLuQl&FuYQ2^K37QZ z!&+Zncbw(}Jx)yQz5b>3G0SRc?SR2KOPKIA8T2bjve(>8>R0`{mR@q^;GDJI`(SIe zfj%lGcHZlt`lZsteR|F2IZK$}8k!_;t$0X%H+kmg9K3nXTHC&2br*e9Oi1rj_uhvk zOmLl6PbPIw2Dgf%?!rSlYn|;iAMkf@6FB}3uSUMrc<8XyJsH-z;wY>-_G{L~dx?nw zV}_;&yjx5EyvAAjoj8w+3EVJ_zr#mKlJE2znr{49E&XiT*$vkE%SDsa2K=3vSo_m~ z>B6^VMt}8n`VBXaiwR%-fG?3GdoLWAZojCOF1hpi25ZecVWQfAzY`NXz0fz^Ts*P^ z+wRpb%fSZXV!~Hm;G-nTPyf_6J?HUSdc?T<8m#r+P+P@;zY`OWuC!YE2l2=Tp14fE zEC(BiiwSPcG84XIwe+zuqtiDZU)o@;S?^gphrbgOr?vM=7m6qI^LyUY?+L>OdIl!A z{maRkBl@N_nL+;UlY8sDp0wY7R#up2dRf`p($t0Pr%y=T-RFH3lB zil2xlv&S<1S~JQDaWR4K1>oKCjH%yWzW#!|C3v1~G>-3qxTIXJ7HZaeOiT}u4u$QbxUHI^T4wf+C zwlOyphhb?gy;SBO8^5t>2Wy?*ZZ~0#z;S4?lk&=7L=i0^o-wGet8H3--pECD|}Q2zy^15`?j@%Hc1||F7l) z@p{@@cVa6<7UF%i&B9knoF0a~3L*A9C6IUuod8{vf7Mi-wb|t6m0y3V4aEO@V5jE) zTOIE~&g|UzIIfwUOzif%-HTdDHk1gGnVnniGQF(rEjBVymw&O=36D-M+e!-|0Dj_B zG4Y!bdlWUMNEOsG5L`>F_tA4}+BU8IQ$CbnE$**Ek5RP|B;FHQ?}OI-*vU^w9qZbL z_fesLY#|gyK7s@`0)f{2&t8wmHiVkl8Omg4=c)g1kFpK6lx~LPfnY6!y@zWd1i(-H zJeYuO>rtt!l;E_Z=8qcnsHELm+71UGTqRKZrY(F-wNQ^=dd-LsQ?+zi3vI>X<#Maq z2yxBqj0sO+l|awn@uFYpHdUG-r3iVJ9%ajJ`KU=CsaQ(Doa_yXEZpbk`OA3PIBIv;vN3(pE*mI;Jy1K z`TiqE)nQ|D^LRPiH?xguds`UWx6{AddAKUUS~mLNTrd;;-l?VcU-U>vK6q4$lh*z= zG3=dt=q}BJCq1fX-NT#3gM}FMvqw4>2*FwidryRz0E+h#Mh;qk(T z_rWNuKx`!^sa`zw{Koh>8>@{Vi3z`pAW1eA;_lPVZ>%Q-$BXcJx9!}VKk_ksf`NEh za^CsZ>l)9lVei)lNlf_d1#)Uni19+aEd(E~ z2$GlxHzE8`h&QgD-#F$}+^ib35cUxVWfh1MWKHzn!|!h#KdG~~=U2S;0 zu;HU6@)3xC%DJeMPk67f&SkySMv%mW-ykCGLx?qxd9QK!FMFvCj~6y}|K^nDokQ(S z|3JJWPjK}!dge(vD)x>VZ*&llvN=1ly#+lY_(3_|E`DC zMv%mW-}{mz2MMvO5L@2;u-fo=VdMU@CpQn5yIx=;5KE=3Mvd*CuXy)7wGkvS5$==e zA!W5_O#l2LAvj)ye>-k+^UXKx#+yJ4mM6IS3LEAdo@Vbm2T4r$ojgf0T8OF1hWTI4 zz}-7R3t{*Ak&i&s->juWb{&!*vH0hjk06N&zqv=wmk9Bp5Klh)bG6~|!p1$zcWPdp z*xT!YxJ~v3e|-Gr`Q)65{Mh!}5banxr;7UfB5L-kq9z4yfKCb)P)*PnHeOZ*2Bg8zHWk z@cW%)k6wsJh4|_n{nduY3mXs3*{S)rZS_kM<&98D@_RXx(I$JG%|UbZ9vjch9Tk8>~D%x+|h(ZFsyO zuKMLp&D{>wuWr!oVrxome?)dg#WzV{J;c>Y*u7yyh{qJPJbdHD&uJU<+w5dzTT*qK zr0U6)rtACXL1MHM;dZgBg}7CS5w+ z=-=RY5k?)m{)NR_R)P47)b>9~`xw5_c4{L?Vgl{P-gkdQh{L3PO#Wz++VFT`!`H8n zS|X&?{y?|E1z0Wm^ zf37xyBqq?0+1u+|Nq=>(5H~&hbG6~|!Up;-tau_Hffysb=!epa_CC#S_zRMlK!0p+ zdT%Vm3?VwtxJGSwys&}3y-%MP)kYvzlJ>Elj2|!FHBa*qBr$<;#ok{3k392bGJd># z_dK=X@xlhisEg#jPvj#IhsdaQi;QY3-u1BB2$Gn<_-OCc?=8f>LQJ^(VYT7$!Uo1f 
ztO&zKAa;?_c|RG?ADgvUZ3Ib7V4Sy`pf3^PUsKl1zj4T7wc+u?2FCU!pO}q643x5( zEPlxg8-J)af+Qy3uh`q<&r4bTPW+PhhkmFwJYLvn+VU-rt8 z!yt(X_+9pf`~yOa65=!=I9`O|Yhf)MF$Lm5@%oMxKk=m@tE-J5i3#|R_U3(4h?RvH zvhnI_!{dbw_>8MwW^Wk>V$WrALT|+Vjf*GQ9i~AN6YzuW27zORSXKPpuL!~MBJ8W@ z$VVWKm!4sn`0WRrw!Y>gNMZv1yuE*Zx)4jnZ$Io*d}SL1m;)v zrvGRma+yc<5rX4I*jMXOR)N@BW_M#{KA4Z+Ol<^7O!%AZNwSR)8=uhLxLOE~7h%i| z*V$&7mQ^4UX&+n3y!L?ehN+Dpi3xv`U2cJrvReCzw#HpTaJ&d(?!4)FW+M>K%WV60 znV+xty%B07NMgd@eV21GGNYd>^YfQxj!+vOFKqZe1SGO;CKd<0^EJi)*3vsV2V-?e8R zBry@*yf4;d>ar%IHauR~*kaS&%6b)uVojzlYciUTAc+aT55mgo%0+GU>x#PT@xsPI zx9(Q9k3dY97Pa>A?RB&$tykrp_AwFenHVR;qbIf3cPrYU#|s<2H-(r2F!q1W8PUJ3x91@yu!G*GCq8iN^~Y$80yP?5_fGyVR>K|9)Nl4@I9CBr)MPdX#H2 zby<_q=i%|f#$WE9R`$DrI9Gb=b$>Cx{>quww+Bf~gquS46r%4n^XnHCeY?jC8@>~V zvI@k`GUEJwpr<}@O#gJdf0|DnBr)N4FD1!xA@&ktwh$aI!r#7gdg-?ZqF9qj zWlcu&5hO7Y?sQ7znV;Qb!}RTwY;NH3!iMh&qpSk)p3HGJ-EBy^gUncBNlf_dSxGWh z<~X%ohosxetcc@9cucQ7%6U{ER+3XloyTpSK7O9f)q*4@!mV7znoKHdGHS!)g$>`k zMQQ_azw`{p$!&=JJ11#Af+QyVF0>^1P>63#8kTNY%sD+?*yvf?qny_UqK}N6^Op@z z+l#q#ki>-Fg_a~=6JoXyFWzo*XO9;)MoizMoSz5cKQg2L^zu>Z$;BE#ki^8m@|L{C zLiG9NsPwR64Z!1t4d1m!Sq0)%S(%uAo~%_*v9*jKi3!|bXX_=ig;@FgTDr<-yX!iG z#|scH<9;0iZz)aX(jAlJtjC_5Bs_gVlv_!N!2ZqkKK2+wWT02 z8%`j9cJKLfLd+N9#!q+GwI!q-T3%Mz)%UV8;(00Em8EpgF4o9GTvk?2;5pe>T$dHZ z%wmm<_Wt#m`#M}8hDa^BR%*$C#adsG*fVzm_04XWUt5T)g?RgRTkFF!hZgdII_B$% zf%r^n`;O8+UR*G#jvNL_OrYJ^z4yhMOe$+Kb&eNd-%Iy4Xv96T!m?c2`Ox!i?KQ*| z6KMbTrP$Ae*!cL((@ToASC1Dq(B^%8HxQGg_jy5jpOs}r7}kR%CeXv#o%Ekc4|k{# zN6M-*$BXa@#W?`P6o}>Gi++8D4bvTpweTQ`3G~NyQ~p5dcMp?(chO0<7XJS!I}bQ3 zisX-vEO??sF`$B=fQO!_BniTMyah7?0-`4d)HNMu1qIWnC+4i@=@~#pPd#%2bl>bg z&xnek=!tkDW<)U&1i^p(s;B1tdS;);e?N!Yw^iS+?w+3R>YnKu%NI5<2HSpnnfO)i zkJDu~==-|c(=SVW0`rRd8tsol94W+|LU6uFV~(=t24y1NlgZ1TjJ8!-;uDx3UEKS4 zPbM#WGHS!}g$;XC0c}+#4wm`+Wtq?W{>43GDNB3;^Sq0ud$bU1g}5qy#$x%x2Ih8q zj#DPSmYTN_FXN~9Sy5Ty6Yy7Dbo`%$=qAKeAvj;8;rrP0s50@LT*3V(t;ntVyL;wU zmiPqxE*FhI-jm76o{WrgWi6!by$rNfnRr#azI((^d{Umo`4XRi|L7vue=EeC|9qKi z96!6Wd|?AVqdgxi6Bmp3I!XN9Mb+*ZWLe@9@Pl1!{ZT?J6MuJV{0!3ag$;YB1f?w# z!(}WPEq?npQ{1!EvcxCg&%21$@t#ah_GGlJEMM4w4{y(F%fwo_f~%f+FgGE7CR~>I z1lA=kp8o4X)I9xQZmZ_*nXu&x8(53jbLTR#k*tcg53k9+89&=DOMC+BD;M!U-jm76 zo{ZMR@`Vk1vkGlhCgMGrob1WyH7`qi0xL)t_Z-jYXP-SW_l6LhFVa{W+Is+H0wcp- z3k$i^;&%}`=U(L!dNPn6$n!4CbSvhTAnwqg0g2EX-ECLV6|KoP#x3BR2lzacbE-z3MIJa4>l znx^ON*tI?g#+AkoL1c=U2#sJZq@m>#2kfyxZOpv+_&iIHHsT-KA;QME(y-x(&J`1( z5v+wYw0xp?W|Ii@m_6~(JWG%^;=|oLtBp%LwU*y85gNf-NJGmfzTcyr+UWhpetDK4 zZN$)1w^tk2{~PhcVj?txwUCCEPmJx3U8o=!{O`VbmLP3J{_T!xW6JI=Wxug0vA6 z9zv9Yaix35HJ0Bo5gNf-NJGmfE}esKO$Nce+i#a+3DQQa9PVs9-?fq0h>6e$)j8$?;`p|OoT?T7Sho2iHSF;jqs#5POfDM(nd@h?QGm| z$QrScCRht;X!*ngleDeE{J}dGS%S0?&p+jC3|@lB6lsFBkcO5|e1EE5^YF=OR~A`< zv=OHbcl~P1TX8xwO|TZy(DI2XQ%=V92pj)WEV2Y?BbIe>_s7rUzY`m2g0+x_mQURN z{!waU@|HgoS%S0?vwFGveAt8~Vk1qk7Sho2iOZ&pQX9ARXce&pX(Ohe%S555z{A zU@fGf@%&uISL6Y@`X+;xuaRN_ESJ3uQMg2;P!gJvM^#GWw_uj8`l{+K9JLMI?-II@;-Y1wu!-v=#`Ai9R8tsM^4|%MzrG*nCr* z+aIT+wvM}M!wA+w8e_Xp$ZViCFn_QFX(JB3Y>?V;b5u-(Mz9vr(DDhH)zk*&L6#tG z#9p%z<3QF?I@iWTXas8^4K1IL*;#F1K4%HiMzmf~tu~y`5EG#htc5hRd_ueowE@3` zB}f}_{o{xxFiyQ1^-DmgCzI9!0pG_b#9LAu@UK{cv=Ms_9ildz4;K^ak)^djK+7k@ z>r)%>6Ip_^5l24kY&c&uCe*V`Yk`24Pl)%bHsJ5F1Zg8mZ#Wyyr;Z8rkkeWqpydyNY+2x$3) ztcuhIRz)m9+K7XSu3x#eT1@CVDy;KY6GiOmLP4!Uq5m8xm!EOgsyATS|Fh16SCS?8(3|#1Zg8CPIKdx+XILRU7x45 zKtRhUmTo>oZ5%H93@ky~h|;cZ+;w{yF%cTUT1Z37CpvYKd|8^sc&jX3td zZa#N=WHF)pS7|K}(DDh{Ra2h!+K64=bAE~2>x&89hf8aLfR;~u-{=9gahU8U zvIJ=(+&M_M&nW9!M}$VO7Sf1D;S)bK?j7Np%WrA38X3b8v%;M@FP}xnSc1RfWME8$ zMz9v0A0*<-yWN<0zzc^D?A3Fd0_GwyS&g(SRZ#txk?!wZ7OfL`OoT?T7Sho2iAVL@ zxUy&YV3!LxZyj? 
zax6jGh!eVPt2S!xT`j+3A~b@vkcO5|T%q5N#`S2KX9?0qtURrQ+Ia2hU*va8$n|gp zYatCSpIFmwu-@n0<$I-_eU{EAA8`D=Wm)|iF8o^pfL(3m7{QZLVZS6msW>mBjhJwFPqpFNO6s9I zEJm;v($Mk=Y26}hpmkY-v=LWa-&<|CYaSDNJ2}_VRV#$+z)Q0Q3 zF`@k`tpx&FJ|VqbZJ@Wa1Zg97E%s9z?jDQ@y+6`gAfV+Fa@Xp7j=Pp6NE^}nUwfzx zH+J}g1nh+bz8R2yzgjET?))XrWtGwE9V|z@5Mz9vr(DDhH4b%o^1C}6dMAPdAsSP(r#e~iuX)O@Y@(G#M)COiX zmLP4!t@8?M!_BoZq4Qu`3k0-$LS|>Rf!UcQNE`9bMb&D<`3y0k^Lbhe1hjlYybQGg zFM}mW8`0|7!D_?#J~5$wNm>g8w0uImCA9%>i6uxIF?_@jwc&iYm{9*Jtpx&FJ|SLT zd_B74Sc0?>e|yB)aK30vsGpeD0s$?b5bsrOzj}u z3D`is=FdUPC&;smSi*$)sDrx0Mw);PzP7WGCSU^+^TI8S z(Z><7rw^Yx;=Gu!HG^xbn84aV3A16V326C*w63jjB9<^=Yae&bV*)<464q9>DuR|z zNH0+v=vORZ!q#xE@5V%E1Z*H*Tirp+C#1Kl4e_ZXmM~#!QFjl<1bk{GU<3KuDivBj zA$M(Fu3{iQb;J@TY)$RPl9+%`tpsc!Ut4WM%O_|LAYuswIM!rktMdD2*UOr8v3YuI&|_(R)Gej>)amhUJwe%I8%VPwpNqXf-Ey$5U;!9+oc%I%`{B zCKmSU9sRc5l3bT}nkr#hAm~&p5nt}yE4t>`4fD5bv|RR0Y7$ABD4z^Ha?_sCf9E#N zzkAmjoH$JKMVd~vQoP@;r}mE8ztS>4>DC2Gm=*{+KT5==(|bm@w%R7&`VVzlDzly@ zaNfv8)1Er8S9H=#+vFFW`Mlb&d_mCpQH_A=h-3HZ9nERhIX~s~Ta_>^5Oh+Lh!aof z8GW$NPWdyxzeG!A*3(4!MCIs4y`!tH`BVPrKF6yK%NGQlqNKRIo1fDwS~aOh{@gR0 zs13_2O_Wa}Zgzf;=)rzH^D|y=uQn`S5Oj)?BKb}~rgt>=Z$0zR?tcg3EhSn=(|JWA zE}7Re8rOSw-0SjFjtblm;UWYx!CIDfCmCH%j-XN@VwRQ!T1eB$KrNLcW}V$LYI$bQ z{PW#5%vq`8bBP{NK3!KE^@?_A-aY@~B8@JVXu$@Z45XO3*T2;(>btIMey{H@RvVUA znkb)qyJmj(=!A=R$xrGzKF3-})2Th}S1uMW^+Y913k02yBjUfC$QSZvZJqCTN2H}P z>uI8V#_W+XJ);LMZAD&`C5R=H1vc`fdJ(`CI2N z*HW4FG*Ldy)$-(C(f%J)<&S+$qah~wf}oRVIx4vc!9U9g_saH5a!<@_nrAJf>C6`q zeUFu~r2WF&+}hUCf87X&ywXJZY}Tdidq?lKoSl0*(z9eqz98s4mX7T%Vsn!NdqpdL zo0i++;qBFi<&`GvtW*%(^jeSThx;GSb^2sGt%v0cg3e><92LirF3)O8m=*{+`9#EV zvwB9g*Y2IW`Czb1wkjDC?58@gL_9a{`2*~ zb4T=0!n8op86zSlogyB<%Toqk{I+` z_j&c{dy4-Y*GWrd*3*Q=ew7uy_&&$2ez17WFpXQA(g zhUJwe%JEZYiSM&v>p8_2o@%T0uzW#K>{tE1g&P^Zmyuyq=OxAFHZn+rFPtU?K~YwT z*r;2tsNJy}Mt^GR#w)X)Cd!dY&llh4#JP>5v+iD_W1{5?f}*TaWd1?o`|SI2%V^YX z3zRS|5ERdph}idu#P`ur)U2n8a!k+_;`_Yw(l*h6)1OxxmM;j3XG-5UxLtgojhc0q zFYDf_glU1G$e~2+FTT%i`|cEtTy}|;%B-h}a)i$|;`_WRUqpPm&+%%*@&!RrJ?Tp; zbI+D9<4x`n{p-voYQyqM6XnR5bI$7#E$P=Y`tkMlYQypcK~X*H%f$8K`?R{XXLND? z4)roj3k1c_BqH`?BJ+JLuZ9TsO%o5#zXp^hEC!)GF>$_6QGB0>wUDOxfI7CjuLQ-uPf>gywPAUs35(qq1n-FN zbK>-ei+g^uo!YQ`K~Q`^ouk~hglJY%!n8n8Bt9Zy-=`?PkCw`;rwNOaC-3_>-=`?P zkJ_+&K~N+zkQc94FHPEL}^vx!DRyf4HF5)qNL?2BxT z*QJR+OB{fABqqR?$F?eRUQF1x-R}7WHl_&ig%CZ*wJNgK$hNLj-FDod+9w{8=mc-e ziTG6u3PqMMVc!hwG#t-s9}@y+=&OWatO; zKHJZI(R0UhVdD`Ywh-dH$rFpL)w8p+anw&6RQtpw5-nm2i8Zmi#Q$Uo6ZU<@88cvG zun@n>SNOh>_@AtG+3k1>zHRBOA&shiVv)qjcuHzM?V0iyJ&_j^_65m~?eR>wwZzFl zY|gO~$CR};yBKf5w=F&WYNKkOm>}OyI7Z&x-Qb=Vk}rB9FD9=2VW9S_tGdF*y+Rx- z#E}xgm9^{}m$mOVs`iO75{2X#iAZw2#Fb?U6ZQ?z?&rcr4l{HYuPtI54^Xk+9&$l z-8jEmV#+)>enpWbOxSlu-<}H_mq{F%SB2O^;{3AK*^lP+dOY%MRkcsFkuM=gY|(sY ziC4@LChR-ID?8xX_5z8AgE(75g^1R#ti3Z)NPn(D(OxU-Ki#4!;_(Xpe zVywiEX06U0253EcoFmWfdbv_LqS5SGxlg6$BPFUeOPH{48IQReHVP7F2{C0J5`wkb zZQEaM9Pv`)YM;1z;`H2?67^)?#@i*|MMhps95cL+w$=3y!^S7)PtR>F*SuS!?IPBy zpNbO$ZA+8pH?H=Ht0an5JBficM52tdgbDjza*H{z@q|Rm`cPtE?RsFBh_#+uhZ6&B zOMMzQsrHF=^6i9MBr?}M5-pu2O#F0SFRjPrKfuORiTL%25dS&*PZ4X`mz1Z;GiaY! 
zy8V#ck1{fxE^*>n!i0U3`H-Jr<2(6U^DROQl5fSc)~Hw9_noVHHL3QAP~xfGBUf;z zZ+44V!i0UB{N+vXKEv!+x5*(I^yDSGMXYt}1vtIhw$y!elWL!sDskanlZbB}ByK-T zm^kW^?%GxzHiwNy9cBzH3DHdm*0OJtUo@^swNL!K;pl;N61i@kd`*BQOxRb>TegLb zFV>D4_^c3L3c*@O{HeRvzFBG#(g!r8z* z=o1^tw-bJlyLOD+=PY5O>bo7a9&Rl8`I-}pcMGwF5UjQD1~}30#uA@+MWTexlhNmL z8LwEv#GQ|KRU2+hyhI|AwviETjS#GrI}(uz+?ePSkDooghzNYes_i0{Fmam12EezM z-Pqpfg6YK-GEyHP<1TA`_DL7DfwA2uiW0RIzUX-}8?c0lC9`)>8*av#D1^k=&R-z} zYkjn+liI)>gX<759c#HBLqBvwnDJh?v0%d_zXU=oqP%56{-2o;$^Uei8Whx zRvXUunIrLo=L)fx5Uh2@fgQCT@O^yZsJk0S^>WRdoV23I5+?rqNhh`8e7GTZG>*O& zV(=L&imY{WQ)dG{oKMV`Zzp^qUf&_&C$fZz$9~veZ8%@_Mv3QKD?}^#-WF>ueG`#X z+Lqvp`o!%LWqO2oug%5ZWeF2!ws1C_Pd!Wsc*qY5!CLp-(_U@Br}l|6j2cnsdAm9fVkO-ozqneR!F(fitBlJW~kVA1#Dntw+DyTI+$evri0>sQ6#WYI{RjpRk{RcS{C*K43Scq}5 z&%j!9r#Ktfned6NKJ67@EP4FPcC{>F;`ZB|4e?0ChQ!8=5Z&)@U$?7et>ce#HlXDb zn+oxk5F@^5H;^SvnC+(Re=EqhAcXk05O;muZXj!!y*BgL6?{T=SfY2O=Iw5JXCOv=?$LVZz4zC6B_!UuA4RPKYM*9R=31asJW~s|!AHh0Hj| z%8c`~M2Tey6E=6<)f6_y3NccM7rMWhV=bGn=1%;j;1jYZ6UBQnIhHVCbMUK^a87s+ zd~HGI+Q%h6E^FDmd&JCD1)nI$mk{QN2k^Xn6@ev8m@jekVX!e&e1`uDG4hNRIo2}& z;r<&}6?~#re4k}<1^;zPRh}hGm`}86-7jK8_GF6jo=l##%ny3y-zy6~arF4b#UbL6 z9WPOoS;B<*c5j{q8*PNh3306utY!Y(^rKc5d_wkQit(OIo+V6}kNVf$VdF9JMMsD) zdfV=8@~mZk=|5VoEcnDNugxlA2Xd}NwPpzu=4&5RkMqvA3xPXoju5P6{`HUvD+)fb zw|qNcwRrlwNjz_sFkx$kkN*i9ZDnonkr2m7Ja5*rbwZ1SRup`q=& z5+-cz^Vw0b(Op(WKMFBL2-dRo&h;yo7kpy0tks^DmDlwWEuAGy*c$HpcCgX+iW7@( z2{BX%*0Ob5@s8yMpIEtSaB-fD3{&K*2rOa3)}jY)1sjdt9$b7%l4VjRzC5%?2+Niq5DYdvV;lSJ9F2(gY1#v+m;|$%l6A~&3$68 zm$u2lr+!6x2}_u;JwVrYAAfn9+-xCU5`wjC-w%D)C;I<5IQOXZ_Rd3g$+LtB+skzK z;Ma2o=Ux-ybh$rR%l0pE5BkJR*?XNWckQWi*Rq5O+mm%;$w60~n42NQQ9`hm?ZaX$ z@rjj-Zpz`iFR#kz!xAQJZ`qBBgJsuwp%AwV!CJPTj4{zCnq4$Kw?IbG`()f@2@|$^ z?#A|}7fjE6C&XwOcUjB!#WA+~#LCxZ=@pD*Hed-8w%70Gs1ZWo`QS%Fu$JxbV~+BP zq4I3vaGBMb_Gy!62^03r!p*gZ%Cm_&e_Phl?gHQY-&x-I(ui4_4u!ISFhU9#oN90)%R`k8(I|{62&yC>w z_(X4cg0)DzrRm~dv4jbG7Uq1obA(tZLQ%btJ1hx3UK<=Ndw;`JT#!IwFfFk#R1 zoG&^-p5475#Gl1aWG#D+2Vc}DY6fqfhxdA~c&{vB!k$e!pZbqN93@0Y`SJp5+4D&F z)IK45GWujx_GEG_VZxriy0yXd`049WLa>%SSH;@EC$^R+yomJQPgWBwVZxpwyERS= zA>J3_5FuF0o)2S<;}hG+lj<#HRn%VAQ7mD?o_V{qnykF?ZG>1L-=kwKd(MrunooQz zPuw?`)!p&39%Km<_UzuRIhzgGDc?+pk0(ydv6elr$C}e8@a6z!gSGNi1eP#i?;yCf z^K>EbZRCAqUCUbb-T~ImK5<_B2E-S#K4%FN_AZ6n1GroWtmu0S!CLnI1oi-YV!3mM~%Ogt)zo=JM_bc5%*?eFoOD_du|h;S<}+vk#0VV`aaDB}^Q5oO{0n zyDdSmPP8%Z4*IU$K-RkbHuvraw0vR%v4Jm||3mhDSi*$ac6&>$guwIK5#P0|Wi7LZ zy(OQ3wyZT@|D+pHfxG5TOy+VKFW28X^7dOB-YfF=X{9TW5kx>z4|vZRGpn(x$q% zfb>OYZIrc>n;uH3J%nlLS_kdWST*H-IUDkiu^{8GWQM80Src*{tm-71zU zWG(pc|G!c(!7Y+1)t&t{--Xhb;LpJZ`dH;su@<~c5bJ*hncx=jONH4P`QpBV$AL8N z$I7K*E%;g>*1xwi0dEjATEs6E<|s%oMqy4w8e>=GQn41i0nK;4M+PRiMYL4%A06RB zjN!-^V=ZO|q(%5UrD83k*MAgcf?LEdm6d(({yz?~R`_hn@=U8LO1)ka=Es$>pbQ3uLoI=nY6>wtc}oGm$mE~{I^8MX7Am{%ii@L7bCQw^Ff|je`vmf@Rx^32qU! 
zkq|a7;R<3m68GTm);we_n==}0-NXjhkO^)Pzf?Adqqmzk@VmVvWG$Ot8|;YF(jl0P>j_AKj<#!FG@%O3m-=XUMfnY79O)EtJkazUu8NzK%>wEP4M)C@(`@iGT z&o#wc*SjTuhaP?$vD+P+sN{zK{!%#Q!i_b3)W>VYR*DFXU@fz8<5E1SOc99ogxw&EMbCM1SP_c zE7hu5v+As)wW6;-gH@(NNjManBW#ciSXk})qArZns3#+OAE8V z>6WB9C#P-%Yu)wQj!In9{M+>`6%*VdY9lGtKX%V)zG$DX_w27}v|FZBMue<&^zOU= zKTE{~w}@Y=)(iL1e9@PlxOz`bqmNZC6>Hu1Ye9+izk*C~i}*fH)JiN*MAgcf?LEdm6iRQKQykkR(P%z-ZxK;iD7&XhOA}z z4sV4M94SH!x%-?YOjrrs?BmL%L}=G}%iA}swjQ(h97K^z*+@#oT6PWo@DttzPZ154 ziV1EJS0<~u-4i3v-Kg5`uI+yOL)J!8D%P_8y5)fl5Dk`!32qU;R5pI}pVYM4MxrKz zn*L6ySj+D1=HG0TB^oRh6Wk(Ns?e^vjkPG1joQE6@_VIXEgJ_LBpNIg6Wk(xscc@t z6|@}wi9+jQqn!-G_RoOreFh`Jv?#rwp51mcw~N!GgtvTsV&^!{UA;uDHR!RgPPruE2IKkA(_wWnEl zTiGY3O1!=UMx5R6>5FQWKzWfb6K;p4N6EcUeZ}g3lW*l>F8&IeY`_}iE>ypycb}oSbG-_xpCvkdM*{Mh!%yG5pM5Q#$7#^8WW)rtc5hRe4^9c ztx&39>r>CybE&XlT1K>5jnkk#No1kbbpOEWO z{9<9ZnYbP-VZ!Zf+clT#A#r_z5Z5EEW!J_hq?dpQ&`Vg7zJpN@cP@3eQ|~Sw-}jqY zlhE7gKIi+JwZ1>b_lbm5l8yLD~qnzi;zLOoT?T7Sfm* zd_v}^2-gF16ibjcqUmFJ&%x$FnWLf>e_Z_><|x*(H}EkJ`h?82QM2nu_rqMv5+>YJ z4x7&>x9W&eh40E-o7O_9FjM=4cmP@tcmOOx+6ecg!Tge#P@f^K1p*$2Pl)fMHsJfP z1ZgAAnu(LMJxb2M65l6kI;c}$_&%&Z!6Y~?rhtpEQ zhf8arRPYmh;^0*ywRJ~|X9edWVHbg zIjseI@O^#afObb}J-W7Bqo*^`il${mI2mv9n5Tc7d}CzT^z&zaEq*)lGA%s2K^m(G zpODpr+Q4dpB}}*{Kel2S|J-TOch4Q)e-Bv|*%>UXgz&5gHn85gtxZQQm0PR*W!R)> z+Rj5}-65+xmN4O-Q`rjg#OdRsJ=-+dM3nfI_ zdg!#KIL|428Bx5K;hoO3o`|<&eS&&ohJXzaOzU2T_7d64@J?si9gMepePWc{A8SrH zyI&vqo)=4)u#v&NhrIH>@oMAWZC>?GXWHEbEuXmSW;gomdD&t;Yl*fpEz|>P8SpmIg$>&U&HGli%%}X9*KF&%5&m=U=vmUh|%Ex+bj!d(iTU>zg~jWSo4rk0s^} zfG~f>otub>&JlJL)JLnSxZ~*;B1#q$gV+g-_HhM*C5XlCTtz$PM&wPYjN6glC!VNpVYmKkx~!N%k~_QukDJsw!-em zZ7sU=lid&RtflRjpsjpDu1BuTm~Jz1Jy^no?fbZEj$ItoTy_n-vzE}ZYvU8rOO!w_ zVF?qqf93jaOaw+)zP5h_EuZ+{8r>rcW=U^nE!!`{9pw|)hkI^|fqF-=gbCZ9b9ZeZ zdwnBX=w6?9a?*AOai9Byj0~u`jtnee!gf+Y%h%Y~=x%NJu8dbKVZ!!z-6$&KRqp$} zPV4uMj908>d#@N1ed5wJRr!(o-8}P?L&xf=M)Wx3#f0rsyV;;)vn9D%lke>RUm15< z3;E*Ofc6R4KrdMw+h8rEk*`n49Hj*2D3%~?gzdw-c@X>UW7@95UWRva(st-E5Bdb2 z8w`6}_cHP+IQBkcK(^RLAB$$xNJ=f3cLSj(R2z`ybdJdcX^GV&~8!k$w(KM~KPaLr{e zBcIknsi5T(c&-LQo~z|qV$Zxl*poPSMiI}|F5C3`zVcj63Cq`>vBBT<33}R>Piuij zzCKYa&md8%+ncY^Q;oPkOv?y+2I)NgcDo#%pI&?Z%su8U^Uguq^GIDA#Lqb$A*%`4 z2(X&qyqG{5wm}DhtZ|g+D{CCqvZuOONBP9^`^M)l8ZfWli?YUH!k+MA9mTZw6xllm z2`$tdHhcolw$WA}cf3wdHKM$xWrRK3b}L9c+wRk~_e|ND@XkTnb8f6TeS)62=UBpo zJ);LL(H0i!Q~dxdVb-$u0I=Hj3A{OAt8JDrVQ&<;JpftJYt6BuPivv((DDh{HBbWk z3@ky~2zQdi_A=50YauNyucsQ(>PYMEhxQWeesEq$8)3HHsYcl^(NY<~T1Z37CuA2# zZD1FNB~}j*)*>$Qmy|93!P{r$2;+FSx*VUprts#_bEO`=q}P{!K-5UtO;S@|CWcTt zO}O{8k(a%*jr0)d9eDdYwh;;?|I~&^@MgCWrj;UMGpNMeo{Bt2^z4pnUnN@pXa$$zOYly9QgxwcSiw zphjx$&cXT-=3_#xcM!-|heflkeNR}^_~xj^R6zVpr|;?z1oF*V)r!ABo1`sD38KX+ z5gH+1pjEAl?_6Fm8flI>;uG?HUnTGrSBr#!sHgI6Vg0-N?yE3nBV;X$Yk_#k6^Vc) zOvpEq)rR}-E8Ac#i)(?n&KUxbwk}JU$h5B2Bj8rF_CbqUySZ=ua;aF$BCH^qZbibo zf=tL4kG1CR>%eS-wJgF4A}CiR0+ukrS5_?06=W@ou!3m183K^DE=!o;zMwV|!mjxf zV^$PQ3z1tar|GMg7d#^ASI{yWh$mB-U-ry`N`66D2%^*2Ny@@xd$t1Vs%V&n1{qeK{A!H~5q`UxW4RwTUfiV68byY^l8 zm1D7z*kCP-eu7B16^Vc)Ovsn!)rR}ZvDPH+iSpfOm0(=5=vf%A|M)R77Gh^ZH7o}u!IR4!Ly?eYgsHTj6NA6vB451c;pjh z{c$yyFJ@~EF(O-xHH^F5>Q3GI6>C|Acf*pE0 zm)0*8Ygs%x%&!?DDHThY;QINc;=9vY6nCn%dNMMERu9&)>w%FWLnNhQ2@}?Pk`c~q zgsf#f1|wXCNNli#3A+n2BXwYNZS9;D1sgvOt65pFF$NK#BzygF*SyuUgY^o2_Q!zUx56!oB%)2r7s&PR$;c2|Ph>5N35gik8A1%X zQn7>y`QEzP$c}KVWicT!!exlW21}UWKCLxR>cLtT6A~kJhDdC%gbBV2)JBHDRkNOt zt7$zSal?F~>2-s&OFoiJoaPO{d(DiZNzGZy=05no86qha zOPH`36~4A8OVe6!XDyp+5!*0B08-zFB~0)r0TX2#tYxz$VjE@%vk|g{3GO{=!w88r zIzW3L`jYv+7^8S5@`#YNY(0olRV2JwjR{`K_%>L}R;*~<$^=W8;1!Z@SYMQ+?dD`CTx^IL_x1q0nghU$BpNCj|f?dqsvt$Si;0V 
z7gcMEYp|^)DVS-y-^vDKlakRe-0Z8vCmN3EBNo^zqk2vOW zAaZ18^a;%dYndN}tCk@W8!TbM{H~;5CBzBcw$*kY_GqJOi%*X4W*CvoA6BZ6wJh2= zzUGl35*sXG!b*r4PL%b>weAy-ZKI`vH+fC`)dfWO`^!l8_b!XDdoDkrwH0geHv%#Q zAiaVtVWL-0XG0WI`*5tq-#W+;i4B%8VYa`$E@dNREwhJseHkLL!4f8{4KcTia=EQS z*0OfPH!U(mVuK}2RQ4In%fQ-zze8X-;mUfY3RuhX#R?)rWJ-m+n6MI}$Eb~juvH4$ z#3DSR=6`#{tzNR%gS9w*Vnrfg2@`L;;cQ4@QrCmEI9_9Af+b9t?aciVvXh zuOzctX!T$%%NLP{Gely8B}`ap;i+k<5*w^#^+4p|43XGi2@}@pS>K1XtfdioI71{h zSi*$$9*oCcJwn#99)rlk86vU45+YTSj)U+j3pT& zX)Bg6!DD3*{3P-EVM%_==zyo8cv@V3GV0Pp;=kj0bWDV0!aYm%pPx2B#5T&3G*Nz* zu61|MYeUrI_~X{qV|+z48l(}?0d0u1cRf%Sjag$_i0^_l;#y<~C@aAdCeRl^d;LlY z)5 z5+<fcBozYpGZZv3H<_m^B$9vB451@Qf9- z_gqbFu-4am_E#-LxycZT4VEy0XRNT{Jy%m3tc3_Y&_Z;e43XGi2@`nw3fgHV|&0_h_R$L zWNfcYu!ISC&Y&}MRKQw@=>#pzQI!dnFaggQbY`v%SPN07poO`%GQkoi;5mcN_zVGS zA@UQn;4@SvSi%H6XVBhyQ2Q=xA<`7I;K@`bSi%JMOF(-oVYR_ph=m0$M9iv8u!ITh zIf3@}OVkEyAwrkdqv-~^f7LL-c`<>#GZ1oP`*Ur;T8Ij3T9pZwFk!a6{Swqe_Djn7 znmt5Z%n(r4nzMunYeR3pL~XE^wHqQ;W{AWFOPH{}kgV>E2wBUnKO$gOBm$N&VSPGT zr6x95%la{5W>zEumM~%WX0qB&Y_OKykBBu|kqB7AgpD!Du0djhwQO8L4AY84z!D~G ztn{8J>NRIA8y^w%v?39(gb5q-lP9A|Jy^@ec|?n?NCYfl!sf2z32$P9wQRmZ6xxbJ zz!D~G4#t}blD+;oFPyb(-bLKnibTK?Cd`*eo~4QvXM?rOe?Yw7ibTK?Cd?=D-mKAU z&RXUNA&ziGB47y<=G!H22qpDkE%WCP54j?d_=(7i3G-2tH=Pn2tYv;FA~shf5`PzY zF=4)TVk5Nc!CK~D!$yX%Qt5k^m;sO%6SihZ+A6WZTDDF=TV;sE21}T*wNG-*6C12$ z>m6M443XGi2@|%4OZskNgSBkkhQ6C25*sXG!q%e6J($>FEn9!$9?TGl4VExrYwBby zNo=r|t)nrPWQfEDOPH{|fn-cfY_OK?2VhLh5QzLT0TNvX)(ctkp6^VxvdjmeS_ti}0OBm$N&(e2dHYOhQ2%Q|Pk>cLtcUVe~jUHaOxx(p$htH<7v`;OlI zkIkrCw*t>LCzfvM@j~6&{_ZPk&-HoQ*+_^kE%8iZVyX9mv+LR&(OT0#JvR4u2-do~ zsk=58-~ON9Azpr~spk8iCadb6mbD@5b?6rSPV2JPb^me1>@kfi6JF~w!BVe z&)9pUy9T)C-0Dh%c0E|@_zCWMpvPngvk|a_iPQf$Mq2@O@@=qWsEj4nR-BVhuvY)> zjzCLihz3i=1ottukq~FCaQ7Xq^E=nO`vd)z`?OE6R+qEg-G#nenP3SM+IUG%C^;p{3lx~Cc?IEOiaeB#0G2eU0aa|xP}XE#}`&*l(@L=ck01f zRZa0uq8okIuO3Wr8~Qcpdc3%K54F*2#8Y(xm$>gwBJC3_Y5dZTN?;u1oP2_{{{E!< z9w@G0Wr8J4@ObRo;L&-9W4BIPmviz7*1Bx7EtNp;t4y$j2_Ex(8(R<9ynr*#TXp)O zu-W--H9ddsvcmooaL-SzzvLr2$D%W*Jlj?Y&Q$;U=pQxx>8bYJt*P?=@oZauLhJ3U z_3*uj#W}V9*3}IVfXqh0PF^$7(r7~z3O`vw>LF5Bb7>JxyLN**5t^6#lG$#QF1I@JXQHe% zZSJ3xXx2_!H>Tt30m$l+NSNU3q&5++#7oCwP`)ts`*_%idL%TMue>O z@V)S_rq*AX`#DR%j9o#Ix?atMf1RjQA!~g(7IOyZ72Z8B`ugK+a9&JQ9vMWUrNWr8 z%X0W|Q|sTG`wP*Ez4S4@9X+-F{P)*n3Bg>c*oKr!3DkpgafKetYShxrk&asdY4_vWit8b*t2A1d zTUzx)yB@5yRbNNU{c3HN5X{-&yqJ)*mI}0QgSE=_2&ywgV#AkM&9xQcny1(2oUh+L z)>bhQJURU2{F068Bzhgj*Cc*LlE&Y?x2ZxS{4cz79F7!$dPLd=al{k%wHuDAF(i)3 z5nsVDCgeA3aZHXBK{i<85pf)rlno(S%cbM*@%ThyL!M1YySpeJ@(e-Kc)vi-E@?U& z#Y6VIRf0E9WN$i2%lTnVXQOzS7WS!)Mq17fE0M$lkT!CyD^CGbf;Tqg%$KHdDqfyI zXxe+r0@ovCEjhuZTJgEfD@e|gw_QQjl5<{4<1~4O0HnQyB}`;m*K7pb ziq<}8QENBveIKnkYsqP8EmeGuUeg%@&n6NH6Y|7F|L(nuqc&JeP6n!091oyj0?$|y z2@`x}rJ1O8Sxe3mDjml)&~)jkQWoyhRJiB#0G0wyTzyC)kc=!yqK`Q zkUWn{Y_OJH|M+yj+Q<+|zk-Ab>!HbgZZ`1LQuD&o7aNK2ltfO^>ff^`@8uc2O0efG z?*wQ%J_V!c?8$q13ZN3ar6X@+YdStztZDBJA?@v~CFhb=D?YvK6G=VfX+|PpLY}1P z-@P}T)COzGX=&ApV~hAiVuSNyg8Q*3(;dZHa&BAmjbo%}I(f${vB7yUQThJhYinZ{ zt}~DMUa6814q6f&MoSXMlhJgxUvXYc$g@c$lHMN1BSXkq62C^Z;)ih>jYpZ0WPHN6t60=B&I6{-A6C$y}c`;$v zDLY58mR-R((v;drh{Oix#f0^@%*+|Gmi4hXVwT!Sh{Oix#f04}iMNyxe6Pu=Z_O88 z7|&gzO!ql!$r)}X;^<1dEs$H z2Q6*#?uRhG-wp{A>;yRhs(Hcll5=pH#yL3l)cks|mYkwf zB0hJg>EwNeq*R<26Y@5t65d-DT65NtbAC?Cogegx#0KZZ1h=}@BeeU2wdA~_<{O`b z)O7NGiP^xLnTdo6zD{Z*A>_%r5_lFb&+9cEpU%~EHfD|aySRepmtwRtuQq!H`ChZx z2jd6dn_8;S?hn>7KQWFWq@~J^SDY6U=G7*nsM!cv%RJdQ7LwXXh@`DJFDAGZyn2MJ zWga=^j|`F6U!oc7L#z`44f-8nuxbcSFvL3GSh417>iaBXW!8 zh5JCxaB3Ro&-uPlBD7MmmYe`pB0e*!>Ex{!!JG}wiwSuzO9}7XgpNL}C1+GsD?a<` z6NwGZiwSwZOo`@WL&pjNcxxmX#2meul`D3VAVMEt2@6 
zR`Y;{O8M5~WYuRcdjzFU} zGDNcefP@KK`(!;C*0S|Z97#uQWC)yaOC(J2sN|Z~+KRPo-3E^=LnLj*5+-;YQyU=i zNbT|-;{T5;jUUpc{)T6JYZ~t)U)Qy7lCDg!*0KNXr^K+~cpt?5I2)`b-m7YTeo&kG z3=zhq3R!FPkp4PQ8_h0g=4>RyKepRJCAhY-qoZkD!FfA&)iipKDC>{b1N%OS*6Hv6QPb#G z6^Vc)Oq{mwP6_SV2+rGfTb1lNb>n)u=QX|jh)wFRJfef9lX`@9Jy>hg8IC@Ad9y4b zhTQewyqL(p-BE31uLo;Q>f^L!o-KW zcUC%kf3Vh-MZD2Cu7vv|LnJm>!UXpoQI@8)>%m(8dLHkjjw|6l&k%t0{$L3c+}~gV zKaP0*sckgh!An-ui5I5nMJ-p=O&jh0e#0Sa>YTa}taZi}j+ond&F>JL7ZW*+967F3 zdiS@K4c3})oU`%d8^6_6CRoD6%Hgga&v$Jkb(5bku1CmPOD=IX{?@-yeTD#}t;-T7 zCOqV9+&ivu$_8tl`-!u$^W}}}Gely8B}_bAz`LsBO6Od)Vaf(;wYb2QB=_ou^%)|u z!4f9kZR19X7w_39WrMZm9_&ie=I4#-Gely8B~0+BB+7I>SgZd@t|Z-?Y*L>g5*sXG zg2yq>M)1wIZp3LZ`qMh`&a_3({(N!W^mE*pxVrd7ok!qVQKGf+`)*7;_{GmF69MPN z#H?P99yVb~$_8uQF~!yVjyu1ut4IVaVPaVqNB=zjyOa&qI^bk?1s|UEZCynoU$8~4N(Kh{+w0+ukrqmp+$0@j+m)ZI}hUA3aFGQkoicpOt3@*j7f zpR;Hez2@Ifc(3k?4&9UVc{{oE(uME(H4j>x>1N@-ef55wJc&_a%^C02Ej!S8087Sv z;1hV?C(+vY4^Hc`58kg#1e_NWb$dHs;*q`|rfjg*w_myWEMel7L)|<$ z=yLh)8-BvLtwPrN<|{W3Uea@6T}2{b2@|_r@80x4~NfI@I0shxS=qmmyFvoefyR1dkYMBOwmGY>-}aj54yi*EB|-%{O&vj7my` z*1D`Udb-m>sWODw2wB3!+oujxd)d}yt;Rn&EwpZiNNli#iSg6-P#f7^!djbeIY710 zOEN@agC$H1n&DPR+1}1tn{C-&ZJ@Vjh{Og3VHdw+$>jl+nBRjUU)>AjSl3;Al5QzMxT%+Obi;^Q%kt& zhxh74k8H5kFXuX~dk&vpmmvVD4VEy$bE0p9wdB1eEy?ON?|QSS5@sXdyqL($wF$v% z^6&O`y#%Y&%=$dEda#!4Uuvna63!5b4VEzR*H4^1tf@trT9>t6y2;sq_nILR8!TaB z=cSI$df}|K>p$In4lg`IBsN&W#6d+zXIB%f_2WhHcqOZe43XGi2@_Wx4C}|w}23DydYh8Jpvw>A=hA4;JHhbsqeCuJmJ&(1A71&!fXUAVS;Bn&ql!a z^IzvX{~CTY``SLiTJm0aGL}>(Si(fzeLE|i9TQpW4}W!97!xZJ0ZW+R6@XWdkhQ)a z;Jki}?HM9zU6wGxYlb9tbR0u^NWn#$#@U`Bg#*qXSPBB9YvRA-8@Uv1TE=%@%Ms9G z;&b;6>EN;A;R7YYU*U~6PLq6tYNT<}8)=Ekr+-I;l^}?T&LZb4i4Me;Nf|v-6U@fGf z#?{+&{>v6z4%W^D1+6altr#29Q zCd0OoT?T7Sho2iKfkWQyYsG^vkmZX(J>mpR*y6t%4vXLL*p9)1u`Q z&n?|rZ4}?wKhF}RjgY8(Y6B6vf*>YBBUlS*X!*n+*Ho#EZN`qxvjk})Br2cUK%}uC zh>6e$)7$KCPE`v3u$QiMBk2wsf~qIlk+S=+6altr#2A5E(l^GG=jB|hL%q} zf6_+-(N^zWctM^eNE;zh`P2p?@dZIlghsFy($Mk=$v%V)c(5!%+6altM>e49{yBoR zIE@iJDOIfyS`W?(X(J>mAGMX#1BBKitpx&YdAq5UeY1p-<=A-!E~ptrLGX(J>mpW1NuU`*)! 
zk=6nMEuWCPHbPtBu4M_*Mo3gXwc*B+n9%z?tpx&FJ|Uxz+Q8_;5~PigsC;Sz(f)M2 z0-@toS_=fUd_qQ1wSiHTB}f|~QTdz=9ou6<$KAA+5~Af3G8?E3%myq$+6altr#9Ri z6%#stq_sdm%O_-3QyZAoSc0?>5|vMFxVbhabRJA=fq<4z$n2~(FgvpZX(J>mpW1Lf zLrmy=p4I{ZEuRoCLv6szUNYOfVadFq>Yd$aB9Q( za516&Ray%Kw0uImKD7a_k0nSOAu;dNhVw;ZLjAa~Hl!69j2OZAiX0UqQb1=S$5d>)hHjuB)yU_B9Ue0F-*NizKVhI!GOW?~M600ahzy|U){{dP) zL7q&+5+=+i!WT-^r-mm3-$x19K)&V&LCYtqwpSl6e6ebB#1baVx5JlKf*?)62J$t3 z4q84zo@K-mCd^00S83FzHeXZ;*g(GKmqN=YP;2)uj99{i`P$A#nt%;R%nP?Pd}>F~ z+92Y*n6Nd2Yb#nCC}B2iH32Q3kk-vf$pTytmM~#!A9u}TLa$(2%dU-2#4E1=y@VyU zjzYe+hI4&4CbVCrwO|8UJ|Vqb>w)`&CAJ;}VQW!$55|PvQE4sMfR<0lU7JT+;XY@H zt!qKpn%a#eF`*+vS_?Ly1sl-v37MVM2Ig~?*!~p=+dFeULrka#kk*0?X!%6Ea~HtNV2SO+ zfv`P5=ljG2d}<{uU)vpomQTbxkl3>3_p?+doOYMm->J#F<>h*xX%M#n)MZUbZtp2Xn!wAclcb$C#_qh8B^IuI~ z6ZPo%P5m2hw5`AWwm;Wd8h;OhRr+_;4duV`FU+$R(zoBXK?!t&1R%9xd8G&`c_kZ` zuMmSQEjF@**+?X5!s;Z7T8E?_NxmRV3pOmR1R%*9KQ;QSAdW{CKR1B?$xjE$+8}z# zDrH3J%{Shc-@OW#UOT+B%?B=xzX!o0`Fl)+NI|R9@B>Sa_y3O)_k1#tqo>-vi%B5nhIdeyr)@@ROXs}dFlv^ZSszZJn zRXX9VkF+E$=8P(BuyW!5C>3jsxqDP;M%M~NgQa4k+#+$Q5~5Yx(WU!OM~~}OK>E;Y zKmH%3Vy%}q8C^Q&U;k~GNNkkby;p&Wa*L!(g_{4i(Wja(wEo_Gal{3 zj(TpxFSR7-eLG(E#s4T3YoYH}{p{*gww<<7mh>txQErhmfzhX9qp!3ixKl^Gfv!ile?(;A44nOgGs4atLdVl$sN3YrDCGoBI#1~|9Ssp#QA5l zp_Rw0q*Sc+<8DKf(Qf@p#YDM9s8k_(2})(7^ABI{zaC>dYawkj!}^tqiE@jiONH{< zOon>=?|G25&~7%1u3xE`D7Q$uRJgV_>*JdLZ=Zp+(8tWPAuj2k=xkS9*Dq#?`G>ch~e!t+AWU#8>a#oa=w; z*l56`9~76&-a)mBHC5G3A9Lxqj!fIw@0?O@-;>8iTXcG|xMs`FO1wR{QT16fJ88OV z2A(dkjbU$8<<&-nntZi~5=S4lx^VSg12p~1 zb*odvM~AE}o_zG!=%XoZa#OE#*LmhqYYTSGv-NmlP^0KPX{+bvKP2xQl&xEjK`Xp| zfa^U?+y5r^q;(^-?#B0hHsHA9oFv_%J|V3eS?h91*aq7Uf<{*^Ek5^`vC-<96J-al zT&nO=JPps4ChYcW;qb*SedD#i%HQ7j?DOKrCytG-IQXL6Db1av?xi&aCOD@c_-K#i zMbu-vlLqHFFK#!V`0|1U#S4V^r1QO;9lty(%XyH+gV~G1A4oS7`O|9p!3+y#pjM38?AU{b*}p{ zSkdQ72X3{hzy#+M1kD~lym*n6>V{jEE_x$%IGuo>PE8HQ0^r*`fRgs zWx+W%%vHzO`E)KQ`LrxT^fnPw}ONTtlCEqvMMeU(fPq*7Ou^j|bkAYT?mk+`FH@OnZ%LL~ncboj6C(d76 zmFK*;PCg;^7-;pFbH|_5hF%Y{=M!=Tb9M!JRAbsFWQ5Dv2*(l|+cCn~n2(V!2&C`k ztnXGGjW?)VZ)bvY3WD<=KRk!NJ4t%UX$N*y0)3aqV4qmr_VOHR-nqx(99w5PMVwX2 z*|^IRn{hDi+DwG;I0)oE&)I$c*bm!lsW37y!8yqYchxgFyU#f;?$Hd8ayUmS>H~r|+82NM*&Dkh=Z+j=fXc-dW+t}s81zM_5MmWxk>*NzMisoz- z9XG+vmY8waUWzzmZO-N>o?n^v37MnvHb>cfj=6|uYM(F<0CN=2`b_(TcuRTnmRMpX zgtuh&;6(+&5~;_DGS_}SrA^Vw-f8#-)l6_sLGZNHW3u$b7v?`y%XwL;;G_7&=2tAu zJuUO#S0|iUWQkqDm%rSg+FE+X47|y4y7ZC_q?gnja#7K)V8`~2s+r)NY zRDb`=;3DV6b@GWJ(n}@?ab4$|i`Fx`pWCRK?fJxxa({dz^Wd73A1K;A0l$rDpE&i( zXS82UzdtJ4J+ZPwRkhtK@Bn>cnT#bfWggt|thq%SKj2L=?GvwTb$M>0)T6fh;-ZZP zWA3i1wow9psZWT9oHGxZB{n|7L$;9!o^ufNkWusrnFlv{b#>83&O?5xs%C<73WAxB z9-cc}O4ayp%Zi*Am(VA~3m<4+I7@8)fERA_3cT|mn15*Bfh}c@>Tz6EWb;SMwvDTq z;GBZs{?&^I;+j`|6-1mD*U2YjiV9B?V(Hqd$mS*3V0%6>X>8wGlxjCAmCeW~JJUWP zrK+`3*^I2MOKTCIkn2&j>tVhKt{T%mAuE=mtyoxMz83nJc}}j!NKY(UPc$zKeU}N& zDG2s`^zh<2(pK%{nw!6ao@joM>q})1CkUEseR&bRJJKE}d8Lv1mu9Hv5m{_zi(bfYP+u5E^$ZSxw*}&E~ zm@}C637HKdn+#QLoOGDmt>2gkKg0+tN>A=#~n>rgAB7`y2gSC#Id0?r{ zfC_{F_;Jr`WrUHtsHVWg&O07h+VSd!>QR=|6dJD^QJQsU1skClkbl0`*|&^{+lOtZ zYhJElO@RsQk-3&m6Ru~t>-_e^`<0#<^j~f1wX^ptjcJD`jcMY}_M=Omt;O>p{zg?z zq1*7$rJXLu-(w;~BCXZqoYAElTRJ*J2;hjTpFg01>s(W~tJ$#lYNl;)&2QUlSaJm$ zB*aWjfo%-!b3oF^$~mQNu-4$N2b3P&<$v|Wa$YqBCQf*MXsPLKcy2^Cv{ZaYA${Oq zKGv2lU#Er$(+cn2uk=Mwfq*gjvHRRzb0*5yKepijA@1qFf9Z$s@jP3u`DOc;PWk?W zxKx!!2DH_R;(MxvmVTj!OP9yWv<=)7dmTML&NmrRwI81_7=NdlhprxFE%fl7Jspjn z?-5Bo9(i_D>7}lCTcDyPFVdyz+xVc;VZY&ttKZwFbw8*y^=CX$ zPM4~jS52X+*FmL+K803IAwwt%3EP-7e{|`nw?9kSOWRyPCqU8w_8M*?_g)d2d)e z%dGEgz(jd`OqZ%WF4YtsJ8Woj1NUib1<*g|&am?cV|Btda zf!Axg{{N2*h#?7zASx+QQ$s3|n{b~;gr+S@32lX%qG-%R3^hE4im`?mT2oM|npKf| 
zL!Rg6qD71`6GaTESq(A7@L!*`&u8DyKIcB)pMPGzZ|&b*>%G@L`|Pv#+H0TZtbAP& z8%~_I-q!J%cyQ6rM7@_m|+}pP9 zeCGHjb9*fy6UV3)?iy#&=6+(AWa1blaR0-6Tx=D_>$8W4`5Ys^yfVl=_bXQ|JY8W0 zK#y>hDB9?fOdOLETuX`s{KPlk3$22e`Tt(kbj8s*81vXey|5A$ZD2LHHk;>TNeRsP z(>BB#X>#Q?+;i2!+|HG{L;$saVa1^htV??=4Np&ND%wyjtZKnG?1l&?lDfh=NMhLd zopLX;c;#J^iDORMW2flJbX{MuRjRf7#GRtIR3f@%f{pEvzM<{SI%Kc0&|eg}Nq=Nr}Q!TMJ041)q9{C;nfar9S6Mbor>o zD_4&dI|x4WT{RW2T(z*izJG6+B}#-`Ij?Zy7$v%9zGAC%#(`%+|GK#i)q-F0|F5r< zfKL?eX0cUWlHkMf{x@$`*W9P`$9J8nyPoYpy!!CKbR^nHN$UmC75xagrEwdI@e)b` zO`>^(MRyR`%W~!HYN3o0Zvc=<-+cUoU6&YYu!v zX!gFp`vy;q{Whb|Z2T$#W&16N|NVuvd)fbAe(&srYM~6Rl(-@PZTN=a8$z)zXpyjA ztN7omW0o`Mb;{Z=&_`VEMp76@pi zME-kc{O%6l5Q=r7Ou~MH=YOBDzUr>){(hu`Xzl zuwQNZ-#58wO|y{`H4>_YGPF|S{QO%ldxvib#k!zH!hU_~e_t^hVm5N3MnbhvhE_^^ ze{lF_&12ykLa{Drk+5IU`rqGt9X4{JMnbhvhE_^ka*%&XCwnY>Lnzh-EfV&dVgLJA zzgio86^I%M)j}CsDG^p0th4?_pja2QNZ4^JppBaGc5p;{5n)j}CsDPdlQ+kls$5|l~UH(A_9@O_GeYM~6R zlrV3}ZNOVn3Cbkw+cIt=_;5u+wNQpuN|@K@HsJNC1Z5KTjU2ZTe9t2WCV1$_dp%8Cofk-@(l8Ilc(wHmF5{V`1E}3(SVxb0<^_ zWoV_uKKXY!Uk+acavRhl!7)9(<7Kmv6Eza5g)+2KB7dSXdj|0hA-6#-5`1bO+4niK zAo*-`hKjhv{FP%V_9l@izH-{g2W zd_%}>P>TfDt}hPiFdI2hBcWO-Ln|fnl`}IB8Q&0c8`L7fHTbY0_m~Y?Yn@Onl%bUp z59HrSc`kfI$Zb%I1bvCA*R`3AoT!mdEtH{^61gXnneT%y0=W%pk)TiX%(r)%4e@=P zP%V_9l@hr}mYENSZwR>!YLTGVxA7)-m<{paoKP*4p_LNX9~i#N`Q(Ww;EO#%|Qp%U&tP%n2y;hyINyl^L2FLy=IN(mb!ZUf^= zB}#C27shT*V6Wx`Y@lB5?x2+tHrm|=M!QOs;4U>hgE`^PM^Ov*pp_E#)Hcvpc+OSA zeJu#?w!>VK6Y#>FV7=UJLn|e0_Hi4SeN>_ZV*_DM%n6^bidwJ-t(34?)NNoERSDw^ zAQxW7hF9E?=O<1FH0^j9yg7q>!1Fe*>Rn2W+9aM=D zj5&q1HYf01PA6ak^)k)`t(36U*==AwSBVmgodusEC-7ZPCtw5hGF}F)lrS&DZNSS= zi4u$s2Hz(q+%G9=dB2(OlM?1FxefSNDq%bk^)kvCe7KxI%+d+g%P1$bQo_7Gw*fy< zB}y<_8+_56KK7(<5lsuCqMw%ip%CIK6$SEJ4;u^|8U=eGCk)@oV6 z&FdauXE)gvDCaS2WHa}_hb-VZQ6r&RC_^hHZpr&<&h^D$YL6Eza5g)+2KLgx0WN>C>8 z$mu_D8*`V34Vl}WP%V_9l@e>d7uKi)?Ay^QL7BuuJFew6cAbddbLK>igleG-t(5re z%dpnASe#lVD3f?%4C0{!I%hq$&}`&Hjf85U46T&t{3`eiS6UoSB`A|vY1?(&#y@t3 zjhv{FP%V_9l@co+8+@O?T0BuDD3dtq;Pu?b^e;a(8#z%Up;{NZZk0yc7@MnbhvhE__r zZ-tnp#i><-GKrrK8YVW1glcISqa?POBX`@_ZG^tE>qrRILK#{qVZB?i zmU-{01Z5IuoHxR4gnM2jR10NjrG$+Vw}DZj5|l~2eLJ$64d@JGw@9cK%Fs#)8|`ib zqg^E^li096%58*aut=yD%Fs#)duprbD?GI-L7Bven>KYDVJ;~Ws)aJNQsTW31E^hO z^OZ_aCNcb|R<{x6#3G?uC_^hHei>pJwae{%P$ejncwm*P+X!=ekx(s^p_LN08hBq} zHBbr4B>J4Yx!VY9RFP0El%bUpwyL=etZFJjnZzY?$GDBK))ooXLK#{qVXL#-!0N0L zlu7*kl`Y&x@EMAPYM~6RlrS&DZNSS=3Cbk;-!;~41mCAfs20l5N(u9p+y=ZQm7q*w zyY07h8^MPw5~_tVv{J&nzWja+si*{H5*ObbYy@AlNT?Rd&`JsOUfl+~SCybl;$hEy z7X08WD-x=OvOUpTqy+OvJu+*zSt>!91Z}@}E`DFl{81!S3uS1fM1JL&dB}B@@H#-S z57+-betjr?J+G@o366!n*TaU~b0=T}^>UOzD|e>xPuwakec3D`ir&_WqnDe=er+qjdT`k<~7CHQRq;uhG*i5dyk%cmAvDe+kT zjo*p3+g6DZoMSE@f?r|hM2!S&pkB^C&`OCb!)Ku8Wi5dymK)swr zp_LN(Or6=>-cX4WobxYP2pclDI{_Q0m#YD^QX*e*GFzh>Dp7)K*W7LK+i+QON>`vfBQ^!+-tE+x2U2z`|k*c&v8TD+!|u->hsWw;+I;r;{la_^Yra zz1*cjD<$lyZJ@94oU23$?y19Ek`wU4oq!G0%iT7#QbJ+?4V5Ut*g%*QB?jOGY@l97 z4WN}063b|)LY;gn1cm z173zo7>5JF7+~;ya>D(Rq84mGD<#ZZavSikRKj>72*xsl50?{&SvtXb8RdjlN|@K@ zHsB|!LpAjT4<#N^GCsZRf!TBTkgsqMFKWZuST7_C76>n`q2{E z#uxbg?=dGUuADhp`G$yVAugWhWECRMABDUC$jKTdjJJbetUu)Ql8jSMuwI@5Kr1CQ zXP8Qq;F(3t$?DgEEMrA2*voUWdTWzRX2{b}(Ykp40WF@r#GI^dg9Pj4c}t#?)ommh z+3@ZMa{{V7f;FZ zoUC3`k|7W8ejq1nlqkV7y_l2LZIFNs)Wy@jJSVHy6o~HLr=}KYlzBE8@(*_RJ~g$R z2sv4U4b51X)0BM^E1DCT61 z5+(R9MUwrLIa%EX>f-woc}`ZZDG>0j>|f3PUsdZ;f^Um-PoS?L;d>w~W6lqRWP_?| zT}trXmU!hJr$%nsq898yDK zHHstAZIDncl%bUpnt@m)D3jpZRPl*&8+jJjL<a;0KeS$Gfq+&@$QzBw$?7(!MS|}}#+B1;U>($Y zp#=h3DIsrWA}6ccpcVxC8wXr+W?gNpA{Q;P)OVvU}R+kju9_44g# zXr)A+KZ+Sv{W_=x8z}Rg+u*|$3DrUwS}7sfpyK<~)FQ!mhl4L#BvcD!Xr%-r!g)?s 
zzYc1Vkhh~PUQ#4f3uSvgk(1SHLOH%2jo&CDCu@|*`_#y=>VFS;u;iU>Cs;4EP=;1Y z-XQ=?vJ!3MNaLUV?xLK5^>SQ6D3Tp9BAX->VO7HmK(CGuascy2(Ia7G2eIX~ndl;6rAC#&BB zYPpS&lhsFilCdz$bFxMW*AHlMRSS6(HOFC53pSvY5}K<~B`A~N8XWRLYOcnj76@pi zgyx)73DB2J-aQEJCZLrP*1OoTFej@&AJk$maPJfD zc}_4VtB-IH+!aA9C2W+q4fBxW`_$Be4V1Zu3u8AYn3L6QPzwaKQo=^N+sJdW`gKr? z1oyS!8O#ahWOW-LxJ!jrO4w7|KwrHPaNcoFf-$GC*5(9rvbqfrj7C8# zC2Vze8+lGvw?Qq~K$)?#;4|a|bF#V(YJq@ON|=}7Hu4OsZi8AR7y}HxPfjo=tJ?s< z=peLG!n`H7k!M(S8`Odglo`tmK3q;PC#&0_76@pign4~#BhSg|HmF5{G1=gY<^*%H zx(yJF)

+Hgr>qo{?*1^}%qZ;2qh z#dx}!fEFaKKhO>K*tts9<@;i|BCQ|@AtZwCr@cRo|7WBtk*7X}(OvXWIv^W9PMZ$=(uc*(rN#inH`eA{2zZwZ%ky zMt^R0UVFIgEwPr}3204mzo3!`1<|ISFu^wpLO5)Wo!4%(--tSE+1&f@o7unBW^XA?&o8o!551zgL~L z?Cwu%ihD$rL@0@CDExW_in&LiGB@qguO+8_PZ$QZni0t%r zt7kv$U@g0g)tcg7S0xb&qD?(vf^Tw#uu^vV`uo;d9js+{%4Ek=-X*FeLP4~tCrt2- zv=G+KPU5=FIlF_k>~36ZihFjIL@0^3nJPyUnabE?G|hH%<%-9S<9b4?$a)aXiJe76W&Vf zHf}F#yWy9uvzl4U+cEA1FNkPMkrxx*+U-{HUDgM~bI*R-%v#Mpnn=goHOOY27K8o6H^+RlI z4;Q@LRAVh4=W%a-K}1`MyqNIWz;4}Va};Kr8f*D{h5P>tBHB{q#e~ml_Vz$F*J9?Z zv6j!fc#mO0L|clynDE)z-h#+t21Ecg*7EoP?_(^8XiJe76CP#Q+ZvS{?`C zy^;kHZ7K3%!lNa7E9JxNy!Ij4TVgGb=kR{af{3;hc`@NppS@kP~?u2{?7U2D}A zQ%Pt`WC;`e=35AN{HW)($qjM`lC|s|yjE@bl|(2AYr9PF8+;m5>kbS0IcwRwgss|x zD+wJLSi%IqiKsEP+Q4|lTJ}z4tBypKgpP15VS?YN3_-?SwSjS$wd~!_Rvl3*2_30f z!UVq=sxh_N!2H2l_Ks?+&J2}=&NwV#g5Pk}m|AUM9%LxJp82PL?piZ|cf7 z`!})Q;K6*(TK3LxtIpJwghl`?VS?W{mRm1&UYj5;VJ&<2xm6>MNQ66UYneAd#`5JvUkxfvaBRRL0AmO1i#6y@vfEw z@h)rGJM|V*R}vZ_vxEtLBR{e+i?!Sy)&`Y?#_cR&!fo4|NK{uHtmXBFHBKdgy2=P! z8!+LuYi}Y+OzqFSgSGtmW35(6=sJoeOn6(cH<4sLs5Y>=V=ZsTSaVhqx*lW+6W*rn zO(a>@stv4CSufwSm<(Yx%f>J%CC=*XJx@!p9hU6G>uf zwSip&*7ETYdl{94?lZ822_Gx%O(fYbQ5)F(U@af#u_seW=za-HnD8;*-b9j^T5Vt# zhqZjZ!roFPq5D28VZ!GwdlN}wYPEsgDAw|M7kgxtgzjIlgbAO6?M)<#8S1R%@dNhy zDhb348DV>5On8)GZz56bQ)exYgRo~=N$7qeOPKJO$lgSHZcV*&mpm)`qO9fd9Ns0W zBtk*ho+T3=_1T+9Pmi=%^i|p4Wi5|Ou?t*Dgo3cWS0+4qwLRn|pRqTQzL#BR*5V!L zm_Yn}ak}n|x$~T`J!B?43O5@d>>s|^Tbu&m?3Pai`~*UE6Zv2HZz6l)g_fn`a|%0~ zz?mI5Im3$xK5t>#&IO%dEu_tpi#lTh+VBYfYbQ$B$H#e{pIVpC?yL+(u$HIoES=8> z?OT2LmR@XwPq3V}Q<<)WpNv6^)8?)Gj{DRP??L`%qsYt3VZ@|xiJT~Q$Qj$MdMaaQ zoG87W*V##opR4)#ho7XNyfG1|H}y0LRv1EWlyj;!BWR+@NQB#ZxEjI5yMXJ{Pcx3O~3v7(>wj|k3Dgel^n_- zf=fVn8d@uEXgPs~9{w5e=xXO{zJ1O+t#i(krghXer$w~sXGFqU4~{e9qXDtKh{(Ny zCrmWprI*|%nU7bjwe1Ka+QNLw5qZS~*GOEBKTf_t>*|1wPwssG zFiU^G*QxXIinW&8)QIl8k8eywUNONn5_@&wCzkKUUmx4K$4i#}pJKD!pQYYb5sSk;~83eupPmd&kn1CC|NLttGa!GF8@YrB_UFjl^DkIr|*-3ax#w zp5|3$E6KfLtuuaWL}g2?^oj|tk=UyVJDOJ*8IEhQbY+jqy<)A^uQsBx*H(JP1lLIH z)j~hBd@*L6w2Y-IN1xm)*1B&4BPvI@O0SsU8d0yDNZbZy9QUwtq|UvHwalye&IZxg zJ}DH0=TsU+6Mr6ojD_~nBW?T>&n|MA{l?ql@UPh6>E7bnQxR4 zdBp_RNbHsOb3}dKCn}?*+$+}d9yQ;HEb@v8u94U)9}^L!`uI^9S>|4`mXAL3jb0$%@-ecAKgp5m@5q zUs%OA_R3F2iyJ$3Lx^*Vz4CL;;vSLRTWUx|Uik@Tans3eRz)_l@B8rG7`uzp&_?1X z)y0h+yCKw&h`jPM=;BV1-C>Ho^4kf;Efl-;($GfYcMXcWF?JWHArX1yw+M>cJ60pH zS9~vn?}0S5A-4_BPO;|XyC2w_Zb(F4xxM0^jNQwLy>j1+n-zANQL>SEkMg=I?t9qT zM2QeXaUVoE{Mi&YQtZ?r_R61gafiXqqe?b%ue@CqH!JLPsYFzI#dkEgMq;nLPZYNW z>_o9-BlpVtU~z}R&Ie0GrB~kPi(3|U@)vvM<418WZ)d_K8@X3LUKO_m?1Z;ORC?v( zV{xCs&TiGK+=lN!!Yd!CapGRG;Y7k(J`PqAm0mHyH4=N}d&qc#zSoO#H1>+MeCHPR zRw63BVuEWV_R3EI(As^s9?!gFBguMH!dkxfjy6^zD!pQYYb5r{&v7s^_<0L%B$aIB zUa^*+o}fRLh)S=R;2Md&@{>5+d-D@Bj3p%-xmT>^=U*7RN<^hsOmK~;S572uqc{_^ z`-vqRxmR9};-u2X`4ZtaqOrZ|%+qcHnqNBZCjLB%)3*k*TJDvXus9Q3z|0wW<#kn@ zT2{ZA+-mZ%C!HU^A@qS*MC^d+da<9DS6z5zEh{z(Z zybl&9aTSqFJ|-4tHw_}o+$$etiW8*;M6Z!oK3)}PUKLTFzPsdm_qN|&?C<*<7yeEH5T3-Ijuu-*(ErD7Ts1nP4s7buLa%x)7ltOu~ev+fvX{ zj_jR=R=NAZTE3f&eVwLDxvL~XL0Dc)Sh_6*oxg98;0*=V zDxc|eA@pt(=f#Aj+fwWl>K8WjjB~^3LkF;y+bd3*8WSdA!fo4rVpdluc{6Kyy0y+OxX{``xRCh2Jn2$L}3Z2@%NcF_`>SiBV3KO ze0(fU<=WClgh`n2Q519tw=bsetA!2!-Be>OALnuYu^^%?MP5w!YyeuG`N#U5)Nq2_ z5oRr)uZmN-wzLsp5+;1^0xfq}=IHlc!05emX2Ojz2=7lOPetnVAhTfa5d@>2jjgGvJL31@`m z#e}8(3DUj38f*FK0@_j~p)HXmOjz1mqP+8?<-j{XtmP*(#c6LBLf;bNyqK`G^&9Qy zxI>nppR<;q5;`)lgb7Rg$ROjD+Q4|lT7Eu+F{+Z#5soEHSlUN8y7yXREk9!` z&Y-&xxc8b7CSk(THXiHzq2<8*!CHP2hdHB?&>4p%Ojz1yoLlW1bjh-}_N=j%pYUPs zt0Y1}SYAw6+GkFQskI!K&sob)NsE*5E`&w^oEH<8ws~H@4?o-Pg>N=-Rx@k)St??Y zN+J}5<;8@hJ(8jO?ai#^XS9f^DhZ9qSi*#*JtC7hQR@nEB5U#KbS0sYB}gU3IXQ*IRLm 
zz}E&Dq3aLMiwUnoTjS6(?_e!|{#b2Q61t9J2@~EHY^_FZw}Z939b?s4Nucd!gstM3 z@YZf?PU?dltmXZ&xZB{X)Qr$|E$79A_f1b_csaf#XQsZ(?0*|CB8Q6-?PNmZLyP%`vE0F zzHC%{MUKDa&EF1>Yy|d%zbel;MMT0{{H^RZif$WBxl0 z_@=D?o`QW-mdn8se+Ky8t9#{NFw4(z0&Dpb#MfI(1l4Zwehd@ddhF}1Q1|D-->~Fw zX8LpTuZtCgw-V&#eZ{=8{!tQX5Vrti|8cZ%E{=1fDR#qeNsQf9CiOwYM02FS?)%|qy6Q4gCs}LH zejCuff$c33HO(!?eDeOm5+=M&+c(Ln90`wP{xw~Et)6pI{oE_o@^9nf8~P(AfmTWkYKXYh#i$Uuv5$=_YCB;ZqMdT4cZiBV>E9Rb4PPh%}A4MBw z!gI3kw~M+H2|rsOwegD)oRg_%PcUIE|Bf(zZ=yuFjmRq|O5<)$@XX>-jXm>XCW^eu zV;^Yw{A%s05y7Jj6Woqt8?5E?IoffF$jiYJCK~q-e;yd4ybq#xdF___2WxqsN53l( zxmPSyJd9 z2X+p!x0mfL+=B2g#M)P(&4NEcKh?tyMCWY0aoCoAvC!%Hxti@77^BZ4VXbS%8S#fJ zCp9L5pV1*NCj2DMb`97DYyE6BvoU#*GrHX5AR=&HO!!Hh?HUk~u-3|VBIr zm-as=KZz?5f%9Ub;fg+CtzT?sHV&P9PJR+s-UHygnBd(6Ek)upCu_}}g|`9QQrIXF zg4qbi5+>pu7O1-o*1E6BY@n`6L~es6OvL*>Y=gBPxexCTwxxLHB_g-M5+-<;N|dP_ ztaWFT*+APZ5xEVPFu}WRu?^Px*}~&lP){-a4Rc$;JldN(*aZWx?-)JZZaDuel#vWiL3ESYjnBdbI)z59P*0Fn>4^SUJSTD+ zoEH;5#@Jmow!vCHuH13qKKXc+6S)n}iwPeq?RFd6U@adXk3M{_eB8~6+y>{xgpc`l zSB-73mXGr-XYY~EA32fR;Jlddxy$aVu?^Pp`D*Y&yXEs>PUJQ?FD85rw%cuNgSC9# zJ^l5a^Z7g{avPi%6CO+0T{X7BS{^_AVa=WLxFjcX8=Myt9uwJJHMYT89tTZ(V#hqb z%8A?t=f#A_c6PgsZLpTdb8o*lIFA!^BDcYLG2t<4e!DI4@rt!PE`4&$;5^>ViQER~ z#e~P&Ig#67Esw8ZqahJk!i29G@{;E^Sj*Q5sH=uVzWzX7O!(R-e}cIU*7EfZo_Rwe zUq>M?CVUN-w?wxQSj*RKXuFLGmN4OK(Y#0HHdxEopXh^)36?P7YwCPt$ZfEeucI-R zG$vTWgzpXHBV2BSwR}GSV`5{1B~17pM?O;LHdxE|H88d}CRoCR@73fpPHuy>e18UW zRAYiAOvE?ILlKt*)*7|?MH*9Mu5C=Pgo*fmJKJEbjv;0PF+*d5B}~LO``HF-%{|X- zAogiYu!M>D4F$HrT9@8sHW0%#CRoBm{N4oHV6D|2F&l_Q8xt&HB7S3pZLrp!Pnr#j zsclcr-Z5c<^J2nn+wC?Y64r8iSR0fGKsdV#EMday&~CS}4c78{!y2bV#6!>wSCsUZd-^P~v=P2q!Y-ctdPIp}t=1G;G4W*f?(530 zG}TyQX(N2qhh4l7$^>h*rqGHB`ITe6FCoADUSo-+jquegda(4t62V%ndb$)7@@vg% zLw@|J5=$H5y9pRkLnsrh)tW*pCXUR0 z&+GBiXEn3L(nk2M2xf*5$^>h*rqGHB`4w<22Yv;dC6+eAcXu$ug-|9~OWLt$#f1Et zx!RCles5-prH$}iD$LX&lnK_7-(eQ5nDF28!mqNk#L`CaZd>uTY(cPAtG@3V6Y}fw zD2M(YJxeTYgxf~sl)o*jzeiuzYE7XP6IscF)Kv#dyc{6B4iN!Hb(Im8uh%ZLVj_Ej zL7sUBOPKJsfJioa<{8nNB42MM(29wyB?f7`9V}tO+cYBbsO@G%Yl?imwL>c=vK|$r z4|cGG3GbU&Swwv>BU)4B>%A6QF_Dc7LB^6gOPKI61}mg!EXfG@R+QxHqYt!VA{*g? 
zjEQxYFyUh*R(jExm=Uch^7T;^S}~E0)IrAfI!l=FF&`_|Xl&1j))e{rYyhp8$Yz`% zb5xxrO!(Y|m3K5pWkhR=e0^4fR!p?mTpK<+a7djcO!yp(9fS}vA}NU06#4q>46T^B z%wmS{j(jhJB}{lMft`>LG9oF6))e`AlmV@nxY#0@aF~4egC$IOOoSaBxp$t$J_RB3 zpycb(60~CC(>3hf*Ei&zK1-PJ*bX~WA!I~S5b}jD$=9PkXvM@&E%tj}Psu%fmN4Nl zDt6f9twtk~f@n>VuSc)YiU}Qa^?#B&OPKH|+-zh-B0+LSw5E`F3}2+%^zZ5w-hB=5 zit}QEciVD8%Td;{GR4HCwe;QB@VGn=miSr(wZOY=@+EmAl7g^&P0OB5Oh_xid#T#4 zSi%JFwrRW4H(AlH%39XOVnSNG+CXb(iKUI;-8StXYNH@n%lb-8NUyD<9O$(yv9uAq z+ot^-g!c2Ymi76V7;nFo5<42^KSCL{bo}WpjH>NL-?Ig}8(zmNvq7 zMJ)Eoh@>D`%VLz6koZb%AiiRWrH%029gE>IA}I*gvREr7Bu-Qth!a_2X(N1>%3{%s zND6|rEM|-eiFegTm_BqCODt^!@3s|h%N7J{S%e%D*}Ja+Q8-I1ZG_vlwSm7aTM(>e zF?>u=T{W}B%Yl5o4sDG?b(Im8uh%ZLVuGG|GfSB8wqR>Ddgd8nYaDMS(25CayUi?N z!rQd1IjQYtgss)QwL>c=s1G)?gbDAPwsxjIm=U(-^j-_Cn4qzw#u6rcjIlic8cQ<5 z*3LfqKr1F_Osuhl2_GwMFN4O!jIccbA4Q=R6EwEhSi*#l`L-vM5ear!%38K}5)(8> z)mY;52lDl~%l4LNj>-tj*Jm|o#f0oeX+OvQ6-$`#IoS5dG9oDm+gtK^7g{kPyJ{LU zU>}YpOn59|dwm&^6om8-$=9O{XvKu=4yq08C$fYIkBMy0G9!|Lu)RKygP;`?Bi6PU zZkFtevV;kb?QHKgBa(u!Jxh=JpcND4eQLY8%7n+Lwuk)fLcMFxTs^6Ilj}E--(8w4 zzsNL2ev#=u{lzJ~v3l6BYvfljE*-Si`q!r+{6(Vh9zW|{dsc`wGJ>^cJ^`05;;wrok+2r8Ya0^5cM32L`idUol@oY2sMGw$R$wh(pJVhX z5r9x~y{8XwkjU!_c`@P7$)Z>G%1ekofwt>4?d8a8H}P_?#7mAR z$T_J6Clc23nno)r5rSD=v4jbKvNk)bm8^ewEP|NRV+O>yo>N}CxmT>^5db3Z5>fIB zc`@N$m1~!G3V3It;S)?)i}yXd_`Q-sNmaL|!q$`>?TB zEThV)5f?d2)Yxi;_tmXM)Pp(AdAu6Fh=PHiD02=m$Q+p+|Y`VwQ-AgtdG`MXxOpxmPS< z!bfmhr9$0%6iawr%Q?kfu@ahJ8+9`+gBMo#1xPvUQYN$({#~$Uaj;i^~^z6CcO+39z$=?(k4@~`V+QOL)uSF)f z*0WM%Z!epcKLfn`Y-#^{URPRbu=a*@Oh79pmI$Xz6J@19ptRl8cYD7YX%JouA#^2B zi>`&V{{nGAd@rkQc=U zdrr^~Vf(dbG{1IgOZKbNzWagk!*)No4i!k4U@cC2t%vZ2cyr^UCpCXG?^jW4M++Z) z4}Le=eg(XEf*-?wA>Pag)`AUulOb*;*(j~<;TiBgjIB9sPsZ2OP_(rm5>*7+F8=N< zrc8KV#WSA|p|i8+)s^rz1{+0Og8F%K*Hd+8!qf2T;cvI=#NRjHa&c+>M93FUjDVJ; z2SKZn0Hl3{^Xftjth6EhyvWznm}MlV5~0r3L`%z&c_kj%*VmlJ-=T?;Tj@ct7N@;T zAq;!wSlzQ6nVwWfIiQ6)aXRyg>WT^0LK^v&iOeINaYSnndBPrQI+|;p$R%Ao%ySY8 zR%#FlYoSJ|OfdmvCD^Mf!oAYxod~8+kjkO?LJK(+wJZ78U=oPda#+dbnS)5OR>EH8`NjnNhYkD_N&W;q z?d=#|nGGUPS4yxJr=39VJ_2p8p2%D$&*E@R@37=zd75jtF=1NUcfu>n2?jD7^8c2v zX&s(vq2F~O*elA52}_%YqMrRjogJ>V>o&ZN#kNH~vHvO|9{9HyBO6)^Uh=#p3TQUG zeu!4Wy~+u(6?sMZa!pId^3R|9;p$apgFM_mOXE2~JtO4*jmSzqu<)u20Vr!*EMX6~ z&*&~Tgk&vCn}=Np(ZoO00NY@#F3&uHdPb0SZj~|NWe_edxz>-@o3%t6+jBzfSY5Fe zr>zc0J!@n8F6l{94koxv+!8Y)F@m+&_b!Bv3>iTsk88K+QDTD#*5bCr^@bYCo_W%C zW=-m6bo#4RzUds$bD*X#nEOrVeQU0*=_7ahTK?buS$*wv@eV4v?(Dgp^NzRlv}r%h zhv@$|v+>Nt?&&Xo8ld#Z_qwMIZ6vI9?V8&tamAn>jfpUL)at4={Gx8@8^2r4Y%JET zOe9&!18co>;;KqqKQWXEDZG`O{beE?xu(*nL#-*dk+9YtZ~RiVP`f3~ ziTA(ywsGwytabTmh&V>4yUqQsF%h^NOuT*=TIlHX+GXd+|Jy$+IX{Dgf7xHPcD`sn z&m&>2qv!Tj;t&6v4-t9A#DIr-D?M#q51B*QE0&yj@|LQ#+L{YBeu4>W^j%e9TP0s^qF3& zRrv&SuUPBjr`AzozI`xZub4Rc^R<*lpO3v_$#0vj7h#m(oRn~{0&8{5upYI@5*37C zr;P&0uRbu89Uv=*I@z!~| zVL1}kdhRJ36EW5{B%+qc1drhA6&%BSo^XF2xrmL_@V#UsCs^w@M;n2XHzXpjnBZ9= z@+$D0@zC;SW7Mnj8rw)%YwC$MM*Z(2H50d9W#i*WnHgfQ{{Am}w(#VW1wqVXddOvbf}b@8?W#^pQE0J&y~&JWQVHh!nkc ziD3)p>4(4lwWe3O{6AfuIcs@(<;y>peTFWCm%hd$gAt=k_S`Frcguu%$RkEfxF;wF zm#LBvG^w$K2}_%YA#@Kx;@l8km^!r1T3p&nA_2i#mL8p=Mq|QCf^|WN#zfY-@be~l z4x`g&dwf+q4<%SDveA`5_F5L!y7ZS80c`WnFFIcxVe!LtpL}69a$?pB8>sas&-t?R zsKGWnFR{s&*)z8?St&9b3Fo!OUoCo>Kfzl6e959E*kf-a8-XQEG_ETy2h%^!?G@K< zUUDw&oo8AkgZg1wy-K{~to8NN79pW_OGIviB~0+MQ5!koy$Joz(`fmelM;!0#ai6E zNXgez4@Xop+yr*Z*IHgd06 zt9dA9%dzR^J-;s#m0mHyH4=Mu{Y!{H$ENrE`K!(!e?T-jHhtruugf-auUKpMoe)`$ zO&{3sn=(=96%$+|u~)}!hS6Yby4+@8cJ}-pk@wj2mJ7Yt(lgJ!Vy(aa3;k|vdhin7 zc8RF;iV3a}^(wcq&N>*?#->B0-;H6I>&)R~LSX$lKb|2PSZF?8v6lDk?VqkFN2OOxaE(M>1s`kSm5Nm$F{w#pH%(kmvoMnd>u-nWtR-h`Y6H9SF>#yx2I#`mzwI|zeyy1$ 
[GIT binary patch data omitted: remainder of a base85-encoded binary payload, not human-readable]
ze&P#aOGwf7549VdsV!^pYD$yt_Xgh6t-k41;B5sW_4P9;D8`x}7 zeY3Wk5c1^l>TZb|&kod3{sQ;U5-gRi72A;|0AyQO?XXgLh{t0cF~L%v<+ZKg>Mv^S y-(as+WI2_TB?1BDpChvE!#X^|jlwLEp+2|zxOH9sJ4xL)>g#Z{Yb&H3@&5pgFc6pk diff --git a/act/assets/vx300s_dependencies.xml b/act/assets/vx300s_dependencies.xml deleted file mode 100644 index c75d3ad5..00000000 --- a/act/assets/vx300s_dependencies.xml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/vx300s_left.xml b/act/assets/vx300s_left.xml deleted file mode 100644 index 61e6219e..00000000 --- a/act/assets/vx300s_left.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/assets/vx300s_right.xml b/act/assets/vx300s_right.xml deleted file mode 100644 index 2c6f007c..00000000 --- a/act/assets/vx300s_right.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/act/conda_env.yaml b/act/conda_env.yaml deleted file mode 100644 index 0f44d6b0..00000000 --- a/act/conda_env.yaml +++ /dev/null @@ -1,23 +0,0 @@ -name: aloha -channels: - - pytorch - - nvidia - - conda-forge -dependencies: - - python=3.9 - - pip=23.0.1 - - pytorch=2.0.0 - - torchvision=0.15.0 - - pytorch-cuda=11.8 - - pyquaternion=0.9.9 - - pyyaml=6.0 - - rospkg=1.5.0 - - pexpect=4.8.0 - - mujoco=2.3.3 - - dm_control=1.0.9 - - py-opencv=4.7.0 - - matplotlib=3.7.1 - - einops=0.6.0 - - packaging=23.0 - - h5py=3.8.0 - - ipython=8.12.0 diff --git a/act/constants.py b/act/constants.py deleted file mode 100644 index f445350a..00000000 --- a/act/constants.py +++ /dev/null @@ -1,76 +0,0 @@ -import pathlib - -### Task parameters -DATA_DIR = '' -SIM_TASK_CONFIGS = { - 'sim_transfer_cube_scripted':{ - 'dataset_dir': DATA_DIR + '/sim_transfer_cube_scripted', - 'num_episodes': 50, - 'episode_len': 400, - 'camera_names': ['top'] - }, - - 'sim_transfer_cube_human':{ - 'dataset_dir': DATA_DIR + '/sim_transfer_cube_human', - 'num_episodes': 50, - 'episode_len': 400, - 'camera_names': ['top'] - }, - - 'sim_insertion_scripted': { - 'dataset_dir': DATA_DIR + '/sim_insertion_scripted', - 'num_episodes': 50, - 'episode_len': 400, - 'camera_names': ['top'] - }, - - 'sim_insertion_human': { - 'dataset_dir': DATA_DIR + '/sim_insertion_human', - 'num_episodes': 50, - 'episode_len': 500, - 'camera_names': ['top'] - }, -} - -### Simulation envs fixed constants -DT = 0.02 -JOINT_NAMES = ["waist", "shoulder", "elbow", "forearm_roll", "wrist_angle", "wrist_rotate"] -START_ARM_POSE = [0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239, 0, -0.96, 1.16, 0, -0.3, 0, 0.02239, -0.02239] - -XML_DIR = str(pathlib.Path(__file__).parent.resolve()) + '/assets/' # note: absolute path - -# Left finger position limits (qpos[7]), right_finger = -1 * left_finger -MASTER_GRIPPER_POSITION_OPEN = 0.02417 -MASTER_GRIPPER_POSITION_CLOSE = 0.01244 -PUPPET_GRIPPER_POSITION_OPEN = 0.05800 -PUPPET_GRIPPER_POSITION_CLOSE = 0.01844 - -# Gripper joint limits (qpos[6]) -MASTER_GRIPPER_JOINT_OPEN = 0.3083 -MASTER_GRIPPER_JOINT_CLOSE = -0.6842 -PUPPET_GRIPPER_JOINT_OPEN = 1.4910 -PUPPET_GRIPPER_JOINT_CLOSE = -0.6213 - -############################ Helper functions ############################ - -MASTER_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_POSITION_CLOSE) / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) 
-PUPPET_GRIPPER_POSITION_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_POSITION_CLOSE) / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE)
-MASTER_GRIPPER_POSITION_UNNORMALIZE_FN = lambda x: x * (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE) + MASTER_GRIPPER_POSITION_CLOSE
-PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN = lambda x: x * (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE) + PUPPET_GRIPPER_POSITION_CLOSE
-MASTER2PUPPET_POSITION_FN = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(MASTER_GRIPPER_POSITION_NORMALIZE_FN(x))
-
-MASTER_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE)
-PUPPET_GRIPPER_JOINT_NORMALIZE_FN = lambda x: (x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE)
-MASTER_GRIPPER_JOINT_UNNORMALIZE_FN = lambda x: x * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE
-PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN = lambda x: x * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE
-MASTER2PUPPET_JOINT_FN = lambda x: PUPPET_GRIPPER_JOINT_UNNORMALIZE_FN(MASTER_GRIPPER_JOINT_NORMALIZE_FN(x))
-
-MASTER_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (MASTER_GRIPPER_POSITION_OPEN - MASTER_GRIPPER_POSITION_CLOSE)
-PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN = lambda x: x / (PUPPET_GRIPPER_POSITION_OPEN - PUPPET_GRIPPER_POSITION_CLOSE)
-
-MASTER_POS2JOINT = lambda x: MASTER_GRIPPER_POSITION_NORMALIZE_FN(x) * (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE) + MASTER_GRIPPER_JOINT_CLOSE
-MASTER_JOINT2POS = lambda x: MASTER_GRIPPER_POSITION_UNNORMALIZE_FN((x - MASTER_GRIPPER_JOINT_CLOSE) / (MASTER_GRIPPER_JOINT_OPEN - MASTER_GRIPPER_JOINT_CLOSE))
-PUPPET_POS2JOINT = lambda x: PUPPET_GRIPPER_POSITION_NORMALIZE_FN(x) * (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE) + PUPPET_GRIPPER_JOINT_CLOSE
-PUPPET_JOINT2POS = lambda x: PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN((x - PUPPET_GRIPPER_JOINT_CLOSE) / (PUPPET_GRIPPER_JOINT_OPEN - PUPPET_GRIPPER_JOINT_CLOSE))
-
-MASTER_GRIPPER_JOINT_MID = (MASTER_GRIPPER_JOINT_OPEN + MASTER_GRIPPER_JOINT_CLOSE)/2
diff --git a/act/detr/LICENSE b/act/detr/LICENSE
deleted file mode 100644
index b1395e94..00000000
--- a/act/detr/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability.
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2020 - present, Facebook, Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/act/detr/README.md b/act/detr/README.md deleted file mode 100644 index 500b1b8d..00000000 --- a/act/detr/README.md +++ /dev/null @@ -1,9 +0,0 @@ -This part of the codebase is modified from DETR https://github.com/facebookresearch/detr under APACHE 2.0. - - @article{Carion2020EndtoEndOD, - title={End-to-End Object Detection with Transformers}, - author={Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko}, - journal={ArXiv}, - year={2020}, - volume={abs/2005.12872} - } \ No newline at end of file diff --git a/act/detr/main.py b/act/detr/main.py deleted file mode 100644 index 07ea86c7..00000000 --- a/act/detr/main.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import argparse -from pathlib import Path - -import numpy as np -import torch -from .models import build_ACT_model, build_CNNMLP_model - -import IPython -e = IPython.embed - -def get_args_parser(): - parser = argparse.ArgumentParser('Set transformer detector', add_help=False) - parser.add_argument('--lr', default=1e-4, type=float) # will be overridden - parser.add_argument('--lr_backbone', default=1e-5, type=float) # will be overridden - parser.add_argument('--batch_size', default=2, type=int) # not used - parser.add_argument('--weight_decay', default=1e-4, type=float) - parser.add_argument('--epochs', default=300, type=int) # not used - parser.add_argument('--lr_drop', default=200, type=int) # not used - parser.add_argument('--clip_max_norm', default=0.1, type=float, # not used - help='gradient clipping max norm') - - # Model parameters - # * Backbone - parser.add_argument('--backbone', default='resnet18', type=str, # will be overridden - help="Name of the convolutional backbone to use") - parser.add_argument('--dilation', action='store_true', - help="If true, we replace stride with dilation in the last convolutional block (DC5)") - parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), - help="Type of positional embedding to use on top of the image features") - parser.add_argument('--camera_names', default=[], type=list, # will be overridden - help="A list of camera names") - - # * Transformer - parser.add_argument('--enc_layers', default=4, type=int, # will be overridden - help="Number of encoding layers in the transformer") - parser.add_argument('--dec_layers', default=6, type=int, # will be overridden - help="Number of decoding layers in the transformer") - parser.add_argument('--dim_feedforward', default=2048, type=int, # will be overridden - help="Intermediate size of the feedforward layers in the transformer blocks") - parser.add_argument('--hidden_dim', default=256, type=int, # will be overridden - help="Size of the embeddings (dimension of the transformer)") - parser.add_argument('--dropout', default=0.1, type=float, - help="Dropout applied in the transformer") - parser.add_argument('--nheads', default=8, type=int, # will be overridden - help="Number of attention heads inside the transformer's attentions") - parser.add_argument('--num_queries', default=400, type=int, # will be overridden - help="Number of query slots") - parser.add_argument('--pre_norm', action='store_true') - - # * Segmentation - parser.add_argument('--masks', action='store_true', - help="Train segmentation head if the flag is provided") - - parser.add_argument('--a_dim', default=-1, type=float, - help="Action dim") - parser.add_argument('--latent_dim', default=-1, type=float, - help="Latent dim") - parser.add_argument('--state_dim', default=-1, type=float, - help="State dim") - - return parser - - -def build_ACT_model_and_optimizer(args_override): - parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) - args, _ = parser.parse_known_args() - - for k, v in args_override.items(): - setattr(args, k, v) - - model = build_ACT_model(args) - model.cuda() - - param_dicts = [ - {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, - { - "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad], - "lr": args.lr_backbone, - }, - ] - # optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, - # weight_decay=args.weight_decay) - optimizer = 
None - - return model, optimizer - - -def build_CNNMLP_model_and_optimizer(args_override): - parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) - args = parser.parse_args() - - for k, v in args_override.items(): - setattr(args, k, v) - - model = build_CNNMLP_model(args) - model.cuda() - - param_dicts = [ - {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]}, - { - "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad], - "lr": args.lr_backbone, - }, - ] - optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, - weight_decay=args.weight_decay) - - return model, optimizer - diff --git a/act/detr/models/__init__.py b/act/detr/models/__init__.py deleted file mode 100644 index cc78db10..00000000 --- a/act/detr/models/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .detr_vae import build as build_vae -from .detr_vae import build_cnnmlp as build_cnnmlp - -def build_ACT_model(args): - return build_vae(args) - -def build_CNNMLP_model(args): - return build_cnnmlp(args) \ No newline at end of file diff --git a/act/detr/models/backbone.py b/act/detr/models/backbone.py deleted file mode 100644 index f28637ea..00000000 --- a/act/detr/models/backbone.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Backbone modules. -""" -from collections import OrderedDict - -import torch -import torch.nn.functional as F -import torchvision -from torch import nn -from torchvision.models._utils import IntermediateLayerGetter -from typing import Dict, List - -from util.misc import NestedTensor, is_main_process - -from .position_encoding import build_position_encoding - -import IPython -e = IPython.embed - -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - Copy-paste from torchvision.misc.ops with added eps before rqsrt, - without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101] - produce nans. - """ - - def __init__(self, n): - super(FrozenBatchNorm2d, self).__init__() - self.register_buffer("weight", torch.ones(n)) - self.register_buffer("bias", torch.zeros(n)) - self.register_buffer("running_mean", torch.zeros(n)) - self.register_buffer("running_var", torch.ones(n)) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - num_batches_tracked_key = prefix + 'num_batches_tracked' - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super(FrozenBatchNorm2d, self)._load_from_state_dict( - state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs) - - def forward(self, x): - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - eps = 1e-5 - scale = w * (rv + eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - -class BackboneBase(nn.Module): - - def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): - super().__init__() - # for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this? 
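# Editor's aside (not part of the patch): the commented-out loop that this hunk
# shows (header above, condition continuing below) was an option to freeze all
# backbone parameters except the later ResNet stages. A self-contained sketch of
# that idea, assuming a plain torchvision resnet18 stands in for the backbone:
import torchvision

train_backbone = True
backbone = torchvision.models.resnet18()
for name, parameter in backbone.named_parameters():
    # mirror the commented-out condition: freeze everything outside layer2/3/4
    if not train_backbone or ('layer2' not in name and 'layer3' not in name and 'layer4' not in name):
        parameter.requires_grad_(False)
print(sum(p.numel() for p in backbone.parameters() if p.requires_grad))  # trainable parameter count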
- # if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: - # parameter.requires_grad_(False) - if return_interm_layers: - return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} - else: - return_layers = {'layer4': "0"} - self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) - self.num_channels = num_channels - - def forward(self, tensor): - xs = self.body(tensor) - return xs - # out: Dict[str, NestedTensor] = {} - # for name, x in xs.items(): - # m = tensor_list.mask - # assert m is not None - # mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] - # out[name] = NestedTensor(x, mask) - # return out - - -class Backbone(BackboneBase): - """ResNet backbone with frozen BatchNorm.""" - def __init__(self, name: str, - train_backbone: bool, - return_interm_layers: bool, - dilation: bool): - backbone = getattr(torchvision.models, name)( - replace_stride_with_dilation=[False, False, dilation], - pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) # pretrained # TODO do we want frozen batch_norm?? - num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 - super().__init__(backbone, train_backbone, num_channels, return_interm_layers) - - -class Joiner(nn.Sequential): - def __init__(self, backbone, position_embedding): - super().__init__(backbone, position_embedding) - - def forward(self, tensor_list: NestedTensor): - xs = self[0](tensor_list) - out: List[NestedTensor] = [] - pos = [] - for name, x in xs.items(): - out.append(x) - # position encoding - pos.append(self[1](x).to(x.dtype)) - - return out, pos - - -def build_backbone(args): - position_embedding = build_position_encoding(args) - train_backbone = args.lr_backbone > 0 - return_interm_layers = args.masks - backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) - model = Joiner(backbone, position_embedding) - model.num_channels = backbone.num_channels - return model diff --git a/act/detr/models/detr_vae.py b/act/detr/models/detr_vae.py deleted file mode 100644 index 59925029..00000000 --- a/act/detr/models/detr_vae.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR model and criterion classes. -""" -import torch -from torch import nn -from torch.autograd import Variable -from .backbone import build_backbone -from .transformer import build_transformer, TransformerEncoder, TransformerEncoderLayer - -import numpy as np - -import IPython -e = IPython.embed - - -def reparametrize(mu, logvar): - std = logvar.div(2).exp() - eps = Variable(std.data.new(std.size()).normal_()) - return mu + std * eps - - -def get_sinusoid_encoding_table(n_position, d_hid): - def get_position_angle_vec(position): - return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] - - sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) - sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i - sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 - - return torch.FloatTensor(sinusoid_table).unsqueeze(0) - - -class DETRVAE(nn.Module): - """ This is the DETR module that performs object detection """ - def __init__(self, backbones, transformer, encoder, latent_dim, a_dim, state_dim, num_queries, camera_names): - """ Initializes the model. - Parameters: - backbones: torch module of the backbone to be used. 
See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - state_dim: robot state dimension of the environment - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. - """ - super().__init__() - self.action_dim = a_dim - self.latent_dim = latent_dim - self.state_dim = state_dim - - self.num_queries = num_queries - self.camera_names = camera_names - self.transformer = transformer - self.encoder = encoder - hidden_dim = transformer.d_model - self.action_head = nn.Linear(hidden_dim, self.action_dim) - self.is_pad_head = nn.Linear(hidden_dim, 1) - self.query_embed = nn.Embedding(num_queries, hidden_dim) - if backbones is not None: - self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1) - self.backbones = nn.ModuleList(backbones) - self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim) - else: - # input_dim = 14 + 7 # robot_state + env_state - self.input_proj_robot_state = nn.Linear(state_dim, hidden_dim) - self.input_proj_env_state = nn.Linear(10, hidden_dim) # TODO not used in robomimic - self.pos = torch.nn.Embedding(2, hidden_dim) - self.backbones = None - - # encoder extra parameters - self.latent_dim = self.latent_dim # final size of latent z # TODO tune - self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding - self.encoder_action_proj = nn.Linear(self.action_dim, hidden_dim) # project action to embedding - self.encoder_joint_proj = nn.Linear(state_dim, hidden_dim) # project qpos to embedding - self.latent_proj = nn.Linear(hidden_dim, self.latent_dim*2) # project hidden state to latent std, var - self.register_buffer('pos_table', get_sinusoid_encoding_table(1+1+num_queries, hidden_dim)) # [CLS], qpos, a_seq - - # decoder extra parameters - self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim) # project latent sample to embedding - self.additional_pos_embed = nn.Embedding(2, hidden_dim) # learned position embedding for proprio and latent - - def forward(self, qpos, image, env_state, actions=None, is_pad=None): - """ - qpos: batch, qpos_dim - image: batch, num_cam, channel, height, width - env_state: None - actions: batch, seq, action_dim - """ - is_training = actions is not None # train or val - bs, _ = qpos.shape - ### Obtain latent z from action sequence - if is_training: - # project action sequence to embedding dim, and concat with a CLS token - action_embed = self.encoder_action_proj(actions) # (bs, seq, hidden_dim) - qpos_embed = self.encoder_joint_proj(qpos) # (bs, hidden_dim) - qpos_embed = torch.unsqueeze(qpos_embed, axis=1) # (bs, 1, hidden_dim) - cls_embed = self.cls_embed.weight # (1, hidden_dim) - cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim) - encoder_input = torch.cat([cls_embed, qpos_embed, action_embed], axis=1) # (bs, seq+2, hidden_dim) - encoder_input = encoder_input.permute(1, 0, 2) # (seq+1, bs, hidden_dim) - # do not mask cls token - cls_joint_is_pad = torch.full((bs, 2), False).to(qpos.device) # False: not a padding - is_pad = torch.cat([cls_joint_is_pad, is_pad], axis=1) # (bs, seq+1) - # obtain position embedding - pos_embed = self.pos_table.clone().detach() - pos_embed = pos_embed.permute(1, 0, 2) # (seq+1, 1, hidden_dim) - # query model - encoder_output = self.encoder(encoder_input, pos=pos_embed, 
src_key_padding_mask=is_pad) - encoder_output = encoder_output[0] # take cls output only - latent_info = self.latent_proj(encoder_output) - mu = latent_info[:, :self.latent_dim] - logvar = latent_info[:, self.latent_dim:] - latent_sample = reparametrize(mu, logvar) - latent_input = self.latent_out_proj(latent_sample) - else: - mu = logvar = None - latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device) - latent_input = self.latent_out_proj(latent_sample) - - if self.backbones is not None: - # Image observation features and position embeddings - all_cam_features = [] - all_cam_pos = [] - for cam_id, cam_name in enumerate(self.camera_names): - features, pos = self.backbones[0](image[:, cam_id]) # HARDCODED - features = features[0] # take the last layer feature - pos = pos[0] - all_cam_features.append(self.input_proj(features)) - all_cam_pos.append(pos) - # proprioception features - proprio_input = self.input_proj_robot_state(qpos) - # fold camera dimension into width dimension - src = torch.cat(all_cam_features, axis=3) - pos = torch.cat(all_cam_pos, axis=3) - hs = self.transformer(src, None, self.query_embed.weight, pos, latent_input, proprio_input, self.additional_pos_embed.weight)[0] - else: - qpos = self.input_proj_robot_state(qpos).unsqueeze(dim=1) - env_state = self.input_proj_env_state(env_state).unsqueeze(dim=1) - transformer_input = torch.cat([qpos, env_state], axis=1) # seq length = 2 - hs = self.transformer(transformer_input, None, self.query_embed.weight, self.pos.weight)[0] - a_hat = self.action_head(hs) - is_pad_hat = self.is_pad_head(hs) - return a_hat, is_pad_hat, [mu, logvar] - - - -class CNNMLP(nn.Module): - def __init__(self, backbones, state_dim, camera_names): - """ Initializes the model. - Parameters: - backbones: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - state_dim: robot state dimension of the environment - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
- """ - super().__init__() - self.camera_names = camera_names - self.action_head = nn.Linear(1000, state_dim) # TODO add more - if backbones is not None: - self.backbones = nn.ModuleList(backbones) - backbone_down_projs = [] - for backbone in backbones: - down_proj = nn.Sequential( - nn.Conv2d(backbone.num_channels, 128, kernel_size=5), - nn.Conv2d(128, 64, kernel_size=5), - nn.Conv2d(64, 32, kernel_size=5) - ) - backbone_down_projs.append(down_proj) - self.backbone_down_projs = nn.ModuleList(backbone_down_projs) - - mlp_in_dim = 768 * len(backbones) + 14 - self.mlp = mlp(input_dim=mlp_in_dim, hidden_dim=1024, output_dim=14, hidden_depth=2) - else: - raise NotImplementedError - - def forward(self, qpos, image, env_state, actions=None): - """ - qpos: batch, qpos_dim - image: batch, num_cam, channel, height, width - env_state: None - actions: batch, seq, action_dim - """ - is_training = actions is not None # train or val - bs, _ = qpos.shape - # Image observation features and position embeddings - all_cam_features = [] - for cam_id, cam_name in enumerate(self.camera_names): - features, pos = self.backbones[cam_id](image[:, cam_id]) - features = features[0] # take the last layer feature - pos = pos[0] # not used - all_cam_features.append(self.backbone_down_projs[cam_id](features)) - # flatten everything - flattened_features = [] - for cam_feature in all_cam_features: - flattened_features.append(cam_feature.reshape([bs, -1])) - flattened_features = torch.cat(flattened_features, axis=1) # 768 each - features = torch.cat([flattened_features, qpos], axis=1) # qpos: 14 - a_hat = self.mlp(features) - return a_hat - - -def mlp(input_dim, hidden_dim, output_dim, hidden_depth): - if hidden_depth == 0: - mods = [nn.Linear(input_dim, output_dim)] - else: - mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)] - for i in range(hidden_depth - 1): - mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)] - mods.append(nn.Linear(hidden_dim, output_dim)) - trunk = nn.Sequential(*mods) - return trunk - - -def build_encoder(args): - d_model = args.hidden_dim # 256 - dropout = args.dropout # 0.1 - nhead = args.nheads # 8 - dim_feedforward = args.dim_feedforward # 2048 - num_encoder_layers = args.enc_layers # 4 # TODO shared with VAE decoder - normalize_before = args.pre_norm # False - activation = "relu" - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - return encoder - - -def build(args): - - # From state - # backbone = None # from state for now, no need for conv nets - # From image - backbones = [] - backbone = build_backbone(args) - backbones.append(backbone) - - transformer = build_transformer(args) - - encoder = build_encoder(args) - - model = DETRVAE( - backbones, - transformer, - encoder, - latent_dim=args.latent_dim, - a_dim=args.a_dim, - state_dim=args.state_dim, - num_queries=args.num_queries, - camera_names=args.camera_names, - ) - - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print("number of parameters: %.2fM" % (n_parameters/1e6,)) - - return model - -def build_cnnmlp(args): - state_dim = 14 # TODO hardcode - - # From state - # backbone = None # from state for now, no need for conv nets - # From image - backbones = [] - for _ in args.camera_names: - backbone = build_backbone(args) - backbones.append(backbone) - - model = 
CNNMLP( - backbones, - state_dim=state_dim, - camera_names=args.camera_names, - ) - - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print("number of parameters: %.2fM" % (n_parameters/1e6,)) - - return model - diff --git a/act/detr/models/position_encoding.py b/act/detr/models/position_encoding.py deleted file mode 100644 index 209d9171..00000000 --- a/act/detr/models/position_encoding.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Various positional encodings for the transformer. -""" -import math -import torch -from torch import nn - -from util.misc import NestedTensor - -import IPython -e = IPython.embed - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor): - x = tensor - # mask = tensor_list.mask - # assert mask is not None - # not_mask = ~mask - - not_mask = torch.ones_like(x[0, [0]]) - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -class PositionEmbeddingLearned(nn.Module): - """ - Absolute pos embedding, learned. 
- """ - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(50, num_pos_feats) - self.col_embed = nn.Embedding(50, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i) - y_emb = self.row_embed(j) - pos = torch.cat([ - x_emb.unsqueeze(0).repeat(h, 1, 1), - y_emb.unsqueeze(1).repeat(1, w, 1), - ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) - return pos - - -def build_position_encoding(args): - N_steps = args.hidden_dim // 2 - if args.position_embedding in ('v2', 'sine'): - # TODO find a better way of exposing other arguments - position_embedding = PositionEmbeddingSine(N_steps, normalize=True) - elif args.position_embedding in ('v3', 'learned'): - position_embedding = PositionEmbeddingLearned(N_steps) - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/act/detr/models/transformer.py b/act/detr/models/transformer.py deleted file mode 100644 index f38afd0e..00000000 --- a/act/detr/models/transformer.py +++ /dev/null @@ -1,314 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Transformer class. - -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers -""" -import copy -from typing import Optional, List - -import torch -import torch.nn.functional as F -from torch import nn, Tensor - -import IPython -e = IPython.embed - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False, - return_intermediate_dec=False): - super().__init__() - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, - return_intermediate=return_intermediate_dec) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, mask, query_embed, pos_embed, latent_input=None, proprio_input=None, additional_pos_embed=None): - # TODO flatten only when input has H and W - if len(src.shape) == 4: # has H and W - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - # mask = mask.flatten(1) - - additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim - pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0) - - addition_input = torch.stack([latent_input, 
proprio_input], axis=0) - src = torch.cat([addition_input, src], axis=0) - else: - assert len(src.shape) == 3 - # flatten NxHWxC to HWxNxC - bs, hw, c = src.shape - src = src.permute(1, 0, 2) - pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - - tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, - pos=pos_embed, query_pos=query_embed) - hs = hs.transpose(1, 2) - return hs - -class TransformerEncoder(nn.Module): - - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - output = tgt - - intermediate = [] - - for layer in self.layers: - output = layer(output, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - if self.return_intermediate: - intermediate.append(self.norm(output)) - - if self.norm is not None: - output = self.norm(output) - if self.return_intermediate: - intermediate.pop() - intermediate.append(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) - - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src 
= src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - src2 = self.norm1(src) - q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -class TransformerDecoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(tgt, query_pos) - tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = 
self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - return self.forward_post(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - ) - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff --git a/act/detr/setup.py b/act/detr/setup.py deleted file mode 100644 index 55d18c0d..00000000 --- a/act/detr/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from distutils.core import setup -from setuptools import find_packages - -setup( - name='detr', - version='0.0.0', - packages=find_packages(), - license='MIT License', - long_description=open('README.md').read(), -) \ No newline at end of file diff --git a/act/detr/util/__init__.py b/act/detr/util/__init__.py deleted file mode 100644 index 168f9979..00000000 --- a/act/detr/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/act/detr/util/box_ops.py b/act/detr/util/box_ops.py deleted file mode 100644 index 9c088e5b..00000000 --- a/act/detr/util/box_ops.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Utilities for bounding box manipulation and GIoU. 
-""" -import torch -from torchvision.ops.boxes import box_area - - -def box_cxcywh_to_xyxy(x): - x_c, y_c, w, h = x.unbind(-1) - b = [(x_c - 0.5 * w), (y_c - 0.5 * h), - (x_c + 0.5 * w), (y_c + 0.5 * h)] - return torch.stack(b, dim=-1) - - -def box_xyxy_to_cxcywh(x): - x0, y0, x1, y1 = x.unbind(-1) - b = [(x0 + x1) / 2, (y0 + y1) / 2, - (x1 - x0), (y1 - y0)] - return torch.stack(b, dim=-1) - - -# modified from torchvision to also return the union -def box_iou(boxes1, boxes2): - area1 = box_area(boxes1) - area2 = box_area(boxes2) - - lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] - rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] - - wh = (rb - lt).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - union = area1[:, None] + area2 - inter - - iou = inter / union - return iou, union - - -def generalized_box_iou(boxes1, boxes2): - """ - Generalized IoU from https://giou.stanford.edu/ - - The boxes should be in [x0, y0, x1, y1] format - - Returns a [N, M] pairwise matrix, where N = len(boxes1) - and M = len(boxes2) - """ - # degenerate boxes gives inf / nan results - # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() - iou, union = box_iou(boxes1, boxes2) - - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] - - return iou - (area - union) / area - - -def masks_to_boxes(masks): - """Compute the bounding boxes around the provided masks - - The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. - - Returns a [N, 4] tensors, with the boxes in xyxy format - """ - if masks.numel() == 0: - return torch.zeros((0, 4), device=masks.device) - - h, w = masks.shape[-2:] - - y = torch.arange(0, h, dtype=torch.float) - x = torch.arange(0, w, dtype=torch.float) - y, x = torch.meshgrid(y, x) - - x_mask = (masks * x.unsqueeze(0)) - x_max = x_mask.flatten(1).max(-1)[0] - x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - y_mask = (masks * y.unsqueeze(0)) - y_max = y_mask.flatten(1).max(-1)[0] - y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/act/detr/util/misc.py b/act/detr/util/misc.py deleted file mode 100644 index dfa9fb5b..00000000 --- a/act/detr/util/misc.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import os -import subprocess -import time -from collections import defaultdict, deque -import datetime -import pickle -from packaging import version -from typing import Optional, List - -import torch -import torch.distributed as dist -from torch import Tensor - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -if version.parse(torchvision.__version__) < version.parse('0.7'): - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. 
- """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! - """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - if torch.cuda.is_available(): - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}', - 'max mem: {memory:.0f}' - ]) - else: - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ]) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() - sha = 'N/A' - diff = "clean" - branch = 'N/A' - try: - sha = _run(['git', 'rev-parse', 'HEAD']) - subprocess.check_output(['git', 'diff'], cwd=cwd) - diff = _run(['git', 'diff-index', 'HEAD']) - diff = "has uncommited changes" if diff else "clean" - branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - batch = 
list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], :img.shape[2]] = False - else: - raise ValueError('not supported') - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
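Editor's aside (not part of the patch): nested_tensor_from_tensor_list() above zero-pads a list of differently sized (C, H, W) images up to the per-axis maximum and records a boolean mask in which True marks padding; the @torch.jit.unused ONNX-friendly variant that the comment above refers to continues right after this note. A small self-contained sketch of the padding and mask semantics, with made-up shapes:

import torch

imgs = [torch.ones(3, 2, 3), torch.ones(3, 3, 2)]  # two fake images with different H and W
max_size = [3, 3, 3]  # per-axis maxima, as _max_by_axis computes
batch = torch.zeros([len(imgs)] + max_size)
mask = torch.ones((len(imgs), 3, 3), dtype=torch.bool)
for img, pad_img, m in zip(imgs, batch, mask):
    pad_img[:img.shape[0], :img.shape[1], :img.shape[2]].copy_(img)
    m[:img.shape[1], :img.shape[2]] = False  # False marks real pixels
print(mask[0])  # bottom row True: image 0 (H=2) was padded in height
print(mask[1])  # right column True: image 1 (W=2) was padded in width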
-# _onnx_nested_tensor_from_tensor_list() is an implementation of
-# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
-@torch.jit.unused
-def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
-    max_size = []
-    for i in range(tensor_list[0].dim()):
-        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
-        max_size.append(max_size_i)
-    max_size = tuple(max_size)
-
-    # work around for
-    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
-    # m[: img.shape[1], :img.shape[2]] = False
-    # which is not yet supported in onnx
-    padded_imgs = []
-    padded_masks = []
-    for img in tensor_list:
-        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
-        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
-        padded_imgs.append(padded_img)
-
-        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
-        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
-        padded_masks.append(padded_mask.to(torch.bool))
-
-    tensor = torch.stack(padded_imgs)
-    mask = torch.stack(padded_masks)
-
-    return NestedTensor(tensor, mask=mask)
-
-
-def setup_for_distributed(is_master):
-    """
-    This function disables printing when not in master process
-    """
-    import builtins as __builtin__
-    builtin_print = __builtin__.print
-
-    def print(*args, **kwargs):
-        force = kwargs.pop('force', False)
-        if is_master or force:
-            builtin_print(*args, **kwargs)
-
-    __builtin__.print = print
-
-
-def is_dist_avail_and_initialized():
-    if not dist.is_available():
-        return False
-    if not dist.is_initialized():
-        return False
-    return True
-
-
-def get_world_size():
-    if not is_dist_avail_and_initialized():
-        return 1
-    return dist.get_world_size()
-
-
-def get_rank():
-    if not is_dist_avail_and_initialized():
-        return 0
-    return dist.get_rank()
-
-
-def is_main_process():
-    return get_rank() == 0
-
-
-def save_on_master(*args, **kwargs):
-    if is_main_process():
-        torch.save(*args, **kwargs)
-
-
-def init_distributed_mode(args):
-    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
-        args.rank = int(os.environ["RANK"])
-        args.world_size = int(os.environ['WORLD_SIZE'])
-        args.gpu = int(os.environ['LOCAL_RANK'])
-    elif 'SLURM_PROCID' in os.environ:
-        args.rank = int(os.environ['SLURM_PROCID'])
-        args.gpu = args.rank % torch.cuda.device_count()
-    else:
-        print('Not using distributed mode')
-        args.distributed = False
-        return
-
-    args.distributed = True
-
-    torch.cuda.set_device(args.gpu)
-    args.dist_backend = 'nccl'
-    print('| distributed init (rank {}): {}'.format(
-        args.rank, args.dist_url), flush=True)
-    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
-                                         world_size=args.world_size, rank=args.rank)
-    torch.distributed.barrier()
-    setup_for_distributed(args.rank == 0)
-
-
-@torch.no_grad()
-def accuracy(output, target, topk=(1,)):
-    """Computes the precision@k for the specified values of k"""
-    if target.numel() == 0:
-        return [torch.zeros([], device=output.device)]
-    maxk = max(topk)
-    batch_size = target.size(0)
-
-    _, pred = output.topk(maxk, 1, True, True)
-    pred = pred.t()
-    correct = pred.eq(target.view(1, -1).expand_as(pred))
-
-    res = []
-    for k in topk:
-        correct_k = correct[:k].view(-1).float().sum(0)
-        res.append(correct_k.mul_(100.0 / batch_size))
-    return res
-
-
-def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
-    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
-    """
-    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
-    This will eventually be supported natively by PyTorch, and this
-    class can go away.
-    """
-    if version.parse(torchvision.__version__) < version.parse('0.7'):
-        if input.numel() > 0:
-            return torch.nn.functional.interpolate(
-                input, size, scale_factor, mode, align_corners
-            )
-
-        output_shape = _output_size(2, input, size, scale_factor)
-        output_shape = list(input.shape[:-2]) + list(output_shape)
-        return _new_empty_tensor(input, output_shape)
-    else:
-        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
diff --git a/act/detr/util/plot_utils.py b/act/detr/util/plot_utils.py
deleted file mode 100644
index 0f24bed0..00000000
--- a/act/detr/util/plot_utils.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Plotting utilities to visualize training logs.
-"""
-import torch
-import pandas as pd
-import numpy as np
-import seaborn as sns
-import matplotlib.pyplot as plt
-
-from pathlib import Path, PurePath
-
-
-def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):
-    '''
-    Function to plot specific fields from training log(s). Plots both training and test results.
-
-    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file
-              - fields = which results to plot from each log file - plots both training and test for each field.
-              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots
-              - log_name = optional, name of log file if different than default 'log.txt'.
-
-    :: Outputs - matplotlib plots of results in fields, color coded for each log file.
-               - solid lines are training results, dashed lines are test results.
-
-    '''
-    func_name = "plot_utils.py::plot_logs"
-
-    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,
-    # convert single Path to list to avoid 'not iterable' error
-
-    if not isinstance(logs, list):
-        if isinstance(logs, PurePath):
-            logs = [logs]
-            print(f"{func_name} info: logs param expects a list argument, converted to list[Path].")
-        else:
-            raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \
-            Expect list[Path] or single Path obj, received {type(logs)}")
-
-    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir
-    for i, dir in enumerate(logs):
-        if not isinstance(dir, PurePath):
-            raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}")
-        if not dir.exists():
-            raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}")
-        # verify log_name exists
-        fn = Path(dir / log_name)
-        if not fn.exists():
-            print(f"-> missing {log_name}. Have you gotten to Epoch 1 in training?")
Have you gotten to Epoch 1 in training?") - print(f"--> full path of missing log file: {fn}") - return - - # load log file(s) and plot - dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] - - fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) - - for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): - for j, field in enumerate(fields): - if field == 'mAP': - coco_eval = pd.DataFrame( - np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] - ).ewm(com=ewm_col).mean() - axs[j].plot(coco_eval, c=color) - else: - df.interpolate().ewm(com=ewm_col).mean().plot( - y=[f'train_{field}', f'test_{field}'], - ax=axs[j], - color=[color] * 2, - style=['-', '--'] - ) - for ax, field in zip(axs, fields): - ax.legend([Path(p).name for p in logs]) - ax.set_title(field) - - -def plot_precision_recall(files, naming_scheme='iter'): - if naming_scheme == 'exp_id': - # name becomes exp_id - names = [f.parts[-3] for f in files] - elif naming_scheme == 'iter': - names = [f.stem for f in files] - else: - raise ValueError(f'not supported {naming_scheme}') - fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) - for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): - data = torch.load(f) - # precision is n_iou, n_points, n_cat, n_area, max_det - precision = data['precision'] - recall = data['params'].recThrs - scores = data['scores'] - # take precision for all classes, all areas and 100 detections - precision = precision[0, :, :, 0, -1].mean(1) - scores = scores[0, :, :, 0, -1].mean(1) - prec = precision.mean() - rec = data['recall'][0, :, 0, -1].mean() - print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + - f'score={scores.mean():0.3f}, ' + - f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' - ) - axs[0].plot(recall, precision, c=color) - axs[1].plot(recall, scores, c=color) - - axs[0].set_title('Precision / Recall') - axs[0].legend(names) - axs[1].set_title('Scores / Recall') - axs[1].legend(names) - return fig, axs diff --git a/act/ee_sim_env.py b/act/ee_sim_env.py deleted file mode 100644 index 01df2337..00000000 --- a/act/ee_sim_env.py +++ /dev/null @@ -1,267 +0,0 @@ -import numpy as np -import collections -import os - -from constants import DT, XML_DIR, START_ARM_POSE -from constants import PUPPET_GRIPPER_POSITION_CLOSE -from constants import PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN -from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN -from constants import PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN - -from utils import sample_box_pose, sample_insertion_pose -from dm_control import mujoco -from dm_control.rl import control -from dm_control.suite import base - -import IPython -e = IPython.embed - - -def make_ee_sim_env(task_name): - """ - Environment for simulated robot bi-manual manipulation, with end-effector control. 
-    Action space:      [left_arm_pose (7),             # position and quaternion for end effector
-                        left_gripper_positions (1),    # normalized gripper position (0: close, 1: open)
-                        right_arm_pose (7),            # position and quaternion for end effector
-                        right_gripper_positions (1),]  # normalized gripper position (0: close, 1: open)
-
-    Observation space: {"qpos": Concat[ left_arm_qpos (6),          # absolute joint position
-                                        left_gripper_position (1),  # normalized gripper position (0: close, 1: open)
-                                        right_arm_qpos (6),         # absolute joint position
-                                        right_gripper_qpos (1)]     # normalized gripper position (0: close, 1: open)
-                        "qvel": Concat[ left_arm_qvel (6),          # absolute joint velocity (rad)
-                                        left_gripper_velocity (1),  # normalized gripper velocity (pos: opening, neg: closing)
-                                        right_arm_qvel (6),         # absolute joint velocity (rad)
-                                        right_gripper_qvel (1)]     # normalized gripper velocity (pos: opening, neg: closing)
-                        "images": {"main": (480x640x3)}             # h, w, c, dtype='uint8'
-    """
-    if 'sim_transfer_cube' in task_name:
-        xml_path = os.path.join(XML_DIR, f'bimanual_viperx_ee_transfer_cube.xml')
-        physics = mujoco.Physics.from_xml_path(xml_path)
-        task = TransferCubeEETask(random=False)
-        env = control.Environment(physics, task, time_limit=20, control_timestep=DT,
-                                  n_sub_steps=None, flat_observation=False)
-    elif 'sim_insertion' in task_name:
-        xml_path = os.path.join(XML_DIR, f'bimanual_viperx_ee_insertion.xml')
-        physics = mujoco.Physics.from_xml_path(xml_path)
-        task = InsertionEETask(random=False)
-        env = control.Environment(physics, task, time_limit=20, control_timestep=DT,
-                                  n_sub_steps=None, flat_observation=False)
-    else:
-        raise NotImplementedError
-    return env
-
-class BimanualViperXEETask(base.Task):
-    def __init__(self, random=None):
-        super().__init__(random=random)
-
-    def before_step(self, action, physics):
-        a_len = len(action) // 2
-        action_left = action[:a_len]
-        action_right = action[a_len:]
-
-        # set mocap position and quat
-        # left
-        np.copyto(physics.data.mocap_pos[0], action_left[:3])
-        np.copyto(physics.data.mocap_quat[0], action_left[3:7])
-        # right
-        np.copyto(physics.data.mocap_pos[1], action_right[:3])
-        np.copyto(physics.data.mocap_quat[1], action_right[3:7])
-
-        # set gripper
-        g_left_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(action_left[7])
-        g_right_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(action_right[7])
-        np.copyto(physics.data.ctrl, np.array([g_left_ctrl, -g_left_ctrl, g_right_ctrl, -g_right_ctrl]))
-
-    def initialize_robots(self, physics):
-        # reset joint position
-        physics.named.data.qpos[:16] = START_ARM_POSE
-
-        # reset mocap to align with end effector
-        # to obtain these numbers:
-        # (1) make an ee_sim env and reset to the same start_pose
-        # (2) get env._physics.named.data.xpos['vx300s_left/gripper_link']
-        #     get env._physics.named.data.xquat['vx300s_left/gripper_link']
-        #     repeat the same for right side
-        np.copyto(physics.data.mocap_pos[0], [-0.31718881, 0.5, 0.29525084])
-        np.copyto(physics.data.mocap_quat[0], [1, 0, 0, 0])
-        # right
-        np.copyto(physics.data.mocap_pos[1], np.array([0.31718881, 0.49999888, 0.29525084]))
-        np.copyto(physics.data.mocap_quat[1], [1, 0, 0, 0])
-
-        # reset gripper control
-        close_gripper_control = np.array([
-            PUPPET_GRIPPER_POSITION_CLOSE,
-            -PUPPET_GRIPPER_POSITION_CLOSE,
-            PUPPET_GRIPPER_POSITION_CLOSE,
-            -PUPPET_GRIPPER_POSITION_CLOSE,
-        ])
-        np.copyto(physics.data.ctrl, close_gripper_control)
-
-    def initialize_episode(self, physics):
-        """Sets the state of the environment at the start of each episode."""
-        super().initialize_episode(physics)
-
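# Illustrative usage sketch (not a line of the deleted file): driving the EE env
# with the 16-dim action documented in make_ee_sim_env() above, assuming the act
# repo's XML assets and constants are available.
#
#   import numpy as np
#   env = make_ee_sim_env('sim_transfer_cube_scripted')
#   ts = env.reset()
#   left = np.concatenate([ts.observation['mocap_pose_left'], [1.0]])    # hold pose, gripper open
#   right = np.concatenate([ts.observation['mocap_pose_right'], [1.0]])
#   ts = env.step(np.concatenate([left, right]))                         # 8 + 8 = 16 dims
#   print(ts.reward, ts.observation['qpos'].shape)                       # reward in 0..4, (14,)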
-    @staticmethod
-    def get_qpos(physics):
-        qpos_raw = physics.data.qpos.copy()
-        left_qpos_raw = qpos_raw[:8]
-        right_qpos_raw = qpos_raw[8:16]
-        left_arm_qpos = left_qpos_raw[:6]
-        right_arm_qpos = right_qpos_raw[:6]
-        left_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_qpos_raw[6])]
-        right_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_qpos_raw[6])]
-        return np.concatenate([left_arm_qpos, left_gripper_qpos, right_arm_qpos, right_gripper_qpos])
-
-    @staticmethod
-    def get_qvel(physics):
-        qvel_raw = physics.data.qvel.copy()
-        left_qvel_raw = qvel_raw[:8]
-        right_qvel_raw = qvel_raw[8:16]
-        left_arm_qvel = left_qvel_raw[:6]
-        right_arm_qvel = right_qvel_raw[:6]
-        left_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_qvel_raw[6])]
-        right_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_qvel_raw[6])]
-        return np.concatenate([left_arm_qvel, left_gripper_qvel, right_arm_qvel, right_gripper_qvel])
-
-    @staticmethod
-    def get_env_state(physics):
-        raise NotImplementedError
-
-    def get_observation(self, physics):
-        # note: it is important to do .copy()
-        obs = collections.OrderedDict()
-        obs['qpos'] = self.get_qpos(physics)
-        obs['qvel'] = self.get_qvel(physics)
-        obs['env_state'] = self.get_env_state(physics)
-        obs['images'] = dict()
-        obs['images']['top'] = physics.render(height=480, width=640, camera_id='top')
-        obs['images']['angle'] = physics.render(height=480, width=640, camera_id='angle')
-        obs['images']['vis'] = physics.render(height=480, width=640, camera_id='front_close')
-        # used in scripted policy to obtain starting pose
-        obs['mocap_pose_left'] = np.concatenate([physics.data.mocap_pos[0], physics.data.mocap_quat[0]]).copy()
-        obs['mocap_pose_right'] = np.concatenate([physics.data.mocap_pos[1], physics.data.mocap_quat[1]]).copy()
-
-        # used when replaying joint trajectory
-        obs['gripper_ctrl'] = physics.data.ctrl.copy()
-        return obs
-
-    def get_reward(self, physics):
-        raise NotImplementedError
-
-
-class TransferCubeEETask(BimanualViperXEETask):
-    def __init__(self, random=None):
-        super().__init__(random=random)
-        self.max_reward = 4
-
-    def initialize_episode(self, physics):
-        """Sets the state of the environment at the start of each episode."""
-        self.initialize_robots(physics)
-        # randomize box position
-        cube_pose = sample_box_pose()
-        box_start_idx = physics.model.name2id('red_box_joint', 'joint')
-        np.copyto(physics.data.qpos[box_start_idx : box_start_idx + 7], cube_pose)
-        # print(f"randomized cube position to {cube_position}")
-
-        super().initialize_episode(physics)
-
-    @staticmethod
-    def get_env_state(physics):
-        env_state = physics.data.qpos.copy()[16:]
-        return env_state
-
-    def get_reward(self, physics):
-        # return whether left gripper is holding the box
-        all_contact_pairs = []
-        for i_contact in range(physics.data.ncon):
-            id_geom_1 = physics.data.contact[i_contact].geom1
-            id_geom_2 = physics.data.contact[i_contact].geom2
-            name_geom_1 = physics.model.id2name(id_geom_1, 'geom')
-            name_geom_2 = physics.model.id2name(id_geom_2, 'geom')
-            contact_pair = (name_geom_1, name_geom_2)
-            all_contact_pairs.append(contact_pair)
-
-        touch_left_gripper = ("red_box", "vx300s_left/10_left_gripper_finger") in all_contact_pairs
-        touch_right_gripper = ("red_box", "vx300s_right/10_right_gripper_finger") in all_contact_pairs
-        touch_table = ("red_box", "table") in all_contact_pairs
-
-        reward = 0
-        if touch_right_gripper:
-            reward = 1
-        if touch_right_gripper and not touch_table: # lifted
-            reward = 2
-        if touch_left_gripper: # attempted transfer
-            reward = 3
-        if touch_left_gripper and not touch_table: # successful transfer
-            reward = 4
-        return reward
-
-
-class InsertionEETask(BimanualViperXEETask):
-    def __init__(self, random=None):
-        super().__init__(random=random)
-        self.max_reward = 4
-
-    def initialize_episode(self, physics):
-        """Sets the state of the environment at the start of each episode."""
-        self.initialize_robots(physics)
-        # randomize peg and socket position
-        peg_pose, socket_pose = sample_insertion_pose()
-        id2index = lambda j_id: 16 + (j_id - 16) * 7 # first 16 is robot qpos, 7 is pose dim # hacky
-
-        peg_start_id = physics.model.name2id('red_peg_joint', 'joint')
-        peg_start_idx = id2index(peg_start_id)
-        np.copyto(physics.data.qpos[peg_start_idx : peg_start_idx + 7], peg_pose)
-        # print(f"randomized cube position to {cube_position}")
-
-        socket_start_id = physics.model.name2id('blue_socket_joint', 'joint')
-        socket_start_idx = id2index(socket_start_id)
-        np.copyto(physics.data.qpos[socket_start_idx : socket_start_idx + 7], socket_pose)
-        # print(f"randomized cube position to {cube_position}")
-
-        super().initialize_episode(physics)
-
-    @staticmethod
-    def get_env_state(physics):
-        env_state = physics.data.qpos.copy()[16:]
-        return env_state
-
-    def get_reward(self, physics):
-        # return whether peg touches the pin
-        all_contact_pairs = []
-        for i_contact in range(physics.data.ncon):
-            id_geom_1 = physics.data.contact[i_contact].geom1
-            id_geom_2 = physics.data.contact[i_contact].geom2
-            name_geom_1 = physics.model.id2name(id_geom_1, 'geom')
-            name_geom_2 = physics.model.id2name(id_geom_2, 'geom')
-            contact_pair = (name_geom_1, name_geom_2)
-            all_contact_pairs.append(contact_pair)
-
-        touch_right_gripper = ("red_peg", "vx300s_right/10_right_gripper_finger") in all_contact_pairs
-        touch_left_gripper = ("socket-1", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \
-                             ("socket-2", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \
-                             ("socket-3", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \
-                             ("socket-4", "vx300s_left/10_left_gripper_finger") in all_contact_pairs
-
-        peg_touch_table = ("red_peg", "table") in all_contact_pairs
-        socket_touch_table = ("socket-1", "table") in all_contact_pairs or \
-                             ("socket-2", "table") in all_contact_pairs or \
-                             ("socket-3", "table") in all_contact_pairs or \
-                             ("socket-4", "table") in all_contact_pairs
-        peg_touch_socket = ("red_peg", "socket-1") in all_contact_pairs or \
-                           ("red_peg", "socket-2") in all_contact_pairs or \
-                           ("red_peg", "socket-3") in all_contact_pairs or \
-                           ("red_peg", "socket-4") in all_contact_pairs
-        pin_touched = ("red_peg", "pin") in all_contact_pairs
-
-        reward = 0
-        if touch_left_gripper and touch_right_gripper: # touch both
-            reward = 1
-        if touch_left_gripper and touch_right_gripper and (not peg_touch_table) and (not socket_touch_table): # grasp both
-            reward = 2
-        if peg_touch_socket and (not peg_touch_table) and (not socket_touch_table): # peg and socket touching
-            reward = 3
-        if pin_touched: # successful insertion
-            reward = 4
-        return reward
diff --git a/act/imitate_episodes.py b/act/imitate_episodes.py
deleted file mode 100644
index 34f9a372..00000000
--- a/act/imitate_episodes.py
+++ /dev/null
@@ -1,435 +0,0 @@
-import torch
-import numpy as np
-import os
-import pickle
-import argparse
-import matplotlib.pyplot as plt
-from copy import deepcopy
-from tqdm import tqdm
-from einops import rearrange
-
-from constants import DT
-from constants import PUPPET_GRIPPER_JOINT_OPEN
-from utils import load_data # data functions
-from utils import sample_box_pose, sample_insertion_pose # robot functions
-from utils import compute_dict_mean, set_seed, detach_dict # helper functions
-from policy import ACTPolicy, CNNMLPPolicy
-from visualize_episodes import save_videos
-
-from sim_env import BOX_POSE
-
-import IPython
-e = IPython.embed
-
-def main(args):
-    set_seed(1)
-    # command line parameters
-    is_eval = args['eval']
-    ckpt_dir = args['ckpt_dir']
-    policy_class = args['policy_class']
-    onscreen_render = args['onscreen_render']
-    task_name = args['task_name']
-    batch_size_train = args['batch_size']
-    batch_size_val = args['batch_size']
-    num_epochs = args['num_epochs']
-
-    # get task parameters
-    is_sim = task_name[:4] == 'sim_'
-    if is_sim:
-        from constants import SIM_TASK_CONFIGS
-        task_config = SIM_TASK_CONFIGS[task_name]
-    else:
-        from aloha_scripts.constants import TASK_CONFIGS
-        task_config = TASK_CONFIGS[task_name]
-    dataset_dir = task_config['dataset_dir']
-    num_episodes = task_config['num_episodes']
-    episode_len = task_config['episode_len']
-    camera_names = task_config['camera_names']
-
-    # fixed parameters
-    state_dim = 14
-    lr_backbone = 1e-5
-    backbone = 'resnet18'
-    if policy_class == 'ACT':
-        enc_layers = 4
-        dec_layers = 7
-        nheads = 8
-        policy_config = {'lr': args['lr'],
-                         'num_queries': args['chunk_size'],
-                         'kl_weight': args['kl_weight'],
-                         'hidden_dim': args['hidden_dim'],
-                         'dim_feedforward': args['dim_feedforward'],
-                         'lr_backbone': lr_backbone,
-                         'backbone': backbone,
-                         'enc_layers': enc_layers,
-                         'dec_layers': dec_layers,
-                         'nheads': nheads,
-                         'camera_names': camera_names,
-                         }
-    elif policy_class == 'CNNMLP':
-        policy_config = {'lr': args['lr'], 'lr_backbone': lr_backbone, 'backbone' : backbone, 'num_queries': 1,
-                         'camera_names': camera_names,}
-    else:
-        raise NotImplementedError
-
-    config = {
-        'num_epochs': num_epochs,
-        'ckpt_dir': ckpt_dir,
-        'episode_len': episode_len,
-        'state_dim': state_dim,
-        'lr': args['lr'],
-        'policy_class': policy_class,
-        'onscreen_render': onscreen_render,
-        'policy_config': policy_config,
-        'task_name': task_name,
-        'seed': args['seed'],
-        'temporal_agg': args['temporal_agg'],
-        'camera_names': camera_names,
-        'real_robot': not is_sim
-    }
-
-    if is_eval:
-        ckpt_names = [f'policy_best.ckpt']
-        results = []
-        for ckpt_name in ckpt_names:
-            success_rate, avg_return = eval_bc(config, ckpt_name, save_episode=True)
-            results.append([ckpt_name, success_rate, avg_return])
-
-        for ckpt_name, success_rate, avg_return in results:
-            print(f'{ckpt_name}: {success_rate=} {avg_return=}')
-        print()
-        exit()
-
-    train_dataloader, val_dataloader, stats, _ = load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val)
-
-    # save dataset stats
-    if not os.path.isdir(ckpt_dir):
-        os.makedirs(ckpt_dir)
-    stats_path = os.path.join(ckpt_dir, f'dataset_stats.pkl')
-    with open(stats_path, 'wb') as f:
-        pickle.dump(stats, f)
-
-    best_ckpt_info = train_bc(train_dataloader, val_dataloader, config)
-    best_epoch, min_val_loss, best_state_dict = best_ckpt_info
-
-    # save best checkpoint
-    ckpt_path = os.path.join(ckpt_dir, f'policy_best.ckpt')
-    torch.save(best_state_dict, ckpt_path)
-    print(f'Best ckpt, val loss {min_val_loss:.6f} @ epoch{best_epoch}')
-
-
-def make_policy(policy_class, policy_config):
-    if policy_class == 'ACT':
-        policy = ACTPolicy(policy_config)
-    elif policy_class == 'CNNMLP':
-        policy = CNNMLPPolicy(policy_config)
-    else:
-        raise NotImplementedError
-    return policy
-
-
-def make_optimizer(policy_class, policy):
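    # (Descriptive note, not a line of the deleted file: both branches below simply
    #  return the optimizer that build_ACT_model_and_optimizer / build_CNNMLP_model_and_optimizer
    #  created alongside the model; see configure_optimizers() in act/policy.py further down.)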
-    if policy_class == 'ACT':
-        optimizer = policy.configure_optimizers()
-    elif policy_class == 'CNNMLP':
-        optimizer = policy.configure_optimizers()
-    else:
-        raise NotImplementedError
-    return optimizer
-
-
-def get_image(ts, camera_names):
-    curr_images = []
-    for cam_name in camera_names:
-        curr_image = rearrange(ts.observation['images'][cam_name], 'h w c -> c h w')
-        curr_images.append(curr_image)
-    curr_image = np.stack(curr_images, axis=0)
-    curr_image = torch.from_numpy(curr_image / 255.0).float().cuda().unsqueeze(0)
-    return curr_image
-
-
-def eval_bc(config, ckpt_name, save_episode=True):
-    set_seed(1000)
-    ckpt_dir = config['ckpt_dir']
-    state_dim = config['state_dim']
-    real_robot = config['real_robot']
-    policy_class = config['policy_class']
-    onscreen_render = config['onscreen_render']
-    policy_config = config['policy_config']
-    camera_names = config['camera_names']
-    max_timesteps = config['episode_len']
-    task_name = config['task_name']
-    temporal_agg = config['temporal_agg']
-    onscreen_cam = 'angle'
-
-    # load policy and stats
-    ckpt_path = os.path.join(ckpt_dir, ckpt_name)
-    policy = make_policy(policy_class, policy_config)
-    loading_status = policy.load_state_dict(torch.load(ckpt_path))
-    print(loading_status)
-    policy.cuda()
-    policy.eval()
-    print(f'Loaded: {ckpt_path}')
-    stats_path = os.path.join(ckpt_dir, f'dataset_stats.pkl')
-    with open(stats_path, 'rb') as f:
-        stats = pickle.load(f)
-
-    pre_process = lambda s_qpos: (s_qpos - stats['qpos_mean']) / stats['qpos_std']
-    post_process = lambda a: a * stats['action_std'] + stats['action_mean']
-
-    # load environment
-    if real_robot:
-        from aloha_scripts.robot_utils import move_grippers # requires aloha
-        from aloha_scripts.real_env import make_real_env # requires aloha
-        env = make_real_env(init_node=True)
-        env_max_reward = 0
-    else:
-        from sim_env import make_sim_env
-        env = make_sim_env(task_name)
-        env_max_reward = env.task.max_reward
-
-    query_frequency = policy_config['num_queries']
-    if temporal_agg:
-        query_frequency = 1
-        num_queries = policy_config['num_queries']
-
-    max_timesteps = int(max_timesteps * 1) # may increase for real-world tasks
-
-    num_rollouts = 50
-    episode_returns = []
-    highest_rewards = []
-    for rollout_id in range(num_rollouts):
-        rollout_id += 0
-        ### set task
-        if 'sim_transfer_cube' in task_name:
-            BOX_POSE[0] = sample_box_pose() # used in sim reset
-        elif 'sim_insertion' in task_name:
-            BOX_POSE[0] = np.concatenate(sample_insertion_pose()) # used in sim reset
-
-        ts = env.reset()
-
-        ### onscreen render
-        if onscreen_render:
-            ax = plt.subplot()
-            plt_img = ax.imshow(env._physics.render(height=480, width=640, camera_id=onscreen_cam))
-            plt.ion()
-
-        ### evaluation loop
-        if temporal_agg:
-            all_time_actions = torch.zeros([max_timesteps, max_timesteps+num_queries, state_dim]).cuda()
-
-        qpos_history = torch.zeros((1, max_timesteps, state_dim)).cuda()
-        image_list = [] # for visualization
-        qpos_list = []
-        target_qpos_list = []
-        rewards = []
-        with torch.inference_mode():
-            for t in range(max_timesteps):
-                ### update onscreen render and wait for DT
-                if onscreen_render:
-                    image = env._physics.render(height=480, width=640, camera_id=onscreen_cam)
-                    plt_img.set_data(image)
-                    plt.pause(DT)
-
-                ### process previous timestep to get qpos and image_list
-                obs = ts.observation
-                if 'images' in obs:
-                    image_list.append(obs['images'])
-                else:
-                    image_list.append({'main': obs['image']})
-                qpos_numpy = np.array(obs['qpos'])
-                qpos = pre_process(qpos_numpy)
-                qpos = torch.from_numpy(qpos).float().cuda().unsqueeze(0)
-                qpos_history[:, t] = qpos
-                curr_image = get_image(ts, camera_names)
-
-                ### query policy
-                if config['policy_class'] == "ACT":
-                    if t % query_frequency == 0:
-                        all_actions = policy(qpos, curr_image)
-                    if temporal_agg:
-                        all_time_actions[[t], t:t+num_queries] = all_actions
-                        actions_for_curr_step = all_time_actions[:, t]
-                        actions_populated = torch.all(actions_for_curr_step != 0, axis=1)
-                        actions_for_curr_step = actions_for_curr_step[actions_populated]
-                        k = 0.01
-                        exp_weights = np.exp(-k * np.arange(len(actions_for_curr_step)))
-                        exp_weights = exp_weights / exp_weights.sum()
-                        exp_weights = torch.from_numpy(exp_weights).cuda().unsqueeze(dim=1)
-                        raw_action = (actions_for_curr_step * exp_weights).sum(dim=0, keepdim=True)
-                    else:
-                        raw_action = all_actions[:, t % query_frequency]
-                elif config['policy_class'] == "CNNMLP":
-                    raw_action = policy(qpos, curr_image)
-                else:
-                    raise NotImplementedError
-
-                ### post-process actions
-                raw_action = raw_action.squeeze(0).cpu().numpy()
-                action = post_process(raw_action)
-                target_qpos = action
-
-                ### step the environment
-                ts = env.step(target_qpos)
-
-                ### for visualization
-                qpos_list.append(qpos_numpy)
-                target_qpos_list.append(target_qpos)
-                rewards.append(ts.reward)
-
-            plt.close()
-        if real_robot:
-            move_grippers([env.puppet_bot_left, env.puppet_bot_right], [PUPPET_GRIPPER_JOINT_OPEN] * 2, move_time=0.5) # open
-            pass
-
-        rewards = np.array(rewards)
-        episode_return = np.sum(rewards[rewards!=None])
-        episode_returns.append(episode_return)
-        episode_highest_reward = np.max(rewards)
-        highest_rewards.append(episode_highest_reward)
-        print(f'Rollout {rollout_id}\n{episode_return=}, {episode_highest_reward=}, {env_max_reward=}, Success: {episode_highest_reward==env_max_reward}')
-
-        if save_episode:
-            save_videos(image_list, DT, video_path=os.path.join(ckpt_dir, f'video{rollout_id}.mp4'))
-
-    success_rate = np.mean(np.array(highest_rewards) == env_max_reward)
-    avg_return = np.mean(episode_returns)
-    summary_str = f'\nSuccess rate: {success_rate}\nAverage return: {avg_return}\n\n'
-    for r in range(env_max_reward+1):
-        more_or_equal_r = (np.array(highest_rewards) >= r).sum()
-        more_or_equal_r_rate = more_or_equal_r / num_rollouts
-        summary_str += f'Reward >= {r}: {more_or_equal_r}/{num_rollouts} = {more_or_equal_r_rate*100}%\n'
-
-    print(summary_str)
-
-    # save success rate to txt
-    result_file_name = 'result_' + ckpt_name.split('.')[0] + '.txt'
-    with open(os.path.join(ckpt_dir, result_file_name), 'w') as f:
-        f.write(summary_str)
-        f.write(repr(episode_returns))
-        f.write('\n\n')
-        f.write(repr(highest_rewards))
-
-    return success_rate, avg_return
-
-
-def forward_pass(data, policy):
-    image_data, qpos_data, action_data, is_pad = data
-    image_data, qpos_data, action_data, is_pad = image_data.cuda(), qpos_data.cuda(), action_data.cuda(), is_pad.cuda()
-    return policy(qpos_data, image_data, action_data, is_pad) # TODO remove None
-
-
-def train_bc(train_dataloader, val_dataloader, config):
-    num_epochs = config['num_epochs']
-    ckpt_dir = config['ckpt_dir']
-    seed = config['seed']
-    policy_class = config['policy_class']
-    policy_config = config['policy_config']
-
-    set_seed(seed)
-
-    policy = make_policy(policy_class, policy_config)
-    policy.cuda()
-    optimizer = make_optimizer(policy_class, policy)
-
-    train_history = []
-    validation_history = []
-    min_val_loss = np.inf
-    best_ckpt_info = None
-    for epoch in tqdm(range(num_epochs)):
-        print(f'\nEpoch {epoch}')
-        # validation
-        with torch.inference_mode():
-            policy.eval()
-            epoch_dicts = []
-            for batch_idx, data in enumerate(val_dataloader):
-                forward_dict = forward_pass(data, policy)
-                epoch_dicts.append(forward_dict)
-            epoch_summary = compute_dict_mean(epoch_dicts)
-            validation_history.append(epoch_summary)
-
-            epoch_val_loss = epoch_summary['loss']
-            if epoch_val_loss < min_val_loss:
-                min_val_loss = epoch_val_loss
-                best_ckpt_info = (epoch, min_val_loss, deepcopy(policy.state_dict()))
-        print(f'Val loss: {epoch_val_loss:.5f}')
-        summary_string = ''
-        for k, v in epoch_summary.items():
-            summary_string += f'{k}: {v.item():.3f} '
-        print(summary_string)
-
-        # training
-        policy.train()
-        optimizer.zero_grad()
-        for batch_idx, data in enumerate(train_dataloader):
-            forward_dict = forward_pass(data, policy)
-            # backward
-            loss = forward_dict['loss']
-            loss.backward()
-            optimizer.step()
-            optimizer.zero_grad()
-            train_history.append(detach_dict(forward_dict))
-        epoch_summary = compute_dict_mean(train_history[(batch_idx+1)*epoch:(batch_idx+1)*(epoch+1)])
-        epoch_train_loss = epoch_summary['loss']
-        print(f'Train loss: {epoch_train_loss:.5f}')
-        summary_string = ''
-        for k, v in epoch_summary.items():
-            summary_string += f'{k}: {v.item():.3f} '
-        print(summary_string)
-
-        if epoch % 100 == 0:
-            ckpt_path = os.path.join(ckpt_dir, f'policy_epoch_{epoch}_seed_{seed}.ckpt')
-            torch.save(policy.state_dict(), ckpt_path)
-            plot_history(train_history, validation_history, epoch, ckpt_dir, seed)
-
-    ckpt_path = os.path.join(ckpt_dir, f'policy_last.ckpt')
-    torch.save(policy.state_dict(), ckpt_path)
-
-    best_epoch, min_val_loss, best_state_dict = best_ckpt_info
-    ckpt_path = os.path.join(ckpt_dir, f'policy_epoch_{best_epoch}_seed_{seed}.ckpt')
-    torch.save(best_state_dict, ckpt_path)
-    print(f'Training finished:\nSeed {seed}, val loss {min_val_loss:.6f} at epoch {best_epoch}')
-
-    # save training curves
-    plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed)
-
-    return best_ckpt_info
-
-
-def plot_history(train_history, validation_history, num_epochs, ckpt_dir, seed):
-    # save training curves
-    for key in train_history[0]:
-        plot_path = os.path.join(ckpt_dir, f'train_val_{key}_seed_{seed}.png')
-        plt.figure()
-        train_values = [summary[key].item() for summary in train_history]
-        val_values = [summary[key].item() for summary in validation_history]
-        plt.plot(np.linspace(0, num_epochs-1, len(train_history)), train_values, label='train')
-        plt.plot(np.linspace(0, num_epochs-1, len(validation_history)), val_values, label='validation')
-        # plt.ylim([-0.1, 1])
-        plt.tight_layout()
-        plt.legend()
-        plt.title(key)
-        plt.savefig(plot_path)
-    print(f'Saved plots to {ckpt_dir}')
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--eval', action='store_true')
-    parser.add_argument('--onscreen_render', action='store_true')
-    parser.add_argument('--ckpt_dir', action='store', type=str, help='ckpt_dir', required=True)
-    parser.add_argument('--policy_class', action='store', type=str, help='policy_class, capitalize', required=True)
-    parser.add_argument('--task_name', action='store', type=str, help='task_name', required=True)
-    parser.add_argument('--batch_size', action='store', type=int, help='batch_size', required=True)
-    parser.add_argument('--seed', action='store', type=int, help='seed', required=True)
-    parser.add_argument('--num_epochs', action='store', type=int, help='num_epochs', required=True)
-    parser.add_argument('--lr', action='store', type=float, help='lr', required=True)
-
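# Illustrative invocation (not a line of the deleted file), combining the required
# flags above with the ACT-specific flags registered just below; the values shown
# are typical ACT settings, not prescribed by this script:
#
#   python3 imitate_episodes.py --task_name sim_transfer_cube_scripted \
#       --ckpt_dir /tmp/act_ckpt --policy_class ACT --batch_size 8 --seed 0 \
#       --num_epochs 2000 --lr 1e-5 \
#       --kl_weight 10 --chunk_size 100 --hidden_dim 512 --dim_feedforward 3200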
-    # for ACT
-    parser.add_argument('--kl_weight', action='store', type=int, help='KL Weight', required=False)
-    parser.add_argument('--chunk_size', action='store', type=int, help='chunk_size', required=False)
-    parser.add_argument('--hidden_dim', action='store', type=int, help='hidden_dim', required=False)
-    parser.add_argument('--dim_feedforward', action='store', type=int, help='dim_feedforward', required=False)
-    parser.add_argument('--temporal_agg', action='store_true')
-
-    main(vars(parser.parse_args()))
diff --git a/act/policy.py b/act/policy.py
deleted file mode 100644
index 7b091e5e..00000000
--- a/act/policy.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import torch.nn as nn
-from torch.nn import functional as F
-import torchvision.transforms as transforms
-
-from detr.main import build_ACT_model_and_optimizer, build_CNNMLP_model_and_optimizer
-import IPython
-e = IPython.embed
-
-class ACTPolicy(nn.Module):
-    def __init__(self, args_override):
-        super().__init__()
-        model, optimizer = build_ACT_model_and_optimizer(args_override)
-        self.model = model # CVAE decoder
-        self.optimizer = optimizer
-        self.kl_weight = args_override['kl_weight']
-        print(f'KL Weight {self.kl_weight}')
-
-    def __call__(self, qpos, image, actions=None, is_pad=None):
-        env_state = None
-        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                         std=[0.229, 0.224, 0.225])
-        image = normalize(image)
-        if actions is not None: # training time
-            actions = actions[:, :self.model.num_queries]
-            is_pad = is_pad[:, :self.model.num_queries]
-
-            a_hat, is_pad_hat, (mu, logvar) = self.model(qpos, image, env_state, actions, is_pad)
-            total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)
-            loss_dict = dict()
-            all_l1 = F.l1_loss(actions, a_hat, reduction='none')
-            l1 = (all_l1 * ~is_pad.unsqueeze(-1)).mean()
-            loss_dict['l1'] = l1
-            loss_dict['kl'] = total_kld[0]
-            loss_dict['loss'] = loss_dict['l1'] + loss_dict['kl'] * self.kl_weight
-            return loss_dict
-        else: # inference time
-            a_hat, _, (_, _) = self.model(qpos, image, env_state) # no action, sample from prior
-            return a_hat
-
-    def configure_optimizers(self):
-        return self.optimizer
-
-
-class CNNMLPPolicy(nn.Module):
-    def __init__(self, args_override):
-        super().__init__()
-        model, optimizer = build_CNNMLP_model_and_optimizer(args_override)
-        self.model = model # decoder
-        self.optimizer = optimizer
-
-    def __call__(self, qpos, image, actions=None, is_pad=None):
-        env_state = None # TODO
-        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                         std=[0.229, 0.224, 0.225])
-        image = normalize(image)
-        if actions is not None: # training time
-            actions = actions[:, 0]
-            a_hat = self.model(qpos, image, env_state, actions)
-            mse = F.mse_loss(actions, a_hat)
-            loss_dict = dict()
-            loss_dict['mse'] = mse
-            loss_dict['loss'] = loss_dict['mse']
-            return loss_dict
-        else: # inference time
-            a_hat = self.model(qpos, image, env_state) # no action, sample from prior
-            return a_hat
-
-    def configure_optimizers(self):
-        return self.optimizer
-
-def kl_divergence(mu, logvar):
-    batch_size = mu.size(0)
-    assert batch_size != 0
-    if mu.data.ndimension() == 4:
-        mu = mu.view(mu.size(0), mu.size(1))
-    if logvar.data.ndimension() == 4:
-        logvar = logvar.view(logvar.size(0), logvar.size(1))
-
-    klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
-    total_kld = klds.sum(1).mean(0, True)
-    dimension_wise_kld = klds.mean(0)
-    mean_kld = klds.mean(1).mean(0, True)
-
-    return total_kld, dimension_wise_kld, mean_kld
diff --git a/act/record_sim_episodes.py b/act/record_sim_episodes.py
deleted file mode 100644
index 253fdea1..00000000
--- a/act/record_sim_episodes.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import time
-import os
-import numpy as np
-import argparse
-import matplotlib.pyplot as plt
-import h5py
-
-from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN, SIM_TASK_CONFIGS
-from ee_sim_env import make_ee_sim_env
-from sim_env import make_sim_env, BOX_POSE
-from scripted_policy import PickAndTransferPolicy, InsertionPolicy
-
-import IPython
-e = IPython.embed
-
-
-def main(args):
-    """
-    Generate demonstration data in simulation.
-    First rollout the policy (defined in ee space) in ee_sim_env. Obtain the joint trajectory.
-    Replace the gripper joint positions with the commanded joint position.
-    Replay this joint trajectory (as action sequence) in sim_env, and record all observations.
-    Save this episode of data, and continue to next episode of data collection.
-    """
-
-    task_name = args['task_name']
-    dataset_dir = args['dataset_dir']
-    num_episodes = args['num_episodes']
-    onscreen_render = args['onscreen_render']
-    inject_noise = False
-    render_cam_name = 'angle'
-
-    if not os.path.isdir(dataset_dir):
-        os.makedirs(dataset_dir, exist_ok=True)
-
-    episode_len = SIM_TASK_CONFIGS[task_name]['episode_len']
-    camera_names = SIM_TASK_CONFIGS[task_name]['camera_names']
-    if task_name == 'sim_transfer_cube_scripted':
-        policy_cls = PickAndTransferPolicy
-    elif task_name == 'sim_insertion_scripted':
-        policy_cls = InsertionPolicy
-    else:
-        raise NotImplementedError
-
-    success = []
-    for episode_idx in range(num_episodes):
-        print(f'{episode_idx=}')
-        print('Rollout out EE space scripted policy')
-        # setup the environment
-        env = make_ee_sim_env(task_name)
-        ts = env.reset()
-        episode = [ts]
-        policy = policy_cls(inject_noise)
-        # setup plotting
-        if onscreen_render:
-            ax = plt.subplot()
-            plt_img = ax.imshow(ts.observation['images'][render_cam_name])
-            plt.ion()
-        for step in range(episode_len):
-            action = policy(ts)
-            ts = env.step(action)
-            episode.append(ts)
-            if onscreen_render:
-                plt_img.set_data(ts.observation['images'][render_cam_name])
-                plt.pause(0.002)
-        plt.close()
-
-        episode_return = np.sum([ts.reward for ts in episode[1:]])
-        episode_max_reward = np.max([ts.reward for ts in episode[1:]])
-        if episode_max_reward == env.task.max_reward:
-            print(f"{episode_idx=} Successful, {episode_return=}")
-        else:
-            print(f"{episode_idx=} Failed")
-
-        joint_traj = [ts.observation['qpos'] for ts in episode]
-        # replace gripper pose with gripper control
-        gripper_ctrl_traj = [ts.observation['gripper_ctrl'] for ts in episode]
-        for joint, ctrl in zip(joint_traj, gripper_ctrl_traj):
-            left_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[0])
-            right_ctrl = PUPPET_GRIPPER_POSITION_NORMALIZE_FN(ctrl[2])
-            joint[6] = left_ctrl
-            joint[6+7] = right_ctrl
-
-        subtask_info = episode[0].observation['env_state'].copy() # box pose at step 0
-
-        # clear unused variables
-        del env
-        del episode
-        del policy
-
-        # setup the environment
-        print('Replaying joint commands')
-        env = make_sim_env(task_name)
-        BOX_POSE[0] = subtask_info # make sure the sim_env has the same object configurations as ee_sim_env
-        ts = env.reset()
-
-        episode_replay = [ts]
-        # setup plotting
-        if onscreen_render:
-            ax = plt.subplot()
-            plt_img = ax.imshow(ts.observation['images'][render_cam_name])
-            plt.ion()
-        for t in range(len(joint_traj)): # note: this will increase episode length by 1
-            action = joint_traj[t]
-            ts = env.step(action)
-            episode_replay.append(ts)
-            if onscreen_render:
-                plt_img.set_data(ts.observation['images'][render_cam_name])
-                plt.pause(0.02)
-
-        episode_return = np.sum([ts.reward for ts in episode_replay[1:]])
-        episode_max_reward = np.max([ts.reward for ts in episode_replay[1:]])
-        if episode_max_reward == env.task.max_reward:
-            success.append(1)
-            print(f"{episode_idx=} Successful, {episode_return=}")
-        else:
-            success.append(0)
-            print(f"{episode_idx=} Failed")
-
-        plt.close()
-
-        """
-        For each timestep:
-        observations
-        - images
-            - each_cam_name     (480, 640, 3) 'uint8'
-        - qpos                  (14,)         'float64'
-        - qvel                  (14,)         'float64'
-
-        action                  (14,)         'float64'
-        """
-
-        data_dict = {
-            '/observations/qpos': [],
-            '/observations/qvel': [],
-            '/action': [],
-        }
-        for cam_name in camera_names:
-            data_dict[f'/observations/images/{cam_name}'] = []
-
-        # because the replaying, there will be eps_len + 1 actions and eps_len + 2 timesteps
-        # truncate here to be consistent
-        joint_traj = joint_traj[:-1]
-        episode_replay = episode_replay[:-1]
-
-        # len(joint_traj) i.e. actions: max_timesteps
-        # len(episode_replay) i.e. time steps: max_timesteps + 1
-        max_timesteps = len(joint_traj)
-        while joint_traj:
-            action = joint_traj.pop(0)
-            ts = episode_replay.pop(0)
-            data_dict['/observations/qpos'].append(ts.observation['qpos'])
-            data_dict['/observations/qvel'].append(ts.observation['qvel'])
-            data_dict['/action'].append(action)
-            for cam_name in camera_names:
-                data_dict[f'/observations/images/{cam_name}'].append(ts.observation['images'][cam_name])
-
-        # HDF5
-        t0 = time.time()
-        dataset_path = os.path.join(dataset_dir, f'episode_{episode_idx}')
-        with h5py.File(dataset_path + '.hdf5', 'w', rdcc_nbytes=1024 ** 2 * 2) as root:
-            root.attrs['sim'] = True
-            obs = root.create_group('observations')
-            image = obs.create_group('images')
-            for cam_name in camera_names:
-                _ = image.create_dataset(cam_name, (max_timesteps, 480, 640, 3), dtype='uint8',
-                                         chunks=(1, 480, 640, 3), )
-                # compression='gzip',compression_opts=2,)
-                # compression=32001, compression_opts=(0, 0, 0, 0, 9, 1, 1), shuffle=False)
-            qpos = obs.create_dataset('qpos', (max_timesteps, 14))
-            qvel = obs.create_dataset('qvel', (max_timesteps, 14))
-            action = root.create_dataset('action', (max_timesteps, 14))
-
-            for name, array in data_dict.items():
-                root[name][...] = array
-        print(f'Saving: {time.time() - t0:.1f} secs\n')
-
-    print(f'Saved to {dataset_dir}')
-    print(f'Success: {np.sum(success)} / {len(success)}')
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--task_name', action='store', type=str, help='task_name', required=True)
-    parser.add_argument('--dataset_dir', action='store', type=str, help='dataset saving dir', required=True)
-    parser.add_argument('--num_episodes', action='store', type=int, help='num_episodes', required=False)
-    parser.add_argument('--onscreen_render', action='store_true')
-
-    main(vars(parser.parse_args()))
-
diff --git a/act/scripted_policy.py b/act/scripted_policy.py
deleted file mode 100644
index 4fd8f000..00000000
--- a/act/scripted_policy.py
+++ /dev/null
@@ -1,194 +0,0 @@
-import numpy as np
-import matplotlib.pyplot as plt
-from pyquaternion import Quaternion
-
-from constants import SIM_TASK_CONFIGS
-from ee_sim_env import make_ee_sim_env
-
-import IPython
-e = IPython.embed
-
-
-class BasePolicy:
-    def __init__(self, inject_noise=False):
-        self.inject_noise = inject_noise
-        self.step_count = 0
-        self.left_trajectory = None
-        self.right_trajectory = None
-
-    def generate_trajectory(self, ts_first):
-        raise NotImplementedError
-
-    @staticmethod
-    def interpolate(curr_waypoint, next_waypoint, t):
-        t_frac = (t - curr_waypoint["t"]) / (next_waypoint["t"] - curr_waypoint["t"])
-        curr_xyz = curr_waypoint['xyz']
-        curr_quat = curr_waypoint['quat']
-        curr_grip = curr_waypoint['gripper']
-        next_xyz = next_waypoint['xyz']
-        next_quat = next_waypoint['quat']
-        next_grip = next_waypoint['gripper']
-        xyz = curr_xyz + (next_xyz - curr_xyz) * t_frac
-        quat = curr_quat + (next_quat - curr_quat) * t_frac
-        gripper = curr_grip + (next_grip - curr_grip) * t_frac
-        return xyz, quat, gripper
-
-    def __call__(self, ts):
-        # generate trajectory at first timestep, then open-loop execution
-        if self.step_count == 0:
-            self.generate_trajectory(ts)
-
-        # obtain left and right waypoints
-        if self.left_trajectory[0]['t'] == self.step_count:
-            self.curr_left_waypoint = self.left_trajectory.pop(0)
-        next_left_waypoint = self.left_trajectory[0]
-
-        if self.right_trajectory[0]['t'] == self.step_count:
-            self.curr_right_waypoint = self.right_trajectory.pop(0)
-        next_right_waypoint = self.right_trajectory[0]
-
-        # interpolate between waypoints to obtain current pose and gripper command
-        left_xyz, left_quat, left_gripper = self.interpolate(self.curr_left_waypoint, next_left_waypoint, self.step_count)
-        right_xyz, right_quat, right_gripper = self.interpolate(self.curr_right_waypoint, next_right_waypoint, self.step_count)
-
-        # Inject noise
-        if self.inject_noise:
-            scale = 0.01
-            left_xyz = left_xyz + np.random.uniform(-scale, scale, left_xyz.shape)
-            right_xyz = right_xyz + np.random.uniform(-scale, scale, right_xyz.shape)
-
-        action_left = np.concatenate([left_xyz, left_quat, [left_gripper]])
-        action_right = np.concatenate([right_xyz, right_quat, [right_gripper]])
-
-        self.step_count += 1
-        return np.concatenate([action_left, action_right])
-
-
-class PickAndTransferPolicy(BasePolicy):
-
-    def generate_trajectory(self, ts_first):
-        init_mocap_pose_right = ts_first.observation['mocap_pose_right']
-        init_mocap_pose_left = ts_first.observation['mocap_pose_left']
-
-        box_info = np.array(ts_first.observation['env_state'])
-        box_xyz = box_info[:3]
-        box_quat = box_info[3:]
-        # print(f"Generate trajectory for {box_xyz=}")
-
-        gripper_pick_quat = Quaternion(init_mocap_pose_right[3:])
-        gripper_pick_quat = gripper_pick_quat * Quaternion(axis=[0.0, 1.0, 0.0], degrees=-60)
-
-        meet_left_quat = Quaternion(axis=[1.0, 0.0, 0.0], degrees=90)
-
-        meet_xyz = np.array([0, 0.5, 0.25])
-
-        self.left_trajectory = [
-            {"t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0}, # sleep
-            {"t": 100, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1}, # approach meet position
-            {"t": 260, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 1}, # move to meet position
-            {"t": 310, "xyz": meet_xyz + np.array([0.02, 0, -0.02]), "quat": meet_left_quat.elements, "gripper": 0}, # close gripper
-            {"t": 360, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0}, # move left
-            {"t": 400, "xyz": meet_xyz + np.array([-0.1, 0, -0.02]), "quat": np.array([1, 0, 0, 0]), "gripper": 0}, # stay
-        ]
-
-        self.right_trajectory = [
-            {"t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0}, # sleep
-            {"t": 90, "xyz": box_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat.elements, "gripper": 1}, # approach the cube
-            {"t": 130, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, "gripper": 1}, # go down
-            {"t": 170, "xyz": box_xyz + np.array([0, 0, -0.015]), "quat": gripper_pick_quat.elements, "gripper": 0}, # close gripper
-            {"t": 200, "xyz": meet_xyz + np.array([0.05, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 0}, # approach meet position
-            {"t": 220, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 0}, # move to meet position
-            {"t": 310, "xyz": meet_xyz, "quat": gripper_pick_quat.elements, "gripper": 1}, # open gripper
-            {"t": 360, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1}, # move to right
-            {"t": 400, "xyz": meet_xyz + np.array([0.1, 0, 0]), "quat": gripper_pick_quat.elements, "gripper": 1}, # stay
-        ]
-
-
-class InsertionPolicy(BasePolicy):
-
-    def generate_trajectory(self, ts_first):
-        init_mocap_pose_right = ts_first.observation['mocap_pose_right']
-        init_mocap_pose_left = ts_first.observation['mocap_pose_left']
-
-        peg_info = np.array(ts_first.observation['env_state'])[:7]
-        peg_xyz = peg_info[:3]
-        peg_quat = peg_info[3:]
-
-        socket_info = np.array(ts_first.observation['env_state'])[7:]
-        socket_xyz = socket_info[:3]
-        socket_quat = socket_info[3:]
-
-        gripper_pick_quat_right = Quaternion(init_mocap_pose_right[3:])
-        gripper_pick_quat_right = gripper_pick_quat_right * Quaternion(axis=[0.0, 1.0, 0.0], degrees=-60)
-
-        gripper_pick_quat_left = Quaternion(init_mocap_pose_right[3:])
-        gripper_pick_quat_left = gripper_pick_quat_left * Quaternion(axis=[0.0, 1.0, 0.0], degrees=60)
-
-        meet_xyz = np.array([0, 0.5, 0.15])
-        lift_right = 0.00715
-
-        self.left_trajectory = [
-            {"t": 0, "xyz": init_mocap_pose_left[:3], "quat": init_mocap_pose_left[3:], "gripper": 0}, # sleep
-            {"t": 120, "xyz": socket_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_left.elements, "gripper": 1}, # approach the cube
-            {"t": 170, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 1}, # go down
-            {"t": 220, "xyz": socket_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # close gripper
-            {"t": 285, "xyz": meet_xyz + np.array([-0.1, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # approach meet position
-            {"t": 340, "xyz": meet_xyz + np.array([-0.05, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # insertion
"quat": gripper_pick_quat_left.elements,"gripper": 0}, # insertion - {"t": 400, "xyz": meet_xyz + np.array([-0.05, 0, 0]), "quat": gripper_pick_quat_left.elements, "gripper": 0}, # insertion - ] - - self.right_trajectory = [ - {"t": 0, "xyz": init_mocap_pose_right[:3], "quat": init_mocap_pose_right[3:], "gripper": 0}, # sleep - {"t": 120, "xyz": peg_xyz + np.array([0, 0, 0.08]), "quat": gripper_pick_quat_right.elements, "gripper": 1}, # approach the cube - {"t": 170, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 1}, # go down - {"t": 220, "xyz": peg_xyz + np.array([0, 0, -0.03]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # close gripper - {"t": 285, "xyz": meet_xyz + np.array([0.1, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # approach meet position - {"t": 340, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # insertion - {"t": 400, "xyz": meet_xyz + np.array([0.05, 0, lift_right]), "quat": gripper_pick_quat_right.elements, "gripper": 0}, # insertion - - ] - - -def test_policy(task_name): - # example rolling out pick_and_transfer policy - onscreen_render = True - inject_noise = False - - # setup the environment - episode_len = SIM_TASK_CONFIGS[task_name]['episode_len'] - if 'sim_transfer_cube' in task_name: - env = make_ee_sim_env('sim_transfer_cube') - elif 'sim_insertion' in task_name: - env = make_ee_sim_env('sim_insertion') - else: - raise NotImplementedError - - for episode_idx in range(2): - ts = env.reset() - episode = [ts] - if onscreen_render: - ax = plt.subplot() - plt_img = ax.imshow(ts.observation['images']['angle']) - plt.ion() - - policy = PickAndTransferPolicy(inject_noise) - for step in range(episode_len): - action = policy(ts) - ts = env.step(action) - episode.append(ts) - if onscreen_render: - plt_img.set_data(ts.observation['images']['angle']) - plt.pause(0.02) - plt.close() - - episode_return = np.sum([ts.reward for ts in episode[1:]]) - if episode_return > 0: - print(f"{episode_idx=} Successful, {episode_return=}") - else: - print(f"{episode_idx=} Failed") - - -if __name__ == '__main__': - test_task_name = 'sim_transfer_cube_scripted' - test_policy(test_task_name) - diff --git a/act/sim_env.py b/act/sim_env.py deleted file mode 100644 index b79b935b..00000000 --- a/act/sim_env.py +++ /dev/null @@ -1,278 +0,0 @@ -import numpy as np -import os -import collections -import matplotlib.pyplot as plt -from dm_control import mujoco -from dm_control.rl import control -from dm_control.suite import base - -from constants import DT, XML_DIR, START_ARM_POSE -from constants import PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN -from constants import MASTER_GRIPPER_POSITION_NORMALIZE_FN -from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN -from constants import PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN - -import IPython -e = IPython.embed - -BOX_POSE = [None] # to be changed from outside - -def make_sim_env(task_name): - """ - Environment for simulated robot bi-manual manipulation, with joint position control - Action space: [left_arm_qpos (6), # absolute joint position - left_gripper_positions (1), # normalized gripper position (0: close, 1: open) - right_arm_qpos (6), # absolute joint position - right_gripper_positions (1),] # normalized gripper position (0: close, 1: open) - - Observation space: {"qpos": Concat[ left_arm_qpos (6), # absolute joint position - left_gripper_position (1), # normalized gripper position (0: close, 1: open) 
-                                        right_arm_qpos (6),         # absolute joint position
-                                        right_gripper_qpos (1)]     # normalized gripper position (0: close, 1: open)
-                        "qvel": Concat[ left_arm_qvel (6),          # absolute joint velocity (rad)
-                                        left_gripper_velocity (1),  # normalized gripper velocity (pos: opening, neg: closing)
-                                        right_arm_qvel (6),         # absolute joint velocity (rad)
-                                        right_gripper_qvel (1)]     # normalized gripper velocity (pos: opening, neg: closing)
-                        "images": {"main": (480x640x3)}             # h, w, c, dtype='uint8'
-    """
-    if 'sim_transfer_cube' in task_name:
-        xml_path = os.path.join(XML_DIR, f'bimanual_viperx_transfer_cube.xml')
-        physics = mujoco.Physics.from_xml_path(xml_path)
-        task = TransferCubeTask(random=False)
-        env = control.Environment(physics, task, time_limit=20, control_timestep=DT,
-                                  n_sub_steps=None, flat_observation=False)
-    elif 'sim_insertion' in task_name:
-        xml_path = os.path.join(XML_DIR, f'bimanual_viperx_insertion.xml')
-        physics = mujoco.Physics.from_xml_path(xml_path)
-        task = InsertionTask(random=False)
-        env = control.Environment(physics, task, time_limit=20, control_timestep=DT,
-                                  n_sub_steps=None, flat_observation=False)
-    else:
-        raise NotImplementedError
-    return env
-
-class BimanualViperXTask(base.Task):
-    def __init__(self, random=None):
-        super().__init__(random=random)
-
-    def before_step(self, action, physics):
-        left_arm_action = action[:6]
-        right_arm_action = action[7:7+6]
-        normalized_left_gripper_action = action[6]
-        normalized_right_gripper_action = action[7+6]
-
-        left_gripper_action = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(normalized_left_gripper_action)
-        right_gripper_action = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(normalized_right_gripper_action)
-
-        full_left_gripper_action = [left_gripper_action, -left_gripper_action]
-        full_right_gripper_action = [right_gripper_action, -right_gripper_action]
-
-        env_action = np.concatenate([left_arm_action, full_left_gripper_action, right_arm_action, full_right_gripper_action])
-        super().before_step(env_action, physics)
-        return
-
-    def initialize_episode(self, physics):
-        """Sets the state of the environment at the start of each episode."""
-        super().initialize_episode(physics)
-
-    @staticmethod
-    def get_qpos(physics):
-        qpos_raw = physics.data.qpos.copy()
-        left_qpos_raw = qpos_raw[:8]
-        right_qpos_raw = qpos_raw[8:16]
-        left_arm_qpos = left_qpos_raw[:6]
-        right_arm_qpos = right_qpos_raw[:6]
-        left_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_qpos_raw[6])]
-        right_gripper_qpos = [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_qpos_raw[6])]
-        return np.concatenate([left_arm_qpos, left_gripper_qpos, right_arm_qpos, right_gripper_qpos])
-
-    @staticmethod
-    def get_qvel(physics):
-        qvel_raw = physics.data.qvel.copy()
-        left_qvel_raw = qvel_raw[:8]
-        right_qvel_raw = qvel_raw[8:16]
-        left_arm_qvel = left_qvel_raw[:6]
-        right_arm_qvel = right_qvel_raw[:6]
-        left_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_qvel_raw[6])]
-        right_gripper_qvel = [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_qvel_raw[6])]
-        return np.concatenate([left_arm_qvel, left_gripper_qvel, right_arm_qvel, right_gripper_qvel])
-
-    @staticmethod
-    def get_env_state(physics):
-        raise NotImplementedError
-
-    def get_observation(self, physics):
-        obs = collections.OrderedDict()
-        obs['qpos'] = self.get_qpos(physics)
-        obs['qvel'] = self.get_qvel(physics)
-        obs['env_state'] = self.get_env_state(physics)
-        obs['images'] = dict()
-        obs['images']['top'] = physics.render(height=480, width=640, camera_id='top')
-        obs['images']['angle'] = physics.render(height=480, width=640, camera_id='angle')
width=640, camera_id='angle') - obs['images']['vis'] = physics.render(height=480, width=640, camera_id='front_close') - - return obs - - def get_reward(self, physics): - # return whether left gripper is holding the box - raise NotImplementedError - - -class TransferCubeTask(BimanualViperXTask): - def __init__(self, random=None): - super().__init__(random=random) - self.max_reward = 4 - - def initialize_episode(self, physics): - """Sets the state of the environment at the start of each episode.""" - # TODO Notice: this function does not randomize the env configuration. Instead, set BOX_POSE from outside - # reset qpos, control and box position - with physics.reset_context(): - physics.named.data.qpos[:16] = START_ARM_POSE - np.copyto(physics.data.ctrl, START_ARM_POSE) - assert BOX_POSE[0] is not None - physics.named.data.qpos[-7:] = BOX_POSE[0] - # print(f"{BOX_POSE=}") - super().initialize_episode(physics) - - @staticmethod - def get_env_state(physics): - env_state = physics.data.qpos.copy()[16:] - return env_state - - def get_reward(self, physics): - # return whether left gripper is holding the box - all_contact_pairs = [] - for i_contact in range(physics.data.ncon): - id_geom_1 = physics.data.contact[i_contact].geom1 - id_geom_2 = physics.data.contact[i_contact].geom2 - name_geom_1 = physics.model.id2name(id_geom_1, 'geom') - name_geom_2 = physics.model.id2name(id_geom_2, 'geom') - contact_pair = (name_geom_1, name_geom_2) - all_contact_pairs.append(contact_pair) - - touch_left_gripper = ("red_box", "vx300s_left/10_left_gripper_finger") in all_contact_pairs - touch_right_gripper = ("red_box", "vx300s_right/10_right_gripper_finger") in all_contact_pairs - touch_table = ("red_box", "table") in all_contact_pairs - - reward = 0 - if touch_right_gripper: - reward = 1 - if touch_right_gripper and not touch_table: # lifted - reward = 2 - if touch_left_gripper: # attempted transfer - reward = 3 - if touch_left_gripper and not touch_table: # successful transfer - reward = 4 - return reward - - -class InsertionTask(BimanualViperXTask): - def __init__(self, random=None): - super().__init__(random=random) - self.max_reward = 4 - - def initialize_episode(self, physics): - """Sets the state of the environment at the start of each episode.""" - # TODO Notice: this function does not randomize the env configuration. 
Instead, set BOX_POSE from outside - # reset qpos, control and box position - with physics.reset_context(): - physics.named.data.qpos[:16] = START_ARM_POSE - np.copyto(physics.data.ctrl, START_ARM_POSE) - assert BOX_POSE[0] is not None - physics.named.data.qpos[-7*2:] = BOX_POSE[0] # two objects - # print(f"{BOX_POSE=}") - super().initialize_episode(physics) - - @staticmethod - def get_env_state(physics): - env_state = physics.data.qpos.copy()[16:] - return env_state - - def get_reward(self, physics): - # return whether peg touches the pin - all_contact_pairs = [] - for i_contact in range(physics.data.ncon): - id_geom_1 = physics.data.contact[i_contact].geom1 - id_geom_2 = physics.data.contact[i_contact].geom2 - name_geom_1 = physics.model.id2name(id_geom_1, 'geom') - name_geom_2 = physics.model.id2name(id_geom_2, 'geom') - contact_pair = (name_geom_1, name_geom_2) - all_contact_pairs.append(contact_pair) - - touch_right_gripper = ("red_peg", "vx300s_right/10_right_gripper_finger") in all_contact_pairs - touch_left_gripper = ("socket-1", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ - ("socket-2", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ - ("socket-3", "vx300s_left/10_left_gripper_finger") in all_contact_pairs or \ - ("socket-4", "vx300s_left/10_left_gripper_finger") in all_contact_pairs - - peg_touch_table = ("red_peg", "table") in all_contact_pairs - socket_touch_table = ("socket-1", "table") in all_contact_pairs or \ - ("socket-2", "table") in all_contact_pairs or \ - ("socket-3", "table") in all_contact_pairs or \ - ("socket-4", "table") in all_contact_pairs - peg_touch_socket = ("red_peg", "socket-1") in all_contact_pairs or \ - ("red_peg", "socket-2") in all_contact_pairs or \ - ("red_peg", "socket-3") in all_contact_pairs or \ - ("red_peg", "socket-4") in all_contact_pairs - pin_touched = ("red_peg", "pin") in all_contact_pairs - - reward = 0 - if touch_left_gripper and touch_right_gripper: # touch both - reward = 1 - if touch_left_gripper and touch_right_gripper and (not peg_touch_table) and (not socket_touch_table): # grasp both - reward = 2 - if peg_touch_socket and (not peg_touch_table) and (not socket_touch_table): # peg and socket touching - reward = 3 - if pin_touched: # successful insertion - reward = 4 - return reward - - -def get_action(master_bot_left, master_bot_right): - action = np.zeros(14) - # arm action - action[:6] = master_bot_left.dxl.joint_states.position[:6] - action[7:7+6] = master_bot_right.dxl.joint_states.position[:6] - # gripper action - left_gripper_pos = master_bot_left.dxl.joint_states.position[7] - right_gripper_pos = master_bot_right.dxl.joint_states.position[7] - normalized_left_pos = MASTER_GRIPPER_POSITION_NORMALIZE_FN(left_gripper_pos) - normalized_right_pos = MASTER_GRIPPER_POSITION_NORMALIZE_FN(right_gripper_pos) - action[6] = normalized_left_pos - action[7+6] = normalized_right_pos - return action - -def test_sim_teleop(): - """ Testing teleoperation in sim with ALOHA. Requires hardware and ALOHA repo to work. 
""" - from interbotix_xs_modules.arm import InterbotixManipulatorXS - - BOX_POSE[0] = [0.2, 0.5, 0.05, 1, 0, 0, 0] - - # source of data - master_bot_left = InterbotixManipulatorXS(robot_model="wx250s", group_name="arm", gripper_name="gripper", - robot_name=f'master_left', init_node=True) - master_bot_right = InterbotixManipulatorXS(robot_model="wx250s", group_name="arm", gripper_name="gripper", - robot_name=f'master_right', init_node=False) - - # setup the environment - env = make_sim_env('sim_transfer_cube') - ts = env.reset() - episode = [ts] - # setup plotting - ax = plt.subplot() - plt_img = ax.imshow(ts.observation['images']['angle']) - plt.ion() - - for t in range(1000): - action = get_action(master_bot_left, master_bot_right) - ts = env.step(action) - episode.append(ts) - - plt_img.set_data(ts.observation['images']['angle']) - plt.pause(0.02) - - -if __name__ == '__main__': - test_sim_teleop() - diff --git a/act/utils.py b/act/utils.py deleted file mode 100644 index 673cbb10..00000000 --- a/act/utils.py +++ /dev/null @@ -1,189 +0,0 @@ -import numpy as np -import torch -import os -import h5py -from torch.utils.data import TensorDataset, DataLoader - -import IPython -e = IPython.embed - -class EpisodicDataset(torch.utils.data.Dataset): - def __init__(self, episode_ids, dataset_dir, camera_names, norm_stats): - super(EpisodicDataset).__init__() - self.episode_ids = episode_ids - self.dataset_dir = dataset_dir - self.camera_names = camera_names - self.norm_stats = norm_stats - self.is_sim = None - self.__getitem__(0) # initialize self.is_sim - - def __len__(self): - return len(self.episode_ids) - - def __getitem__(self, index): - sample_full_episode = False # hardcode - - episode_id = self.episode_ids[index] - dataset_path = os.path.join(self.dataset_dir, f'episode_{episode_id}.hdf5') - with h5py.File(dataset_path, 'r') as root: - is_sim = root.attrs['sim'] - original_action_shape = root['/action'].shape - episode_len = original_action_shape[0] - if sample_full_episode: - start_ts = 0 - else: - start_ts = np.random.choice(episode_len) - # get observation at start_ts only - qpos = root['/observations/qpos'][start_ts] - qvel = root['/observations/qvel'][start_ts] - image_dict = dict() - for cam_name in self.camera_names: - image_dict[cam_name] = root[f'/observations/images/{cam_name}'][start_ts] - # get all actions after and including start_ts - if is_sim: - action = root['/action'][start_ts:] - action_len = episode_len - start_ts - else: - action = root['/action'][max(0, start_ts - 1):] # hack, to make timesteps more aligned - action_len = episode_len - max(0, start_ts - 1) # hack, to make timesteps more aligned - - self.is_sim = is_sim - padded_action = np.zeros(original_action_shape, dtype=np.float32) - padded_action[:action_len] = action - is_pad = np.zeros(episode_len) - is_pad[action_len:] = 1 - - # new axis for different cameras - all_cam_images = [] - for cam_name in self.camera_names: - all_cam_images.append(image_dict[cam_name]) - all_cam_images = np.stack(all_cam_images, axis=0) - - # construct observations - image_data = torch.from_numpy(all_cam_images) - qpos_data = torch.from_numpy(qpos).float() - action_data = torch.from_numpy(padded_action).float() - is_pad = torch.from_numpy(is_pad).bool() - - # channel last - image_data = torch.einsum('k h w c -> k c h w', image_data) - - # normalize image and change dtype to float - image_data = image_data / 255.0 - action_data = (action_data - self.norm_stats["action_mean"]) / self.norm_stats["action_std"] - qpos_data = (qpos_data - 
self.norm_stats["qpos_mean"]) / self.norm_stats["qpos_std"] - - return image_data, qpos_data, action_data, is_pad - - -def get_norm_stats(dataset_dir, num_episodes): - all_qpos_data = [] - all_action_data = [] - for episode_idx in range(num_episodes): - dataset_path = os.path.join(dataset_dir, f'episode_{episode_idx}.hdf5') - with h5py.File(dataset_path, 'r') as root: - qpos = root['/observations/qpos'][()] - qvel = root['/observations/qvel'][()] - action = root['/action'][()] - all_qpos_data.append(torch.from_numpy(qpos)) - all_action_data.append(torch.from_numpy(action)) - all_qpos_data = torch.stack(all_qpos_data) - all_action_data = torch.stack(all_action_data) - all_action_data = all_action_data - - # normalize action data - action_mean = all_action_data.mean(dim=[0, 1], keepdim=True) - action_std = all_action_data.std(dim=[0, 1], keepdim=True) - action_std = torch.clip(action_std, 1e-2, 10) # clipping - - # normalize qpos data - qpos_mean = all_qpos_data.mean(dim=[0, 1], keepdim=True) - qpos_std = all_qpos_data.std(dim=[0, 1], keepdim=True) - qpos_std = torch.clip(qpos_std, 1e-2, 10) # clipping - - stats = {"action_mean": action_mean.numpy().squeeze(), "action_std": action_std.numpy().squeeze(), - "qpos_mean": qpos_mean.numpy().squeeze(), "qpos_std": qpos_std.numpy().squeeze(), - "example_qpos": qpos} - - return stats - - -def load_data(dataset_dir, num_episodes, camera_names, batch_size_train, batch_size_val): - print(f'\nData from: {dataset_dir}\n') - # obtain train test split - train_ratio = 0.8 - shuffled_indices = np.random.permutation(num_episodes) - train_indices = shuffled_indices[:int(train_ratio * num_episodes)] - val_indices = shuffled_indices[int(train_ratio * num_episodes):] - - # obtain normalization stats for qpos and action - norm_stats = get_norm_stats(dataset_dir, num_episodes) - - # construct dataset and dataloader - train_dataset = EpisodicDataset(train_indices, dataset_dir, camera_names, norm_stats) - val_dataset = EpisodicDataset(val_indices, dataset_dir, camera_names, norm_stats) - train_dataloader = DataLoader(train_dataset, batch_size=batch_size_train, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size_val, shuffle=True, pin_memory=True, num_workers=1, prefetch_factor=1) - - return train_dataloader, val_dataloader, norm_stats, train_dataset.is_sim - - -### env utils - -def sample_box_pose(): - x_range = [0.0, 0.2] - y_range = [0.4, 0.6] - z_range = [0.05, 0.05] - - ranges = np.vstack([x_range, y_range, z_range]) - cube_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) - - cube_quat = np.array([1, 0, 0, 0]) - return np.concatenate([cube_position, cube_quat]) - -def sample_insertion_pose(): - # Peg - x_range = [0.1, 0.2] - y_range = [0.4, 0.6] - z_range = [0.05, 0.05] - - ranges = np.vstack([x_range, y_range, z_range]) - peg_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) - - peg_quat = np.array([1, 0, 0, 0]) - peg_pose = np.concatenate([peg_position, peg_quat]) - - # Socket - x_range = [-0.2, -0.1] - y_range = [0.4, 0.6] - z_range = [0.05, 0.05] - - ranges = np.vstack([x_range, y_range, z_range]) - socket_position = np.random.uniform(ranges[:, 0], ranges[:, 1]) - - socket_quat = np.array([1, 0, 0, 0]) - socket_pose = np.concatenate([socket_position, socket_quat]) - - return peg_pose, socket_pose - -### helper functions - -def compute_dict_mean(epoch_dicts): - result = {k: None for k in epoch_dicts[0]} - num_items = len(epoch_dicts) - for k in result: - value_sum = 0 
- for epoch_dict in epoch_dicts: - value_sum += epoch_dict[k] - result[k] = value_sum / num_items - return result - -def detach_dict(d): - new_d = dict() - for k, v in d.items(): - new_d[k] = v.detach() - return new_d - -def set_seed(seed): - torch.manual_seed(seed) - np.random.seed(seed) diff --git a/act/visualize_episodes.py b/act/visualize_episodes.py deleted file mode 100644 index 4e55e471..00000000 --- a/act/visualize_episodes.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import numpy as np -import cv2 -import h5py -import argparse - -import matplotlib.pyplot as plt -from constants import DT - -import IPython -e = IPython.embed - -JOINT_NAMES = ["waist", "shoulder", "elbow", "forearm_roll", "wrist_angle", "wrist_rotate"] -STATE_NAMES = JOINT_NAMES + ["gripper"] - -def load_hdf5(dataset_dir, dataset_name): - dataset_path = os.path.join(dataset_dir, dataset_name + '.hdf5') - if not os.path.isfile(dataset_path): - print(f'Dataset does not exist at \n{dataset_path}\n') - exit() - - with h5py.File(dataset_path, 'r') as root: - is_sim = root.attrs['sim'] - qpos = root['/observations/qpos'][()] - qvel = root['/observations/qvel'][()] - action = root['/action'][()] - image_dict = dict() - for cam_name in root[f'/observations/images/'].keys(): - image_dict[cam_name] = root[f'/observations/images/{cam_name}'][()] - - return qpos, qvel, action, image_dict - -def main(args): - dataset_dir = args['dataset_dir'] - episode_idx = args['episode_idx'] - dataset_name = f'episode_{episode_idx}' - - qpos, qvel, action, image_dict = load_hdf5(dataset_dir, dataset_name) - save_videos(image_dict, DT, video_path=os.path.join(dataset_dir, dataset_name + '_video.mp4')) - visualize_joints(qpos, action, plot_path=os.path.join(dataset_dir, dataset_name + '_qpos.png')) - # visualize_timestamp(t_list, dataset_path) # TODO addn timestamp back - - -def save_videos(video, dt, video_path=None): - if isinstance(video, list): - cam_names = list(video[0].keys()) - h, w, _ = video[0][cam_names[0]].shape - w = w * len(cam_names) - fps = int(1/dt) - out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - for ts, image_dict in enumerate(video): - images = [] - for cam_name in cam_names: - image = image_dict[cam_name] - image = image[:, :, [2, 1, 0]] # swap B and R channel - images.append(image) - images = np.concatenate(images, axis=1) - out.write(images) - out.release() - print(f'Saved video to: {video_path}') - elif isinstance(video, dict): - cam_names = list(video.keys()) - all_cam_videos = [] - for cam_name in cam_names: - all_cam_videos.append(video[cam_name]) - all_cam_videos = np.concatenate(all_cam_videos, axis=2) # width dimension - - n_frames, h, w, _ = all_cam_videos.shape - fps = int(1 / dt) - out = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - for t in range(n_frames): - image = all_cam_videos[t] - image = image[:, :, [2, 1, 0]] # swap B and R channel - out.write(image) - out.release() - print(f'Saved video to: {video_path}') - - -def visualize_joints(qpos_list, command_list, plot_path=None, ylim=None, label_overwrite=None): - if label_overwrite: - label1, label2 = label_overwrite - else: - label1, label2 = 'State', 'Command' - - qpos = np.array(qpos_list) # ts, dim - command = np.array(command_list) - num_ts, num_dim = qpos.shape - h, w = 2, num_dim - num_figs = num_dim - fig, axs = plt.subplots(num_figs, 1, figsize=(w, h * num_figs)) - - # plot joint state - all_names = [name + '_left' for name in STATE_NAMES] + [name + '_right' for name in STATE_NAMES] - 
for dim_idx in range(num_dim):
- ax = axs[dim_idx]
- ax.plot(qpos[:, dim_idx], label=label1)
- ax.set_title(f'Joint {dim_idx}: {all_names[dim_idx]}')
- ax.legend()
-
- # plot arm command
- for dim_idx in range(num_dim):
- ax = axs[dim_idx]
- ax.plot(command[:, dim_idx], label=label2)
- ax.legend()
-
- if ylim:
- for dim_idx in range(num_dim):
- ax = axs[dim_idx]
- ax.set_ylim(ylim)
-
- plt.tight_layout()
- plt.savefig(plot_path)
- print(f'Saved qpos plot to: {plot_path}')
- plt.close()
-
-def visualize_timestamp(t_list, dataset_path):
- plot_path = dataset_path.replace('.pkl', '_timestamp.png')
- h, w = 4, 10
- fig, axs = plt.subplots(2, 1, figsize=(w, h*2))
- # process t_list
- t_float = []
- for secs, nsecs in t_list:
- t_float.append(secs + nsecs * 10E-10)
- t_float = np.array(t_float)
-
- ax = axs[0]
- ax.plot(np.arange(len(t_float)), t_float)
- ax.set_title(f'Camera frame timestamps')
- ax.set_xlabel('timestep')
- ax.set_ylabel('time (sec)')
-
- ax = axs[1]
- ax.plot(np.arange(len(t_float)-1), t_float[:-1] - t_float[1:])
- ax.set_title(f'dt')
- ax.set_xlabel('timestep')
- ax.set_ylabel('time (sec)')
-
- plt.tight_layout()
- plt.savefig(plot_path)
- print(f'Saved timestamp plot to: {plot_path}')
- plt.close()
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--dataset_dir', action='store', type=str, help='Dataset dir.', required=True)
- parser.add_argument('--episode_idx', action='store', type=int, help='Episode index.', required=False)
- main(vars(parser.parse_args()))
diff --git a/requirements.txt b/requirements.txt
index 6f64af97..752fb152 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,3 +11,9 @@ matplotlib
 egl_probe>=1.0.1
 torch
 torchvision
+wandb
+pytorch_lightning
+ipython
+opencv-python
+scipy
+pytorch-kinematics
\ No newline at end of file

From 206876821e72c3dfe2663ed1a585f04e28665f43 Mon Sep 17 00:00:00 2001
From: Simar Kareer
Date: Thu, 23 May 2024 17:36:46 -0400
Subject: [PATCH 21/44] Sequence dataset can now interpolate sequence length
 for low dim keys

---
 robomimic/utils/dataset.py | 50 +++++++++++++++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 4 deletions(-)

diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 4ecbe268..e4fc6c37 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -14,6 +14,8 @@
 import robomimic.utils.obs_utils as ObsUtils
 import robomimic.utils.log_utils as LogUtils
 import time
+import scipy.interpolate
+import matplotlib.pyplot as plt

 class SequenceDataset(torch.utils.data.Dataset):
     def __init__(
@@ -467,6 +469,28 @@ def get_item(self, index):

 return meta

+ def interpolate_keys(self, obs, keys, seq_length, seq_length_to_load):
+ if seq_length == seq_length_to_load:
+ return
+
+ for k in keys:
+ v = obs[k]
+ if k == "pad_mask":
+ # interpolate it by simply copying each index (seq_length / seq_length_to_load) times
+ obs[k] = np.repeat(v, seq_length // seq_length_to_load, axis=0)
+ else:
+ assert v.shape[0] == seq_length_to_load, "low_dim obs should have shape (seq_length_to_load, ...)"
+ assert len(v.shape) == 2, "low_dim obs should be 2D with shape (seq_length_to_load, ...)"
+ # plot v[:, 3]
+ # plt.plot(v[:, 2])
+ # plt.savefig('v_3.png')
+ # plt.close()
+ interp = scipy.interpolate.interp1d(np.linspace(0, 1, seq_length_to_load), v, axis=0)
+ obs[k] = interp(np.linspace(0, 1, seq_length))
+ # plt.plot(obs[k][:, 2])
+ # plt.savefig('v_3_after.png')
+ # plt.close()
+
 def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1,
dont_load_fut=None): """ Extract a (sub)sequence of data items from a demo given the @keys of the items. @@ -520,7 +544,7 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta return seq, pad_mask - def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False): + def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False, seq_length_to_load=None): """ Extract a (sub)sequence of observation items from a demo given the @keys of the items. @@ -535,21 +559,30 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to Returns: a dictionary of extracted items. """ + if seq_length_to_load is None: + seq_length_to_load = seq_length + obs, pad_mask = self.get_sequence_from_demo( demo_id, index_in_demo=index_in_demo, keys=tuple('{}/{}'.format(prefix, k) for k in keys), num_frames_to_stack=num_frames_to_stack, - seq_length=seq_length, + seq_length=seq_length_to_load, dont_load_fut=dont_load_fut ) obs = {k.split('/')[1]: obs[k] for k in obs} # strip the prefix if self.get_pad_mask: obs["pad_mask"] = pad_mask + + # Interpolate obs + to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")] + # t = time.time() + self.interpolate_keys(obs, to_interp, seq_length, seq_length_to_load) + # print("Interpolation time: ", time.time() - t) return obs - def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1): + def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, seq_length_to_load=None): """ Extract a (sub)sequence of dataset items from a demo given the @keys of the items (e.g., states, actions). @@ -563,15 +596,24 @@ def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frame Returns: a dictionary of extracted items. 
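 Note: if @seq_length_to_load is provided and differs from @seq_length, only
 @seq_length_to_load steps are fetched from the demo, and each extracted key is
 then interpolated up to @seq_length by interpolate_keys (pad masks are
 repeated rather than interpolated).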
""" + if seq_length_to_load is None: + seq_length_to_load = seq_length + data, pad_mask = self.get_sequence_from_demo( demo_id, index_in_demo=index_in_demo, keys=keys, num_frames_to_stack=num_frames_to_stack, - seq_length=seq_length, + seq_length=seq_length_to_load, ) if self.get_pad_mask: data["pad_mask"] = pad_mask + + # interpolate actions + to_interp = [k for k in data] + # t = time.time() + self.interpolate_keys(data, to_interp, seq_length, seq_length_to_load) + # print("Interpolation time: ", time.time() - t) return data def get_trajectory_at_index(self, index): From 20e374ecf73e59460c9458b45287afaf1e4f2e3f Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 30 May 2024 22:23:18 -0400 Subject: [PATCH 22/44] added CropResizeColorRandomizer for color jitter --- robomimic/models/obs_core.py | 82 ++++++++++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 9 deletions(-) diff --git a/robomimic/models/obs_core.py b/robomimic/models/obs_core.py index 195dbf5d..005c15aa 100644 --- a/robomimic/models/obs_core.py +++ b/robomimic/models/obs_core.py @@ -13,6 +13,7 @@ import torch.nn as nn from torchvision.transforms import Lambda, Compose, RandomResizedCrop import torchvision.transforms.functional as TVF +import torchvision.transforms as TT import robomimic.models.base_nets as BaseNets import robomimic.utils.tensor_utils as TensorUtils @@ -23,6 +24,9 @@ from robomimic.models.base_nets import * from robomimic.utils.vis_utils import visualize_image_randomizer from robomimic.macros import VISUALIZE_RANDOMIZER +import datetime +import matplotlib.pyplot as plt + """ @@ -618,16 +622,40 @@ def __init__( self.resize_crop = RandomResizedCrop(size=size, scale=scale, ratio=ratio, interpolation=TVF.InterpolationMode.BILINEAR) def output_shape_in(self, input_shape=None): - out_c = self.input_shape[0] + 2 if self.pos_enc else self.input_shape[0] - return [out_c, self.size[0], self.size[1]] + shape = [self.input_shape[0], self.size[0], self.size[1]] + return shape def output_shape_out(self, input_shape=None): return list(input_shape) + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + """ + pre_random_input: (B, C, H, W) + randomized_input: (B, C, H, W) + num_samples_to_visualize: + Use plt.imsave to save a plot with the original input and the randomized input side by side. Save it to debug/augIms/ with a unique name. + """ + fig, axes = plt.subplots(num_samples_to_visualize, 2, figsize=(10, 5*num_samples_to_visualize)) + for i in range(num_samples_to_visualize): + axes[i, 0].imshow(pre_random_input[i].permute(1, 2, 0).cpu().numpy()) + axes[i, 0].set_title("Original Input") + axes[i, 1].imshow(randomized_input[i].permute(1, 2, 0).cpu().numpy()) + axes[i, 1].set_title("Randomized Input") + plt.tight_layout() + plt.savefig(f"debug/augIms/sample_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.png") + plt.close(fig) + # plt.close(fig) + # fig, axes = plt.subplots(1, 2) + # axes[0].imshow(pre_random_input[i].permute(1, 2, 0).cpu().numpy()) + # axes[0].set_title("Original Input") + # axes[1].imshow(randomized_input[i].permute(1, 2, 0).cpu().numpy()) + # axes[1].set_title("Randomized Input") + # plt.savefig(f"debug/augIms/sample_{i}.png") + # plt.close(fig) + def _forward_in(self, inputs): """ - Samples N random crops for each input in the batch, and then reshapes - inputs to [B * N, ...]. 
+ Samples a single random crop for each input
 """
 # assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions
 # out, _ = ObsUtils.sample_random_image_crops(
 # images=inputs,
 # crop_height=self.crop_height,
 # crop_width=self.crop_width,
 # num_crops=self.num_crops,
 # pos_enc=self.pos_enc,
 # )
 # # [B, N, ...] -> [B * N, ...]
 # out = TensorUtils.join_dimensions(out, 0, 1)
 out = self.resize_crop(inputs)
+ # self._visualize(inputs, out)
 return out

@@ -662,13 +691,48 @@ def _forward_out(self, inputs):
 Splits the outputs from shape [B * N, ...] -> [B, N, ...] and then average across
 N to result in shape [B, ...] to make sure the network output is consistent with
 what would have happened if there were no randomization.
+
+ In this class, N is assumed to be 1, so the input is returned unchanged.
 """
- batch_size = (inputs.shape[0] // self.num_crops)
- out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0,
- target_dims=(batch_size, self.num_crops))
- return out.mean(dim=1)
-
+ return inputs
+
+class CropResizeColorRandomizer(CropResizeRandomizer):
+ """
+ Does the same thing as CropResizeRandomizer, but additionally performs color jitter
+ """
+ def __init__(
+ self,
+ input_shape,
+ size,
+ scale,
+ ratio,
+ num_crops=1,
+ pos_enc=False,
+ brightness=0.5,
+ contrast=0.2,
+ saturation=0.2,
+ hue=0.05,
+ ):
+ super(CropResizeColorRandomizer, self).__init__(
+ input_shape=input_shape,
+ size=size,
+ scale=scale,
+ ratio=ratio,
+ num_crops=num_crops,
+ pos_enc=pos_enc,
+ )
+ self.color_jitter = TT.ColorJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue)
+
+ def _forward_in(self, inputs):
+ out = super(CropResizeColorRandomizer, self)._forward_in(inputs)
+ out = self.color_jitter(out)
+ # self._visualize(inputs, out)
+ return out
+
+ def _forward_in_eval(self, inputs):
+ out = super(CropResizeColorRandomizer, self)._forward_in_eval(inputs)
+ return out

From e3a916300a363be2f764b9dbfaff9a7bcd8759b8 Mon Sep 17 00:00:00 2001
From: Simar Kareer
Date: Thu, 6 Jun 2024 18:59:21 -0400
Subject: [PATCH 23/44] merged and added ac_key support

---
 robomimic/utils/train_utils.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py
index fa916eb6..58de759d 100644
--- a/robomimic/utils/train_utils.py
+++ b/robomimic/utils/train_utils.py
@@ -534,7 +534,7 @@ def get_gpu_usage_mb(index):
 return info.used / 1024 / 1024

-def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None):
+def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None):
 """
 Run an epoch of training or validation.

@@ -590,7 +590,7 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor
 # process batch for training
 t = time.time()
- input_batch = model.process_batch_for_training(batch)
+ input_batch = model.process_batch_for_training(batch, ac_key=ac_key)
 input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats)
 timing_stats["Process_Batch"].append(time.time() - t)

@@ -622,7 +622,7 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor
 return step_log_all

-def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None):
+def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None):
 """
 Run an epoch of training or validation.
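The new ac_key argument lets a config choose which dataset key supplies the action targets; run_epoch and run_epoch_2_dataloaders simply forward it to the model's process_batch_for_training. The matching algo-side change is not shown in this series, so the following is only a minimal sketch of what an ac_key-aware process_batch_for_training could look like, modeled on robomimic's BC batch processing; the None fallback to "actions" is an assumption, not taken from the patch.

import robomimic.utils.tensor_utils as TensorUtils

# sketch: method of an Algo subclass (e.g. BC)
def process_batch_for_training(self, batch, ac_key=None):
    # assumed default: keep the original "actions" key when ac_key is None
    ac_key = "actions" if ac_key is None else ac_key
    input_batch = dict()
    # use the first frame of each sampled observation sequence, as BC does
    input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]}
    input_batch["goal_obs"] = batch.get("goal_obs", None)
    # select the action supervision target from the chosen dataset key
    input_batch["actions"] = batch[ac_key][:, 0, :]
    return TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device)

With this in place, a config-level setting could switch supervision between different action parameterizations stored in the hdf5 without touching the data loader.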
@@ -684,8 +684,8 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F # process batch for training t = time.time() # breakpoint() - input_batch = model.process_batch_for_training(batch) - input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2) + input_batch = model.process_batch_for_training(batch, ac_key=ac_key) + input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2, ac_key=ac_key) # breakpoint() input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) From 498c9958db6ceab9be73d087dc7dcb09de2174eb Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Wed, 12 Jun 2024 12:05:04 -0400 Subject: [PATCH 24/44] added ranges for color jitter params --- robomimic/models/obs_core.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/robomimic/models/obs_core.py b/robomimic/models/obs_core.py index 005c15aa..81f12b66 100644 --- a/robomimic/models/obs_core.py +++ b/robomimic/models/obs_core.py @@ -709,10 +709,14 @@ def __init__( ratio, num_crops=1, pos_enc=False, - brightness=0.5, - contrast=0.2, - saturation=0.2, - hue=0.05, + brightness_min=1.0, + brightness_max=1.0, + contrast_min=1.0, + contrast_max=1.0, + saturation_min=1.0, + saturation_max=1.0, + hue_min=0.0, + hue_max=0.0 ): super(CropResizeColorRandomizer, self).__init__( input_shape=input_shape, @@ -722,7 +726,7 @@ def __init__( num_crops=num_crops, pos_enc=pos_enc, ) - self.color_jitter = TT.ColorJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue) + self.color_jitter = TT.ColorJitter(brightness=(brightness_min, brightness_max), contrast=(contrast_min, contrast_max), saturation=(saturation_min, saturation_max), hue=(hue_min, hue_max)) def _forward_in(self, inputs): out = super(CropResizeColorRandomizer, self)._forward_in(inputs) From 4958d8ca26c3ab987234aaf44e0db6a954514ee6 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Thu, 13 Jun 2024 13:32:22 -0400 Subject: [PATCH 25/44] black format --- docs/conf.py | 61 +- examples/add_new_modality.py | 54 +- examples/simple_config.py | 11 +- examples/simple_obs_nets.py | 14 +- examples/simple_train_loop.py | 57 +- examples/train_bc_rnn.py | 365 +++++++---- robomimic/__init__.py | 140 +++- robomimic/algo/__init__.py | 13 +- robomimic/algo/algo.py | 93 ++- robomimic/algo/bc.py | 158 +++-- robomimic/algo/bcq.py | 320 ++++++--- robomimic/algo/cql.py | 301 ++++++--- robomimic/algo/gl.py | 179 +++-- robomimic/algo/hbc.py | 112 +++- robomimic/algo/iql.py | 90 ++- robomimic/algo/iris.py | 60 +- robomimic/algo/td3_bc.py | 139 ++-- robomimic/config/__init__.py | 1 + robomimic/config/base_config.py | 253 ++++--- robomimic/config/bc_config.py | 177 +++-- robomimic/config/bcq_config.py | 130 ++-- robomimic/config/config.py | 96 +-- robomimic/config/cql_config.py | 110 ++-- robomimic/config/gl_config.py | 122 ++-- robomimic/config/hbc_config.py | 62 +- robomimic/config/iql_config.py | 108 +-- robomimic/config/iris_config.py | 57 +- robomimic/config/td3_bc_config.py | 100 ++- robomimic/envs/env_base.py | 37 +- robomimic/envs/env_gym.py | 57 +- robomimic/envs/env_ig_momart.py | 199 ++++-- robomimic/envs/env_robosuite.py | 131 ++-- robomimic/envs/wrappers.py | 39 +- robomimic/macros.py | 10 +- robomimic/models/base_nets.py | 537 ++++++++++----- robomimic/models/distributions.py | 15 +- robomimic/models/obs_core.py | 280 +++++--- robomimic/models/obs_nets.py | 325 +++++---- robomimic/models/policy_nets.py | 287 
+++++--- robomimic/models/transformers.py | 9 +- robomimic/models/vae_nets.py | 317 ++++++--- robomimic/models/value_nets.py | 32 +- robomimic/models/vit_rein.py | 13 +- robomimic/scripts/config_gen/act_gen.py | 23 +- robomimic/scripts/config_gen/helper.py | 385 +++++------ robomimic/scripts/conversion/convert_d4rl.py | 33 +- .../scripts/conversion/convert_robosuite.py | 2 +- .../conversion/convert_roboturk_pilot.py | 36 +- robomimic/scripts/dataset_states_to_obs.py | 142 ++-- robomimic/scripts/download_datasets.py | 73 ++- robomimic/scripts/download_momart_datasets.py | 50 +- .../scripts/generate_config_templates.py | 3 +- robomimic/scripts/generate_paper_configs.py | 616 +++++++++++------- robomimic/scripts/get_dataset_info.py | 35 +- robomimic/scripts/hyperparam_helper.py | 55 +- robomimic/scripts/playback_dataset.py | 122 ++-- robomimic/scripts/run_trained_agent.py | 102 ++- robomimic/scripts/setup_macros.py | 4 +- robomimic/scripts/split_train_val.py | 29 +- robomimic/scripts/train.py | 131 ++-- robomimic/utils/dataset.py | 272 +++++--- robomimic/utils/env_utils.py | 109 ++-- robomimic/utils/file_utils.py | 111 +++- robomimic/utils/hyperparam_utils.py | 79 ++- robomimic/utils/log_utils.py | 84 ++- robomimic/utils/loss_utils.py | 71 +- robomimic/utils/obs_utils.py | 215 ++++-- robomimic/utils/python_utils.py | 11 +- robomimic/utils/tensor_utils.py | 160 +++-- robomimic/utils/test_utils.py | 40 +- robomimic/utils/torch_utils.py | 25 +- robomimic/utils/train_utils.py | 255 +++++--- robomimic/utils/vis_utils.py | 9 +- setup.py | 13 +- tests/test_bc.py | 47 +- tests/test_bcq.py | 55 +- tests/test_cql.py | 53 +- tests/test_examples.py | 35 +- tests/test_hbc.py | 33 +- tests/test_iql.py | 45 +- tests/test_iris.py | 43 +- tests/test_scripts.py | 62 +- 82 files changed, 6123 insertions(+), 3216 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 59eff968..40bf6e96 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,7 +12,8 @@ import os import sys -sys.path.insert(0, os.path.abspath('.')) + +sys.path.insert(0, os.path.abspath(".")) import sphinx_book_theme import robomimic @@ -28,13 +29,13 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.napoleon', - 'sphinx_markdown_tables', - 'sphinx.ext.mathjax', - 'sphinx.ext.githubpages', - 'sphinx.ext.autodoc', - 'recommonmark', # use Sphinx-1.4 or newer - 'nbsphinx', + "sphinx.ext.napoleon", + "sphinx_markdown_tables", + "sphinx.ext.mathjax", + "sphinx.ext.githubpages", + "sphinx.ext.autodoc", + "recommonmark", # use Sphinx-1.4 or newer + "nbsphinx", ] @@ -44,7 +45,7 @@ # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # source_parsers = { # '.md': CommonMarkParser, @@ -52,15 +53,15 @@ # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: -source_suffix = ['.rst', '.md', '.ipynb'] +source_suffix = [".rst", ".md", ".ipynb"] # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = 'robomimic' -copyright = 'the robomimic core team, 2023' -author = 'the robomimic core team' +project = "robomimic" +copyright = "the robomimic core team, 2023" +author = "the robomimic core team" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -83,10 +84,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -97,7 +98,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_book_theme' +html_theme = "sphinx_book_theme" html_logo = "robomimic_logo.png" # Theme options are theme-specific and customize the look and feel of a theme @@ -109,7 +110,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # html_context = { # 'css_files': [ @@ -120,7 +121,7 @@ # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'robomimicdoc' +htmlhelp_basename = "robomimicdoc" # -- Options for LaTeX output --------------------------------------------- @@ -129,15 +130,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -147,7 +145,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'robomimic.tex', u'robomimic Documentation', author, 'manual'), + (master_doc, "robomimic.tex", "robomimic Documentation", author, "manual"), ] @@ -155,10 +153,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'robomimic', u'robomimic Documentation', - [author], 1) -] +man_pages = [(master_doc, "robomimic", "robomimic Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -167,7 +162,13 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'robomimic', u'robomimic Documentation', - author, 'robomimic', 'ARISE', - 'Miscellaneous'), + ( + master_doc, + "robomimic", + "robomimic Documentation", + author, + "robomimic", + "ARISE", + "Miscellaneous", + ), ] diff --git a/examples/add_new_modality.py b/examples/add_new_modality.py index cf7fc876..f14ab3f9 100644 --- a/examples/add_new_modality.py +++ b/examples/add_new_modality.py @@ -45,8 +45,11 @@ def custom_scan_processor(obs): def custom_scan_unprocessor(obs): # Re-add the padding # Note: need to check type - return np.concatenate([np.zeros(1), obs, np.zeros(1)]) if isinstance(obs, np.ndarray) else \ - torch.concat([torch.zeros(1), obs, torch.zeros(1)]) + return ( + np.concatenate([np.zeros(1), obs, np.zeros(1)]) + if isinstance(obs, np.ndarray) + else torch.concat([torch.zeros(1), obs, torch.zeros(1)]) + ) # Override the default functions for ScanModality @@ -58,11 +61,10 @@ def custom_scan_unprocessor(obs): class CustomImageEncoderCore(EncoderCore): # For simplicity, this will be a pass-through with some simple kwargs def __init__( - self, - input_shape, # Required, will be inferred automatically at runtime - - # Any args below here you can specify arbitrarily - welcome_str, + self, + input_shape, # Required, will be inferred automatically at runtime + # Any args below here you can specify arbitrarily + welcome_str, ): # Always need to run super init first and pass in input_shape super().__init__(input_shape=input_shape) @@ -90,6 +92,7 @@ class CustomImageRandomizer(Randomizer): through the network, resulting in outputs corresponding to each copy - we will pool these outputs across the copies with a simple average. """ + def __init__( self, input_shape, @@ -104,7 +107,7 @@ def __init__( """ super(CustomImageRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) self.input_shape = input_shape self.num_rand = num_rand @@ -118,7 +121,7 @@ def output_shape_in(self, input_shape=None): Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -138,13 +141,13 @@ def output_shape_out(self, input_shape=None): Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - + # since the @forward_out operation splits [B * N, ...] -> [B, N, ...] # and then pools to result in [B, ...], only the batch dimension changes, # and so the other dimensions retain their shape. @@ -164,7 +167,7 @@ def forward_in(self, inputs): out = TensorUtils.unsqueeze_expand_at(inputs, size=self.num_rand, dim=1) # add random noise to each copy - out = out + self.noise_scale * (2. * torch.rand_like(out) - 1.) 
+ out = out + self.noise_scale * (2.0 * torch.rand_like(out) - 1.0) # reshape [B, N, C, H, W] -> [B * N, C, H, W] to ensure network forward pass is unchanged return TensorUtils.join_dimensions(out, 0, 1) @@ -180,26 +183,37 @@ def forward_out(self, inputs): # note the use of @self.training to ensure no randomization at test-time if self.training: - batch_size = (inputs.shape[0] // self.num_rand) - out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_rand)) + batch_size = inputs.shape[0] // self.num_rand + out = TensorUtils.reshape_dimensions( + inputs, + begin_axis=0, + end_axis=0, + target_dims=(batch_size, self.num_rand), + ) return out.mean(dim=1) return inputs def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) + header = "{}".format(str(self.__class__.__name__)) msg = header + "(input_shape={}, num_rand={}, noise_scale={})".format( - self.input_shape, self.num_rand, self.noise_scale) + self.input_shape, self.num_rand, self.noise_scale + ) return msg if __name__ == "__main__": # Now, we can directly reference the classes in our config! config = BCConfig() - config.observation.encoder.custom_image.core_class = "CustomImageEncoderCore" # Custom class, in string form - config.observation.encoder.custom_image.core_kwargs.welcome_str = "hi there!" # Any custom arguments, of any primitive type that is json-able - config.observation.encoder.custom_image.obs_randomizer_class = "CustomImageRandomizer" + config.observation.encoder.custom_image.core_class = ( + "CustomImageEncoderCore" # Custom class, in string form + ) + config.observation.encoder.custom_image.core_kwargs.welcome_str = ( + "hi there!" # Any custom arguments, of any primitive type that is json-able + ) + config.observation.encoder.custom_image.obs_randomizer_class = ( + "CustomImageRandomizer" + ) config.observation.encoder.custom_image.obs_randomizer_kwargs.num_rand = 3 config.observation.encoder.custom_image.obs_randomizer_kwargs.noise_scale = 0.05 diff --git a/examples/simple_config.py b/examples/simple_config.py index e80c6219..01f274e5 100644 --- a/examples/simple_config.py +++ b/examples/simple_config.py @@ -1,6 +1,7 @@ """ An example for creating and using the custom Config object. 
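 The __main__ block below also demonstrates updating a Config in bulk from an
 external dictionary inside a values_unlocked() context.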
""" + from robomimic.config.base_config import Config if __name__ == "__main__": @@ -36,14 +37,10 @@ # read external config from a dict ext_config = { - "train": { - "learning_rate": 1e-3 - }, - "algo": { - "actor_network_size": [1000, 1000] - } + "train": {"learning_rate": 1e-3}, + "algo": {"actor_network_size": [1000, 1000]}, } with config.values_unlocked(): config.update(ext_config) - print(config) \ No newline at end of file + print(config) diff --git a/examples/simple_obs_nets.py b/examples/simple_obs_nets.py index 236beaa8..c719f65c 100644 --- a/examples/simple_obs_nets.py +++ b/examples/simple_obs_nets.py @@ -33,7 +33,7 @@ def simple_obs_example(): "backbone_class": "ResNet18Conv", # use ResNet18 as the visualcore backbone "backbone_kwargs": {"pretrained": False, "input_coord_conv": False}, "pool_class": "SpatialSoftmax", # use spatial softmax to regularize the model output - "pool_kwargs": {"num_kp": 32} + "pool_kwargs": {"num_kp": 32}, } # register the network for processing the observation key @@ -50,7 +50,9 @@ def simple_obs_example(): camera2_shape = [3, 160, 240] # We could also attach an observation randomizer to perturb the input observation key before sending to the network - image_randomizer = CropRandomizer(input_shape=camera2_shape, crop_height=140, crop_width=220) + image_randomizer = CropRandomizer( + input_shape=camera2_shape, crop_height=140, crop_width=220 + ) # the cropper will alter the input shape net_kwargs["input_shape"] = image_randomizer.output_shape_in(camera2_shape) @@ -86,7 +88,9 @@ def simple_obs_example(): "low_dim": ["proprio"], "rgb": ["camera1", "camera2", "camera3"], } - ObsUtils.initialize_obs_modality_mapping_from_dict(modality_mapping=obs_modality_mapping) + ObsUtils.initialize_obs_modality_mapping_from_dict( + modality_mapping=obs_modality_mapping + ) # Finally, construct the observation encoder obs_encoder.make() @@ -99,7 +103,7 @@ def simple_obs_example(): "camera1": torch.randn(camera1_shape), "camera2": torch.randn(camera2_shape), "camera3": torch.randn(camera3_shape), - "proprio": torch.randn(proprio_shape) + "proprio": torch.randn(proprio_shape), } # Add a batch dimension @@ -119,7 +123,7 @@ def simple_obs_example(): # A convenient wrapper for decoding the feature vector to named output is ObservationDecoder obs_decoder = ObservationDecoder( input_feat_dim=obs_encoder.output_shape()[0], - decode_shapes=OrderedDict({"action": (7,)}) + decode_shapes=OrderedDict({"action": (7,)}), ) # Send to GPU if applicable diff --git a/examples/simple_train_loop.py b/examples/simple_train_loop.py index 3e5c3c50..acaf0985 100644 --- a/examples/simple_train_loop.py +++ b/examples/simple_train_loop.py @@ -6,6 +6,7 @@ can interact. This is meant to help others who would like to use our provided datasets and dataset class in other applications. 
""" + import numpy as np import torch @@ -31,28 +32,28 @@ def get_data_loader(dataset_path): """ dataset = SequenceDataset( hdf5_path=dataset_path, - obs_keys=( # observations we want to appear in batches - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + obs_keys=( # observations we want to appear in batches + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", "object", ), - dataset_keys=( # can optionally specify more keys here if they should appear in batches - "actions", - "rewards", + dataset_keys=( # can optionally specify more keys here if they should appear in batches + "actions", + "rewards", "dones", ), load_next_obs=True, frame_stack=1, - seq_length=10, # length-10 temporal sequences + seq_length=10, # length-10 temporal sequences pad_frame_stack=True, - pad_seq_length=True, # pad last obs per trajectory to ensure all sequences are sampled + pad_seq_length=True, # pad last obs per trajectory to ensure all sequences are sampled get_pad_mask=False, goal_mode=None, - hdf5_cache_mode="all", # cache dataset in memory to avoid repeated file i/o + hdf5_cache_mode="all", # cache dataset in memory to avoid repeated file i/o hdf5_use_swmr=True, hdf5_normalize_obs=False, - filter_by_attribute=None, # can optionally provide a filter key here + filter_by_attribute=None, # can optionally provide a filter key here ) print("\n============= Created Dataset =============") print(dataset) @@ -60,11 +61,11 @@ def get_data_loader(dataset_path): data_loader = DataLoader( dataset=dataset, - sampler=None, # no custom sampling logic (uniform sampling) - batch_size=100, # batches of size 100 + sampler=None, # no custom sampling logic (uniform sampling) + batch_size=100, # batches of size 100 shuffle=True, num_workers=0, - drop_last=True # don't provide last batch in dataset pass if it's less than 100 in size + drop_last=True, # don't provide last batch in dataset pass if it's less than 100 in size ) return data_loader @@ -82,13 +83,15 @@ def get_example_model(dataset_path, device): # read dataset to get some metadata for constructing model shape_meta = FileUtils.get_shape_metadata_from_dataset( - dataset_path=dataset_path, - all_obs_keys=sorted(( - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - )), + dataset_path=dataset_path, + all_obs_keys=sorted( + ( + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ) + ), ) # make BC model @@ -108,7 +111,11 @@ def print_batch_info(batch): if k in ["obs", "next_obs"]: print("key {}".format(k)) for obs_key in batch[k]: - print(" obs key {} with shape {}".format(obs_key, batch[k][obs_key].shape)) + print( + " obs key {} with shape {}".format( + obs_key, batch[k][obs_key].shape + ) + ) else: print("key {} with shape {}".format(k, batch[k].shape)) print("") @@ -131,7 +138,7 @@ def run_train_loop(model, data_loader): # ensure model is in train mode model.set_train() - for epoch in range(1, num_epochs + 1): # epoch numbers start at 1 + for epoch in range(1, num_epochs + 1): # epoch numbers start at 1 # iterator for data_loader - it yields batches data_loader_iter = iter(data_loader) @@ -155,7 +162,9 @@ def run_train_loop(model, data_loader): # process batch for training input_batch = model.process_batch_for_training(batch) - input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=None) + input_batch = model.postprocess_batch_for_training( + input_batch, obs_normalization_stats=None + ) # forward and backward pass info = 
model.train_on_batch(batch=input_batch, epoch=epoch, validate=False) diff --git a/examples/train_bc_rnn.py b/examples/train_bc_rnn.py index b3ec7c51..21ffa206 100644 --- a/examples/train_bc_rnn.py +++ b/examples/train_bc_rnn.py @@ -16,6 +16,7 @@ python train_bc_rnn.py --dataset /path/to/dataset.hdf5 --output /path/to/output_dir """ + import argparse import robomimic @@ -37,40 +38,62 @@ def robosuite_hyperparameters(config): Config: Modified config """ ## save config - if and when to save checkpoints ## - config.experiment.save.enabled = True # whether model saving should be enabled or disabled - config.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) - config.experiment.save.every_n_epochs = 50 # save model every n epochs (set to None to disable) - config.experiment.save.epochs = [] # save model on these specific epochs - config.experiment.save.on_best_validation = False # save models that achieve best validation score - config.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return - config.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate + config.experiment.save.enabled = ( + True # whether model saving should be enabled or disabled + ) + config.experiment.save.every_n_seconds = ( + None # save model every n seconds (set to None to disable) + ) + config.experiment.save.every_n_epochs = ( + 50 # save model every n epochs (set to None to disable) + ) + config.experiment.save.epochs = [] # save model on these specific epochs + config.experiment.save.on_best_validation = ( + False # save models that achieve best validation score + ) + config.experiment.save.on_best_rollout_return = ( + False # save models that achieve best rollout return + ) + config.experiment.save.on_best_rollout_success_rate = ( + True # save models that achieve best success rate + ) # epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps - config.experiment.validation_epoch_every_n_steps = 10 # each validation epoch is 10 gradient steps + config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps + config.experiment.validation_epoch_every_n_steps = ( + 10 # each validation epoch is 10 gradient steps + ) # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - config.experiment.env = None # no need to set this (unless you want to override) - config.experiment.additional_envs = None # additional environments that should get evaluated + config.experiment.env = None # no need to set this (unless you want to override) + config.experiment.additional_envs = ( + None # additional environments that should get evaluated + ) ## rendering config ## - config.experiment.render = False # render on-screen or not - config.experiment.render_video = True # render evaluation rollouts to videos - config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - config.experiment.video_skip = 5 # render video frame every n environment steps during rollout + config.experiment.render = False # render on-screen or not + config.experiment.render_video = True # render evaluation rollouts to videos + config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + config.experiment.video_skip = ( + 5 # 
render video frame every n environment steps during rollout + ) ## evaluation rollout config ## - config.experiment.rollout.enabled = True # enable evaluation rollouts - config.experiment.rollout.n = 50 # number of rollouts per evaluation - config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py) - config.experiment.rollout.rate = 50 # do rollouts every @rate epochs - config.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts - config.experiment.rollout.terminate_on_success = True # end rollout early after task success + config.experiment.rollout.enabled = True # enable evaluation rollouts + config.experiment.rollout.n = 50 # number of rollouts per evaluation + config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py) + config.experiment.rollout.rate = 50 # do rollouts every @rate epochs + config.experiment.rollout.warmstart = ( + 0 # number of epochs to wait before starting rollouts + ) + config.experiment.rollout.terminate_on_success = ( + True # end rollout early after task success + ) ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - config.train.num_data_workers = 0 # assume low-dim dataset + config.train.num_data_workers = 0 # assume low-dim dataset # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set @@ -78,15 +101,15 @@ def robosuite_hyperparameters(config): # You should almost never set this to None, even for large image datasets. config.train.hdf5_cache_mode = "all" - config.train.hdf5_use_swmr = True # used for parallel data loading + config.train.hdf5_use_swmr = True # used for parallel data loading # if true, normalize observations at train and test time, using the global mean and standard deviation # of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs # in utils/dataset.py for more information. 
- config.train.hdf5_normalize_obs = False # no obs normalization + config.train.hdf5_normalize_obs = False # no obs normalization # if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key" - config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split + config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split config.train.hdf5_validation_filter_key = "valid" # fetch sequences of length 10 from dataset for RNN training @@ -100,38 +123,51 @@ def robosuite_hyperparameters(config): ) # one of [None, "last"] - set to "last" to include goal observations in each batch - config.train.goal_mode = None # no need for goal observations + config.train.goal_mode = None # no need for goal observations ## learning config ## - config.train.cuda = True # try to use GPU (if present) or not - config.train.batch_size = 100 # batch size - config.train.num_epochs = 2000 # number of training epochs - config.train.seed = 1 # seed for training - + config.train.cuda = True # try to use GPU (if present) or not + config.train.batch_size = 100 # batch size + config.train.num_epochs = 2000 # number of training epochs + config.train.seed = 1 # seed for training ### Observation Config ### - config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - config.observation.modalities.obs.rgb = [] # no image observations - config.observation.modalities.goal.low_dim = [] # no low-dim goals - config.observation.modalities.goal.rgb = [] # no image goals + config.observation.modalities.obs.low_dim = ( + [ # specify low-dim observations for agent + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + ) + config.observation.modalities.obs.rgb = [] # no image observations + config.observation.modalities.goal.low_dim = [] # no low-dim goals + config.observation.modalities.goal.rgb = [] # no image goals # observation encoder architecture - applies to all networks that take observation dicts as input config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # Default arguments for "SpatialSoftmax" + 
config.observation.encoder.rgb.core_kwargs.pool_class = ( + "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = ( + 0.0 # Default arguments for "SpatialSoftmax" + ) # if you prefer to use pre-trained visual representations, uncomment the following lines # R3M @@ -157,34 +193,46 @@ def robosuite_hyperparameters(config): ### Algo Config ### # optimization parameters - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate - config.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - config.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate + config.algo.optim_params.policy.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + config.algo.optim_params.policy.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + config.algo.optim_params.policy.regularization.L2 = ( + 0.00 # L2 regularization strength + ) # loss weights - config.algo.loss.l2_weight = 1.0 # L2 loss weight - config.algo.loss.l1_weight = 0.0 # L1 loss weight - config.algo.loss.cos_weight = 0.0 # cosine loss weight + config.algo.loss.l2_weight = 1.0 # L2 loss weight + config.algo.loss.l1_weight = 0.0 # L1 loss weight + config.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) - config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output + config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output # stochastic GMM policy - config.algo.gmm.enabled = True # enable GMM policy - policy outputs GMM action distribution - config.algo.gmm.num_modes = 5 # number of GMM modes - config.algo.gmm.min_std = 0.0001 # minimum std output from network - config.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net - config.algo.gmm.low_noise_eval = True # low-std at test-time + config.algo.gmm.enabled = ( + True # enable GMM policy - policy outputs GMM action distribution + ) + config.algo.gmm.num_modes = 5 # number of GMM modes + config.algo.gmm.min_std = 0.0001 # minimum std output from network + config.algo.gmm.std_activation = ( + "softplus" # activation to use for std output from policy net + ) + config.algo.gmm.low_noise_eval = True # low-std at test-time # rnn policy config - config.algo.rnn.enabled = True # enable RNN policy - config.algo.rnn.horizon = 10 # unroll length for RNN - should usually match train.seq_length - config.algo.rnn.hidden_dim = 400 # hidden dimension size - config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" - config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked - config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + 
hidden state - config.algo.rnn.kwargs.bidirectional = False # rnn kwargs + config.algo.rnn.enabled = True # enable RNN policy + config.algo.rnn.horizon = ( + 10 # unroll length for RNN - should usually match train.seq_length + ) + config.algo.rnn.hidden_dim = 400 # hidden dimension size + config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" + config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked + config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state + config.algo.rnn.kwargs.bidirectional = False # rnn kwargs return config @@ -200,40 +248,62 @@ def momart_hyperparameters(config): Config: Modified config """ ## save config - if and when to save checkpoints ## - config.experiment.save.enabled = True # whether model saving should be enabled or disabled - config.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) - config.experiment.save.every_n_epochs = 3 # save model every n epochs (set to None to disable) - config.experiment.save.epochs = [] # save model on these specific epochs - config.experiment.save.on_best_validation = True # save models that achieve best validation score - config.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return - config.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate + config.experiment.save.enabled = ( + True # whether model saving should be enabled or disabled + ) + config.experiment.save.every_n_seconds = ( + None # save model every n seconds (set to None to disable) + ) + config.experiment.save.every_n_epochs = ( + 3 # save model every n epochs (set to None to disable) + ) + config.experiment.save.epochs = [] # save model on these specific epochs + config.experiment.save.on_best_validation = ( + True # save models that achieve best validation score + ) + config.experiment.save.on_best_rollout_return = ( + False # save models that achieve best rollout return + ) + config.experiment.save.on_best_rollout_success_rate = ( + True # save models that achieve best success rate + ) # epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - config.experiment.epoch_every_n_steps = None # each epoch is 100 gradient steps - config.experiment.validation_epoch_every_n_steps = 10 # each validation epoch is 10 gradient steps + config.experiment.epoch_every_n_steps = None # each epoch is 100 gradient steps + config.experiment.validation_epoch_every_n_steps = ( + 10 # each validation epoch is 10 gradient steps + ) # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - config.experiment.env = None # no need to set this (unless you want to override) - config.experiment.additional_envs = None # additional environments that should get evaluated + config.experiment.env = None # no need to set this (unless you want to override) + config.experiment.additional_envs = ( + None # additional environments that should get evaluated + ) ## rendering config ## - config.experiment.render = False # render on-screen or not - config.experiment.render_video = True # render evaluation rollouts to videos - config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - config.experiment.video_skip = 5 # render video frame every n environment steps during rollout + config.experiment.render = 
False # render on-screen or not + config.experiment.render_video = True # render evaluation rollouts to videos + config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + config.experiment.video_skip = ( + 5 # render video frame every n environment steps during rollout + ) ## evaluation rollout config ## - config.experiment.rollout.enabled = True # enable evaluation rollouts - config.experiment.rollout.n = 30 # number of rollouts per evaluation - config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout - config.experiment.rollout.rate = 3 # do rollouts every @rate epochs - config.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts - config.experiment.rollout.terminate_on_success = True # end rollout early after task success + config.experiment.rollout.enabled = True # enable evaluation rollouts + config.experiment.rollout.n = 30 # number of rollouts per evaluation + config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout + config.experiment.rollout.rate = 3 # do rollouts every @rate epochs + config.experiment.rollout.warmstart = ( + 0 # number of epochs to wait before starting rollouts + ) + config.experiment.rollout.terminate_on_success = ( + True # end rollout early after task success + ) ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - config.train.num_data_workers = 2 # assume low-dim dataset + config.train.num_data_workers = 2 # assume low-dim dataset # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set @@ -241,15 +311,15 @@ def momart_hyperparameters(config): # You should almost never set this to None, even for large image datasets. config.train.hdf5_cache_mode = "low_dim" - config.train.hdf5_use_swmr = True # used for parallel data loading + config.train.hdf5_use_swmr = True # used for parallel data loading # if true, normalize observations at train and test time, using the global mean and standard deviation # of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs # in utils/dataset.py for more information. 
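# A minimal sketch of how the video_skip setting above is typically applied:
# render one frame every n environment steps during a rollout. Assumes a
# Gym-style env with reset()/step()/render(); illustrative only, not the
# exact robomimic rollout loop:
def collect_video_frames(env, policy, horizon, video_skip=5):
    frames = []
    ob = env.reset()
    for step in range(horizon):
        ob, reward, done, info = env.step(policy(ob))
        if step % video_skip == 0:
            frames.append(env.render(mode="rgb_array"))
        if done:
            break
    return frames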
- config.train.hdf5_normalize_obs = False # no obs normalization + config.train.hdf5_normalize_obs = False # no obs normalization # if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key" - config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split + config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split config.train.hdf5_validation_filter_key = "valid" # fetch sequences of length 10 from dataset for RNN training @@ -263,19 +333,20 @@ def momart_hyperparameters(config): ) # one of [None, "last"] - set to "last" to include goal observations in each batch - config.train.goal_mode = "last" # no need for goal observations + config.train.goal_mode = "last" # no need for goal observations ## learning config ## - config.train.cuda = True # try to use GPU (if present) or not - config.train.batch_size = 4 # batch size - config.train.num_epochs = 31 # number of training epochs - config.train.seed = 1 # seed for training - + config.train.cuda = True # try to use GPU (if present) or not + config.train.batch_size = 4 # batch size + config.train.num_epochs = 31 # number of training epochs + config.train.seed = 1 # seed for training ### Observation Config ### - config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent - "proprio", - ] + config.observation.modalities.obs.low_dim = ( + [ # specify low-dim observations for agent + "proprio", + ] + ) config.observation.modalities.obs.rgb = [ "rgb", "rgb_wrist", @@ -288,40 +359,55 @@ def momart_hyperparameters(config): config.observation.modalities.obs.scan = [ "scan", ] - config.observation.modalities.goal.low_dim = [] # no low-dim goals - config.observation.modalities.goal.rgb = [] # no rgb image goals + config.observation.modalities.goal.low_dim = [] # no low-dim goals + config.observation.modalities.goal.rgb = [] # no rgb image goals ### Algo Config ### # optimization parameters - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate - config.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - config.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate + config.algo.optim_params.policy.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + config.algo.optim_params.policy.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + config.algo.optim_params.policy.regularization.L2 = ( + 0.00 # L2 regularization strength + ) # loss weights - config.algo.loss.l2_weight = 1.0 # L2 loss weight - config.algo.loss.l1_weight = 0.0 # L1 loss weight - config.algo.loss.cos_weight = 0.0 # cosine loss weight + config.algo.loss.l2_weight = 1.0 # L2 loss weight + config.algo.loss.l1_weight = 0.0 # L1 loss weight + config.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) - config.algo.actor_layer_dims = (300, 400) # MLP layers between RNN layer and action output + config.algo.actor_layer_dims = ( + 300, + 400, + ) # MLP layers between RNN layer and action output # stochastic GMM policy - config.algo.gmm.enabled = True # enable GMM policy - 
policy outputs GMM action distribution - config.algo.gmm.num_modes = 5 # number of GMM modes - config.algo.gmm.min_std = 0.01 # minimum std output from network - config.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net - config.algo.gmm.low_noise_eval = True # low-std at test-time + config.algo.gmm.enabled = ( + True # enable GMM policy - policy outputs GMM action distribution + ) + config.algo.gmm.num_modes = 5 # number of GMM modes + config.algo.gmm.min_std = 0.01 # minimum std output from network + config.algo.gmm.std_activation = ( + "softplus" # activation to use for std output from policy net + ) + config.algo.gmm.low_noise_eval = True # low-std at test-time # rnn policy config - config.algo.rnn.enabled = True # enable RNN policy - config.algo.rnn.horizon = 50 # unroll length for RNN - should usually match train.seq_length - config.algo.rnn.hidden_dim = 1200 # hidden dimension size - config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" - config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked - config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state - config.algo.rnn.kwargs.bidirectional = False # rnn kwargs + config.algo.rnn.enabled = True # enable RNN policy + config.algo.rnn.horizon = ( + 50 # unroll length for RNN - should usually match train.seq_length + ) + config.algo.rnn.hidden_dim = 1200 # hidden dimension size + config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" + config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked + config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state + config.algo.rnn.kwargs.bidirectional = False # rnn kwargs return config @@ -339,7 +425,9 @@ def momart_hyperparameters(config): } -def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False): +def get_config( + dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False +): """ Construct config for training. @@ -352,8 +440,9 @@ def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, deb debug (bool): if True, shrink training and rollout times to test a full training run quickly. """ - assert dataset_type in DATASET_TYPES, \ - f"Invalid dataset type. Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}" + assert ( + dataset_type in DATASET_TYPES + ), f"Invalid dataset type. 
Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}" # handle args if dataset_path is None: @@ -368,20 +457,24 @@ def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, deb config = config_factory(algo_name="bc") ### Experiment Config ### - config.experiment.name = f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files - config.experiment.validate = True # whether to do validation or not - config.experiment.logging.terminal_output_to_txt = False # whether to log stdout to txt file - config.experiment.logging.log_tb = True # enable tensorboard logging + config.experiment.name = ( + f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files + ) + config.experiment.validate = True # whether to do validation or not + config.experiment.logging.terminal_output_to_txt = ( + False # whether to log stdout to txt file + ) + config.experiment.logging.log_tb = True # enable tensorboard logging ### Train Config ### - config.train.data = dataset_path # path to hdf5 dataset + config.train.data = dataset_path # path to hdf5 dataset # Write all results to this directory. A new folder with the timestamp will be created # in this directory, and it will contain three subfolders - "log", "models", and "videos". # The "log" directory will contain tensorboard and stdout txt logs. The "models" directory # will contain saved model checkpoints. The "videos" directory contains evaluation rollout # videos. - config.train.output_dir = output_dir # path to output folder + config.train.output_dir = output_dir # path to output folder # Load default hyperparameters based on dataset type config = DATASET_TYPES[dataset_type]["hp"](config) @@ -427,8 +520,8 @@ def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, deb # debug flag for quick training run parser.add_argument( "--debug", - action='store_true', - help="set this flag to run a quick training run for debugging purposes" + action="store_true", + help="set this flag to run a quick training run for debugging purposes", ) # type @@ -438,7 +531,7 @@ def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, deb default="robosuite", choices=list(DATASET_TYPES.keys()), help=f"Dataset type to use. This will determine the default hyperparameter settings to use for training." - f"Valid options are: {list(DATASET_TYPES.keys())}. Default is robosuite." + f"Valid options are: {list(DATASET_TYPES.keys())}. 
Default is robosuite.", ) args = parser.parse_args() @@ -452,7 +545,7 @@ def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, deb dataset_type=args.dataset_type, dataset_path=args.dataset, output_dir=args.output, - debug=args.debug + debug=args.debug, ) # set torch device diff --git a/robomimic/__init__.py b/robomimic/__init__.py index 1930630a..a6bfb900 100644 --- a/robomimic/__init__.py +++ b/robomimic/__init__.py @@ -56,53 +56,135 @@ def register_all_links(): """ # all proficient human datasets - ph_tasks = ["lift", "can", "square", "transport", "tool_hang", "lift_real", "can_real", "tool_hang_real"] + ph_tasks = [ + "lift", + "can", + "square", + "transport", + "tool_hang", + "lift_real", + "can_real", + "tool_hang_real", + ] ph_horizons = [400, 400, 400, 700, 700, 1000, 1000, 1000] for task, horizon in zip(ph_tasks, ph_horizons): - register_dataset_link(task=task, dataset_type="ph", hdf5_type="raw", horizon=horizon, + register_dataset_link( + task=task, + dataset_type="ph", + hdf5_type="raw", + horizon=horizon, link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/demo{}.hdf5".format( task, "" if "real" in task else "_v141" - ) + ), ) # real world datasets only have demo.hdf5 files which already contain all observation modalities # while sim datasets store raw low-dim mujoco states in the demo.hdf5 if "real" not in task: - register_dataset_link(task=task, dataset_type="ph", hdf5_type="low_dim", horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/low_dim_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="ph", hdf5_type="image", horizon=horizon, - link=None) + register_dataset_link( + task=task, + dataset_type="ph", + hdf5_type="low_dim", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/low_dim_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, + dataset_type="ph", + hdf5_type="image", + horizon=horizon, + link=None, + ) # all multi human datasets mh_tasks = ["lift", "can", "square", "transport"] mh_horizons = [500, 500, 500, 1100] for task, horizon in zip(mh_tasks, mh_horizons): - register_dataset_link(task=task, dataset_type="mh", hdf5_type="raw", horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/demo_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="mh", hdf5_type="low_dim", horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/low_dim_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="mh", hdf5_type="image", horizon=horizon, - link=None) + register_dataset_link( + task=task, + dataset_type="mh", + hdf5_type="raw", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/demo_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, + dataset_type="mh", + hdf5_type="low_dim", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/low_dim_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, dataset_type="mh", hdf5_type="image", horizon=horizon, link=None + ) # all machine generated datasets for task, horizon in zip(["lift", "can"], [400, 400]): - register_dataset_link(task=task, dataset_type="mg", hdf5_type="raw", horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/demo_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="mg", hdf5_type="low_dim_sparse", 
horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_sparse_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="mg", hdf5_type="image_sparse", horizon=horizon, - link=None) - register_dataset_link(task=task, dataset_type="mg", hdf5_type="low_dim_dense", horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_dense_v141.hdf5".format(task)) - register_dataset_link(task=task, dataset_type="mg", hdf5_type="image_dense", horizon=horizon, - link=None) + register_dataset_link( + task=task, + dataset_type="mg", + hdf5_type="raw", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/demo_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, + dataset_type="mg", + hdf5_type="low_dim_sparse", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_sparse_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, + dataset_type="mg", + hdf5_type="image_sparse", + horizon=horizon, + link=None, + ) + register_dataset_link( + task=task, + dataset_type="mg", + hdf5_type="low_dim_dense", + horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_dense_v141.hdf5".format( + task + ), + ) + register_dataset_link( + task=task, + dataset_type="mg", + hdf5_type="image_dense", + horizon=horizon, + link=None, + ) # can-paired dataset - register_dataset_link(task="can", dataset_type="paired", hdf5_type="raw", horizon=400, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/demo_v141.hdf5") - register_dataset_link(task="can", dataset_type="paired", hdf5_type="low_dim", horizon=400, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/low_dim_v141.hdf5") - register_dataset_link(task="can", dataset_type="paired", hdf5_type="image", horizon=400, - link=None) + register_dataset_link( + task="can", + dataset_type="paired", + hdf5_type="raw", + horizon=400, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/demo_v141.hdf5", + ) + register_dataset_link( + task="can", + dataset_type="paired", + hdf5_type="low_dim", + horizon=400, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/low_dim_v141.hdf5", + ) + register_dataset_link( + task="can", dataset_type="paired", hdf5_type="image", horizon=400, link=None + ) def register_momart_dataset_link(task, dataset_type, link, dataset_size): diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index dbe2ea4d..c35bbebc 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -1,4 +1,14 @@ -from robomimic.algo.algo import register_algo_factory_func, algo_name_to_factory_func, algo_factory, Algo, PolicyAlgo, ValueAlgo, PlannerAlgo, HierarchicalAlgo, RolloutPolicy +from robomimic.algo.algo import ( + register_algo_factory_func, + algo_name_to_factory_func, + algo_factory, + Algo, + PolicyAlgo, + ValueAlgo, + PlannerAlgo, + HierarchicalAlgo, + RolloutPolicy, +) # note: these imports are needed to register these classes in the global algo registry from robomimic.algo.bc import BC, BC_Gaussian, BC_GMM, BC_VAE, BC_RNN, BC_RNN_GMM @@ -9,4 +19,5 @@ from robomimic.algo.hbc import HBC from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC + # from robomimic.algo.diffusion_policy import DiffusionPolicyUNet diff --git a/robomimic/algo/algo.py b/robomimic/algo/algo.py index 9211f1d9..4f2950da 100644 --- 
a/robomimic/algo/algo.py +++ b/robomimic/algo/algo.py @@ -7,6 +7,7 @@ @register_algo_factory_func function decorator. This makes it easy for @algo_factory to instantiate the correct `Algo` subclass. """ + import textwrap from copy import deepcopy from collections import OrderedDict @@ -30,8 +31,10 @@ def register_algo_factory_func(algo_name): Args: algo_name (str): the algorithm name to register the algorithm under """ + def decorator(factory_func): REGISTERED_ALGO_FACTORY_FUNCS[algo_name] = factory_func + return decorator @@ -87,14 +90,9 @@ class Algo(object): a standard API to be used by training functions such as @run_epoch in utils/train_utils.py. """ + def __init__( - self, - algo_config, - obs_config, - global_config, - obs_key_shapes, - ac_dim, - device + self, algo_config, obs_config, global_config, obs_key_shapes, ac_dim, device ): """ Args: @@ -147,11 +145,23 @@ def _create_shapes(self, obs_keys, obs_key_shapes): # We check across all modality groups (obs, goal, subgoal), and see if the inputted observation key exists # across all modalitie specified in the config. If so, we store its corresponding shape internally for k in obs_key_shapes: - if "obs" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.obs.values() for obs_key in modality]: + if "obs" in self.obs_config.modalities and k in [ + obs_key + for modality in self.obs_config.modalities.obs.values() + for obs_key in modality + ]: self.obs_shapes[k] = obs_key_shapes[k] - if "goal" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.goal.values() for obs_key in modality]: + if "goal" in self.obs_config.modalities and k in [ + obs_key + for modality in self.obs_config.modalities.goal.values() + for obs_key in modality + ]: self.goal_shapes[k] = obs_key_shapes[k] - if "subgoal" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.subgoal.values() for obs_key in modality]: + if "subgoal" in self.obs_config.modalities and k in [ + obs_key + for modality in self.obs_config.modalities.subgoal.values() + for obs_key in modality + ]: self.subgoal_shapes[k] = obs_key_shapes[k] def _create_networks(self): @@ -174,18 +184,28 @@ def _create_optimizers(self): if k in self.nets: if isinstance(self.nets[k], nn.ModuleList): self.optimizers[k] = [ - TorchUtils.optimizer_from_optim_params(net_optim_params=self.optim_params[k], net=self.nets[k][i]) + TorchUtils.optimizer_from_optim_params( + net_optim_params=self.optim_params[k], net=self.nets[k][i] + ) for i in range(len(self.nets[k])) ] self.lr_schedulers[k] = [ - TorchUtils.lr_scheduler_from_optim_params(net_optim_params=self.optim_params[k], net=self.nets[k][i], optimizer=self.optimizers[k][i]) + TorchUtils.lr_scheduler_from_optim_params( + net_optim_params=self.optim_params[k], + net=self.nets[k][i], + optimizer=self.optimizers[k][i], + ) for i in range(len(self.nets[k])) ] else: self.optimizers[k] = TorchUtils.optimizer_from_optim_params( - net_optim_params=self.optim_params[k], net=self.nets[k]) + net_optim_params=self.optim_params[k], net=self.nets[k] + ) self.lr_schedulers[k] = TorchUtils.lr_scheduler_from_optim_params( - net_optim_params=self.optim_params[k], net=self.nets[k], optimizer=self.optimizers[k]) + net_optim_params=self.optim_params[k], + net=self.nets[k], + optimizer=self.optimizers[k], + ) def process_batch_for_training(self, batch): """ @@ -198,7 +218,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered 
batch that - will be used for training + will be used for training """ return batch @@ -214,8 +234,8 @@ def postprocess_batch_for_training(self, batch, obs_normalization_stats): training will occur (after @process_batch_for_training is called) - obs_normalization_stats (dict or None): if provided, this should map observation - keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the + obs_normalization_stats (dict or None): if provided, this should map observation + keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the default shape for the observation. Returns: @@ -223,7 +243,11 @@ def postprocess_batch_for_training(self, batch, obs_normalization_stats): """ # ensure obs_normalization_stats are torch Tensors on proper device - obs_normalization_stats = TensorUtils.to_float(TensorUtils.to_device(TensorUtils.to_tensor(obs_normalization_stats), self.device)) + obs_normalization_stats = TensorUtils.to_float( + TensorUtils.to_device( + TensorUtils.to_tensor(obs_normalization_stats), self.device + ) + ) # we will search the nested batch dictionary for the following special batch dict keys # and apply the processing function to their values (which correspond to observations) @@ -239,7 +263,9 @@ def recurse_helper(d): if d[k] is not None: d[k] = ObsUtils.process_obs_dict(d[k]) if obs_normalization_stats is not None: - d[k] = ObsUtils.normalize_obs(d[k], obs_normalization_stats=obs_normalization_stats) + d[k] = ObsUtils.normalize_obs( + d[k], obs_normalization_stats=obs_normalization_stats + ) elif isinstance(d[k], dict): # search down into dictionary recurse_helper(d[k]) @@ -329,8 +355,11 @@ def __repr__(self): """ Pretty print algorithm and network description. """ - return "{} (\n".format(self.__class__.__name__) + \ - textwrap.indent(self.nets.__repr__(), ' ') + "\n)" + return ( + "{} (\n".format(self.__class__.__name__) + + textwrap.indent(self.nets.__repr__(), " ") + + "\n)" + ) def reset(self): """ @@ -343,6 +372,7 @@ class PolicyAlgo(Algo): """ Base class for all algorithms that can be used as policies. """ + def get_action(self, obs_dict, goal_dict=None): """ Get policy action outputs. @@ -361,6 +391,7 @@ class ValueAlgo(Algo): """ Base class for all algorithms that can learn a value function. """ + def get_state_value(self, obs_dict, goal_dict=None): """ Get state value outputs. @@ -394,6 +425,7 @@ class PlannerAlgo(Algo): Base class for all algorithms that can be used for planning subgoals conditioned on current observations and potential goal observations. """ + def get_subgoal_predictions(self, obs_dict, goal_dict=None): """ Get predicted subgoal outputs. @@ -426,6 +458,7 @@ class HierarchicalAlgo(Algo): Base class for all hierarchical algorithms that consist of (1) subgoal planning and (2) subgoal-conditioned policy learning. """ + def get_action(self, obs_dict, goal_dict=None): """ Get policy action outputs. @@ -467,6 +500,7 @@ class RolloutPolicy(object): """ Wraps @Algo object to make it easy to run policies in a rollout loop. """ + def __init__(self, policy, obs_normalization_stats=None): """ Args: @@ -492,7 +526,7 @@ def _prepare_observation(self, ob): Prepare raw observation dict from environment for policy. 
Args: - ob (dict): single observation dictionary from environment (no batch dimension, + ob (dict): single observation dictionary from environment (no batch dimension, and np.array values for each key) """ ob = TensorUtils.to_tensor(ob) @@ -501,10 +535,17 @@ def _prepare_observation(self, ob): ob = TensorUtils.to_float(ob) if self.obs_normalization_stats is not None: # ensure obs_normalization_stats are torch Tensors on proper device - obs_normalization_stats = TensorUtils.to_float(TensorUtils.to_device(TensorUtils.to_tensor(self.obs_normalization_stats), self.policy.device)) + obs_normalization_stats = TensorUtils.to_float( + TensorUtils.to_device( + TensorUtils.to_tensor(self.obs_normalization_stats), + self.policy.device, + ) + ) # limit normalization to obs keys being used, in case environment includes extra keys - ob = { k : ob[k] for k in self.policy.global_config.all_obs_keys } - ob = ObsUtils.normalize_obs(ob, obs_normalization_stats=obs_normalization_stats) + ob = {k: ob[k] for k in self.policy.global_config.all_obs_keys} + ob = ObsUtils.normalize_obs( + ob, obs_normalization_stats=obs_normalization_stats + ) return ob def __repr__(self): @@ -516,7 +557,7 @@ def __call__(self, ob, goal=None): Produce action from raw observation dict (and maybe goal dict) from environment. Args: - ob (dict): single observation dictionary from environment (no batch dimension, + ob (dict): single observation dictionary from environment (no batch dimension, and np.array values for each key) goal (dict): goal observation """ diff --git a/robomimic/algo/bc.py b/robomimic/algo/bc.py index 02c7dfe8..e5255d75 100644 --- a/robomimic/algo/bc.py +++ b/robomimic/algo/bc.py @@ -1,6 +1,7 @@ """ Implementation of Behavioral Cloning (BC). """ + from collections import OrderedDict import torch @@ -35,13 +36,15 @@ def algo_config_to_class(algo_config): # note: we need the check below because some configs import BCConfig and exclude # some of these options - gaussian_enabled = ("gaussian" in algo_config and algo_config.gaussian.enabled) - gmm_enabled = ("gmm" in algo_config and algo_config.gmm.enabled) - vae_enabled = ("vae" in algo_config and algo_config.vae.enabled) + gaussian_enabled = "gaussian" in algo_config and algo_config.gaussian.enabled + gmm_enabled = "gmm" in algo_config and algo_config.gmm.enabled + vae_enabled = "vae" in algo_config and algo_config.vae.enabled rnn_enabled = algo_config.rnn.enabled # support legacy configs that do not have "transformer" item - transformer_enabled = ("transformer" in algo_config) and algo_config.transformer.enabled + transformer_enabled = ( + "transformer" in algo_config + ) and algo_config.transformer.enabled if gaussian_enabled: if rnn_enabled: @@ -79,6 +82,7 @@ class BC(PolicyAlgo): """ Normal BC training. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. 
@@ -89,7 +93,9 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets = self.nets.float().to(self.device) @@ -104,18 +110,21 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() - #input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["obs"] = {k: v[:, 0, :] if v.ndim != 1 else v for k, v in batch['obs'].items()} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + # input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} + input_batch["obs"] = { + k: v[:, 0, :] if v.ndim != 1 else v for k, v in batch["obs"].items() + } + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) - def train_on_batch(self, batch, epoch, validate=False): """ Training on a single batch of data. @@ -160,7 +169,9 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ predictions = OrderedDict() - actions = self.nets["policy"](obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) + actions = self.nets["policy"]( + obs_dict=batch["obs"], goal_dict=batch["goal_obs"] + ) predictions["actions"] = actions return predictions @@ -255,6 +266,7 @@ class BC_Gaussian(BC): """ BC training with a Gaussian policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -272,7 +284,9 @@ def _create_networks(self): std_limits=(self.algo_config.gaussian.min_std, 7.5), std_activation=self.algo_config.gaussian.std_activation, low_noise_eval=self.algo_config.gaussian.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets = self.nets.float().to(self.device) @@ -290,7 +304,7 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) @@ -338,7 +352,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -348,6 +362,7 @@ class BC_GMM(BC_Gaussian): """ BC training with a Gaussian Mixture Model policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. 
@@ -364,7 +379,9 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets = self.nets.float().to(self.device) @@ -374,6 +391,7 @@ class BC_VAE(BC): """ BC training with a VAE policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -384,10 +402,12 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, device=self.device, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **VAENets.vae_args_from_config(self.algo_config.vae), ) - + self.nets = self.nets.float().to(self.device) def train_on_batch(self, batch, epoch, validate=False): @@ -395,8 +415,13 @@ def train_on_batch(self, batch, epoch, validate=False): Update from superclass to set categorical temperature, for categorical VAEs. """ if self.algo_config.vae.prior.use_categorical: - temperature = self.algo_config.vae.prior.categorical_init_temp - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step - temperature = max(temperature, self.algo_config.vae.prior.categorical_min_temp) + temperature = ( + self.algo_config.vae.prior.categorical_init_temp + - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step + ) + temperature = max( + temperature, self.algo_config.vae.prior.categorical_min_temp + ) self.nets["policy"].set_gumbel_temperature(temperature) return super(BC_VAE, self).train_on_batch(batch, epoch, validate=validate) @@ -474,7 +499,9 @@ def log_info(self, info): if self.algo_config.vae.prior.use_categorical: log["Gumbel_Temperature"] = self.nets["policy"].get_gumbel_temperature() else: - log["Encoder_Variance"] = info["predictions"]["encoder_variance"].mean().item() + log["Encoder_Variance"] = ( + info["predictions"]["encoder_variance"].mean().item() + ) if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -484,6 +511,7 @@ class BC_RNN(BC): """ BC training with an RNN policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -494,7 +522,9 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **BaseNets.rnn_args_from_config(self.algo_config.rnn), ) @@ -520,7 +550,9 @@ def process_batch_for_training(self, batch): """ input_batch = dict() input_batch["obs"] = batch["obs"] - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"] if self._rnn_is_open_loop: @@ -529,7 +561,9 @@ def process_batch_for_training(self, batch): # on the rnn hidden state. 
n_steps = batch["actions"].shape[1] obs_seq_start = TensorUtils.index_at_time(batch["obs"], ind=0) - input_batch["obs"] = TensorUtils.unsqueeze_expand_at(obs_seq_start, size=n_steps, dim=1) + input_batch["obs"] = TensorUtils.unsqueeze_expand_at( + obs_seq_start, size=n_steps, dim=1 + ) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -550,7 +584,9 @@ def get_action(self, obs_dict, goal_dict=None): if self._rnn_hidden_state is None or self._rnn_counter % self._rnn_horizon == 0: batch_size = list(obs_dict.values())[0].shape[0] - self._rnn_hidden_state = self.nets["policy"].get_rnn_init_state(batch_size=batch_size, device=self.device) + self._rnn_hidden_state = self.nets["policy"].get_rnn_init_state( + batch_size=batch_size, device=self.device + ) if self._rnn_is_open_loop: # remember the initial observation, and use it instead of the current observation @@ -564,7 +600,8 @@ def get_action(self, obs_dict, goal_dict=None): self._rnn_counter += 1 action, self._rnn_hidden_state = self.nets["policy"].forward_step( - obs_to_use, goal_dict=goal_dict, rnn_state=self._rnn_hidden_state) + obs_to_use, goal_dict=goal_dict, rnn_state=self._rnn_hidden_state + ) return action def reset(self): @@ -579,6 +616,7 @@ class BC_RNN_GMM(BC_RNN): """ BC training with an RNN GMM policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -596,7 +634,9 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **BaseNets.rnn_args_from_config(self.algo_config.rnn), ) @@ -620,13 +660,13 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) # make sure that this is a batch of multivariate action distributions, so that # the log probability computation will be correct - assert len(dists.batch_shape) == 2 # [B, T] + assert len(dists.batch_shape) == 2 # [B, T] log_probs = dists.log_prob(batch["actions"]) predictions = OrderedDict( @@ -668,7 +708,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -678,6 +718,7 @@ class BC_Transformer(BC): """ BC training with a Transformer policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -689,12 +730,14 @@ def _create_networks(self): obs_shapes=self.obs_shapes, goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **BaseNets.transformer_args_from_config(self.algo_config.transformer), ) self._set_params_from_config() self.nets = self.nets.float().to(self.device) - + def _set_params_from_config(self): """ Read specific config variables we need for training / eval. 
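# A minimal sketch of the rollout-time bookkeeping in BC_RNN.get_action
# above: the hidden state persists across calls and is re-initialized every
# rnn.horizon steps (init_fn below is a stand-in for get_rnn_init_state):
class RnnStateSketch:
    def __init__(self, horizon):
        self.horizon = horizon
        self.counter = 0
        self.hidden = None

    def step(self, init_fn):
        if self.hidden is None or self.counter % self.horizon == 0:
            self.hidden = init_fn()  # fresh hidden state, e.g. zeros for an LSTM
        self.counter += 1
        return self.hidden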
@@ -717,16 +760,20 @@ def process_batch_for_training(self, batch): input_batch = dict() h = self.context_length input_batch["obs"] = {k: batch["obs"][k][:, :h, :] for k in batch["obs"]} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present if self.supervise_all_steps: # supervision on entire sequence (instead of just current timestep) input_batch["actions"] = batch["actions"][:, :h, :] else: # just use current timestep - input_batch["actions"] = batch["actions"][:, h-1, :] + input_batch["actions"] = batch["actions"][:, h - 1, :] - input_batch = TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device) + input_batch = TensorUtils.to_device( + TensorUtils.to_float(input_batch), self.device + ) return input_batch def _forward_training(self, batch, epoch=None): @@ -743,14 +790,18 @@ def _forward_training(self, batch, epoch=None): """ # ensure that transformer context length is consistent with temporal dimension of observations TensorUtils.assert_size_at_dim( - batch["obs"], - size=(self.context_length), - dim=1, - msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format(self.context_length), + batch["obs"], + size=(self.context_length), + dim=1, + msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format( + self.context_length + ), ) predictions = OrderedDict() - predictions["actions"] = self.nets["policy"](obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"]) + predictions["actions"] = self.nets["policy"]( + obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"] + ) if not self.supervise_all_steps: # only supervise final timestep predictions["actions"] = predictions["actions"][:, -1, :] @@ -767,13 +818,16 @@ def get_action(self, obs_dict, goal_dict=None): """ assert not self.nets.training - return self.nets["policy"](obs_dict, actions=None, goal_dict=goal_dict)[:, -1, :] + return self.nets["policy"](obs_dict, actions=None, goal_dict=goal_dict)[ + :, -1, : + ] class BC_Transformer_GMM(BC_Transformer): """ BC training with a Transformer GMM policy. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. 
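# A minimal sketch of the supervision targets in BC_Transformer above, given
# context length h: either the whole window or only its final step is used
# as the label, depending on supervise_all_steps:
import torch

B, h, ac_dim = 8, 10, 7
actions = torch.zeros(B, h, ac_dim)  # (B, T, ac_dim) with T >= h
supervise_all_steps = False
targets = actions[:, :h, :] if supervise_all_steps else actions[:, h - 1, :]
assert targets.shape == (B, ac_dim)  # (B, h, ac_dim) when supervising all steps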
@@ -790,7 +844,9 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **BaseNets.transformer_args_from_config(self.algo_config.transformer), ) self._set_params_from_config() @@ -802,14 +858,16 @@ def _forward_training(self, batch, epoch=None): """ # ensure that transformer context length is consistent with temporal dimension of observations TensorUtils.assert_size_at_dim( - batch["obs"], - size=(self.context_length), - dim=1, - msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format(self.context_length), + batch["obs"], + size=(self.context_length), + dim=1, + msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format( + self.context_length + ), ) dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"], low_noise_eval=False, @@ -817,7 +875,7 @@ def _forward_training(self, batch, epoch=None): # make sure that this is a batch of multivariate action distributions, so that # the log probability computation will be correct - assert len(dists.batch_shape) == 2 # [B, T] + assert len(dists.batch_shape) == 2 # [B, T] if not self.supervise_all_steps: # only use final timestep prediction by making a new distribution with only final timestep. @@ -827,7 +885,9 @@ def _forward_training(self, batch, epoch=None): scale=dists.component_distribution.base_dist.scale[:, -1], ) component_distribution = D.Independent(component_distribution, 1) - mixture_distribution = D.Categorical(logits=dists.mixture_distribution.logits[:, -1]) + mixture_distribution = D.Categorical( + logits=dists.mixture_distribution.logits[:, -1] + ) dists = D.MixtureSameFamily( mixture_distribution=mixture_distribution, component_distribution=component_distribution, @@ -870,7 +930,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log diff --git a/robomimic/algo/bcq.py b/robomimic/algo/bcq.py index 5843ccb5..e7fdd7b9 100644 --- a/robomimic/algo/bcq.py +++ b/robomimic/algo/bcq.py @@ -3,6 +3,7 @@ generative action models (the original paper uses a cVAE). (Paper - https://arxiv.org/abs/1812.02900). 
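# All of the GMM policies above train with the same negative log-likelihood
# loss. A self-contained sketch of that loss with torch.distributions (shapes
# and values are illustrative):
import torch
import torch.distributions as D

B, T, M, A = 4, 10, 5, 7  # batch, time, mixture modes, action dim
means, scales = torch.randn(B, T, M, A), torch.ones(B, T, M, A)
logits = torch.randn(B, T, M)

component = D.Independent(D.Normal(means, scales), 1)  # multivariate per mode
dists = D.MixtureSameFamily(D.Categorical(logits=logits), component)
assert len(dists.batch_shape) == 2  # [B, T], as the asserts above require

actions = torch.randn(B, T, A)
action_loss = -dists.log_prob(actions).mean()  # scalar loss to backprop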
""" + from collections import OrderedDict import torch @@ -46,6 +47,7 @@ class BCQ(PolicyAlgo, ValueAlgo): Default BCQ training, based on https://arxiv.org/abs/1812.02900 and https://github.com/sfujim/BCQ """ + def __init__(self, **kwargs): PolicyAlgo.__init__(self, **kwargs) @@ -67,13 +69,13 @@ def _create_networks(self): with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.hard_update( - source=self.nets["critic"][critic_ind], + source=self.nets["critic"][critic_ind], target=self.nets["critic_target"][critic_ind], ) if self.algo_config.actor.enabled: TorchUtils.hard_update( - source=self.nets["actor"], + source=self.nets["actor"], target=self.nets["actor_target"], ) @@ -90,7 +92,9 @@ def _create_critics(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) # Q network ensemble and target ensemble @@ -115,7 +119,9 @@ def _create_action_sampler(self): ac_dim=self.ac_dim, device=self.device, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **VAENets.vae_args_from_config(self.algo_config.action_sampler.vae), ) @@ -131,7 +137,9 @@ def _create_actor(self): ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, perturbation_scale=self.algo_config.actor.perturbation_scale, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets["actor"] = actor_class(**actor_args) @@ -145,9 +153,13 @@ def _check_epoch(self, net_name, epoch): net_name (str): name of network in @self.nets and @self.optim_params epoch (int): epoch number """ - epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"]) - epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"]) - return (epoch_start_check and epoch_end_check) + epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or ( + epoch >= self.optim_params[net_name]["start_epoch"] + ) + epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or ( + epoch < self.optim_params[net_name]["end_epoch"] + ) + return epoch_start_check and epoch_end_check def set_discount(self, discount): """ @@ -166,7 +178,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -176,19 +188,25 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["next_obs"] = { + k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"] + } + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last 
dimension of 1 to be compatible with model outputs # single timestep reward is discounted sum of intermediate rewards in sequence reward_seq = batch["rewards"][:, :n_step] - discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0) + discounts = torch.pow( + self.algo_config.discount, torch.arange(n_step).float() + ).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # discount rate will be gamma^N for computing n-step returns - new_discount = (self.algo_config.discount ** n_step) + new_discount = self.algo_config.discount**n_step self.set_discount(new_discount) # consider this n-step seqeunce done if any intermediate dones are present @@ -197,9 +215,13 @@ def process_batch_for_training(self, batch): if self.algo_config.infinite_horizon: # scale terminal rewards by 1 / (1 - gamma) for infinite horizon MDPs - done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] + done_inds = ( + input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] + ) if done_inds.shape[0] > 0: - input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount)) + input_batch["rewards"][done_inds] = input_batch["rewards"][ + done_inds + ] * (1.0 / (1.0 - self.discount)) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -228,8 +250,15 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): """ info = OrderedDict() if self.algo_config.action_sampler.vae.prior.use_categorical: - temperature = self.algo_config.action_sampler.vae.prior.categorical_init_temp - epoch * self.algo_config.action_sampler.vae.prior.categorical_temp_anneal_step - temperature = max(temperature, self.algo_config.action_sampler.vae.prior.categorical_min_temp) + temperature = ( + self.algo_config.action_sampler.vae.prior.categorical_init_temp + - epoch + * self.algo_config.action_sampler.vae.prior.categorical_temp_anneal_step + ) + temperature = max( + temperature, + self.algo_config.action_sampler.vae.prior.categorical_min_temp, + ) self.nets["action_sampler"].set_gumbel_temperature(temperature) vae_inputs = dict( @@ -239,7 +268,9 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): ) # maybe freeze encoder weights - if (self.algo_config.action_sampler.freeze_encoder_epoch != -1) and (epoch >= self.algo_config.action_sampler.freeze_encoder_epoch): + if (self.algo_config.action_sampler.freeze_encoder_epoch != -1) and ( + epoch >= self.algo_config.action_sampler.freeze_encoder_epoch + ): vae_inputs["freeze_encoder"] = True # VAE forward @@ -252,7 +283,9 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): info["action_sampler/kl_loss"] = kl_loss if not self.algo_config.action_sampler.vae.prior.use_categorical: with torch.no_grad(): - encoder_variance = torch.exp(vae_outputs["encoder_params"]["logvar"]).mean() + encoder_variance = torch.exp( + vae_outputs["encoder_params"]["logvar"] + ).mean() info["action_sampler/encoder_variance"] = encoder_variance outputs = TensorUtils.detach(vae_outputs) @@ -266,7 +299,9 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): info["action_sampler/grad_norms"] = vae_grad_norms return info, outputs - def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backprop=False): + def _train_critic_on_batch( + self, batch, action_sampler_outputs, epoch, no_backprop=False + 
): """ A modular helper function that can be overridden in case subclasses would like to modify training behavior for the @@ -287,7 +322,7 @@ def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backpr Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging - critic_outputs (dict): dictionary of critic outputs - useful for + critic_outputs (dict): dictionary of critic outputs - useful for logging purposes """ info = OrderedDict() @@ -300,14 +335,14 @@ def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backpr goal_s_batch = batch["goal_obs"] # 1 if not done, 0 otherwise - done_mask_batch = 1. - batch["dones"] + done_mask_batch = 1.0 - batch["dones"] info["done_masks"] = done_mask_batch # Bellman backup for Q-targets q_targets = self._get_target_values( - next_states=ns_batch, - goal_states=goal_s_batch, - rewards=r_batch, + next_states=ns_batch, + goal_states=goal_s_batch, + rewards=r_batch, dones=done_mask_batch, action_sampler_outputs=action_sampler_outputs, ) @@ -317,10 +352,10 @@ def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backpr critic_outputs = [] for critic_ind, critic in enumerate(self.nets["critic"]): critic_loss, critic_output = self._compute_critic_loss( - critic=critic, - states=s_batch, - actions=a_batch, - goal_states=goal_s_batch, + critic=critic, + states=s_batch, + actions=a_batch, + goal_states=goal_s_batch, q_targets=q_targets, ) info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss @@ -330,14 +365,18 @@ def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, no_backpr critic_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["critic"][critic_ind], optim=self.optimizers["critic"][critic_ind], - loss=critic_loss, + loss=critic_loss, max_grad_norm=self.algo_config.critic.max_gradient_norm, ) - info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms + info["critic/critic{}_grad_norms".format(critic_ind + 1)] = ( + critic_grad_norms + ) return info, critic_outputs - def _train_actor_on_batch(self, batch, action_sampler_outputs, critic_outputs, epoch, no_backprop=False): + def _train_actor_on_batch( + self, batch, action_sampler_outputs, critic_outputs, epoch, no_backprop=False + ): """ A modular helper function that can be overridden in case subclasses would like to modify training behavior for the @@ -372,9 +411,13 @@ def _train_actor_on_batch(self, batch, action_sampler_outputs, critic_outputs, e # sample some actions from action sampler and perturb them, then improve perturbations # where improvement is measured by the critic - sampled_actions = self.nets["action_sampler"](s_batch, goal_s_batch).detach() # don't backprop into samples + sampled_actions = self.nets["action_sampler"]( + s_batch, goal_s_batch + ).detach() # don't backprop into samples perturbed_actions = self.nets["actor"](s_batch, sampled_actions, goal_s_batch) - actor_loss = -(self.nets["critic"][0](s_batch, perturbed_actions, goal_s_batch)).mean() + actor_loss = -( + self.nets["critic"][0](s_batch, perturbed_actions, goal_s_batch) + ).mean() info["actor/loss"] = actor_loss if not no_backprop: @@ -387,7 +430,9 @@ def _train_actor_on_batch(self, batch, action_sampler_outputs, critic_outputs, e return info - def _get_target_values(self, next_states, goal_states, rewards, dones, action_sampler_outputs=None): + def _get_target_values( + self, next_states, goal_states, rewards, dones, action_sampler_outputs=None + ): """ Helper 
function to get target values for training Q-function with TD-loss. @@ -404,13 +449,17 @@ def _get_target_values(self, next_states, goal_states, rewards, dones, action_sa """ with torch.no_grad(): - # we need to stack the observations with redundancy @num_action_samples here, then decode + # we need to stack the observations with redundancy @num_action_samples here, then decode # to get all sampled actions. for example, if we generate 2 samples per observation and # the batch size is 3, then ob_tiled = [ob1; ob1; ob2; ob2; ob3; ob3] - next_states_tiled = ObsUtils.repeat_and_stack_observation(next_states, n=self.algo_config.critic.num_action_samples) + next_states_tiled = ObsUtils.repeat_and_stack_observation( + next_states, n=self.algo_config.critic.num_action_samples + ) goal_states_tiled = None if len(self.goal_shapes) > 0: - goal_states_tiled = ObsUtils.repeat_and_stack_observation(goal_states, n=self.algo_config.critic.num_action_samples) + goal_states_tiled = ObsUtils.repeat_and_stack_observation( + goal_states, n=self.algo_config.critic.num_action_samples + ) # sample action proposals next_sampled_actions = self._sample_actions_for_value_maximization( @@ -420,18 +469,20 @@ def _get_target_values(self, next_states, goal_states, rewards, dones, action_sa ) q_targets = self._get_target_values_from_sampled_actions( - next_states_tiled=next_states_tiled, - next_sampled_actions=next_sampled_actions, - goal_states_tiled=goal_states_tiled, - rewards=rewards, + next_states_tiled=next_states_tiled, + next_sampled_actions=next_sampled_actions, + goal_states_tiled=goal_states_tiled, + rewards=rewards, dones=dones, ) return q_targets - def _sample_actions_for_value_maximization(self, states_tiled, goal_states_tiled, for_target_update): + def _sample_actions_for_value_maximization( + self, states_tiled, goal_states_tiled, for_target_update + ): """ - Helper function to sample actions for maximization (the "batch-constrained" part of + Helper function to sample actions for maximization (the "batch-constrained" part of batch-constrained q-learning). Args: @@ -451,24 +502,30 @@ def _sample_actions_for_value_maximization(self, states_tiled, goal_states_tiled """ with torch.no_grad(): - sampled_actions = self.nets["action_sampler"](states_tiled, goal_states_tiled) + sampled_actions = self.nets["action_sampler"]( + states_tiled, goal_states_tiled + ) if self.algo_config.actor.enabled: actor = self.nets["actor"] if for_target_update: actor = self.nets["actor_target"] # perturb the actions with the policy - sampled_actions = actor(states_tiled, sampled_actions, goal_states_tiled) + sampled_actions = actor( + states_tiled, sampled_actions, goal_states_tiled + ) return sampled_actions - def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones): + def _get_target_values_from_sampled_actions( + self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones + ): """ Helper function to get target values for training Q-function with TD-loss. The function assumes that action candidates to maximize over have already been computed, and that the input states have been tiled (repeated) to be compatible with the sampled actions. Args: - next_states_tiled (dict): next observations to use for sampling actions. Assumes that + next_states_tiled (dict): next observations to use for sampling actions. 
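
For reference, a minimal sketch of the tiling pattern described in the hunk above, using torch.repeat_interleave as a stand-in for ObsUtils.repeat_and_stack_observation (keys and shapes are assumptions):

import torch

obs = {"state": torch.arange(3).float().unsqueeze(-1)}  # batch of 3 observations
n = 2
obs_tiled = {k: torch.repeat_interleave(v, n, dim=0) for k, v in obs.items()}
# leading dimension is now B * n, laid out as [ob1; ob1; ob2; ob2; ob3; ob3]
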
Assumes that tiling has already occurred - so that if the batch size is B, and N samples are desired for each observation in the batch, the leading dimension for each observation in the dict is B * N @@ -488,19 +545,23 @@ def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sample with torch.no_grad(): # feed tiled observations and sampled actions into the critics and then # reshape to get all Q-values in second dimension per observation in batch. - all_value_targets = self.nets["critic_target"][0](next_states_tiled, next_sampled_actions, goal_states_tiled).reshape( - -1, self.algo_config.critic.num_action_samples) + all_value_targets = self.nets["critic_target"][0]( + next_states_tiled, next_sampled_actions, goal_states_tiled + ).reshape(-1, self.algo_config.critic.num_action_samples) max_value_targets = all_value_targets min_value_targets = all_value_targets # TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates for critic_target in self.nets["critic_target"][1:]: - all_value_targets = critic_target(next_states_tiled, next_sampled_actions, goal_states_tiled).reshape( - -1, self.algo_config.critic.num_action_samples) + all_value_targets = critic_target( + next_states_tiled, next_sampled_actions, goal_states_tiled + ).reshape(-1, self.algo_config.critic.num_action_samples) max_value_targets = torch.max(max_value_targets, all_value_targets) min_value_targets = torch.min(min_value_targets, all_value_targets) - all_value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \ - (1. - self.algo_config.critic.ensemble.weight) * max_value_targets + all_value_targets = ( + self.algo_config.critic.ensemble.weight * min_value_targets + + (1.0 - self.algo_config.critic.ensemble.weight) * max_value_targets + ) # take maximum over all sampled action values per observation and compute targets value_targets = torch.max(all_value_targets, dim=1, keepdim=True)[0] @@ -555,12 +616,16 @@ def train_on_batch(self, batch, epoch, validate=False): info = PolicyAlgo.train_on_batch(self, batch, epoch, validate=validate) # Action Sampler training - no_action_sampler_backprop = validate or (not self._check_epoch(net_name="action_sampler", epoch=epoch)) + no_action_sampler_backprop = validate or ( + not self._check_epoch(net_name="action_sampler", epoch=epoch) + ) with TorchUtils.maybe_no_grad(no_grad=no_action_sampler_backprop): - action_sampler_info, action_sampler_outputs = self._train_action_sampler_on_batch( - batch=batch, - epoch=epoch, - no_backprop=no_action_sampler_backprop, + action_sampler_info, action_sampler_outputs = ( + self._train_action_sampler_on_batch( + batch=batch, + epoch=epoch, + no_backprop=no_action_sampler_backprop, + ) ) info.update(action_sampler_info) @@ -569,25 +634,29 @@ def train_on_batch(self, batch, epoch, validate=False): self.nets["action_sampler"].eval() # Critic training - no_critic_backprop = validate or (not self._check_epoch(net_name="critic", epoch=epoch)) + no_critic_backprop = validate or ( + not self._check_epoch(net_name="critic", epoch=epoch) + ) with TorchUtils.maybe_no_grad(no_grad=no_critic_backprop): critic_info, critic_outputs = self._train_critic_on_batch( - batch=batch, + batch=batch, action_sampler_outputs=action_sampler_outputs, - epoch=epoch, + epoch=epoch, no_backprop=no_critic_backprop, ) info.update(critic_info) if self.algo_config.actor.enabled: # Actor training - no_actor_backprop = validate or (not self._check_epoch(net_name="actor", epoch=epoch)) + no_actor_backprop = validate 
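
# Illustrative sketch (not part of the patch) of the weighted min/max ensemble
# target reformatted above; with weight = 1.0 it reduces to the pessimistic
# minimum over critics (the TD3-style trick). Shapes and values are assumptions.
import torch

q1, q2 = torch.randn(4, 10), torch.randn(4, 10)  # two critics, 10 action samples each
weight = 0.75
q_min, q_max = torch.min(q1, q2), torch.max(q1, q2)
blended = weight * q_min + (1.0 - weight) * q_max
value_target = torch.max(blended, dim=1, keepdim=True)[0]  # best sampled action per obs
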
or (
+ not self._check_epoch(net_name="actor", epoch=epoch)
+ )
 with TorchUtils.maybe_no_grad(no_grad=no_actor_backprop):
 actor_info = self._train_actor_on_batch(
- batch=batch,
- action_sampler_outputs=action_sampler_outputs,
- critic_outputs=critic_outputs,
- epoch=epoch,
+ batch=batch,
+ action_sampler_outputs=action_sampler_outputs,
+ critic_outputs=critic_outputs,
+ epoch=epoch,
 no_backprop=no_actor_backprop,
 )
 info.update(actor_info)
@@ -601,8 +670,8 @@ def train_on_batch(self, batch, epoch, validate=False):
 with torch.no_grad():
 for critic_ind in range(len(self.nets["critic"])):
 TorchUtils.soft_update(
- source=self.nets["critic"][critic_ind],
- target=self.nets["critic_target"][critic_ind],
+ source=self.nets["critic"][critic_ind],
+ target=self.nets["critic_target"][critic_ind],
 tau=self.algo_config.target_tau,
 )
@@ -610,8 +679,8 @@ def train_on_batch(self, batch, epoch, validate=False):
 if self.algo_config.actor.enabled and (not no_actor_backprop):
 with torch.no_grad():
 TorchUtils.soft_update(
- source=self.nets["actor"],
- target=self.nets["actor_target"],
+ source=self.nets["actor"],
+ target=self.nets["actor_target"],
 tau=self.algo_config.target_tau,
 )
@@ -636,15 +705,22 @@ def log_info(self, info):
 optims = [self.optimizers[k]]
 if k == "critic":
 # account for critic having one optimizer per ensemble member
- keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))]
+ keys = [
+ "{}{}".format(k, critic_ind)
+ for critic_ind in range(len(self.nets["critic"]))
+ ]
 optims = self.optimizers[k]
 for kp, optimizer in zip(keys, optims):
 for i, param_group in enumerate(optimizer.param_groups):
 loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"]
 # extract relevant logs for action sampler, critic, and actor
- loss_log["Loss"] = 0.
+ loss_log["Loss"] = 0.0
- for loss_logger in [self._log_action_sampler_info, self._log_critic_info, self._log_actor_info]:
+ for loss_logger in [
+ self._log_action_sampler_info,
+ self._log_critic_info,
+ self._log_actor_info,
+ ]:
 this_log = loss_logger(info)
 if "Loss" in this_log:
 # manually merge total loss
@@ -660,12 +736,18 @@ def _log_action_sampler_info(self, info):
 """
 loss_log = OrderedDict()
 loss_log["Action_Sampler/Loss"] = info["action_sampler/loss"].item()
- loss_log["Action_Sampler/Reconsruction_Loss"] = info["action_sampler/recons_loss"].item()
+ loss_log["Action_Sampler/Reconstruction_Loss"] = info[
+ "action_sampler/recons_loss"
+ ].item()
 loss_log["Action_Sampler/KL_Loss"] = info["action_sampler/kl_loss"].item()
 if self.algo_config.action_sampler.vae.prior.use_categorical:
- loss_log["Action_Sampler/Gumbel_Temperature"] = self.nets["action_sampler"].get_gumbel_temperature()
+ loss_log["Action_Sampler/Gumbel_Temperature"] = self.nets[
+ "action_sampler"
+ ].get_gumbel_temperature()
 else:
- loss_log["Action_Sampler/Encoder_Variance"] = info["action_sampler/encoder_variance"].item()
+ loss_log["Action_Sampler/Encoder_Variance"] = info[
+ "action_sampler/encoder_variance"
+ ].item()
 if "action_sampler/grad_norms" in info:
 loss_log["Action_Sampler/Grad_Norms"] = info["action_sampler/grad_norms"]
 loss_log["Loss"] = loss_log["Action_Sampler/Loss"]
@@ -677,14 +759,20 @@ def _log_critic_info(self, info):
 """
 loss_log = OrderedDict()
 if "done_masks" in info:
- loss_log["Critic/Done_Mask_Percentage"] = 100.
* torch.mean(info["done_masks"]).item() + loss_log["Critic/Done_Mask_Percentage"] = ( + 100.0 * torch.mean(info["done_masks"]).item() + ) if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0. + loss_log["Loss"] = 0.0 for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ + "critic/critic{}_loss".format(critic_ind + 1) + ].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ + "critic/critic{}_grad_norms".format(critic_ind + 1) + ] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] return loss_log @@ -732,10 +820,10 @@ def on_epoch_end(self, epoch): def _get_best_value(self, obs_dict, goal_dict=None): """ - Internal helper function for getting the best value for a given state and - the corresponding best action. Meant to be used at test-time. Key differences - between this and retrieving target values at train-time are that (1) only a - single critic is used for the value estimate and (2) the critic and actor + Internal helper function for getting the best value for a given state and + the corresponding best action. Meant to be used at test-time. Key differences + between this and retrieving target values at train-time are that (1) only a + single critic is used for the value estimate and (2) the critic and actor are used instead of the target critic and target actor. Args: @@ -754,16 +842,18 @@ def _get_best_value(self, obs_dict, goal_dict=None): # number of action proposals from action sampler num_action_samples = self.algo_config.critic.num_action_samples_rollout - # we need to stack the observations with redundancy @num_action_samples here, then decode + # we need to stack the observations with redundancy @num_action_samples here, then decode # to get all sampled actions. for example, if we generate 2 samples per observation and # the batch size is 3, then ob_tiled = [ob1; ob1; ob2; ob2; ob3; ob3] ob_tiled = ObsUtils.repeat_and_stack_observation(obs_dict, n=num_action_samples) goal_tiled = None if len(self.goal_shapes) > 0: - goal_tiled = ObsUtils.repeat_and_stack_observation(goal_dict, n=num_action_samples) + goal_tiled = ObsUtils.repeat_and_stack_observation( + goal_dict, n=num_action_samples + ) sampled_actions = self._sample_actions_for_value_maximization( - states_tiled=ob_tiled, + states_tiled=ob_tiled, goal_states_tiled=goal_tiled, for_target_update=False, ) @@ -771,12 +861,16 @@ def _get_best_value(self, obs_dict, goal_dict=None): # feed tiled observations and perturbed sampled actions into the critic and then # reshape to get all Q-values in second dimension per observation in batch. 
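
# Illustrative sketch (not part of the patch) of the reshape/argmax/gather
# pattern used in this hunk, with assumed sizes: B observations, N action
# samples per observation, A action dimensions.
import torch

B, N, A = 3, 4, 7
all_values = torch.randn(B * N, 1)       # critic outputs for tiled inputs
sampled_actions = torch.randn(B * N, A)
values = all_values.reshape(B, N)
best_idx = torch.argmax(values, dim=1)   # (B,)
best_action = sampled_actions.reshape(B, N, A)[torch.arange(B), best_idx]    # (B, A)
best_value = values[torch.arange(B), best_idx].unsqueeze(1)                  # (B, 1)
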
# finally, just take a maximum across that second dimension to take the best sampled action - all_critic_values = self.nets["critic"][0](ob_tiled, sampled_actions, goal_tiled).reshape(-1, num_action_samples) + all_critic_values = self.nets["critic"][0]( + ob_tiled, sampled_actions, goal_tiled + ).reshape(-1, num_action_samples) best_action_index = torch.argmax(all_critic_values, dim=1) all_actions = sampled_actions.reshape(batch_size, num_action_samples, -1) best_action = all_actions[torch.arange(all_actions.shape[0]), best_action_index] - best_value = all_critic_values[torch.arange(all_critic_values.shape[0]), best_action_index].unsqueeze(1) + best_value = all_critic_values[ + torch.arange(all_critic_values.shape[0]), best_action_index + ].unsqueeze(1) return best_value, best_action @@ -834,6 +928,7 @@ class BCQ_GMM(BCQ): A simple modification to BCQ that replaces the VAE used to sample action proposals from the batch with a GMM. """ + def _create_action_sampler(self): """ Called in @_create_networks to make action sampler network. @@ -850,7 +945,9 @@ def _create_action_sampler(self): min_std=self.algo_config.action_sampler.gmm.min_std, std_activation=self.algo_config.action_sampler.gmm.std_activation, low_noise_eval=self.algo_config.action_sampler.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): @@ -877,7 +974,7 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): # GMM forward dists = self.nets["action_sampler"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) @@ -916,6 +1013,7 @@ class BCQ_Distributional(BCQ): distributions over a discrete set of values instead of expected returns. Some parts of this implementation were adapted from ACME (https://github.com/deepmind/acme). """ + def _create_critics(self): """ Called in @_create_networks to make critic networks. @@ -929,7 +1027,9 @@ def _create_critics(self): value_bounds=self.algo_config.critic.value_bounds, num_atoms=self.algo_config.critic.distributional.num_atoms, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) # Q network ensemble and target ensemble @@ -943,13 +1043,15 @@ def _create_critics(self): critic_target = critic_class(**critic_args) self.nets["critic_target"].append(critic_target) - def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones): + def _get_target_values_from_sampled_actions( + self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones + ): """ Helper function to get target values for training Q-function with TD-loss. Update from superclass to account for distributional value functions. Args: - next_states_tiled (dict): next observations to use for sampling actions. Assumes that + next_states_tiled (dict): next observations to use for sampling actions. 
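
For reference, a minimal sketch of how a categorical value distribution yields the expected returns that the reformatted code below maximizes over sampled actions; the atom grid and probabilities here are assumptions, not robomimic's API:

import torch

B, N, num_atoms = 2, 3, 51
atoms = torch.linspace(-1.0, 1.0, num_atoms)                  # canonical value grid
probs = torch.softmax(torch.randn(B * N, num_atoms), dim=-1)  # per-sample distributions
expected = (probs * atoms).sum(dim=-1).reshape(B, N)          # expected return per action sample
best_action_index = torch.argmax(expected, dim=1)
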
Assumes that tiling has already occurred - so that if the batch size is B, and N samples are desired for each observation in the batch, the leading dimension for each observation in the dict is B * N @@ -970,15 +1072,29 @@ def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sample with torch.no_grad(): # compute expected returns of the sampled actions and maximize to find the best action - all_vds = self.nets["critic_target"][0].forward_train(next_states_tiled, next_sampled_actions, goal_states_tiled) - expected_values = all_vds.mean().reshape(-1, self.algo_config.critic.num_action_samples) + all_vds = self.nets["critic_target"][0].forward_train( + next_states_tiled, next_sampled_actions, goal_states_tiled + ) + expected_values = all_vds.mean().reshape( + -1, self.algo_config.critic.num_action_samples + ) best_action_index = torch.argmax(expected_values, dim=1) - all_actions = next_sampled_actions.reshape(-1, self.algo_config.critic.num_action_samples, self.ac_dim) - best_action = all_actions[torch.arange(all_actions.shape[0]), best_action_index] + all_actions = next_sampled_actions.reshape( + -1, self.algo_config.critic.num_action_samples, self.ac_dim + ) + best_action = all_actions[ + torch.arange(all_actions.shape[0]), best_action_index + ] # get the corresponding probabilities for the categorical distributions corresponding to the best actions - all_vd_probs = all_vds.probs.reshape(-1, self.algo_config.critic.num_action_samples, self.algo_config.critic.distributional.num_atoms) - target_vd_probs = all_vd_probs[torch.arange(all_vd_probs.shape[0]), best_action_index] + all_vd_probs = all_vds.probs.reshape( + -1, + self.algo_config.critic.num_action_samples, + self.algo_config.critic.distributional.num_atoms, + ) + target_vd_probs = all_vd_probs[ + torch.arange(all_vd_probs.shape[0]), best_action_index + ] # bellman backup to get a new grid of values - then project onto the canonical atoms to obtain a # target set of categorical probabilities over the atoms @@ -1018,5 +1134,5 @@ def _compute_critic_loss(self, critic, states, actions, goal_states, q_targets): # this should be the equivalent of softmax with logits from tf vd = critic.forward_train(states, actions, goal_states) log_probs = F.log_softmax(vd.logits, dim=-1) - critic_loss = nn.KLDivLoss(reduction='batchmean')(log_probs, q_targets) + critic_loss = nn.KLDivLoss(reduction="batchmean")(log_probs, q_targets) return critic_loss, None diff --git a/robomimic/algo/cql.py b/robomimic/algo/cql.py index 0c24d50a..7eb17106 100644 --- a/robomimic/algo/cql.py +++ b/robomimic/algo/cql.py @@ -3,6 +3,7 @@ Based off of https://github.com/aviralkumar2907/CQL. (Paper - https://arxiv.org/abs/2006.04779). """ + import numpy as np from collections import OrderedDict @@ -39,42 +40,66 @@ class CQL(PolicyAlgo, ValueAlgo): """ CQL-extension of SAC for the off-policy, offline setting. 
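
For reference, a minimal sketch of the SAC-style automatic entropy-weight tuning that the hunks below reformat, assuming a learnable scalar log_alpha and a target entropy of -action_dim (all values here are illustrative):

import torch

log_alpha = torch.zeros(1, requires_grad=True)
target_entropy = -7.0                    # e.g. a 7-dim action space
log_prob = torch.randn(256, 1)           # log pi(a|s) over a batch
alpha_loss = -(log_alpha * (log_prob + target_entropy).detach()).mean()
alpha_loss.backward()                    # gradient reaches only log_alpha
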
See https://arxiv.org/abs/2006.04779 """ + def __init__(self, **kwargs): # Store entropy / cql settings first since the super init call requires them - self.automatic_entropy_tuning = kwargs["algo_config"].actor.target_entropy is not None - self.automatic_cql_tuning = kwargs["algo_config"].critic.target_q_gap is not None and \ - kwargs["algo_config"].critic.target_q_gap >= 0.0 + self.automatic_entropy_tuning = ( + kwargs["algo_config"].actor.target_entropy is not None + ) + self.automatic_cql_tuning = ( + kwargs["algo_config"].critic.target_q_gap is not None + and kwargs["algo_config"].critic.target_q_gap >= 0.0 + ) # Run super init first super().__init__(**kwargs) # Reward settings self.n_step = self.algo_config.n_step - self.discount = self.algo_config.discount ** self.n_step + self.discount = self.algo_config.discount**self.n_step # Now also store additional SAC- and CQL-specific stuff from the config self._num_batch_steps = 0 self.bc_start_steps = self.algo_config.actor.bc_start_steps self.deterministic_backup = self.algo_config.critic.deterministic_backup - self.td_loss_fcn = nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() + self.td_loss_fcn = ( + nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() + ) # Entropy settings - self.target_entropy = -np.prod(self.ac_dim) if self.algo_config.actor.target_entropy in {None, "default"} else\ - self.algo_config.actor.target_entropy + self.target_entropy = ( + -np.prod(self.ac_dim) + if self.algo_config.actor.target_entropy in {None, "default"} + else self.algo_config.actor.target_entropy + ) # CQL settings self.min_q_weight = self.algo_config.critic.min_q_weight - self.target_q_gap = self.algo_config.critic.target_q_gap if self.automatic_cql_tuning else 0.0 + self.target_q_gap = ( + self.algo_config.critic.target_q_gap if self.automatic_cql_tuning else 0.0 + ) @property def log_entropy_weight(self): - return self.nets["log_entropy_weight"]() if self.automatic_entropy_tuning else\ - torch.zeros(1, requires_grad=False, device=self.device) + return ( + self.nets["log_entropy_weight"]() + if self.automatic_entropy_tuning + else torch.zeros(1, requires_grad=False, device=self.device) + ) @property def log_cql_weight(self): - return self.nets["log_cql_weight"]() if self.automatic_cql_tuning else\ - torch.log(torch.tensor(self.algo_config.critic.cql_weight, requires_grad=False, device=self.device)) + return ( + self.nets["log_cql_weight"]() + if self.automatic_cql_tuning + else torch.log( + torch.tensor( + self.algo_config.critic.cql_weight, + requires_grad=False, + device=self.device, + ) + ) + ) def _create_networks(self): """ @@ -95,9 +120,11 @@ def _create_networks(self): actor_args.update(dict(self.algo_config.actor.net.gaussian)) else: # Unsupported actor type! - raise ValueError(f"Unsupported actor requested. " - f"Requested: {self.algo_config.actor.net.type}, " - f"valid options are: {['gaussian']}") + raise ValueError( + f"Unsupported actor requested. 
" + f"Requested: {self.algo_config.actor.net.type}, " + f"valid options are: {['gaussian']}" + ) # Policy self.nets["actor"] = actor_cls( @@ -105,7 +132,9 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **actor_args, ) @@ -120,7 +149,9 @@ def _create_networks(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) net_list.append(critic) @@ -137,7 +168,9 @@ def _create_networks(self): # sync target networks at beginning of training with torch.no_grad(): - for critic, critic_target in zip(self.nets["critic"], self.nets["critic_target"]): + for critic, critic_target in zip( + self.nets["critic"], self.nets["critic_target"] + ): TorchUtils.hard_update( source=critic, target=critic_target, @@ -193,19 +226,25 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = {k: batch["next_obs"][k][:, self.n_step - 1, :] for k in batch["next_obs"]} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["next_obs"] = { + k: batch["next_obs"][k][:, self.n_step - 1, :] for k in batch["next_obs"] + } + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last dimension of 1 to be compatible with model outputs # single timestep reward is discounted sum of intermediate rewards in sequence - reward_seq = batch["rewards"][:, :self.n_step] - discounts = torch.pow(self.algo_config.discount, torch.arange(self.n_step).float()).unsqueeze(0) + reward_seq = batch["rewards"][:, : self.n_step] + discounts = torch.pow( + self.algo_config.discount, torch.arange(self.n_step).float() + ).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # consider this n-step seqeunce done if any intermediate dones are present - done_seq = batch["dones"][:, :self.n_step] + done_seq = batch["dones"][:, : self.n_step] input_batch["dones"] = (done_seq.sum(dim=1) > 0).float().unsqueeze(1) # we move to device first before float conversion because image observation modalities will be uint8 - @@ -283,29 +322,44 @@ def _train_policy_on_batch(self, batch, epoch, validate=False): info = OrderedDict() # Sample actions from policy and get log probs - dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) + dist = self.nets["actor"].forward_train( + obs_dict=batch["obs"], goal_dict=batch["goal_obs"] + ) actions, log_prob = self._get_actions_and_log_prob(dist=dist) # Calculate alpha - entropy_weight_loss = -(self.log_entropy_weight * (log_prob + self.target_entropy).detach()).mean() if\ - self.automatic_entropy_tuning else 0.0 + entropy_weight_loss = ( + -( + self.log_entropy_weight * (log_prob + self.target_entropy).detach() + ).mean() + if self.automatic_entropy_tuning + else 0.0 + ) entropy_weight = self.log_entropy_weight.exp() # Get predicted Q-values for all state, action 
pairs - pred_qs = [critic(obs_dict=batch["obs"], acts=actions, goal_dict=batch["goal_obs"]) - for critic in self.nets["critic"]] + pred_qs = [ + critic(obs_dict=batch["obs"], acts=actions, goal_dict=batch["goal_obs"]) + for critic in self.nets["critic"] + ] # We take the minimum for stability pred_qs, _ = torch.cat(pred_qs, dim=1).min(dim=1, keepdim=True) # Use BC if we're in the beginning of training, otherwise calculate policy loss normally - baseline = dist.log_prob(batch["actions"]).unsqueeze(dim=-1) if\ - self._num_batch_steps < self.bc_start_steps else pred_qs + baseline = ( + dist.log_prob(batch["actions"]).unsqueeze(dim=-1) + if self._num_batch_steps < self.bc_start_steps + else pred_qs + ) policy_loss = (entropy_weight * log_prob - baseline).mean() # Add info info["entropy_weight"] = entropy_weight.item() - info["entropy_weight_loss"] = entropy_weight_loss.item() if \ - self.automatic_entropy_tuning else entropy_weight_loss + info["entropy_weight_loss"] = ( + entropy_weight_loss.item() + if self.automatic_entropy_tuning + else entropy_weight_loss + ) info["actor/loss"] = policy_loss # Take a training step if we're not validating @@ -317,7 +371,9 @@ def _train_policy_on_batch(self, batch, epoch, validate=False): self.optimizers["entropy"].zero_grad() entropy_weight_loss.backward() self.optimizers["entropy"].step() - info["entropy_grad_norms"] = self.log_entropy_weight.grad.data.norm(2).pow(2).item() + info["entropy_grad_norms"] = ( + self.log_entropy_weight.grad.data.norm(2).pow(2).item() + ) # Policy actor_grad_norms = TorchUtils.backprop_for_loss( @@ -387,12 +443,22 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): N = self.algo_config.critic.num_random_actions # Get predicted Q-values from taken actions - q_preds = [critic(obs_dict=batch["obs"], acts=batch["actions"], goal_dict=batch["goal_obs"]) - for critic in self.nets["critic"]] + q_preds = [ + critic( + obs_dict=batch["obs"], + acts=batch["actions"], + goal_dict=batch["goal_obs"], + ) + for critic in self.nets["critic"] + ] # Sample actions at the current and next step - curr_dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) - next_dist = self.nets["actor"].forward_train(obs_dict=batch["next_obs"], goal_dict=batch["goal_obs"]) + curr_dist = self.nets["actor"].forward_train( + obs_dict=batch["obs"], goal_dict=batch["goal_obs"] + ) + next_dist = self.nets["actor"].forward_train( + obs_dict=batch["next_obs"], goal_dict=batch["goal_obs"] + ) next_actions, next_log_prob = self._get_actions_and_log_prob(dist=next_dist) # Don't capture gradients here, since the critic target network doesn't get trained (only soft updated) @@ -400,43 +466,88 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # We take the max over all samples if the number of action samples is > 1 if self.algo_config.critic.num_action_samples > 1: # Generate the target q values, using the backup from the next state - temp_actions = next_dist.rsample(sample_shape=(self.algo_config.critic.num_action_samples,)).permute(1, 0, 2) - target_qs = [self._get_qs_from_actions( - obs_dict=batch["next_obs"], actions=temp_actions, goal_dict=batch["goal_obs"], q_net=critic) - .max(dim=1, keepdim=True)[0] for critic in self.nets["critic_target"]] + temp_actions = next_dist.rsample( + sample_shape=(self.algo_config.critic.num_action_samples,) + ).permute(1, 0, 2) + target_qs = [ + self._get_qs_from_actions( + obs_dict=batch["next_obs"], + actions=temp_actions, + goal_dict=batch["goal_obs"], + q_net=critic, + 
).max(dim=1, keepdim=True)[0] + for critic in self.nets["critic_target"] + ] else: - target_qs = [critic(obs_dict=batch["next_obs"], acts=next_actions, goal_dict=batch["goal_obs"]) - for critic in self.nets["critic_target"]] + target_qs = [ + critic( + obs_dict=batch["next_obs"], + acts=next_actions, + goal_dict=batch["goal_obs"], + ) + for critic in self.nets["critic_target"] + ] # Take the minimum over all critics target_qs, _ = torch.cat(target_qs, dim=1).min(dim=1, keepdim=True) # If only sampled once from each critic and not using a deterministic backup, subtract the logprob as well - if self.algo_config.critic.num_action_samples == 1 and not self.deterministic_backup: + if ( + self.algo_config.critic.num_action_samples == 1 + and not self.deterministic_backup + ): target_qs = target_qs - self.log_entropy_weight.exp() * next_log_prob # Calculate the q target values - done_mask_batch = 1. - batch["dones"] + done_mask_batch = 1.0 - batch["dones"] info["done_masks"] = done_mask_batch q_target = batch["rewards"] + done_mask_batch * self.discount * target_qs # Calculate CQL stuff - cql_random_actions = torch.FloatTensor(N, B, A).uniform_(-1., 1.).to(self.device) # shape (N, B, A) - cql_random_log_prob = np.log(0.5 ** A) - cql_curr_actions, cql_curr_log_prob = self._get_actions_and_log_prob(dist=curr_dist, sample_shape=(N,)) # shape (N, B, A) and (N, B, 1) - cql_next_actions, cql_next_log_prob = self._get_actions_and_log_prob(dist=next_dist, sample_shape=(N,)) # shape (N, B, A) and (N, B, 1) - cql_curr_log_prob = cql_curr_log_prob.squeeze(dim=-1).permute(1, 0).detach() # shape (B, N) - cql_next_log_prob = cql_next_log_prob.squeeze(dim=-1).permute(1, 0).detach() # shape (B, N) - q_cats = [] # Each entry shape will be (B, N) + cql_random_actions = ( + torch.FloatTensor(N, B, A).uniform_(-1.0, 1.0).to(self.device) + ) # shape (N, B, A) + cql_random_log_prob = np.log(0.5**A) + cql_curr_actions, cql_curr_log_prob = self._get_actions_and_log_prob( + dist=curr_dist, sample_shape=(N,) + ) # shape (N, B, A) and (N, B, 1) + cql_next_actions, cql_next_log_prob = self._get_actions_and_log_prob( + dist=next_dist, sample_shape=(N,) + ) # shape (N, B, A) and (N, B, 1) + cql_curr_log_prob = ( + cql_curr_log_prob.squeeze(dim=-1).permute(1, 0).detach() + ) # shape (B, N) + cql_next_log_prob = ( + cql_next_log_prob.squeeze(dim=-1).permute(1, 0).detach() + ) # shape (B, N) + q_cats = [] # Each entry shape will be (B, N) for critic, q_pred in zip(self.nets["critic"], q_preds): # Compose Q values over all sampled actions (importance sampled) - q_rand = self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_random_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) - q_curr = self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_curr_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) - q_next = self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_next_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) - q_cat = torch.cat([ - q_rand - cql_random_log_prob, - q_next - cql_next_log_prob, - q_curr - cql_curr_log_prob, - ], dim=1) # shape (B, 3 * N) + q_rand = self._get_qs_from_actions( + obs_dict=batch["obs"], + actions=cql_random_actions.permute(1, 0, 2), + goal_dict=batch["goal_obs"], + q_net=critic, + ) + q_curr = self._get_qs_from_actions( + obs_dict=batch["obs"], + actions=cql_curr_actions.permute(1, 0, 2), + goal_dict=batch["goal_obs"], + q_net=critic, + ) + q_next = self._get_qs_from_actions( + obs_dict=batch["obs"], + 
actions=cql_next_actions.permute(1, 0, 2), + goal_dict=batch["goal_obs"], + q_net=critic, + ) + q_cat = torch.cat( + [ + q_rand - cql_random_log_prob, + q_next - cql_next_log_prob, + q_curr - cql_curr_log_prob, + ], + dim=1, + ) # shape (B, 3 * N) q_cats.append(q_cat) # Calculate the losses for all critics @@ -448,8 +559,11 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # Calculate td error loss td_loss = self.td_loss_fcn(q_pred, q_target) # Calculate cql loss - cql_loss = cql_weight * (self.min_q_weight * (torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean()) - - self.target_q_gap) + cql_loss = cql_weight * ( + self.min_q_weight + * (torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean()) + - self.target_q_gap + ) cql_losses.append(cql_loss) # Calculate total loss loss = td_loss + cql_loss @@ -461,18 +575,26 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # Train CQL weight if tuning automatically if self.automatic_cql_tuning: cql_weight_loss = -torch.stack(cql_losses).mean() - info[ - "critic/cql_weight_loss"] = cql_weight_loss.item() # Make sure to not store computation graph since we retain graph after backward() call + info["critic/cql_weight_loss"] = ( + cql_weight_loss.item() + ) # Make sure to not store computation graph since we retain graph after backward() call self.optimizers["cql"].zero_grad() cql_weight_loss.backward(retain_graph=True) self.optimizers["cql"].step() - info["critic/cql_grad_norms"] = self.log_cql_weight.grad.data.norm(2).pow(2).item() + info["critic/cql_grad_norms"] = ( + self.log_cql_weight.grad.data.norm(2).pow(2).item() + ) # Train critics - for i, (critic_loss, critic, critic_target, optimizer) in enumerate(zip( - critic_losses, self.nets["critic"], self.nets["critic_target"], self.optimizers["critic"] - )): - retain_graph = (i < (len(critic_losses) - 1)) + for i, (critic_loss, critic, critic_target, optimizer) in enumerate( + zip( + critic_losses, + self.nets["critic"], + self.nets["critic_target"], + self.optimizers["critic"], + ) + ): + retain_graph = i < (len(critic_losses) - 1) critic_grad_norms = TorchUtils.backprop_for_loss( net=critic, optim=optimizer, @@ -482,7 +604,11 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): ) info[f"critic/critic{i+1}_grad_norms"] = critic_grad_norms with torch.no_grad(): - TorchUtils.soft_update(source=critic, target=critic_target, tau=self.algo_config.target_tau) + TorchUtils.soft_update( + source=critic, + target=critic_target, + tau=self.algo_config.target_tau, + ) # Return stats return info @@ -502,8 +628,12 @@ def _get_actions_and_log_prob(self, dist, sample_shape=torch.Size()): """ # Process networks with tanh differently than normal distributions if self.algo_config.actor.net.common.use_tanh: - actions, actions_pre_tanh = dist.rsample(sample_shape=sample_shape, return_pretanh_value=True) - log_prob = dist.log_prob(actions, pre_tanh_value=actions_pre_tanh).unsqueeze(dim=-1) + actions, actions_pre_tanh = dist.rsample( + sample_shape=sample_shape, return_pretanh_value=True + ) + log_prob = dist.log_prob( + actions, pre_tanh_value=actions_pre_tanh + ).unsqueeze(dim=-1) else: actions = dist.rsample(sample_shape=sample_shape) log_prob = dist.log_prob(actions) @@ -532,7 +662,11 @@ def _get_qs_from_actions(obs_dict, actions, goal_dict, q_net): goal_dict_stacked = ObsUtils.repeat_and_stack_observation(goal_dict, N) # Pass the obs and (flattened) actions through to get the Q values - qs = q_net(obs_dict=obs_dict_stacked, acts=actions.reshape(-1, D), 
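
# Illustrative sketch (not part of the patch) of the conservative penalty
# assembled in this hunk: a logsumexp over Q-values of sampled actions minus
# the Q-value on dataset actions. Sizes and weight values are assumptions.
import torch

B, N = 8, 10
q_cat = torch.randn(B, 3 * N)   # importance-corrected Qs for 3*N sampled actions
q_pred = torch.randn(B, 1)      # Q on dataset actions
min_q_weight, cql_weight, target_q_gap = 1.0, 1.0, 0.0
cql_loss = cql_weight * (
    min_q_weight * (torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean())
    - target_q_gap
)
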
goal_dict=goal_dict_stacked) + qs = q_net( + obs_dict=obs_dict_stacked, + acts=actions.reshape(-1, D), + goal_dict=goal_dict_stacked, + ) # Unflatten output qs = qs.reshape(B, N) @@ -558,14 +692,17 @@ def log_info(self, info): optims = [self.optimizers[k]] if k == "critic": # account for critic having one optimizer per ensemble member - keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))] + keys = [ + "{}{}".format(k, critic_ind) + for critic_ind in range(len(self.nets["critic"])) + ] optims = self.optimizers[k] for kp, optimizer in zip(keys, optims): for i, param_group in enumerate(optimizer.param_groups): loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"] # extract relevant logs for critic, and actor - loss_log["Loss"] = 0. + loss_log["Loss"] = 0.0 for loss_logger in [self._log_critic_info, self._log_actor_info]: this_log = loss_logger(info) if "Loss" in this_log: @@ -582,14 +719,20 @@ def _log_critic_info(self, info): """ loss_log = OrderedDict() if "done_masks" in info: - loss_log["Critic/Done_Mask_Percentage"] = 100. * torch.mean(info["done_masks"]).item() + loss_log["Critic/Done_Mask_Percentage"] = ( + 100.0 * torch.mean(info["done_masks"]).item() + ) if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0. + loss_log["Loss"] = 0.0 for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ + "critic/critic{}_loss".format(critic_ind + 1) + ].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ + "critic/critic{}_grad_norms".format(critic_ind + 1) + ] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] if "critic/cql_weight_loss" in info: loss_log["Critic/CQL_Weight"] = info["critic/cql_weight"] diff --git a/robomimic/algo/gl.py b/robomimic/algo/gl.py index 24ae8008..fc6702a4 100644 --- a/robomimic/algo/gl.py +++ b/robomimic/algo/gl.py @@ -1,6 +1,7 @@ """ Subgoal prediction models, used in HBC / IRIS. """ + import numpy as np from collections import OrderedDict from copy import deepcopy @@ -38,14 +39,9 @@ class GL(PlannerAlgo): """ Implements goal prediction component for HBC and IRIS. 
""" + def __init__( - self, - algo_config, - obs_config, - global_config, - obs_key_shapes, - ac_dim, - device + self, algo_config, obs_config, global_config, obs_key_shapes, ac_dim, device ): """ Args: @@ -71,7 +67,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) def _create_networks(self): @@ -87,10 +83,12 @@ def _create_networks(self): # deterministic goal prediction network self.nets["goal_network"] = ObsNets.MIMO_MLP( - input_obs_group_shapes=obs_group_shapes, + input_obs_group_shapes=obs_group_shapes, output_shapes=self.subgoal_shapes, layer_dims=self.algo_config.ae.planner_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets = self.nets.float().to(self.device) @@ -106,22 +104,29 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() # remove temporal batches for all except scalar signals (to be compatible with model outputs) - input_batch["obs"] = { k: batch["obs"][k][:, 0, :] for k in batch["obs"] } + input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} # extract multi-horizon subgoal target - input_batch["subgoals"] = {k: batch["next_obs"][k][:, self._subgoal_horizon - 1, :] for k in batch["next_obs"]} + input_batch["subgoals"] = { + k: batch["next_obs"][k][:, self._subgoal_horizon - 1, :] + for k in batch["next_obs"] + } input_batch["target_subgoals"] = input_batch["subgoals"] - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) - def get_actor_goal_for_training_from_processed_batch(self, processed_batch, **kwargs): + def get_actor_goal_for_training_from_processed_batch( + self, processed_batch, **kwargs + ): """ Retrieve subgoals from processed batch to use for training the actor. Subclasses can modify this function to change the subgoals. @@ -155,14 +160,18 @@ def train_on_batch(self, batch, epoch, validate=False): info = super(GL, self).train_on_batch(batch, epoch, validate=validate) # predict subgoal observations with goal network - pred_subgoals = self.nets["goal_network"](obs=batch["obs"], goal=batch["goal_obs"]) + pred_subgoals = self.nets["goal_network"]( + obs=batch["obs"], goal=batch["goal_obs"] + ) # compute loss as L2 error for each observation key losses = OrderedDict() target_subgoals = batch["target_subgoals"] # targets for network prediction - goal_loss = 0. + goal_loss = 0.0 for k in pred_subgoals: - assert pred_subgoals[k].shape == target_subgoals[k].shape, "mismatch in predicted and target subgoals!" + assert ( + pred_subgoals[k].shape == target_subgoals[k].shape + ), "mismatch in predicted and target subgoals!" 
mode_loss = nn.MSELoss()(pred_subgoals[k], target_subgoals[k]) goal_loss += mode_loss losses["goal_{}_loss".format(k)] = mode_loss @@ -218,7 +227,7 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): """ Sample @num_samples subgoals from the network per observation. - Since this class implements a deterministic subgoal prediction, + Since this class implements a deterministic subgoal prediction, this function returns identical subgoals for each input observation. Args: @@ -238,7 +247,9 @@ def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): # [batch_size * num_samples, ...] goals = self.get_subgoal_predictions(obs_dict=obs_tiled, goal_dict=goal_tiled) # reshape to [batch_size, num_samples, ...] - return TensorUtils.reshape_dimensions(goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples)) + return TensorUtils.reshape_dimensions( + goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples) + ) def get_action(self, obs_dict, goal_dict=None): """ @@ -258,6 +269,7 @@ class GL_VAE(GL): """ Implements goal prediction via VAE. """ + def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -270,7 +282,9 @@ def _create_networks(self): condition_shapes=self.obs_shapes, goal_shapes=self.goal_shapes, device=self.device, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **VAENets.vae_args_from_config(self.algo_config.vae), ) @@ -286,7 +300,7 @@ def get_actor_goal_for_training_from_processed_batch( ): """ Modify from superclass to support a @use_latent_subgoals option. - The VAE can optionally return latent subgoals by passing the subgoal + The VAE can optionally return latent subgoals by passing the subgoal observations in the batch through the encoder. Args: @@ -298,8 +312,8 @@ def get_actor_goal_for_training_from_processed_batch( use_prior_correction (bool): if True, use a "prior correction" trick to choose a latent subgoal sampled from the prior that is close to the - latent from the VAE encoder (posterior). This can help with issues at - test-time where the encoder latent distribution might not match + latent from the VAE encoder (posterior). This can help with issues at + test-time where the encoder latent distribution might not match the prior latent distribution. 
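
For reference, a minimal sketch of the nearest-neighbour "prior correction" this docstring describes: for each posterior latent, keep the closest of N prior samples (all tensors below are stand-ins):

import torch

B, N, Z = 4, 8, 16
prior_z_samples = torch.randn(B, N, Z)  # N prior samples per batch element
posterior_z = torch.randn(B, Z)         # encoder (posterior) latents
distances = torch.norm(prior_z_samples - posterior_z.unsqueeze(1), dim=-1)  # (B, N)
neighbors = torch.argmin(distances, dim=1)
latent_subgoals = prior_z_samples[torch.arange(B), neighbors]               # (B, Z)
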
num_prior_samples (int): number of VAE prior samples to take and choose among, @@ -315,16 +329,18 @@ def get_actor_goal_for_training_from_processed_batch( # batch variables obs = processed_batch["obs"] subgoals = processed_batch["subgoals"] # full subgoal observations - target_subgoals = processed_batch["target_subgoals"] # targets for network prediction + target_subgoals = processed_batch[ + "target_subgoals" + ] # targets for network prediction goal_obs = processed_batch["goal_obs"] with torch.no_grad(): # run VAE forward pass to get samples from posterior for the current observation and subgoal vae_outputs = self.nets["goal_network"]( - inputs=subgoals, # encoder takes full subgoals - outputs=target_subgoals, # reconstruct target subgoals + inputs=subgoals, # encoder takes full subgoals + outputs=target_subgoals, # reconstruct target subgoals goals=goal_obs, - conditions=obs, # condition on observations + conditions=obs, # condition on observations ) posterior_z = vae_outputs["encoder_z"] latent_subgoals = posterior_z @@ -337,10 +353,14 @@ def get_actor_goal_for_training_from_processed_batch( batch_size = obs[random_key].shape[0] # for each batch member, get @num_prior_samples samples from the prior - obs_tiled = ObsUtils.repeat_and_stack_observation(obs, n=num_prior_samples) + obs_tiled = ObsUtils.repeat_and_stack_observation( + obs, n=num_prior_samples + ) goal_tiled = None if len(self.goal_shapes) > 0: - goal_tiled = ObsUtils.repeat_and_stack_observation(goal_obs, n=num_prior_samples) + goal_tiled = ObsUtils.repeat_and_stack_observation( + goal_obs, n=num_prior_samples + ) prior_z_samples = self.nets["goal_network"].sample_prior( conditions=obs_tiled, @@ -351,7 +371,9 @@ def get_actor_goal_for_training_from_processed_batch( # note: every posterior sample in the batch has @num_prior_samples corresponding prior samples # reshape prior samples to (batch_size, num_samples, latent_dim) - prior_z_samples = prior_z_samples.reshape(batch_size, num_prior_samples, -1) + prior_z_samples = prior_z_samples.reshape( + batch_size, num_prior_samples, -1 + ) # reshape posterior latents to (batch_size, 1, latent_dim) posterior_z_expanded = posterior_z.unsqueeze(1) @@ -362,9 +384,11 @@ def get_actor_goal_for_training_from_processed_batch( # then gather the closest prior sample for each posterior sample neighbors = torch.argmin(distances, dim=1) - latent_subgoals = prior_z_samples[torch.arange(batch_size).long(), neighbors] + latent_subgoals = prior_z_samples[ + torch.arange(batch_size).long(), neighbors + ] - return { "latent_subgoal" : latent_subgoals } + return {"latent_subgoal": latent_subgoals} def train_on_batch(self, batch, epoch, validate=False): """ @@ -387,8 +411,13 @@ def train_on_batch(self, batch, epoch, validate=False): info = super(GL, self).train_on_batch(batch, epoch, validate=validate) if self.algo_config.vae.prior.use_categorical: - temperature = self.algo_config.vae.prior.categorical_init_temp - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step - temperature = max(temperature, self.algo_config.vae.prior.categorical_min_temp) + temperature = ( + self.algo_config.vae.prior.categorical_init_temp + - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step + ) + temperature = max( + temperature, self.algo_config.vae.prior.categorical_min_temp + ) self.nets["goal_network"].set_gumbel_temperature(temperature) # batch variables @@ -398,10 +427,10 @@ def train_on_batch(self, batch, epoch, validate=False): goal_obs = batch["goal_obs"] vae_outputs = 
self.nets["goal_network"]( - inputs=subgoals, # encoder takes full subgoals - outputs=target_subgoals, # reconstruct target subgoals + inputs=subgoals, # encoder takes full subgoals + outputs=target_subgoals, # reconstruct target subgoals goals=goal_obs, - conditions=obs, # condition on observations + conditions=obs, # condition on observations ) recons_loss = vae_outputs["reconstruction_loss"] kl_loss = vae_outputs["kl_loss"] @@ -412,7 +441,9 @@ def train_on_batch(self, batch, epoch, validate=False): if not self.algo_config.vae.prior.use_categorical: with torch.no_grad(): - info["encoder_variance"] = torch.exp(vae_outputs["encoder_params"]["logvar"]) + info["encoder_variance"] = torch.exp( + vae_outputs["encoder_params"]["logvar"] + ) # VAE gradient step if not validate: @@ -440,7 +471,9 @@ def log_info(self, info): loss_log["Reconstruction_Loss"] = info["recons_loss"].item() loss_log["KL_Loss"] = info["kl_loss"].item() if self.algo_config.vae.prior.use_categorical: - loss_log["Gumbel_Temperature"] = self.nets["goal_network"].get_gumbel_temperature() + loss_log["Gumbel_Temperature"] = self.nets[ + "goal_network" + ].get_gumbel_temperature() else: loss_log["Encoder_Variance"] = info["encoder_variance"].mean().item() return loss_log @@ -467,8 +500,10 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): return OrderedDict(latent_subgoal=latent_subgoals) # sample a single goal from the VAE - goals = self.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=1) - return { k : goals[k][:, 0, ...] for k in goals } + goals = self.sample_subgoals( + obs_dict=obs_dict, goal_dict=goal_dict, num_samples=1 + ) + return {k: goals[k][:, 0, ...] for k in goals} def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): """ @@ -492,9 +527,13 @@ def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): mod = list(obs_tiled.keys())[0] n = obs_tiled[mod].shape[0] # [batch_size * num_samples, ...] - goals = self.nets["goal_network"].decode(n=n, conditions=obs_tiled, goals=goal_tiled) + goals = self.nets["goal_network"].decode( + n=n, conditions=obs_tiled, goals=goal_tiled + ) # reshape to [batch_size, num_samples, ...] - return TensorUtils.reshape_dimensions(goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples)) + return TensorUtils.reshape_dimensions( + goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples) + ) class ValuePlanner(PlannerAlgo, ValueAlgo): @@ -503,6 +542,7 @@ class ValuePlanner(PlannerAlgo, ValueAlgo): based on (1) a @PlannerAlgo that is used to sample candidate subgoals and (2) a @ValueAlgo that is used to select one of the subgoals. 
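
For reference, a minimal sketch of the sample-then-score pattern this docstring describes, with a toy value function standing in for the ValueAlgo:

import torch

B, N, D = 2, 5, 8
subgoals = torch.randn(B, N, D)                 # N candidate subgoals per observation
value_fn = lambda g: -g.pow(2).sum(dim=-1)      # toy scorer, returns (B, N)
best_index = torch.argmax(value_fn(subgoals), dim=1)
best_subgoal = subgoals[torch.arange(B), best_index]  # (B, D)
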
""" + def __init__( self, planner_algo_class, @@ -513,7 +553,6 @@ def __init__( obs_key_shapes, ac_dim, device, - ): """ Args: @@ -548,7 +587,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) self.value_net = value_algo_class( @@ -557,7 +596,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) self.subgoal_shapes = self.planner.subgoal_shapes @@ -573,7 +612,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -608,10 +647,14 @@ def train_on_batch(self, batch, epoch, validate=False): info = dict(planner=dict(), value_net=dict()) # train planner - info["planner"].update(self.planner.train_on_batch(batch["planner"], epoch, validate=validate)) + info["planner"].update( + self.planner.train_on_batch(batch["planner"], epoch, validate=validate) + ) # train value network - info["value_net"].update(self.value_net.train_on_batch(batch["value_net"], epoch, validate=validate)) + info["value_net"].update( + self.value_net.train_on_batch(batch["value_net"], epoch, validate=validate) + ) return info @@ -626,7 +669,7 @@ def log_info(self, info): Returns: loss_log (dict): name -> summary statistic """ - loss = 0. + loss = 0.0 # planner planner_log = self.planner.log_info(info["planner"]) @@ -696,8 +739,14 @@ def __repr__(self): """ msg = str(self.__class__.__name__) import textwrap - return msg + "Planner:\n" + textwrap.indent(self.planner.__repr__(), ' ') + \ - "\n\nValue Network:\n" + textwrap.indent(self.value_net.__repr__(), ' ') + + return ( + msg + + "Planner:\n" + + textwrap.indent(self.planner.__repr__(), " ") + + "\n\nValue Network:\n" + + textwrap.indent(self.value_net.__repr__(), " ") + ) def get_subgoal_predictions(self, obs_dict, goal_dict=None): """ @@ -714,12 +763,16 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): num_samples = self.algo_config.num_samples # sample subgoals from the planner (shape: [batch_size, num_samples, ...]) - subgoals = self.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples) + subgoals = self.sample_subgoals( + obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples + ) # stack subgoals to get all values in one forward pass (shape [batch_size * num_samples, ...]) k = list(obs_dict.keys())[0] bsize = obs_dict[k].shape[0] - subgoals_tiled = TensorUtils.reshape_dimensions(subgoals, begin_axis=0, end_axis=1, target_dims=(bsize * num_samples,)) + subgoals_tiled = TensorUtils.reshape_dimensions( + subgoals, begin_axis=0, end_axis=1, target_dims=(bsize * num_samples,) + ) # also repeat goals if necessary goal_tiled = None @@ -727,11 +780,15 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): goal_tiled = ObsUtils.repeat_and_stack_observation(goal_dict, n=num_samples) # evaluate the value of each subgoal - subgoal_values = self.value_net.get_state_value(obs_dict=subgoals_tiled, goal_dict=goal_tiled).reshape(-1, num_samples) + subgoal_values = self.value_net.get_state_value( + obs_dict=subgoals_tiled, goal_dict=goal_tiled + ).reshape(-1, num_samples) # pick the best subgoal best_index = torch.argmax(subgoal_values, dim=1) - best_subgoal = {k: subgoals[k][torch.arange(bsize), best_index] for k in subgoals} + best_subgoal = { + k: subgoals[k][torch.arange(bsize), best_index] for k in subgoals + } return best_subgoal def 
sample_subgoals(self, obs_dict, goal_dict, num_samples=1): @@ -745,7 +802,9 @@ def sample_subgoals(self, obs_dict, goal_dict, num_samples=1): Returns: subgoals (dict): name -> Tensor [batch_size, num_samples, ...] """ - return self.planner.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples) + return self.planner.sample_subgoals( + obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples + ) def get_state_value(self, obs_dict, goal_dict=None): """ @@ -772,4 +831,6 @@ def get_state_action_value(self, obs_dict, actions, goal_dict=None): Returns: value (torch.Tensor): value tensor """ - return self.value_net.get_state_action_value(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) + return self.value_net.get_state_action_value( + obs_dict=obs_dict, actions=actions, goal_dict=goal_dict + ) diff --git a/robomimic/algo/hbc.py b/robomimic/algo/hbc.py index 543b1fbc..6d8f6dc4 100644 --- a/robomimic/algo/hbc.py +++ b/robomimic/algo/hbc.py @@ -5,6 +5,7 @@ reach them. Largely based on the Generalization Through Imitation (GTI) paper (see https://arxiv.org/abs/2003.06085). """ + import textwrap import numpy as np from collections import OrderedDict @@ -15,7 +16,12 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils from robomimic.config.config import Config -from robomimic.algo import register_algo_factory_func, algo_name_to_factory_func, HierarchicalAlgo, GL_VAE +from robomimic.algo import ( + register_algo_factory_func, + algo_name_to_factory_func, + HierarchicalAlgo, + GL_VAE, +) @register_algo_factory_func("hbc") @@ -39,6 +45,7 @@ class HBC(HierarchicalAlgo): """ Default HBC training, largely based on https://arxiv.org/abs/2003.06085 """ + def __init__( self, planner_algo_class, @@ -77,9 +84,13 @@ def __init__( self.ac_dim = ac_dim self.device = device - self._subgoal_step_count = 0 # current step count for deciding when to update subgoal + self._subgoal_step_count = ( + 0 # current step count for deciding when to update subgoal + ) self._current_subgoal = None # latest subgoal - self._subgoal_update_interval = self.algo_config.subgoal_update_interval # subgoal update frequency + self._subgoal_update_interval = ( + self.algo_config.subgoal_update_interval + ) # subgoal update frequency self._subgoal_horizon = self.algo_config.planner.subgoal_horizon self._actor_horizon = self.algo_config.actor.rnn.horizon @@ -92,14 +103,16 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) # goal-conditional actor follows goals set by the planner self.actor_goal_shapes = self.planner.subgoal_shapes if self.algo_config.latent_subgoal.enabled: assert planner_algo_class == GL_VAE # only VAE supported for now - self.actor_goal_shapes = OrderedDict(latent_subgoal=(self.planner.algo_config.vae.latent_dim,)) + self.actor_goal_shapes = OrderedDict( + latent_subgoal=(self.planner.algo_config.vae.latent_dim,) + ) # only for the actor: override goal modalities and shapes to match the subgoal set by the planner actor_obs_key_shapes = deepcopy(obs_key_shapes) @@ -109,7 +122,9 @@ def __init__( assert actor_obs_key_shapes[k] == self.actor_goal_shapes[k] actor_obs_key_shapes.update(self.actor_goal_shapes) - goal_obs_keys = {obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys()} + goal_obs_keys = { + obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys() + } for k in self.actor_goal_shapes.keys(): 
goal_obs_keys[ObsUtils.OBS_KEYS_TO_MODALITIES[k]].append(k) @@ -137,7 +152,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -147,25 +162,34 @@ def process_batch_for_training(self, batch): if self.algo_config.actor_use_random_subgoals: # optionally use randomly sampled step between [1, seq_length] as policy goal policy_subgoal_indices = torch.randint( - low=0, high=self.global_config.train.seq_length, size=(batch["actions"].shape[0],)) - goal_obs = TensorUtils.gather_sequence(batch["next_obs"], policy_subgoal_indices) - goal_obs = TensorUtils.to_float(TensorUtils.to_device(goal_obs, self.device)) - input_batch["actor"]["goal_obs"] = \ + low=0, + high=self.global_config.train.seq_length, + size=(batch["actions"].shape[0],), + ) + goal_obs = TensorUtils.gather_sequence( + batch["next_obs"], policy_subgoal_indices + ) + goal_obs = TensorUtils.to_float( + TensorUtils.to_device(goal_obs, self.device) + ) + input_batch["actor"]["goal_obs"] = ( self.planner.get_actor_goal_for_training_from_processed_batch( goal_obs, use_latent_subgoals=self.algo_config.latent_subgoal.enabled, use_prior_correction=self.algo_config.latent_subgoal.prior_correction.enabled, num_prior_samples=self.algo_config.latent_subgoal.prior_correction.num_samples, ) + ) else: # otherwise, use planner subgoal target as goal for the policy - input_batch["actor"]["goal_obs"] = \ + input_batch["actor"]["goal_obs"] = ( self.planner.get_actor_goal_for_training_from_processed_batch( input_batch["planner"], use_latent_subgoals=self.algo_config.latent_subgoal.enabled, use_prior_correction=self.algo_config.latent_subgoal.prior_correction.enabled, num_prior_samples=self.algo_config.latent_subgoal.prior_correction.num_samples, ) + ) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -190,24 +214,34 @@ def train_on_batch(self, batch, epoch, validate=False): """ info = dict(planner=dict(), actor=dict()) # train planner - info["planner"].update(self.planner.train_on_batch(batch["planner"], epoch, validate=validate)) + info["planner"].update( + self.planner.train_on_batch(batch["planner"], epoch, validate=validate) + ) # train actor if self._algo_mode == "separate": # train low-level actor by getting subgoals from the dataset - info["actor"].update(self.actor.train_on_batch(batch["actor"], epoch, validate=validate)) + info["actor"].update( + self.actor.train_on_batch(batch["actor"], epoch, validate=validate) + ) elif self._algo_mode == "cascade": # get predictions from the planner with torch.no_grad(): batch["actor"]["goal_obs"] = self.planner.get_subgoal_predictions( - obs_dict=batch["planner"]["obs"], goal_dict=batch["planner"]["goal_obs"]) + obs_dict=batch["planner"]["obs"], + goal_dict=batch["planner"]["goal_obs"], + ) # train actor with the predicted goal - info["actor"].update(self.actor.train_on_batch(batch["actor"], epoch, validate=validate)) + info["actor"].update( + self.actor.train_on_batch(batch["actor"], epoch, validate=validate) + ) else: - raise NotImplementedError("algo mode {} is not implemented".format(self._algo_mode)) + raise NotImplementedError( + "algo mode {} is not implemented".format(self._algo_mode) + ) return info @@ -224,7 +258,7 @@ def log_info(self, info): """ planner_log = dict() actor_log = dict() - loss = 0. 
+ loss = 0.0 planner_log = self.planner.log_info(info["planner"]) planner_log = dict(("Planner/" + k, v) for k, v in planner_log.items()) @@ -284,7 +318,7 @@ def current_subgoal(self): """ Return the current subgoal (at rollout time) with shape (batch, ...) """ - return { k : self._current_subgoal[k].clone() for k in self._current_subgoal } + return {k: self._current_subgoal[k].clone() for k in self._current_subgoal} @current_subgoal.setter def current_subgoal(self, sg): @@ -297,7 +331,7 @@ def current_subgoal(self, sg): assert list(v.shape[1:]) == list(self.planner.subgoal_shapes[k]) # subgoal shapes should always match actor goal shapes assert list(v.shape[1:]) == list(self.actor_goal_shapes[k]) - self._current_subgoal = { k : sg[k].clone() for k in sg } + self._current_subgoal = {k: sg[k].clone() for k in sg} def get_action(self, obs_dict, goal_dict=None): """ @@ -310,11 +344,18 @@ def get_action(self, obs_dict, goal_dict=None): Returns: action (torch.Tensor): action tensor """ - if self._current_subgoal is None or self._subgoal_step_count % self._subgoal_update_interval == 0: + if ( + self._current_subgoal is None + or self._subgoal_step_count % self._subgoal_update_interval == 0 + ): # update current subgoal - self.current_subgoal = self.planner.get_subgoal_predictions(obs_dict=obs_dict, goal_dict=goal_dict) + self.current_subgoal = self.planner.get_subgoal_predictions( + obs_dict=obs_dict, goal_dict=goal_dict + ) - action = self.actor.get_action(obs_dict=obs_dict, goal_dict=self.current_subgoal) + action = self.actor.get_action( + obs_dict=obs_dict, goal_dict=self.current_subgoal + ) self._subgoal_step_count += 1 return action @@ -332,13 +373,20 @@ def __repr__(self): Pretty print algorithm and network description. """ msg = str(self.__class__.__name__) - msg += "(subgoal_horizon={}, actor_horizon={}, subgoal_update_interval={}, mode={}, " \ - "actor_use_random_subgoals={})\n".format( - self._subgoal_horizon, - self._actor_horizon, - self._subgoal_update_interval, - self._algo_mode, - self.algo_config.actor_use_random_subgoals + msg += ( + "(subgoal_horizon={}, actor_horizon={}, subgoal_update_interval={}, mode={}, " + "actor_use_random_subgoals={})\n".format( + self._subgoal_horizon, + self._actor_horizon, + self._subgoal_update_interval, + self._algo_mode, + self.algo_config.actor_use_random_subgoals, + ) + ) + return ( + msg + + "Planner:\n" + + textwrap.indent(self.planner.__repr__(), " ") + + "\n\nPolicy:\n" + + textwrap.indent(self.actor.__repr__(), " ") ) - return msg + "Planner:\n" + textwrap.indent(self.planner.__repr__(), ' ') + \ - "\n\nPolicy:\n" + textwrap.indent(self.actor.__repr__(), ' ') diff --git a/robomimic/algo/iql.py b/robomimic/algo/iql.py index bde522b2..cc4239a3 100644 --- a/robomimic/algo/iql.py +++ b/robomimic/algo/iql.py @@ -3,6 +3,7 @@ Based off of https://github.com/rail-berkeley/rlkit/blob/master/rlkit/torch/sac/iql_trainer.py. (Paper - https://arxiv.org/abs/2110.06169). """ + import numpy as np from collections import OrderedDict @@ -56,9 +57,11 @@ def _create_networks(self): actor_args.update(dict(self.algo_config.actor.net.gmm)) else: # Unsupported actor type! - raise ValueError(f"Unsupported actor requested. " - f"Requested: {self.algo_config.actor.net.type}, " - f"valid options are: {['gaussian', 'gmm']}") + raise ValueError( + f"Unsupported actor requested. 
" + f"Requested: {self.algo_config.actor.net.type}, " + f"valid options are: {['gaussian', 'gmm']}" + ) # Actor self.nets["actor"] = actor_cls( @@ -66,7 +69,9 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), **actor_args, ) @@ -80,7 +85,9 @@ def _create_networks(self): ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.critic.layer_dims, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) net_list.append(critic) @@ -89,7 +96,9 @@ def _create_networks(self): obs_shapes=self.obs_shapes, mlp_layer_dims=self.algo_config.critic.layer_dims, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) # Send networks to appropriate device @@ -97,7 +106,9 @@ def _create_networks(self): # sync target networks at beginning of training with torch.no_grad(): - for critic, critic_target in zip(self.nets["critic"], self.nets["critic_target"]): + for critic, critic_target in zip( + self.nets["critic"], self.nets["critic_target"] + ): TorchUtils.hard_update( source=critic, target=critic_target, @@ -120,8 +131,12 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = {k: batch["next_obs"][k][:, 0, :] for k in batch["next_obs"]} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["next_obs"] = { + k: batch["next_obs"][k][:, 0, :] for k in batch["next_obs"] + } + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] input_batch["dones"] = batch["dones"][:, 0] input_batch["rewards"] = batch["rewards"][:, 0] @@ -160,7 +175,7 @@ def train_on_batch(self, batch, epoch, validate=False): if not validate: # Critic update self._update_critic(critic_losses, vf_loss) - + # Actor update self._update_actor(actor_loss) @@ -195,38 +210,46 @@ def _compute_critic_loss(self, batch): dones = torch.unsqueeze(batch["dones"], 1) # Q predictions - pred_qs = [critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) - for critic in self.nets["critic"]] + pred_qs = [ + critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) + for critic in self.nets["critic"] + ] info["critic/critic1_pred"] = pred_qs[0].mean() # Q target values target_vf_pred = self.nets["vf"](obs_dict=next_obs, goal_dict=goal_obs).detach() - q_target = rewards + (1. 
- dones) * self.algo_config.discount * target_vf_pred + q_target = rewards + (1.0 - dones) * self.algo_config.discount * target_vf_pred q_target = q_target.detach() # Q losses critic_losses = [] - td_loss_fcn = nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() - for (i, q_pred) in enumerate(pred_qs): + td_loss_fcn = ( + nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() + ) + for i, q_pred in enumerate(pred_qs): # Calculate td error loss td_loss = td_loss_fcn(q_pred, q_target) info[f"critic/critic{i+1}_loss"] = td_loss critic_losses.append(td_loss) # V predictions - pred_qs = [critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) - for critic in self.nets["critic_target"]] + pred_qs = [ + critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) + for critic in self.nets["critic_target"] + ] q_pred, _ = torch.cat(pred_qs, dim=1).min(dim=1, keepdim=True) q_pred = q_pred.detach() vf_pred = self.nets["vf"](obs) - + # V losses: expectile regression. see section 4.1 in https://arxiv.org/pdf/2110.06169.pdf vf_err = vf_pred - q_pred vf_sign = (vf_err > 0).float() - vf_weight = (1 - vf_sign) * self.algo_config.vf_quantile + vf_sign * (1 - self.algo_config.vf_quantile) - vf_loss = (vf_weight * (vf_err ** 2)).mean() - + vf_weight = (1 - vf_sign) * self.algo_config.vf_quantile + vf_sign * ( + 1 - self.algo_config.vf_quantile + ) + vf_loss = (vf_weight * (vf_err**2)).mean() + # update logs for V loss info["vf/q_pred"] = q_pred info["vf/v_pred"] = vf_pred @@ -245,8 +268,11 @@ def _update_critic(self, critic_losses, vf_loss): """ # update ensemble of critics - for (critic_loss, critic, critic_target, optimizer) in zip( - critic_losses, self.nets["critic"], self.nets["critic_target"], self.optimizers["critic"] + for critic_loss, critic, critic_target, optimizer in zip( + critic_losses, + self.nets["critic"], + self.nets["critic_target"], + self.optimizers["critic"], ): TorchUtils.backprop_for_loss( net=critic, @@ -258,7 +284,9 @@ def _update_critic(self, critic_losses, vf_loss): # update target network with torch.no_grad(): - TorchUtils.soft_update(source=critic, target=critic_target, tau=self.algo_config.target_tau) + TorchUtils.soft_update( + source=critic, target=critic_target, tau=self.algo_config.target_tau + ) # update V function network TorchUtils.backprop_for_loss( @@ -287,7 +315,9 @@ def _compute_actor_loss(self, batch, critic_info): info = OrderedDict() # compute log probability of batch actions - dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) + dist = self.nets["actor"].forward_train( + obs_dict=batch["obs"], goal_dict=batch["goal_obs"] + ) log_prob = dist.log_prob(batch["actions"]) info["actor/log_prob"] = log_prob.mean() @@ -296,7 +326,7 @@ def _compute_actor_loss(self, batch, critic_info): q_pred = critic_info["vf/q_pred"] v_pred = critic_info["vf/v_pred"] adv = q_pred - v_pred - + # compute weights weights = self._get_adv_weights(adv) @@ -326,7 +356,7 @@ def _update_actor(self, actor_loss): loss=actor_loss, max_grad_norm=self.algo_config.actor.max_gradient_norm, ) - + def _get_adv_weights(self, adv): """ Helper function for computing advantage weights. 
Called by @_compute_actor_loss @@ -338,13 +368,13 @@ def _get_adv_weights(self, adv): weights (torch.Tensor): weights computed based on advantage estimates, in shape (B,) where B is batch size """ - + # clip raw advantage values if self.algo_config.adv.clip_adv_value is not None: adv = adv.clamp(max=self.algo_config.adv.clip_adv_value) # compute weights based on advantage values - beta = self.algo_config.adv.beta # temprature factor + beta = self.algo_config.adv.beta # temprature factor weights = torch.exp(adv / beta) # clip final weights @@ -425,4 +455,4 @@ def get_action(self, obs_dict, goal_dict=None): """ assert not self.nets.training - return self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict) \ No newline at end of file + return self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict) diff --git a/robomimic/algo/iris.py b/robomimic/algo/iris.py index 7b441470..d5f29e39 100644 --- a/robomimic/algo/iris.py +++ b/robomimic/algo/iris.py @@ -1,6 +1,7 @@ """ Implementation of IRIS (https://arxiv.org/abs/1911.05321). """ + import numpy as np from collections import OrderedDict from copy import deepcopy @@ -10,7 +11,14 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils from robomimic.config.config import Config -from robomimic.algo import register_algo_factory_func, algo_name_to_factory_func, HBC, ValuePlanner, ValueAlgo, GL_VAE +from robomimic.algo import ( + register_algo_factory_func, + algo_name_to_factory_func, + HBC, + ValuePlanner, + ValueAlgo, + GL_VAE, +) @register_algo_factory_func("iris") @@ -28,13 +36,18 @@ def algo_config_to_class(algo_config): pol_cls, _ = algo_name_to_factory_func("bc")(algo_config.actor) plan_cls, _ = algo_name_to_factory_func("gl")(algo_config.value_planner.planner) value_cls, _ = algo_name_to_factory_func("bcq")(algo_config.value_planner.value) - return IRIS, dict(policy_algo_class=pol_cls, planner_algo_class=plan_cls, value_algo_class=value_cls) + return IRIS, dict( + policy_algo_class=pol_cls, + planner_algo_class=plan_cls, + value_algo_class=value_cls, + ) class IRIS(HBC, ValueAlgo): """ Implementation of IRIS (https://arxiv.org/abs/1911.05321). 
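For readers cross-checking the IQL hunks above: the expectile value loss (section 4.1 of the paper) and the exponentiated advantage weights amount to the following sketch, where tau and beta stand in for algo_config.vf_quantile and algo_config.adv.beta, and max_weight is an arbitrary stand-in for the configurable final clip:

    import torch

    def expectile_loss(v_pred, q_pred, tau=0.7):
        # asymmetric L2: errors where V < Q get weight tau, where V > Q get 1 - tau
        err = v_pred - q_pred
        weight = torch.full_like(err, tau)
        weight[err > 0] = 1.0 - tau
        return (weight * err.pow(2)).mean()

    def advantage_weights(q_pred, v_pred, beta=1.0, max_weight=100.0):
        # exponentiated, temperature-scaled advantages, clipped for stability
        adv = q_pred - v_pred
        return torch.exp(adv / beta).clamp(max=max_weight)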
""" + def __init__( self, planner_algo_class, @@ -74,9 +87,13 @@ def __init__( self.ac_dim = ac_dim self.device = device - self._subgoal_step_count = 0 # current step count for deciding when to update subgoal + self._subgoal_step_count = ( + 0 # current step count for deciding when to update subgoal + ) self._current_subgoal = None # latest subgoal - self._subgoal_update_interval = self.algo_config.subgoal_update_interval # subgoal update frequency + self._subgoal_update_interval = ( + self.algo_config.subgoal_update_interval + ) # subgoal update frequency self._subgoal_horizon = self.algo_config.value_planner.planner.subgoal_horizon self._actor_horizon = self.algo_config.actor.rnn.horizon @@ -91,11 +108,13 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) self.actor_goal_shapes = self.planner.subgoal_shapes - assert not algo_config.latent_subgoal.enabled, "IRIS does not support latent subgoals" + assert ( + not algo_config.latent_subgoal.enabled + ), "IRIS does not support latent subgoals" # only for the actor: override goal modalities and shapes to match the subgoal set by the planner actor_obs_key_shapes = deepcopy(obs_key_shapes) @@ -105,7 +124,9 @@ def __init__( assert actor_obs_key_shapes[k] == self.actor_goal_shapes[k] actor_obs_key_shapes.update(self.actor_goal_shapes) - goal_modalities = {obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys()} + goal_modalities = { + obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys() + } for k in self.actor_goal_shapes.keys(): goal_modalities[ObsUtils.OBS_KEYS_TO_MODALITIES[k]].append(k) @@ -119,7 +140,7 @@ def __init__( global_config=global_config, obs_key_shapes=actor_obs_key_shapes, ac_dim=ac_dim, - device=device + device=device, ) def process_batch_for_training(self, batch): @@ -133,7 +154,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -143,13 +164,22 @@ def process_batch_for_training(self, batch): if self.algo_config.actor_use_random_subgoals: # optionally use randomly sampled step between [1, seq_length] as policy goal policy_subgoal_indices = torch.randint( - low=0, high=self.global_config.train.seq_length, size=(batch["actions"].shape[0],)) - goal_obs = TensorUtils.gather_sequence(batch["next_obs"], policy_subgoal_indices) - goal_obs = TensorUtils.to_float(TensorUtils.to_device(goal_obs, self.device)) + low=0, + high=self.global_config.train.seq_length, + size=(batch["actions"].shape[0],), + ) + goal_obs = TensorUtils.gather_sequence( + batch["next_obs"], policy_subgoal_indices + ) + goal_obs = TensorUtils.to_float( + TensorUtils.to_device(goal_obs, self.device) + ) input_batch["actor"]["goal_obs"] = goal_obs else: # otherwise, use planner subgoal target as goal for the policy - input_batch["actor"]["goal_obs"] = input_batch["planner"]["planner"]["target_subgoals"] + input_batch["actor"]["goal_obs"] = input_batch["planner"]["planner"][ + "target_subgoals" + ] # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -180,4 +210,6 @@ def get_state_action_value(self, obs_dict, actions, goal_dict=None): Returns: value (torch.Tensor): value tensor """ - return self.planner.get_state_action_value(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) + return 
self.planner.get_state_action_value( + obs_dict=obs_dict, actions=actions, goal_dict=goal_dict + ) diff --git a/robomimic/algo/td3_bc.py b/robomimic/algo/td3_bc.py index e324c54a..188a5777 100644 --- a/robomimic/algo/td3_bc.py +++ b/robomimic/algo/td3_bc.py @@ -9,6 +9,7 @@ from the BCQ algo class) to be explicit and have implementation details self-contained in this file. """ + from collections import OrderedDict import torch @@ -48,6 +49,7 @@ class TD3_BC(PolicyAlgo, ValueAlgo): Default TD3_BC training, based on https://arxiv.org/abs/2106.06860 and https://github.com/sfujim/TD3_BC. """ + def __init__(self, **kwargs): PolicyAlgo.__init__(self, **kwargs) @@ -70,12 +72,12 @@ def _create_networks(self): with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.hard_update( - source=self.nets["critic"][critic_ind], + source=self.nets["critic"][critic_ind], target=self.nets["critic_target"][critic_ind], ) TorchUtils.hard_update( - source=self.nets["actor"], + source=self.nets["actor"], target=self.nets["actor_target"], ) @@ -94,7 +96,9 @@ def _create_critics(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) # Q network ensemble and target ensemble @@ -117,7 +121,9 @@ def _create_actor(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( + self.obs_config.encoder + ), ) self.nets["actor"] = actor_class(**actor_args) @@ -131,9 +137,13 @@ def _check_epoch(self, net_name, epoch): net_name (str): name of network in @self.nets and @self.optim_params epoch (int): epoch number """ - epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"]) - epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"]) - return (epoch_start_check and epoch_end_check) + epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or ( + epoch >= self.optim_params[net_name]["start_epoch"] + ) + epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or ( + epoch < self.optim_params[net_name]["end_epoch"] + ) + return epoch_start_check and epoch_end_check def set_discount(self, discount): """ @@ -154,7 +164,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -164,19 +174,25 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]} - input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["next_obs"] = { + k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"] + } + input_batch["goal_obs"] = batch.get( + "goal_obs", None + ) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last dimension of 1 to be compatible with model outputs # 
single timestep reward is discounted sum of intermediate rewards in sequence reward_seq = batch["rewards"][:, :n_step] - discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0) + discounts = torch.pow( + self.algo_config.discount, torch.arange(n_step).float() + ).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # discount rate will be gamma^N for computing n-step returns - new_discount = (self.algo_config.discount ** n_step) + new_discount = self.algo_config.discount**n_step self.set_discount(new_discount) # consider this n-step seqeunce done if any intermediate dones are present @@ -185,9 +201,13 @@ def process_batch_for_training(self, batch): if self.algo_config.infinite_horizon: # scale terminal rewards by 1 / (1 - gamma) for infinite horizon MDPs - done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] + done_inds = ( + input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] + ) if done_inds.shape[0] > 0: - input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount)) + input_batch["rewards"][done_inds] = input_batch["rewards"][ + done_inds + ] * (1.0 / (1.0 - self.discount)) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -224,14 +244,14 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): goal_s_batch = batch["goal_obs"] # 1 if not done, 0 otherwise - done_mask_batch = 1. - batch["dones"] + done_mask_batch = 1.0 - batch["dones"] info["done_masks"] = done_mask_batch # Bellman backup for Q-targets q_targets = self._get_target_values( - next_states=ns_batch, - goal_states=goal_s_batch, - rewards=r_batch, + next_states=ns_batch, + goal_states=goal_s_batch, + rewards=r_batch, dones=done_mask_batch, ) info["critic/q_targets"] = q_targets @@ -239,10 +259,10 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): # Train all critics using this set of targets for regression for critic_ind, critic in enumerate(self.nets["critic"]): critic_loss = self._compute_critic_loss( - critic=critic, - states=s_batch, - actions=a_batch, - goal_states=goal_s_batch, + critic=critic, + states=s_batch, + actions=a_batch, + goal_states=goal_s_batch, q_targets=q_targets, ) info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss @@ -251,10 +271,12 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): critic_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["critic"][critic_ind], optim=self.optimizers["critic"][critic_ind], - loss=critic_loss, + loss=critic_loss, max_grad_norm=self.algo_config.critic.max_gradient_norm, ) - info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms + info["critic/critic{}_grad_norms".format(critic_ind + 1)] = ( + critic_grad_norms + ) return info @@ -320,19 +342,27 @@ def _get_target_values(self, next_states, goal_states, rewards, dones): next_target_actions = self.nets["actor_target"](next_states, goal_states) noise = ( torch.randn_like(next_target_actions) * self.algo_config.actor.noise_std - ).clamp(-self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip) + ).clamp( + -self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip + ) next_actions = (next_target_actions + noise).clamp(-1.0, 1.0) # TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates - all_value_targets = 
self.nets["critic_target"][0](next_states, next_actions, goal_states).reshape(-1, 1) + all_value_targets = self.nets["critic_target"][0]( + next_states, next_actions, goal_states + ).reshape(-1, 1) max_value_targets = all_value_targets min_value_targets = all_value_targets for critic_target in self.nets["critic_target"][1:]: - all_value_targets = critic_target(next_states, next_actions, goal_states).reshape(-1, 1) + all_value_targets = critic_target( + next_states, next_actions, goal_states + ).reshape(-1, 1) max_value_targets = torch.max(max_value_targets, all_value_targets) min_value_targets = torch.min(min_value_targets, all_value_targets) - value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \ - (1. - self.algo_config.critic.ensemble.weight) * max_value_targets + value_targets = ( + self.algo_config.critic.ensemble.weight * min_value_targets + + (1.0 - self.algo_config.critic.ensemble.weight) * max_value_targets + ) q_targets = rewards + dones * self.discount * value_targets return q_targets @@ -381,11 +411,13 @@ def train_on_batch(self, batch, epoch, validate=False): info = PolicyAlgo.train_on_batch(self, batch, epoch, validate=validate) # Critic training - no_critic_backprop = validate or (not self._check_epoch(net_name="critic", epoch=epoch)) + no_critic_backprop = validate or ( + not self._check_epoch(net_name="critic", epoch=epoch) + ) with TorchUtils.maybe_no_grad(no_grad=no_critic_backprop): critic_info = self._train_critic_on_batch( - batch=batch, - epoch=epoch, + batch=batch, + epoch=epoch, no_backprop=no_critic_backprop, ) info.update(critic_info) @@ -394,35 +426,39 @@ def train_on_batch(self, batch, epoch, validate=False): if not no_critic_backprop: # update counter only on critic training gradient steps self.actor_update_counter += 1 - do_actor_update = (self.actor_update_counter % self.algo_config.actor.update_freq == 0) + do_actor_update = ( + self.actor_update_counter % self.algo_config.actor.update_freq == 0 + ) # Actor training - no_actor_backprop = validate or (not self._check_epoch(net_name="actor", epoch=epoch)) + no_actor_backprop = validate or ( + not self._check_epoch(net_name="actor", epoch=epoch) + ) no_actor_backprop = no_actor_backprop or (not do_actor_update) with TorchUtils.maybe_no_grad(no_grad=no_actor_backprop): actor_info = self._train_actor_on_batch( - batch=batch, - epoch=epoch, + batch=batch, + epoch=epoch, no_backprop=no_actor_backprop, ) info.update(actor_info) if not no_actor_backprop: - # to match original implementation, only update target networks on + # to match original implementation, only update target networks on # actor gradient steps with torch.no_grad(): # update the target critic networks for critic_ind in range(len(self.nets["critic"])): TorchUtils.soft_update( - source=self.nets["critic"][critic_ind], - target=self.nets["critic_target"][critic_ind], + source=self.nets["critic"][critic_ind], + target=self.nets["critic_target"][critic_ind], tau=self.algo_config.target_tau, ) # update target actor network TorchUtils.soft_update( - source=self.nets["actor"], - target=self.nets["actor_target"], + source=self.nets["actor"], + target=self.nets["actor_target"], tau=self.algo_config.target_tau, ) @@ -447,14 +483,17 @@ def log_info(self, info): optims = [self.optimizers[k]] if k == "critic": # account for critic having one optimizer per ensemble member - keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))] + keys = [ + "{}{}".format(k, critic_ind) + for critic_ind in 
range(len(self.nets["critic"])) + ] optims = self.optimizers[k] for kp, optimizer in zip(keys, optims): for i, param_group in enumerate(optimizer.param_groups): loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"] # extract relevant logs for critic, and actor - loss_log["Loss"] = 0. + loss_log["Loss"] = 0.0 for loss_logger in [self._log_critic_info, self._log_actor_info]: this_log = loss_logger(info) if "Loss" in this_log: @@ -471,14 +510,20 @@ def _log_critic_info(self, info): """ loss_log = OrderedDict() if "done_masks" in info: - loss_log["Critic/Done_Mask_Percentage"] = 100. * torch.mean(info["done_masks"]).item() + loss_log["Critic/Done_Mask_Percentage"] = ( + 100.0 * torch.mean(info["done_masks"]).item() + ) if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0. + loss_log["Loss"] = 0.0 for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ + "critic/critic{}_loss".format(critic_ind + 1) + ].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ + "critic/critic{}_grad_norms".format(critic_ind + 1) + ] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] return loss_log diff --git a/robomimic/config/__init__.py b/robomimic/config/__init__.py index b4f857f1..fb0fae75 100644 --- a/robomimic/config/__init__.py +++ b/robomimic/config/__init__.py @@ -10,4 +10,5 @@ from robomimic.config.hbc_config import HBCConfig from robomimic.config.iris_config import IRISConfig from robomimic.config.td3_bc_config import TD3_BCConfig + # from robomimic.config.diffusion_policy_config import DiffusionPolicyConfig diff --git a/robomimic/config/base_config.py b/robomimic/config/base_config.py index 62129cd0..c71e4c0f 100644 --- a/robomimic/config/base_config.py +++ b/robomimic/config/base_config.py @@ -4,7 +4,7 @@ the correct config class given the algorithm name. """ -import six # preserve metaclass compatibility between python 2 and 3 +import six # preserve metaclass compatibility between python 2 and 3 from copy import deepcopy import robomimic @@ -27,8 +27,11 @@ def config_factory(algo_name, dic=None): a dictionary to instantiate the config from the dictionary. """ if algo_name not in REGISTERED_CONFIGS: - raise Exception("Config for algo name {} not found. Make sure it is a registered config among: {}".format( - algo_name, ', '.join(REGISTERED_CONFIGS))) + raise Exception( + "Config for algo name {} not found. Make sure it is a registered config among: {}".format( + algo_name, ", ".join(REGISTERED_CONFIGS) + ) + ) return REGISTERED_CONFIGS[algo_name](dict_to_load=dic) @@ -37,6 +40,7 @@ class ConfigMeta(type): Define a metaclass for constructing a config class. It registers configs into the global registry. 
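Condensing the td3_bc.py hunks above: the Bellman target combines target-policy smoothing with a weighted min/max blend over the critic ensemble, and target networks track the online ones by Polyak averaging. A sketch under those assumptions, with weight standing in for algo_config.critic.ensemble.weight and done_mask equal to 1 - dones as in the patch:

    import torch

    def td3_target(critics_target, actor_target, next_obs, rewards, done_mask,
                   discount, weight, noise_std=0.2, noise_clip=0.5):
        # target-policy smoothing: perturb the target action with clipped Gaussian noise
        a_next = actor_target(next_obs)
        noise = (torch.randn_like(a_next) * noise_std).clamp(-noise_clip, noise_clip)
        a_next = (a_next + noise).clamp(-1.0, 1.0)
        # weighted blend of min and max over the ensemble's value estimates
        qs = torch.stack([c(next_obs, a_next) for c in critics_target], dim=0)
        v_next = weight * qs.min(dim=0).values + (1.0 - weight) * qs.max(dim=0).values
        # rewards may already hold an n-step sum, with discount = gamma ** n
        return rewards + done_mask * discount * v_next

    def soft_update(source, target, tau):
        # Polyak averaging: target <- (1 - tau) * target + tau * source
        with torch.no_grad():
            for p_s, p_t in zip(source.parameters(), target.parameters()):
                p_t.mul_(1.0 - tau).add_(tau * p_s)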
""" + def __new__(meta, name, bases, class_dict): cls = super(ConfigMeta, meta).__new__(meta, name, bases, class_dict) if cls.__name__ != "BaseConfig": @@ -74,64 +78,87 @@ def ALGO_NAME(cls): def experiment_config(self): """ - This function populates the `config.experiment` attribute of the config, - which has several experiment settings such as the name of the training run, - whether to do logging, whether to save models (and how often), whether to render - videos, and whether to do rollouts (and how often). This class has a default + This function populates the `config.experiment` attribute of the config, + which has several experiment settings such as the name of the training run, + whether to do logging, whether to save models (and how often), whether to render + videos, and whether to do rollouts (and how often). This class has a default implementation that usually doesn't need to be overriden. """ - self.experiment.name = "test" # name of experiment used to make log files - self.experiment.validate = False # whether to do validation or not - self.experiment.logging.terminal_output_to_txt = True # whether to log stdout to txt file - self.experiment.logging.log_tb = True # enable tensorboard logging - self.experiment.logging.log_wandb = False # enable wandb logging - self.experiment.logging.wandb_proj_name = "debug" # project name if using wandb - + self.experiment.name = "test" # name of experiment used to make log files + self.experiment.validate = False # whether to do validation or not + self.experiment.logging.terminal_output_to_txt = ( + True # whether to log stdout to txt file + ) + self.experiment.logging.log_tb = True # enable tensorboard logging + self.experiment.logging.log_wandb = False # enable wandb logging + self.experiment.logging.wandb_proj_name = "debug" # project name if using wandb ## save config - if and when to save model checkpoints ## - self.experiment.save.enabled = True # whether model saving should be enabled or disabled - self.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) - self.experiment.save.every_n_epochs = 50 # save model every n epochs (set to None to disable) - self.experiment.save.epochs = [] # save model on these specific epochs - self.experiment.save.on_best_validation = False # save models that achieve best validation score - self.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return - self.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate + self.experiment.save.enabled = ( + True # whether model saving should be enabled or disabled + ) + self.experiment.save.every_n_seconds = ( + None # save model every n seconds (set to None to disable) + ) + self.experiment.save.every_n_epochs = ( + 50 # save model every n epochs (set to None to disable) + ) + self.experiment.save.epochs = [] # save model on these specific epochs + self.experiment.save.on_best_validation = ( + False # save models that achieve best validation score + ) + self.experiment.save.on_best_rollout_return = ( + False # save models that achieve best rollout return + ) + self.experiment.save.on_best_rollout_success_rate = ( + True # save models that achieve best success rate + ) # epoch definitions - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - self.experiment.epoch_every_n_steps = 100 # number of gradient steps in train epoch (None for full dataset pass) - self.experiment.validation_epoch_every_n_steps 
= 10 # number of gradient steps in valid epoch (None for full dataset pass) + self.experiment.epoch_every_n_steps = ( + 100 # number of gradient steps in train epoch (None for full dataset pass) + ) + self.experiment.validation_epoch_every_n_steps = ( + 10 # number of gradient steps in valid epoch (None for full dataset pass) + ) # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - self.experiment.env = None # no need to set this (unless you want to override) - self.experiment.additional_envs = None # additional environments that should get evaluated - + self.experiment.env = None # no need to set this (unless you want to override) + self.experiment.additional_envs = ( + None # additional environments that should get evaluated + ) ## rendering config ## - self.experiment.render = False # render on-screen or not - self.experiment.render_video = True # render evaluation rollouts to videos - self.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - self.experiment.video_skip = 5 # render video frame every n environment steps during rollout - + self.experiment.render = False # render on-screen or not + self.experiment.render_video = True # render evaluation rollouts to videos + self.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + self.experiment.video_skip = ( + 5 # render video frame every n environment steps during rollout + ) ## evaluation rollout config ## - self.experiment.rollout.enabled = True # enable evaluation rollouts - self.experiment.rollout.n = 50 # number of rollouts per evaluation - self.experiment.rollout.horizon = 400 # maximum number of env steps per rollout - self.experiment.rollout.rate = 50 # do rollouts every @rate epochs - self.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts - self.experiment.rollout.terminate_on_success = True # end rollout early after task success + self.experiment.rollout.enabled = True # enable evaluation rollouts + self.experiment.rollout.n = 50 # number of rollouts per evaluation + self.experiment.rollout.horizon = 400 # maximum number of env steps per rollout + self.experiment.rollout.rate = 50 # do rollouts every @rate epochs + self.experiment.rollout.warmstart = ( + 0 # number of epochs to wait before starting rollouts + ) + self.experiment.rollout.terminate_on_success = ( + True # end rollout early after task success + ) def train_config(self): """ - This function populates the `config.train` attribute of the config, which - has several settings related to the training process, such as the dataset - to use for training, and how the data loader should load the data. This + This function populates the `config.train` attribute of the config, which + has several settings related to the training process, such as the dataset + to use for training, and how the data loader should load the data. This class has a default implementation that usually doesn't need to be overriden. """ # Path to hdf5 dataset to use for training - self.train.data = None + self.train.data = None # Write all results to this directory. A new folder with the timestamp will be created # in this directory, and it will contain three subfolders - "log", "models", and "videos". @@ -140,13 +167,12 @@ class has a default implementation that usually doesn't need to be overriden. # videos. 
self.train.output_dir = "../{}_trained_models".format(self.algo_name) - ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - self.train.num_data_workers = 0 + self.train.num_data_workers = 0 - # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is + # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set # to None to use no caching - in this case, every batch sample is retrieved via file i/o. # You should almost never set this to None, even for large image datasets. @@ -163,7 +189,7 @@ class has a default implementation that usually doesn't need to be overriden. # in utils/dataset.py for more information. self.train.hdf5_normalize_obs = False - # if provided, use the list of demo keys under the hdf5 group "mask/@hdf5_filter_key" for training, instead + # if provided, use the list of demo keys under the hdf5 group "mask/@hdf5_filter_key" for training, instead # of the full dataset. This provides a convenient way to train on only a subset of the trajectories in a dataset. self.train.hdf5_filter_key = None @@ -181,55 +207,62 @@ class has a default implementation that usually doesn't need to be overriden. # keys from hdf5 to load into each batch, besides "obs" and "next_obs". If algorithms # require additional keys from each trajectory in the hdf5, they should be specified here. self.train.dataset_keys = ( - "actions", - "rewards", + "actions", + "rewards", "dones", ) # one of [None, "last"] - set to "last" to include goal observations in each batch self.train.goal_mode = None - ## learning config ## - self.train.cuda = True # use GPU or not - self.train.batch_size = 100 # batch size - self.train.num_epochs = 2000 # number of training epochs - self.train.seed = 1 # seed for training (for reproducibility) + self.train.cuda = True # use GPU or not + self.train.batch_size = 100 # batch size + self.train.num_epochs = 2000 # number of training epochs + self.train.seed = 1 # seed for training (for reproducibility) def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its - training and test-time behavior should be populated here. This function should be + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its + training and test-time behavior should be populated here. This function should be implemented by every subclass. """ pass def observation_config(self): """ - This function populates the `config.observation` attribute of the config, and is given - to the `Algo` subclass (see `algo/algo.py`) for each algorithm through the `obs_config` - argument to the constructor. This portion of the config is used to specify what - observation modalities should be used by the networks for training, and how the - observation modalities should be encoded by the networks. 
While this class has a - default implementation that usually doesn't need to be overriden, certain algorithm - configs may choose to, in order to have seperate configs for different networks - in the algorithm. + This function populates the `config.observation` attribute of the config, and is given + to the `Algo` subclass (see `algo/algo.py`) for each algorithm through the `obs_config` + argument to the constructor. This portion of the config is used to specify what + observation modalities should be used by the networks for training, and how the + observation modalities should be encoded by the networks. While this class has a + default implementation that usually doesn't need to be overriden, certain algorithm + configs may choose to, in order to have seperate configs for different networks + in the algorithm. """ # observation modalities - self.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - self.observation.modalities.obs.rgb = [] # specify rgb image observations for agent + self.observation.modalities.obs.low_dim = ( + [ # specify low-dim observations for agent + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + ) + self.observation.modalities.obs.rgb = ( + [] + ) # specify rgb image observations for agent self.observation.modalities.obs.depth = [] self.observation.modalities.obs.scan = [] - self.observation.modalities.goal.low_dim = [] # specify low-dim goal observations to condition agent on - self.observation.modalities.goal.rgb = [] # specify rgb image goal observations to condition agent on + self.observation.modalities.goal.low_dim = ( + [] + ) # specify low-dim goal observations to condition agent on + self.observation.modalities.goal.rgb = ( + [] + ) # specify rgb image goal observations to condition agent on self.observation.modalities.goal.depth = [] self.observation.modalities.goal.scan = [] self.observation.modalities.obs.do_not_lock_keys() @@ -240,22 +273,30 @@ def observation_config(self): # =============== Low Dim default encoder (no encoder) =============== self.observation.encoder.low_dim.core_class = None - self.observation.encoder.low_dim.core_kwargs = Config() # No kwargs by default + self.observation.encoder.low_dim.core_kwargs = Config() # No kwargs by default self.observation.encoder.low_dim.core_kwargs.do_not_lock_keys() # Low Dim: Obs Randomizer settings self.observation.encoder.low_dim.obs_randomizer_class = None - self.observation.encoder.low_dim.obs_randomizer_kwargs = Config() # No kwargs by default + self.observation.encoder.low_dim.obs_randomizer_kwargs = ( + Config() + ) # No kwargs by default self.observation.encoder.low_dim.obs_randomizer_kwargs.do_not_lock_keys() # =============== RGB default encoder (ResNet backbone + linear layer output) =============== - self.observation.encoder.rgb.core_class = "VisualCore" # Default VisualCore class combines backbone (like ResNet-18) with pooling operation (like spatial softmax) - self.observation.encoder.rgb.core_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.rgb.core_class = "VisualCore" # Default VisualCore class combines backbone (like ResNet-18) with pooling operation (like spatial softmax) + self.observation.encoder.rgb.core_kwargs = ( + Config() + ) # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.rgb.core_kwargs.do_not_lock_keys() # RGB: Obs 
Randomizer settings - self.observation.encoder.rgb.obs_randomizer_class = None # Can set to 'CropRandomizer' to use crop randomization - self.observation.encoder.rgb.obs_randomizer_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.rgb.obs_randomizer_class = ( + None # Can set to 'CropRandomizer' to use crop randomization + ) + self.observation.encoder.rgb.obs_randomizer_kwargs = ( + Config() + ) # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.rgb.obs_randomizer_kwargs.do_not_lock_keys() # Allow for other custom modalities to be specified @@ -268,27 +309,40 @@ def observation_config(self): self.observation.encoder.scan = deepcopy(self.observation.encoder.rgb) # Scan: Modify the core class + kwargs, otherwise, is same as rgb encoder - self.observation.encoder.scan.core_class = "ScanCore" # Default ScanCore class uses Conv1D to process this modality - self.observation.encoder.scan.core_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.scan.core_class = ( + "ScanCore" # Default ScanCore class uses Conv1D to process this modality + ) + self.observation.encoder.scan.core_kwargs = ( + Config() + ) # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.scan.core_kwargs.do_not_lock_keys() def meta_config(self): """ - This function populates the `config.meta` attribute of the config. This portion of the config + This function populates the `config.meta` attribute of the config. This portion of the config is used to specify job information primarily for hyperparameter sweeps. It contains hyperparameter keys and values, which are populated automatically by the hyperparameter config generator (see `utils/hyperparam_utils.py`). These values are read by the wandb logger (see `utils/log_utils.py`) to set job tags. 
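Taken together with the config_factory helper at the top of this file's diff, a consumer builds and then selectively overrides one of these registered configs. A sketch of that flow, assuming the "bc" config is registered as in released robomimic and using a placeholder dataset path:

    from robomimic.config import config_factory

    # look up and build the default BC config from the registry
    config = config_factory("bc")

    # configs lock themselves after construction; unlock values to override defaults
    with config.values_unlocked():
        config.train.data = "/path/to/demos.hdf5"  # placeholder path, not a real file
        config.train.batch_size = 256
        config.experiment.name = "bc_example"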
""" - - self.meta.hp_base_config_file = None # base config file in hyperparam sweep - self.meta.hp_keys = [] # relevant keys (swept) in hyperparam sweep - self.meta.hp_values = [] # values corresponding to keys in hyperparam sweep - + + self.meta.hp_base_config_file = None # base config file in hyperparam sweep + self.meta.hp_keys = [] # relevant keys (swept) in hyperparam sweep + self.meta.hp_values = [] # values corresponding to keys in hyperparam sweep + @property def use_goals(self): # whether the agent is goal-conditioned - return len([obs_key for modality in self.observation.modalities.goal.values() for obs_key in modality]) > 0 + return ( + len( + [ + obs_key + for modality in self.observation.modalities.goal.values() + for obs_key in modality + ] + ) + > 0 + ) @property def all_obs_keys(self): @@ -300,11 +354,18 @@ def all_obs_keys(self): n-array: all observation keys used for this model """ # pool all modalities - return sorted(tuple(set([ - obs_key for group in [ - self.observation.modalities.obs.values(), - self.observation.modalities.goal.values() - ] - for modality in group - for obs_key in modality - ]))) + return sorted( + tuple( + set( + [ + obs_key + for group in [ + self.observation.modalities.obs.values(), + self.observation.modalities.goal.values(), + ] + for modality in group + for obs_key in modality + ] + ) + ) + ) diff --git a/robomimic/config/bc_config.py b/robomimic/config/bc_config.py index 1f701c68..8d3c1d0e 100644 --- a/robomimic/config/bc_config.py +++ b/robomimic/config/bc_config.py @@ -17,90 +17,143 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
""" # optimization parameters self.algo.optim_params.policy.optimizer_type = "adam" - self.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate - self.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.policy.learning_rate.scheduler_type = "multistep" # learning rate scheduler ("multistep", "linear", etc) - self.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.policy.learning_rate.initial = ( + 1e-4 # policy learning rate + ) + self.algo.optim_params.policy.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.policy.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.policy.learning_rate.scheduler_type = ( + "multistep" # learning rate scheduler ("multistep", "linear", etc) + ) + self.algo.optim_params.policy.regularization.L2 = ( + 0.00 # L2 regularization strength + ) # loss weights - self.algo.loss.l2_weight = 1.0 # L2 loss weight - self.algo.loss.l1_weight = 0.0 # L1 loss weight - self.algo.loss.cos_weight = 0.0 # cosine loss weight + self.algo.loss.l2_weight = 1.0 # L2 loss weight + self.algo.loss.l1_weight = 0.0 # L1 loss weight + self.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) self.algo.actor_layer_dims = (1024, 1024) # stochastic Gaussian policy settings - self.algo.gaussian.enabled = False # whether to train a Gaussian policy - self.algo.gaussian.fixed_std = False # whether to train std output or keep it constant - self.algo.gaussian.init_std = 0.1 # initial standard deviation (or constant) - self.algo.gaussian.min_std = 0.01 # minimum std output from network - self.algo.gaussian.std_activation = "softplus" # activation to use for std output from policy net - self.algo.gaussian.low_noise_eval = True # low-std at test-time + self.algo.gaussian.enabled = False # whether to train a Gaussian policy + self.algo.gaussian.fixed_std = ( + False # whether to train std output or keep it constant + ) + self.algo.gaussian.init_std = 0.1 # initial standard deviation (or constant) + self.algo.gaussian.min_std = 0.01 # minimum std output from network + self.algo.gaussian.std_activation = ( + "softplus" # activation to use for std output from policy net + ) + self.algo.gaussian.low_noise_eval = True # low-std at test-time # stochastic GMM policy settings - self.algo.gmm.enabled = False # whether to train a GMM policy - self.algo.gmm.num_modes = 5 # number of GMM modes - self.algo.gmm.min_std = 0.0001 # minimum std output from network - self.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net - self.algo.gmm.low_noise_eval = True # low-std at test-time + self.algo.gmm.enabled = False # whether to train a GMM policy + self.algo.gmm.num_modes = 5 # number of GMM modes + self.algo.gmm.min_std = 0.0001 # minimum std output from network + self.algo.gmm.std_activation = ( + "softplus" # activation to use for std output from policy net + ) + self.algo.gmm.low_noise_eval = True # low-std at test-time # stochastic VAE policy settings - self.algo.vae.enabled = False # whether to train a VAE policy - self.algo.vae.latent_dim = 14 # VAE latent dimnsion - set to twice the dimensionality of action space - 
self.algo.vae.latent_clip = None # clip latent space when decoding (set to None to disable)
- self.algo.vae.kl_weight = 1. # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO
+ self.algo.vae.enabled = False # whether to train a VAE policy
+ self.algo.vae.latent_dim = (
+ 14 # VAE latent dimension - set to twice the dimensionality of action space
+ )
+ self.algo.vae.latent_clip = (
+ None # clip latent space when decoding (set to None to disable)
+ )
+ self.algo.vae.kl_weight = 1.0 # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO

 # VAE decoder settings
- self.algo.vae.decoder.is_conditioned = True # whether decoder should condition on observation
- self.algo.vae.decoder.reconstruction_sum_across_elements = False # sum instead of mean for reconstruction loss
+ self.algo.vae.decoder.is_conditioned = (
+ True # whether decoder should condition on observation
+ )
+ self.algo.vae.decoder.reconstruction_sum_across_elements = (
+ False # sum instead of mean for reconstruction loss
+ )

 # VAE prior settings
- self.algo.vae.prior.learn = False # learn Gaussian / GMM prior instead of N(0, 1)
- self.algo.vae.prior.is_conditioned = False # whether to condition prior on observations
- self.algo.vae.prior.use_gmm = False # whether to use GMM prior
- self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes
- self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights
- self.algo.vae.prior.use_categorical = False # whether to use categorical prior
- self.algo.vae.prior.categorical_dim = 10 # the number of categorical classes for each latent dimension
- self.algo.vae.prior.categorical_gumbel_softmax_hard = False # use hard selection in forward pass
- self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp
- self.algo.vae.prior.categorical_temp_anneal_step = 0.001 # linear temp annealing rate
- self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp
-
- self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions
- self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions
- self.algo.vae.prior_layer_dims = (300, 400) # prior MLP layer dimensions (if learning conditioned prior)
+ self.algo.vae.prior.learn = (
+ False # learn Gaussian / GMM prior instead of N(0, 1)
+ )
+ self.algo.vae.prior.is_conditioned = (
+ False # whether to condition prior on observations
+ )
+ self.algo.vae.prior.use_gmm = False # whether to use GMM prior
+ self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes
+ self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights
+ self.algo.vae.prior.use_categorical = False # whether to use categorical prior
+ self.algo.vae.prior.categorical_dim = (
+ 10 # the number of categorical classes for each latent dimension
+ )
+ self.algo.vae.prior.categorical_gumbel_softmax_hard = (
+ False # use hard selection in forward pass
+ )
+ self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp
+ self.algo.vae.prior.categorical_temp_anneal_step = (
+ 0.001 # linear temp annealing rate
+ )
+ self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp
+
+ self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions
+ self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions
+ self.algo.vae.prior_layer_dims = (
+ 300,
+ 400,
+ ) # prior MLP layer dimensions (if learning conditioned prior)

 # RNN policy settings
- self.algo.rnn.enabled = False # whether to train RNN policy
- self.algo.rnn.horizon = 10 # unroll length for RNN - should usually match train.seq_length
- self.algo.rnn.hidden_dim = 400 # hidden dimension size
- self.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
- self.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
- self.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence)
- self.algo.rnn.kwargs.bidirectional = False # rnn kwargs
+ self.algo.rnn.enabled = False # whether to train RNN policy
+ self.algo.rnn.horizon = (
+ 10 # unroll length for RNN - should usually match train.seq_length
+ )
+ self.algo.rnn.hidden_dim = 400 # hidden dimension size
+ self.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU"
+ self.algo.rnn.num_layers = 2 # number of RNN layers that are stacked
+ self.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence)
+ self.algo.rnn.kwargs.bidirectional = False # rnn kwargs
 self.algo.rnn.kwargs.do_not_lock_keys()

 # Transformer policy settings
- self.algo.transformer.enabled = False # whether to train transformer policy
- self.algo.transformer.context_length = 10 # length of (s, a) seqeunces to feed to transformer - should usually match train.frame_stack
- self.algo.transformer.embed_dim = 512 # dimension for embeddings used by transformer
- self.algo.transformer.num_layers = 6 # number of transformer blocks to stack
- self.algo.transformer.num_heads = 8 # number of attention heads for each transformer block (should divide embed_dim evenly)
- self.algo.transformer.emb_dropout = 0.1 # dropout probability for embedding inputs in transformer
- self.algo.transformer.attn_dropout = 0.1 # dropout probability for attention outputs for each transformer block
- self.algo.transformer.block_output_dropout = 0.1 # dropout probability for final outputs for each transformer block
- self.algo.transformer.sinusoidal_embedding = False # if True, use standard positional encodings (sin/cos)
- self.algo.transformer.activation = "gelu" # activation function for MLP in Transformer Block
- self.algo.transformer.supervise_all_steps = False # if true, supervise all intermediate actions, otherwise only final one
- self.algo.transformer.nn_parameter_for_timesteps = True # if true, use nn.Parameter otherwise use nn.Embedding
+ self.algo.transformer.enabled = False # whether to train transformer policy
+ self.algo.transformer.context_length = 10 # length of (s, a) sequences to feed to transformer - should usually match train.frame_stack
+ self.algo.transformer.embed_dim = (
+ 512 # dimension for embeddings used by transformer
+ )
+ self.algo.transformer.num_layers = 6 # number of transformer blocks to stack
+ self.algo.transformer.num_heads = 8 # number of attention heads for each transformer block (should divide embed_dim evenly)
+ self.algo.transformer.emb_dropout = (
+ 0.1 # dropout probability for embedding inputs in transformer
+ )
+ self.algo.transformer.attn_dropout = (
+ 0.1 # dropout probability for attention outputs for each transformer block
+ )
+ self.algo.transformer.block_output_dropout = (
+ 0.1 # dropout probability for final outputs for each transformer block
+ )
+ self.algo.transformer.sinusoidal_embedding = (
+ False # if True, use standard positional encodings (sin/cos)
+ )
+ self.algo.transformer.activation = (
+ "gelu" # activation function for MLP in Transformer Block
+ )
+ self.algo.transformer.supervise_all_steps = False # if true, supervise all intermediate actions, otherwise only
final one + self.algo.transformer.nn_parameter_for_timesteps = ( + True # if true, use nn.Parameter otherwise use nn.Embedding + ) diff --git a/robomimic/config/bcq_config.py b/robomimic/config/bcq_config.py index e28f5ba5..e250ea63 100644 --- a/robomimic/config/bcq_config.py +++ b/robomimic/config/bcq_config.py @@ -11,63 +11,111 @@ class BCQConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ - + # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = 1e-3 # critic learning rate - self.algo.optim_params.critic.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength - self.algo.optim_params.critic.start_epoch = -1 # number of epochs before starting critic training (-1 means start right away) - self.algo.optim_params.critic.end_epoch = -1 # number of epochs before ending critic training (-1 means start right away) + self.algo.optim_params.critic.learning_rate.initial = ( + 1e-3 # critic learning rate + ) + self.algo.optim_params.critic.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.critic.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + self.algo.optim_params.critic.start_epoch = ( + -1 + ) # number of epochs before starting critic training (-1 means start right away) + self.algo.optim_params.critic.end_epoch = ( + -1 + ) # number of epochs before ending critic training (-1 means start right away) - self.algo.optim_params.action_sampler.learning_rate.initial = 1e-3 # action sampler learning rate - self.algo.optim_params.action_sampler.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.action_sampler.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.action_sampler.regularization.L2 = 0.00 # L2 regularization strength - self.algo.optim_params.action_sampler.start_epoch = -1 # number of epochs before starting action sampler training (-1 means start right away) - self.algo.optim_params.action_sampler.end_epoch = -1 # number of epochs before ending action sampler training (-1 means start right away) + self.algo.optim_params.action_sampler.learning_rate.initial = ( + 1e-3 # action sampler learning rate + ) + self.algo.optim_params.action_sampler.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.action_sampler.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.action_sampler.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + self.algo.optim_params.action_sampler.start_epoch = ( + -1 
+ ) # number of epochs before starting action sampler training (-1 means start right away) + self.algo.optim_params.action_sampler.end_epoch = ( + -1 + ) # number of epochs before ending action sampler training (-1 means start right away) - self.algo.optim_params.actor.learning_rate.initial = 1e-3 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength - self.algo.optim_params.actor.start_epoch = -1 # number of epochs before starting actor training (-1 means start right away) - self.algo.optim_params.actor.end_epoch = -1 # number of epochs before ending actor training (-1 means start right away) + self.algo.optim_params.actor.learning_rate.initial = 1e-3 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.actor.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + self.algo.optim_params.actor.start_epoch = ( + -1 + ) # number of epochs before starting actor training (-1 means start right away) + self.algo.optim_params.actor.end_epoch = ( + -1 + ) # number of epochs before ending actor training (-1 means start right away) # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks - self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon + self.algo.discount = 0.99 # discount factor to use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks + self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) - self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates - self.algo.critic.num_action_samples = 10 # number of actions to sample per training batch to get target critic value - self.algo.critic.num_action_samples_rollout = 100 # number of actions to sample per environment step + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = ( + None # L2 gradient clipping for critic (None to use no clipping) + ) + self.algo.critic.value_bounds = ( + None # optional 2-tuple to ensure lower and upper bound on value estimates + ) + self.algo.critic.num_action_samples = 10 # number of actions to sample per training batch to get target critic value + self.algo.critic.num_action_samples_rollout = ( + 100 # number of actions to sample per environment step + ) # critic ensemble parameters (TD3 trick) - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.ensemble.weight = 0.75 # weighting for mixing min and max for target Q value + self.algo.critic.ensemble.n = 2 # number of Q networks 
in the ensemble + self.algo.critic.ensemble.weight = ( + 0.75 # weighting for mixing min and max for target Q value + ) # distributional critic - self.algo.critic.distributional.enabled = False # train distributional critic (C51) - self.algo.critic.distributional.num_atoms = 51 # number of values in categorical distribution + self.algo.critic.distributional.enabled = ( + False # train distributional critic (C51) + ) + self.algo.critic.distributional.num_atoms = ( + 51 # number of values in categorical distribution + ) - self.algo.critic.layer_dims = (300, 400) # size of critic MLP + self.algo.critic.layer_dims = (300, 400) # size of critic MLP # ================== Action Sampler Config =================== self.algo.action_sampler = BCConfig().algo # use VAE by default self.algo.action_sampler.vae.enabled = True # remove unused parts of BCConfig algo config - del self.algo.action_sampler.optim_params # since action sampler optim params specified at top-level + del ( + self.algo.action_sampler.optim_params + ) # since action sampler optim params specified at top-level del self.algo.action_sampler.loss del self.algo.action_sampler.gaussian del self.algo.action_sampler.rnn @@ -78,6 +126,8 @@ def algo_config(self): self.algo.action_sampler.freeze_encoder_epoch = -1 # ================== Actor Network Config =================== - self.algo.actor.enabled = False # whether to use the actor perturbation network - self.algo.actor.perturbation_scale = 0.05 # size of learned action perturbations - self.algo.actor.layer_dims = (300, 400) # size of actor MLP + self.algo.actor.enabled = False # whether to use the actor perturbation network + self.algo.actor.perturbation_scale = ( + 0.05 # size of learned action perturbations + ) + self.algo.actor.layer_dims = (300, 400) # size of actor MLP diff --git a/robomimic/config/config.py b/robomimic/config/config.py index 74da6535..9fd39bbe 100644 --- a/robomimic/config/config.py +++ b/robomimic/config/config.py @@ -14,11 +14,13 @@ class Config(dict): def __init__(__self, *args, **kwargs): - object.__setattr__(__self, '__key_locked', False) # disallow adding new keys - object.__setattr__(__self, '__all_locked', False) # disallow both key and value update - object.__setattr__(__self, '__do_not_lock_keys', False) # cannot be key-locked - object.__setattr__(__self, '__parent', kwargs.pop('__parent', None)) - object.__setattr__(__self, '__key', kwargs.pop('__key', None)) + object.__setattr__(__self, "__key_locked", False) # disallow adding new keys + object.__setattr__( + __self, "__all_locked", False + ) # disallow both key and value update + object.__setattr__(__self, "__do_not_lock_keys", False) # cannot be key-locked + object.__setattr__(__self, "__parent", kwargs.pop("__parent", None)) + object.__setattr__(__self, "__key", kwargs.pop("__key", None)) for arg in args: if not arg: continue @@ -39,9 +41,9 @@ def lock(self): Lock the config. Afterwards, new keys cannot be added to the config, and the values of existing keys cannot be modified. """ - object.__setattr__(self, '__all_locked', True) + object.__setattr__(self, "__all_locked", True) if self.key_lockable: - object.__setattr__(self, '__key_locked', True) + object.__setattr__(self, "__key_locked", True) for k in self: if isinstance(self[k], Config): @@ -52,8 +54,8 @@ def unlock(self): Unlock the config. Afterwards, new keys can be added to the config, and the values of existing keys can be modified. 
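
For reference, a minimal sketch of the lock/unlock lifecycle documented here; this is an illustration of the API shown in this diff, not part of the patch:

    from robomimic.config.config import Config

    cfg = Config()
    cfg.foo = 1            # unlocked config: new keys may be added
    cfg.lock()             # now neither keys nor values may change
    assert cfg.is_locked
    cfg.unlock()
    cfg.bar = 2            # allowed again after unlocking
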
""" - object.__setattr__(self, '__all_locked', False) - object.__setattr__(self, '__key_locked', False) + object.__setattr__(self, "__all_locked", False) + object.__setattr__(self, "__key_locked", False) for k in self: if isinstance(self[k], Config): @@ -63,7 +65,10 @@ def _get_lock_state_recursive(self): """ Internal helper function to get the lock state of all sub-configs recursively. """ - lock_state = {"__all_locked": self.is_locked, "__key_locked": self.is_key_locked} + lock_state = { + "__all_locked": self.is_locked, + "__key_locked": self.is_key_locked, + } for k in self: if isinstance(self[k], Config): assert k not in ["__all_locked", "__key_locked"] @@ -75,8 +80,8 @@ def _set_lock_state_recursive(self, lock_state): Internal helper function to set the lock state of all sub-configs recursively. """ lock_state = deepcopy(lock_state) - object.__setattr__(self, '__all_locked', lock_state.pop("__all_locked")) - object.__setattr__(self, '__key_locked', lock_state.pop("__key_locked")) + object.__setattr__(self, "__all_locked", lock_state.pop("__all_locked")) + object.__setattr__(self, "__key_locked", lock_state.pop("__key_locked")) for k in lock_state: if isinstance(self[k], Config): self[k]._set_lock_state_recursive(lock_state[k]) @@ -91,10 +96,7 @@ def _get_lock_state(self): a "key_locked" key that is True if only key updates are locked (value updates still allowed) and False otherwise """ - return { - "all_locked": self.is_locked, - "key_locked": self.is_key_locked - } + return {"all_locked": self.is_locked, "key_locked": self.is_key_locked} def _set_lock_state(self, lock_state): """ @@ -127,7 +129,7 @@ def unlocked(self): def values_unlocked(self): """ A context scope for modifying a Config object. Within the scope, - only values can be updated (new keys cannot be created). Upon + only values can be updated (new keys cannot be created). Upon leaving the scope, the initial level of locking is restored. """ lock_state = self._get_lock_state() @@ -142,7 +144,7 @@ def lock_keys(self): """ if not self.key_lockable: return - object.__setattr__(self, '__key_locked', True) + object.__setattr__(self, "__key_locked", True) for k in self: if isinstance(self[k], Config): self[k].lock_keys() @@ -151,7 +153,7 @@ def unlock_keys(self): """ Unlock this config so that new keys can be added. """ - object.__setattr__(self, '__key_locked', False) + object.__setattr__(self, "__key_locked", False) for k in self: if isinstance(self[k], Config): self[k].unlock_keys() @@ -161,48 +163,55 @@ def is_locked(self): """ Returns True if the config is locked (no key or value updates allowed). """ - return object.__getattribute__(self, '__all_locked') + return object.__getattribute__(self, "__all_locked") @property def is_key_locked(self): """ Returns True if the config is key-locked (no key updates allowed). """ - return object.__getattribute__(self, '__key_locked') + return object.__getattribute__(self, "__key_locked") def do_not_lock_keys(self): """ - Calling this function on this config indicates that key updates should be + Calling this function on this config indicates that key updates should be allowed even when this config is key-locked (but not when it is completely locked). This is convenient for attributes that contain kwargs, where there might be a variable type and number of arguments contained in the sub-config. 
""" - object.__setattr__(self, '__do_not_lock_keys', True) + object.__setattr__(self, "__do_not_lock_keys", True) @property def key_lockable(self): """ - Returns true if this config is key-lockable (new keys cannot be inserted in a + Returns true if this config is key-lockable (new keys cannot be inserted in a key-locked lock level). """ - return not object.__getattribute__(self, '__do_not_lock_keys') + return not object.__getattribute__(self, "__do_not_lock_keys") def __setattr__(self, name, value): if self.is_locked: - raise RuntimeError("This config has been locked - cannot set attribute '{}' to {}".format(name, value)) + raise RuntimeError( + "This config has been locked - cannot set attribute '{}' to {}".format( + name, value + ) + ) if hasattr(Config, name): - raise AttributeError("'Dict' object attribute " - "'{0}' is read-only".format(name)) + raise AttributeError( + "'Dict' object attribute " "'{0}' is read-only".format(name) + ) elif not hasattr(self, name) and self.is_key_locked: - raise RuntimeError("This config is key-locked - cannot add key '{}'".format(name)) + raise RuntimeError( + "This config is key-locked - cannot add key '{}'".format(name) + ) else: self[name] = value def __setitem__(self, name, value): super(Config, self).__setitem__(name, value) - p = object.__getattribute__(self, '__parent') - key = object.__getattribute__(self, '__key') + p = object.__getattribute__(self, "__parent") + key = object.__getattribute__(self, "__key") if p is not None: p[key] = self @@ -233,8 +242,14 @@ def __repr__(self): def __getitem__(self, name): if name not in self: - if object.__getattribute__(self, '__all_locked') or object.__getattribute__(self, '__key_locked'): - raise RuntimeError("This config has been locked and '{}' is not in this config".format(name)) + if object.__getattribute__(self, "__all_locked") or object.__getattribute__( + self, "__key_locked" + ): + raise RuntimeError( + "This config has been locked and '{}' is not in this config".format( + name + ) + ) return Config(__parent=self, __key=name) return super(Config, self).__getitem__(name) @@ -248,8 +263,9 @@ def to_dict(self): base[key] = value.to_dict() elif isinstance(value, (list, tuple)): base[key] = type(value)( - item.to_dict() if isinstance(item, type(self)) else - item for item in value) + item.to_dict() if isinstance(item, type(self)) else item + for item in value + ) else: base[key] = value return base @@ -272,7 +288,7 @@ def update(self, *args, **kwargs): Update this config using another config or nested dictionary. 
""" if self.is_locked: - raise RuntimeError('Cannot update - this config has been locked') + raise RuntimeError("Cannot update - this config has been locked") other = {} if args: if len(args) > 1: @@ -281,7 +297,11 @@ def update(self, *args, **kwargs): other.update(kwargs) for k, v in other.items(): if self.is_key_locked and k not in self: - raise RuntimeError("Cannot update - this config has been key-locked and key '{}' does not exist".format(k)) + raise RuntimeError( + "Cannot update - this config has been key-locked and key '{}' does not exist".format( + k + ) + ) if (not isinstance(self[k], dict)) or (not isinstance(v, dict)): self[k] = v else: @@ -319,4 +339,4 @@ def dump(self, filename=None): f = open(filename, "w") f.write(json_string) f.close() - return json_string \ No newline at end of file + return json_string diff --git a/robomimic/config/cql_config.py b/robomimic/config/cql_config.py index 26fea048..15858e99 100644 --- a/robomimic/config/cql_config.py +++ b/robomimic/config/cql_config.py @@ -19,64 +19,100 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = 1e-3 # critic learning rate - self.algo.optim_params.critic.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength - - self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.critic.learning_rate.initial = ( + 1e-3 # critic learning rate + ) + self.algo.optim_params.critic.learning_rate.decay_factor = ( + 0.0 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.critic.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + + self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = ( + 0.0 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.actor.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = ( + 0.00 # L2 regularization strength + ) # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks + self.algo.discount = 0.99 # discount factor to 
use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks # ================== Actor Network Config =================== - self.algo.actor.bc_start_steps = 0 # uses BC policy loss for first n-training steps - self.algo.actor.target_entropy = "default" # None is fixed entropy, otherwise is automatically tuned to match target. Can specify "default" as well for default tuning target - self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.actor.bc_start_steps = ( + 0 # uses BC policy loss for first n-training steps + ) + self.algo.actor.target_entropy = "default" # None is fixed entropy, otherwise is automatically tuned to match target. Can specify "default" as well for default tuning target + self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor # Actor network settings - self.algo.actor.net.type = "gaussian" # Options are currently only "gaussian" (no support for GMM yet) + self.algo.actor.net.type = ( + "gaussian" # Options are currently only "gaussian" (no support for GMM yet) + ) # Actor network settings - shared - self.algo.actor.net.common.std_activation = "exp" # Activation to use for std output from policy net - self.algo.actor.net.common.use_tanh = True # Whether to use tanh at output of actor network - self.algo.actor.net.common.low_noise_eval = True # Whether to use deterministic action sampling at eval stage + self.algo.actor.net.common.std_activation = ( + "exp" # Activation to use for std output from policy net + ) + self.algo.actor.net.common.use_tanh = ( + True # Whether to use tanh at output of actor network + ) + self.algo.actor.net.common.low_noise_eval = ( + True # Whether to use deterministic action sampling at eval stage + ) # Actor network settings - gaussian - self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value - self.algo.actor.net.gaussian.init_std = 0.3 # Relative scaling factor for std from policy net - self.algo.actor.net.gaussian.fixed_std = False # Whether to learn std dev or not + self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value + self.algo.actor.net.gaussian.init_std = ( + 0.3 # Relative scaling factor for std from policy net + ) + self.algo.actor.net.gaussian.fixed_std = ( + False # Whether to learn std dev or not + ) - self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions + self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = ( + None # L2 gradient clipping for critic (None to use no clipping) + ) - self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates + self.algo.critic.value_bounds = ( + None # optional 2-tuple to ensure lower and upper bound on value estimates + ) - self.algo.critic.num_action_samples = 1 # number of actions to sample per training batch to get target critic value; use maximum Q value from n random sampled actions when doing TD error backup + 
self.algo.critic.num_action_samples = 1 # number of actions to sample per training batch to get target critic value; use maximum Q value from n random sampled actions when doing TD error backup # cql settings for critic - self.algo.critic.cql_weight = 1.0 # weighting for cql component of critic loss (only used if target_q_gap is < 0 or None) - self.algo.critic.deterministic_backup = True # if not set, subtract weighted logprob of action when doing backup - self.algo.critic.min_q_weight = 1.0 # min q weight (scaling factor) to apply - self.algo.critic.target_q_gap = 5.0 # if set, sets the diff threshold at which Q-values will be penalized more (note: this overrides cql weight above!) Use None or a negative value if not set - self.algo.critic.num_random_actions = 10 # Number of random actions to sample when calculating CQL loss + self.algo.critic.cql_weight = 1.0 # weighting for cql component of critic loss (only used if target_q_gap is < 0 or None) + self.algo.critic.deterministic_backup = ( + True # if not set, subtract weighted logprob of action when doing backup + ) + self.algo.critic.min_q_weight = 1.0 # min q weight (scaling factor) to apply + self.algo.critic.target_q_gap = 5.0 # if set, sets the diff threshold at which Q-values will be penalized more (note: this overrides cql weight above!) Use None or a negative value if not set + self.algo.critic.num_random_actions = ( + 10 # Number of random actions to sample when calculating CQL loss + ) # critic ensemble parameters (TD3 trick) - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions + self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions diff --git a/robomimic/config/gl_config.py b/robomimic/config/gl_config.py index 939103e6..5e826c5c 100644 --- a/robomimic/config/gl_config.py +++ b/robomimic/config/gl_config.py @@ -11,63 +11,92 @@ class GLConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
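
For reference, a minimal sketch of overriding the subgoal settings defined below; attribute names come from this file, the values are illustrative:

    from robomimic.config.gl_config import GLConfig

    config = GLConfig()
    config.algo.subgoal_horizon = 20   # predict the observation 20 steps ahead
    config.algo.vae.enabled = True     # plan subgoals with the VAE (the default here)
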
""" # optimization parameters - self.algo.optim_params.goal_network.learning_rate.initial = 1e-4 # goal network learning rate - self.algo.optim_params.goal_network.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.goal_network.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.goal_network.learning_rate.initial = ( + 1e-4 # goal network learning rate + ) + self.algo.optim_params.goal_network.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.goal_network.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs self.algo.optim_params.goal_network.regularization.L2 = 0.00 # subgoal definition: observation that is @subgoal_horizon number of timesteps in future from current observation - self.algo.subgoal_horizon = 10 + self.algo.subgoal_horizon = 10 # MLP size for deterministic goal network (unused if VAE is enabled) self.algo.ae.planner_layer_dims = (300, 400) # ================== VAE config ================== - self.algo.vae.enabled = True # set to true to use VAE network - self.algo.vae.latent_dim = 16 # VAE latent dimension - self.algo.vae.latent_clip = None # clip latent space when decoding (set to None to disable) - self.algo.vae.kl_weight = 1. # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO + self.algo.vae.enabled = True # set to true to use VAE network + self.algo.vae.latent_dim = 16 # VAE latent dimension + self.algo.vae.latent_clip = ( + None # clip latent space when decoding (set to None to disable) + ) + self.algo.vae.kl_weight = 1.0 # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO # VAE decoder settings - self.algo.vae.decoder.is_conditioned = True # whether decoder should condition on observation - self.algo.vae.decoder.reconstruction_sum_across_elements = False # sum instead of mean for reconstruction loss + self.algo.vae.decoder.is_conditioned = ( + True # whether decoder should condition on observation + ) + self.algo.vae.decoder.reconstruction_sum_across_elements = ( + False # sum instead of mean for reconstruction loss + ) # VAE prior settings - self.algo.vae.prior.learn = False # learn Gaussian / GMM prior instead of N(0, 1) - self.algo.vae.prior.is_conditioned = False # whether to condition prior on observations - self.algo.vae.prior.use_gmm = False # whether to use GMM prior - self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes - self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights - self.algo.vae.prior.use_categorical = False # whether to use categorical prior - self.algo.vae.prior.categorical_dim = 10 # the number of categorical classes for each latent dimension - self.algo.vae.prior.categorical_gumbel_softmax_hard = False # use hard selection in forward pass - self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp - self.algo.vae.prior.categorical_temp_anneal_step = 0.001 # linear temp annealing rate - self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp + self.algo.vae.prior.learn = ( + False # learn Gaussian / GMM prior instead of N(0, 1) + ) + self.algo.vae.prior.is_conditioned = ( + False # whether to condition prior on observations + ) + self.algo.vae.prior.use_gmm = False # whether to use GMM prior + self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes + self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights + 
self.algo.vae.prior.use_categorical = False # whether to use categorical prior + self.algo.vae.prior.categorical_dim = ( + 10 # the number of categorical classes for each latent dimension + ) + self.algo.vae.prior.categorical_gumbel_softmax_hard = ( + False # use hard selection in forward pass + ) + self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp + self.algo.vae.prior.categorical_temp_anneal_step = ( + 0.001 # linear temp annealing rate + ) + self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp - self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions - self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions - self.algo.vae.prior_layer_dims = (300, 400) # prior MLP layer dimensions (if learning conditioned prior) + self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions + self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions + self.algo.vae.prior_layer_dims = ( + 300, + 400, + ) # prior MLP layer dimensions (if learning conditioned prior) def observation_config(self): """ Update from superclass to specify subgoal modalities. """ super(GLConfig, self).observation_config() - self.observation.modalities.subgoal.low_dim = [ # specify low-dim subgoal observations for agent to predict - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - self.observation.modalities.subgoal.rgb = [] # specify rgb image subgoal observations for agent to predict + self.observation.modalities.subgoal.low_dim = ( + [ # specify low-dim subgoal observations for agent to predict + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + ) + self.observation.modalities.subgoal.rgb = ( + [] + ) # specify rgb image subgoal observations for agent to predict self.observation.modalities.subgoal.depth = [] self.observation.modalities.subgoal.scan = [] self.observation.modalities.subgoal.do_not_lock_keys() @@ -78,12 +107,19 @@ def all_obs_keys(self): Update from superclass to include subgoals. """ # pool all modalities - return sorted(tuple(set([ - obs_key for group in [ - self.observation.modalities.obs.values(), - self.observation.modalities.goal.values(), - self.observation.modalities.subgoal.values(), - ] - for modality in group - for obs_key in modality - ]))) + return sorted( + tuple( + set( + [ + obs_key + for group in [ + self.observation.modalities.obs.values(), + self.observation.modalities.goal.values(), + self.observation.modalities.subgoal.values(), + ] + for modality in group + for obs_key in modality + ] + ) + ) + ) diff --git a/robomimic/config/hbc_config.py b/robomimic/config/hbc_config.py index ae65c9b8..16a1dbdf 100644 --- a/robomimic/config/hbc_config.py +++ b/robomimic/config/hbc_config.py @@ -15,13 +15,15 @@ def train_config(self): Update from superclass to change default sequence length to load from dataset. """ super(HBCConfig, self).train_config() - self.train.seq_length = 10 # length of experience sequence to fetch from the buffer + self.train.seq_length = ( + 10 # length of experience sequence to fetch from the buffer + ) def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. 
Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ @@ -31,14 +33,17 @@ def algo_config(self): # on planner subgoal predictions. In "actor_only" mode, only the actor is trained, and in # "planner_only" mode, only the planner is trained. self.algo.mode = "separate" - self.algo.actor_use_random_subgoals = False # whether to sample subgoal index from [1, subgoal_horizon] - self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time - + self.algo.actor_use_random_subgoals = ( + False # whether to sample subgoal index from [1, subgoal_horizon] + ) + self.algo.subgoal_update_interval = ( + 10 # how frequently the subgoal should be updated at test-time + ) # ================== Latent Subgoal Config ================== - self.algo.latent_subgoal.enabled = False # if True, use VAE latent space as subgoals for actor, instead of reconstructions + self.algo.latent_subgoal.enabled = False # if True, use VAE latent space as subgoals for actor, instead of reconstructions - # prior correction trick for actor and value training: instead of using encoder for + # prior correction trick for actor and value training: instead of using encoder for # transforming subgoals to latent subgoals, generate prior samples and choose # the closest one to the encoder output self.algo.latent_subgoal.prior_correction.enabled = False @@ -73,9 +78,13 @@ def use_goals(self): """ Update from superclass - planner goal modalities determine goal-conditioning """ - return len( - self.observation.planner.modalities.goal.low_dim + - self.observation.planner.modalities.goal.rgb) > 0 + return ( + len( + self.observation.planner.modalities.goal.low_dim + + self.observation.planner.modalities.goal.rgb + ) + > 0 + ) @property def all_obs_keys(self): @@ -83,14 +92,21 @@ def all_obs_keys(self): Update from superclass to include modalities from planner and actor. """ # pool all modalities - return sorted(tuple(set([ - obs_key for group in [ - self.observation.planner.modalities.obs.values(), - self.observation.planner.modalities.goal.values(), - self.observation.planner.modalities.subgoal.values(), - self.observation.actor.modalities.obs.values(), - self.observation.actor.modalities.goal.values(), - ] - for modality in group - for obs_key in modality - ]))) + return sorted( + tuple( + set( + [ + obs_key + for group in [ + self.observation.planner.modalities.obs.values(), + self.observation.planner.modalities.goal.values(), + self.observation.planner.modalities.subgoal.values(), + self.observation.actor.modalities.obs.values(), + self.observation.actor.modalities.goal.values(), + ] + for modality in group + for obs_key in modality + ] + ) + ) + ) diff --git a/robomimic/config/iql_config.py b/robomimic/config/iql_config.py index bd603d1a..16bbd2ea 100644 --- a/robomimic/config/iql_config.py +++ b/robomimic/config/iql_config.py @@ -10,64 +10,94 @@ class IQLConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. 
Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ super(IQLConfig, self).algo_config() - # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = 1e-4 # critic learning rate - self.algo.optim_params.critic.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength - - self.algo.optim_params.vf.learning_rate.initial = 1e-4 # vf learning rate - self.algo.optim_params.vf.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.vf.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.vf.regularization.L2 = 0.00 # L2 regularization strength - - self.algo.optim_params.actor.learning_rate.initial = 1e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength + # optimization parameters + self.algo.optim_params.critic.learning_rate.initial = ( + 1e-4 # critic learning rate + ) + self.algo.optim_params.critic.learning_rate.decay_factor = ( + 0.0 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.critic.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + + self.algo.optim_params.vf.learning_rate.initial = 1e-4 # vf learning rate + self.algo.optim_params.vf.learning_rate.decay_factor = ( + 0.0 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.vf.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.vf.regularization.L2 = 0.00 # L2 regularization strength + + self.algo.optim_params.actor.learning_rate.initial = 1e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = ( + 0.0 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.actor.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = ( + 0.00 # L2 regularization strength + ) # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.target_tau = 0.01 # update rate for target networks + self.algo.discount = 0.99 # discount factor to use + self.algo.target_tau = 0.01 # update rate for target networks # ================== Actor Network Config =================== # Actor network settings - self.algo.actor.net.type = "gaussian" # Options are currently ["gaussian", "gmm"] + self.algo.actor.net.type = ( + "gaussian" # Options are currently ["gaussian", "gmm"] + ) # Actor network settings - shared - self.algo.actor.net.common.std_activation = "softplus" # Activation to use for std output from policy net - self.algo.actor.net.common.low_noise_eval = True # Whether to 
use deterministic action sampling at eval stage - self.algo.actor.net.common.use_tanh = False # Whether to use tanh at output of actor network + self.algo.actor.net.common.std_activation = ( + "softplus" # Activation to use for std output from policy net + ) + self.algo.actor.net.common.low_noise_eval = ( + True # Whether to use deterministic action sampling at eval stage + ) + self.algo.actor.net.common.use_tanh = ( + False # Whether to use tanh at output of actor network + ) # Actor network settings - gaussian - self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value - self.algo.actor.net.gaussian.init_std = 0.3 # Relative scaling factor for std from policy net - self.algo.actor.net.gaussian.fixed_std = False # Whether to learn std dev or not + self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value + self.algo.actor.net.gaussian.init_std = ( + 0.3 # Relative scaling factor for std from policy net + ) + self.algo.actor.net.gaussian.fixed_std = ( + False # Whether to learn std dev or not + ) - self.algo.actor.net.gmm.num_modes = 5 # number of GMM modes - self.algo.actor.net.gmm.min_std = 0.0001 # minimum std output from network + self.algo.actor.net.gmm.num_modes = 5 # number of GMM modes + self.algo.actor.net.gmm.min_std = 0.0001 # minimum std output from network - self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions + self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions - self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor # ================== Critic Network Config =================== # critic ensemble parameters - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = None # L2 gradient clipping for actor # ================== Adv Config ============================== - self.algo.adv.clip_adv_value = None # whether to clip raw advantage estimates - self.algo.adv.beta = 1.0 # temperature for operator - self.algo.adv.use_final_clip = True # whether to clip final weight calculations + self.algo.adv.clip_adv_value = None # whether to clip raw advantage estimates + self.algo.adv.beta = 1.0 # temperature for operator + self.algo.adv.use_final_clip = True # whether to clip final weight calculations - self.algo.vf_quantile = 0.9 # quantile factor in quantile regression + self.algo.vf_quantile = 0.9 # quantile factor in quantile regression diff --git a/robomimic/config/iris_config.py b/robomimic/config/iris_config.py index c03328ce..c16da304 100644 --- a/robomimic/config/iris_config.py +++ b/robomimic/config/iris_config.py @@ -13,9 +13,9 @@ class IRISConfig(HBCConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the 
`algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ @@ -26,8 +26,10 @@ def algo_config(self): # "planner_only" mode, only the planner is trained. self.algo.mode = "separate" - self.algo.actor_use_random_subgoals = False # whether to sample subgoal index from [1, subgoal_horizon] - self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time (usually matches train.seq_length) + self.algo.actor_use_random_subgoals = ( + False # whether to sample subgoal index from [1, subgoal_horizon] + ) + self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time (usually matches train.seq_length) # ================== Latent Subgoal Config ================== @@ -47,7 +49,7 @@ def algo_config(self): # The ValuePlanner value component is a BCQ model self.algo.value_planner.value = BCQConfig().algo - self.algo.value_planner.value.actor.enabled = False # ensure no BCQ actor + self.algo.value_planner.value.actor.enabled = False # ensure no BCQ actor # number of subgoal samples to use for value planner self.algo.value_planner.num_samples = 100 @@ -74,9 +76,13 @@ def use_goals(self): """ Update from superclass - value planner goal modalities determine goal-conditioning. """ - return len( - self.observation.value_planner.planner.modalities.goal.low_dim + - self.observation.value_planner.planner.modalities.goal.rgb) > 0 + return ( + len( + self.observation.value_planner.planner.modalities.goal.low_dim + + self.observation.value_planner.planner.modalities.goal.rgb + ) + > 0 + ) @property def all_obs_keys(self): @@ -84,16 +90,23 @@ def all_obs_keys(self): Update from superclass to include modalities from value planner and actor. 
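
For reference, a minimal sketch of the key pooling performed below, with illustrative input keys:

    planner_obs = ["agentview_image", "object"]
    actor_obs = ["object", "robot0_eef_pos"]
    pooled = sorted(tuple(set(planner_obs + actor_obs)))   # de-duplicate, then sort
    assert pooled == ["agentview_image", "object", "robot0_eef_pos"]
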
""" # pool all modalities - return sorted(tuple(set([ - obs_key for group in [ - self.observation.value_planner.planner.modalities.obs.values(), - self.observation.value_planner.planner.modalities.goal.values(), - self.observation.value_planner.planner.modalities.subgoal.values(), - self.observation.value_planner.value.modalities.obs.values(), - self.observation.value_planner.value.modalities.goal.values(), - self.observation.actor.modalities.obs.values(), - self.observation.actor.modalities.goal.values(), - ] - for modality in group - for obs_key in modality - ]))) + return sorted( + tuple( + set( + [ + obs_key + for group in [ + self.observation.value_planner.planner.modalities.obs.values(), + self.observation.value_planner.planner.modalities.goal.values(), + self.observation.value_planner.planner.modalities.subgoal.values(), + self.observation.value_planner.value.modalities.obs.values(), + self.observation.value_planner.value.modalities.goal.values(), + self.observation.actor.modalities.obs.values(), + self.observation.actor.modalities.goal.values(), + ] + for modality in group + for obs_key in modality + ] + ) + ) + ) diff --git a/robomimic/config/td3_bc_config.py b/robomimic/config/td3_bc_config.py index 036a2591..e52879b2 100644 --- a/robomimic/config/td3_bc_config.py +++ b/robomimic/config/td3_bc_config.py @@ -19,7 +19,7 @@ def experiment_config(self): self.experiment.render_video = False # save 10 checkpoints throughout training - self.experiment.save.every_n_epochs = 20 + self.experiment.save.every_n_epochs = 20 # save models that achieve best rollout return instead of best success rate self.experiment.save.on_best_rollout_return = True @@ -30,9 +30,9 @@ def experiment_config(self): # evaluate with normal environment rollouts self.experiment.rollout.enabled = True - self.experiment.rollout.n = 50 # paper uses 10, but we can afford to do 50 + self.experiment.rollout.n = 50 # paper uses 10, but we can afford to do 50 self.experiment.rollout.horizon = 1000 - self.experiment.rollout.rate = 1 # rollout every epoch to match paper + self.experiment.rollout.rate = 1 # rollout every epoch to match paper def train_config(self): """ @@ -41,7 +41,7 @@ def train_config(self): super(TD3_BCConfig, self).train_config() # update to normalize observations - self.train.hdf5_normalize_obs = True + self.train.hdf5_normalize_obs = True # increase batch size to 256 self.train.batch_size = 256 @@ -51,46 +51,74 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
""" # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = 3e-4 # critic learning rate - self.algo.optim_params.critic.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength - self.algo.optim_params.critic.start_epoch = -1 # number of epochs before starting critic training (-1 means start right away) - self.algo.optim_params.critic.end_epoch = -1 # number of epochs before ending critic training (-1 means start right away) - - self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) - self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength - self.algo.optim_params.actor.start_epoch = -1 # number of epochs before starting actor training (-1 means start right away) - self.algo.optim_params.actor.end_epoch = -1 # number of epochs before ending actor training (-1 means start right away) + self.algo.optim_params.critic.learning_rate.initial = ( + 3e-4 # critic learning rate + ) + self.algo.optim_params.critic.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.critic.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + self.algo.optim_params.critic.start_epoch = ( + -1 + ) # number of epochs before starting critic training (-1 means start right away) + self.algo.optim_params.critic.end_epoch = ( + -1 + ) # number of epochs before ending critic training (-1 means start right away) + + self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = ( + 0.1 # factor to decay LR by (if epoch schedule non-empty) + ) + self.algo.optim_params.actor.learning_rate.epoch_schedule = ( + [] + ) # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = ( + 0.00 # L2 regularization strength + ) + self.algo.optim_params.actor.start_epoch = ( + -1 + ) # number of epochs before starting actor training (-1 means start right away) + self.algo.optim_params.actor.end_epoch = ( + -1 + ) # number of epochs before ending actor training (-1 means start right away) # alpha value - for weighting critic loss vs. 
BC loss self.algo.alpha = 2.5 # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks - self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon + self.algo.discount = 0.99 # discount factor to use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks + self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) - self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = ( + None # L2 gradient clipping for critic (None to use no clipping) + ) + self.algo.critic.value_bounds = ( + None # optional 2-tuple to ensure lower and upper bound on value estimates + ) # critic ensemble parameters (TD3 trick) - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.ensemble.weight = 1.0 # weighting for mixing min and max for target Q value + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.ensemble.weight = ( + 1.0 # weighting for mixing min and max for target Q value + ) - self.algo.critic.layer_dims = (256, 256) # size of critic MLP + self.algo.critic.layer_dims = (256, 256) # size of critic MLP # ================== Actor Network Config =================== @@ -98,10 +126,14 @@ def algo_config(self): self.algo.actor.update_freq = 2 # exploration noise used to form target action for Q-update - clipped Gaussian noise - self.algo.actor.noise_std = 0.2 # zero-mean gaussian noise with this std is applied to actions - self.algo.actor.noise_clip = 0.5 # noise is clipped in each dimension to (-noise_clip, noise_clip) - - self.algo.actor.layer_dims = (256, 256) # size of actor MLP + self.algo.actor.noise_std = ( + 0.2 # zero-mean gaussian noise with this std is applied to actions + ) + self.algo.actor.noise_clip = ( + 0.5 # noise is clipped in each dimension to (-noise_clip, noise_clip) + ) + + self.algo.actor.layer_dims = (256, 256) # size of actor MLP def observation_config(self): """ diff --git a/robomimic/envs/env_base.py b/robomimic/envs/env_base.py index 9634db01..58f44cec 100644 --- a/robomimic/envs/env_base.py +++ b/robomimic/envs/env_base.py @@ -3,6 +3,7 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. """ + import abc @@ -11,6 +12,7 @@ class EnvType: Holds environment types - one per environment class. These act as identifiers for different environments. 
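The actor/critic settings above correspond to the standard TD3 update tricks.
A rough sketch of how such values are typically consumed -- illustrative only,
not robomimic's Algo implementation:

    import torch

    def soft_update(target_net, source_net, tau=0.005):
        # Polyak-average target parameters toward the live network (target_tau above)
        for tp, sp in zip(target_net.parameters(), source_net.parameters()):
            tp.data.copy_((1.0 - tau) * tp.data + tau * sp.data)

    def smoothed_target_action(actor_target, next_obs, noise_std=0.2, noise_clip=0.5):
        # target-policy smoothing: clipped zero-mean Gaussian noise on target actions,
        # assuming actions are normalized to [-1, 1]
        action = actor_target(next_obs)
        noise = (noise_std * torch.randn_like(action)).clamp(-noise_clip, noise_clip)
        return (action + noise).clamp(-1.0, 1.0)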
""" + ROBOSUITE_TYPE = 1 GYM_TYPE = 2 IG_MOMART_TYPE = 3 @@ -18,15 +20,16 @@ class EnvType: class EnvBase(abc.ABC): """A base class method for environments used by this repo.""" + @abc.abstractmethod def __init__( self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -86,7 +89,7 @@ def reset_to(self, state): Args: state (dict): current simulator state - + Returns: observation (dict): observation dictionary after setting the simulator state """ @@ -189,21 +192,21 @@ def serialize(self): @classmethod @abc.abstractmethod def create_for_data_processing( - cls, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ Create environment for processing datasets, which includes extracting observations, labeling dense / sparse rewards, and annotating dones in - transitions. + transitions. Args: camera_names ([str]): list of camera names that correspond to image observations diff --git a/robomimic/envs/env_gym.py b/robomimic/envs/env_gym.py index 7b56d1eb..6e9f659c 100644 --- a/robomimic/envs/env_gym.py +++ b/robomimic/envs/env_gym.py @@ -3,11 +3,13 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. """ + import json import numpy as np from copy import deepcopy import gym + try: import d4rl except: @@ -19,14 +21,15 @@ class EnvGym(EB.EnvBase): """Wrapper class for gym""" + def __init__( self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -88,14 +91,14 @@ def reset_to(self, state): Args: state (dict): current simulator state that contains: - states (np.ndarray): initial state of the mujoco environment - + Returns: observation (dict): observation dictionary after setting the simulator state """ if hasattr(self.env.unwrapped.sim, "set_state_from_flattened"): self.env.unwrapped.sim.set_state_from_flattened(state["states"]) self.env.unwrapped.sim.forward() - return { "flat" : self.env.unwrapped._get_obs() } + return {"flat": self.env.unwrapped._get_obs()} else: raise NotImplementedError @@ -108,7 +111,7 @@ def render(self, mode="human", height=None, width=None, camera_name=None, **kwar height (int): height of image to render - only used if mode is "rgb_array" width (int): width of image to render - only used if mode is "rgb_array" """ - if mode =="human": + if mode == "human": return self.env.render(mode=mode, **kwargs) if mode == "rgb_array": return self.env.render(mode="rgb_array", height=height, width=width) @@ -126,15 +129,15 @@ def get_observation(self, obs=None): if obs is None: assert self._current_obs is not None obs = self._current_obs - return { "flat" : np.copy(obs) } + return {"flat": np.copy(obs)} def get_state(self): """ Get current environment simulator state as a dictionary. Should be compatible with @reset_to. """ # NOTE: assumes MuJoCo gym task! 
- xml = self.env.sim.model.get_xml() # model xml file - state = np.array(self.env.sim.get_state().flatten()) # simulator state + xml = self.env.sim.model.get_xml() # model xml file + state = np.array(self.env.sim.get_state().flatten()) # simulator state return dict(model=xml, states=state) def get_reward(self): @@ -173,7 +176,7 @@ def is_success(self): return self.env.unwrapped._check_success() # gym envs generally don't check task success - we only compare returns - return { "task" : False } + return {"task": False} @property def action_dimension(self): @@ -203,20 +206,22 @@ def serialize(self): This is the same as @env_meta - environment metadata stored in hdf5 datasets, and used in utils/env_utils.py. """ - return dict(env_name=self.name, type=self.type, env_kwargs=deepcopy(self._init_kwargs)) + return dict( + env_name=self.name, type=self.type, env_kwargs=deepcopy(self._init_kwargs) + ) @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ @@ -264,4 +269,6 @@ def __repr__(self): """ Pretty-print env description. """ - return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + return ( + self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + ) diff --git a/robomimic/envs/env_ig_momart.py b/robomimic/envs/env_ig_momart.py index 951eedf3..14fdf094 100644 --- a/robomimic/envs/env_ig_momart.py +++ b/robomimic/envs/env_ig_momart.py @@ -31,20 +31,21 @@ class EnvGibsonMOMART(EB.EnvBase): Wrapper class for gibson environments (https://github.com/StanfordVL/iGibson) specifically compatible with MoMaRT datasets """ + def __init__( - self, - env_name, - ig_config, - postprocess_visual_obs=True, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - image_height=None, - image_width=None, - physics_timestep=1./240., - action_timestep=1./20., - **kwargs, + self, + env_name, + ig_config, + postprocess_visual_obs=True, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + image_height=None, + image_width=None, + physics_timestep=1.0 / 240.0, + action_timestep=1.0 / 20.0, + **kwargs, ): """ Args: @@ -92,7 +93,9 @@ def __init__( # Warn user that iG always uses a renderer if (not render) and (not render_offscreen): - print("WARNING: iGibson always uses a renderer -- using headless by default.") + print( + "WARNING: iGibson always uses a renderer -- using headless by default." 
+ ) # Update ig config for k, v in kwargs.items(): @@ -100,19 +103,30 @@ def __init__( self.ig_config[k] = v # Set rendering values - self.obs_img_height = image_height if image_height is not None else self.ig_config.get("obs_image_height", 120) - self.obs_img_width = image_width if image_width is not None else self.ig_config.get("obs_image_width", 120) + self.obs_img_height = ( + image_height + if image_height is not None + else self.ig_config.get("obs_image_height", 120) + ) + self.obs_img_width = ( + image_width + if image_width is not None + else self.ig_config.get("obs_image_width", 120) + ) # Get class to create envClass = ENV_MAPPING.get(self._env_name, None) # Make sure we have a valid environment class - assert envClass is not None, "No valid environment for the requested task was found!" + assert ( + envClass is not None + ), "No valid environment for the requested task was found!" # Set device idx for rendering # ensure that we select the correct GPU device for rendering by testing for EGL rendering # NOTE: this package should be installed from this link (https://github.com/StanfordVL/egl_probe) import egl_probe + device_idx = 0 valid_gpu_devices = egl_probe.get_available_devices() if len(valid_gpu_devices) > 0: @@ -128,10 +142,14 @@ def __init__( ) # If we have a viewer, make sure to remove all bodies belonging to the visual markers - self.exclude_body_ids = [] # Bodies to exclude when saving state + self.exclude_body_ids = [] # Bodies to exclude when saving state if self.env.simulator.viewer is not None: - self.exclude_body_ids.append(self.env.simulator.viewer.constraint_marker.body_id) - self.exclude_body_ids.append(self.env.simulator.viewer.constraint_marker2.body_id) + self.exclude_body_ids.append( + self.env.simulator.viewer.constraint_marker.body_id + ) + self.exclude_body_ids.append( + self.env.simulator.viewer.constraint_marker2.body_id + ) def step(self, action): """ @@ -189,27 +207,37 @@ def render(self, mode="human", camera_name="rgb", height=None, width=None): array or None: If rendering to frame, returns the rendered frame. Otherwise, returns None """ # Only robotview camera is currently supported - assert camera_name in {"rgb", "rgb_wrist"}, \ - f"Only rgb, rgb_wrist cameras currently supported, got {camera_name}." + assert camera_name in { + "rgb", + "rgb_wrist", + }, f"Only rgb, rgb_wrist cameras currently supported, got {camera_name}." if mode == "human": assert self.render_onscreen, "Rendering has not been enabled for onscreen!" self.env.simulator.sync() else: - assert self.env.simulator.renderer is not None, "No renderer enabled for this env!" + assert ( + self.env.simulator.renderer is not None + ), "No renderer enabled for this env!" 
frame = self.env.sensors["vision"].get_obs(self.env)[camera_name] # Reshape all frames if height is not None and width is not None: - frame = cv2.resize(frame, dsize=(height, width), interpolation=cv2.INTER_CUBIC) + frame = cv2.resize( + frame, dsize=(height, width), interpolation=cv2.INTER_CUBIC + ) return frame def resize_obs_frame(self, frame): """ Resizes frame to be internal height and width values """ - return cv2.resize(frame, dsize=(self.obs_img_width, self.obs_img_height), interpolation=cv2.INTER_CUBIC) + return cv2.resize( + frame, + dsize=(self.obs_img_width, self.obs_img_height), + interpolation=cv2.INTER_CUBIC, + ) def get_observation(self, di=None): """Get environment observation""" @@ -222,7 +250,9 @@ def get_observation(self, di=None): ret[k] = di[k] # ret[k] = np.transpose(di[k], (2, 0, 1)) if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k]), obs_key=k) + ret[k] = ObsUtils.process_obs( + obs=self.resize_obs_frame(ret[k]), obs_key=k + ) # Depth images elif "depth" in k: @@ -230,13 +260,17 @@ def get_observation(self, di=None): # Values can be corrupted (negative or > 1.0, so we clip values) ret[k] = np.clip(di[k], 0.0, 1.0) if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k])[..., None], obs_key=k) + ret[k] = ObsUtils.process_obs( + obs=self.resize_obs_frame(ret[k])[..., None], obs_key=k + ) # Segmentation Images elif "seg" in k: ret[k] = di[k][..., None] if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k]), obs_key=k) + ret[k] = ObsUtils.process_obs( + obs=self.resize_obs_frame(ret[k]), obs_key=k + ) # Scans elif "scan" in k: @@ -249,30 +283,38 @@ def get_observation(self, di=None): lin_vel = np.linalg.norm(proprio_obs["base_lin_vel"][:2]) ang_vel = proprio_obs["base_ang_vel"][2] - ret["proprio"] = np.concatenate([ - proprio_obs["head_joint_pos"], - proprio_obs["grasped"], - proprio_obs["eef_pos"], - proprio_obs["eef_quat"], - ]) + ret["proprio"] = np.concatenate( + [ + proprio_obs["head_joint_pos"], + proprio_obs["grasped"], + proprio_obs["eef_pos"], + proprio_obs["eef_quat"], + ] + ) # Proprio info that's only relevant for navigation - ret["proprio_nav"] = np.concatenate([ - [lin_vel], - [ang_vel], - ]) + ret["proprio_nav"] = np.concatenate( + [ + [lin_vel], + [ang_vel], + ] + ) # Compose task obs - ret["object"] = np.concatenate([ - np.array(di["task_obs"]["object-state"]), - ]) + ret["object"] = np.concatenate( + [ + np.array(di["task_obs"]["object-state"]), + ] + ) # Add ground truth navigational state - ret["gt_nav"] = np.concatenate([ - proprio_obs["base_pos"][:2], - [np.sin(proprio_obs["base_rpy"][2])], - [np.cos(proprio_obs["base_rpy"][2])], - ]) + ret["gt_nav"] = np.concatenate( + [ + proprio_obs["base_pos"][:2], + [np.sin(proprio_obs["base_rpy"][2])], + [np.cos(proprio_obs["base_rpy"][2])], + ] + ) return ret @@ -296,7 +338,9 @@ def set_task_conditions(self, task_conditions): def get_state(self): """Get iG flattened state""" - return {"states": PBU.WorldSaver(exclude_body_ids=self.exclude_body_ids).serialize()} + return { + "states": PBU.WorldSaver(exclude_body_ids=self.exclude_body_ids).serialize() + } def get_reward(self): return self.env.task.get_reward(self.env)[0] @@ -326,21 +370,21 @@ def is_success(self): if isinstance(succ, dict): assert "task" in succ return succ - return { "task" : succ } + return {"task": succ} @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - 
camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, - **kwargs, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, + **kwargs, ): """ Create environment for processing datasets, which includes extracting @@ -357,14 +401,16 @@ def create_for_data_processing( render_offscreen (bool or None): optionally override rendering behavior use_image_obs (bool or None): optionally override rendering behavior """ - has_camera = (len(camera_names) > 0) + has_camera = len(camera_names) > 0 # note that @postprocess_visual_obs is False since this env's images will be written to a dataset return cls( env_name=env_name, - render=(False if render is None else render), - render_offscreen=(has_camera if render_offscreen is None else render_offscreen), - use_image_obs=(has_camera if use_image_obs is None else use_image_obs), + render=(False if render is None else render), + render_offscreen=( + has_camera if render_offscreen is None else render_offscreen + ), + use_image_obs=(has_camera if use_image_obs is None else use_image_obs), postprocess_visual_obs=False, image_height=camera_height, image_width=camera_width, @@ -388,19 +434,27 @@ def type(self): def serialize(self): """Serialize to dictionary""" - return dict(env_name=self.name, type=self.type, - ig_config=self.ig_config, - env_kwargs=deepcopy(self._init_kwargs)) + return dict( + env_name=self.name, + type=self.type, + ig_config=self.ig_config, + env_kwargs=deepcopy(self._init_kwargs), + ) @classmethod def deserialize(cls, info, postprocess_visual_obs=True): """Create environment with external info""" - return cls(env_name=info["env_name"], ig_config=info["ig_config"], postprocess_visual_obs=postprocess_visual_obs, **info["env_kwargs"]) + return cls( + env_name=info["env_name"], + ig_config=info["ig_config"], + postprocess_visual_obs=postprocess_visual_obs, + **info["env_kwargs"], + ) @property def rollout_exceptions(self): """Return tuple of exceptions to except when doing rollouts""" - return (RuntimeError) + return RuntimeError @property def base_env(self): @@ -410,5 +464,10 @@ def base_env(self): return self.env def __repr__(self): - return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + \ - "\niGibson Config: \n" + json.dumps(self.ig_config, sort_keys=True, indent=4) + return ( + self.name + + "\n" + + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + + "\niGibson Config: \n" + + json.dumps(self.ig_config, sort_keys=True, indent=4) + ) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 942cb623..7ff20044 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -3,12 +3,14 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. 
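The serialize/deserialize pair above is what lets dataset metadata rebuild an
environment. Schematically, assuming an existing EnvGibsonMOMART instance env:

    env_meta = env.serialize()
    # -> {"env_name": ..., "type": ..., "ig_config": ..., "env_kwargs": ...}
    env_copy = EnvGibsonMOMART.deserialize(env_meta, postprocess_visual_obs=True)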
""" + import json import numpy as np from copy import deepcopy import robosuite import robosuite.utils.transform_utils as T + try: # this is needed for ensuring robosuite can find the additional mimicgen environments (see https://mimicgen.github.io) import mimicgen_envs @@ -22,6 +24,7 @@ # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: import mujoco_py + MUJOCO_EXCEPTIONS = [mujoco_py.builder.MujocoException] except ImportError: MUJOCO_EXCEPTIONS = [] @@ -29,14 +32,15 @@ class EnvRobosuite(EB.EnvBase): """Wrapper class for robosuite environments (https://github.com/ARISE-Initiative/robosuite)""" + def __init__( - self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + self, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -65,9 +69,11 @@ def __init__( self.use_depth_obs = use_depth_obs # robosuite version check - self._is_v1 = (robosuite.__version__.split(".")[0] == "1") + self._is_v1 = robosuite.__version__.split(".")[0] == "1" if self._is_v1: - assert (int(robosuite.__version__.split(".")[1]) >= 2), "only support robosuite v0.3 and v1.2+" + assert ( + int(robosuite.__version__.split(".")[1]) >= 2 + ), "only support robosuite v0.3 and v1.2+" kwargs = deepcopy(kwargs) @@ -87,6 +93,7 @@ def __init__( # ensure that we select the correct GPU device for rendering by testing for EGL rendering # NOTE: this package should be installed from this link (https://github.com/StanfordVL/egl_probe) import egl_probe + valid_gpu_devices = egl_probe.get_available_devices() if len(valid_gpu_devices) > 0: kwargs["render_gpu_device_id"] = valid_gpu_devices[0] @@ -94,7 +101,7 @@ def __init__( # make sure gripper visualization is turned off (we almost always want this for learning) kwargs["gripper_visualization"] = False del kwargs["camera_depths"] - kwargs["camera_depth"] = use_depth_obs # rename kwarg + kwargs["camera_depth"] = use_depth_obs # rename kwarg self._env_name = env_name self._init_kwargs = deepcopy(kwargs) @@ -104,7 +111,9 @@ def __init__( # Make sure joint position observations and eef vel observations are active for ob_name in self.env.observation_names: if ("joint_pos" in ob_name) or ("eef_vel" in ob_name): - self.env.modify_observable(observable_name=ob_name, attribute="active", modifier=True) + self.env.modify_observable( + observable_name=ob_name, attribute="active", modifier=True + ) def step(self, action): """ @@ -141,7 +150,7 @@ def reset_to(self, state): state (dict): current simulator state that contains one or more of: - states (np.ndarray): initial state of the mujoco environment - model (str): mujoco scene xml - + Returns: observation (dict): observation dictionary after setting the simulator state (only if "states" is in @state) @@ -156,8 +165,12 @@ def reset_to(self, state): self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model - self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array([0., 0., 0., 0.]) - self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array([0., 0., 0., 0.]) + self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array( + [0.0, 0.0, 0.0, 0.0] + ) + self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array( + [0.0, 0.0, 0.0, 0.0] + ) if "states" in state: self.env.sim.set_state_from_flattened(state["states"]) self.env.sim.forward() @@ -185,7 +198,9 @@ def 
render(self, mode="human", height=None, width=None, camera_name="agentview") self.env.viewer.set_camera(cam_id) return self.env.render() elif mode == "rgb_array": - im = self.env.sim.render(height=height, width=width, camera_name=camera_name) + im = self.env.sim.render( + height=height, width=width, camera_name=camera_name + ) if self.use_depth_obs: # render() returns a tuple when self.use_depth_obs=True return im[0][::-1] @@ -198,24 +213,32 @@ def get_observation(self, di=None): Get current environment observation dictionary. Args: - di (dict): current raw observation dictionary from robosuite to wrap and provide + di (dict): current raw observation dictionary from robosuite to wrap and provide as a dictionary. If not provided, will be queried from robosuite. """ if di is None: - di = self.env._get_observations(force_update=True) if self._is_v1 else self.env._get_observation() + di = ( + self.env._get_observations(force_update=True) + if self._is_v1 + else self.env._get_observation() + ) ret = {} for k in di: - if (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="rgb"): + if (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality( + key=k, obs_modality="rgb" + ): # by default images from mujoco are flipped in height ret[k] = di[k][::-1] if self.postprocess_visual_obs: ret[k] = ObsUtils.process_obs(obs=ret[k], obs_key=k) - elif (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="depth"): + elif ( + k in ObsUtils.OBS_KEYS_TO_MODALITIES + ) and ObsUtils.key_is_obs_modality(key=k, obs_modality="depth"): # by default depth images from mujoco are flipped in height ret[k] = di[k][::-1] if len(ret[k].shape) == 2: - ret[k] = ret[k][..., None] # (H, W, 1) - assert len(ret[k].shape) == 3 + ret[k] = ret[k][..., None] # (H, W, 1) + assert len(ret[k].shape) == 3 # scale entries in depth map to correspond to real distance. ret[k] = self.get_real_depth_map(ret[k]) if self.postprocess_visual_obs: @@ -230,8 +253,11 @@ def get_observation(self, di=None): # ensures that we don't accidentally add robot wrist images a second time pf = robot.robot_model.naming_prefix for k in di: - if k.startswith(pf) and (k not in ret) and \ - (not k.endswith("proprio-state")): + if ( + k.startswith(pf) + and (k not in ret) + and (not k.endswith("proprio-state")) + ): ret[k] = np.array(di[k]) else: # minimal proprioception for older versions of robosuite @@ -290,7 +316,12 @@ def get_camera_extrinsic_matrix(self, camera_name): # IMPORTANT! This is a correction so that the camera axis is set up along the viewpoint correctly. camera_axis_correction = np.array( - [[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] + [ + [1.0, 0.0, 0.0, 0.0], + [0.0, -1.0, 0.0, 0.0], + [0.0, 0.0, -1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ] ) R = R @ camera_axis_correction return R @@ -307,7 +338,9 @@ def get_camera_transform_matrix(self, camera_name, camera_height, camera_width): """ R = self.get_camera_extrinsic_matrix(camera_name=camera_name) K = self.get_camera_intrinsic_matrix( - camera_name=camera_name, camera_height=camera_height, camera_width=camera_width + camera_name=camera_name, + camera_height=camera_height, + camera_width=camera_width, ) K_exp = np.eye(4) K_exp[:3, :3] = K @@ -319,8 +352,8 @@ def get_state(self): """ Get current environment simulator state as a dictionary. Should be compatible with @reset_to. 
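As with the gym wrapper, get_state/reset_to form a round-trip; here the scene
xml is saved alongside the flattened simulator state. A sketch, assuming an
EnvRobosuite instance env:

    state = env.get_state()   # {"model": scene xml, "states": flattened mujoco state}
    env.reset_to(state)       # rebuild scene from xml, then restore simulator state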
""" - xml = self.env.sim.model.get_xml() # model xml file - state = np.array(self.env.sim.get_state().flatten()) # simulator state + xml = self.env.sim.model.get_xml() # model xml file + state = np.array(self.env.sim.get_state().flatten()) # simulator state return dict(model=xml, states=state) def get_reward(self): @@ -359,7 +392,7 @@ def is_success(self): if isinstance(succ, dict): assert "task" in succ return succ - return { "task" : succ } + return {"task": succ} @property def action_dimension(self): @@ -400,27 +433,27 @@ def serialize(self): env_name=self.name, env_version=self.version, type=self.type, - env_kwargs=deepcopy(self._init_kwargs) + env_kwargs=deepcopy(self._init_kwargs), ) @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ Create environment for processing datasets, which includes extracting observations, labeling dense / sparse rewards, and annotating dones in - transitions. + transitions. Args: env_name (str): name of environment @@ -435,8 +468,8 @@ def create_for_data_processing( @camera_names is non-empty, False otherwise. use_depth_obs (bool): if True, use depth observations """ - is_v1 = (robosuite.__version__.split(".")[0] == "1") - has_camera = (len(camera_names) > 0) + is_v1 = robosuite.__version__.split(".")[0] == "1" + has_camera = len(camera_names) > 0 new_kwargs = { "reward_shaping": reward_shaping, @@ -469,7 +502,7 @@ def create_for_data_processing( depth_modalities = ["depth"] obs_modality_specs = { "obs": { - "low_dim": [], # technically unused, so we don't have to specify all of them + "low_dim": [], # technically unused, so we don't have to specify all of them "rgb": image_modalities, } } @@ -480,9 +513,11 @@ def create_for_data_processing( # note that @postprocess_visual_obs is False since this env's images will be written to a dataset return cls( env_name=env_name, - render=(False if render is None else render), - render_offscreen=(has_camera if render_offscreen is None else render_offscreen), - use_image_obs=(has_camera if use_image_obs is None else use_image_obs), + render=(False if render is None else render), + render_offscreen=( + has_camera if render_offscreen is None else render_offscreen + ), + use_image_obs=(has_camera if use_image_obs is None else use_image_obs), use_depth_obs=use_depth_obs, postprocess_visual_obs=False, **kwargs, @@ -508,4 +543,6 @@ def __repr__(self): """ Pretty-print env description. """ - return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) \ No newline at end of file + return ( + self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + ) diff --git a/robomimic/envs/wrappers.py b/robomimic/envs/wrappers.py index 9936f9de..1df0f54e 100644 --- a/robomimic/envs/wrappers.py +++ b/robomimic/envs/wrappers.py @@ -1,6 +1,7 @@ """ A collection of useful environment wrappers. """ + from copy import deepcopy import textwrap import numpy as np @@ -13,6 +14,7 @@ class EnvWrapper(object): """ Base class for all environment wrappers in robomimic. 
""" + def __init__(self, env): """ Args: @@ -59,20 +61,20 @@ def unwrapped(self): def _to_string(self): """ - Subclasses should override this method to print out info about the + Subclasses should override this method to print out info about the wrapper (such as arguments passed to it). """ - return '' + return "" def __repr__(self): """Pretty print environment.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 - if self._to_string() != '': + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 + if self._to_string() != "": msg += textwrap.indent("\n" + self._to_string(), indent) msg += textwrap.indent("\nenv={}".format(self.env), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg # this method is a fallback option on any methods the original env might support @@ -100,6 +102,7 @@ class FrameStackWrapper(EnvWrapper): receives a sequence of past observations instead of a single observation when it calls @env.reset, @env.reset_to, or @env.step in the rollout loop. """ + def __init__(self, env, num_frames): """ Args: @@ -108,7 +111,11 @@ def __init__(self, env, num_frames): to stack together. Must be greater than 1 (otherwise this wrapper would be a no-op). """ - assert num_frames > 1, "error: FrameStackWrapper must have num_frames > 1 but got num_frames of {}".format(num_frames) + assert ( + num_frames > 1 + ), "error: FrameStackWrapper must have num_frames > 1 but got num_frames of {}".format( + num_frames + ) super(FrameStackWrapper, self).__init__(env=env) self.num_frames = num_frames @@ -128,19 +135,21 @@ def _get_initial_obs_history(self, init_obs): obs_history = {} for k in init_obs: obs_history[k] = deque( - [init_obs[k][None] for _ in range(self.num_frames)], + [init_obs[k][None] for _ in range(self.num_frames)], maxlen=self.num_frames, ) return obs_history def _get_stacked_obs_from_history(self): """ - Helper method to convert internal variable @self.obs_history to a + Helper method to convert internal variable @self.obs_history to a stacked observation where each key is a numpy array with leading dimension @self.num_frames. """ # concatenate all frames per key so we return a numpy array per key - return { k : np.concatenate(self.obs_history[k], axis=0) for k in self.obs_history } + return { + k: np.concatenate(self.obs_history[k], axis=0) for k in self.obs_history + } def cache_obs_history(self): self.obs_history_cache = deepcopy(self.obs_history) @@ -151,7 +160,7 @@ def uncache_obs_history(self): def reset(self): """ - Modify to return frame stacked observation which is @self.num_frames copies of + Modify to return frame stacked observation which is @self.num_frames copies of the initial observation. Returns: @@ -167,7 +176,7 @@ def reset(self): def reset_to(self, state): """ - Modify to return frame stacked observation which is @self.num_frames copies of + Modify to return frame stacked observation which is @self.num_frames copies of the initial observation. 
Returns: @@ -208,7 +217,7 @@ def step(self, action): def update_obs(self, obs, action=None, reset=False): obs["timesteps"] = np.array([self.timestep]) - + if reset: obs["actions"] = np.zeros(self.env.action_dimension) else: @@ -217,4 +226,4 @@ def update_obs(self, obs, action=None, reset=False): def _to_string(self): """Info to pretty print.""" - return "num_frames={}".format(self.num_frames) \ No newline at end of file + return "num_frames={}".format(self.num_frames) diff --git a/robomimic/macros.py b/robomimic/macros.py index 3b6c0503..9f7bb00d 100644 --- a/robomimic/macros.py +++ b/robomimic/macros.py @@ -1,6 +1,7 @@ """ Set of global variables shared across robomimic """ + # Sets debugging mode. Should be set at top-level script so that internal # debugging functionalities are made active DEBUG = False @@ -20,8 +21,11 @@ except ImportError: from robomimic.utils.log_utils import log_warning import robomimic + log_warning( - "No private macro file found!"\ - "\nIt is recommended to use a private macro file"\ - "\nTo setup, run: python {}/scripts/setup_macros.py".format(robomimic.__path__[0]) + "No private macro file found!" + "\nIt is recommended to use a private macro file" + "\nTo setup, run: python {}/scripts/setup_macros.py".format( + robomimic.__path__[0] + ) ) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index 9faea1f7..ff91ee3f 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -56,7 +56,7 @@ def transformer_args_from_config(transformer_config): transformer_activation=transformer_config.activation, transformer_nn_parameter_for_timesteps=transformer_config.nn_parameter_for_timesteps, ) - + if "num_layers" in transformer_config: transformer_args["transformer_num_layers"] = transformer_config.num_layers @@ -68,14 +68,15 @@ class Module(torch.nn.Module): Base class for networks. The only difference from torch.nn.Module is that it requires implementing @output_shape. """ + @abc.abstractmethod def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -88,7 +89,8 @@ class Sequential(torch.nn.Sequential, Module): """ Compose multiple Modules together (defined above). """ - def __init__(self, *args, has_output_shape = True): + + def __init__(self, *args, has_output_shape=True): """ Args: has_output_shape (bool, optional): indicates whether output_shape can be called on the Sequential module. @@ -105,11 +107,11 @@ def __init__(self, *args, has_output_shape = True): def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -137,6 +139,7 @@ class Parameter(Module): A class that is a thin wrapper around a torch.nn.Parameter to make for easy saving and optimization. 
""" + def __init__(self, init_tensor): """ Args: @@ -147,11 +150,11 @@ def __init__(self, init_tensor): def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -170,13 +173,18 @@ class Unsqueeze(Module): """ Trivial class that unsqueezes the input. Useful for including in a nn.Sequential network """ + def __init__(self, dim): super(Unsqueeze, self).__init__() self.dim = dim def output_shape(self, input_shape=None): assert input_shape is not None - return input_shape + [1] if self.dim == -1 else input_shape[:self.dim + 1] + [1] + input_shape[self.dim + 1:] + return ( + input_shape + [1] + if self.dim == -1 + else input_shape[: self.dim + 1] + [1] + input_shape[self.dim + 1 :] + ) def forward(self, x): return x.unsqueeze(dim=self.dim) @@ -193,7 +201,11 @@ def __init__(self, dim): def output_shape(self, input_shape=None): assert input_shape is not None - return input_shape[:self.dim] + input_shape[self.dim+1:] if input_shape[self.dim] == 1 else input_shape + return ( + input_shape[: self.dim] + input_shape[self.dim + 1 :] + if input_shape[self.dim] == 1 + else input_shape + ) def forward(self, x): return x.squeeze(dim=self.dim) @@ -203,6 +215,7 @@ class MLP(Module): """ Base class for simple Multi-Layer Perceptrons. """ + def __init__( self, input_dim, @@ -242,13 +255,13 @@ def __init__( if layer_func_kwargs is None: layer_func_kwargs = dict() if dropouts is not None: - assert(len(dropouts) == len(layer_dims)) + assert len(dropouts) == len(layer_dims) for i, l in enumerate(layer_dims): layers.append(layer_func(dim, l, **layer_func_kwargs)) if normalization: layers.append(nn.LayerNorm(l)) layers.append(activation()) - if dropouts is not None and dropouts[i] > 0.: + if dropouts is not None and dropouts[i] > 0.0: layers.append(nn.Dropout(dropouts[i])) dim = l layers.append(layer_func(dim, output_dim)) @@ -267,11 +280,11 @@ def __init__( def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. 
Returns: @@ -291,13 +304,18 @@ def __repr__(self): act = None if self._act is None else self._act.__name__ output_act = None if self._output_act is None else self._output_act.__name__ - indent = ' ' * 4 + indent = " " * 4 msg = "input_dim={}\noutput_dim={}\nlayer_dims={}\nlayer_func={}\ndropout={}\nact={}\noutput_act={}".format( - self._input_dim, self._output_dim, self._layer_dims, - self._layer_func.__name__, self._dropouts, act, output_act + self._input_dim, + self._output_dim, + self._layer_dims, + self._layer_func.__name__, + self._dropouts, + act, + output_act, ) msg = textwrap.indent(msg, indent) - msg = header + '(\n' + msg + '\n)' + msg = header + "(\n" + msg + "\n)" return msg @@ -305,6 +323,7 @@ class RNN_Base(Module): """ A wrapper class for a multi-step RNN and a per-step network. """ + def __init__( self, input_dim, @@ -331,7 +350,9 @@ def __init__( super(RNN_Base, self).__init__() self.per_step_net = per_step_net if per_step_net is not None: - assert isinstance(per_step_net, Module), "RNN_Base: per_step_net is not instance of Module" + assert isinstance( + per_step_net, Module + ), "RNN_Base: per_step_net is not instance of Module" assert rnn_type in ["LSTM", "GRU"] rnn_cls = nn.LSTM if rnn_type == "LSTM" else nn.GRU @@ -349,7 +370,9 @@ def __init__( self._hidden_dim = rnn_hidden_dim self._num_layers = rnn_num_layers self._rnn_type = rnn_type - self._num_directions = int(rnn_is_bidirectional) + 1 # 2 if bidirectional, 1 otherwise + self._num_directions = ( + int(rnn_is_bidirectional) + 1 + ) # 2 if bidirectional, 1 otherwise @property def rnn_type(self): @@ -367,20 +390,24 @@ def get_rnn_init_state(self, batch_size, device): hidden_state (torch.Tensor or tuple): returns hidden state tensor or tuple of hidden state tensors depending on the RNN type """ - h_0 = torch.zeros(self._num_layers * self._num_directions, batch_size, self._hidden_dim).to(device) + h_0 = torch.zeros( + self._num_layers * self._num_directions, batch_size, self._hidden_dim + ).to(device) if self._rnn_type == "LSTM": - c_0 = torch.zeros(self._num_layers * self._num_directions, batch_size, self._hidden_dim).to(device) + c_0 = torch.zeros( + self._num_layers * self._num_directions, batch_size, self._hidden_dim + ).to(device) return h_0, c_0 else: return h_0 def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -456,21 +483,24 @@ def forward_step(self, inputs, rnn_state): Visual Backbone Networks ================================================ """ + + class ConvBase(Module): """ Base class for ConvNets. """ + def __init__(self): super(ConvBase, self).__init__() # dirty hack - re-implement to pass the buck onto subclasses from ABC parent def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. 
- Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -481,15 +511,21 @@ def output_shape(self, input_shape): def forward(self, inputs): x = self.nets(inputs) if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]: - raise ValueError('Size mismatch: expect size %s, but got size %s' % ( - str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:])) + raise ValueError( + "Size mismatch: expect size %s, but got size %s" + % ( + str(self.output_shape(list(inputs.shape)[1:])), + str(list(x.shape)[1:]), + ) ) return x + class ResNet18Conv(ConvBase): """ A ResNet18 block that can be used to process input images. """ + def __init__( self, input_channel=3, @@ -509,9 +545,13 @@ def __init__( net = vision_models.resnet18(pretrained=pretrained) if input_coord_conv: - net.conv1 = CoordConv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False) + net.conv1 = CoordConv2d( + input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False + ) elif input_channel != 3: - net.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False) + net.conv1 = nn.Conv2d( + input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False + ) # cut the last fc layer self._input_coord_conv = input_coord_conv @@ -520,39 +560,44 @@ def __init__( def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. 
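ConvBase.forward above enforces the output_shape contract at runtime. A quick
check against the ResNet18Conv subclass defined just below (a sketch; assumes
torchvision is installed):

    import torch
    from robomimic.models.base_nets import ResNet18Conv

    net = ResNet18Conv(input_channel=3, pretrained=False)
    x = torch.zeros(1, 3, 224, 224)
    # a mismatch here would raise the ValueError from ConvBase.forward
    assert list(net(x).shape[1:]) == net.output_shape([3, 224, 224])  # [512, 7, 7]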
Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) - out_h = int(math.ceil(input_shape[1] / 32.)) - out_w = int(math.ceil(input_shape[2] / 32.)) + assert len(input_shape) == 3 + out_h = int(math.ceil(input_shape[1] / 32.0)) + out_w = int(math.ceil(input_shape[2] / 32.0)) return [512, out_h, out_w] def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - return header + '(input_channel={}, input_coord_conv={})'.format(self._input_channel, self._input_coord_conv) - + header = "{}".format(str(self.__class__.__name__)) + return header + "(input_channel={}, input_coord_conv={})".format( + self._input_channel, self._input_coord_conv + ) + + class ViT_Rein(ConvBase): """ ViT LoRA using Rein method """ + def __init__( self, input_channel=3, - vit_model_class = 'vit_b', - lora_dim = 16, - patch_size = 16, - freeze = True): + vit_model_class="vit_b", + lora_dim=16, + patch_size=16, + freeze=True, + ): """ - Using pretrained observation encoder network proposed in Vision Transformers + Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 pip install -r requirements.txt Args: @@ -564,8 +609,13 @@ def __init__( """ super(ViT_Rein, self).__init__() - assert input_channel == 3 - assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist + assert input_channel == 3 + assert vit_model_class in [ + "vit_b", + "vit_l", + "vit_g", + "vit_s", + ] # make sure the selected vit model do exist # cut the last fc layer self._input_channel = input_channel @@ -575,32 +625,47 @@ def __init__( self._pretrained = False self._lora_dim = lora_dim self._patch_size = patch_size - self._out_indices = [7, 11, 15, 23], + self._out_indices = ([7, 11, 15, 23],) self.preprocess = nn.Sequential( - transforms.Resize((294,294)), + transforms.Resize((294, 294)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) - + try: - if self._vit_model_class=="vit_s": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_lc') - if self._vit_model_class=="vit_l": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_lc') - if self._vit_model_class=="vit_g": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_lc') - if self._vit_model_class=="vit_b": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc') + if self._vit_model_class == "vit_s": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vits14_lc" + ) + if self._vit_model_class == "vit_l": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitl14_lc" + ) + if self._vit_model_class == "vit_g": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitg14_lc" + ) + if self._vit_model_class == "vit_b": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitb14_lc" + ) except ImportError: print("WARNING: could not load Vit") - - try : - self._rein_layers = LoRAReins(lora_dim=self._lora_dim, num_layers=len(self.nets.backbone.blocks),embed_dims = self.nets.backbone.patch_embed.proj.out_channels,patch_size=self._patch_size) - self._mlp_lora_head = MLPhead(in_dim=3*self.nets.backbone.patch_embed.proj.out_channels, out_dim = 
5*self.nets.backbone.patch_embed.proj.out_channels) + + try: + self._rein_layers = LoRAReins( + lora_dim=self._lora_dim, + num_layers=len(self.nets.backbone.blocks), + embed_dims=self.nets.backbone.patch_embed.proj.out_channels, + patch_size=self._patch_size, + ) + self._mlp_lora_head = MLPhead( + in_dim=3 * self.nets.backbone.patch_embed.proj.out_channels, + out_dim=5 * self.nets.backbone.patch_embed.proj.out_channels, + ) except ImportError: print("WARNING: could not load rein layer") - if self._freeze: for param in self.nets.parameters(): param.requires_grad = False @@ -619,16 +684,16 @@ def forward(self, inputs): ) q_avg = x.mean(dim=1).unsqueeze(1) - q_max = torch.max(x,1)[0].unsqueeze(1) - q_N = x[:,x.shape[1]-1,:].unsqueeze(1) + q_max = torch.max(x, 1)[0].unsqueeze(1) + q_N = x[:, x.shape[1] - 1, :].unsqueeze(1) _q = torch.cat((q_avg, q_max, q_N), dim=1) x = self.nets.backbone.norm(_q) - x = x.flatten(-2,-1) + x = x.flatten(-2, -1) x = self._mlp_lora_head(x) x = self.nets.linear_head(x) - return x + return x def output_shape(self, input_shape): """ @@ -640,7 +705,7 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) + assert len(input_shape) == 3 out_dim = 1000 @@ -648,25 +713,34 @@ def output_shape(self, input_shape): def __repr__(self): """Pretty print network.""" - print("**Number of learnable params:",sum(p.numel() for p in self.nets.parameters() if p.requires_grad)," Freeze:",self._freeze) - print("**Number of params:",sum(p.numel() for p in self.nets.parameters())) + print( + "**Number of learnable params:", + sum(p.numel() for p in self.nets.parameters() if p.requires_grad), + " Freeze:", + self._freeze, + ) + print("**Number of params:", sum(p.numel() for p in self.nets.parameters())) + + header = "{}".format(str(self.__class__.__name__)) + return ( + header + + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format( + self._input_channel, + self._input_coord_conv, + self._pretrained, + self._freeze, + ) + ) - header = '{}'.format(str(self.__class__.__name__)) - return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) - class Vit(ConvBase): """ Vision transformer """ - def __init__( - self, - input_channel=3, - vit_model_class = 'vit_b', - freeze = True - ): + + def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True): """ - Using pretrained observation encoder network proposed in Vision Transformers + Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 pip install -r requirements.txt Args: @@ -678,8 +752,13 @@ def __init__( """ super(Vit, self).__init__() - assert input_channel == 3 - assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist + assert input_channel == 3 + assert vit_model_class in [ + "vit_b", + "vit_l", + "vit_g", + "vit_s", + ] # make sure the selected vit model do exist # cut the last fc layer self._input_channel = input_channel @@ -689,19 +768,27 @@ def __init__( self._pretrained = False self.preprocess = nn.Sequential( - transforms.Resize((294,294)), + transforms.Resize((294, 294)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) - + try: - if self._vit_model_class=="vit_s": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 
'dinov2_vits14_lc') - if self._vit_model_class=="vit_l": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_lc') - if self._vit_model_class=="vit_g": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_lc') - if self._vit_model_class=="vit_b": - self.nets = dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc') + if self._vit_model_class == "vit_s": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vits14_lc" + ) + if self._vit_model_class == "vit_l": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitl14_lc" + ) + if self._vit_model_class == "vit_g": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitg14_lc" + ) + if self._vit_model_class == "vit_b": + self.nets = dinov2_vits14_lc = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitb14_lc" + ) except ImportError: print("WARNING: could not load Vit") @@ -715,7 +802,7 @@ def __init__( def forward(self, inputs): x = self.preprocess(inputs) x = self.nets(x) - return x + return x def output_shape(self, input_shape): """ @@ -727,7 +814,7 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) + assert len(input_shape) == 3 out_dim = 1000 @@ -735,20 +822,35 @@ def output_shape(self, input_shape): def __repr__(self): """Pretty print network.""" - print("**Number of learnable params:",sum(p.numel() for p in self.nets.parameters() if p.requires_grad)," Freeze:",self._freeze) - print("**Number of params:",sum(p.numel() for p in self.nets.parameters())) + print( + "**Number of learnable params:", + sum(p.numel() for p in self.nets.parameters() if p.requires_grad), + " Freeze:", + self._freeze, + ) + print("**Number of params:", sum(p.numel() for p in self.nets.parameters())) + + header = "{}".format(str(self.__class__.__name__)) + return ( + header + + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format( + self._input_channel, + self._input_coord_conv, + self._pretrained, + self._freeze, + ) + ) - header = '{}'.format(str(self.__class__.__name__)) - return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) class R3MConv(ConvBase): """ Base class for ConvNets pretrained with R3M (https://arxiv.org/abs/2203.12601) """ + def __init__( self, input_channel=3, - r3m_model_class='resnet18', + r3m_model_class="resnet18", freeze=True, ): """ @@ -765,12 +867,18 @@ def __init__( try: from r3m import load_r3m except ImportError: - print("WARNING: could not load r3m library! Please follow https://github.com/facebookresearch/r3m to install R3M") + print( + "WARNING: could not load r3m library! 
Please follow https://github.com/facebookresearch/r3m to install R3M" + ) net = load_r3m(r3m_model_class) - assert input_channel == 3 # R3M only support input image with channel size 3 - assert r3m_model_class in ["resnet18", "resnet34", "resnet50"] # make sure the selected r3m model do exist + assert input_channel == 3 # R3M only support input image with channel size 3 + assert r3m_model_class in [ + "resnet18", + "resnet34", + "resnet50", + ] # make sure the selected r3m model do exist # cut the last fc layer self._input_channel = input_channel @@ -784,11 +892,16 @@ def __init__( transforms.CenterCrop(224), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) - self.nets = Sequential(*([preprocess] + list(net.module.convnet.children())), has_output_shape = False) + self.nets = Sequential( + *([preprocess] + list(net.module.convnet.children())), + has_output_shape=False, + ) if freeze: self.nets.freeze() - self.weight_sum = np.sum([param.cpu().data.numpy().sum() for param in self.nets.parameters()]) + self.weight_sum = np.sum( + [param.cpu().data.numpy().sum() for param in self.nets.parameters()] + ) if freeze: for param in self.nets.parameters(): param.requires_grad = False @@ -805,9 +918,9 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) + assert len(input_shape) == 3 - if self._r3m_model_class == 'resnet50': + if self._r3m_model_class == "resnet50": out_dim = 2048 else: out_dim = 512 @@ -816,18 +929,27 @@ def output_shape(self, input_shape): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) + header = "{}".format(str(self.__class__.__name__)) + return ( + header + + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format( + self._input_channel, + self._input_coord_conv, + self._pretrained, + self._freeze, + ) + ) class MVPConv(ConvBase): """ Base class for ConvNets pretrained with MVP (https://arxiv.org/abs/2203.06173) """ + def __init__( self, input_channel=3, - mvp_model_class='vitb-mae-egosoup', + mvp_model_class="vitb-mae-egosoup", freeze=True, ): """ @@ -844,14 +966,22 @@ def __init__( try: import mvp except ImportError: - print("WARNING: could not load mvp library! Please follow https://github.com/ir413/mvp to install MVP.") + print( + "WARNING: could not load mvp library! Please follow https://github.com/ir413/mvp to install MVP." 
+ ) self.nets = mvp.load(mvp_model_class) if freeze: self.nets.freeze() - assert input_channel == 3 # MVP only support input image with channel size 3 - assert mvp_model_class in ["vits-mae-hoi", "vits-mae-in", "vits-sup-in", "vitb-mae-egosoup", "vitl-256-mae-egosoup"] # make sure the selected r3m model do exist + assert input_channel == 3 # MVP only support input image with channel size 3 + assert mvp_model_class in [ + "vits-mae-hoi", + "vits-mae-in", + "vits-sup-in", + "vitb-mae-egosoup", + "vitl-256-mae-egosoup", + ] # make sure the selected r3m model do exist self._input_channel = input_channel self._freeze = freeze @@ -859,20 +989,22 @@ def __init__( self._input_coord_conv = False self._pretrained = True - if '256' in mvp_model_class: + if "256" in mvp_model_class: input_img_size = 256 else: input_img_size = 224 - self.preprocess = nn.Sequential( - transforms.Resize(input_img_size) - ) + self.preprocess = nn.Sequential(transforms.Resize(input_img_size)) def forward(self, inputs): x = self.preprocess(inputs) x = self.nets(x) if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]: - raise ValueError('Size mismatch: expect size %s, but got size %s' % ( - str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:])) + raise ValueError( + "Size mismatch: expect size %s, but got size %s" + % ( + str(self.output_shape(list(inputs.shape)[1:])), + str(list(x.shape)[1:]), + ) ) return x @@ -886,10 +1018,10 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) - if 'vitb' in self._mvp_model_class: + assert len(input_shape) == 3 + if "vitb" in self._mvp_model_class: output_shape = [768] - elif 'vitl' in self._mvp_model_class: + elif "vitl" in self._mvp_model_class: output_shape = [1024] else: output_shape = [384] @@ -897,8 +1029,16 @@ def output_shape(self, input_shape): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) + header = "{}".format(str(self.__class__.__name__)) + return ( + header + + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format( + self._input_channel, + self._input_coord_conv, + self._pretrained, + self._freeze, + ) + ) class CoordConv2d(nn.Conv2d, Module): @@ -909,6 +1049,7 @@ class CoordConv2d(nn.Conv2d, Module): https://arxiv.org/abs/1807.03247 (e.g. adds 2 channels per input feature map corresponding to (x, y) location on map) """ + def __init__( self, in_channels, @@ -919,8 +1060,8 @@ def __init__( dilation=1, groups=1, bias=True, - padding_mode='zeros', - coord_encoding='position', + padding_mode="zeros", + coord_encoding="position", ): """ Args: @@ -936,13 +1077,17 @@ def __init__( coord_encoding: type of coordinate encoding. 
currently only 'position' is implemented """ - assert(coord_encoding in ['position']) + assert coord_encoding in ["position"] self.coord_encoding = coord_encoding - if coord_encoding == 'position': + if coord_encoding == "position": in_channels += 2 # two extra channel for positional encoding self._position_enc = None # position encoding else: - raise Exception("CoordConv2d: coord encoding {} not implemented".format(self.coord_encoding)) + raise Exception( + "CoordConv2d: coord encoding {} not implemented".format( + self.coord_encoding + ) + ) nn.Conv2d.__init__( self, in_channels=in_channels, @@ -953,16 +1098,16 @@ def __init__( dilation=dilation, groups=groups, bias=bias, - padding_mode=padding_mode + padding_mode=padding_mode, ) def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -974,7 +1119,7 @@ def output_shape(self, input_shape): def forward(self, input): b, c, h, w = input.shape - if self.coord_encoding == 'position': + if self.coord_encoding == "position": if self._position_enc is None: pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(input.device) / float(h) @@ -989,6 +1134,7 @@ class ShallowConv(ConvBase): """ A shallow convolutional encoder from https://rll.berkeley.edu/dsae/dsae.pdf """ + def __init__(self, input_channel=3, output_channel=32): super(ShallowConv, self).__init__() self._input_channel = input_channel @@ -1005,20 +1151,20 @@ def __init__(self, input_channel=3, output_channel=32): def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) - assert(input_shape[0] == self._input_channel) - out_h = int(math.floor(input_shape[1] / 2.)) - out_w = int(math.floor(input_shape[2] / 2.)) + assert len(input_shape) == 3 + assert input_shape[0] == self._input_channel + out_h = int(math.floor(input_shape[1] / 2.0)) + out_w = int(math.floor(input_shape[2] / 2.0)) return [self._output_channel, out_h, out_w] @@ -1037,6 +1183,7 @@ class Conv1dBase(Module): argument to be passed to the ith Conv1D layer. See https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html for specific possible arguments. 
""" + def __init__( self, input_channel=1, @@ -1050,7 +1197,7 @@ def __init__( # Get activation requested activation = CONV_ACTIVATIONS[activation] - + # Add layer kwargs conv_kwargs["out_channels"] = out_channels conv_kwargs["kernel_size"] = kernel_size @@ -1061,12 +1208,12 @@ def __init__( layers = OrderedDict() for i in range(self.n_layers): layer_kwargs = {k: v[i] for k, v in conv_kwargs.items()} - layers[f'conv{i}'] = nn.Conv1d( + layers[f"conv{i}"] = nn.Conv1d( in_channels=input_channel, **layer_kwargs, ) if activation is not None: - layers[f'act{i}'] = activation() + layers[f"act{i}"] = activation() input_channel = layer_kwargs["out_channels"] # Store network @@ -1088,14 +1235,29 @@ def output_shape(self, input_shape): for i in range(self.n_layers): net = getattr(self.nets, f"conv{i}") channels = net.out_channels - length = int((length + 2 * net.padding[0] - net.dilation[0] * (net.kernel_size[0] - 1) - 1) / net.stride[0]) + 1 + length = ( + int( + ( + length + + 2 * net.padding[0] + - net.dilation[0] * (net.kernel_size[0] - 1) + - 1 + ) + / net.stride[0] + ) + + 1 + ) return [channels, length] def forward(self, inputs): x = self.nets(inputs) if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]: - raise ValueError('Size mismatch: expect size %s, but got size %s' % ( - str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:])) + raise ValueError( + "Size mismatch: expect size %s, but got size %s" + % ( + str(self.output_shape(list(inputs.shape)[1:])), + str(list(x.shape)[1:]), + ) ) return x @@ -1105,6 +1267,8 @@ def forward(self, inputs): Pooling Networks ================================================ """ + + class SpatialSoftmax(ConvBase): """ Spatial Softmax Layer. @@ -1112,11 +1276,12 @@ class SpatialSoftmax(ConvBase): Based on Deep Spatial Autoencoders for Visuomotor Learning by Finn et al. 
https://rll.berkeley.edu/dsae/dsae.pdf """ + def __init__( self, input_shape, num_kp=32, - temperature=1., + temperature=1.0, learnable_temperature=False, output_variance=False, noise_std=0.0, @@ -1132,7 +1297,7 @@ def __init__( """ super(SpatialSoftmax, self).__init__() assert len(input_shape) == 3 - self._in_c, self._in_h, self._in_w = input_shape # (C, H, W) + self._in_c, self._in_h, self._in_w = input_shape # (C, H, W) if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) @@ -1146,51 +1311,55 @@ def __init__( if self.learnable_temperature: # temperature will be learned - temperature = torch.nn.Parameter(torch.ones(1) * temperature, requires_grad=True) - self.register_parameter('temperature', temperature) + temperature = torch.nn.Parameter( + torch.ones(1) * temperature, requires_grad=True + ) + self.register_parameter("temperature", temperature) else: # temperature held constant after initialization - temperature = torch.nn.Parameter(torch.ones(1) * temperature, requires_grad=False) - self.register_buffer('temperature', temperature) + temperature = torch.nn.Parameter( + torch.ones(1) * temperature, requires_grad=False + ) + self.register_buffer("temperature", temperature) pos_x, pos_y = np.meshgrid( - np.linspace(-1., 1., self._in_w), - np.linspace(-1., 1., self._in_h) - ) + np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h) + ) pos_x = torch.from_numpy(pos_x.reshape(1, self._in_h * self._in_w)).float() pos_y = torch.from_numpy(pos_y.reshape(1, self._in_h * self._in_w)).float() - self.register_buffer('pos_x', pos_x) - self.register_buffer('pos_y', pos_y) + self.register_buffer("pos_x", pos_x) + self.register_buffer("pos_y", pos_y) self.kps = None def __repr__(self): """Pretty print network.""" header = format(str(self.__class__.__name__)) - return header + '(num_kp={}, temperature={}, noise={})'.format( - self._num_kp, self.temperature.item(), self.noise_std) + return header + "(num_kp={}, temperature={}, noise={})".format( + self._num_kp, self.temperature.item(), self.noise_std + ) def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert(len(input_shape) == 3) - assert(input_shape[0] == self._in_c) + assert len(input_shape) == 3 + assert input_shape[0] == self._in_c return [self._num_kp, 2] def forward(self, feature): """ - Forward pass through spatial softmax layer. For each keypoint, a 2D spatial - probability distribution is created using a softmax, where the support is the - pixel locations. This distribution is used to compute the expected value of + Forward pass through spatial softmax layer. For each keypoint, a 2D spatial + probability distribution is created using a softmax, where the support is the + pixel locations. This distribution is used to compute the expected value of the pixel location, which becomes a keypoint of dimension 2. K such keypoints are created. 
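For reference, the spatial softmax reformatted in the next hunk reduces a [B, K, H, W] feature map to K expected pixel coordinates in [-1, 1]: a per-keypoint softmax over all H*W locations yields a probability map, and each keypoint is the probability-weighted mean of a fixed coordinate grid. A minimal standalone sketch of the same computation in plain PyTorch (the function and variable names here are illustrative, not part of robomimic):

    import torch
    import torch.nn.functional as F

    def spatial_softmax_keypoints(feature, temperature=1.0):
        """feature: [B, K, H, W] -> keypoints: [B, K, 2], coordinates in [-1, 1]."""
        B, K, H, W = feature.shape
        # fixed coordinate grid over the feature map, analogous to the pos_x/pos_y buffers above
        pos_y, pos_x = torch.meshgrid(
            torch.linspace(-1.0, 1.0, H), torch.linspace(-1.0, 1.0, W), indexing="ij"
        )
        # per-keypoint probability map over all H*W locations
        attention = F.softmax(feature.reshape(B * K, H * W) / temperature, dim=-1)
        # keypoint = probability-weighted mean of the coordinate grid
        expected_x = (pos_x.reshape(1, H * W) * attention).sum(dim=-1, keepdim=True)
        expected_y = (pos_y.reshape(1, H * W) * attention).sum(dim=-1, keepdim=True)
        return torch.cat([expected_x, expected_y], dim=-1).reshape(B, K, 2)
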
@@ -1199,9 +1368,9 @@ def forward(self, feature): keypoint variance of shape [B, K, 2, 2] corresponding to the covariance under the 2D spatial softmax distribution """ - assert(feature.shape[1] == self._in_c) - assert(feature.shape[2] == self._in_h) - assert(feature.shape[3] == self._in_w) + assert feature.shape[1] == self._in_c + assert feature.shape[2] == self._in_h + assert feature.shape[3] == self._in_w if self.nets is not None: feature = self.nets(feature) @@ -1223,14 +1392,22 @@ def forward(self, feature): if self.output_variance: # treat attention as a distribution, and compute second-order statistics to return - expected_xx = torch.sum(self.pos_x * self.pos_x * attention, dim=1, keepdim=True) - expected_yy = torch.sum(self.pos_y * self.pos_y * attention, dim=1, keepdim=True) - expected_xy = torch.sum(self.pos_x * self.pos_y * attention, dim=1, keepdim=True) + expected_xx = torch.sum( + self.pos_x * self.pos_x * attention, dim=1, keepdim=True + ) + expected_yy = torch.sum( + self.pos_y * self.pos_y * attention, dim=1, keepdim=True + ) + expected_xy = torch.sum( + self.pos_x * self.pos_y * attention, dim=1, keepdim=True + ) var_x = expected_xx - expected_x * expected_x var_y = expected_yy - expected_y * expected_y var_xy = expected_xy - expected_x * expected_y # stack to [B * K, 4] and then reshape to [B, K, 2, 2] where last 2 dims are covariance matrix - feature_covar = torch.cat([var_x, var_xy, var_xy, var_y], 1).reshape(-1, self._num_kp, 2, 2) + feature_covar = torch.cat([var_x, var_xy, var_xy, var_y], 1).reshape( + -1, self._num_kp, 2, 2 + ) feature_keypoints = (feature_keypoints, feature_covar) if isinstance(feature_keypoints, tuple): @@ -1245,24 +1422,25 @@ class SpatialMeanPool(Module): Module that averages inputs across all spatial dimensions (dimension 2 and after), leaving only the batch and channel dimensions. """ + def __init__(self, input_shape): super(SpatialMeanPool, self).__init__() - assert len(input_shape) == 3 # [C, H, W] + assert len(input_shape) == 3 # [C, H, W] self.in_shape = input_shape def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - return list(self.in_shape[:1]) # [C, H, W] -> [C] + return list(self.in_shape[:1]) # [C, H, W] -> [C] def forward(self, inputs): """Forward pass - average across all dimensions except batch and channel.""" @@ -1271,11 +1449,12 @@ def forward(self, inputs): class FeatureAggregator(Module): """ - Helpful class for aggregating features across a dimension. This is useful in + Helpful class for aggregating features across a dimension. This is useful in practice when training models that break an input image up into several patches - since features can be extraced per-patch using the same encoder and then + since features can be extraced per-patch using the same encoder and then aggregated using this module. 
""" + def __init__(self, dim=1, agg_type="avg"): super(FeatureAggregator, self).__init__() self.dim = dim @@ -1291,18 +1470,18 @@ def clear_weight(self): def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - # aggregates on @self.dim, so it is removed from the output shape - return list(input_shape[:self.dim]) + list(input_shape[self.dim+1:]) + # aggregates on @self.dim, so it is removed from the output shape + return list(input_shape[: self.dim]) + list(input_shape[self.dim + 1 :]) def forward(self, x): """Forward pooling pass.""" diff --git a/robomimic/models/distributions.py b/robomimic/models/distributions.py index 411efb1a..cd33a94c 100644 --- a/robomimic/models/distributions.py +++ b/robomimic/models/distributions.py @@ -2,6 +2,7 @@ Contains distribution models used as parts of other networks. These classes usually inherit or emulate torch distributions. """ + import torch import torch.nn as nn import torch.nn.functional as F @@ -15,6 +16,7 @@ class TanhWrappedDistribution(D.Distribution): Tanh Normal distribution - adapted from rlkit and CQL codebase (https://github.com/aviralkumar2907/CQL/blob/d67dbe9cf5d2b96e3b462b6146f249b3d6569796/d4rl/rlkit/torch/distributions.py#L6). """ + def __init__(self, base_dist, scale=1.0, epsilon=1e-6): """ Args: @@ -35,13 +37,17 @@ def log_prob(self, value, pre_tanh_value=None): """ value = value / self.scale if pre_tanh_value is None: - one_plus_x = (1. + value).clamp(min=self.tanh_epsilon) - one_minus_x = (1. - value).clamp(min=self.tanh_epsilon) + one_plus_x = (1.0 + value).clamp(min=self.tanh_epsilon) + one_minus_x = (1.0 - value).clamp(min=self.tanh_epsilon) pre_tanh_value = 0.5 * torch.log(one_plus_x / one_minus_x) lp = self.base_dist.log_prob(pre_tanh_value) tanh_lp = torch.log(1 - value * value + self.tanh_epsilon) # In case the base dist already sums up the log probs, make sure we do the same - return lp - tanh_lp if len(lp.shape) == len(tanh_lp.shape) else lp - tanh_lp.sum(-1) + return ( + lp - tanh_lp + if len(lp.shape) == len(tanh_lp.shape) + else lp - tanh_lp.sum(-1) + ) def sample(self, sample_shape=torch.Size(), return_pretanh_value=False): """ @@ -81,6 +87,7 @@ class DiscreteValueDistribution(object): of the support (categorical values, or in this case, value atoms). This is used for distributional value networks. """ + def __init__(self, values, probs=None, logits=None): """ Creates a categorical distribution parameterized by either @probs or @@ -114,7 +121,7 @@ def variance(self): """ dist_squared = (self.mean().unsqueeze(-1) - self.values).pow(2) return (self._categorical_dist.probs * dist_squared).sum(dim=-1) - + def sample(self, sample_shape=torch.Size()): """ Sample from the distribution. Make sure to return value atoms, not categorical class indices. 
diff --git a/robomimic/models/obs_core.py b/robomimic/models/obs_core.py index 81f12b66..0d037efa 100644 --- a/robomimic/models/obs_core.py +++ b/robomimic/models/obs_core.py @@ -28,16 +28,18 @@ import matplotlib.pyplot as plt - """ ================================================ Encoder Core Networks (Abstract class) ================================================ """ + + class EncoderCore(BaseNets.Module): """ Abstract class used to categorize all cores used to encode observations """ + def __init__(self, input_shape): self.input_shape = input_shape super(EncoderCore, self).__init__() @@ -62,11 +64,14 @@ def __init_subclass__(cls, **kwargs): Visual Core Networks (Backbone + Pool) ================================================ """ + + class VisualCore(EncoderCore, BaseNets.ConvBase): """ A network block that combines a visual backbone network with optional pooling and linear layers. """ + def __init__( self, input_shape, @@ -101,7 +106,9 @@ def __init__( backbone_kwargs["input_channel"] = input_shape[0] # extract only relevant kwargs for this specific backbone - backbone_kwargs = extract_class_init_kwargs_from_dict(cls=eval(backbone_class), dic=backbone_kwargs, copy=True) + backbone_kwargs = extract_class_init_kwargs_from_dict( + cls=eval(backbone_class), dic=backbone_kwargs, copy=True + ) # visual backbone assert isinstance(backbone_class, str) @@ -120,7 +127,9 @@ def __init__( pool_kwargs = dict() # extract only relevant kwargs for this specific backbone pool_kwargs["input_shape"] = feat_shape - pool_kwargs = extract_class_init_kwargs_from_dict(cls=eval(pool_class), dic=pool_kwargs, copy=True) + pool_kwargs = extract_class_init_kwargs_from_dict( + cls=eval(pool_class), dic=pool_kwargs, copy=True + ) self.pool = eval(pool_class)(**pool_kwargs) assert isinstance(self.pool, BaseNets.Module) @@ -144,11 +153,11 @@ def __init__( def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -177,14 +186,18 @@ def forward(self, inputs): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 2 + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 2 msg += textwrap.indent( - "\ninput_shape={}\noutput_shape={}".format(self.input_shape, self.output_shape(self.input_shape)), indent) + "\ninput_shape={}\noutput_shape={}".format( + self.input_shape, self.output_shape(self.input_shape) + ), + indent, + ) msg += textwrap.indent("\nbackbone_net={}".format(self.backbone), indent) msg += textwrap.indent("\npool_net={}".format(self.pool), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg @@ -193,11 +206,14 @@ def __repr__(self): Scan Core Networks (Conv1D Sequential + Pool) ================================================ """ + + class ScanCore(EncoderCore, BaseNets.ConvBase): """ A network block that combines a Conv1D backbone network with optional pooling and linear layers. 
""" + def __init__( self, input_shape, @@ -305,14 +321,18 @@ def forward(self, inputs): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 2 + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 2 msg += textwrap.indent( - "\ninput_shape={}\noutput_shape={}".format(self.input_shape, self.output_shape(self.input_shape)), indent) + "\ninput_shape={}\noutput_shape={}".format( + self.input_shape, self.output_shape(self.input_shape) + ), + indent, + ) msg += textwrap.indent("\nbackbone_net={}".format(self.backbone), indent) msg += textwrap.indent("\npool_net={}".format(self.pool), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg @@ -321,6 +341,8 @@ def __repr__(self): Observation Randomizer Networks ================================================ """ + + class Randomizer(BaseNets.Module): """ Base class for randomizer networks. Each randomizer should implement the @output_shape_in, @@ -329,6 +351,7 @@ class Randomizer(BaseNets.Module): (usually processed by a @VisualCore instance). Note that the self.training property can be used to change the randomizer's behavior at train vs. test time. """ + def __init__(self): super(Randomizer, self).__init__() @@ -394,7 +417,11 @@ def forward_in(self, inputs): randomized_inputs = self._forward_in(inputs=inputs) if VISUALIZE_RANDOMIZER: num_samples_to_visualize = min(4, inputs.shape[0]) - self._visualize(inputs, randomized_inputs, num_samples_to_visualize=num_samples_to_visualize) + self._visualize( + inputs, + randomized_inputs, + num_samples_to_visualize=num_samples_to_visualize, + ) return randomized_inputs else: return self._forward_in_eval(inputs) @@ -435,7 +462,9 @@ def _forward_out_eval(self, inputs): return inputs @abc.abstractmethod - def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + def _visualize( + self, pre_random_input, randomized_input, num_samples_to_visualize=2 + ): """ Visualize the original input and the randomized input for _forward_in for debugging purposes. """ @@ -446,6 +475,7 @@ class CropRandomizer(Randomizer): """ Randomly sample crops at input, and then average across crop features at output. """ + def __init__( self, input_shape, @@ -465,7 +495,7 @@ def __init__( """ super(CropRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) assert crop_height < input_shape[1] assert crop_width < input_shape[2] @@ -521,7 +551,7 @@ def _forward_in(self, inputs): Samples N random crops for each input in the batch, and then reshapes inputs to [B * N, ...]. 
""" - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions out, _ = ObsUtils.sample_random_image_crops( images=inputs, crop_height=self.crop_height, @@ -536,10 +566,17 @@ def _forward_in_eval(self, inputs): """ Do center crops during eval """ - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions - inputs = inputs.permute(*range(inputs.dim()-3), inputs.dim()-2, inputs.dim()-1, inputs.dim()-3) + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + inputs = inputs.permute( + *range(inputs.dim() - 3), + inputs.dim() - 2, + inputs.dim() - 1, + inputs.dim() - 3, + ) out = ObsUtils.center_crop(inputs, self.crop_height, self.crop_width) - out = out.permute(*range(out.dim()-3), out.dim()-1, out.dim()-3, out.dim()-2) + out = out.permute( + *range(out.dim() - 3), out.dim() - 1, out.dim() - 3, out.dim() - 2 + ) return out def _forward_out(self, inputs): @@ -548,37 +585,47 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. """ - batch_size = (inputs.shape[0] // self.num_crops) - out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_crops)) + batch_size = inputs.shape[0] // self.num_crops + out = TensorUtils.reshape_dimensions( + inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_crops) + ) return out.mean(dim=1) - def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + def _visualize( + self, pre_random_input, randomized_input, num_samples_to_visualize=2 + ): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) + random_sample_inds = torch.randint( + 0, batch_size, size=(num_samples_to_visualize,) + ) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_crops) + target_dims=(batch_size, self.num_crops), ) # [B * N, ...] -> [B, N, ...] 
randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose( + (0, 2, 3, 1) + ) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose( + (0, 1, 3, 4, 2) + ) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name='{}'.format(str(self.__class__.__name__)) + randomizer_name="{}".format(str(self.__class__.__name__)), ) def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) + header = "{}".format(str(self.__class__.__name__)) msg = header + "(input_shape={}, crop_size=[{}, {}], num_crops={})".format( - self.input_shape, self.crop_height, self.crop_width, self.num_crops) + self.input_shape, self.crop_height, self.crop_width, self.num_crops + ) return msg @@ -586,6 +633,7 @@ class CropResizeRandomizer(Randomizer): """ Randomly sample crop, then resize to specified size """ + def __init__( self, input_shape, @@ -608,7 +656,7 @@ def __init__( """ super(CropResizeRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) # assert crop_height < input_shape[1] # assert crop_width < input_shape[2] @@ -619,7 +667,12 @@ def __init__( self.num_crops = num_crops self.pos_enc = pos_enc - self.resize_crop = RandomResizedCrop(size=size, scale=scale, ratio=ratio, interpolation=TVF.InterpolationMode.BILINEAR) + self.resize_crop = RandomResizedCrop( + size=size, + scale=scale, + ratio=ratio, + interpolation=TVF.InterpolationMode.BILINEAR, + ) def output_shape_in(self, input_shape=None): shape = [self.input_shape[0], self.size[0], self.size[1]] @@ -628,21 +681,27 @@ def output_shape_in(self, input_shape=None): def output_shape_out(self, input_shape=None): return list(input_shape) - def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + def _visualize( + self, pre_random_input, randomized_input, num_samples_to_visualize=2 + ): """ pre_random_input: (B, C, H, W) randomized_input: (B, C, H, W) - num_samples_to_visualize: + num_samples_to_visualize: Use plt.imsave to save a plot with the original input and the randomized input side by side. Save it to debug/augIms/ with a unique name. 
""" - fig, axes = plt.subplots(num_samples_to_visualize, 2, figsize=(10, 5*num_samples_to_visualize)) + fig, axes = plt.subplots( + num_samples_to_visualize, 2, figsize=(10, 5 * num_samples_to_visualize) + ) for i in range(num_samples_to_visualize): axes[i, 0].imshow(pre_random_input[i].permute(1, 2, 0).cpu().numpy()) axes[i, 0].set_title("Original Input") axes[i, 1].imshow(randomized_input[i].permute(1, 2, 0).cpu().numpy()) axes[i, 1].set_title("Randomized Input") plt.tight_layout() - plt.savefig(f"debug/augIms/sample_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.png") + plt.savefig( + f"debug/augIms/sample_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.png" + ) plt.close(fig) # plt.close(fig) # fig, axes = plt.subplots(1, 2) @@ -683,8 +742,9 @@ def _forward_in_eval(self, inputs): # return out # just resize - return TVF.resize(inputs, size=self.size, interpolation=TVF.InterpolationMode.BILINEAR) - + return TVF.resize( + inputs, size=self.size, interpolation=TVF.InterpolationMode.BILINEAR + ) def _forward_out(self, inputs): """ @@ -696,11 +756,13 @@ def _forward_out(self, inputs): """ return inputs - + + class CropResizeColorRandomizer(CropResizeRandomizer): """ Does the same thing as CropResizeRandomizer, but additionally performs color jitter """ + def __init__( self, input_shape, @@ -716,7 +778,7 @@ def __init__( saturation_min=1.0, saturation_max=1.0, hue_min=0.0, - hue_max=0.0 + hue_max=0.0, ): super(CropResizeColorRandomizer, self).__init__( input_shape=input_shape, @@ -726,24 +788,29 @@ def __init__( num_crops=num_crops, pos_enc=pos_enc, ) - self.color_jitter = TT.ColorJitter(brightness=(brightness_min, brightness_max), contrast=(contrast_min, contrast_max), saturation=(saturation_min, saturation_max), hue=(hue_min, hue_max)) - + self.color_jitter = TT.ColorJitter( + brightness=(brightness_min, brightness_max), + contrast=(contrast_min, contrast_max), + saturation=(saturation_min, saturation_max), + hue=(hue_min, hue_max), + ) + def _forward_in(self, inputs): out = super(CropResizeColorRandomizer, self)._forward_in(inputs) out = self.color_jitter(out) # self._visualize(inputs, out) return out - + def _forward_in_eval(self, inputs): out = super(CropResizeColorRandomizer, self)._forward_in_eval(inputs) return out - class ColorRandomizer(Randomizer): """ Randomly sample color jitter at input, and then average across color jtters at output. 
""" + def __init__( self, input_shape, @@ -771,12 +838,24 @@ def __init__( """ super(ColorRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) self.input_shape = input_shape - self.brightness = [max(0, 1 - brightness), 1 + brightness] if type(brightness) in {float, int} else brightness - self.contrast = [max(0, 1 - contrast), 1 + contrast] if type(contrast) in {float, int} else contrast - self.saturation = [max(0, 1 - saturation), 1 + saturation] if type(saturation) in {float, int} else saturation + self.brightness = ( + [max(0, 1 - brightness), 1 + brightness] + if type(brightness) in {float, int} + else brightness + ) + self.contrast = ( + [max(0, 1 - contrast), 1 + contrast] + if type(contrast) in {float, int} + else contrast + ) + self.saturation = ( + [max(0, 1 - saturation), 1 + saturation] + if type(saturation) in {float, int} + else saturation + ) self.hue = [-hue, hue] if type(hue) in {float, int} else hue self.num_samples = num_samples @@ -797,15 +876,21 @@ def get_transform(self): if self.brightness is not None: brightness_factor = random.uniform(self.brightness[0], self.brightness[1]) - transforms.append(Lambda(lambda img: TVF.adjust_brightness(img, brightness_factor))) + transforms.append( + Lambda(lambda img: TVF.adjust_brightness(img, brightness_factor)) + ) if self.contrast is not None: contrast_factor = random.uniform(self.contrast[0], self.contrast[1]) - transforms.append(Lambda(lambda img: TVF.adjust_contrast(img, contrast_factor))) + transforms.append( + Lambda(lambda img: TVF.adjust_contrast(img, contrast_factor)) + ) if self.saturation is not None: saturation_factor = random.uniform(self.saturation[0], self.saturation[1]) - transforms.append(Lambda(lambda img: TVF.adjust_saturation(img, saturation_factor))) + transforms.append( + Lambda(lambda img: TVF.adjust_saturation(img, saturation_factor)) + ) if self.hue is not None: hue_factor = random.uniform(self.hue[0], self.hue[1]) @@ -829,7 +914,11 @@ def get_batch_transform(self, N): each sub-set of samples along batch dimension, assumed to be the FIRST dimension in the inputted tensor Note: This function will MULTIPLY the first dimension by N """ - return Lambda(lambda x: torch.stack([self.get_transform()(x_) for x_ in x for _ in range(N)])) + return Lambda( + lambda x: torch.stack( + [self.get_transform()(x_) for x_ in x for _ in range(N)] + ) + ) def output_shape_in(self, input_shape=None): # outputs are same shape as inputs @@ -846,7 +935,7 @@ def _forward_in(self, inputs): Samples N random color jitters for each input in the batch, and then reshapes inputs to [B * N, ...]. """ - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions # Make sure shape is exactly 4 if len(inputs.shape) == 3: @@ -863,37 +952,49 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. 
""" - batch_size = (inputs.shape[0] // self.num_samples) - out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples)) + batch_size = inputs.shape[0] // self.num_samples + out = TensorUtils.reshape_dimensions( + inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_samples) + ) return out.mean(dim=1) - def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + def _visualize( + self, pre_random_input, randomized_input, num_samples_to_visualize=2 + ): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) + random_sample_inds = torch.randint( + 0, batch_size, size=(num_samples_to_visualize,) + ) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples) + target_dims=(batch_size, self.num_samples), ) # [B * N, ...] -> [B, N, ...] randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose( + (0, 2, 3, 1) + ) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose( + (0, 1, 3, 4, 2) + ) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name='{}'.format(str(self.__class__.__name__)) + randomizer_name="{}".format(str(self.__class__.__name__)), ) def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = header + f"(input_shape={self.input_shape}, brightness={self.brightness}, contrast={self.contrast}, " \ - f"saturation={self.saturation}, hue={self.hue}, num_samples={self.num_samples})" + header = "{}".format(str(self.__class__.__name__)) + msg = ( + header + + f"(input_shape={self.input_shape}, brightness={self.brightness}, contrast={self.contrast}, " + f"saturation={self.saturation}, hue={self.hue}, num_samples={self.num_samples})" + ) return msg @@ -901,6 +1002,7 @@ class GaussianNoiseRandomizer(Randomizer): """ Randomly sample gaussian noise at input, and then average across noises at output. """ + def __init__( self, input_shape, @@ -943,7 +1045,11 @@ def _forward_in(self, inputs): out = TensorUtils.repeat_by_expand_at(inputs, repeats=self.num_samples, dim=0) # Sample noise across all samples - out = torch.rand(size=out.shape).to(inputs.device) * self.noise_std + self.noise_mean + out + out = ( + torch.rand(size=out.shape).to(inputs.device) * self.noise_std + + self.noise_mean + + out + ) # Possibly clamp if self.limits is not None: @@ -957,35 +1063,47 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. 
""" - batch_size = (inputs.shape[0] // self.num_samples) - out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples)) + batch_size = inputs.shape[0] // self.num_samples + out = TensorUtils.reshape_dimensions( + inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_samples) + ) return out.mean(dim=1) - def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): + def _visualize( + self, pre_random_input, randomized_input, num_samples_to_visualize=2 + ): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) + random_sample_inds = torch.randint( + 0, batch_size, size=(num_samples_to_visualize,) + ) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples) + target_dims=(batch_size, self.num_samples), ) # [B * N, ...] -> [B, N, ...] randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose( + (0, 2, 3, 1) + ) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose( + (0, 1, 3, 4, 2) + ) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name='{}'.format(str(self.__class__.__name__)) + randomizer_name="{}".format(str(self.__class__.__name__)), ) def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = header + f"(input_shape={self.input_shape}, noise_mean={self.noise_mean}, noise_std={self.noise_std}, " \ - f"limits={self.limits}, num_samples={self.num_samples})" + header = "{}".format(str(self.__class__.__name__)) + msg = ( + header + + f"(input_shape={self.input_shape}, noise_mean={self.noise_mean}, noise_std={self.noise_std}, " + f"limits={self.limits}, num_samples={self.num_samples})" + ) return msg diff --git a/robomimic/models/obs_nets.py b/robomimic/models/obs_nets.py index 4a0b9483..f1fc7a79 100644 --- a/robomimic/models/obs_nets.py +++ b/robomimic/models/obs_nets.py @@ -7,6 +7,7 @@ As an example, an observation could consist of a flat "robot0_eef_pos" observation key, and a 3-channel RGB "agentview_image" observation key. 
""" + import sys import numpy as np import textwrap @@ -21,18 +22,25 @@ from robomimic.utils.python_utils import extract_class_init_kwargs_from_dict import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils -from robomimic.models.base_nets import Module, Sequential, MLP, RNN_Base, ResNet18Conv, SpatialSoftmax, \ - FeatureAggregator +from robomimic.models.base_nets import ( + Module, + Sequential, + MLP, + RNN_Base, + ResNet18Conv, + SpatialSoftmax, + FeatureAggregator, +) from robomimic.models.obs_core import VisualCore, Randomizer from robomimic.models.transformers import PositionalEncoding, GPT_Backbone from robomimic.models.base_nets import Vit def obs_encoder_factory( - obs_shapes, - feature_activation=nn.ReLU, - encoder_kwargs=None, - ): + obs_shapes, + feature_activation=nn.ReLU, + encoder_kwargs=None, +): """ Utility function to create an @ObservationEncoder from kwargs specified in config. @@ -63,11 +71,16 @@ def obs_encoder_factory( enc = ObservationEncoder(feature_activation=feature_activation) for k, obs_shape in obs_shapes.items(): obs_modality = ObsUtils.OBS_KEYS_TO_MODALITIES[k] - enc_kwargs = deepcopy(ObsUtils.DEFAULT_ENCODER_KWARGS[obs_modality]) if encoder_kwargs is None else \ - deepcopy(encoder_kwargs[obs_modality]) + enc_kwargs = ( + deepcopy(ObsUtils.DEFAULT_ENCODER_KWARGS[obs_modality]) + if encoder_kwargs is None + else deepcopy(encoder_kwargs[obs_modality]) + ) - for obs_module, cls_mapping in zip(("core", "obs_randomizer"), - (ObsUtils.OBS_ENCODER_CORES, ObsUtils.OBS_RANDOMIZERS)): + for obs_module, cls_mapping in zip( + ("core", "obs_randomizer"), + (ObsUtils.OBS_ENCODER_CORES, ObsUtils.OBS_RANDOMIZERS), + ): # Sanity check for kwargs in case they don't exist / are None if enc_kwargs.get(f"{obs_module}_kwargs", None) is None: enc_kwargs[f"{obs_module}_kwargs"] = {} @@ -75,15 +88,22 @@ def obs_encoder_factory( enc_kwargs[f"{obs_module}_kwargs"]["input_shape"] = obs_shape # If group class is specified, then make sure corresponding kwargs only contain relevant kwargs if enc_kwargs[f"{obs_module}_class"] is not None: - enc_kwargs[f"{obs_module}_kwargs"] = extract_class_init_kwargs_from_dict( - cls=cls_mapping[enc_kwargs[f"{obs_module}_class"]], - dic=enc_kwargs[f"{obs_module}_kwargs"], - copy=False, + enc_kwargs[f"{obs_module}_kwargs"] = ( + extract_class_init_kwargs_from_dict( + cls=cls_mapping[enc_kwargs[f"{obs_module}_class"]], + dic=enc_kwargs[f"{obs_module}_kwargs"], + copy=False, + ) ) # Add in input shape info - randomizer = None if enc_kwargs["obs_randomizer_class"] is None else \ - ObsUtils.OBS_RANDOMIZERS[enc_kwargs["obs_randomizer_class"]](**enc_kwargs["obs_randomizer_kwargs"]) + randomizer = ( + None + if enc_kwargs["obs_randomizer_class"] is None + else ObsUtils.OBS_RANDOMIZERS[enc_kwargs["obs_randomizer_class"]]( + **enc_kwargs["obs_randomizer_kwargs"] + ) + ) enc.register_obs_key( name=k, @@ -104,6 +124,7 @@ class ObservationEncoder(Module): Call @register_obs_key to register observation keys with the encoder and then finally call @make to create the encoder networks. """ + def __init__(self, feature_activation=nn.ReLU): """ Args: @@ -148,13 +169,22 @@ def register_obs_key( as another observation key. This observation key must already exist in this encoder. 
Warning: Note that this does not share the observation key randomizer """ - assert not self._locked, "ObservationEncoder: @register_obs_key called after @make" - assert name not in self.obs_shapes, "ObservationEncoder: modality {} already exists".format(name) + assert ( + not self._locked + ), "ObservationEncoder: @register_obs_key called after @make" + assert ( + name not in self.obs_shapes + ), "ObservationEncoder: modality {} already exists".format(name) if net is not None: - assert isinstance(net, Module), "ObservationEncoder: @net must be instance of Module class" - assert (net_class is None) and (net_kwargs is None) and (share_net_from is None), \ - "ObservationEncoder: @net provided - ignore other net creation options" + assert isinstance( + net, Module + ), "ObservationEncoder: @net must be instance of Module class" + assert ( + (net_class is None) + and (net_kwargs is None) + and (share_net_from is None) + ), "ObservationEncoder: @net provided - ignore other net creation options" if share_net_from is not None: # share processing with another modality @@ -192,7 +222,9 @@ def _create_layers(self): for k in self.obs_shapes: if self.obs_nets_classes[k] is not None: # create net to process this modality - self.obs_nets[k] = ObsUtils.OBS_ENCODER_CORES[self.obs_nets_classes[k]](**self.obs_nets_kwargs[k]) + self.obs_nets[k] = ObsUtils.OBS_ENCODER_CORES[self.obs_nets_classes[k]]( + **self.obs_nets_kwargs[k] + ) elif self.obs_share_mods[k] is not None: # make sure net is shared with another modality self.obs_nets[k] = self.obs_nets[self.obs_share_mods[k]] @@ -220,7 +252,9 @@ def forward(self, obs_dict): assert self._locked, "ObservationEncoder: @make has not been called yet" # ensure all modalities that the encoder handles are present - assert set(self.obs_shapes.keys()).issubset(obs_dict), "ObservationEncoder: {} does not contain all modalities {}".format( + assert set(self.obs_shapes.keys()).issubset( + obs_dict + ), "ObservationEncoder: {} does not contain all modalities {}".format( list(obs_dict.keys()), list(self.obs_shapes.keys()) ) @@ -266,19 +300,27 @@ def __repr__(self): """ Pretty print the encoder. 
""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' + header = "{}".format(str(self.__class__.__name__)) + msg = "" for k in self.obs_shapes: - msg += textwrap.indent('\nKey(\n', ' ' * 4) - indent = ' ' * 8 - msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent) - msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent) - msg += textwrap.indent("randomizer={}\n".format(self.obs_randomizers[k]), indent) + msg += textwrap.indent("\nKey(\n", " " * 4) + indent = " " * 8 + msg += textwrap.indent( + "name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent + ) + msg += textwrap.indent( + "modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent + ) + msg += textwrap.indent( + "randomizer={}\n".format(self.obs_randomizers[k]), indent + ) msg += textwrap.indent("net={}\n".format(self.obs_nets[k]), indent) - msg += textwrap.indent("sharing_from={}\n".format(self.obs_share_mods[k]), indent) - msg += textwrap.indent(")", ' ' * 4) - msg += textwrap.indent("\noutput_shape={}".format(self.output_shape()), ' ' * 4) - msg = header + '(' + msg + '\n)' + msg += textwrap.indent( + "sharing_from={}\n".format(self.obs_share_mods[k]), indent + ) + msg += textwrap.indent(")", " " * 4) + msg += textwrap.indent("\noutput_shape={}".format(self.output_shape()), " " * 4) + msg = header + "(" + msg + "\n)" return msg @@ -290,6 +332,7 @@ class ObservationDecoder(Module): module in order to implement more complex schemes for generating each modality. """ + def __init__( self, decode_shapes, @@ -328,7 +371,7 @@ def output_shape(self, input_shape=None): Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return { k : list(self.obs_shapes[k]) for k in self.obs_shapes } + return {k: list(self.obs_shapes[k]) for k in self.obs_shapes} def forward(self, feats): """ @@ -342,16 +385,20 @@ def forward(self, feats): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' + header = "{}".format(str(self.__class__.__name__)) + msg = "" for k in self.obs_shapes: - msg += textwrap.indent('\nKey(\n', ' ' * 4) - indent = ' ' * 8 - msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent) - msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent) + msg += textwrap.indent("\nKey(\n", " " * 4) + indent = " " * 8 + msg += textwrap.indent( + "name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent + ) + msg += textwrap.indent( + "modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent + ) msg += textwrap.indent("net=({})\n".format(self.nets[k]), indent) - msg += textwrap.indent(")", ' ' * 4) - msg = header + '(' + msg + '\n)' + msg += textwrap.indent(")", " " * 4) + msg = header + "(" + msg + "\n)" return msg @@ -366,6 +413,7 @@ class ObservationGroupEncoder(Module): and each OrderedDict should be a map between modalities and expected input shapes (e.g. { 'image' : (3, 120, 160) }). 
""" + def __init__( self, observation_group_shapes, @@ -403,7 +451,12 @@ def __init__( # type checking assert isinstance(observation_group_shapes, OrderedDict) - assert np.all([isinstance(observation_group_shapes[k], OrderedDict) for k in observation_group_shapes]) + assert np.all( + [ + isinstance(observation_group_shapes[k], OrderedDict) + for k in observation_group_shapes + ] + ) self.observation_group_shapes = observation_group_shapes @@ -434,7 +487,9 @@ def forward(self, **inputs): """ # ensure all observation groups we need are present - assert set(self.observation_group_shapes.keys()).issubset(inputs), "{} does not contain all observation groups {}".format( + assert set(self.observation_group_shapes.keys()).issubset( + inputs + ), "{} does not contain all observation groups {}".format( list(inputs.keys()), list(self.observation_group_shapes.keys()) ) @@ -442,9 +497,7 @@ def forward(self, **inputs): # Deterministic order since self.observation_group_shapes is OrderedDict for obs_group in self.observation_group_shapes: # pass through encoder - outputs.append( - self.nets[obs_group].forward(inputs[obs_group]) - ) + outputs.append(self.nets[obs_group].forward(inputs[obs_group])) return torch.cat(outputs, dim=-1) @@ -460,35 +513,36 @@ def output_shape(self): def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' + header = "{}".format(str(self.__class__.__name__)) + msg = "" for k in self.observation_group_shapes: - msg += '\n' - indent = ' ' * 4 + msg += "\n" + indent = " " * 4 msg += textwrap.indent("group={}\n{}".format(k, self.nets[k]), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg class MIMO_MLP(Module): """ Extension to MLP to accept multiple observation dictionaries as input and - to output dictionaries of tensors. Inputs are specified as a dictionary of + to output dictionaries of tensors. Inputs are specified as a dictionary of observation dictionaries, with each key corresponding to an observation group. This module utilizes @ObservationGroupEncoder to process the multiple input dictionaries and @ObservationDecoder to generate tensor dictionaries. The default behavior for encoding the inputs is to process visual inputs with a learned CNN and concatenating - the flat encodings with the other flat inputs. The default behavior for generating + the flat encodings with the other flat inputs. The default behavior for generating outputs is to use a linear layer branch to produce each modality separately (including visual outputs). 
""" + def __init__( self, input_obs_group_shapes, output_shapes, layer_dims, - layer_func=nn.Linear, + layer_func=nn.Linear, activation=nn.ReLU, encoder_kwargs=None, ): @@ -528,7 +582,12 @@ def __init__( super(MIMO_MLP, self).__init__() assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) + assert np.all( + [ + isinstance(input_obs_group_shapes[k], OrderedDict) + for k in input_obs_group_shapes + ] + ) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes @@ -552,7 +611,7 @@ def __init__( layer_dims=layer_dims[:-1], layer_func=layer_func, activation=activation, - output_activation=activation, # make sure non-linearity is applied before decoder + output_activation=activation, # make sure non-linearity is applied before decoder ) # decoder for output modalities @@ -566,7 +625,7 @@ def output_shape(self, input_shape=None): Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return { k : list(self.output_shapes[k]) for k in self.output_shapes } + return {k: list(self.output_shapes[k]) for k in self.output_shapes} def forward(self, return_latent=False, **inputs): """ @@ -592,21 +651,22 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return '' + return "" def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 - if self._to_string() != '': + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 + if self._to_string() != "": msg += textwrap.indent("\n" + self._to_string() + "\n", indent) msg += textwrap.indent("\nencoder={}".format(self.nets["encoder"]), indent) msg += textwrap.indent("\n\nmlp={}".format(self.nets["mlp"]), indent) msg += textwrap.indent("\n\ndecoder={}".format(self.nets["decoder"]), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg + class RNN_MIMO_MLP(Module): """ A wrapper class for a multi-step RNN and a per-step MLP and a decoder. @@ -614,8 +674,9 @@ class RNN_MIMO_MLP(Module): Structure: [encoder -> rnn -> mlp -> decoder] All temporal inputs are processed by a shared @ObservationGroupEncoder, - followed by an RNN, and then a per-step multi-output MLP. + followed by an RNN, and then a per-step multi-output MLP. """ + def __init__( self, input_obs_group_shapes, @@ -649,7 +710,7 @@ def __init__( rnn_kwargs (dict): kwargs for the rnn model per_step (bool): if True, apply the MLP and observation decoder into @output_shapes - at every step of the RNN. Otherwise, apply them to the final hidden state of the + at every step of the RNN. Otherwise, apply them to the final hidden state of the RNN. encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. 
Otherwise, should @@ -671,7 +732,12 @@ def __init__( """ super(RNN_MIMO_MLP, self).__init__() assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) + assert np.all( + [ + isinstance(input_obs_group_shapes[k], OrderedDict) + for k in input_obs_group_shapes + ] + ) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes self.output_shapes = output_shapes @@ -690,18 +756,20 @@ def __init__( # bidirectional RNNs mean that the output of RNN will be twice the hidden dimension rnn_is_bidirectional = rnn_kwargs.get("bidirectional", False) - num_directions = int(rnn_is_bidirectional) + 1 # 2 if bidirectional, 1 otherwise + num_directions = ( + int(rnn_is_bidirectional) + 1 + ) # 2 if bidirectional, 1 otherwise rnn_output_dim = num_directions * rnn_hidden_dim per_step_net = None - self._has_mlp = (len(mlp_layer_dims) > 0) + self._has_mlp = len(mlp_layer_dims) > 0 if self._has_mlp: self.nets["mlp"] = MLP( input_dim=rnn_output_dim, output_dim=mlp_layer_dims[-1], layer_dims=mlp_layer_dims[:-1], output_activation=mlp_activation, - layer_func=mlp_layer_func + layer_func=mlp_layer_func, ) self.nets["decoder"] = ObservationDecoder( decode_shapes=self.output_shapes, @@ -724,7 +792,7 @@ def __init__( rnn_num_layers=rnn_num_layers, rnn_type=rnn_type, per_step_net=per_step_net, - rnn_kwargs=rnn_kwargs + rnn_kwargs=rnn_kwargs, ) def get_rnn_init_state(self, batch_size, device): @@ -757,10 +825,14 @@ def output_shape(self, input_shape): obs_group = list(self.input_obs_group_shapes.keys())[0] mod = list(self.input_obs_group_shapes[obs_group].keys())[0] T = input_shape[obs_group][mod][0] - TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, - msg="RNN_MIMO_MLP: input_shape inconsistent in temporal dimension") + TensorUtils.assert_size_at_dim( + input_shape, + size=T, + dim=0, + msg="RNN_MIMO_MLP: input_shape inconsistent in temporal dimension", + ) # returns a dictionary instead of list since outputs are dictionaries - return { k : [T] + list(self.output_shapes[k]) for k in self.output_shapes } + return {k: [T] + list(self.output_shapes[k]) for k in self.output_shapes} def forward(self, rnn_init_state=None, return_state=False, **inputs): """ @@ -785,20 +857,30 @@ def forward(self, rnn_init_state=None, return_state=False, **inputs): for obs_group in self.input_obs_group_shapes: for k in self.input_obs_group_shapes[obs_group]: # first two dimensions should be [B, T] for inputs - assert inputs[obs_group][k].ndim - 2 == len(self.input_obs_group_shapes[obs_group][k]) + assert inputs[obs_group][k].ndim - 2 == len( + self.input_obs_group_shapes[obs_group][k] + ) # use encoder to extract flat rnn inputs - rnn_inputs = TensorUtils.time_distributed(inputs, self.nets["encoder"], inputs_as_kwargs=True) + rnn_inputs = TensorUtils.time_distributed( + inputs, self.nets["encoder"], inputs_as_kwargs=True + ) assert rnn_inputs.ndim == 3 # [B, T, D] if self.per_step: - return self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state) - + return self.nets["rnn"].forward( + inputs=rnn_inputs, + rnn_init_state=rnn_init_state, + return_state=return_state, + ) + # apply MLP + decoder to last RNN output - outputs = self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state) + outputs = self.nets["rnn"].forward( + inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state + ) if 
return_state: outputs, rnn_state = outputs - assert outputs.ndim == 3 # [B, T, D] + assert outputs.ndim == 3 # [B, T, D] if self._has_mlp: outputs = self.nets["decoder"](self.nets["mlp"](outputs[:, -1])) else: @@ -814,7 +896,7 @@ def forward_step(self, rnn_state, **inputs): Args: inputs (dict): expects same modalities as @self.input_shapes, with - additional batch dimension (but NOT time), since this is a + additional batch dimension (but NOT time), since this is a single time step. rnn_state (torch.Tensor): rnn hidden state @@ -825,12 +907,14 @@ def forward_step(self, rnn_state, **inputs): rnn_state: return the new rnn state """ - # ensure that the only extra dimension is batch dim, not temporal dim - assert np.all([inputs[k].ndim - 1 == len(self.input_shapes[k]) for k in self.input_shapes]) + # ensure that the only extra dimension is batch dim, not temporal dim + assert np.all( + [inputs[k].ndim - 1 == len(self.input_shapes[k]) for k in self.input_shapes] + ) inputs = TensorUtils.to_sequence(inputs) outputs, rnn_state = self.forward( - inputs, + inputs, rnn_init_state=rnn_state, return_state=True, ) @@ -843,32 +927,33 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return '' + return "" def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 msg += textwrap.indent("\n" + self._to_string(), indent) msg += textwrap.indent("\n\nencoder={}".format(self.nets["encoder"]), indent) msg += textwrap.indent("\n\nrnn={}".format(self.nets["rnn"]), indent) - msg = header + '(' + msg + '\n)' + msg = header + "(" + msg + "\n)" return msg class MIMO_Transformer(Module): """ - Extension to Transformer (based on GPT architecture) to accept multiple observation - dictionaries as input and to output dictionaries of tensors. Inputs are specified as + Extension to Transformer (based on GPT architecture) to accept multiple observation + dictionaries as input and to output dictionaries of tensors. Inputs are specified as a dictionary of observation dictionaries, with each key corresponding to an observation group. This module utilizes @ObservationGroupEncoder to process the multiple input dictionaries and @ObservationDecoder to generate tensor dictionaries. The default behavior for encoding the inputs is to process visual inputs with a learned CNN and concatenating - the flat encodings with the other flat inputs. The default behavior for generating + the flat encodings with the other flat inputs. The default behavior for generating outputs is to use a linear layer branch to produce each modality separately (including visual outputs). """ + def __init__( self, input_obs_group_shapes, @@ -896,7 +981,7 @@ def __init__( transformer_embed_dim (int): dimension for embeddings used by transformer transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. Self-attention is computed over this many partitions of the embedding dimension separately. 
transformer_context_length (int): expected length of input sequences transformer_activation: non-linearity for input and output layers used in transformer @@ -906,9 +991,14 @@ def __init__( encoder_kwargs (dict): observation encoder config """ super(MIMO_Transformer, self).__init__() - + assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) + assert np.all( + [ + isinstance(input_obs_group_shapes[k], OrderedDict) + for k in input_obs_group_shapes + ] + ) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes @@ -943,11 +1033,13 @@ def __init__( torch.zeros(1, max_timestep, transformer_embed_dim) ) else: - self.nets["embed_timestep"] = nn.Embedding(max_timestep, transformer_embed_dim) + self.nets["embed_timestep"] = nn.Embedding( + max_timestep, transformer_embed_dim + ) # layer norm for embeddings self.nets["embed_ln"] = nn.LayerNorm(transformer_embed_dim) - + # dropout for input embeddings self.nets["embed_drop"] = nn.Dropout(transformer_emb_dropout) @@ -971,14 +1063,16 @@ def __init__( self.transformer_context_length = transformer_context_length self.transformer_embed_dim = transformer_embed_dim self.transformer_sinusoidal_embedding = transformer_sinusoidal_embedding - self.transformer_nn_parameter_for_timesteps = transformer_nn_parameter_for_timesteps + self.transformer_nn_parameter_for_timesteps = ( + transformer_nn_parameter_for_timesteps + ) def output_shape(self, input_shape=None): """ Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return { k : list(self.output_shapes[k]) for k in self.output_shapes } + return {k: list(self.output_shapes[k]) for k in self.output_shapes} def embed_timesteps(self, embeddings): """ @@ -1012,7 +1106,9 @@ def embed_timesteps(self, embeddings): ) # these are NOT fed into transformer, only added to the inputs. # compute how many modalities were combined into embeddings, replicate time embeddings that many times num_replicates = embeddings.shape[-1] // self.transformer_embed_dim - time_embeddings = torch.cat([time_embeddings for _ in range(num_replicates)], -1) + time_embeddings = torch.cat( + [time_embeddings for _ in range(num_replicates)], -1 + ) assert ( embeddings.shape == time_embeddings.shape ), f"{embeddings.shape}, {time_embeddings.shape}" @@ -1038,7 +1134,6 @@ def input_embedding( return embeddings - def forward(self, **inputs): """ Process each set of inputs in its own observation group. 
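For reference, a minimal standalone sketch of the timestep-embedding step that embed_timesteps performs in the hunks above: a learned per-timestep table whose rows are replicated across the concatenated per-modality embeddings and added to them. All sizes below are illustrative, and time_table is a stand-in for the module's embed_timestep parameter rather than robomimic's exact variable.

    import torch
    import torch.nn as nn

    # Illustrative sizes; robomimic reads these from the transformer config.
    embed_dim, max_timestep = 64, 100
    B, T, num_modalities = 8, 10, 2

    # Learned per-timestep table (the nn.Parameter branch above).
    time_table = nn.Parameter(torch.zeros(1, max_timestep, embed_dim))

    # Concatenated per-modality embeddings from the observation encoder.
    embeddings = torch.randn(B, T, num_modalities * embed_dim)

    time_embeddings = time_table[:, :T, :]              # (1, T, embed_dim), broadcasts over batch
    num_replicates = embeddings.shape[-1] // embed_dim  # modalities combined into the embedding
    time_embeddings = torch.cat([time_embeddings] * num_replicates, dim=-1)
    assert embeddings.shape[1:] == time_embeddings.shape[1:]
    embeddings = embeddings + time_embeddings           # added to the inputs, not fed in separately
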
@@ -1058,7 +1153,9 @@ def forward(self, **inputs): # first two dimensions should be [B, T] for inputs if inputs[obs_group][k] is None: continue - assert inputs[obs_group][k].ndim - 2 == len(self.input_obs_group_shapes[obs_group][k]) + assert inputs[obs_group][k].ndim - 2 == len( + self.input_obs_group_shapes[obs_group][k] + ) inputs = inputs.copy() @@ -1071,7 +1168,9 @@ def forward(self, **inputs): if transformer_encoder_outputs is None: transformer_embeddings = self.input_embedding(transformer_inputs) # pass encoded sequences through transformer - transformer_encoder_outputs = self.nets["transformer"].forward(transformer_embeddings) + transformer_encoder_outputs = self.nets["transformer"].forward( + transformer_embeddings + ) transformer_outputs = transformer_encoder_outputs # apply decoder to each timestep of sequence to get a dictionary of outputs @@ -1085,17 +1184,19 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return '' + return "" def __repr__(self): """Pretty print network.""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 - if self._to_string() != '': + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 + if self._to_string() != "": msg += textwrap.indent("\n" + self._to_string() + "\n", indent) msg += textwrap.indent("\nencoder={}".format(self.nets["encoder"]), indent) - msg += textwrap.indent("\n\ntransformer={}".format(self.nets["transformer"]), indent) + msg += textwrap.indent( + "\n\ntransformer={}".format(self.nets["transformer"]), indent + ) msg += textwrap.indent("\n\ndecoder={}".format(self.nets["decoder"]), indent) - msg = header + '(' + msg + '\n)' - return msg \ No newline at end of file + msg = header + "(" + msg + "\n)" + return msg diff --git a/robomimic/models/policy_nets.py b/robomimic/models/policy_nets.py index 8dba1d93..b45dcbc5 100644 --- a/robomimic/models/policy_nets.py +++ b/robomimic/models/policy_nets.py @@ -6,6 +6,7 @@ are assumed to lie in [-1, 1], and most networks will have a final tanh activation to help ensure this range. """ + import textwrap import numpy as np from collections import OrderedDict @@ -18,7 +19,12 @@ import robomimic.utils.tensor_utils as TensorUtils from robomimic.models.base_nets import Module from robomimic.models.transformers import GPT_Backbone -from robomimic.models.obs_nets import MIMO_MLP, RNN_MIMO_MLP, MIMO_Transformer, ObservationDecoder +from robomimic.models.obs_nets import ( + MIMO_MLP, + RNN_MIMO_MLP, + MIMO_Transformer, + ObservationDecoder, +) from robomimic.models.vae_nets import VAE from robomimic.models.distributions import TanhWrappedDistribution @@ -28,6 +34,7 @@ class ActorNetwork(MIMO_MLP): A basic policy network that predicts actions from observations. Can optionally be goal conditioned on future observations. """ + def __init__( self, obs_shapes, @@ -102,7 +109,9 @@ def output_shape(self, input_shape=None): return [self.ac_dim] def forward(self, obs_dict, goal_dict=None): - actions = super(ActorNetwork, self).forward(obs=obs_dict, goal=goal_dict)["action"] + actions = super(ActorNetwork, self).forward(obs=obs_dict, goal=goal_dict)[ + "action" + ] # apply tanh squashing to ensure actions are in [-1, 1] return torch.tanh(actions) @@ -116,6 +125,7 @@ class PerturbationActorNetwork(ActorNetwork): An action perturbation network - primarily used in BCQ. It takes states and actions and returns action perturbations. 
""" + def __init__( self, obs_shapes, @@ -134,8 +144,8 @@ def __init__( mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. - perturbation_scale (float): the perturbation network output is always squashed to - lie in +/- @perturbation_scale. The final action output is equal to the original + perturbation_scale (float): the perturbation network output is always squashed to + lie in +/- @perturbation_scale. The final action output is equal to the original input action added to the output perturbation (and clipped to lie in [-1, 1]). goal_shapes (OrderedDict): a dictionary that maps modality to @@ -187,7 +197,9 @@ def forward(self, obs_dict, acts, goal_dict=None): def _to_string(self): """Info to pretty print.""" - return "action_dim={}, perturbation_scale={}".format(self.ac_dim, self.perturbation_scale) + return "action_dim={}, perturbation_scale={}".format( + self.ac_dim, self.perturbation_scale + ) class GaussianActorNetwork(ActorNetwork): @@ -195,6 +207,7 @@ class GaussianActorNetwork(ActorNetwork): Variant of actor network that learns a diagonal unimodal Gaussian distribution over actions. """ + def __init__( self, obs_shapes, @@ -287,8 +300,11 @@ def softplus_scaled(x): "softplus": softplus_scaled, "exp": torch.exp, } - assert std_activation in self.activations, \ - "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) + assert ( + std_activation in self.activations + ), "std_activation must be one of: {}; instead got: {}".format( + self.activations.keys(), std_activation + ) self.std_activation = std_activation if not self.fixed_std else None self.low_noise_eval = low_noise_eval @@ -306,8 +322,12 @@ def softplus_scaled(x): if init_last_fc_weight is not None: with torch.no_grad(): for name, layer in self.nets["decoder"].nets.items(): - torch.nn.init.uniform_(layer.weight, -init_last_fc_weight, init_last_fc_weight) - torch.nn.init.uniform_(layer.bias, -init_last_fc_weight, init_last_fc_weight) + torch.nn.init.uniform_( + layer.weight, -init_last_fc_weight, init_last_fc_weight + ) + torch.nn.init.uniform_( + layer.bias, -init_last_fc_weight, init_last_fc_weight + ) def _get_output_shapes(self): """ @@ -315,14 +335,14 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of Gaussian distribution. """ return OrderedDict( - mean=(self.ac_dim,), + mean=(self.ac_dim,), scale=(self.ac_dim,), ) def forward_train(self, obs_dict, goal_dict=None): """ Return full Gaussian distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. 
Args: @@ -335,7 +355,11 @@ def forward_train(self, obs_dict, goal_dict=None): out = MIMO_MLP.forward(self, obs=obs_dict, goal=goal_dict) mean = out["mean"] # Use either constant std or learned std depending on setting - scale = out["scale"] if not self.fixed_std else torch.ones_like(mean) * self.init_std + scale = ( + out["scale"] + if not self.fixed_std + else torch.ones_like(mean) * self.init_std + ) # Clamp the mean mean = torch.clamp(mean, min=self.mean_limits[0], max=self.mean_limits[1]) @@ -354,16 +378,15 @@ def forward_train(self, obs_dict, goal_dict=None): # Clamp the scale scale = torch.clamp(scale, min=self.std_limits[0], max=self.std_limits[1]) - # the Independent call will make it so that `batch_shape` for dist will be equal to batch size - # while `event_shape` will be equal to action dimension - ensuring that log-probability + # while `event_shape` will be equal to action dimension - ensuring that log-probability # computations are summed across the action dimension dist = D.Normal(loc=mean, scale=scale) dist = D.Independent(dist, 1) if self.use_tanh: # Wrap distribution with Tanh - dist = TanhWrappedDistribution(base_dist=dist, scale=1.) + dist = TanhWrappedDistribution(base_dist=dist, scale=1.0) return dist @@ -390,7 +413,14 @@ def forward(self, obs_dict, goal_dict=None): def _to_string(self): """Info to pretty print.""" msg = "action_dim={}\nfixed_std={}\nstd_activation={}\ninit_std={}\nmean_limits={}\nstd_limits={}\nlow_noise_eval={}".format( - self.ac_dim, self.fixed_std, self.std_activation, self.init_std, self.mean_limits, self.std_limits, self.low_noise_eval) + self.ac_dim, + self.fixed_std, + self.std_activation, + self.init_std, + self.mean_limits, + self.std_limits, + self.low_noise_eval, + ) return msg @@ -399,6 +429,7 @@ class GMMActorNetwork(ActorNetwork): Variant of actor network that learns a multimodal Gaussian mixture distribution over actions. """ + def __init__( self, obs_shapes, @@ -468,8 +499,11 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert std_activation in self.activations, \ - "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) + assert ( + std_activation in self.activations + ), "std_activation must be one of: {}; instead got: {}".format( + self.activations.keys(), std_activation + ) self.std_activation = std_activation super(GMMActorNetwork, self).__init__( @@ -486,15 +520,15 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. """ return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) def forward_train(self, obs_dict, goal_dict=None): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: @@ -536,7 +570,7 @@ def forward_train(self, obs_dict, goal_dict=None): if self.use_tanh: # Wrap distribution with Tanh - dist = TanhWrappedDistribution(base_dist=dist, scale=1.) 
+ dist = TanhWrappedDistribution(base_dist=dist, scale=1.0) return dist @@ -557,13 +591,19 @@ def forward(self, obs_dict, goal_dict=None): def _to_string(self): """Info to pretty print.""" return "action_dim={}\nnum_modes={}\nmin_std={}\nstd_activation={}\nlow_noise_eval={}".format( - self.ac_dim, self.num_modes, self.min_std, self.std_activation, self.low_noise_eval) + self.ac_dim, + self.num_modes, + self.min_std, + self.std_activation, + self.low_noise_eval, + ) class RNNActorNetwork(RNN_MIMO_MLP): """ An RNN policy network that predicts actions from observations. """ + def __init__( self, obs_shapes, @@ -659,11 +699,17 @@ def output_shape(self, input_shape): # infers temporal dimension from input shape mod = list(self.obs_shapes.keys())[0] T = input_shape[mod][0] - TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, - msg="RNNActorNetwork: input_shape inconsistent in temporal dimension") + TensorUtils.assert_size_at_dim( + input_shape, + size=T, + dim=0, + msg="RNNActorNetwork: input_shape inconsistent in temporal dimension", + ) return [T, self.ac_dim] - def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): + def forward( + self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False + ): """ Forward a sequence of inputs through the RNN and the per-step network. @@ -682,17 +728,23 @@ def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=Fa assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) + goal_dict = TensorUtils.unsqueeze_expand_at( + goal_dict, size=obs_dict[mod].shape[1], dim=1 + ) outputs = super(RNNActorNetwork, self).forward( - obs=obs_dict, goal=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) + obs=obs_dict, + goal=goal_dict, + rnn_init_state=rnn_init_state, + return_state=return_state, + ) if return_state: actions, state = outputs else: actions = outputs state = None - + # apply tanh squashing to ensure actions are in [-1, 1] actions = torch.tanh(actions["action"]) @@ -717,7 +769,8 @@ def forward_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) action, state = self.forward( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True + ) return action[:, 0], state def _to_string(self): @@ -729,6 +782,7 @@ class RNNGMMActorNetwork(RNNActorNetwork): """ An RNN GMM policy network that predicts sequences of action distributions from observation sequences. """ + def __init__( self, obs_shapes, @@ -801,8 +855,11 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert std_activation in self.activations, \ - "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) + assert ( + std_activation in self.activations + ), "std_activation must be one of: {}; instead got: {}".format( + self.activations.keys(), std_activation + ) self.std_activation = std_activation super(RNNGMMActorNetwork, self).__init__( @@ -823,15 +880,17 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. 
""" return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) - def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): + def forward_train( + self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False + ): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: @@ -848,16 +907,23 @@ def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_st assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) + goal_dict = TensorUtils.unsqueeze_expand_at( + goal_dict, size=obs_dict[mod].shape[1], dim=1 + ) outputs = RNN_MIMO_MLP.forward( - self, obs=obs_dict, goal=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) + self, + obs=obs_dict, + goal=goal_dict, + rnn_init_state=rnn_init_state, + return_state=return_state, + ) if return_state: outputs, state = outputs else: state = None - + means = outputs["mean"] scales = outputs["scale"] logits = outputs["logits"] @@ -876,7 +942,9 @@ def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_st # mixture components - make sure that `batch_shape` for the distribution is equal # to (batch_size, timesteps, num_modes) since MixtureSameFamily expects this shape component_distribution = D.Normal(loc=means, scale=scales) - component_distribution = D.Independent(component_distribution, 1) # shift action dim to event shape + component_distribution = D.Independent( + component_distribution, 1 + ) # shift action dim to event shape # unnormalized logits to categorical distribution for mixing the modes mixture_distribution = D.Categorical(logits=logits) @@ -888,14 +956,16 @@ def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_st if self.use_tanh: # Wrap distribution with Tanh - dists = TanhWrappedDistribution(base_dist=dists, scale=1.) + dists = TanhWrappedDistribution(base_dist=dists, scale=1.0) if return_state: return dists, state else: return dists - def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): + def forward( + self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False + ): """ Samples actions from the policy distribution. 
@@ -906,7 +976,12 @@ def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=Fa Returns: action (torch.Tensor): batch of actions from policy distribution """ - out = self.forward_train(obs_dict=obs_dict, goal_dict=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) + out = self.forward_train( + obs_dict=obs_dict, + goal_dict=goal_dict, + rnn_init_state=rnn_init_state, + return_state=return_state, + ) if return_state: ad, state = out return ad.sample(), state @@ -914,8 +989,8 @@ def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=Fa def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): """ - Unroll RNN over single timestep to get action GMM distribution, which - is useful for computing quantities necessary at train-time, like + Unroll RNN over single timestep to get action GMM distribution, which + is useful for computing quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: @@ -930,7 +1005,8 @@ def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) ad, state = self.forward_train( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True + ) # to squeeze time dimension, make another action distribution assert ad.component_distribution.base_dist.loc.shape[1] == 1 @@ -941,7 +1017,9 @@ def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): scale=ad.component_distribution.base_dist.scale.squeeze(1), ) component_distribution = D.Independent(component_distribution, 1) - mixture_distribution = D.Categorical(logits=ad.mixture_distribution.logits.squeeze(1)) + mixture_distribution = D.Categorical( + logits=ad.mixture_distribution.logits.squeeze(1) + ) ad = D.MixtureSameFamily( mixture_distribution=mixture_distribution, component_distribution=component_distribution, @@ -964,14 +1042,20 @@ def forward_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) acts, state = self.forward( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True + ) assert acts.shape[1] == 1 return acts[:, 0], state def _to_string(self): """Info to pretty print.""" msg = "action_dim={}, std_activation={}, low_noise_eval={}, num_nodes={}, min_std={}".format( - self.ac_dim, self.std_activation, self.low_noise_eval, self.num_modes, self.min_std) + self.ac_dim, + self.std_activation, + self.low_noise_eval, + self.num_modes, + self.min_std, + ) return msg @@ -980,6 +1064,7 @@ class TransformerActorNetwork(MIMO_Transformer): An Transformer policy network that predicts actions from observation sequences (assumed to be frame stacked from previous observations) and possible from previous actions as well (in an autoregressive manner). """ + def __init__( self, obs_shapes, @@ -1002,7 +1087,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. - + ac_dim (int): dimension of action space. transformer_embed_dim (int): dimension for embeddings used by transformer @@ -1010,9 +1095,9 @@ def __init__( transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. 
Self-attention is computed over this many partitions of the embedding dimension separately. - + transformer_context_length (int): expected length of input sequences transformer_embedding_dropout (float): dropout probability for embedding inputs in transformer @@ -1020,10 +1105,10 @@ def __init__( transformer_attn_dropout (float): dropout probability for attention outputs for each transformer block transformer_block_output_dropout (float): dropout probability for final outputs for each transformer block - + goal_shapes (OrderedDict): a dictionary that maps modality to expected shapes for goal observations. - + encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should be nested dictionary containing relevant per-modality information for encoder networks. Should be of form: @@ -1046,7 +1131,9 @@ def __init__( assert isinstance(obs_shapes, OrderedDict) self.obs_shapes = obs_shapes - self.transformer_nn_parameter_for_timesteps = transformer_nn_parameter_for_timesteps + self.transformer_nn_parameter_for_timesteps = ( + transformer_nn_parameter_for_timesteps + ) # set up different observation groups for @RNN_MIMO_MLP observation_group_shapes = OrderedDict() @@ -1075,7 +1162,6 @@ def __init__( transformer_sinusoidal_embedding=transformer_sinusoidal_embedding, transformer_activation=transformer_activation, transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, - encoder_kwargs=encoder_kwargs, ) @@ -1093,8 +1179,12 @@ def output_shape(self, input_shape): # infers temporal dimension from input shape mod = list(self.obs_shapes.keys())[0] T = input_shape[mod][0] - TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, - msg="TransformerActorNetwork: input_shape inconsistent in temporal dimension") + TensorUtils.assert_size_at_dim( + input_shape, + size=T, + dim=0, + msg="TransformerActorNetwork: input_shape inconsistent in temporal dimension", + ) return [T, self.ac_dim] def forward(self, obs_dict, actions=None, goal_dict=None): @@ -1113,7 +1203,9 @@ def forward(self, obs_dict, actions=None, goal_dict=None): assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) + goal_dict = TensorUtils.unsqueeze_expand_at( + goal_dict, size=obs_dict[mod].shape[1], dim=1 + ) forward_kwargs = dict(obs=obs_dict, goal=goal_dict) outputs = super(TransformerActorNetwork, self).forward(**forward_kwargs) @@ -1121,7 +1213,7 @@ def forward(self, obs_dict, actions=None, goal_dict=None): # apply tanh squashing to ensure actions are in [-1, 1] outputs["action"] = torch.tanh(outputs["action"]) - return outputs["action"] # only action sequences + return outputs["action"] # only action sequences def _to_string(self): """Info to pretty print.""" @@ -1130,9 +1222,10 @@ def _to_string(self): class TransformerGMMActorNetwork(TransformerActorNetwork): """ - A Transformer GMM policy network that predicts sequences of action distributions from observation + A Transformer GMM policy network that predicts sequences of action distributions from observation sequences (assumed to be frame stacked from previous observations). """ + def __init__( self, obs_shapes, @@ -1160,7 +1253,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. - + ac_dim (int): dimension of action space. 
transformer_embed_dim (int): dimension for embeddings used by transformer @@ -1168,9 +1261,9 @@ def __init__( transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. Self-attention is computed over this many partitions of the embedding dimension separately. - + transformer_context_length (int): expected length of input sequences transformer_embedding_dropout (float): dropout probability for embedding inputs in transformer @@ -1211,7 +1304,7 @@ def __init__( obs_modality2: dict ... """ - + # parameters specific to GMM actor self.num_modes = num_modes self.min_std = min_std @@ -1223,8 +1316,11 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert std_activation in self.activations, \ - "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) + assert ( + std_activation in self.activations + ), "std_activation must be one of: {}; instead got: {}".format( + self.activations.keys(), std_activation + ) self.std_activation = std_activation super(TransformerGMMActorNetwork, self).__init__( @@ -1239,7 +1335,7 @@ def __init__( transformer_block_output_dropout=transformer_block_output_dropout, transformer_sinusoidal_embedding=transformer_sinusoidal_embedding, transformer_activation=transformer_activation, - transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, + transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, encoder_kwargs=encoder_kwargs, goal_shapes=goal_shapes, ) @@ -1250,15 +1346,17 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. """ return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) - def forward_train(self, obs_dict, actions=None, goal_dict=None, low_noise_eval=None): + def forward_train( + self, obs_dict, actions=None, goal_dict=None, low_noise_eval=None + ): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. 
Args: obs_dict (dict): batch of observations @@ -1271,12 +1369,14 @@ def forward_train(self, obs_dict, actions=None, goal_dict=None, low_noise_eval=N assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) + goal_dict = TensorUtils.unsqueeze_expand_at( + goal_dict, size=obs_dict[mod].shape[1], dim=1 + ) forward_kwargs = dict(obs=obs_dict, goal=goal_dict) outputs = MIMO_Transformer.forward(self, **forward_kwargs) - + means = outputs["mean"] scales = outputs["scale"] logits = outputs["logits"] @@ -1297,7 +1397,9 @@ def forward_train(self, obs_dict, actions=None, goal_dict=None, low_noise_eval=N # mixture components - make sure that `batch_shape` for the distribution is equal # to (batch_size, timesteps, num_modes) since MixtureSameFamily expects this shape component_distribution = D.Normal(loc=means, scale=scales) - component_distribution = D.Independent(component_distribution, 1) # shift action dim to event shape + component_distribution = D.Independent( + component_distribution, 1 + ) # shift action dim to event shape # unnormalized logits to categorical distribution for mixing the modes mixture_distribution = D.Categorical(logits=logits) @@ -1309,7 +1411,7 @@ def forward_train(self, obs_dict, actions=None, goal_dict=None, low_noise_eval=N if self.use_tanh: # Wrap distribution with Tanh - dists = TanhWrappedDistribution(base_dist=dists, scale=1.) + dists = TanhWrappedDistribution(base_dist=dists, scale=1.0) return dists @@ -1323,13 +1425,20 @@ def forward(self, obs_dict, actions=None, goal_dict=None): Returns: action (torch.Tensor): batch of actions from policy distribution """ - out = self.forward_train(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) + out = self.forward_train( + obs_dict=obs_dict, actions=actions, goal_dict=goal_dict + ) return out.sample() def _to_string(self): """Info to pretty print.""" msg = "action_dim={}, std_activation={}, low_noise_eval={}, num_nodes={}, min_std={}".format( - self.ac_dim, self.std_activation, self.low_noise_eval, self.num_modes, self.min_std) + self.ac_dim, + self.std_activation, + self.low_noise_eval, + self.num_modes, + self.min_std, + ) return msg @@ -1338,6 +1447,7 @@ class VAEActor(Module): A VAE that models a distribution of actions conditioned on observations. The VAE prior and decoder are used at test-time as the policy. """ + def __init__( self, obs_shapes, @@ -1395,8 +1505,8 @@ def __init__( action_shapes = OrderedDict(action=(self.ac_dim,)) # ensure VAE decoder will squash actions into [-1, 1] - output_squash = ['action'] - output_scales = OrderedDict(action=1.) + output_squash = ["action"] + output_scales = OrderedDict(action=1.0) self._vae = VAE( input_shapes=action_shapes, @@ -1430,7 +1540,7 @@ def encode(self, actions, obs_dict, goal_dict=None): actions (torch.Tensor): a batch of actions obs_dict (dict): a dictionary that maps modalities to torch.Tensor - batches. These should correspond to the observation modalities + batches. These should correspond to the observation modalities used for conditioning in either the decoder or the prior (or both). goal_dict (dict): a dictionary that maps modalities to torch.Tensor @@ -1461,7 +1571,7 @@ def decode(self, obs_dict=None, goal_dict=None, z=None, n=None): z (torch.Tensor): if provided, these latents are used to generate reconstructions from the VAE, and the prior is not sampled. 
- n (int): this argument is used to specify the number of samples to + n (int): this argument is used to specify the number of samples to generate from the prior. Only required if @z is None - i.e. sampling takes place @@ -1507,7 +1617,7 @@ def get_gumbel_temperature(self): def output_shape(self, input_shape=None): """ - This implementation is required by the Module superclass, but is unused since we + This implementation is required by the Module superclass, but is unused since we never chain this module to other ones. """ return [self.ac_dim] @@ -1521,7 +1631,7 @@ def forward_train(self, actions, obs_dict, goal_dict=None, freeze_encoder=False) actions (torch.Tensor): a batch of actions obs_dict (dict): a dictionary that maps modalities to torch.Tensor - batches. These should correspond to the observation modalities + batches. These should correspond to the observation modalities used for conditioning in either the decoder or the prior (or both). goal_dict (dict): a dictionary that maps modalities to torch.Tensor @@ -1543,11 +1653,12 @@ def forward_train(self, actions, obs_dict, goal_dict=None, freeze_encoder=False) """ action_inputs = OrderedDict(action=actions) return self._vae.forward( - inputs=action_inputs, - outputs=action_inputs, - conditions=obs_dict, + inputs=action_inputs, + outputs=action_inputs, + conditions=obs_dict, goals=goal_dict, - freeze_encoder=freeze_encoder) + freeze_encoder=freeze_encoder, + ) def forward(self, obs_dict, goal_dict=None, z=None): """ diff --git a/robomimic/models/transformers.py b/robomimic/models/transformers.py index 309bff30..3b891b80 100644 --- a/robomimic/models/transformers.py +++ b/robomimic/models/transformers.py @@ -15,6 +15,7 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.torch_utils as TorchUtils + class GEGLU(nn.Module): """ References: @@ -120,7 +121,9 @@ def __init__( assert ( embed_dim % num_heads == 0 - ), "num_heads: {} does not divide embed_dim: {} exactly".format(num_heads, embed_dim) + ), "num_heads: {} does not divide embed_dim: {} exactly".format( + num_heads, embed_dim + ) self.embed_dim = embed_dim self.num_heads = num_heads @@ -277,7 +280,7 @@ def __init__( nn.Linear(embed_dim, 4 * embed_dim * mult), activation, nn.Linear(4 * embed_dim, embed_dim), - nn.Dropout(output_dropout) + nn.Dropout(output_dropout), ) # layer normalization for inputs to self-attention module and MLP @@ -423,4 +426,4 @@ def forward(self, inputs): assert inputs.shape[1:] == (self.context_length, self.embed_dim), inputs.shape x = self.nets["transformer"](inputs) transformer_output = self.nets["output_ln"](x) - return transformer_output \ No newline at end of file + return transformer_output diff --git a/robomimic/models/vae_nets.py b/robomimic/models/vae_nets.py index 91b4e7f0..a8a7985a 100644 --- a/robomimic/models/vae_nets.py +++ b/robomimic/models/vae_nets.py @@ -2,6 +2,7 @@ Contains an implementation of Variational Autoencoder (VAE) and other variants, including other priors, and RNN-VAEs. """ + import textwrap import numpy as np from copy import deepcopy @@ -47,10 +48,11 @@ def vae_args_from_config(vae_config): class Prior(Module): """ Base class for VAE priors. 
It's basically the same as a @MIMO_MLP network (it - instantiates one) but it supports additional methods such as KL loss computation - and sampling, and also may learn prior parameters as observation-independent + instantiates one) but it supports additional methods such as KL loss computation + and sampling, and also may learn prior parameters as observation-independent torch Parameters instead of observation-dependent mappings. """ + def __init__( self, param_shapes, @@ -68,7 +70,7 @@ def __init__( param_obs_dependent (OrderedDict): a dictionary with boolean values consistent with @param_shapes which determines whether - to learn parameters as part of the (obs-dependent) network or + to learn parameters as part of the (obs-dependent) network or directly as learnable parameters. obs_shapes (OrderedDict): a dictionary that maps modality to @@ -98,7 +100,9 @@ def __init__( """ super(Prior, self).__init__() - assert isinstance(param_shapes, OrderedDict) and isinstance(param_obs_dependent, OrderedDict) + assert isinstance(param_shapes, OrderedDict) and isinstance( + param_obs_dependent, OrderedDict + ) assert set(param_shapes.keys()) == set(param_obs_dependent.keys()) self.param_shapes = param_shapes self.param_obs_dependent = param_obs_dependent @@ -125,7 +129,9 @@ def _create_layers(self, net_kwargs): mlp_output_shapes[pp] = self.param_shapes[pp] else: # learnable prior parameters independent of observation - param_init = torch.randn(*self.param_shapes[pp]) / np.sqrt(np.prod(self.param_shapes[pp])) + param_init = torch.randn(*self.param_shapes[pp]) / np.sqrt( + np.prod(self.param_shapes[pp]) + ) self.prior_params[pp] = torch.nn.Parameter(param_init) # only make networks if we have obs-dependent prior parameters @@ -170,7 +176,7 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes sample-based KL divergence loss between the Gaussian distribution - given by @mu, @logvar and the prior distribution. + given by @mu, @logvar and the prior distribution. Args: posterior_params (dict): dictionary with keys "mu" and "logvar" corresponding @@ -197,7 +203,7 @@ def output_shape(self, input_shape=None): """ if self.prior_module is not None: return self.prior_module.output_shape(input_shape) - return { k : list(self.param_shapes[k]) for k in self.param_shapes } + return {k: list(self.param_shapes[k]) for k in self.param_shapes} def forward(self, batch_size, obs_dict=None, goal_dict=None): """ @@ -225,11 +231,17 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): for pp in self.param_shapes: if not self.param_obs_dependent[pp]: # ensure leading dimension will be consistent with other params - prior_params[pp] = TensorUtils.expand_at(self.prior_params[pp], size=batch_size, dim=0) + prior_params[pp] = TensorUtils.expand_at( + self.prior_params[pp], size=batch_size, dim=0 + ) # ensure leading dimensions are all consistent - TensorUtils.assert_size_at_dim(prior_params, size=batch_size, dim=0, - msg="prior params dim 0 mismatch in forward") + TensorUtils.assert_size_at_dim( + prior_params, + size=batch_size, + dim=0, + msg="prior params dim 0 mismatch in forward", + ) return prior_params @@ -239,6 +251,7 @@ class GaussianPrior(Prior): A class that holds functionality for learning both unimodal Gaussian priors and multimodal Gaussian Mixture Model priors for use in VAEs. 
""" + def __init__( self, latent_dim, @@ -278,7 +291,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. If provided, assumes that - the prior should depend on observation inputs, and networks + the prior should depend on observation inputs, and networks will be created to output prior parameters. mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes @@ -324,8 +337,14 @@ def __init__( # network will generate mean and logvar param_shapes = OrderedDict( - mean=(self.num_modes, self.latent_dim,), - logvar=(self.num_modes, self.latent_dim,), + mean=( + self.num_modes, + self.latent_dim, + ), + logvar=( + self.num_modes, + self.latent_dim, + ), ) param_obs_dependent = OrderedDict(mean=True, logvar=True) @@ -383,14 +402,19 @@ def sample(self, n, obs_dict=None, goal_dict=None): # check consistency between n and obs_dict if self._input_dependent: - TensorUtils.assert_size_at_dim(obs_dict, size=n, dim=0, - msg="obs dict and n mismatch in @sample") + TensorUtils.assert_size_at_dim( + obs_dict, size=n, dim=0, msg="obs dict and n mismatch in @sample" + ) if self.learnable: # forward to get parameters out = self.forward(batch_size=n, obs_dict=obs_dict, goal_dict=goal_dict) - prior_means, prior_logvars, prior_logweights = out["means"], out["logvars"], out["logweights"] + prior_means, prior_logvars, prior_logweights = ( + out["means"], + out["logvars"], + out["logweights"], + ) if prior_logweights is not None: prior_weights = torch.exp(prior_logweights) @@ -400,19 +424,28 @@ def sample(self, n, obs_dict=None, goal_dict=None): # make uniform weights (in the case that weights were not learned) if not self.gmm_learn_weights: - prior_weights = torch.ones(n, self.num_modes).to(prior_means.device) / self.num_modes + prior_weights = ( + torch.ones(n, self.num_modes).to(prior_means.device) + / self.num_modes + ) # sample modes gmm_mode_indices = D.Categorical(prior_weights).sample() - + # get GMM centers and sample using reparametrization trick - selected_means = TensorUtils.gather_sequence(prior_means, indices=gmm_mode_indices) - selected_logvars = TensorUtils.gather_sequence(prior_logvars, indices=gmm_mode_indices) + selected_means = TensorUtils.gather_sequence( + prior_means, indices=gmm_mode_indices + ) + selected_logvars = TensorUtils.gather_sequence( + prior_logvars, indices=gmm_mode_indices + ) z = TorchUtils.reparameterize(selected_means, selected_logvars) else: # learned unimodal Gaussian - remove mode dim and sample from Gaussian using reparametrization trick - z = TorchUtils.reparameterize(prior_means[:, 0, :], prior_logvars[:, 0, :]) + z = TorchUtils.reparameterize( + prior_means[:, 0, :], prior_logvars[:, 0, :] + ) else: # sample from N(0, 1) @@ -426,7 +459,7 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes sample-based KL divergence loss between the Gaussian distribution - given by @mu, @logvar and the prior distribution. + given by @mu, @logvar and the prior distribution. 
Args: posterior_params (dict): dictionary with keys "mu" and "logvar" corresponding @@ -452,25 +485,32 @@ def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): return LossUtils.KLD_0_1_loss(mu=mu, logvar=logvar) # forward to get parameters - out = self.forward(batch_size=mu.shape[0], obs_dict=obs_dict, goal_dict=goal_dict) - prior_means, prior_logvars, prior_logweights = out["means"], out["logvars"], out["logweights"] + out = self.forward( + batch_size=mu.shape[0], obs_dict=obs_dict, goal_dict=goal_dict + ) + prior_means, prior_logvars, prior_logweights = ( + out["means"], + out["logvars"], + out["logweights"], + ) if not self.use_gmm: # collapse mode dimension and compute Gaussian KL in closed-form prior_means = prior_means[:, 0, :] prior_logvars = prior_logvars[:, 0, :] return LossUtils.KLD_gaussian_loss( - mu_1=mu, - logvar_1=logvar, - mu_2=prior_means, + mu_1=mu, + logvar_1=logvar, + mu_2=prior_means, logvar_2=prior_logvars, ) # GMM KL loss computation - var = torch.exp(logvar.clamp(-8, 30)) # clamp for numerical stability + var = torch.exp(logvar.clamp(-8, 30)) # clamp for numerical stability prior_vars = torch.exp(prior_logvars.clamp(-8, 30)) - kl_loss = LossUtils.log_normal(x=z, m=mu, v=var) \ - - LossUtils.log_normal_mixture(x=z, m=prior_means, v=prior_vars, log_w=prior_logweights) + kl_loss = LossUtils.log_normal(x=z, m=mu, v=var) - LossUtils.log_normal_mixture( + x=z, m=prior_means, v=prior_vars, log_w=prior_logweights + ) return kl_loss.mean() def forward(self, batch_size, obs_dict=None, goal_dict=None): @@ -492,7 +532,8 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): """ assert self.learnable prior_params = super(GaussianPrior, self).forward( - batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict) + batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict + ) if self.use_gmm and self.gmm_learn_weights: # normalize learned weight outputs to sum to 1 @@ -501,27 +542,39 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): logweights = None assert "weight" not in prior_params - out = dict(means=prior_params["mean"], logvars=prior_params["logvar"], logweights=logweights) + out = dict( + means=prior_params["mean"], + logvars=prior_params["logvar"], + logweights=logweights, + ) return out def __repr__(self): """Pretty print network""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 msg += textwrap.indent("latent_dim={}\n".format(self.latent_dim), indent) msg += textwrap.indent("latent_clip={}\n".format(self.latent_clip), indent) msg += textwrap.indent("learnable={}\n".format(self.learnable), indent) - msg += textwrap.indent("input_dependent={}\n".format(self._input_dependent), indent) + msg += textwrap.indent( + "input_dependent={}\n".format(self._input_dependent), indent + ) msg += textwrap.indent("use_gmm={}\n".format(self.use_gmm), indent) if self.use_gmm: msg += textwrap.indent("gmm_num_nodes={}\n".format(self.num_modes), indent) - msg += textwrap.indent("gmm_learn_weights={}\n".format(self.gmm_learn_weights), indent) + msg += textwrap.indent( + "gmm_learn_weights={}\n".format(self.gmm_learn_weights), indent + ) if self.learnable: if self.prior_module is not None: - msg += textwrap.indent("\nprior_module={}\n".format(self.prior_module), indent) - msg += textwrap.indent("prior_params={}\n".format(self.prior_params), indent) - msg = header + '(\n' + msg + ')' + msg += textwrap.indent( + 
"\nprior_module={}\n".format(self.prior_module), indent + ) + msg += textwrap.indent( + "prior_params={}\n".format(self.prior_params), indent + ) + msg = header + "(\n" + msg + ")" return msg @@ -530,6 +583,7 @@ class CategoricalPrior(Prior): A class that holds functionality for learning categorical priors for use in VAEs. """ + def __init__( self, latent_dim, @@ -540,7 +594,6 @@ def __init__( mlp_layer_dims=(), goal_shapes=None, encoder_kwargs=None, - ): """ Args: @@ -556,7 +609,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. If provided, assumes that - the prior should depend on observation inputs, and networks + the prior should depend on observation inputs, and networks will be created to output prior parameters. mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes @@ -594,7 +647,10 @@ def __init__( # network will generate logits for categorical distributions param_shapes = OrderedDict( - logit=(self.latent_dim, self.categorical_dim,) + logit=( + self.latent_dim, + self.categorical_dim, + ) ) param_obs_dependent = OrderedDict(logit=True) else: @@ -641,8 +697,9 @@ def sample(self, n, obs_dict=None, goal_dict=None): # check consistency between n and obs_dict if self._input_dependent: - TensorUtils.assert_size_at_dim(obs_dict, size=n, dim=0, - msg="obs dict and n mismatch in @sample") + TensorUtils.assert_size_at_dim( + obs_dict, size=n, dim=0, msg="obs dict and n mismatch in @sample" + ) if self.learnable: @@ -658,10 +715,19 @@ def sample(self, n, obs_dict=None, goal_dict=None): # try to include a categorical sample for each class if possible (ensuring rough uniformity) if (self.latent_dim == 1) and (self.categorical_dim <= n): # include samples [0, 1, ..., C - 1] and then repeat until batch is filled - dist_samples = torch.arange(n).remainder(self.categorical_dim).unsqueeze(-1).to(self.device) + dist_samples = ( + torch.arange(n) + .remainder(self.categorical_dim) + .unsqueeze(-1) + .to(self.device) + ) else: # sample one-hot latents from uniform categorical distribution for each latent dimension - probs = torch.ones(n, self.latent_dim, self.categorical_dim).float().to(self.device) + probs = ( + torch.ones(n, self.latent_dim, self.categorical_dim) + .float() + .to(self.device) + ) dist_samples = D.Categorical(probs=probs).sample() z = TensorUtils.to_one_hot(dist_samples, num_class=self.categorical_dim) @@ -672,11 +738,11 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes KL divergence loss between the Categorical distribution - given by the unnormalized logits @logits and the prior distribution. + given by the unnormalized logits @logits and the prior distribution. 
Args: posterior_params (dict): dictionary with key "logits" corresponding - to torch.Tensor batch of unnormalized logits of shape [B, D * C] + to torch.Tensor batch of unnormalized logits of shape [B, D * C] that corresponds to the posterior categorical distribution z (torch.Tensor): samples from encoder - unused for this prior @@ -689,13 +755,19 @@ def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): Returns: kl_loss (torch.Tensor): KL divergence loss """ - logits = posterior_params["logit"].reshape(-1, self.latent_dim, self.categorical_dim) + logits = posterior_params["logit"].reshape( + -1, self.latent_dim, self.categorical_dim + ) if not self.learnable: # prior logits correspond to uniform categorical distribution prior_logits = torch.zeros_like(logits) else: # forward to get parameters - out = self.forward(batch_size=posterior_params["logit"].shape[0], obs_dict=obs_dict, goal_dict=goal_dict) + out = self.forward( + batch_size=posterior_params["logit"].shape[0], + obs_dict=obs_dict, + goal_dict=goal_dict, + ) prior_logits = out["logit"] prior_dist = D.Categorical(logits=prior_logits) @@ -725,22 +797,31 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): """ assert self.learnable return super(CategoricalPrior, self).forward( - batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict) + batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict + ) def __repr__(self): """Pretty print network""" - header = '{}'.format(str(self.__class__.__name__)) - msg = '' - indent = ' ' * 4 + header = "{}".format(str(self.__class__.__name__)) + msg = "" + indent = " " * 4 msg += textwrap.indent("latent_dim={}\n".format(self.latent_dim), indent) - msg += textwrap.indent("categorical_dim={}\n".format(self.categorical_dim), indent) + msg += textwrap.indent( + "categorical_dim={}\n".format(self.categorical_dim), indent + ) msg += textwrap.indent("learnable={}\n".format(self.learnable), indent) - msg += textwrap.indent("input_dependent={}\n".format(self._input_dependent), indent) + msg += textwrap.indent( + "input_dependent={}\n".format(self._input_dependent), indent + ) if self.learnable: if self.prior_module is not None: - msg += textwrap.indent("\nprior_module={}\n".format(self.prior_module), indent) - msg += textwrap.indent("prior_params={}\n".format(self.prior_params), indent) - msg = header + '(\n' + msg + ')' + msg += textwrap.indent( + "\nprior_module={}\n".format(self.prior_module), indent + ) + msg += textwrap.indent( + "prior_params={}\n".format(self.prior_params), indent + ) + msg = header + "(\n" + msg + ")" return msg @@ -757,20 +838,21 @@ class VAE(torch.nn.Module): expected reconstructions - this allows for asymmetric reconstruction (for example, reconstructing low-resolution images). - This implementation supports learning conditional distributions as well (cVAE). + This implementation supports learning conditional distributions as well (cVAE). The conditioning variable Y is specified through the @condition_shapes argument, which is also a map between modalities (strings) and expected shapes. In this way, - variables with multiple kinds of data (e.g. image and flat-dimensional) can - jointly be conditioned on. By default, the decoder takes the conditioning + variables with multiple kinds of data (e.g. image and flat-dimensional) can + jointly be conditioned on. By default, the decoder takes the conditioning variable Y as input. 
To force the decoder to reconstruct from just the latent, set @decoder_is_conditioned to False (in this case, the prior must be conditioned). The implementation also supports learning expressive priors instead of using the usual N(0, 1) prior. There are three kinds of priors supported - Gaussian, - Gaussian Mixture Model (GMM), and Categorical. For each prior, the parameters can + Gaussian Mixture Model (GMM), and Categorical. For each prior, the parameters can be learned as independent parameters, or be learned as functions of the conditioning variable Y (by setting @prior_is_conditioned). """ + def __init__( self, input_shapes, @@ -804,13 +886,13 @@ def __init__( expected shapes for all encoder-specific inputs. This corresponds to the variable X whose distribution we are learning. - output_shapes (OrderedDict): a dictionary that maps modality to + output_shapes (OrderedDict): a dictionary that maps modality to expected shape for outputs to reconstruct. Usually, this is the same as @input_shapes but this argument allows for asymmetries, such as reconstructing low-resolution images. - encoder_layer_dims ([int]): sequence of integers for the encoder hidden + encoder_layer_dims ([int]): sequence of integers for the encoder hidden layer sizes. decoder_layer_dims ([int]): sequence of integers for the decoder hidden @@ -837,7 +919,7 @@ def __init__( latent_clip (float): if provided, clip all latents sampled at test-time in each dimension to (-@latent_clip, @latent_clip) - output_squash ([str]): an iterable of modalities that should be + output_squash ([str]): an iterable of modalities that should be a subset of @output_shapes. The decoder outputs for these modalities will be squashed into a symmetric range [-a, a] by using a tanh layer and then scaling the output with the @@ -850,20 +932,20 @@ def __init__( when output_ranges is specified (not None), output_scales should be None prior_learn (bool): if True, the prior distribution parameters - are also learned through the KL-divergence loss (instead + are also learned through the KL-divergence loss (instead of being constrained to a N(0, 1) Gaussian distribution). If @prior_is_conditioned is True, a global set of parameters - are learned, otherwise, a prior network that maps between - modalities in @condition_shapes and prior parameters is - learned. By default, a Gaussian prior is learned, unless - @prior_use_gmm is True, in which case a Gaussian Mixture + are learned, otherwise, a prior network that maps between + modalities in @condition_shapes and prior parameters is + learned. By default, a Gaussian prior is learned, unless + @prior_use_gmm is True, in which case a Gaussian Mixture Model (GMM) prior is learned. prior_is_conditioned (bool): whether to condition the prior on the conditioning variables. False by default. Only used if @condition_shapes is not empty. If this is set to True, @prior_learn must be True. - + prior_layer_dims ([int]): sequence of integers for the prior hidden layer sizes. Only used for learned priors that take condition variables as input (i.e. when @prior_learn and @prior_is_conditioned are set to True, @@ -887,7 +969,7 @@ def __init__( prior_categorical_dim (int): categorical dimension - each latent sampled from the prior will be of shape (@latent_dim, @prior_categorical_dim) - and will be "one-hot" in the latter dimension. Only used if + and will be "one-hot" in the latter dimension. Only used if @prior_use_categorical is True. 
prior_categorical_gumbel_softmax_hard (bool): if True, use the "hard" version of @@ -930,21 +1012,30 @@ def __init__( # check for conditioning (cVAE) self._is_cvae = False - self.condition_shapes = deepcopy(condition_shapes) if condition_shapes is not None else OrderedDict() + self.condition_shapes = ( + deepcopy(condition_shapes) + if condition_shapes is not None + else OrderedDict() + ) if len(self.condition_shapes) > 0: # this is a cVAE - we learn a conditional distribution p(X | Y) assert isinstance(self.condition_shapes, OrderedDict) self._is_cvae = True self.decoder_is_conditioned = decoder_is_conditioned self.prior_is_conditioned = prior_is_conditioned - assert self.decoder_is_conditioned or self.prior_is_conditioned, \ - "cVAE must be conditioned in decoder and/or prior" + assert ( + self.decoder_is_conditioned or self.prior_is_conditioned + ), "cVAE must be conditioned in decoder and/or prior" if self.prior_is_conditioned: - assert prior_learn, "to pass conditioning inputs to prior, prior must be learned" + assert ( + prior_learn + ), "to pass conditioning inputs to prior, prior must be learned" # check for goal conditioning self._is_goal_conditioned = False - self.goal_shapes = deepcopy(goal_shapes) if goal_shapes is not None else OrderedDict() + self.goal_shapes = ( + deepcopy(goal_shapes) if goal_shapes is not None else OrderedDict() + ) if len(self.goal_shapes) > 0: assert self._is_cvae, "to condition VAE on goals, it must be a cVAE" assert isinstance(self.goal_shapes, OrderedDict) @@ -956,14 +1047,20 @@ def __init__( # determines whether outputs are squashed with tanh and if so, to what scaling assert not (output_scales is not None and output_ranges is not None) self.output_squash = output_squash - self.output_scales = output_scales if output_scales is not None else OrderedDict() - self.output_ranges = output_ranges if output_ranges is not None else OrderedDict() + self.output_scales = ( + output_scales if output_scales is not None else OrderedDict() + ) + self.output_ranges = ( + output_ranges if output_ranges is not None else OrderedDict() + ) assert set(self.output_squash) == set(self.output_scales.keys()) assert set(self.output_squash).issubset(set(self.output_shapes)) # decoder settings - self.decoder_reconstruction_sum_across_elements = decoder_reconstruction_sum_across_elements + self.decoder_reconstruction_sum_across_elements = ( + decoder_reconstruction_sum_across_elements + ) # prior parameters self.prior_learn = prior_learn @@ -973,7 +1070,9 @@ def __init__( self.prior_gmm_learn_weights = prior_gmm_learn_weights self.prior_use_categorical = prior_use_categorical self.prior_categorical_dim = prior_categorical_dim - self.prior_categorical_gumbel_softmax_hard = prior_categorical_gumbel_softmax_hard + self.prior_categorical_gumbel_softmax_hard = ( + prior_categorical_gumbel_softmax_hard + ) assert np.sum([self.prior_use_gmm, self.prior_use_categorical]) <= 1 # for obs core @@ -1016,7 +1115,7 @@ def _create_encoder(self): encoder_obs_group_shapes["condition"] = OrderedDict(self.condition_shapes) if self._is_goal_conditioned: encoder_obs_group_shapes["goal"] = OrderedDict(self.goal_shapes) - + # encoder outputs posterior distribution parameters if self.prior_use_categorical: encoder_output_shapes = OrderedDict( @@ -1024,13 +1123,13 @@ def _create_encoder(self): ) else: encoder_output_shapes = OrderedDict( - mean=(self.latent_dim,), + mean=(self.latent_dim,), logvar=(self.latent_dim,), ) self.nets["encoder"] = MIMO_MLP( input_obs_group_shapes=encoder_obs_group_shapes, - 
output_shapes=encoder_output_shapes, + output_shapes=encoder_output_shapes, layer_dims=self.encoder_layer_dims, encoder_kwargs=self._encoder_kwargs, ) @@ -1053,7 +1152,7 @@ def _create_decoder(self): self.nets["decoder"] = MIMO_MLP( input_obs_group_shapes=decoder_obs_group_shapes, - output_shapes=self.output_shapes, + output_shapes=self.output_shapes, layer_dims=self.decoder_layer_dims, encoder_kwargs=self._encoder_kwargs, ) @@ -1130,7 +1229,9 @@ def reparameterize(self, posterior_params): """ if self.prior_use_categorical: # reshape to [B, D, C] to take softmax across categorical classes - logits = posterior_params["logit"].reshape(-1, self.latent_dim, self.prior_categorical_dim) + logits = posterior_params["logit"].reshape( + -1, self.latent_dim, self.prior_categorical_dim + ) z = F.gumbel_softmax( logits=logits, tau=self._gumbel_temperature, @@ -1141,7 +1242,7 @@ def reparameterize(self, posterior_params): return TensorUtils.flatten(z) return TorchUtils.reparameterize( - mu=posterior_params["mean"], + mu=posterior_params["mean"], logvar=posterior_params["logvar"], ) @@ -1163,7 +1264,7 @@ def decode(self, conditions=None, goals=None, z=None, n=None): z (torch.Tensor): if provided, these latents are used to generate reconstructions from the VAE, and the prior is not sampled. - n (int): this argument is used to specify the number of samples to + n (int): this argument is used to specify the number of samples to generate from the prior. Only required if @z is None - i.e. sampling takes place @@ -1176,11 +1277,11 @@ def decode(self, conditions=None, goals=None, z=None, n=None): assert n is not None z = self.sample_prior(n=n, conditions=conditions, goals=goals) - # decoder takes latents as input, and maybe condition variables + # decoder takes latents as input, and maybe condition variables # and goal variables inputs = dict( - input=dict(latent=z), - condition=conditions, + input=dict(latent=z), + condition=conditions, goal=goals, ) @@ -1193,7 +1294,9 @@ def decode(self, conditions=None, goals=None, z=None, n=None): for k, v_range in self.output_ranges.items(): assert v_range[1] > v_range[0] - recons[k] = torch.sigmoid(recons[k]) * (v_range[1] - v_range[0]) + v_range[0] + recons[k] = ( + torch.sigmoid(recons[k]) * (v_range[1] - v_range[0]) + v_range[0] + ) return recons def sample_prior(self, n, conditions=None, goals=None): @@ -1240,7 +1343,7 @@ def kl_loss(self, posterior_params, encoder_z=None, conditions=None, goals=None) return self.nets["prior"].kl_loss( posterior_params=posterior_params, z=encoder_z, - obs_dict=conditions, + obs_dict=conditions, goal_dict=goals, ) @@ -1251,7 +1354,7 @@ def reconstruction_loss(self, reconstructions, targets): The beta term for weighting between reconstruction and kl losses will need to be tuned in practice for each situation (see - https://twitter.com/memotv/status/973323454350090240 for more + https://twitter.com/memotv/status/973323454350090240 for more discussion). Args: @@ -1284,7 +1387,9 @@ def reconstruction_loss(self, reconstructions, targets): loss /= num_mods return loss - def forward(self, inputs, outputs, conditions=None, goals=None, freeze_encoder=False): + def forward( + self, inputs, outputs, conditions=None, goals=None, freeze_encoder=False + ): """ A full pass through the VAE network to construct KL and reconstruction losses. 
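# --- illustrative aside (not part of this patch) -----------------------------
# A minimal sketch of how the dict returned by forward() below is typically
# combined into a beta-VAE objective; `vae_net`, `obs_batch`, and `beta` are
# hypothetical names, not identifiers from robomimic:
#
#     out = vae_net(inputs=obs_batch, outputs=obs_batch)
#     loss = out["reconstruction_loss"] + beta * out["kl_loss"]
#     loss.backward()
# ------------------------------------------------------------------------------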
@@ -1329,7 +1434,7 @@ def forward(self, inputs, outputs, conditions=None, goals=None, freeze_encoder=F
 
         # mu, logvar <- Enc(X, Y)
         posterior_params = self.encode(
-            inputs=inputs, 
+            inputs=inputs,
             conditions=conditions,
             goals=goals,
         )
@@ -1342,11 +1447,11 @@ def forward(self, inputs, outputs, conditions=None, goals=None, freeze_encoder=F
 
         # hat(X) = Dec(z, Y)
         reconstructions = self.decode(
-            conditions=conditions, 
+            conditions=conditions,
             goals=goals,
             z=encoder_z,
         )
-        
+
         # this will also train prior network z ~ Prior(z | Y)
         kl_loss = self.kl_loss(
             posterior_params=posterior_params,
@@ -1356,16 +1461,16 @@ def forward(self, inputs, outputs, conditions=None, goals=None, freeze_encoder=F
         )
 
         reconstruction_loss = self.reconstruction_loss(
-            reconstructions=reconstructions, 
+            reconstructions=reconstructions,
             targets=outputs,
         )
 
         return {
-            "encoder_params" : posterior_params,
-            "encoder_z" : encoder_z,
-            "decoder_outputs" : reconstructions,
-            "kl_loss" : kl_loss,
-            "reconstruction_loss" : reconstruction_loss,
+            "encoder_params": posterior_params,
+            "encoder_z": encoder_z,
+            "decoder_outputs": reconstructions,
+            "kl_loss": kl_loss,
+            "reconstruction_loss": reconstruction_loss,
         }
 
     def set_gumbel_temperature(self, temperature):
diff --git a/robomimic/models/value_nets.py b/robomimic/models/value_nets.py
index c98fa7e4..a7e958f1 100644
--- a/robomimic/models/value_nets.py
+++ b/robomimic/models/value_nets.py
@@ -4,6 +4,7 @@
 such as subgoal or goal dictionaries) and produce value or action-value
 estimates or distributions.
 """
+
 import numpy as np
 from collections import OrderedDict
 
@@ -22,6 +23,7 @@ class ValueNetwork(MIMO_MLP):
     A basic value network that predicts values from observations.
     Can optionally be goal conditioned on future observations.
     """
+
     def __init__(
         self,
         obs_shapes,
@@ -35,7 +37,7 @@ def __init__(
             obs_shapes (OrderedDict): a dictionary that maps observation keys to
                 expected shapes for observations.
 
-            mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. 
+            mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes.
 
             value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return
                 that the network should be capable of generating. The network will rescale outputs
@@ -64,8 +66,12 @@ def __init__(
         self.value_bounds = value_bounds
         if self.value_bounds is not None:
             # convert [lb, ub] to a scale and offset for the tanh output, which is in [-1, 1]
-            self._value_scale = (float(self.value_bounds[1]) - float(self.value_bounds[0])) / 2.
-            self._value_offset = (float(self.value_bounds[1]) + float(self.value_bounds[0])) / 2.
+            self._value_scale = (
+                float(self.value_bounds[1]) - float(self.value_bounds[0])
+            ) / 2.0
+            self._value_offset = (
+                float(self.value_bounds[1]) + float(self.value_bounds[0])
+            ) / 2.0
 
         assert isinstance(obs_shapes, OrderedDict)
         self.obs_shapes = obs_shapes
@@ -101,11 +107,11 @@ def _get_output_shapes(self):
 
     def output_shape(self, input_shape=None):
         """
-        Function to compute output shape from inputs to this module. 
+        Function to compute output shape from inputs to this module.
 
         Args:
             input_shape (iterable of int): shape of input. Does not include batch dimension.
-                Some modules may not need this argument, if their output does not depend 
+                Some modules may not need this argument, if their output does not depend
                 on the size of the input, or if they assume fixed size input.
 
         Returns:
@@ -117,7 +123,9 @@ def forward(self, obs_dict, goal_dict=None):
         """
         Forward through value network, and then optionally use tanh scaling.
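
        (Illustrative example, assuming value_bounds=(-100.0, 0.0): then
        _value_scale = (0 - (-100)) / 2 = 50.0 and _value_offset = (0 + (-100)) / 2 = -50.0,
        so the returned values -50.0 + 50.0 * tanh(x) always lie in (-100.0, 0.0).)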
""" - values = super(ValueNetwork, self).forward(obs=obs_dict, goal=goal_dict)["value"] + values = super(ValueNetwork, self).forward(obs=obs_dict, goal=goal_dict)[ + "value" + ] if self.value_bounds is not None: values = self._value_offset + self._value_scale * torch.tanh(values) return values @@ -131,6 +139,7 @@ class ActionValueNetwork(ValueNetwork): A basic Q (action-value) network that predicts values from observations and actions. Can optionally be goal conditioned on future observations. """ + def __init__( self, obs_shapes, @@ -147,7 +156,7 @@ def __init__( ac_dim (int): dimension of action space. - mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. + mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return that the network should be possible of generating. The network will rescale outputs @@ -203,9 +212,10 @@ def _to_string(self): class DistributionalActionValueNetwork(ActionValueNetwork): """ Distributional Q (action-value) network that outputs a categorical distribution over - a discrete grid of value atoms. See https://arxiv.org/pdf/1707.06887.pdf for + a discrete grid of value atoms. See https://arxiv.org/pdf/1707.06887.pdf for more details. """ + def __init__( self, obs_shapes, @@ -223,7 +233,7 @@ def __init__( ac_dim (int): dimension of action space. - mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. + mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return that the network should be possible of generating. This defines the support @@ -315,4 +325,6 @@ def forward(self, obs_dict, acts, goal_dict=None): return vd.mean() def _to_string(self): - return "action_dim={}\nvalue_bounds={}\nnum_atoms={}".format(self.ac_dim, self.value_bounds, self.num_atoms) \ No newline at end of file + return "action_dim={}\nvalue_bounds={}\nnum_atoms={}".format( + self.ac_dim, self.value_bounds, self.num_atoms + ) diff --git a/robomimic/models/vit_rein.py b/robomimic/models/vit_rein.py index e73f9327..b2a6d07d 100644 --- a/robomimic/models/vit_rein.py +++ b/robomimic/models/vit_rein.py @@ -2,6 +2,7 @@ Contains torch Modules for implementation of rein method for domain adaptation of DINOv2 """ + import torch import torch.nn as nn import torch.nn.functional as F @@ -10,21 +11,20 @@ from operator import mul from torch import Tensor + class MLPhead(nn.Module): - def __init__(self, - in_dim: int, - out_dim: int, - **kwargs) -> None: + def __init__(self, in_dim: int, out_dim: int, **kwargs) -> None: super().__init__(**kwargs) self._in_dim = in_dim self._out_dim = out_dim - + self._mlp = nn.Linear(self._in_dim, self._out_dim) def forward(self, x: Tensor) -> Tensor: x = self._mlp.forward(x) return x + class Reins(nn.Module): def __init__( self, @@ -131,6 +131,7 @@ def forward_delta_feat(self, feats: Tensor, tokens: Tensor, layers: int) -> Tens delta_f = self.mlp_delta_f(delta_f + feats) return delta_f + class LoRAReins(Reins): def __init__(self, lora_dim=16, **kwargs): self.lora_dim = lora_dim @@ -159,4 +160,4 @@ def get_tokens(self, layer): if layer == -1: return self.learnable_tokens_a @ self.learnable_tokens_b else: - return self.learnable_tokens_a[layer] @ self.learnable_tokens_b[layer] \ No newline at end of file + return self.learnable_tokens_a[layer] @ self.learnable_tokens_b[layer] diff --git 
a/robomimic/scripts/config_gen/act_gen.py b/robomimic/scripts/config_gen/act_gen.py index 8962941d..a54b532e 100644 --- a/robomimic/scripts/config_gen/act_gen.py +++ b/robomimic/scripts/config_gen/act_gen.py @@ -1,10 +1,11 @@ from robomimic.scripts.config_gen.helper import * + def make_generator_helper(args): algo_name_short = "act" generator = get_generator( algo_name="act", - config_file=os.path.join(base_path, 'robomimic/exps/templates/act.json'), + config_file=os.path.join(base_path, "robomimic/exps/templates/act.json"), args=args, algo_name_short=algo_name_short, pt=True, @@ -12,7 +13,6 @@ def make_generator_helper(args): if args.ckpt_mode is None: args.ckpt_mode = "off" - generator.add_param( key="train.num_epochs", name="", @@ -40,7 +40,12 @@ def make_generator_helper(args): name="ds", group=2, values=[ - [{"path": p} for p in scan_datasets("~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5")], + [ + {"path": p} + for p in scan_datasets( + "~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5" + ) + ], ], value_names=[ "pen-in-cup", @@ -70,7 +75,7 @@ def make_generator_helper(args): group=2, values=[ [ - {"path": "TODO.hdf5"}, # replace with your own path + {"path": "TODO.hdf5"}, # replace with your own path ], ], value_names=[ @@ -83,11 +88,9 @@ def make_generator_helper(args): key="experiment.env_meta_update_dict", name="", group=-1, - values=[ - {"env_kwargs": {"controller_configs": {"control_delta": False}}} - ], + values=[{"env_kwargs": {"controller_configs": {"control_delta": False}}}], ) - + generator.add_param( key="train.action_keys", name="ac_keys", @@ -105,10 +108,9 @@ def make_generator_helper(args): ], ) - else: raise ValueError - + generator.add_param( key="train.output_dir", name="", @@ -124,6 +126,7 @@ def make_generator_helper(args): return generator + if __name__ == "__main__": parser = get_argparser() diff --git a/robomimic/scripts/config_gen/helper.py b/robomimic/scripts/config_gen/helper.py index 48a3af07..7735b7f6 100644 --- a/robomimic/scripts/config_gen/helper.py +++ b/robomimic/scripts/config_gen/helper.py @@ -6,7 +6,10 @@ import robomimic import robomimic.utils.hyperparam_utils as HyperparamUtils -base_path = os.path.abspath(os.path.join(os.path.dirname(robomimic.__file__), os.pardir)) +base_path = os.path.abspath( + os.path.join(os.path.dirname(robomimic.__file__), os.pardir) +) + def scan_datasets(folder, postfix=".h5"): dataset_paths = [] @@ -25,14 +28,23 @@ def get_generator(algo_name, config_file, args, algo_name_short=None, pt=False): args.env, args.mod, ] - args.wandb_proj_name = '_'.join([str(s) for s in strings if s is not None]) + args.wandb_proj_name = "_".join([str(s) for s in strings if s is not None]) if args.script is not None: generated_config_dir = os.path.join(os.path.dirname(args.script), "json") else: - curr_time = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-%y-%H-%M-%S') - generated_config_dir=os.path.join( - '~/', 'tmp/autogen_configs/ril', algo_name, args.env, args.mod, args.name, curr_time, "json", + curr_time = datetime.datetime.fromtimestamp(time.time()).strftime( + "%m-%d-%y-%H-%M-%S" + ) + generated_config_dir = os.path.join( + "~/", + "tmp/autogen_configs/ril", + algo_name, + args.env, + args.mod, + args.name, + curr_time, + "json", ) generator = HyperparamUtils.ConfigGenerator( @@ -55,9 +67,7 @@ def set_env_settings(generator, args): key="experiment.rollout.enabled", name="", group=-1, - values=[ - False - ], + values=[False], ) generator.add_param( key="experiment.save.every_n_epochs", @@ 
-112,7 +122,8 @@ def set_env_settings(generator, args): values=[ [ "camera/image/hand_camera_left_image", - "camera/image/varied_camera_1_left_image", "camera/image/varied_camera_2_left_image" # uncomment to use all 3 cameras + "camera/image/varied_camera_1_left_image", + "camera/image/varied_camera_2_left_image", # uncomment to use all 3 cameras ] ], ) @@ -123,7 +134,7 @@ def set_env_settings(generator, args): values=[ # "CropRandomizer", # crop only # "ColorRandomizer", # jitter only - ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop + ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop ], hidename=True, ) @@ -134,44 +145,48 @@ def set_env_settings(generator, args): values=[ # {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}, # crop only # {}, # jitter only - [{}, {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}], # jitter, followed by crop + [ + {}, + { + "crop_height": 116, + "crop_width": 116, + "num_crops": 1, + "pos_enc": False, + }, + ], # jitter, followed by crop ], hidename=True, ) - if ("observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters) and \ - ("observation.encoder.rgb.obs_randomizer_kwargs.crop_height" not in generator.parameters): + if ( + "observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters + ) and ( + "observation.encoder.rgb.obs_randomizer_kwargs.crop_height" + not in generator.parameters + ): generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[ - 116 - ], + values=[116], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[ - 116 - ], + values=[116], ) # remove spatial softmax by default for r2d2 dataset generator.add_param( key="observation.encoder.rgb.core_kwargs.pool_class", name="", group=-1, - values=[ - None - ], + values=[None], ) generator.add_param( key="observation.encoder.rgb.core_kwargs.pool_kwargs", name="", group=-1, - values=[ - None - ], + values=[None], ) # specify dataset type is r2d2 rather than default robomimic @@ -179,11 +194,9 @@ def set_env_settings(generator, args): key="train.data_format", name="", group=-1, - values=[ - "r2d2" - ], + values=["r2d2"], ) - + # here, we list how each action key should be treated (normalized etc) generator.add_param( key="train.action_config", @@ -191,40 +204,40 @@ def set_env_settings(generator, args): group=-1, values=[ { - "action/cartesian_position":{ + "action/cartesian_position": { "normalization": "min_max", }, - "action/abs_pos":{ + "action/abs_pos": { "normalization": "min_max", }, - "action/abs_rot_6d":{ + "action/abs_rot_6d": { "normalization": "min_max", "format": "rot_6d", "convert_at_runtime": "rot_euler", }, - "action/abs_rot_euler":{ + "action/abs_rot_euler": { "normalization": "min_max", "format": "rot_euler", }, - "action/gripper_position":{ + "action/gripper_position": { "normalization": "min_max", }, - "action/cartesian_velocity":{ + "action/cartesian_velocity": { "normalization": None, }, - "action/rel_pos":{ + "action/rel_pos": { "normalization": None, }, - "action/rel_rot_6d":{ + "action/rel_rot_6d": { "format": "rot_6d", "normalization": None, "convert_at_runtime": "rot_euler", }, - "action/rel_rot_euler":{ + "action/rel_rot_euler": { "format": "rot_euler", "normalization": None, }, - "action/gripper_velocity":{ + "action/gripper_velocity": { "normalization": None, }, } @@ -257,20 +270,24 @@ def set_env_settings(generator, args): 
key="train.shuffled_obs_key_groups", name="", group=-1, - values=[[[ - ( - "camera/image/varied_camera_1_left_image", - "camera/image/varied_camera_1_right_image", - "camera/extrinsics/varied_camera_1_left", - "camera/extrinsics/varied_camera_1_right", - ), - ( - "camera/image/varied_camera_2_left_image", - "camera/image/varied_camera_2_right_image", - "camera/extrinsics/varied_camera_2_left", - "camera/extrinsics/varied_camera_2_right", - ), - ]]], + values=[ + [ + [ + ( + "camera/image/varied_camera_1_left_image", + "camera/image/varied_camera_1_right_image", + "camera/extrinsics/varied_camera_1_left", + "camera/extrinsics/varied_camera_1_right", + ), + ( + "camera/image/varied_camera_2_left_image", + "camera/image/varied_camera_2_right_image", + "camera/extrinsics/varied_camera_2_left", + "camera/extrinsics/varied_camera_2_right", + ), + ] + ] + ], ) elif args.env == "kitchen": generator.add_param( @@ -279,51 +296,51 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions":{ + "actions": { "normalization": None, }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, + "action_dict/abs_pos": {"normalization": "min_max"}, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/gripper": { "normalization": None, }, "action_dict/base_mode": { "normalization": None, - } + }, } ], ) - - if args.mod == 'im': + + if args.mod == "im": generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_base_pos", - "robot0_gripper_qpos"] + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_base_pos", + "robot0_gripper_qpos", + ] ], ) generator.add_param( @@ -331,9 +348,11 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["robot0_agentview_left_image", - "robot0_agentview_right_image", - "robot0_eye_in_hand_image"] + [ + "robot0_agentview_left_image", + "robot0_agentview_right_image", + "robot0_eye_in_hand_image", + ] ], ) else: @@ -342,15 +361,16 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot0_base_pos", - "object", + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot0_base_pos", + "object", ] ], ) - elif args.env in ['square', 'lift', 'place_close']: + elif args.env in ["square", "lift", "place_close"]: # # set videos off # args.no_video = True @@ -360,57 +380,48 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions":{ + "actions": { "normalization": None, }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, + "action_dict/abs_pos": {"normalization": "min_max"}, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/rel_rot_6d": { 
"normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/gripper": { "normalization": None, - } + }, } ], ) - if args.mod == 'im': + if args.mod == "im": generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos"] - ], + values=[["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"]], ) generator.add_param( key="observation.modalities.obs.rgb", name="", group=-1, - values=[ - ["agentview_image", - "robot0_eye_in_hand_image"] - ], + values=[["agentview_image", "robot0_eye_in_hand_image"]], ) else: generator.add_param( @@ -418,13 +429,15 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object"] + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] ], ) - elif args.env == 'transport': + elif args.env == "transport": # set videos off args.no_video = True @@ -435,50 +448,50 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions":{ + "actions": { "normalization": None, }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, + "action_dict/abs_pos": {"normalization": "min_max"}, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/gripper": { "normalization": None, - } + }, } ], ) - if args.mod == 'im': + if args.mod == "im": generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos"] + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", + ] ], ) generator.add_param( @@ -486,10 +499,12 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["shouldercamera0_image", - "robot0_eye_in_hand_image", - "shouldercamera1_image", - "robot1_eye_in_hand_image"] + [ + "shouldercamera0_image", + "robot0_eye_in_hand_image", + "shouldercamera1_image", + "robot1_eye_in_hand_image", + ] ], ) else: @@ -498,13 +513,15 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", - "object"] + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", + "object", + ] ], ) @@ -514,7 +531,7 @@ def set_env_settings(generator, args): group=-1, values=[700], ) - elif args.env == 'tool_hang': + elif args.env == "tool_hang": # set videos off args.no_video = True @@ -524,89 +541,72 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions":{ + "actions": { "normalization": None, }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, + "action_dict/abs_pos": {"normalization": "min_max"}, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, 
"action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle" + "format": "rot_axis_angle", }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d" + "format": "rot_6d", }, "action_dict/gripper": { "normalization": None, - } + }, } ], ) - if args.mod == 'im': + if args.mod == "im": generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos"] - ], + values=[["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"]], ) generator.add_param( key="observation.modalities.obs.rgb", name="", group=-1, - values=[ - ["sideview_image", - "robot0_eye_in_hand_image"] - ], + values=[["sideview_image", "robot0_eye_in_hand_image"]], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[ - 216 - ], + values=[216], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[ - 216 - ], + values=[216], ) generator.add_param( key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[ - 216 - ], + values=[216], ) generator.add_param( key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[ - 216 - ], + values=[216], ) else: generator.add_param( @@ -614,10 +614,12 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object"] + [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] ], ) @@ -632,17 +634,15 @@ def set_env_settings(generator, args): def set_mod_settings(generator, args): - if args.mod == 'ld': + if args.mod == "ld": if "experiment.save.epochs" not in generator.parameters: generator.add_param( key="experiment.save.epochs", name="", group=-1, - values=[ - [2000] - ], + values=[[2000]], ) - elif args.mod == 'im': + elif args.mod == "im": if "experiment.save.every_n_epochs" not in generator.parameters: generator.add_param( key="experiment.save.every_n_epochs", @@ -830,14 +830,14 @@ def get_argparser(): parser.add_argument( "--env", type=str, - default='r2d2', + default="r2d2", ) parser.add_argument( - '--mod', + "--mod", type=str, - choices=['ld', 'im'], - default='im', + choices=["ld", "im"], + default="im", ) parser.add_argument( @@ -847,55 +847,32 @@ def get_argparser(): default=None, ) - parser.add_argument( - "--script", - type=str, - default=None - ) + parser.add_argument("--script", type=str, default=None) - parser.add_argument( - "--wandb_proj_name", - type=str, - default=None - ) + parser.add_argument("--wandb_proj_name", type=str, default=None) parser.add_argument( "--debug", action="store_true", ) - parser.add_argument( - '--no_video', - action='store_true' - ) + parser.add_argument("--no_video", action="store_true") parser.add_argument( "--tmplog", action="store_true", ) - parser.add_argument( - "--nr", - type=int, - default=-1 - ) + parser.add_argument("--nr", type=int, default=-1) parser.add_argument( "--no_wandb", action="store_true", ) - parser.add_argument( - "--n_seeds", - type=int, - default=None - ) + parser.add_argument("--n_seeds", type=int, default=None) - parser.add_argument( - "--num_cmd_groups", - type=int, - default=None - ) + parser.add_argument("--num_cmd_groups", type=int, 
default=None) return parser @@ -904,7 +881,7 @@ def make_generator(args, make_generator_helper): if args.tmplog or args.debug and args.name is None: args.name = "debug" else: - time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') + time_str = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-") args.name = time_str + str(args.name) if args.debug or args.tmplog: @@ -917,7 +894,7 @@ def make_generator(args, make_generator_helper): pass if (args.debug or args.tmplog) and (args.wandb_proj_name is None): - args.wandb_proj_name = 'debug' + args.wandb_proj_name = "debug" if not args.debug: assert args.name is not None diff --git a/robomimic/scripts/conversion/convert_d4rl.py b/robomimic/scripts/conversion/convert_d4rl.py index 99fc1d93..537c8a7f 100644 --- a/robomimic/scripts/conversion/convert_d4rl.py +++ b/robomimic/scripts/conversion/convert_d4rl.py @@ -75,24 +75,26 @@ write_folder = os.path.join(base_folder, "converted") if not os.path.exists(write_folder): os.makedirs(write_folder) - output_path = os.path.join(base_folder, "converted", "{}.hdf5".format(args.env.replace("-", "_"))) + output_path = os.path.join( + base_folder, "converted", "{}.hdf5".format(args.env.replace("-", "_")) + ) f_sars = h5py.File(output_path, "w") f_sars_grp = f_sars.create_group("data") # code to split D4RL data into trajectories # (modified from https://github.com/aviralkumar2907/d4rl_evaluations/blob/bear_intergrate/bear/examples/bear_hdf5_d4rl.py#L18) - all_obs = ds['observations'] - all_act = ds['actions'] + all_obs = ds["observations"] + all_act = ds["actions"] N = all_obs.shape[0] - obs = all_obs[:N-1] - actions = all_act[:N-1] + obs = all_obs[: N - 1] + actions = all_act[: N - 1] next_obs = all_obs[1:] - rewards = np.squeeze(ds['rewards'][:N-1]) - dones = np.squeeze(ds['terminals'][:N-1]).astype(np.int32) + rewards = np.squeeze(ds["rewards"][: N - 1]) + dones = np.squeeze(ds["terminals"][: N - 1]).astype(np.int32) - assert 'timeouts' in ds - timeouts = ds['timeouts'][:] + assert "timeouts" in ds + timeouts = ds["timeouts"][:] ctr = 0 total_samples = 0 @@ -132,12 +134,19 @@ ctr = 0 traj = dict(obs=[], next_obs=[], actions=[], rewards=[], dones=[]) - print("\nExcluding {} samples at end of file due to no trajectory truncation.".format(len(traj["actions"]))) - print("Wrote {} trajectories to new converted hdf5 at {}\n".format(num_traj, output_path)) + print( + "\nExcluding {} samples at end of file due to no trajectory truncation.".format( + len(traj["actions"]) + ) + ) + print( + "Wrote {} trajectories to new converted hdf5 at {}\n".format( + num_traj, output_path + ) + ) # metadata f_sars_grp.attrs["total"] = total_samples f_sars_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) f_sars.close() - diff --git a/robomimic/scripts/conversion/convert_robosuite.py b/robomimic/scripts/conversion/convert_robosuite.py index 88258698..8ebdbb2e 100644 --- a/robomimic/scripts/conversion/convert_robosuite.py +++ b/robomimic/scripts/conversion/convert_robosuite.py @@ -33,7 +33,7 @@ ) args = parser.parse_args() - f = h5py.File(args.dataset, "a") # edit mode + f = h5py.File(args.dataset, "a") # edit mode # store env meta env_name = f["data"].attrs["env"] diff --git a/robomimic/scripts/conversion/convert_roboturk_pilot.py b/robomimic/scripts/conversion/convert_roboturk_pilot.py index 21059804..e289e802 100644 --- a/robomimic/scripts/conversion/convert_roboturk_pilot.py +++ b/robomimic/scripts/conversion/convert_roboturk_pilot.py @@ -70,11 +70,13 @@ def 
convert_rt_pilot_hdf5(ref_folder): actions = np.concatenate([jvels, gripper_acts], axis=1) # IMPORTANT: clip actions to -1, 1, since this is expected by the codebase - actions = np.clip(actions, -1., 1.) + actions = np.clip(actions, -1.0, 1.0) ep_data_grp.create_dataset("actions", data=actions) # store model xml directly in the new hdf5 file - model_path = os.path.join(ref_folder, "models", f["data/{}".format(ep)].attrs["model_file"]) + model_path = os.path.join( + ref_folder, "models", f["data/{}".format(ep)].attrs["model_file"] + ) f_model = open(model_path, "r") model_xml = f_model.read() f_model.close() @@ -82,7 +84,9 @@ def convert_rt_pilot_hdf5(ref_folder): # store num samples for this ep num_samples = actions.shape[0] - ep_data_grp.attrs["num_samples"] = num_samples # number of transitions in this episode + ep_data_grp.attrs["num_samples"] = ( + num_samples # number of transitions in this episode + ) num_samples_arr.append(num_samples) # write dataset attributes (metadata) @@ -91,7 +95,7 @@ def convert_rt_pilot_hdf5(ref_folder): # construct and save env metadata env_meta = dict() env_meta["type"] = EB.EnvType.ROBOSUITE_TYPE - env_meta["env_name"] = (f["data"].attrs["env"] + "Teleop") + env_meta["env_name"] = f["data"].attrs["env"] + "Teleop" # hardcode robosuite v0.3 args robosuite_args = { "has_renderer": False, @@ -108,7 +112,7 @@ def convert_rt_pilot_hdf5(ref_folder): "control_freq": 100, } env_meta["env_kwargs"] = robosuite_args - f_new_grp.attrs["env_args"] = json.dumps(env_meta, indent=4) # environment info + f_new_grp.attrs["env_args"] = json.dumps(env_meta, indent=4) # environment info print("\n====== Added env meta ======") print(f_new_grp.attrs["env_args"]) @@ -144,10 +148,14 @@ def split_fastest_from_hdf5(hdf5_path, n): # create filter key name = "fastest_{}".format(n) - lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=filtered_demos, key_name=name) + lengths = create_hdf5_filter_key( + hdf5_path=hdf5_path, demo_keys=filtered_demos, key_name=name + ) print("Total number of samples in fastest {} demos: {}".format(n, np.sum(lengths))) - print("Average number of samples in fastest {} demos: {}".format(n, np.mean(lengths))) + print( + "Average number of samples in fastest {} demos: {}".format(n, np.mean(lengths)) + ) if __name__ == "__main__": @@ -177,8 +185,14 @@ def split_fastest_from_hdf5(hdf5_path, n): print("\nCreating filter key for fastest {} trajectories...".format(args.n)) split_fastest_from_hdf5(hdf5_path=hdf5_path, n=args.n) - print("\nCreating 90-10 train-validation split for fastest {} trajectories...".format(args.n)) - split_train_val_from_hdf5(hdf5_path=hdf5_path, val_ratio=0.1, filter_key="fastest_{}".format(args.n)) + print( + "\nCreating 90-10 train-validation split for fastest {} trajectories...".format( + args.n + ) + ) + split_train_val_from_hdf5( + hdf5_path=hdf5_path, val_ratio=0.1, filter_key="fastest_{}".format(args.n) + ) print( "\nWARNING: new dataset has replaced old one in demo.hdf5 file. " @@ -188,5 +202,7 @@ def split_fastest_from_hdf5(hdf5_path, n): print( "\nNOTE: the new dataset also contains a fastest_{} filter key, for an easy way " "to train on the fastest trajectories. Just set config.train.hdf5_filter to train on this " - "subset. A common choice is 225 when training on the bins-Can dataset.\n".format(args.n) + "subset. 
A common choice is 225 when training on the bins-Can dataset.\n".format( + args.n + ) ) diff --git a/robomimic/scripts/dataset_states_to_obs.py b/robomimic/scripts/dataset_states_to_obs.py index e08f4eb9..e2664c20 100644 --- a/robomimic/scripts/dataset_states_to_obs.py +++ b/robomimic/scripts/dataset_states_to_obs.py @@ -47,6 +47,7 @@ python dataset_states_to_obs.py --dataset /path/to/demo.hdf5 --output_name image_dense_done_1.hdf5 \ --done_mode 1 --dense --camera_names agentview robot0_eye_in_hand --camera_height 84 --camera_width 84 """ + import os import json import h5py @@ -62,13 +63,13 @@ def extract_trajectory( - env, - initial_state, - states, + env, + initial_state, + states, actions, done_mode, - camera_names=None, - camera_height=84, + camera_names=None, + camera_height=84, camera_width=84, ): """ @@ -80,8 +81,8 @@ def extract_trajectory( initial_state (dict): initial simulation state to load states (np.array): array of simulation states to load to extract information actions (np.array): array of actions - done_mode (int): how to write done signal. If 0, done is 1 whenever s' is a - success state. If 1, done is 1 at the end of each trajectory. + done_mode (int): how to write done signal. If 0, done is 1 whenever s' is a + success state. If 1, done is 1 at the end of each trajectory. If 2, do both. """ assert isinstance(env, EnvBase) @@ -97,18 +98,18 @@ def extract_trajectory( if is_robosuite_env: camera_info = get_camera_info( env=env, - camera_names=camera_names, - camera_height=camera_height, + camera_names=camera_names, + camera_height=camera_height, camera_width=camera_width, ) traj = dict( - obs=[], - next_obs=[], - rewards=[], - dones=[], - actions=np.array(actions), - states=np.array(states), + obs=[], + next_obs=[], + rewards=[], + dones=[], + actions=np.array(actions), + states=np.array(states), initial_state_dict=initial_state, ) traj_len = states.shape[0] @@ -121,7 +122,7 @@ def extract_trajectory( next_obs, _, _, _ = env.step(actions[t - 1]) else: # reset to simulator state to get observation - next_obs = env.reset_to({"states" : states[t]}) + next_obs = env.reset_to({"states": states[t]}) # infer reward signal # note: our tasks use reward r(s'), reward AFTER transition, so this is @@ -166,8 +167,8 @@ def extract_trajectory( def get_camera_info( env, - camera_names=None, - camera_height=84, + camera_names=None, + camera_height=84, camera_width=84, ): """ @@ -182,15 +183,27 @@ def get_camera_info( camera_info = dict() for cam_name in camera_names: - K = env.get_camera_intrinsic_matrix(camera_name=cam_name, camera_height=camera_height, camera_width=camera_width) - R = env.get_camera_extrinsic_matrix(camera_name=cam_name) # camera pose in world frame + K = env.get_camera_intrinsic_matrix( + camera_name=cam_name, camera_height=camera_height, camera_width=camera_width + ) + R = env.get_camera_extrinsic_matrix( + camera_name=cam_name + ) # camera pose in world frame if "eye_in_hand" in cam_name: # convert extrinsic matrix to be relative to robot eef control frame assert cam_name.startswith("robot0") eef_site_name = env.base_env.robots[0].controller.eef_name - eef_pos = np.array(env.base_env.sim.data.site_xpos[env.base_env.sim.model.site_name2id(eef_site_name)]) - eef_rot = np.array(env.base_env.sim.data.site_xmat[env.base_env.sim.model.site_name2id(eef_site_name)].reshape([3, 3])) - eef_pose = np.zeros((4, 4)) # eef pose in world frame + eef_pos = np.array( + env.base_env.sim.data.site_xpos[ + env.base_env.sim.model.site_name2id(eef_site_name) + ] + ) + eef_rot = np.array( 
+ env.base_env.sim.data.site_xmat[ + env.base_env.sim.model.site_name2id(eef_site_name) + ].reshape([3, 3]) + ) + eef_pose = np.zeros((4, 4)) # eef pose in world frame eef_pose[:3, :3] = eef_rot eef_pose[:3, 3] = eef_pos eef_pose[3, 3] = 1.0 @@ -198,7 +211,7 @@ def get_camera_info( eef_pose_inv[:3, :3] = eef_pose[:3, :3].T eef_pose_inv[:3, 3] = -eef_pose_inv[:3, :3].dot(eef_pose[:3, 3]) eef_pose_inv[3, 3] = 1.0 - R = R.dot(eef_pose_inv) # T_E^W * T_W^C = T_E^C + R = R.dot(eef_pose_inv) # T_E^W * T_W^C = T_E^C camera_info[cam_name] = dict( intrinsics=K.tolist(), extrinsics=R.tolist(), @@ -214,9 +227,9 @@ def dataset_states_to_obs(args): env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset) env = EnvUtils.create_env_for_data_processing( env_meta=env_meta, - camera_names=args.camera_names, - camera_height=args.camera_height, - camera_width=args.camera_width, + camera_names=args.camera_names, + camera_height=args.camera_height, + camera_width=args.camera_width, reward_shaping=args.shaped, use_depth_obs=args.depth, ) @@ -236,7 +249,7 @@ def dataset_states_to_obs(args): # maybe reduce the number of demonstrations to playback if args.n is not None: - demos = demos[:args.n] + demos = demos[: args.n] # output file in same directory as input file output_path = os.path.join(os.path.dirname(args.dataset), args.output_name) @@ -258,13 +271,13 @@ def dataset_states_to_obs(args): # extract obs, rewards, dones actions = f["data/{}/actions".format(ep)][()] traj, camera_info = extract_trajectory( - env=env, - initial_state=initial_state, - states=states, + env=env, + initial_state=initial_state, + states=states, actions=actions, done_mode=args.done_mode, - camera_names=args.camera_names, - camera_height=args.camera_height, + camera_names=args.camera_names, + camera_height=args.camera_height, camera_width=args.camera_width, ) @@ -285,19 +298,35 @@ def dataset_states_to_obs(args): ep_data_grp.create_dataset("dones", data=np.array(traj["dones"])) for k in traj["obs"]: if args.compress: - ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k]), compression="gzip") + ep_data_grp.create_dataset( + "obs/{}".format(k), + data=np.array(traj["obs"][k]), + compression="gzip", + ) else: - ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k])) + ep_data_grp.create_dataset( + "obs/{}".format(k), data=np.array(traj["obs"][k]) + ) if not args.exclude_next_obs: if args.compress: - ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k]), compression="gzip") + ep_data_grp.create_dataset( + "next_obs/{}".format(k), + data=np.array(traj["next_obs"][k]), + compression="gzip", + ) else: - ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k])) + ep_data_grp.create_dataset( + "next_obs/{}".format(k), data=np.array(traj["next_obs"][k]) + ) # episode metadata if is_robosuite_env: - ep_data_grp.attrs["model_file"] = traj["initial_state_dict"]["model"] # model xml for this episode - ep_data_grp.attrs["num_samples"] = traj["actions"].shape[0] # number of transitions in this episode + ep_data_grp.attrs["model_file"] = traj["initial_state_dict"][ + "model" + ] # model xml for this episode + ep_data_grp.attrs["num_samples"] = traj["actions"].shape[ + 0 + ] # number of transitions in this episode if camera_info is not None: assert is_robosuite_env @@ -305,14 +334,15 @@ def dataset_states_to_obs(args): total_samples += traj["actions"].shape[0] - # copy over all filter keys that exist in the original hdf5 if 
"mask" in f: f.copy("mask", f_out) # global metadata data_grp.attrs["total"] = total_samples - data_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) # environment info + data_grp.attrs["env_args"] = json.dumps( + env.serialize(), indent=4 + ) # environment info print("Wrote {} trajectories to {}".format(len(demos), output_path)) f.close() @@ -346,8 +376,8 @@ def dataset_states_to_obs(args): # flag for reward shaping parser.add_argument( - "--shaped", - action='store_true', + "--shaped", + action="store_true", help="(optional) use shaped rewards", ) @@ -355,7 +385,7 @@ def dataset_states_to_obs(args): parser.add_argument( "--camera_names", type=str, - nargs='+', + nargs="+", default=[], help="(optional) camera name(s) to use for image observations. Leave out to not use image observations.", ) @@ -376,13 +406,13 @@ def dataset_states_to_obs(args): # flag for including depth observations per camera parser.add_argument( - "--depth", - action='store_true', + "--depth", + action="store_true", help="(optional) use depth observations for each camera", ) - # specifies how the "done" signal is written. If "0", then the "done" signal is 1 wherever - # the transition (s, a, s') has s' in a task completion state. If "1", the "done" signal + # specifies how the "done" signal is written. If "0", then the "done" signal is 1 wherever + # the transition (s, a, s') has s' in a task completion state. If "1", the "done" signal # is one at the end of every trajectory. If "2", the "done" signal is 1 at task completion # states for successful trajectories and 1 at the end of all trajectories. parser.add_argument( @@ -395,29 +425,29 @@ def dataset_states_to_obs(args): # flag for copying rewards from source file instead of re-writing them parser.add_argument( - "--copy_rewards", - action='store_true', + "--copy_rewards", + action="store_true", help="(optional) copy rewards from source file instead of inferring them", ) # flag for copying dones from source file instead of re-writing them parser.add_argument( - "--copy_dones", - action='store_true', + "--copy_dones", + action="store_true", help="(optional) copy dones from source file instead of inferring them", ) # flag to exclude next obs in dataset parser.add_argument( - "--exclude-next-obs", - action='store_true', + "--exclude-next-obs", + action="store_true", help="(optional) exclude next obs in dataset", ) # flag to compress observations with gzip option in hdf5 parser.add_argument( - "--compress", - action='store_true', + "--compress", + action="store_true", help="(optional) compress observations with gzip option in hdf5", ) diff --git a/robomimic/scripts/download_datasets.py b/robomimic/scripts/download_datasets.py index caf3a280..c620902e 100644 --- a/robomimic/scripts/download_datasets.py +++ b/robomimic/scripts/download_datasets.py @@ -43,6 +43,7 @@ # download all real robot datasets python download_datasets.py --tasks real """ + import os import argparse @@ -50,9 +51,26 @@ import robomimic.utils.file_utils as FileUtils from robomimic import DATASET_REGISTRY -ALL_TASKS = ["lift", "can", "square", "transport", "tool_hang", "lift_real", "can_real", "tool_hang_real"] +ALL_TASKS = [ + "lift", + "can", + "square", + "transport", + "tool_hang", + "lift_real", + "can_real", + "tool_hang_real", +] ALL_DATASET_TYPES = ["ph", "mh", "mg", "paired"] -ALL_HDF5_TYPES = ["raw", "low_dim", "image", "low_dim_sparse", "low_dim_dense", "image_sparse", "image_dense"] +ALL_HDF5_TYPES = [ + "raw", + "low_dim", + "image", + "low_dim_sparse", + "low_dim_dense", + 
"image_sparse", + "image_dense", +] if __name__ == "__main__": @@ -70,7 +88,7 @@ parser.add_argument( "--tasks", type=str, - nargs='+', + nargs="+", default=["lift"], help="Tasks to download datasets for. Defaults to lift task. Pass 'all' to download all tasks (sim + real)\ 'sim' to download all sim tasks, 'real' to download all real tasks, or directly specify the list of\ @@ -81,7 +99,7 @@ parser.add_argument( "--dataset_types", type=str, - nargs='+', + nargs="+", default=["ph"], help="Dataset types to download datasets for (e.g. ph, mh, mg). Defaults to ph. Pass 'all' to download \ datasets for all available dataset types per task, or directly specify the list of dataset types.", @@ -91,7 +109,7 @@ parser.add_argument( "--hdf5_types", type=str, - nargs='+', + nargs="+", default=["low_dim"], help="hdf5 types to download datasets for (e.g. raw, low_dim, image). Defaults to raw. Pass 'all' \ to download datasets for all available hdf5 types per task and dataset, or directly specify the list\ @@ -101,8 +119,8 @@ # dry run - don't actually download datasets, but print which datasets would be downloaded parser.add_argument( "--dry_run", - action='store_true', - help="set this flag to do a dry run to only print which datasets would be downloaded" + action="store_true", + help="set this flag to do a dry run to only print which datasets would be downloaded", ) args = parser.parse_args() @@ -115,23 +133,35 @@ # load args download_tasks = args.tasks if "all" in download_tasks: - assert len(download_tasks) == 1, "all should be only tasks argument but got: {}".format(args.tasks) + assert ( + len(download_tasks) == 1 + ), "all should be only tasks argument but got: {}".format(args.tasks) download_tasks = ALL_TASKS elif "sim" in download_tasks: - assert len(download_tasks) == 1, "sim should be only tasks argument but got: {}".format(args.tasks) + assert ( + len(download_tasks) == 1 + ), "sim should be only tasks argument but got: {}".format(args.tasks) download_tasks = [task for task in ALL_TASKS if "real" not in task] elif "real" in download_tasks: - assert len(download_tasks) == 1, "real should be only tasks argument but got: {}".format(args.tasks) + assert ( + len(download_tasks) == 1 + ), "real should be only tasks argument but got: {}".format(args.tasks) download_tasks = [task for task in ALL_TASKS if "real" in task] download_dataset_types = args.dataset_types if "all" in download_dataset_types: - assert len(download_dataset_types) == 1, "all should be only dataset_types argument but got: {}".format(args.dataset_types) + assert ( + len(download_dataset_types) == 1 + ), "all should be only dataset_types argument but got: {}".format( + args.dataset_types + ) download_dataset_types = ALL_DATASET_TYPES download_hdf5_types = args.hdf5_types if "all" in download_hdf5_types: - assert len(download_hdf5_types) == 1, "all should be only hdf5_types argument but got: {}".format(args.hdf5_types) + assert ( + len(download_hdf5_types) == 1 + ), "all should be only hdf5_types argument but got: {}".format(args.hdf5_types) download_hdf5_types = ALL_HDF5_TYPES # download requested datasets @@ -141,13 +171,20 @@ if dataset_type in download_dataset_types: for hdf5_type in DATASET_REGISTRY[task][dataset_type]: if hdf5_type in download_hdf5_types: - download_dir = os.path.abspath(os.path.join(default_base_dir, task, dataset_type)) - print("\nDownloading dataset:\n task: {}\n dataset type: {}\n hdf5 type: {}\n download path: {}" - .format(task, dataset_type, hdf5_type, download_dir)) + download_dir = os.path.abspath( + 
os.path.join(default_base_dir, task, dataset_type) + ) + print( + "\nDownloading dataset:\n task: {}\n dataset type: {}\n hdf5 type: {}\n download path: {}".format( + task, dataset_type, hdf5_type, download_dir + ) + ) url = DATASET_REGISTRY[task][dataset_type][hdf5_type]["url"] if url is None: print( - "Skipping {}-{}-{}, no url for dataset exists.".format(task, dataset_type, hdf5_type) + "Skipping {}-{}-{}, no url for dataset exists.".format( + task, dataset_type, hdf5_type + ) + " Create this dataset locally by running the appropriate command from robomimic/scripts/extract_obs_from_raw_datasets.sh." ) continue @@ -157,7 +194,9 @@ # Make sure path exists and create if it doesn't os.makedirs(download_dir, exist_ok=True) FileUtils.download_url( - url=DATASET_REGISTRY[task][dataset_type][hdf5_type]["url"], + url=DATASET_REGISTRY[task][dataset_type][hdf5_type][ + "url" + ], download_dir=download_dir, ) print("") diff --git a/robomimic/scripts/download_momart_datasets.py b/robomimic/scripts/download_momart_datasets.py index affecf11..fc230e3b 100644 --- a/robomimic/scripts/download_momart_datasets.py +++ b/robomimic/scripts/download_momart_datasets.py @@ -45,6 +45,7 @@ # download all datasets python download_datasets.py --tasks all --dataset_types all """ + import os import argparse @@ -82,28 +83,28 @@ parser.add_argument( "--tasks", type=str, - nargs='+', + nargs="+", default=["table_setup_from_dishwasher"], help="Tasks to download datasets for. Defaults to table_setup_from_dishwasher task. Pass 'all' to download all" - f"5 tasks, or directly specify the list of tasks. Options are any of: {ALL_TASKS}", + f"5 tasks, or directly specify the list of tasks. Options are any of: {ALL_TASKS}", ) # dataset types to download datasets for parser.add_argument( "--dataset_types", type=str, - nargs='+', + nargs="+", default=["expert"], help="Dataset types to download datasets for (e.g. expert, suboptimal). Defaults to expert. Pass 'all' to " - "download datasets for all available dataset types per task, or directly specify the list of dataset " - f"types. Options are any of: {ALL_DATASET_TYPES}", + "download datasets for all available dataset types per task, or directly specify the list of dataset " + f"types. 
Options are any of: {ALL_DATASET_TYPES}", ) # dry run - don't actually download datasets, but print which datasets would be downloaded parser.add_argument( "--dry_run", - action='store_true', - help="set this flag to do a dry run to only print which datasets would be downloaded" + action="store_true", + help="set this flag to do a dry run to only print which datasets would be downloaded", ) args = parser.parse_args() @@ -116,12 +117,18 @@ # load args download_tasks = args.tasks if "all" in download_tasks: - assert len(download_tasks) == 1, "all should be only tasks argument but got: {}".format(args.tasks) + assert ( + len(download_tasks) == 1 + ), "all should be only tasks argument but got: {}".format(args.tasks) download_tasks = ALL_TASKS download_dataset_types = args.dataset_types if "all" in download_dataset_types: - assert len(download_dataset_types) == 1, "all should be only dataset_types argument but got: {}".format(args.dataset_types) + assert ( + len(download_dataset_types) == 1 + ), "all should be only dataset_types argument but got: {}".format( + args.dataset_types + ) download_dataset_types = ALL_DATASET_TYPES # Run sanity check first to warn user if they're about to download a huge amount of data @@ -134,8 +141,13 @@ # Verify user acknowledgement if we're not doing a dry run if not args.dry_run: - user_response = input(f"Warning: requested datasets will take a total of {total_size}GB. Proceed? y/n\n") - assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." + user_response = input( + f"Warning: requested datasets will take a total of {total_size}GB. Proceed? y/n\n" + ) + assert user_response.lower() in { + "yes", + "y", + }, f"Did not receive confirmation. Aborting download." # download requested datasets for task in MOMART_DATASET_REGISTRY: @@ -143,12 +155,16 @@ for dataset_type in MOMART_DATASET_REGISTRY[task]: if dataset_type in download_dataset_types: dataset_info = MOMART_DATASET_REGISTRY[task][dataset_type] - download_dir = os.path.abspath(os.path.join(default_base_dir, task, dataset_type)) - print(f"\nDownloading dataset:\n" - f" task: {task}\n" - f" dataset type: {dataset_type}\n" - f" dataset size: {dataset_info['size']}GB\n" - f" download path: {download_dir}") + download_dir = os.path.abspath( + os.path.join(default_base_dir, task, dataset_type) + ) + print( + f"\nDownloading dataset:\n" + f" task: {task}\n" + f" dataset type: {dataset_type}\n" + f" dataset size: {dataset_info['size']}GB\n" + f" download path: {download_dir}" + ) if args.dry_run: print("\ndry run: skip download") else: diff --git a/robomimic/scripts/generate_config_templates.py b/robomimic/scripts/generate_config_templates.py index 56e1d871..43523d92 100644 --- a/robomimic/scripts/generate_config_templates.py +++ b/robomimic/scripts/generate_config_templates.py @@ -2,6 +2,7 @@ Helpful script to generate example config files for each algorithm. These should be re-generated when new config options are added, or when default settings in the config classes are modified. """ + import os import json @@ -24,5 +25,5 @@ def main(): c.dump(filename=json_path) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/robomimic/scripts/generate_paper_configs.py b/robomimic/scripts/generate_paper_configs.py index 52ed7d5b..6ebf676d 100644 --- a/robomimic/scripts/generate_paper_configs.py +++ b/robomimic/scripts/generate_paper_configs.py @@ -18,11 +18,20 @@ # Specify where datasets exist, and specify where configs should be generated. 
python generate_paper_configs.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results """ + import os import argparse import robomimic from robomimic import DATASET_REGISTRY -from robomimic.config import Config, BCConfig, BCQConfig, CQLConfig, HBCConfig, IRISConfig, config_factory +from robomimic.config import ( + Config, + BCConfig, + BCQConfig, + CQLConfig, + HBCConfig, + IRISConfig, + config_factory, +) def modify_config_for_default_low_dim_exp(config): @@ -63,11 +72,11 @@ def modify_config_for_default_low_dim_exp(config): with config.observation.values_unlocked(): # default observation is eef pose, gripper finger position, and object information, - # all of which are low-dim. + # all of which are low-dim. default_low_dim_obs = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", "object", ] # handle hierarchical observation configs @@ -131,14 +140,13 @@ def modify_config_for_default_image_exp(config): config.train.batch_size = 16 config.train.num_epochs = 600 - with config.observation.values_unlocked(): # default low-dim observation is eef pose, gripper finger position # default image observation is external camera and wrist camera config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", ] config.observation.modalities.obs.rgb = [ "agentview_image", @@ -150,13 +158,23 @@ def modify_config_for_default_image_exp(config): # default image encoder architecture is ResNet with spatial softmax config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = ( + False + ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 
0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -171,7 +189,9 @@ def modify_config_for_default_image_exp(config): return config -def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_dataset_dir, filter_key=None): +def modify_config_for_dataset( + config, task_name, dataset_type, hdf5_type, base_dataset_dir, filter_key=None +): """ Modifies a Config object with experiment, training, and observation settings to correspond to experiment settings for the dataset collected on @task_name with @@ -185,7 +205,7 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). base_dataset_dir (str): path to directory where datasets are on disk. Directory structure is expected to be consistent with the output @@ -194,12 +214,19 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d filter_key (str): if not None, use the provided filter key to select a subset of the provided dataset """ - assert task_name in DATASET_REGISTRY, \ - "task {} not found in dataset registry!".format(task_name) - assert dataset_type in DATASET_REGISTRY[task_name], \ - "dataset type {} not found for task {} in dataset registry!".format(dataset_type, task_name) - assert hdf5_type in DATASET_REGISTRY[task_name][dataset_type], \ - "hdf5 type {} not found for dataset type {} and task {} in dataset registry!".format(hdf5_type, dataset_type, task_name) + assert ( + task_name in DATASET_REGISTRY + ), "task {} not found in dataset registry!".format(task_name) + assert ( + dataset_type in DATASET_REGISTRY[task_name] + ), "dataset type {} not found for task {} in dataset registry!".format( + dataset_type, task_name + ) + assert ( + hdf5_type in DATASET_REGISTRY[task_name][dataset_type] + ), "hdf5 type {} not found for dataset type {} and task {} in dataset registry!".format( + hdf5_type, dataset_type, task_name + ) is_real_dataset = "real" in task_name if is_real_dataset: @@ -210,7 +237,9 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d with config.experiment.values_unlocked(): # look up rollout evaluation horizon in registry and set it - config.experiment.rollout.horizon = DATASET_REGISTRY[task_name][dataset_type][hdf5_type]["horizon"] + config.experiment.rollout.horizon = DATASET_REGISTRY[task_name][dataset_type][ + hdf5_type + ]["horizon"] if dataset_type == "mg": # machine-generated datasets did not use validation @@ -236,13 +265,19 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d raise ValueError("Unknown dataset type") else: file_name = url.split("/")[-1] - config.train.data = os.path.join(base_dataset_dir, task_name, dataset_type, file_name) + config.train.data = os.path.join( + base_dataset_dir, task_name, dataset_type, file_name + ) config.train.hdf5_filter_key = None if filter_key is None else filter_key config.train.hdf5_validation_filter_key = None if config.experiment.validate: # set train and valid keys for validation - config.train.hdf5_filter_key = "train" if filter_key is None else "{}_train".format(filter_key) - config.train.hdf5_validation_filter_key = "valid" if filter_key is None else "{}_valid".format(filter_key) + config.train.hdf5_filter_key = ( + "train" if filter_key is None else 
"{}_train".format(filter_key) + ) + config.train.hdf5_validation_filter_key = ( + "valid" if filter_key is None else "{}_valid".format(filter_key) + ) with config.observation.values_unlocked(): # maybe modify observation names and randomization sizes (since image size might be different) @@ -250,8 +285,8 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d if is_real_dataset: # modify observation names for real robot datasets config.observation.modalities.obs.low_dim = [ - "ee_pose", - "gripper_position", + "ee_pose", + "gripper_position", ] if task_name == "tool_hang_real": @@ -277,12 +312,12 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d if task_name == "transport": # robot proprioception per arm config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", ] # shoulder and wrist cameras per arm @@ -306,12 +341,12 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d if task_name == "transport": # robot proprioception per arm default_low_dim_obs = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", "object", ] # handle hierarchical observation configs @@ -341,7 +376,7 @@ def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_d def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -352,7 +387,7 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
""" assert isinstance(config, BCConfig), "must be BCConfig" assert config.algo_name == "bc", "must be BCConfig" @@ -363,16 +398,20 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # learning rate 1e-4 - config.algo.actor_layer_dims = (1024, 1024) # MLP size (1024, 1024) - config.algo.gmm.enabled = True # enable GMM + config.algo.optim_params.policy.learning_rate.initial = ( + 1e-4 # learning rate 1e-4 + ) + config.algo.actor_layer_dims = (1024, 1024) # MLP size (1024, 1024) + config.algo.gmm.enabled = True # enable GMM if dataset_type == "mg": # machine-generated datasets don't use GMM - config.algo.gmm.enabled = False # disable GMM + config.algo.gmm.enabled = False # disable GMM if hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # low-dim mg uses LR 1e-3 - config.algo.optim_params.policy.learning_rate.initial = 1e-3 # learning rate 1e-3 + config.algo.optim_params.policy.learning_rate.initial = ( + 1e-3 # learning rate 1e-3 + ) return config @@ -380,7 +419,7 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -391,7 +430,7 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
""" assert isinstance(config, BCConfig), "must be BCConfig" assert config.algo_name == "bc", "must be BCConfig" @@ -410,22 +449,24 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) config.algo.rnn.horizon = 10 # base parameters that may get modified - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # learning rate 1e-4 - config.algo.actor_layer_dims = () # no MLP layers between rnn layer and output - config.algo.gmm.enabled = True # enable GMM - config.algo.rnn.hidden_dim = 400 # rnn dim 400 + config.algo.optim_params.policy.learning_rate.initial = ( + 1e-4 # learning rate 1e-4 + ) + config.algo.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.gmm.enabled = True # enable GMM + config.algo.rnn.hidden_dim = 400 # rnn dim 400 if dataset_type == "mg": # update hyperparams for machine-generated datasets - config.algo.gmm.enabled = False # disable GMM + config.algo.gmm.enabled = False # disable GMM if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # image datasets use RNN dim 1000 - config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 + config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 else: # update hyperparams for all other dataset types (ph, mh, paired) if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # image datasets use RNN dim 1000 - config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 + config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 return config @@ -433,7 +474,7 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCQConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -444,7 +485,7 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
""" assert isinstance(config, BCQConfig), "must be BCQConfig" assert config.algo_name == "bcq", "must be BCQConfig" @@ -456,31 +497,37 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.optim_params.critic.learning_rate.initial = 1e-4 # all learning rates 1e-3 + config.algo.optim_params.critic.learning_rate.initial = ( + 1e-4 # all learning rates 1e-3 + ) config.algo.optim_params.action_sampler.learning_rate.initial = 1e-4 config.algo.optim_params.actor.learning_rate.initial = 1e-3 - config.algo.actor.enabled = False # disable actor by default - config.algo.action_sampler.vae.enabled = True # use VAE action sampler + config.algo.actor.enabled = False # disable actor by default + config.algo.action_sampler.vae.enabled = True # use VAE action sampler config.algo.action_sampler.gmm.enabled = False - config.algo.action_sampler.vae.kl_weight = 0.05 # beta 0.05 for VAE - config.algo.action_sampler.vae.latent_dim = 14 # latent dim 14 - config.algo.action_sampler.vae.prior.learn = False # N(0, 1) prior - config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) + config.algo.action_sampler.vae.kl_weight = 0.05 # beta 0.05 for VAE + config.algo.action_sampler.vae.latent_dim = 14 # latent dim 14 + config.algo.action_sampler.vae.prior.learn = False # N(0, 1) prior + config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) config.algo.action_sampler.vae.encoder_layer_dims = (300, 400) config.algo.action_sampler.vae.decoder_layer_dims = (300, 400) config.algo.actor.layer_dims = (300, 400) - config.algo.target_tau = 5e-4 # tau 5e-4 - config.algo.discount = 0.99 # discount 0.99 - config.algo.critic.num_action_samples = 10 # number of action sampler samples at train and test + config.algo.target_tau = 5e-4 # tau 5e-4 + config.algo.discount = 0.99 # discount 0.99 + config.algo.critic.num_action_samples = ( + 10 # number of action sampler samples at train and test + ) config.algo.critic.num_action_samples_rollout = 100 if dataset_type == "mg": # update hyperparams for machine-generated datasets - config.algo.optim_params.critic.learning_rate.initial = 1e-3 # all learning rates 1e-3 + config.algo.optim_params.critic.learning_rate.initial = ( + 1e-3 # all learning rates 1e-3 + ) config.algo.optim_params.action_sampler.learning_rate.initial = 1e-3 config.algo.optim_params.actor.learning_rate.initial = 1e-3 - config.algo.action_sampler.vae.kl_weight = 0.5 # beta 0.5 for VAE - config.algo.target_tau = 5e-3 # tau 5e-3 + config.algo.action_sampler.vae.kl_weight = 0.5 # beta 0.5 for VAE + config.algo.target_tau = 5e-3 # tau 5e-3 if hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # enable actor only on low-dim @@ -519,7 +566,7 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a CQLConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -530,7 +577,7 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. 
raw, low_dim, image). """ assert isinstance(config, CQLConfig), "must be CQLConfig" assert config.algo_name == "cql", "must be CQLConfig" @@ -549,15 +596,17 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.optim_params.critic.learning_rate.initial = 1e-3 # learning rates + config.algo.optim_params.critic.learning_rate.initial = 1e-3 # learning rates config.algo.optim_params.actor.learning_rate.initial = 3e-4 - config.algo.actor.target_entropy = "default" # use automatic entropy tuning to default target value - config.algo.critic.deterministic_backup = True # deterministic Q-backup - config.algo.critic.target_q_gap = 5.0 # use Lagrange, with threshold 5.0 + config.algo.actor.target_entropy = ( + "default" # use automatic entropy tuning to default target value + ) + config.algo.critic.deterministic_backup = True # deterministic Q-backup + config.algo.critic.target_q_gap = 5.0 # use Lagrange, with threshold 5.0 config.algo.critic.min_q_weight = 1.0 - config.algo.target_tau = 5e-3 # tau 5e-3 - config.algo.discount = 0.99 # discount 0.99 - config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) + config.algo.target_tau = 5e-3 # tau 5e-3 + config.algo.discount = 0.99 # discount 0.99 + config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) config.algo.actor.layer_dims = (300, 400) if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: @@ -570,7 +619,7 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a HBCConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -581,34 +630,40 @@ def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
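
    A minimal usage sketch (argument values are hypothetical):

        config = config_factory(algo_name="hbc")
        config = modify_hbc_config_for_dataset(
            config, task_name="square", dataset_type="ph", hdf5_type="low_dim"
        )
        # HBC configs are only generated for low-dim hdf5 types (asserted below)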
""" assert isinstance(config, HBCConfig), "must be HBCConfig" assert config.algo_name == "hbc", "must be HBCConfig" assert dataset_type in ["ph", "mh", "mg", "paired"], "invalid dataset type" - assert hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"], "HBC only runs on low-dim" + assert hdf5_type in [ + "low_dim", + "low_dim_sparse", + "low_dim_dense", + ], "HBC only runs on low-dim" is_real_dataset = "real" in task_name assert not is_real_dataset, "we only ran BC-RNN on real robot" with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.actor.optim_params.policy.learning_rate.initial = 1e-3 # learning rates + config.algo.actor.optim_params.policy.learning_rate.initial = ( + 1e-3 # learning rates + ) config.algo.planner.optim_params.goal_network.learning_rate.initial = 1e-3 - config.algo.planner.vae.enabled = True # goal VAE settings - config.algo.planner.vae.kl_weight = 5e-4 # beta 5e-4 - config.algo.planner.vae.latent_dim = 16 # latent dim 16 - config.algo.planner.vae.prior.learn = True # learn GMM prior with 10 modes + config.algo.planner.vae.enabled = True # goal VAE settings + config.algo.planner.vae.kl_weight = 5e-4 # beta 5e-4 + config.algo.planner.vae.latent_dim = 16 # latent dim 16 + config.algo.planner.vae.prior.learn = True # learn GMM prior with 10 modes config.algo.planner.vae.prior.is_conditioned = True config.algo.planner.vae.prior.use_gmm = True config.algo.planner.vae.prior.gmm_learn_weights = True config.algo.planner.vae.prior.gmm_num_modes = 10 - config.algo.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes + config.algo.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes config.algo.planner.vae.decoder_layer_dims = (1024, 1024) config.algo.planner.vae.prior_layer_dims = (1024, 1024) - config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim - config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim + config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output if dataset_type == "mg": # update hyperparams for machine-generated datasets @@ -621,7 +676,7 @@ def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_iris_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a IRISConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -632,65 +687,84 @@ def modify_iris_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
""" assert isinstance(config, IRISConfig), "must be IRISConfig" assert config.algo_name == "iris", "must be IRISConfig" assert dataset_type in ["ph", "mh", "mg", "paired"], "invalid dataset type" - assert hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"], "IRIS only runs on low-dim" + assert hdf5_type in [ + "low_dim", + "low_dim_sparse", + "low_dim_dense", + ], "IRIS only runs on low-dim" is_real_dataset = "real" in task_name assert not is_real_dataset, "we only ran BC-RNN on real robot" with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.actor.optim_params.policy.learning_rate.initial = 1e-3 # learning rates - config.algo.value_planner.planner.optim_params.goal_network.learning_rate.initial = 1e-3 + config.algo.actor.optim_params.policy.learning_rate.initial = ( + 1e-3 # learning rates + ) + config.algo.value_planner.planner.optim_params.goal_network.learning_rate.initial = ( + 1e-3 + ) config.algo.value_planner.value.optim_params.critic.learning_rate.initial = 1e-3 - config.algo.value_planner.value.optim_params.action_sampler.learning_rate.initial = 1e-4 + config.algo.value_planner.value.optim_params.action_sampler.learning_rate.initial = ( + 1e-4 + ) - config.algo.value_planner.planner.vae.enabled = True # goal VAE settings - config.algo.value_planner.planner.vae.kl_weight = 5e-4 # beta 5e-4 - config.algo.value_planner.planner.vae.latent_dim = 14 # latent dim 14 - config.algo.value_planner.planner.vae.prior.learn = True # learn GMM prior with 10 modes + config.algo.value_planner.planner.vae.enabled = True # goal VAE settings + config.algo.value_planner.planner.vae.kl_weight = 5e-4 # beta 5e-4 + config.algo.value_planner.planner.vae.latent_dim = 14 # latent dim 14 + config.algo.value_planner.planner.vae.prior.learn = ( + True # learn GMM prior with 10 modes + ) config.algo.value_planner.planner.vae.prior.is_conditioned = True config.algo.value_planner.planner.vae.prior.use_gmm = True config.algo.value_planner.planner.vae.prior.gmm_learn_weights = True config.algo.value_planner.planner.vae.prior.gmm_num_modes = 10 - config.algo.value_planner.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes + config.algo.value_planner.planner.vae.encoder_layer_dims = ( + 1024, + 1024, + ) # VAE network sizes config.algo.value_planner.planner.vae.decoder_layer_dims = (1024, 1024) config.algo.value_planner.planner.vae.prior_layer_dims = (1024, 1024) - config.algo.value_planner.value.target_tau = 5e-4 # Value tau - config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.5 # Value KL + config.algo.value_planner.value.target_tau = 5e-4 # Value tau + config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.5 # Value KL config.algo.value_planner.value.action_sampler.vae.latent_dim = 16 config.algo.value_planner.value.action_sampler.actor_layer_dims = (300, 400) - config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim - config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim + config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output if dataset_type in ["mh", "paired"]: # value LR 1e-4, KL weight is 0.05 for multi-human datasets - config.algo.value_planner.value.optim_params.critic.learning_rate.initial = 1e-4 + config.algo.value_planner.value.optim_params.critic.learning_rate.initial = ( + 1e-4 + ) config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.05 if dataset_type in ["mg"]: # Enable value actor 
and set larger target tau config.algo.value_planner.value.actor.enabled = True - config.algo.value_planner.value.optim_params.actor.learning_rate.initial = 1e-3 + config.algo.value_planner.value.optim_params.actor.learning_rate.initial = ( + 1e-3 + ) config.algo.value_planner.value.target_tau = 5e-3 return config def generate_experiment_config( - base_exp_name, - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_name, - algo_config_modifier, - task_name, - dataset_type, + base_exp_name, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_name, + algo_config_modifier, + task_name, + dataset_type, hdf5_type, filter_key=None, additional_name=None, @@ -721,7 +795,7 @@ def generate_experiment_config( dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). filter_key (str): if not None, use the provided filter key to select a subset of the provided dataset @@ -746,18 +820,18 @@ def generate_experiment_config( config = modifier_for_obs(config) # add in config based on the dataset config = modify_config_for_dataset( - config=config, - task_name=task_name, - dataset_type=dataset_type, - hdf5_type=hdf5_type, + config=config, + task_name=task_name, + dataset_type=dataset_type, + hdf5_type=hdf5_type, base_dataset_dir=base_dataset_dir, filter_key=filter_key, ) # add in algo hypers based on dataset config = algo_config_modifier( - config=config, - task_name=task_name, - dataset_type=dataset_type, + config=config, + task_name=task_name, + dataset_type=dataset_type, hdf5_type=hdf5_type, ) if additional_config_modifier is not None: @@ -766,23 +840,47 @@ def generate_experiment_config( # account for filter key in experiment naming and directory naming filter_key_str = "_{}".format(filter_key) if filter_key is not None else "" - dataset_type_dir = "{}/{}".format(dataset_type, filter_key) if filter_key is not None else dataset_type + dataset_type_dir = ( + "{}/{}".format(dataset_type, filter_key) + if filter_key is not None + else dataset_type + ) # account for @additional_name - additional_name_str = "_{}".format(additional_name) if additional_name is not None else "" + additional_name_str = ( + "_{}".format(additional_name) if additional_name is not None else "" + ) json_name = "{}{}".format(algo_name, additional_name_str) # set experiment name with config.experiment.values_unlocked(): - config.experiment.name = "{}_{}_{}_{}{}_{}{}".format(base_exp_name, algo_name, task_name, dataset_type, filter_key_str, hdf5_type, additional_name_str) + config.experiment.name = "{}_{}_{}_{}{}_{}{}".format( + base_exp_name, + algo_name, + task_name, + dataset_type, + filter_key_str, + hdf5_type, + additional_name_str, + ) # set output folder with config.train.values_unlocked(): if base_output_dir is None: base_output_dir = config.train.output_dir - config.train.output_dir = os.path.join(base_output_dir, base_exp_name, algo_name, task_name, dataset_type_dir, hdf5_type, "trained_models") - + config.train.output_dir = os.path.join( + base_output_dir, + base_exp_name, + algo_name, + task_name, + dataset_type_dir, + hdf5_type, + "trained_models", + ) + # save config to json file - dir_to_save = os.path.join(base_config_dir, base_exp_name, task_name, dataset_type_dir, hdf5_type) + dir_to_save = os.path.join( + base_config_dir, base_exp_name, task_name, dataset_type_dir, hdf5_type + ) os.makedirs(dir_to_save, exist_ok=True) json_path = 
os.path.join(dir_to_save, "{}.json".format(json_name)) config.dump(filename=json_path) @@ -791,10 +889,10 @@ def generate_experiment_config( def generate_core_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for core set of experiments. @@ -809,18 +907,18 @@ def generate_core_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ - core_json_paths = Config() # use for convenient nested dict + core_json_paths = Config() # use for convenient nested dict for task in DATASET_REGISTRY: for dataset_type in DATASET_REGISTRY[task]: for hdf5_type in DATASET_REGISTRY[task][dataset_type]: # if not real robot dataset, skip raw hdf5 - is_real_dataset = ("real" in task) + is_real_dataset = "real" in task if not is_real_dataset and hdf5_type == "raw": continue - + # get list of algorithms to generate configs for, for this hdf5 dataset algos_to_generate = ["bc", "bc_rnn", "bcq", "cql", "hbc", "iris"] if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: @@ -838,24 +936,26 @@ def generate_core_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, ) # save json path into dict - core_json_paths[task][dataset_type][hdf5_type][algo_name] = json_path + core_json_paths[task][dataset_type][hdf5_type][ + algo_name + ] = json_path return core_json_paths def generate_subopt_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the suboptimal human subsets of the multi-human datasets. @@ -872,10 +972,10 @@ def generate_subopt_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
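
    Illustrative usage (a sketch only; assumes the algo_to_modifier dict assembled
    in __main__ below and the /tmp paths from the module docstring):

        paths = generate_subopt_configs(
            base_config_dir="/tmp/configs",
            base_dataset_dir="/tmp/datasets",
            base_output_dir="/tmp/experiment_results",
            algo_to_config_modifier=algo_to_modifier,
        )
        # returns a nested dict, e.g. paths["square"]["mh/worse"]["low_dim"]["bc_rnn"]
        # maps to the generated config json path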
""" - subopt_json_paths = Config() # use for convenient nested dict + subopt_json_paths = Config() # use for convenient nested dict for task in ["lift", "can", "square", "transport"]: # only generate configs for multi-human data subsets for dataset_type in ["mh"]: @@ -890,7 +990,14 @@ def generate_subopt_configs( for algo_name in algos_to_generate: - for fk in ["worse", "okay", "better", "worse_okay", "worse_better", "okay_better"]: + for fk in [ + "worse", + "okay", + "better", + "worse_okay", + "worse_better", + "okay_better", + ]: # generate config for this experiment config, json_path = generate_experiment_config( @@ -898,26 +1005,28 @@ def generate_subopt_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, filter_key=fk, ) # save json path into dict dataset_type_dir = "{}/{}".format(dataset_type, fk) - subopt_json_paths[task][dataset_type_dir][hdf5_type][algo_name] = json_path + subopt_json_paths[task][dataset_type_dir][hdf5_type][ + algo_name + ] = json_path return subopt_json_paths def generate_dataset_size_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the dataset size ablation experiments, where BC-RNN models @@ -933,10 +1042,10 @@ def generate_dataset_size_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
""" - size_ablation_json_paths = Config() # use for convenient nested dict + size_ablation_json_paths = Config() # use for convenient nested dict for task in ["lift", "can", "square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: @@ -951,26 +1060,28 @@ def generate_dataset_size_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, filter_key=fk, ) # save json path into dict dataset_type_dir = "{}/{}".format(dataset_type, fk) - size_ablation_json_paths[task][dataset_type_dir][hdf5_type][algo_name] = json_path + size_ablation_json_paths[task][dataset_type_dir][hdf5_type][ + algo_name + ] = json_path return size_ablation_json_paths def generate_obs_ablation_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the observation ablation experiments, where BC and BC-RNN models @@ -986,7 +1097,7 @@ def generate_obs_ablation_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
""" @@ -994,25 +1105,35 @@ def generate_obs_ablation_configs( def add_eef_vel(config): with config.observation.values_unlocked(): old_low_dim_mods = list(config.observation.modalities.obs.low_dim) - old_low_dim_mods.extend(["robot0_eef_vel_lin", "robot0_eef_vel_ang", "robot0_gripper_qvel"]) + old_low_dim_mods.extend( + ["robot0_eef_vel_lin", "robot0_eef_vel_ang", "robot0_gripper_qvel"] + ) if "robot1_eef_pos" in old_low_dim_mods: - old_low_dim_mods.extend(["robot1_eef_vel_lin", "robot1_eef_vel_ang", "robot1_gripper_qvel"]) + old_low_dim_mods.extend( + ["robot1_eef_vel_lin", "robot1_eef_vel_ang", "robot1_gripper_qvel"] + ) config.observation.modalities.obs.low_dim = old_low_dim_mods return config def add_proprio(config): with config.observation.values_unlocked(): old_low_dim_mods = list(config.observation.modalities.obs.low_dim) - old_low_dim_mods.extend(["robot0_joint_pos_cos", "robot0_joint_pos_sin", "robot0_joint_vel"]) + old_low_dim_mods.extend( + ["robot0_joint_pos_cos", "robot0_joint_pos_sin", "robot0_joint_vel"] + ) if "robot1_eef_pos" in old_low_dim_mods: - old_low_dim_mods.extend(["robot1_joint_pos_cos", "robot1_joint_pos_sin", "robot1_joint_vel"]) + old_low_dim_mods.extend( + ["robot1_joint_pos_cos", "robot1_joint_pos_sin", "robot1_joint_vel"] + ) config.observation.modalities.obs.low_dim = old_low_dim_mods return config def remove_wrist(config): with config.observation.values_unlocked(): old_image_mods = list(config.observation.modalities.obs.rgb) - config.observation.modalities.obs.rgb = [m for m in old_image_mods if "eye_in_hand" not in m] + config.observation.modalities.obs.rgb = [ + m for m in old_image_mods if "eye_in_hand" not in m + ] return config def remove_rand(config): @@ -1020,7 +1141,7 @@ def remove_rand(config): config.observation.encoder.rgb.obs_randomizer_class = None return config - obs_ablation_json_paths = Config() # use for convenient nested dict + obs_ablation_json_paths = Config() # use for convenient nested dict for task in ["square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: @@ -1029,7 +1150,12 @@ def remove_rand(config): if hdf5_type == "low_dim": obs_modifiers = [add_eef_vel, add_proprio] else: - obs_modifiers = [add_eef_vel, add_proprio, remove_wrist, remove_rand] + obs_modifiers = [ + add_eef_vel, + add_proprio, + remove_wrist, + remove_rand, + ] # only bc and bc-rnn algos_to_generate = ["bc", "bc_rnn"] @@ -1041,10 +1167,10 @@ def remove_rand(config): base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, additional_name=obs_modifier.__name__, additional_config_modifier=obs_modifier, @@ -1052,19 +1178,21 @@ def remove_rand(config): # save json path into dict algo_name_str = "{}_{}".format(algo_name, obs_modifier.__name__) - obs_ablation_json_paths[task][dataset_type][hdf5_type][algo_name_str] = json_path + obs_ablation_json_paths[task][dataset_type][hdf5_type][ + algo_name_str + ] = json_path return obs_ablation_json_paths def generate_hyper_ablation_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ - Helper function to generate all configs for the 
hyperparameter sensitivity experiments, + Helper function to generate all configs for the hyperparameter sensitivity experiments, where BC-RNN models were trained on different ablations. Args: @@ -1077,7 +1205,7 @@ def generate_hyper_ablation_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ @@ -1094,12 +1222,12 @@ def change_gmm(config): def change_mlp(config): with config.algo.values_unlocked(): - config.algo.actor_layer_dims = (1024, 1024) + config.algo.actor_layer_dims = (1024, 1024) return config def change_conv(config): with config.observation.values_unlocked(): - config.observation.encoder.rgb.core_class = 'ShallowConv' + config.observation.encoder.rgb.core_class = "ShallowConv" config.observation.encoder.rgb.core_kwargs = Config() return config @@ -1113,16 +1241,26 @@ def change_rnnd_image(config): config.algo.rnn.hidden_dim = 400 return config - hyper_ablation_json_paths = Config() # use for convenient nested dict + hyper_ablation_json_paths = Config() # use for convenient nested dict for task in ["square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: # observation modifiers to apply if hdf5_type == "low_dim": - hyper_modifiers = [change_lr, change_gmm, change_mlp, change_rnnd_low_dim] + hyper_modifiers = [ + change_lr, + change_gmm, + change_mlp, + change_rnnd_low_dim, + ] else: - hyper_modifiers = [change_lr, change_gmm, change_conv, change_rnnd_image] + hyper_modifiers = [ + change_lr, + change_gmm, + change_conv, + change_rnnd_image, + ] # only bc and bc-rnn algo_name = "bc_rnn" @@ -1133,10 +1271,10 @@ def change_rnnd_image(config): base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, additional_name=hyper_modifier.__name__, additional_config_modifier=hyper_modifier, @@ -1144,16 +1282,18 @@ def change_rnnd_image(config): # save json path into dict algo_name_str = "{}_{}".format(algo_name, hyper_modifier.__name__) - hyper_ablation_json_paths[task][dataset_type][hdf5_type][algo_name_str] = json_path + hyper_ablation_json_paths[task][dataset_type][hdf5_type][ + algo_name_str + ] = json_path return hyper_ablation_json_paths def generate_d4rl_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for reproducing BCQ, CQL, and TD3-BC runs on some D4RL @@ -1169,7 +1309,7 @@ def generate_d4rl_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. 
- algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ @@ -1188,15 +1328,19 @@ def cql_algo_config_modifier(config): # taken from TD3-BC settings described in their paper config.algo.optim_params.critic.learning_rate.initial = 3e-4 config.algo.optim_params.actor.learning_rate.initial = 3e-5 - config.algo.actor.bc_start_steps = 40000 # pre-training steps for actor - config.algo.critic.target_q_gap = None # no Lagrange, and fixed weight of 10.0 + config.algo.actor.bc_start_steps = 40000 # pre-training steps for actor + config.algo.critic.target_q_gap = ( + None # no Lagrange, and fixed weight of 10.0 + ) config.algo.critic.cql_weight = 10.0 - config.algo.critic.min_q_weight = 1.0 - config.algo.critic.deterministic_backup = True # deterministic backup (no entropy in Q-target) - config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes + config.algo.critic.min_q_weight = 1.0 + config.algo.critic.deterministic_backup = ( + True # deterministic backup (no entropy in Q-target) + ) + config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes config.algo.critic.layer_dims = (256, 256, 256) return config - + def iql_algo_config_modifier(config): with config.algo.values_unlocked(): # taken from IQL settings described in their paper @@ -1206,7 +1350,7 @@ def iql_algo_config_modifier(config): config.algo.optim_params.critic.learning_rate.initial = 3e-4 config.algo.optim_params.vf.learning_rate.initial = 3e-4 config.algo.optim_params.actor.learning_rate.initial = 3e-4 - config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes + config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes config.algo.critic.layer_dims = (256, 256, 256) return config @@ -1227,7 +1371,7 @@ def iql_algo_config_modifier(config): # "hopper-medium-replay-v2", # "walker2d-medium-replay-v2", ] - d4rl_json_paths = Config() # use for convenient nested dict + d4rl_json_paths = Config() # use for convenient nested dict for task_name in d4rl_tasks: for algo_name in ["bcq", "cql", "td3_bc", "iql"]: config = config_factory(algo_name=algo_name) @@ -1239,7 +1383,9 @@ def iql_algo_config_modifier(config): config.experiment = ref_config.experiment config.train = ref_config.train config.observation = ref_config.observation - config.train.hdf5_normalize_obs = False # only TD3-BC uses observation normalization + config.train.hdf5_normalize_obs = ( + False # only TD3-BC uses observation normalization + ) # modify algo section for d4rl defaults if algo_name == "bcq": @@ -1258,9 +1404,19 @@ def iql_algo_config_modifier(config): base_output_dir_for_algo = "../{}_trained_models".format(algo_name) else: base_output_dir_for_algo = base_output_dir - config.train.output_dir = os.path.join(base_output_dir_for_algo, "d4rl", algo_name, task_name, "trained_models") - config.train.data = os.path.join(base_dataset_dir, "d4rl", "converted", - "{}.hdf5".format(task_name.replace("-", "_"))) + config.train.output_dir = os.path.join( + base_output_dir_for_algo, + "d4rl", + algo_name, + task_name, + "trained_models", + ) + config.train.data = os.path.join( + base_dataset_dir, + "d4rl", + "converted", + "{}.hdf5".format(task_name.replace("-", "_")), + ) # save config to json file dir_to_save = os.path.join(base_config_dir, "d4rl", task_name) @@ -1316,7 +1472,7 @@ def iql_algo_config_modifier(config): # algo to modifier algo_to_modifier = 
dict( - bc=modify_bc_config_for_dataset, + bc=modify_bc_config_for_dataset, bc_rnn=modify_bc_rnn_config_for_dataset, bcq=modify_bcq_config_for_dataset, cql=modify_cql_config_for_dataset, @@ -1335,13 +1491,13 @@ def iql_algo_config_modifier(config): ) # generate configs for each experiment name - config_json_paths = Config() # use for convenient nested dict + config_json_paths = Config() # use for convenient nested dict for exp_name in exp_name_to_generator: config_json_paths[exp_name] = exp_name_to_generator[exp_name]( - base_config_dir=generated_configs_base_dir, - base_dataset_dir=datasets_base_dir, - base_output_dir=output_base_dir, - algo_to_config_modifier=algo_to_modifier, + base_config_dir=generated_configs_base_dir, + base_dataset_dir=datasets_base_dir, + base_output_dir=output_base_dir, + algo_to_config_modifier=algo_to_modifier, ) # write output shell scripts @@ -1361,9 +1517,15 @@ def iql_algo_config_modifier(config): f.write("# dataset type: {}\n".format(dataset_type)) if len(hdf5_type) > 0: f.write("# hdf5 type: {}\n".format(hdf5_type)) - for algo_name in config_json_paths[exp_name][task][dataset_type][hdf5_type]: + for algo_name in config_json_paths[exp_name][task][ + dataset_type + ][hdf5_type]: # f.write("# {}\n".format(algo_name)) - exp_json_path = config_json_paths[exp_name][task][dataset_type][hdf5_type][algo_name] - cmd = "python {} --config {}\n".format(train_script_loc, exp_json_path) + exp_json_path = config_json_paths[exp_name][task][ + dataset_type + ][hdf5_type][algo_name] + cmd = "python {} --config {}\n".format( + train_script_loc, exp_json_path + ) f.write(cmd) f.write("\n") diff --git a/robomimic/scripts/get_dataset_info.py b/robomimic/scripts/get_dataset_info.py index 9349ed8a..3fc88e00 100644 --- a/robomimic/scripts/get_dataset_info.py +++ b/robomimic/scripts/get_dataset_info.py @@ -22,6 +22,7 @@ # run script only on validation data python get_dataset_info.py --dataset ../../tests/assets/test.hdf5 --filter_key valid """ + import h5py import json import argparse @@ -43,7 +44,7 @@ ) parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="verbose output", ) args = parser.parse_args() @@ -55,7 +56,9 @@ if filter_key is not None: # use the demonstrations from the filter key instead print("NOTE: using filter key {}".format(filter_key)) - demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]) + demos = sorted( + [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])] + ) else: # use all demonstrations demos = sorted(list(f["data"].keys())) @@ -64,7 +67,9 @@ if "mask" in f: all_filter_keys = {} for fk in f["mask"]: - fk_demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])]) + fk_demos = sorted( + [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])] + ) all_filter_keys[fk] = fk_demos # put demonstration list in increasing episode order @@ -103,7 +108,11 @@ if all_filter_keys is not None: print("==== Filter Key Contents ====") for fk in all_filter_keys: - print("filter_key {} with {} demos: {}".format(fk, len(all_filter_keys[fk]), all_filter_keys[fk])) + print( + "filter_key {} with {} demos: {}".format( + fk, len(all_filter_keys[fk]), all_filter_keys[fk] + ) + ) print("") env_meta = json.loads(f["data"].attrs["env_args"]) print("==== Env Meta ====") @@ -112,13 +121,19 @@ print("==== Dataset Structure ====") for ep in demos: - print("episode {} with {} transitions".format(ep, f["data/{}".format(ep)].attrs["num_samples"])) + print( + 
"episode {} with {} transitions".format( + ep, f["data/{}".format(ep)].attrs["num_samples"] + ) + ) for k in f["data/{}".format(ep)]: if k in ["obs", "next_obs"]: print(" key: {}".format(k)) for obs_k in f["data/{}/{}".format(ep, k)]: shape = f["data/{}/{}/{}".format(ep, k, obs_k)].shape - print(" observation key {} with shape {}".format(obs_k, shape)) + print( + " observation key {} with shape {}".format(obs_k, shape) + ) elif isinstance(f["data/{}/{}".format(ep, k)], h5py.Dataset): key_shape = f["data/{}/{}".format(ep, k)].shape print(" key: {} with shape {}".format(k, key_shape)) @@ -130,5 +145,9 @@ # maybe display error message print("") - if (action_min < -1.) or (action_max > 1.): - raise Exception("Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format(action_min, action_max)) + if (action_min < -1.0) or (action_max > 1.0): + raise Exception( + "Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format( + action_min, action_max + ) + ) diff --git a/robomimic/scripts/hyperparam_helper.py b/robomimic/scripts/hyperparam_helper.py index 870c739e..e9fb437b 100644 --- a/robomimic/scripts/hyperparam_helper.py +++ b/robomimic/scripts/hyperparam_helper.py @@ -35,6 +35,7 @@ # assumes that /tmp/gen_configs/base.json has already been created (see quickstart section of docs for an example) python hyperparam_helper.py --config /tmp/gen_configs/base.json --script /tmp/gen_configs/out.sh """ + import argparse import robomimic @@ -52,58 +53,58 @@ def make_generator(config_file, script_file): # use RNN with horizon 10 generator.add_param( key="algo.rnn.enabled", - name="", - group=0, + name="", + group=0, values=[True], ) generator.add_param( - key="train.seq_length", - name="", - group=0, - values=[10], + key="train.seq_length", + name="", + group=0, + values=[10], ) generator.add_param( key="algo.rnn.horizon", - name="", - group=0, - values=[10], + name="", + group=0, + values=[10], ) # LR - 1e-3, 1e-4 generator.add_param( - key="algo.optim_params.policy.learning_rate.initial", - name="plr", - group=1, - values=[1e-3, 1e-4], + key="algo.optim_params.policy.learning_rate.initial", + name="plr", + group=1, + values=[1e-3, 1e-4], ) # GMM y / n generator.add_param( - key="algo.gmm.enabled", - name="gmm", - group=2, - values=[True, False], + key="algo.gmm.enabled", + name="gmm", + group=2, + values=[True, False], value_names=["t", "f"], ) # RNN dim 400 + MLP dims (1024, 1024) vs. RNN dim 1000 + empty MLP dims () generator.add_param( - key="algo.rnn.hidden_dim", - name="rnnd", - group=3, + key="algo.rnn.hidden_dim", + name="rnnd", + group=3, values=[ - 400, + 400, 1000, - ], + ], ) generator.add_param( - key="algo.actor_layer_dims", - name="mlp", - group=3, + key="algo.actor_layer_dims", + name="mlp", + group=3, values=[ - [1024, 1024], + [1024, 1024], [], - ], + ], value_names=["1024", "0"], ) diff --git a/robomimic/scripts/playback_dataset.py b/robomimic/scripts/playback_dataset.py index 96cef1f2..148af36f 100644 --- a/robomimic/scripts/playback_dataset.py +++ b/robomimic/scripts/playback_dataset.py @@ -85,19 +85,19 @@ def playback_trajectory_with_env( - env, - initial_state, - states, - actions=None, - render=False, - video_writer=None, - video_skip=5, + env, + initial_state, + states, + actions=None, + render=False, + video_writer=None, + video_skip=5, camera_names=None, first=False, ): """ Helper function to playback a single trajectory using the simulator environment. - If @actions are not None, it will play them open-loop after loading the initial state. 
+ If @actions are not None, it will play them open-loop after loading the initial state. Otherwise, @states are loaded one by one. Args: @@ -114,7 +114,7 @@ def playback_trajectory_with_env( """ assert isinstance(env, EnvBase) - write_video = (video_writer is not None) + write_video = video_writer is not None video_count = 0 assert not (render and write_video) @@ -123,7 +123,7 @@ def playback_trajectory_with_env( env.reset_to(initial_state) traj_len = states.shape[0] - action_playback = (actions is not None) + action_playback = actions is not None if action_playback: assert states.shape[0] == actions.shape[0] @@ -137,7 +137,7 @@ def playback_trajectory_with_env( err = np.linalg.norm(states[i + 1] - state_playback) print("warning: playback diverged by {} at step {}".format(err, i)) else: - env.reset_to({"states" : states[i]}) + env.reset_to({"states": states[i]}) # on-screen render if render: @@ -148,8 +148,17 @@ def playback_trajectory_with_env( if video_count % video_skip == 0: video_img = [] for cam_name in camera_names: - video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name)) - video_img = np.concatenate(video_img, axis=1) # concatenate horizontally + video_img.append( + env.render( + mode="rgb_array", + height=512, + width=512, + camera_name=cam_name, + ) + ) + video_img = np.concatenate( + video_img, axis=1 + ) # concatenate horizontally video_writer.append_data(video_img) video_count += 1 @@ -159,8 +168,8 @@ def playback_trajectory_with_env( def playback_trajectory_with_obs( traj_grp, - video_writer, - video_skip=5, + video_writer, + video_skip=5, image_names=None, depth_names=None, first=False, @@ -178,20 +187,33 @@ def playback_trajectory_with_obs( depth_names (list): determines which depth observations are used for rendering (if any). first (bool): if True, only use the first frame of each episode. 
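
    A minimal usage sketch (the dataset path and camera name are hypothetical;
    follows the same hdf5 layout and imageio writer used elsewhere in this script):

        import h5py
        import imageio

        f = h5py.File("/path/to/demo.hdf5", "r")
        video_writer = imageio.get_writer("/tmp/playback.mp4", fps=20)
        playback_trajectory_with_obs(
            traj_grp=f["data/demo_0"],
            video_writer=video_writer,
            image_names=["agentview_image"],
        )
        video_writer.close()
        f.close()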
""" - assert image_names is not None, "error: must specify at least one image observation to use in @image_names" + assert ( + image_names is not None + ), "error: must specify at least one image observation to use in @image_names" video_count = 0 if depth_names is not None: # compute min and max depth value across trajectory for normalization - depth_min = { k : traj_grp["obs/{}".format(k)][:].min() for k in depth_names } - depth_max = { k : traj_grp["obs/{}".format(k)][:].max() for k in depth_names } + depth_min = {k: traj_grp["obs/{}".format(k)][:].min() for k in depth_names} + depth_max = {k: traj_grp["obs/{}".format(k)][:].max() for k in depth_names} traj_len = traj_grp["actions"].shape[0] for i in range(traj_len): if video_count % video_skip == 0: # concatenate image obs together im = [traj_grp["obs/{}".format(k)][i] for k in image_names] - depth = [depth_to_rgb(traj_grp["obs/{}".format(k)][i], depth_min=depth_min[k], depth_max=depth_max[k]) for k in depth_names] if depth_names is not None else [] + depth = ( + [ + depth_to_rgb( + traj_grp["obs/{}".format(k)][i], + depth_min=depth_min[k], + depth_max=depth_max[k], + ) + for k in depth_names + ] + if depth_names is not None + else [] + ) frame = np.concatenate(im + depth, axis=1) video_writer.append_data(frame) video_count += 1 @@ -202,8 +224,8 @@ def playback_trajectory_with_obs( def playback_dataset(args): # some arg checking - write_video = (args.video_path is not None) - assert not (args.render and write_video) # either on-screen or video but not both + write_video = args.video_path is not None + assert not (args.render and write_video) # either on-screen or video but not both # Auto-fill camera rendering info if not specified if args.render_image_names is None: @@ -218,25 +240,31 @@ def playback_dataset(args): if args.use_obs: assert write_video, "playback with observations can only write to video" - assert not args.use_actions, "playback with observations is offline and does not support action playback" + assert ( + not args.use_actions + ), "playback with observations is offline and does not support action playback" if args.render_depth_names is not None: - assert args.use_obs, "depth observations can only be visualized from observations currently" + assert ( + args.use_obs + ), "depth observations can only be visualized from observations currently" # create environment only if not playing back with observations if not args.use_obs: - # need to make sure ObsUtils knows which observations are images, but it doesn't matter + # need to make sure ObsUtils knows which observations are images, but it doesn't matter # for playback since observations are unused. Pass a dummy spec here. 
dummy_spec = dict( obs=dict( - low_dim=["robot0_eef_pos"], - rgb=[], - ), + low_dim=["robot0_eef_pos"], + rgb=[], + ), ) ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec) env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset) - env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render=args.render, render_offscreen=write_video) + env = EnvUtils.create_env_from_metadata( + env_meta=env_meta, render=args.render, render_offscreen=write_video + ) # some operations for playback are robosuite-specific, so determine if this environment is a robosuite env is_robosuite_env = EnvUtils.is_robosuite_env(env_meta) @@ -246,7 +274,10 @@ def playback_dataset(args): # list of all demonstration episodes (sorted in increasing number order) if args.filter_key is not None: print("using filter key: {}".format(args.filter_key)) - demos = [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(args.filter_key)])] + demos = [ + elem.decode("utf-8") + for elem in np.array(f["mask/{}".format(args.filter_key)]) + ] else: demos = list(f["data"].keys()) inds = np.argsort([int(elem[5:]) for elem in demos]) @@ -254,7 +285,7 @@ def playback_dataset(args): # maybe reduce the number of demonstrations to playback if args.n is not None: - demos = demos[:args.n] + demos = demos[: args.n] # maybe dump video video_writer = None @@ -267,8 +298,8 @@ def playback_dataset(args): if args.use_obs: playback_trajectory_with_obs( - traj_grp=f["data/{}".format(ep)], - video_writer=video_writer, + traj_grp=f["data/{}".format(ep)], + video_writer=video_writer, video_skip=args.video_skip, image_names=args.render_image_names, depth_names=args.render_depth_names, @@ -288,11 +319,12 @@ def playback_dataset(args): actions = f["data/{}/actions".format(ep)][()] playback_trajectory_with_env( - env=env, - initial_state=initial_state, - states=states, actions=actions, - render=args.render, - video_writer=video_writer, + env=env, + initial_state=initial_state, + states=states, + actions=actions, + render=args.render, + video_writer=video_writer, video_skip=args.video_skip, camera_names=args.render_image_names, first=args.first, @@ -328,21 +360,21 @@ def playback_dataset(args): # Use image observations instead of doing playback using the simulator env. parser.add_argument( "--use-obs", - action='store_true', + action="store_true", help="visualize trajectories with dataset image observations instead of simulator", ) # Playback stored dataset actions open-loop instead of loading from simulation states. parser.add_argument( "--use-actions", - action='store_true', + action="store_true", help="use open-loop action playback instead of loading sim states", ) # Whether to render playback to screen parser.add_argument( "--render", - action='store_true', + action="store_true", help="on-screen rendering", ) @@ -366,25 +398,25 @@ def playback_dataset(args): parser.add_argument( "--render_image_names", type=str, - nargs='+', + nargs="+", default=None, help="(optional) camera name(s) / image observation(s) to use for rendering on-screen or to video. 
Default is" - "None, which corresponds to a predefined camera for each env type", + "None, which corresponds to a predefined camera for each env type", ) # depth observations to use for writing to video parser.add_argument( "--render_depth_names", type=str, - nargs='+', + nargs="+", default=None, - help="(optional) depth observation(s) to use for rendering to video" + help="(optional) depth observation(s) to use for rendering to video", ) # Only use the first frame of each episode parser.add_argument( "--first", - action='store_true', + action="store_true", help="use first frame of each episode", ) diff --git a/robomimic/scripts/run_trained_agent.py b/robomimic/scripts/run_trained_agent.py index 95bddb21..22698487 100644 --- a/robomimic/scripts/run_trained_agent.py +++ b/robomimic/scripts/run_trained_agent.py @@ -51,6 +51,7 @@ --n_rollouts 50 --horizon 400 --seed 0 \ --dataset_path /path/to/output.hdf5 """ + import argparse import json import h5py @@ -69,9 +70,18 @@ from robomimic.algo import RolloutPolicy -def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, return_obs=False, camera_names=None): +def rollout( + policy, + env, + horizon, + render=False, + video_writer=None, + video_skip=5, + return_obs=False, + camera_names=None, +): """ - Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video, + Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video, and returns the rollout trajectory. Args: @@ -81,9 +91,9 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, render (bool): whether to render rollout on-screen video_writer (imageio writer): if provided, use to write rollout to video video_skip (int): how often to write video frames - return_obs (bool): if True, return possibly high-dimensional observations along the trajectoryu. - They are excluded by default because the low-dimensional simulation states should be a minimal - representation of the environment. + return_obs (bool): if True, return possibly high-dimensional observations along the trajectoryu. + They are excluded by default because the low-dimensional simulation states should be a minimal + representation of the environment. camera_names (list): determines which camera(s) are used for rendering. Pass more than one to output a video with multiple camera views concatenated horizontally. @@ -104,8 +114,10 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, results = {} video_count = 0 # video frame counter - total_reward = 0. 
- traj = dict(actions=[], rewards=[], dones=[], states=[], initial_state_dict=state_dict) + total_reward = 0.0 + traj = dict( + actions=[], rewards=[], dones=[], states=[], initial_state_dict=state_dict + ) if return_obs: # store observations too traj.update(dict(obs=[], next_obs=[])) @@ -129,8 +141,17 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, if video_count % video_skip == 0: video_img = [] for cam_name in camera_names: - video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name)) - video_img = np.concatenate(video_img, axis=1) # concatenate horizontally + video_img.append( + env.render( + mode="rgb_array", + height=512, + width=512, + camera_name=cam_name, + ) + ) + video_img = np.concatenate( + video_img, axis=1 + ) # concatenate horizontally video_writer.append_data(video_img) video_count += 1 @@ -162,7 +183,9 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, if return_obs: # convert list of dict to dict of list for obs dictionaries (for convenient writes to hdf5 dataset) traj["obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list(traj["obs"]) - traj["next_obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list(traj["next_obs"]) + traj["next_obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list( + traj["next_obs"] + ) # list to numpy array for k in traj: @@ -179,8 +202,8 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, def run_trained_agent(args): # some arg checking - write_video = (args.video_path is not None) - assert not (args.render and write_video) # either on-screen or video but not both + write_video = args.video_path is not None + assert not (args.render and write_video) # either on-screen or video but not both if args.render: # on-screen rendering can only support one camera assert len(args.camera_names) == 1 @@ -192,7 +215,9 @@ def run_trained_agent(args): device = TorchUtils.get_torch_device(try_to_use_cuda=True) # restore policy - policy, ckpt_dict = FileUtils.policy_from_checkpoint(ckpt_path=ckpt_path, device=device, verbose=True) + policy, ckpt_dict = FileUtils.policy_from_checkpoint( + ckpt_path=ckpt_path, device=device, verbose=True + ) # read rollout settings rollout_num_episodes = args.n_rollouts @@ -204,10 +229,10 @@ def run_trained_agent(args): # create environment from saved checkpoint env, _ = FileUtils.env_from_checkpoint( - ckpt_dict=ckpt_dict, - env_name=args.env, - render=args.render, - render_offscreen=(args.video_path is not None), + ckpt_dict=ckpt_dict, + env_name=args.env, + render=args.render, + render_offscreen=(args.video_path is not None), verbose=True, ) @@ -222,7 +247,7 @@ def run_trained_agent(args): video_writer = imageio.get_writer(args.video_path, fps=20) # maybe open hdf5 to write rollouts - write_dataset = (args.dataset_path is not None) + write_dataset = args.dataset_path is not None if write_dataset: data_writer = h5py.File(args.dataset_path, "w") data_grp = data_writer.create_group("data") @@ -231,12 +256,12 @@ def run_trained_agent(args): rollout_stats = [] for i in range(rollout_num_episodes): stats, traj = rollout( - policy=policy, - env=env, - horizon=rollout_horizon, - render=args.render, - video_writer=video_writer, - video_skip=args.video_skip, + policy=policy, + env=env, + horizon=rollout_horizon, + render=args.render, + video_writer=video_writer, + video_skip=args.video_skip, return_obs=(write_dataset and args.dataset_obs), camera_names=args.camera_names, ) @@ -251,17 +276,25 @@ def 
run_trained_agent(args): ep_data_grp.create_dataset("dones", data=np.array(traj["dones"])) if args.dataset_obs: for k in traj["obs"]: - ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k])) - ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k])) + ep_data_grp.create_dataset( + "obs/{}".format(k), data=np.array(traj["obs"][k]) + ) + ep_data_grp.create_dataset( + "next_obs/{}".format(k), data=np.array(traj["next_obs"][k]) + ) # episode metadata if "model" in traj["initial_state_dict"]: - ep_data_grp.attrs["model_file"] = traj["initial_state_dict"]["model"] # model xml for this episode - ep_data_grp.attrs["num_samples"] = traj["actions"].shape[0] # number of transitions in this episode + ep_data_grp.attrs["model_file"] = traj["initial_state_dict"][ + "model" + ] # model xml for this episode + ep_data_grp.attrs["num_samples"] = traj["actions"].shape[ + 0 + ] # number of transitions in this episode total_samples += traj["actions"].shape[0] rollout_stats = TensorUtils.list_of_flat_dict_to_dict_of_list(rollout_stats) - avg_rollout_stats = { k : np.mean(rollout_stats[k]) for k in rollout_stats } + avg_rollout_stats = {k: np.mean(rollout_stats[k]) for k in rollout_stats} avg_rollout_stats["Num_Success"] = np.sum(rollout_stats["Success_Rate"]) print("Average Rollout Stats") print(json.dumps(avg_rollout_stats, indent=4)) @@ -272,7 +305,9 @@ def run_trained_agent(args): if write_dataset: # global metadata data_grp.attrs["total"] = total_samples - data_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) # environment info + data_grp.attrs["env_args"] = json.dumps( + env.serialize(), indent=4 + ) # environment info data_writer.close() print("Wrote dataset trajectories to {}".format(args.dataset_path)) @@ -316,7 +351,7 @@ def run_trained_agent(args): # Whether to render rollouts to screen parser.add_argument( "--render", - action='store_true', + action="store_true", help="on-screen rendering", ) @@ -340,7 +375,7 @@ def run_trained_agent(args): parser.add_argument( "--camera_names", type=str, - nargs='+', + nargs="+", default=["agentview"], help="(optional) camera name(s) to use for rendering on-screen or to video", ) @@ -356,7 +391,7 @@ def run_trained_agent(args): # If True and @dataset_path is supplied, will write possibly high-dimensional observations to dataset. parser.add_argument( "--dataset_obs", - action='store_true', + action="store_true", help="include possibly high-dimensional observations in output dataset hdf5 file (by default,\ observations are excluded and only simulator states are saved)", ) @@ -371,4 +406,3 @@ def run_trained_agent(args): args = parser.parse_args() run_trained_agent(args) - diff --git a/robomimic/scripts/setup_macros.py b/robomimic/scripts/setup_macros.py index 92c47271..5ae57d92 100644 --- a/robomimic/scripts/setup_macros.py +++ b/robomimic/scripts/setup_macros.py @@ -21,7 +21,9 @@ print("{} does not exist! Aborting...".format(macros_path)) if os.path.exists(macros_private_path): - ans = input("{} already exists! \noverwrite? (y/n)\n".format(macros_private_path)) + ans = input( + "{} already exists! \noverwrite? 
(y/n)\n".format(macros_private_path) + ) if ans == "y": print("REMOVING") diff --git a/robomimic/scripts/split_train_val.py b/robomimic/scripts/split_train_val.py index 9d0502ea..41d06e41 100644 --- a/robomimic/scripts/split_train_val.py +++ b/robomimic/scripts/split_train_val.py @@ -40,7 +40,9 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): f = h5py.File(hdf5_path, "r") if filter_key is not None: print("using filter key: {}".format(filter_key)) - demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]) + demos = sorted( + [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])] + ) else: demos = sorted(list(f["data"].keys())) num_demos = len(demos) @@ -50,14 +52,18 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): num_demos = len(demos) num_val = int(val_ratio * num_demos) mask = np.zeros(num_demos) - mask[:num_val] = 1. + mask[:num_val] = 1.0 np.random.shuffle(mask) mask = mask.astype(int) train_inds = (1 - mask).nonzero()[0] valid_inds = mask.nonzero()[0] train_keys = [demos[i] for i in train_inds] valid_keys = [demos[i] for i in valid_inds] - print("{} validation demonstrations out of {} total demonstrations.".format(num_val, num_demos)) + print( + "{} validation demonstrations out of {} total demonstrations.".format( + num_val, num_demos + ) + ) # pass mask to generate split name_1 = "train" @@ -66,8 +72,12 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): name_1 = "{}_{}".format(filter_key, name_1) name_2 = "{}_{}".format(filter_key, name_2) - train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1) - valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2) + train_lengths = create_hdf5_filter_key( + hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1 + ) + valid_lengths = create_hdf5_filter_key( + hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2 + ) print("Total number of train samples: {}".format(np.sum(train_lengths))) print("Average number of train samples {}".format(np.mean(train_lengths))) @@ -92,14 +102,13 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): splitting the full set of trajectories", ) parser.add_argument( - "--ratio", - type=float, - default=0.1, - help="validation ratio, in (0, 1)" + "--ratio", type=float, default=0.1, help="validation ratio, in (0, 1)" ) args = parser.parse_args() # seed to make sure results are consistent np.random.seed(0) - split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key) + split_train_val_from_hdf5( + args.dataset, val_ratio=args.ratio, filter_key=args.filter_key + ) diff --git a/robomimic/scripts/train.py b/robomimic/scripts/train.py index 210b4172..1b603740 100644 --- a/robomimic/scripts/train.py +++ b/robomimic/scripts/train.py @@ -60,7 +60,7 @@ def train(config, device): if config.experiment.logging.terminal_output_to_txt: # log stdout and stderr to a text file - logger = PrintLogger(os.path.join(log_dir, 'log.txt')) + logger = PrintLogger(os.path.join(log_dir, "log.txt")) sys.stdout = logger sys.stderr = logger @@ -76,14 +76,17 @@ def train(config, device): print("\n============= Loaded Environment Metadata =============") env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=config.train.data) shape_meta = FileUtils.get_shape_metadata_from_dataset( - dataset_path=config.train.data, - all_obs_keys=config.all_obs_keys, - 
verbose=True
+        dataset_path=config.train.data, all_obs_keys=config.all_obs_keys, verbose=True
     )
 
     if config.experiment.env is not None:
         env_meta["env_name"] = config.experiment.env
-        print("=" * 30 + "\n" + "Replacing Env to {}\n".format(env_meta["env_name"]) + "=" * 30)
+        print(
+            "=" * 30
+            + "\n"
+            + "Replacing Env to {}\n".format(env_meta["env_name"])
+            + "=" * 30
+        )
 
     # create environment
     envs = OrderedDict()
@@ -98,13 +101,15 @@ def train(config, device):
         for env_name in env_names:
             env = EnvUtils.create_env_from_metadata(
                 env_meta=env_meta,
-                env_name=env_name, 
-                render=False, 
+                env_name=env_name,
+                render=False,
                 render_offscreen=config.experiment.render_video,
                 use_image_obs=shape_meta["use_images"],
                 use_depth_obs=shape_meta["use_depths"],
             )
-            env = EnvUtils.wrap_env_from_config(env, config=config) # apply environment warpper, if applicable
+            env = EnvUtils.wrap_env_from_config(
+                env, config=config
+            )  # apply environment wrapper, if applicable
             envs[env.name] = env
             print(envs[env.name])
 
@@ -124,9 +129,9 @@ def train(config, device):
         ac_dim=shape_meta["ac_dim"],
         device=device,
     )
-    
+
     # save the config as a json file
-    with open(os.path.join(log_dir, '..', 'config.json'), 'w') as outfile:
+    with open(os.path.join(log_dir, "..", "config.json"), "w") as outfile:
         json.dump(config, outfile, indent=4)
 
     print("\n============= Model Summary =============")
@@ -135,7 +140,8 @@ def train(config, device):
 
     # load training data
     trainset, validset = TrainUtils.load_data_for_training(
-        config, obs_keys=shape_meta["all_obs_keys"])
+        config, obs_keys=shape_meta["all_obs_keys"]
+    )
     train_sampler = trainset.get_dataset_sampler()
     print("\n============= Training Dataset =============")
     print(trainset)
@@ -157,7 +163,7 @@ def train(config, device):
         batch_size=config.train.batch_size,
         shuffle=(train_sampler is None),
         num_workers=config.train.num_data_workers,
-        drop_last=True
+        drop_last=True,
     )
 
     if config.experiment.validate:
@@ -170,29 +176,35 @@ def train(config, device):
             batch_size=config.train.batch_size,
             shuffle=(valid_sampler is None),
             num_workers=num_workers,
-            drop_last=True
+            drop_last=True,
         )
     else:
         valid_loader = None
 
     # print all warnings before training begins
     print("*" * 50)
-    print("Warnings generated by robomimic have been duplicated here (from above) for convenience. Please check them carefully.")
+    print(
+        "Warnings generated by robomimic have been duplicated here (from above) for convenience. Please check them carefully."
+    )
     flush_warnings()
     print("*" * 50)
     print("")
 
     # main training loop
     best_valid_loss = None
-    best_return = {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None
-    best_success_rate = {k: -1.
for k in envs} if config.experiment.rollout.enabled else None + best_return = ( + {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None + ) + best_success_rate = ( + {k: -1.0 for k in envs} if config.experiment.rollout.enabled else None + ) last_ckpt_time = time.time() # number of learning steps per epoch (defaults to a full dataset pass) train_num_steps = config.experiment.epoch_every_n_steps valid_num_steps = config.experiment.validation_epoch_every_n_steps - for epoch in range(1, config.train.num_epochs + 1): # epoch numbers start at 1 + for epoch in range(1, config.train.num_epochs + 1): # epoch numbers start at 1 step_log = TrainUtils.run_epoch( model=model, data_loader=train_loader, @@ -208,12 +220,16 @@ def train(config, device): # check for recurring checkpoint saving conditions should_save_ckpt = False if config.experiment.save.enabled: - time_check = (config.experiment.save.every_n_seconds is not None) and \ - (time.time() - last_ckpt_time > config.experiment.save.every_n_seconds) - epoch_check = (config.experiment.save.every_n_epochs is not None) and \ - (epoch > 0) and (epoch % config.experiment.save.every_n_epochs == 0) - epoch_list_check = (epoch in config.experiment.save.epochs) - should_save_ckpt = (time_check or epoch_check or epoch_list_check) + time_check = (config.experiment.save.every_n_seconds is not None) and ( + time.time() - last_ckpt_time > config.experiment.save.every_n_seconds + ) + epoch_check = ( + (config.experiment.save.every_n_epochs is not None) + and (epoch > 0) + and (epoch % config.experiment.save.every_n_epochs == 0) + ) + epoch_list_check = epoch in config.experiment.save.epochs + should_save_ckpt = time_check or epoch_check or epoch_list_check ckpt_reason = None if should_save_ckpt: last_ckpt_time = time.time() @@ -230,7 +246,13 @@ def train(config, device): # Evaluate the model on validation set if config.experiment.validate: with torch.no_grad(): - step_log = TrainUtils.run_epoch(model=model, data_loader=valid_loader, epoch=epoch, validate=True, num_steps=valid_num_steps) + step_log = TrainUtils.run_epoch( + model=model, + data_loader=valid_loader, + epoch=epoch, + validate=True, + num_steps=valid_num_steps, + ) for k, v in step_log.items(): if k.startswith("Time_"): data_logger.record("Timing_Stats/Valid_{}".format(k[5:]), v, epoch) @@ -242,9 +264,14 @@ def train(config, device): # save checkpoint if achieve new best validation loss valid_check = "Loss" in step_log - if valid_check and (best_valid_loss is None or (step_log["Loss"] <= best_valid_loss)): + if valid_check and ( + best_valid_loss is None or (step_log["Loss"] <= best_valid_loss) + ): best_valid_loss = step_log["Loss"] - if config.experiment.save.enabled and config.experiment.save.on_best_validation: + if ( + config.experiment.save.enabled + and config.experiment.save.on_best_validation + ): epoch_ckpt_name += "_best_validation_{}".format(best_valid_loss) should_save_ckpt = True ckpt_reason = "valid" if ckpt_reason is None else ckpt_reason @@ -253,11 +280,19 @@ def train(config, device): # do rollouts at fixed rate or if it's time to save a new ckpt video_paths = None - rollout_check = (epoch % config.experiment.rollout.rate == 0) or (should_save_ckpt and ckpt_reason == "time") - if config.experiment.rollout.enabled and (epoch > config.experiment.rollout.warmstart) and rollout_check: + rollout_check = (epoch % config.experiment.rollout.rate == 0) or ( + should_save_ckpt and ckpt_reason == "time" + ) + if ( + config.experiment.rollout.enabled + and (epoch > 
config.experiment.rollout.warmstart) + and rollout_check + ): # wrap model as a RolloutPolicy to prepare for rollouts - rollout_model = RolloutPolicy(model, obs_normalization_stats=obs_normalization_stats) + rollout_model = RolloutPolicy( + model, obs_normalization_stats=obs_normalization_stats + ) num_episodes = config.experiment.rollout.n all_rollout_logs, video_paths = TrainUtils.rollout_with_stats( @@ -278,12 +313,25 @@ def train(config, device): rollout_logs = all_rollout_logs[env_name] for k, v in rollout_logs.items(): if k.startswith("Time_"): - data_logger.record("Timing_Stats/Rollout_{}_{}".format(env_name, k[5:]), v, epoch) + data_logger.record( + "Timing_Stats/Rollout_{}_{}".format(env_name, k[5:]), + v, + epoch, + ) else: - data_logger.record("Rollout/{}/{}".format(k, env_name), v, epoch, log_stats=True) - - print("\nEpoch {} Rollouts took {}s (avg) with results:".format(epoch, rollout_logs["time"])) - print('Env: {}'.format(env_name)) + data_logger.record( + "Rollout/{}/{}".format(k, env_name), + v, + epoch, + log_stats=True, + ) + + print( + "\nEpoch {} Rollouts took {}s (avg) with results:".format( + epoch, rollout_logs["time"] + ) + ) + print("Env: {}".format(env_name)) print(json.dumps(rollout_logs, sort_keys=True, indent=4)) # checkpoint and video saving logic @@ -298,12 +346,16 @@ def train(config, device): best_return = updated_stats["best_return"] best_success_rate = updated_stats["best_success_rate"] epoch_ckpt_name = updated_stats["epoch_ckpt_name"] - should_save_ckpt = (config.experiment.save.enabled and updated_stats["should_save_ckpt"]) or should_save_ckpt + should_save_ckpt = ( + config.experiment.save.enabled and updated_stats["should_save_ckpt"] + ) or should_save_ckpt if updated_stats["ckpt_reason"] is not None: ckpt_reason = updated_stats["ckpt_reason"] # Only keep saved videos if the ckpt should be saved (but not because of validation score) - should_save_video = (should_save_ckpt and (ckpt_reason != "valid")) or config.experiment.keep_all_videos + should_save_video = ( + should_save_ckpt and (ckpt_reason != "valid") + ) or config.experiment.keep_all_videos if video_paths is not None and not should_save_video: for env_name in video_paths: os.remove(video_paths[env_name]) @@ -332,7 +384,7 @@ def train(config, device): def main(args): if args.config is not None: - ext_cfg = json.load(open(args.config, 'r')) + ext_cfg = json.load(open(args.config, "r")) config = config_factory(ext_cfg["algo_name"]) # update config with external json - this will throw errors if # the external config has keys not present in the base algo config @@ -419,10 +471,9 @@ def main(args): # debug mode parser.add_argument( "--debug", - action='store_true', - help="set this flag to run a quick training run for debugging purposes" + action="store_true", + help="set this flag to run a quick training run for debugging purposes", ) args = parser.parse_args() main(args) - diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index e4fc6c37..322fdea8 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -2,6 +2,7 @@ This file contains Dataset classes that are used by torch dataloaders to fetch batches from hdf5 files. """ + import os import h5py import numpy as np @@ -17,6 +18,7 @@ import scipy import matplotlib.pyplot as plt + class SequenceDataset(torch.utils.data.Dataset): def __init__( self, @@ -64,10 +66,10 @@ def __init__( goal_mode (str): either "last" or None. 
Defaults to None, which is to not fetch goals - hdf5_cache_mode (str): one of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 - in memory - this is by far the fastest for data loading. Set to "low_dim" to cache all - non-image data. Set to None to use no caching - in this case, every batch sample is - retrieved via file i/o. You should almost never set this to None, even for large + hdf5_cache_mode (str): one of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 + in memory - this is by far the fastest for data loading. Set to "low_dim" to cache all + non-image data. Set to None to use no caching - in this case, every batch sample is + retrieved via file i/o. You should almost never set this to None, even for large image datasets. hdf5_use_swmr (bool): whether to use swmr feature when opening the hdf5 file. This ensures @@ -138,14 +140,16 @@ def __init__( hdf5_file=self.hdf5_file, obs_keys=self.obs_keys_in_memory, dataset_keys=self.dataset_keys, - load_next_obs=self.load_next_obs + load_next_obs=self.load_next_obs, ) if self.hdf5_cache_mode == "all": # cache getitem calls for even more speedup. We don't do this for # "low-dim" since image observations require calls to getitem anyways. print("SequenceDataset: caching get_item calls...") - self.getitem_cache = [self.get_item(i) for i in LogUtils.custom_tqdm(range(len(self)))] + self.getitem_cache = [ + self.get_item(i) for i in LogUtils.custom_tqdm(range(len(self))) + ] # don't need the previous cache anymore del self.hdf5_cache @@ -161,15 +165,20 @@ def load_demo_info(self, filter_by_attribute=None, demos=None): filter_by_attribute (str): if provided, use the provided filter key to select a subset of demonstration trajectories to load - demos (list): list of demonstration keys to load from the hdf5 file. If - omitted, all demos in the file (or under the @filter_by_attribute + demos (list): list of demonstration keys to load from the hdf5 file. If + omitted, all demos in the file (or under the @filter_by_attribute filter key) are used. 
""" # filter demo trajectory by mask if demos is not None: self.demos = demos elif filter_by_attribute is not None: - self.demos = [elem.decode("utf-8") for elem in np.array(self.hdf5_file["mask/{}".format(filter_by_attribute)][:])] + self.demos = [ + elem.decode("utf-8") + for elem in np.array( + self.hdf5_file["mask/{}".format(filter_by_attribute)][:] + ) + ] else: self.demos = list(self.hdf5_file["data"].keys()) @@ -194,15 +203,17 @@ def load_demo_info(self, filter_by_attribute=None, demos=None): num_sequences = demo_length # determine actual number of sequences taking into account whether to pad for frame_stack and seq_length if not self.pad_frame_stack: - num_sequences -= (self.n_frame_stack - 1) + num_sequences -= self.n_frame_stack - 1 if not self.pad_seq_length: - num_sequences -= (self.seq_length - 1) + num_sequences -= self.seq_length - 1 if self.pad_seq_length: assert demo_length >= 1 # sequence needs to have at least one sample num_sequences = max(num_sequences, 1) else: - assert num_sequences >= 1 # assume demo_length >= (self.n_frame_stack - 1 + self.seq_length) + assert ( + num_sequences >= 1 + ) # assume demo_length >= (self.n_frame_stack - 1 + self.seq_length) for _ in range(num_sequences): self._index_to_demo_id[self.total_num_sequences] = ep @@ -215,7 +226,13 @@ def hdf5_file(self): """ if self._hdf5_file is None: print("opening hdf5") - self._hdf5_file = h5py.File(self.hdf5_path, 'r', swmr=self.hdf5_use_swmr, libver='latest', rdcc_nbytes=1e10) + self._hdf5_file = h5py.File( + self.hdf5_path, + "r", + swmr=self.hdf5_use_swmr, + libver="latest", + rdcc_nbytes=1e10, + ) return self._hdf5_file def close_and_delete_hdf5_handle(self): @@ -249,22 +266,38 @@ def __repr__(self): msg += "\tpad_seq_length={}\n\tpad_frame_stack={}\n\tgoal_mode={}\n" msg += "\tcache_mode={}\n" msg += "\tnum_demos={}\n\tnum_sequences={}\n)" - filter_key_str = self.filter_by_attribute if self.filter_by_attribute is not None else "none" + filter_key_str = ( + self.filter_by_attribute if self.filter_by_attribute is not None else "none" + ) goal_mode_str = self.goal_mode if self.goal_mode is not None else "none" - cache_mode_str = self.hdf5_cache_mode if self.hdf5_cache_mode is not None else "none" - msg = msg.format(self.hdf5_path, self.obs_keys, self.seq_length, filter_key_str, self.n_frame_stack, - self.pad_seq_length, self.pad_frame_stack, goal_mode_str, cache_mode_str, - self.n_demos, self.total_num_sequences) + cache_mode_str = ( + self.hdf5_cache_mode if self.hdf5_cache_mode is not None else "none" + ) + msg = msg.format( + self.hdf5_path, + self.obs_keys, + self.seq_length, + filter_key_str, + self.n_frame_stack, + self.pad_seq_length, + self.pad_frame_stack, + goal_mode_str, + cache_mode_str, + self.n_demos, + self.total_num_sequences, + ) return msg def __len__(self): """ - Ensure that the torch dataloader will do a complete pass through all sequences in + Ensure that the torch dataloader will do a complete pass through all sequences in the dataset before starting a new iteration. """ return self.total_num_sequences - def load_dataset_in_memory(self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs): + def load_dataset_in_memory( + self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs + ): """ Loads the hdf5 dataset into memory, preserving the structure of the file. 
Note that this differs from `self.getitem_cache`, which, if active, actually caches the outputs of the @@ -285,37 +318,57 @@ def load_dataset_in_memory(self, demo_list, hdf5_file, obs_keys, dataset_keys, l for ep in LogUtils.custom_tqdm(demo_list): all_data[ep] = {} all_data[ep]["attrs"] = {} - all_data[ep]["attrs"]["num_samples"] = hdf5_file["data/{}".format(ep)].attrs["num_samples"] + all_data[ep]["attrs"]["num_samples"] = hdf5_file[ + "data/{}".format(ep) + ].attrs["num_samples"] # get obs - all_data[ep]["obs"] = {k: hdf5_file["data/{}/obs/{}".format(ep, k)][()] for k in obs_keys} + all_data[ep]["obs"] = { + k: hdf5_file["data/{}/obs/{}".format(ep, k)][()] for k in obs_keys + } if load_next_obs: - all_data[ep]["next_obs"] = {k: hdf5_file["data/{}/next_obs/{}".format(ep, k)][()] for k in obs_keys} + all_data[ep]["next_obs"] = { + k: hdf5_file["data/{}/next_obs/{}".format(ep, k)][()] + for k in obs_keys + } # get other dataset keys for k in dataset_keys: if k in hdf5_file["data/{}".format(ep)]: - all_data[ep][k] = hdf5_file["data/{}/{}".format(ep, k)][()].astype('float32') + all_data[ep][k] = hdf5_file["data/{}/{}".format(ep, k)][()].astype( + "float32" + ) else: - all_data[ep][k] = np.zeros((all_data[ep]["attrs"]["num_samples"], 1), dtype=np.float32) + all_data[ep][k] = np.zeros( + (all_data[ep]["attrs"]["num_samples"], 1), dtype=np.float32 + ) if "model_file" in hdf5_file["data/{}".format(ep)].attrs: - all_data[ep]["attrs"]["model_file"] = hdf5_file["data/{}".format(ep)].attrs["model_file"] + all_data[ep]["attrs"]["model_file"] = hdf5_file[ + "data/{}".format(ep) + ].attrs["model_file"] return all_data def normalize_obs(self): """ - Computes a dataset-wide mean and standard deviation for the observations + Computes a dataset-wide mean and standard deviation for the observations (per dimension and per obs key) and returns it. """ + def _compute_traj_stats(traj_obs_dict): """ Helper function to compute statistics over a single trajectory of observations. """ - traj_stats = { k : {} for k in traj_obs_dict } + traj_stats = {k: {} for k in traj_obs_dict} for k in traj_obs_dict: traj_stats[k]["n"] = traj_obs_dict[k].shape[0] - traj_stats[k]["mean"] = traj_obs_dict[k].mean(axis=0, keepdims=True) # [1, ...] - traj_stats[k]["sqdiff"] = ((traj_obs_dict[k] - traj_stats[k]["mean"]) ** 2).sum(axis=0, keepdims=True) # [1, ...] + traj_stats[k]["mean"] = traj_obs_dict[k].mean( + axis=0, keepdims=True + ) # [1, ...] + traj_stats[k]["sqdiff"] = ( + (traj_obs_dict[k] - traj_stats[k]["mean"]) ** 2 + ).sum( + axis=0, keepdims=True + ) # [1, ...] return traj_stats def _aggregate_traj_stats(traj_stats_a, traj_stats_b): @@ -326,33 +379,51 @@ def _aggregate_traj_stats(traj_stats_a, traj_stats_b): """ merged_stats = {} for k in traj_stats_a: - n_a, avg_a, M2_a = traj_stats_a[k]["n"], traj_stats_a[k]["mean"], traj_stats_a[k]["sqdiff"] - n_b, avg_b, M2_b = traj_stats_b[k]["n"], traj_stats_b[k]["mean"], traj_stats_b[k]["sqdiff"] + n_a, avg_a, M2_a = ( + traj_stats_a[k]["n"], + traj_stats_a[k]["mean"], + traj_stats_a[k]["sqdiff"], + ) + n_b, avg_b, M2_b = ( + traj_stats_b[k]["n"], + traj_stats_b[k]["mean"], + traj_stats_b[k]["sqdiff"], + ) n = n_a + n_b mean = (n_a * avg_a + n_b * avg_b) / n - delta = (avg_b - avg_a) - M2 = M2_a + M2_b + (delta ** 2) * (n_a * n_b) / n + delta = avg_b - avg_a + M2 = M2_a + M2_b + (delta**2) * (n_a * n_b) / n merged_stats[k] = dict(n=n, mean=mean, sqdiff=M2) return merged_stats # Run through all trajectories. 
For each one, compute minimal observation statistics, and then aggregate # with the previous statistics. ep = self.demos[0] - obs_traj = {k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype('float32') for k in self.obs_keys} + obs_traj = { + k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype("float32") + for k in self.obs_keys + } obs_traj = ObsUtils.process_obs_dict(obs_traj) merged_stats = _compute_traj_stats(obs_traj) print("SequenceDataset: normalizing observations...") for ep in LogUtils.custom_tqdm(self.demos[1:]): - obs_traj = {k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype('float32') for k in self.obs_keys} + obs_traj = { + k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype("float32") + for k in self.obs_keys + } obs_traj = ObsUtils.process_obs_dict(obs_traj) traj_stats = _compute_traj_stats(obs_traj) merged_stats = _aggregate_traj_stats(merged_stats, traj_stats) - obs_normalization_stats = { k : {} for k in merged_stats } + obs_normalization_stats = {k: {} for k in merged_stats} for k in merged_stats: # note we add a small tolerance of 1e-3 for std - obs_normalization_stats[k]["mean"] = merged_stats[k]["mean"].astype(np.float32) - obs_normalization_stats[k]["std"] = (np.sqrt(merged_stats[k]["sqdiff"] / merged_stats[k]["n"]) + 1e-3).astype(np.float32) + obs_normalization_stats[k]["mean"] = merged_stats[k]["mean"].astype( + np.float32 + ) + obs_normalization_stats[k]["std"] = ( + np.sqrt(merged_stats[k]["sqdiff"] / merged_stats[k]["n"]) + 1e-3 + ).astype(np.float32) return obs_normalization_stats def get_obs_normalization_stats(self): @@ -376,20 +447,20 @@ def get_dataset_for_ep(self, ep, key): """ # check if this key should be in memory - key_should_be_in_memory = (self.hdf5_cache_mode in ["all", "low_dim"]) + key_should_be_in_memory = self.hdf5_cache_mode in ["all", "low_dim"] if key_should_be_in_memory: # if key is an observation, it may not be in memory - if '/' in key: - key1, key2 = key.split('/') - assert(key1 in ['obs', 'next_obs']) + if "/" in key: + key1, key2 = key.split("/") + assert key1 in ["obs", "next_obs"] if key2 not in self.obs_keys_in_memory: key_should_be_in_memory = False if key_should_be_in_memory: # read cache - if '/' in key: - key1, key2 = key.split('/') - assert(key1 in ['obs', 'next_obs']) + if "/" in key: + key1, key2 = key.split("/") + assert key1 in ["obs", "next_obs"] ret = self.hdf5_cache[ep][key1][key2] else: ret = self.hdf5_cache[ep][key] @@ -428,8 +499,9 @@ def get_item(self, index): demo_id, index_in_demo=index_in_demo, keys=self.dataset_keys, - num_frames_to_stack=self.n_frame_stack - 1, # note: need to decrement self.n_frame_stack by one - seq_length=self.seq_length + num_frames_to_stack=self.n_frame_stack + - 1, # note: need to decrement self.n_frame_stack by one + seq_length=self.seq_length, ) # determine goal index @@ -443,7 +515,7 @@ def get_item(self, index): keys=self.obs_keys, num_frames_to_stack=self.n_frame_stack - 1, seq_length=self.seq_length, - prefix="obs" + prefix="obs", ) if self.load_next_obs: @@ -453,7 +525,7 @@ def get_item(self, index): keys=self.obs_keys, num_frames_to_stack=self.n_frame_stack - 1, seq_length=self.seq_length, - prefix="next_obs" + prefix="next_obs", ) if goal_index is not None: @@ -465,7 +537,9 @@ def get_item(self, index): seq_length=1, prefix="next_obs", ) - meta["goal_obs"] = {k: goal[k][0] for k in goal} # remove sequence dimension for goal + meta["goal_obs"] = { + k: goal[k][0] for k in goal + } # remove sequence dimension for goal return meta @@ -478,20 
+552,34 @@ def interpolate_keys(self, obs, keys, seq_length, seq_length_to_load): if k == "pad_mask": # interpolate it by simply copying each index (seq_length / seq_length_to_load) times obs[k] = np.repeat(v, seq_length // seq_length_to_load, axis=0) - elif k != 'pad_mask': - assert v.shape[0] == seq_length_to_load, "low_dim obs should have shape (seq_length, ...)" - assert len(v.shape) == 2, "low_dim obs should have shape (seq_length, ...)" + elif k != "pad_mask": + assert ( + v.shape[0] == seq_length_to_load + ), "low_dim obs should have shape (seq_length, ...)" + assert ( + len(v.shape) == 2 + ), "low_dim obs should have shape (seq_length, ...)" # plot v[:, 3] # plt.plot(v[:, 2]) # plt.savefig('v_3.png') # plt.close() - interp = scipy.interpolate.interp1d(np.linspace(0, 1, seq_length_to_load), v, axis=0) + interp = scipy.interpolate.interp1d( + np.linspace(0, 1, seq_length_to_load), v, axis=0 + ) obs[k] = interp(np.linspace(0, 1, seq_length)) # plt.plot(obs[k][:, 2]) # plt.savefig('v_3_after.png') # plt.close() - def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, dont_load_fut=None): + def get_sequence_from_demo( + self, + demo_id, + index_in_demo, + keys, + num_frames_to_stack=0, + seq_length=1, + dont_load_fut=None, + ): """ Extract a (sub)sequence of data items from a demo given the @keys of the items. @@ -519,8 +607,12 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta seq_end_index = min(demo_length, index_in_demo + seq_length) # determine sequence padding - seq_begin_pad = max(0, num_frames_to_stack - index_in_demo) # pad for frame stacking - seq_end_pad = max(0, index_in_demo + seq_length - demo_length) # pad for sequence length + seq_begin_pad = max( + 0, num_frames_to_stack - index_in_demo + ) # pad for frame stacking + seq_end_pad = max( + 0, index_in_demo + seq_length - demo_length + ) # pad for sequence length # make sure we are not padding if specified. if not self.pad_frame_stack: @@ -533,18 +625,38 @@ def get_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_sta for k in keys: t = time.time() data = self.get_dataset_for_ep(demo_id, k) - true_end_index = seq_begin_index + 1 if k.split("/")[-1] in dont_load_fut else seq_end_index - seq[k] = data[seq_begin_index: true_end_index] + true_end_index = ( + seq_begin_index + 1 + if k.split("/")[-1] in dont_load_fut + else seq_end_index + ) + seq[k] = data[seq_begin_index:true_end_index] for k in seq: if k.split("/")[-1] not in dont_load_fut: - seq[k] = TensorUtils.pad_sequence(seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True) - pad_mask = np.array([0] * seq_begin_pad + [1] * (seq_end_index - seq_begin_index) + [0] * seq_end_pad) + seq[k] = TensorUtils.pad_sequence( + seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True + ) + pad_mask = np.array( + [0] * seq_begin_pad + + [1] * (seq_end_index - seq_begin_index) + + [0] * seq_end_pad + ) pad_mask = pad_mask[:, None].astype(bool) return seq, pad_mask - def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False, seq_length_to_load=None): + def get_obs_sequence_from_demo( + self, + demo_id, + index_in_demo, + keys, + num_frames_to_stack=0, + seq_length=1, + prefix="obs", + dont_load_fut=False, + seq_length_to_load=None, + ): """ Extract a (sub)sequence of observation items from a demo given the @keys of the items. 
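
The interpolate_keys hunk above resamples each low-dim observation key from seq_length_to_load frames up to seq_length frames with a linear interpolant over a normalized time axis. A minimal standalone sketch of that step follows (hypothetical array shapes; not part of the patch):

import numpy as np
import scipy.interpolate

# hypothetical low-dim observation sequence: 10 loaded frames, 3 dims
seq_length_to_load, seq_length = 10, 30
v = np.random.randn(seq_length_to_load, 3).astype("float32")

# fit a linear interpolant over a normalized [0, 1] time axis, then
# resample it at the target sequence length (mirrors interpolate_keys)
interp = scipy.interpolate.interp1d(np.linspace(0, 1, seq_length_to_load), v, axis=0)
v_upsampled = interp(np.linspace(0, 1, seq_length))
assert v_upsampled.shape == (seq_length, 3)
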
@@ -565,15 +677,15 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to obs, pad_mask = self.get_sequence_from_demo( demo_id, index_in_demo=index_in_demo, - keys=tuple('{}/{}'.format(prefix, k) for k in keys), + keys=tuple("{}/{}".format(prefix, k) for k in keys), num_frames_to_stack=num_frames_to_stack, seq_length=seq_length_to_load, - dont_load_fut=dont_load_fut + dont_load_fut=dont_load_fut, ) - obs = {k.split('/')[1]: obs[k] for k in obs} # strip the prefix + obs = {k.split("/")[1]: obs[k] for k in obs} # strip the prefix if self.get_pad_mask: obs["pad_mask"] = pad_mask - + # Interpolate obs to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")] # t = time.time() @@ -582,10 +694,18 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to return obs - def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, seq_length_to_load=None): + def get_dataset_sequence_from_demo( + self, + demo_id, + index_in_demo, + keys, + num_frames_to_stack=0, + seq_length=1, + seq_length_to_load=None, + ): """ Extract a (sub)sequence of dataset items from a demo given the @keys of the items (e.g., states, actions). - + Args: demo_id (str): id of the demo, e.g., demo_0 index_in_demo (int): beginning index of the sequence wrt the demo @@ -608,7 +728,7 @@ def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frame ) if self.get_pad_mask: data["pad_mask"] = pad_mask - + # interpolate actions to_interp = [k for k in data] # t = time.time() @@ -628,14 +748,12 @@ def get_trajectory_at_index(self, index): demo_id, index_in_demo=0, keys=self.dataset_keys, - num_frames_to_stack=self.n_frame_stack - 1, # note: need to decrement self.n_frame_stack by one - seq_length=demo_length + num_frames_to_stack=self.n_frame_stack + - 1, # note: need to decrement self.n_frame_stack by one + seq_length=demo_length, ) meta["obs"] = self.get_obs_sequence_from_demo( - demo_id, - index_in_demo=0, - keys=self.obs_keys, - seq_length=demo_length + demo_id, index_in_demo=0, keys=self.obs_keys, seq_length=demo_length ) if self.load_next_obs: meta["next_obs"] = self.get_obs_sequence_from_demo( @@ -643,7 +761,7 @@ def get_trajectory_at_index(self, index): index_in_demo=0, keys=self.obs_keys, seq_length=demo_length, - prefix="next_obs" + prefix="next_obs", ) meta["ep"] = demo_id diff --git a/robomimic/utils/env_utils.py b/robomimic/utils/env_utils.py index b656ea64..f7b511e6 100644 --- a/robomimic/utils/env_utils.py +++ b/robomimic/utils/env_utils.py @@ -3,6 +3,7 @@ wrappers provided by the repository, and with environment metadata saved in dataset files. 
""" + from copy import deepcopy import robomimic.envs.env_base as EB from robomimic.utils.log_utils import log_warning @@ -34,12 +35,15 @@ def get_env_class(env_meta=None, env_type=None, env=None): env_type = get_env_type(env_meta=env_meta, env_type=env_type, env=env) if env_type == EB.EnvType.ROBOSUITE_TYPE: from robomimic.envs.env_robosuite import EnvRobosuite + return EnvRobosuite elif env_type == EB.EnvType.GYM_TYPE: from robomimic.envs.env_gym import EnvGym + return EnvGym elif env_type == EB.EnvType.IG_MOMART_TYPE: from robomimic.envs.env_ig_momart import EnvGibsonMOMART + return EnvGibsonMOMART raise Exception("code should never reach this point") @@ -93,7 +97,7 @@ def check_env_type(type_to_check, env_meta=None, env_type=None, env=None): env (instance of EB.EnvBase): environment instance """ env_type = get_env_type(env_meta=env_meta, env_type=env_type, env=env) - return (env_type == type_to_check) + return env_type == type_to_check def check_env_version(env, env_meta): @@ -115,13 +119,13 @@ def check_env_version(env, env_meta): if env_meta_version is None: log_warning( - "No environment version found in dataset!"\ - "\nCannot verify if dataset and installed environment versions match"\ + "No environment version found in dataset!" + "\nCannot verify if dataset and installed environment versions match" ) elif env_system_version != env_meta_version: log_warning( - "Dataset and installed environment version mismatch!"\ - "\nDataset environment version: {meta}"\ + "Dataset and installed environment version mismatch!" + "\nDataset environment version: {meta}" "\nInstalled environment version: {sys}".format( sys=env_system_version, meta=env_meta_version, @@ -135,16 +139,21 @@ def is_robosuite_env(env_meta=None, env_type=None, env=None): either env_meta, env_type, or env. """ return False - return check_env_type(type_to_check=EB.EnvType.ROBOSUITE_TYPE, env_meta=env_meta, env_type=env_type, env=env) + return check_env_type( + type_to_check=EB.EnvType.ROBOSUITE_TYPE, + env_meta=env_meta, + env_type=env_type, + env=env, + ) def create_env( env_type, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, **kwargs, ): """ @@ -173,9 +182,9 @@ def create_env( # note: pass @postprocess_visual_obs True, to make sure images are processed for network inputs env_class = get_env_class(env_type=env_type) env = env_class( - env_name=env_name, - render=render, - render_offscreen=render_offscreen, + env_name=env_name, + render=render, + render_offscreen=render_offscreen, use_image_obs=use_image_obs, use_depth_obs=use_depth_obs, postprocess_visual_obs=True, @@ -188,11 +197,11 @@ def create_env( def create_env_from_metadata( env_meta, - env_name=None, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, + env_name=None, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, ): """ Create environment. 
@@ -229,11 +238,11 @@ def create_env_from_metadata( env = create_env( env_type=env_type, - env_name=env_name, - render=render, - render_offscreen=render_offscreen, - use_image_obs=use_image_obs, - use_depth_obs=use_depth_obs, + env_name=env_name, + render=render, + render_offscreen=render_offscreen, + use_image_obs=use_image_obs, + use_depth_obs=use_depth_obs, **env_kwargs, ) check_env_version(env, env_meta) @@ -242,15 +251,15 @@ def create_env_from_metadata( def create_env_for_data_processing( env_meta, - camera_names, - camera_height, - camera_width, + camera_names, + camera_height, + camera_width, reward_shaping, env_class=None, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, ): """ Creates environment for processing dataset observations and rewards. @@ -299,14 +308,14 @@ def create_env_for_data_processing( env_kwargs.pop("use_depth_obs", None) env = env_class.create_for_data_processing( - env_name=env_name, - camera_names=camera_names, - camera_height=camera_height, - camera_width=camera_width, - reward_shaping=reward_shaping, - render=render, - render_offscreen=render_offscreen, - use_image_obs=use_image_obs, + env_name=env_name, + camera_names=camera_names, + camera_height=camera_height, + camera_width=camera_width, + reward_shaping=reward_shaping, + render=render, + render_offscreen=render_offscreen, + use_image_obs=use_image_obs, use_depth_obs=use_depth_obs, **env_kwargs, ) @@ -321,13 +330,20 @@ def set_env_specific_obs_processing(env_meta=None, env_type=None, env=None): processing normalizes and clips all values to [0, 1]. """ if is_robosuite_env(env_meta=env_meta, env_type=env_type, env=env): - from robomimic.utils.obs_utils import DepthModality, process_frame, unprocess_frame - DepthModality.set_obs_processor(processor=( - lambda obs: process_frame(frame=obs, channel_dim=1, scale=None) - )) - DepthModality.set_obs_unprocessor(unprocessor=( - lambda obs: unprocess_frame(frame=obs, channel_dim=1, scale=None) - )) + from robomimic.utils.obs_utils import ( + DepthModality, + process_frame, + unprocess_frame, + ) + + DepthModality.set_obs_processor( + processor=(lambda obs: process_frame(frame=obs, channel_dim=1, scale=None)) + ) + DepthModality.set_obs_unprocessor( + unprocessor=( + lambda obs: unprocess_frame(frame=obs, channel_dim=1, scale=None) + ) + ) def wrap_env_from_config(env, config): @@ -337,6 +353,7 @@ def wrap_env_from_config(env, config): """ if ("frame_stack" in config.train) and (config.train.frame_stack > 1): from robomimic.envs.wrappers import FrameStackWrapper + env = FrameStackWrapper(env, num_frames=config.train.frame_stack) return env diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index c3d74be8..8968b379 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -2,6 +2,7 @@ A collection of utility functions for working with files, such as reading metadata from demonstration datasets, loading model checkpoints, or downloading dataset files. """ + import os import h5py import json @@ -35,7 +36,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): Args: hdf5_path (str): path to hdf5 file demo_keys ([str]): list of demonstration keys which should - correspond to this filter key. For example, ["demo_0", + correspond to this filter key. For example, ["demo_0", "demo_1"]. 
key_name (str): name of filter key to create @@ -43,7 +44,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): ep_lengths ([int]): list of episode lengths that corresponds to each demonstration in the new filter key """ - f = h5py.File(hdf5_path, "a") + f = h5py.File(hdf5_path, "a") demos = sorted(list(f["data"].keys())) # collect episode lengths for the keys of interest @@ -57,7 +58,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): k = "mask/{}".format(key_name) if k in f: del f[k] - f[k] = np.array(demo_keys, dtype='S') + f[k] = np.array(demo_keys, dtype="S") f.close() return ep_lengths @@ -73,11 +74,13 @@ def get_demos_for_filter_key(hdf5_path, filter_key): Returns: demo_keys ([str]): list of demonstration keys that - correspond to this filter key. For example, ["demo_0", + correspond to this filter key. For example, ["demo_0", "demo_1"]. """ f = h5py.File(hdf5_path, "r") - demo_keys = [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)][:])] + demo_keys = [ + elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)][:]) + ] f.close() return demo_keys @@ -91,7 +94,7 @@ def get_env_metadata_from_dataset(dataset_path, set_env_specific_obs_processors= set_env_specific_obs_processors (bool): environment might have custom rules for how to process observations - if this flag is true, make sure ObsUtils will use these custom settings. This - is a good place to do this operation to make sure it happens before loading data, running a + is a good place to do this operation to make sure it happens before loading data, running a trained model, etc. Returns: @@ -111,7 +114,9 @@ def get_env_metadata_from_dataset(dataset_path, set_env_specific_obs_processors= return env_meta -def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False, ac_key="actions"): +def get_shape_metadata_from_dataset( + dataset_path, all_obs_keys=None, verbose=False, ac_key="actions" +): """ Retrieves shape metadata from dataset. @@ -141,7 +146,7 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal demo = f["data/{}".format(demo_id)] # action dimension - shape_meta['ac_dim'] = f[f"data/{demo_id}/{ac_key}"].shape[1] + shape_meta["ac_dim"] = f[f"data/{demo_id}/{ac_key}"].shape[1] # observation dimensions all_shapes = OrderedDict() @@ -162,10 +167,10 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal f.close() - shape_meta['all_shapes'] = all_shapes - shape_meta['all_obs_keys'] = all_obs_keys - shape_meta['use_images'] = ObsUtils.has_modality("rgb", all_obs_keys) - shape_meta['use_depths'] = ObsUtils.has_modality("depth", all_obs_keys) + shape_meta["all_shapes"] = all_shapes + shape_meta["all_obs_keys"] = all_obs_keys + shape_meta["use_images"] = ObsUtils.has_modality("rgb", all_obs_keys) + shape_meta["use_depths"] = ObsUtils.has_modality("depth", all_obs_keys) return shape_meta @@ -173,7 +178,7 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal def load_dict_from_checkpoint(ckpt_path): """ Load checkpoint dictionary from a checkpoint file. - + Args: ckpt_path (str): Path to checkpoint file. 
@@ -279,22 +284,41 @@ def find_obs_dicts_recursively(dic): } if "visual_feature_dimension" in old_encoder_cfg: - rgb_encoder_cfg["core_kwargs"]["feature_dimension"] = old_encoder_cfg["visual_feature_dimension"] + rgb_encoder_cfg["core_kwargs"]["feature_dimension"] = old_encoder_cfg[ + "visual_feature_dimension" + ] if "visual_core" in old_encoder_cfg: - rgb_encoder_cfg["core_kwargs"]["backbone_class"] = old_encoder_cfg["visual_core"] + rgb_encoder_cfg["core_kwargs"]["backbone_class"] = old_encoder_cfg[ + "visual_core" + ] for kwarg in ("pretrained", "input_coord_conv"): - if "visual_core_kwargs" in old_encoder_cfg and kwarg in old_encoder_cfg["visual_core_kwargs"]: - rgb_encoder_cfg["core_kwargs"]["backbone_kwargs"][kwarg] = old_encoder_cfg["visual_core_kwargs"][kwarg] + if ( + "visual_core_kwargs" in old_encoder_cfg + and kwarg in old_encoder_cfg["visual_core_kwargs"] + ): + rgb_encoder_cfg["core_kwargs"]["backbone_kwargs"][kwarg] = ( + old_encoder_cfg["visual_core_kwargs"][kwarg] + ) # Optionally add pooling info too if old_encoder_cfg.get("use_spatial_softmax", True): rgb_encoder_cfg["core_kwargs"]["pool_class"] = "SpatialSoftmax" - for kwarg in ("num_kp", "learnable_temperature", "temperature", "noise_std"): - if "spatial_softmax_kwargs" in old_encoder_cfg and kwarg in old_encoder_cfg["spatial_softmax_kwargs"]: - rgb_encoder_cfg["core_kwargs"]["pool_kwargs"][kwarg] = old_encoder_cfg["spatial_softmax_kwargs"][kwarg] + for kwarg in ( + "num_kp", + "learnable_temperature", + "temperature", + "noise_std", + ): + if ( + "spatial_softmax_kwargs" in old_encoder_cfg + and kwarg in old_encoder_cfg["spatial_softmax_kwargs"] + ): + rgb_encoder_cfg["core_kwargs"]["pool_kwargs"][kwarg] = ( + old_encoder_cfg["spatial_softmax_kwargs"][kwarg] + ) # Update obs randomizer as well for kwarg in ("obs_randomizer_class", "obs_randomizer_kwargs"): @@ -316,7 +340,9 @@ def find_obs_dicts_recursively(dic): } -def config_from_checkpoint(algo_name=None, ckpt_path=None, ckpt_dict=None, verbose=False): +def config_from_checkpoint( + algo_name=None, ckpt_path=None, ckpt_dict=None, verbose=False +): """ Helper function to restore config from a checkpoint file or loaded model dictionary. @@ -340,7 +366,7 @@ def config_from_checkpoint(algo_name=None, ckpt_path=None, ckpt_dict=None, verbo algo_name, _ = algo_name_from_checkpoint(ckpt_dict=ckpt_dict) # restore config from loaded model dictionary - config_dict = json.loads(ckpt_dict['config']) + config_dict = json.loads(ckpt_dict["config"]) update_config(cfg=config_dict) if verbose: @@ -381,7 +407,9 @@ def policy_from_checkpoint(device=None, ckpt_path=None, ckpt_dict=None, verbose= # algo name and config from model dict algo_name, _ = algo_name_from_checkpoint(ckpt_dict=ckpt_dict) - config, _ = config_from_checkpoint(algo_name=algo_name, ckpt_dict=ckpt_dict, verbose=verbose) + config, _ = config_from_checkpoint( + algo_name=algo_name, ckpt_dict=ckpt_dict, verbose=verbose + ) # read config to set up metadata for observation modalities (e.g. detecting rgb observations) ObsUtils.initialize_obs_utils_with_config(config) @@ -418,7 +446,14 @@ def policy_from_checkpoint(device=None, ckpt_path=None, ckpt_dict=None, verbose= return model, ckpt_dict -def env_from_checkpoint(ckpt_path=None, ckpt_dict=None, env_name=None, render=False, render_offscreen=False, verbose=False): +def env_from_checkpoint( + ckpt_path=None, + ckpt_dict=None, + env_name=None, + render=False, + render_offscreen=False, + verbose=False, +): """ Creates an environment using the metadata saved in a checkpoint. 
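
policy_from_checkpoint and env_from_checkpoint are meant to be used together, as run_trained_agent does above; a minimal sketch with a placeholder checkpoint path:

import robomimic.utils.file_utils as FileUtils
import robomimic.utils.torch_utils as TorchUtils

device = TorchUtils.get_torch_device(try_to_use_cuda=True)

# restore the trained policy along with the full checkpoint dict
policy, ckpt_dict = FileUtils.policy_from_checkpoint(
    ckpt_path="/path/to/model.pth", device=device, verbose=True  # placeholder path
)

# rebuild the environment recorded in the same checkpoint
env, _ = FileUtils.env_from_checkpoint(
    ckpt_dict=ckpt_dict, render=False, render_offscreen=True, verbose=True
)
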
@@ -448,15 +483,19 @@ def env_from_checkpoint(ckpt_path=None, ckpt_dict=None, env_name=None, render=Fa # create env from saved metadata env = EnvUtils.create_env_from_metadata( - env_meta=env_meta, - env_name=env_name, - render=render, + env_meta=env_meta, + env_name=env_name, + render=render, render_offscreen=render_offscreen, use_image_obs=shape_meta.get("use_images", False), use_depth_obs=shape_meta.get("use_depths", False), ) - config, _ = config_from_checkpoint(algo_name=ckpt_dict["algo_name"], ckpt_dict=ckpt_dict, verbose=False) - env = EnvUtils.wrap_env_from_config(env, config=config) # apply environment wrapper, if applicable + config, _ = config_from_checkpoint( + algo_name=ckpt_dict["algo_name"], ckpt_dict=ckpt_dict, verbose=False + ) + env = EnvUtils.wrap_env_from_config( + env, config=config + ) # apply environment wrapper, if applicable if verbose: print("============= Loaded Environment =============") print(env) @@ -482,7 +521,7 @@ def url_is_alive(url): is_alive (bool): True if url is reachable, False otherwise """ request = urllib.request.Request(url) - request.get_method = lambda: 'HEAD' + request.get_method = lambda: "HEAD" try: urllib.request.urlopen(request) @@ -518,9 +557,13 @@ def download_url(url, download_dir, check_overwrite=True): # If we're checking overwrite and the path already exists, # we ask the user to verify that they want to overwrite the file if check_overwrite and os.path.exists(file_to_write): - user_response = input(f"Warning: file {file_to_write} already exists. Overwrite? y/n\n") - assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." + user_response = input( + f"Warning: file {file_to_write} already exists. Overwrite? y/n\n" + ) + assert user_response.lower() in { + "yes", + "y", + }, f"Did not receive confirmation. Aborting download." - with DownloadProgressBar(unit='B', unit_scale=True, - miniters=1, desc=fname) as t: + with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=fname) as t: urllib.request.urlretrieve(url, filename=file_to_write, reporthook=t.update_to) diff --git a/robomimic/utils/hyperparam_utils.py b/robomimic/utils/hyperparam_utils.py index 267536d3..6c54a4ac 100644 --- a/robomimic/utils/hyperparam_utils.py +++ b/robomimic/utils/hyperparam_utils.py @@ -1,6 +1,7 @@ """ A collection of utility functions and classes for generating config jsons for hyperparameter sweeps. """ + import argparse import os import json @@ -16,7 +17,14 @@ class ConfigGenerator(object): Useful class to keep track of hyperparameters to sweep, and to generate the json configs for each experiment run. 
""" - def __init__(self, base_config_file, wandb_proj_name="debug", script_file=None, generated_config_dir=None): + + def __init__( + self, + base_config_file, + wandb_proj_name="debug", + script_file=None, + generated_config_dir=None, + ): """ Args: base_config_file (str): path to a base json config to use as a starting point @@ -32,7 +40,7 @@ def __init__(self, base_config_file, wandb_proj_name="debug", script_file=None, self.generated_config_dir = generated_config_dir assert script_file is None or isinstance(script_file, str) if script_file is None: - self.script_file = os.path.join('~', 'tmp/tmpp.sh') + self.script_file = os.path.join("~", "tmp/tmpp.sh") else: self.script_file = script_file self.script_file = os.path.expanduser(self.script_file) @@ -63,10 +71,10 @@ def add_param(self, key, name, group, values, value_names=None): if value_names is not None: assert len(values) == len(value_names) self.parameters[key] = argparse.Namespace( - key=key, - name=name, - group=group, - values=values, + key=key, + name=name, + group=group, + values=values, value_names=value_names, ) @@ -107,13 +115,15 @@ def _name_for_experiment(self, base_name, parameter_values, parameter_value_name val_str = parameter_value_names[k] else: val_str = parameter_values[k] - if isinstance(parameter_values[k], list) or isinstance(parameter_values[k], tuple): + if isinstance(parameter_values[k], list) or isinstance( + parameter_values[k], tuple + ): # convert list to string to avoid weird spaces and naming problems val_str = "_".join([str(x) for x in parameter_values[k]]) val_str = str(val_str) - name += '_{}'.format(self.parameters[k].name) + name += "_{}".format(self.parameters[k].name) if len(val_str) > 0: - name += '_{}'.format(val_str) + name += "_{}".format(val_str) return name def _get_parameter_ranges(self): @@ -123,17 +133,17 @@ def _get_parameter_ranges(self): Returns: parameter_ranges (dict): dictionary that maps the parameter to a list - of all values it should take for each generated config. The length + of all values it should take for each generated config. The length of the list will be the total number of configs that will be generated from this scan. parameter_names (dict): dictionary that maps the parameter to a list of all name strings that should contribute to each invididual - experiment's name. The length of the list will be the total + experiment's name. The length of the list will be the total number of configs that will be generated from this scan. 
""" - # mapping from group id to list of indices to grab from each parameter's list + # mapping from group id to list of indices to grab from each parameter's list # of values in the parameter group parameter_group_indices = OrderedDict() for k in self.parameters: @@ -143,21 +153,22 @@ def _get_parameter_ranges(self): if group_id not in parameter_group_indices: parameter_group_indices[group_id] = list(range(num_param_values)) else: - assert len(parameter_group_indices[group_id]) == num_param_values, \ - "error: inconsistent number of parameter values in group with id {}".format(group_id) + assert ( + len(parameter_group_indices[group_id]) == num_param_values + ), "error: inconsistent number of parameter values in group with id {}".format( + group_id + ) keys = list(parameter_group_indices.keys()) inds = list(parameter_group_indices.values()) - new_parameter_group_indices = OrderedDict( - { k : [] for k in keys } - ) + new_parameter_group_indices = OrderedDict({k: [] for k in keys}) # get all combinations of the different parameter group indices # and then use these indices to determine the new parameter ranges # per member of each parameter group. # # e.g. with two parameter groups, one with two values, and another with three values # we have [0, 1] x [0, 1, 2] = [0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2] - # so the corresponding parameter group indices are [0, 0, 0, 1, 1, 1] and + # so the corresponding parameter group indices are [0, 0, 0, 1, 1, 1] and # [0, 1, 2, 0, 1, 2], and all parameters in each parameter group are indexed # together using these indices, to get each parameter range. for comb in itertools.product(*inds): @@ -186,7 +197,9 @@ def _get_parameter_ranges(self): first_key = list(parameter_ranges.keys())[0] num_settings = len(parameter_ranges[first_key]) for k in parameter_ranges: - assert len(parameter_ranges[k]) == num_settings, "inconsistent number of values" + assert ( + len(parameter_ranges[k]) == num_settings + ), "inconsistent number of values" return parameter_ranges, parameter_names @@ -211,7 +224,7 @@ def _generate_jsons(self): base_config = load_json(self.base_config_file, verbose=False) # base exp name from this base config - base_exp_name = base_config['experiment']['name'] + base_exp_name = base_config["experiment"]["name"] # use base json to determine the parameter ranges parameter_ranges, parameter_names = self._get_parameter_ranges() @@ -225,7 +238,7 @@ def _generate_jsons(self): for i in range(num_settings): # the specific parameter setting for this experiment - setting = { k : parameter_ranges[k][i] for k in parameter_ranges } + setting = {k: parameter_ranges[k][i] for k in parameter_ranges} maybe_parameter_names = OrderedDict() for k in parameter_names: maybe_parameter_names[k] = None @@ -234,14 +247,14 @@ def _generate_jsons(self): # experiment name from setting exp_name = self._name_for_experiment( - base_name=base_exp_name, - parameter_values=setting, + base_name=base_exp_name, + parameter_values=setting, parameter_value_names=maybe_parameter_names, ) # copy old json, but override name, and parameter values json_dict = deepcopy(base_config) - json_dict['experiment']['name'] = exp_name + json_dict["experiment"]["name"] = exp_name for k in parameter_ranges: set_value_for_key(json_dict, k, v=parameter_ranges[k][i]) @@ -263,7 +276,7 @@ def _generate_jsons(self): value_name = maybe_parameter_names[k] else: value_name = setting[k] - + json_dict["meta"]["hp_keys"].append(key_name) json_dict["meta"]["hp_values"].append(value_name) @@ -281,12 +294,12 @@ 
def _script_from_jsons(self, json_paths): Generates a bash script to run the experiments that correspond to the input jsons. """ - with open(self.script_file, 'w') as f: + with open(self.script_file, "w") as f: f.write("#!/bin/bash\n\n") for path in json_paths: # write python command to file cmd = "python train.py --config {}\n".format(path) - + print() print(cmd) f.write(cmd) @@ -303,12 +316,12 @@ def load_json(json_file, verbose=True): Returns: config (dict): json dictionary """ - with open(json_file, 'r') as f: + with open(json_file, "r") as f: config = json.load(f) if verbose: - print('loading external config: =================') + print("loading external config: =================") print(json.dumps(config, indent=4)) - print('==========================================') + print("==========================================") return config @@ -320,7 +333,7 @@ def save_json(config, json_file): config (dict): dictionary to save json_file (str): path to json file to write """ - with open(json_file, 'w') as f: + with open(json_file, "w") as f: # preserve original key ordering json.dump(config, f, sort_keys=False, indent=4) @@ -340,7 +353,7 @@ def get_value_for_key(dic, k): val: the nested dictionary value for the provided key """ val = dic - subkeys = re.split('/|\.', k) + subkeys = re.split("/|\.", k) for s in subkeys[:-1]: val = val[s] return val[subkeys[-1]] @@ -358,7 +371,7 @@ def set_value_for_key(dic, k, v): v: the value to set at the provided key """ val = dic - subkeys = re.split('/|\.', k) #k.split('/') + subkeys = re.split("/|\.", k) # k.split('/') for s in subkeys[:-1]: val = val[s] val[subkeys[-1]] = v diff --git a/robomimic/utils/log_utils.py b/robomimic/utils/log_utils.py index ca1a3c6d..f3521978 100644 --- a/robomimic/utils/log_utils.py +++ b/robomimic/utils/log_utils.py @@ -2,6 +2,7 @@ This file contains utility classes and functions for logging to stdout, stderr, and to tensorboard. """ + import os import sys import numpy as np @@ -22,9 +23,10 @@ class PrintLogger(object): """ This class redirects print statements to both console and a file. """ + def __init__(self, log_file): self.terminal = sys.stdout - print('STDOUT will be forked to %s' % log_file) + print("STDOUT will be forked to %s" % log_file) self.log_file = open(log_file, "a") def write(self, message): @@ -43,6 +45,7 @@ class DataLogger(object): """ Logging class to log metrics to tensorboard and/or retrieve running statistics about logged data. """ + def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): """ Args: @@ -51,25 +54,29 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): """ self._tb_logger = None self._wandb_logger = None - self._data = dict() # store all the scalar data logged so far + self._data = dict() # store all the scalar data logged so far if log_tb: from tensorboardX import SummaryWriter - self._tb_logger = SummaryWriter(os.path.join(log_dir, 'tb')) - + + self._tb_logger = SummaryWriter(os.path.join(log_dir, "tb")) if log_wandb: import wandb import robomimic.macros as Macros - + # set up wandb api key if specified in macros if Macros.WANDB_API_KEY is not None: os.environ["WANDB_API_KEY"] = Macros.WANDB_API_KEY - assert Macros.WANDB_ENTITY is not None, "WANDB_ENTITY macro is set to None." 
\ - "\nSet this macro in {base_path}/macros_private.py" \ - "\nIf this file does not exist, first run python {base_path}/scripts/setup_macros.py".format(base_path=robomimic.__path__[0]) - + assert Macros.WANDB_ENTITY is not None, ( + "WANDB_ENTITY macro is set to None." + "\nSet this macro in {base_path}/macros_private.py" + "\nIf this file does not exist, first run python {base_path}/scripts/setup_macros.py".format( + base_path=robomimic.__path__[0] + ) + ) + # attempt to set up wandb 10 times. If unsuccessful after these trials, don't use wandb num_attempts = 10 for attempt in range(num_attempts): @@ -86,8 +93,12 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): ) # set up info for identifying experiment - wandb_config = {k: v for (k, v) in config.meta.items() if k not in ["hp_keys", "hp_values"]} - for (k, v) in zip(config.meta["hp_keys"], config.meta["hp_values"]): + wandb_config = { + k: v + for (k, v) in config.meta.items() + if k not in ["hp_keys", "hp_values"] + } + for k, v in zip(config.meta["hp_keys"], config.meta["hp_values"]): wandb_config[k] = v if "algo" not in wandb_config: wandb_config["algo"] = config.algo_name @@ -95,11 +106,15 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): break except Exception as e: - log_warning("wandb initialization error (attempt #{}): {}".format(attempt + 1, e)) + log_warning( + "wandb initialization error (attempt #{}): {}".format( + attempt + 1, e + ) + ) self._wandb_logger = None time.sleep(30) - def record(self, k, v, epoch, data_type='scalar', log_stats=False): + def record(self, k, v, epoch, data_type="scalar", log_stats=False): """ Record data with logger. Args: @@ -110,36 +125,42 @@ def record(self, k, v, epoch, data_type='scalar', log_stats=False): log_stats (bool): whether to store the mean/max/min/std for all data logged so far with key k """ - assert data_type in ['scalar', 'image'] + assert data_type in ["scalar", "image"] - if data_type == 'scalar': + if data_type == "scalar": # maybe update internal cache if logging stats for this key - if log_stats or k in self._data: # any key that we're logging or previously logged + if ( + log_stats or k in self._data + ): # any key that we're logging or previously logged if k not in self._data: self._data[k] = [] self._data[k].append(v) # maybe log to tensorboard if self._tb_logger is not None: - if data_type == 'scalar': + if data_type == "scalar": self._tb_logger.add_scalar(k, v, epoch) if log_stats: stats = self.get_stats(k) - for (stat_k, stat_v) in stats.items(): - stat_k_name = '{}-{}'.format(k, stat_k) + for stat_k, stat_v in stats.items(): + stat_k_name = "{}-{}".format(k, stat_k) self._tb_logger.add_scalar(stat_k_name, stat_v, epoch) - elif data_type == 'image': - self._tb_logger.add_images(k, img_tensor=v, global_step=epoch, dataformats="NHWC") + elif data_type == "image": + self._tb_logger.add_images( + k, img_tensor=v, global_step=epoch, dataformats="NHWC" + ) if self._wandb_logger is not None: try: - if data_type == 'scalar': + if data_type == "scalar": self._wandb_logger.log({k: v}, step=epoch) if log_stats: stats = self.get_stats(k) - for (stat_k, stat_v) in stats.items(): - self._wandb_logger.log({"{}/{}".format(k, stat_k): stat_v}, step=epoch) - elif data_type == 'image': + for stat_k, stat_v in stats.items(): + self._wandb_logger.log( + {"{}/{}".format(k, stat_k): stat_v}, step=epoch + ) + elif data_type == "image": raise NotImplementedError except Exception as e: log_warning("wandb logging: {}".format(e)) @@ 
-153,10 +174,10 @@ def get_stats(self, k): stats (dict): dictionary of statistics """ stats = dict() - stats['mean'] = np.mean(self._data[k]) - stats['std'] = np.std(self._data[k]) - stats['min'] = np.min(self._data[k]) - stats['max'] = np.max(self._data[k]) + stats["mean"] = np.mean(self._data[k]) + stats["std"] = np.std(self._data[k]) + stats["min"] = np.min(self._data[k]) + stats["max"] = np.max(self._data[k]) return stats def close(self): @@ -176,6 +197,7 @@ class custom_tqdm(tqdm): By default tqdm writes to stderr. Instead, we change it to write to stdout. """ + def __init__(self, *args, **kwargs): assert "file" not in kwargs super(custom_tqdm, self).__init__(*args, file=sys.stdout, **kwargs) @@ -211,7 +233,9 @@ def log_warning(message, color="yellow", print_now=True): addition to adding it to the global warning buffer """ global WARNINGS_BUFFER - buffer_message = colored("ROBOMIMIC WARNING(\n{}\n)".format(textwrap.indent(message, " ")), color) + buffer_message = colored( + "ROBOMIMIC WARNING(\n{}\n)".format(textwrap.indent(message, " ")), color + ) WARNINGS_BUFFER.append(buffer_message) if print_now: print(buffer_message) diff --git a/robomimic/utils/loss_utils.py b/robomimic/utils/loss_utils.py index b3f5bf22..511d4909 100644 --- a/robomimic/utils/loss_utils.py +++ b/robomimic/utils/loss_utils.py @@ -25,7 +25,7 @@ def cosine_loss(preds, labels): def KLD_0_1_loss(mu, logvar): """ - KL divergence loss. Computes D_KL( N(mu, sigma) || N(0, 1) ). Note that + KL divergence loss. Computes D_KL( N(mu, sigma) || N(0, 1) ). Note that this function averages across the batch dimension, but sums across dimension. Args: @@ -36,12 +36,12 @@ def KLD_0_1_loss(mu, logvar): loss (torch.Tensor): KL divergence loss between the input gaussian distribution and N(0, 1) """ - return -0.5 * (1. + logvar - mu.pow(2) - logvar.exp()).sum(dim=1).mean() + return -0.5 * (1.0 + logvar - mu.pow(2) - logvar.exp()).sum(dim=1).mean() def KLD_gaussian_loss(mu_1, logvar_1, mu_2, logvar_2): """ - KL divergence loss between two Gaussian distributions. This function + KL divergence loss between two Gaussian distributions. This function computes the average loss across the batch. Args: @@ -53,11 +53,18 @@ def KLD_gaussian_loss(mu_1, logvar_1, mu_2, logvar_2): Returns: loss (torch.Tensor): KL divergence loss between the two gaussian distributions """ - return -0.5 * (1. + \ - logvar_1 - logvar_2 \ - - ((mu_2 - mu_1).pow(2) / logvar_2.exp()) \ - - (logvar_1.exp() / logvar_2.exp()) \ - ).sum(dim=1).mean() + return ( + -0.5 + * ( + 1.0 + + logvar_1 + - logvar_2 + - ((mu_2 - mu_1).pow(2) / logvar_2.exp()) + - (logvar_1.exp() / logvar_2.exp()) + ) + .sum(dim=1) + .mean() + ) def log_normal(x, m, v): @@ -82,18 +89,18 @@ def log_normal(x, m, v): def log_normal_mixture(x, m, v, w=None, log_w=None): """ - Log probability of tensor x under a uniform mixture of Gaussians. + Log probability of tensor x under a uniform mixture of Gaussians. Adapted from CS 236 at Stanford. 
Args: x (torch.Tensor): tensor with shape (B, D) - m (torch.Tensor): means tensor with shape (B, M, D) or (1, M, D), where + m (torch.Tensor): means tensor with shape (B, M, D) or (1, M, D), where M is number of mixture components - v (torch.Tensor): variances tensor with shape (B, M, D) or (1, M, D) where + v (torch.Tensor): variances tensor with shape (B, M, D) or (1, M, D) where M is number of mixture components - w (torch.Tensor): weights tensor - if provided, should be + w (torch.Tensor): weights tensor - if provided, should be shape (B, M) or (1, M) - log_w (torch.Tensor): log-weights tensor - if provided, should be + log_w (torch.Tensor): log-weights tensor - if provided, should be shape (B, M) or (1, M) Returns: @@ -112,10 +119,10 @@ def log_normal_mixture(x, m, v, w=None, log_w=None): log_prob += log_w # then compute log sum_i exp [log(w_i * N(x | m_i, v_i))] # (B, M) -> (B,) - log_prob = log_sum_exp(log_prob , dim=1) + log_prob = log_sum_exp(log_prob, dim=1) else: # (B, M) -> (B,) - log_prob = log_mean_exp(log_prob , dim=1) # mean accounts for uniform weights + log_prob = log_mean_exp(log_prob, dim=1) # mean accounts for uniform weights return log_prob @@ -125,7 +132,7 @@ def log_mean_exp(x, dim): Adapted from CS 236 at Stanford. Args: - x (torch.Tensor): a tensor + x (torch.Tensor): a tensor dim (int): dimension along which mean is computed Returns: @@ -140,7 +147,7 @@ def log_sum_exp(x, dim=0): Adapted from CS 236 at Stanford. Args: - x (torch.Tensor): a tensor + x (torch.Tensor): a tensor dim (int): dimension along which sum is computed Returns: @@ -157,16 +164,16 @@ def project_values_onto_atoms(values, probabilities, atoms): grid of values given by @values onto a grid of values given by @atoms. This is useful when computing a bellman backup where the backed up values from the original grid will not be in the original support, - requiring L2 projection. + requiring L2 projection. Each value in @values has a corresponding probability in @probabilities - this probability mass is shifted to the closest neighboring grid points in @atoms in proportion. For example, if the value in question is 0.2, and the - neighboring atoms are 0 and 1, then 0.8 of the probability weight goes to + neighboring atoms are 0 and 1, then 0.8 of the probability weight goes to atom 0 and 0.2 of the probability weight will go to 1. 
Adapted from https://github.com/deepmind/acme/blob/master/acme/tf/losses/distributional.py#L42 - + Args: values: value grid to project, of shape (batch_size, n_atoms) probabilities: probabilities for categorical distribution on @values, shape (batch_size, n_atoms) @@ -187,22 +194,28 @@ def project_values_onto_atoms(values, probabilities, atoms): d_neg = torch.cat([vmax[None], atoms], dim=0)[:-1] # ensure that @values grid is within the support of @atoms - clipped_values = values.clamp(min=vmin, max=vmax)[:, None, :] # (batch_size, 1, n_atoms) - clipped_atoms = atoms[None, :, None] # (1, n_atoms, 1) + clipped_values = values.clamp(min=vmin, max=vmax)[ + :, None, : + ] # (batch_size, 1, n_atoms) + clipped_atoms = atoms[None, :, None] # (1, n_atoms, 1) # distance between atom values in support - d_pos = (d_pos - atoms)[None, :, None] # atoms[i + 1] - atoms[i], shape (1, n_atoms, 1) - d_neg = (atoms - d_neg)[None, :, None] # atoms[i] - atoms[i - 1], shape (1, n_atoms, 1) + d_pos = (d_pos - atoms)[ + None, :, None + ] # atoms[i + 1] - atoms[i], shape (1, n_atoms, 1) + d_neg = (atoms - d_neg)[ + None, :, None + ] # atoms[i] - atoms[i - 1], shape (1, n_atoms, 1) # distances between all pairs of grid values - deltas = clipped_values - clipped_atoms # (batch_size, n_atoms, n_atoms) + deltas = clipped_values - clipped_atoms # (batch_size, n_atoms, n_atoms) # computes eqn (7) in distributional RL paper by doing the following - for each # output atom in @atoms, consider values that are close enough, and weight their - # probability mass contribution by the normalized distance in [0, 1] given + # probability mass contribution by the normalized distance in [0, 1] given # by (1. - (z_j - z_i) / (delta_z)). - d_sign = (deltas >= 0.).float() - delta_hat = (d_sign * deltas / d_pos) - ((1. - d_sign) * deltas / d_neg) - delta_hat = (1. - delta_hat).clamp(min=0., max=1.) + d_sign = (deltas >= 0.0).float() + delta_hat = (d_sign * deltas / d_pos) - ((1.0 - d_sign) * deltas / d_neg) + delta_hat = (1.0 - delta_hat).clamp(min=0.0, max=1.0) probabilities = probabilities[:, None, :] return (delta_hat * probabilities).sum(dim=2) diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py index 66fb1272..6d05ac0c 100644 --- a/robomimic/utils/obs_utils.py +++ b/robomimic/utils/obs_utils.py @@ -2,6 +2,7 @@ A collection of utilities for working with observation dictionaries and different kinds of modalities such as images. """ + import numpy as np from copy import deepcopy from collections import OrderedDict @@ -12,7 +13,7 @@ import robomimic.utils.tensor_utils as TU # MACRO FOR VALID IMAGE CHANNEL SIZES -VALID_IMAGE_CHANNEL_DIMS = {1, 3} # depth, rgb +VALID_IMAGE_CHANNEL_DIMS = {1, 3} # depth, rgb # DO NOT MODIFY THIS! # This keeps track of observation types (modalities) - and is populated on call to @initialize_obs_utils_with_obs_specs. @@ -41,22 +42,28 @@ # in their config, without having to manually register their class internally. # This also future-proofs us for any additional encoder / randomizer classes we would # like to add ourselves. -OBS_ENCODER_CORES = {"None": None} # Include default None -OBS_RANDOMIZERS = {"None": None} # Include default None +OBS_ENCODER_CORES = {"None": None} # Include default None +OBS_RANDOMIZERS = {"None": None} # Include default None def register_obs_key(target_class): - assert target_class not in OBS_MODALITY_CLASSES, f"Already registered modality {target_class}!" + assert ( + target_class not in OBS_MODALITY_CLASSES + ), f"Already registered modality {target_class}!" 
OBS_MODALITY_CLASSES[target_class.name] = target_class def register_encoder_core(target_class): - assert target_class not in OBS_ENCODER_CORES, f"Already registered obs encoder core {target_class}!" + assert ( + target_class not in OBS_ENCODER_CORES + ), f"Already registered obs encoder core {target_class}!" OBS_ENCODER_CORES[target_class.__name__] = target_class def register_randomizer(target_class): - assert target_class not in OBS_RANDOMIZERS, f"Already registered obs randomizer {target_class}!" + assert ( + target_class not in OBS_RANDOMIZERS + ), f"Already registered obs randomizer {target_class}!" OBS_RANDOMIZERS[target_class.__name__] = target_class @@ -68,11 +75,14 @@ class ObservationKeyToModalityDict(dict): config. Thus, this dictionary will automatically handle those keys by implicitly associating them with the low_dim modality. """ + def __getitem__(self, item): # If a key doesn't already exist, warn the user and add default mapping if item not in self.keys(): - print(f"ObservationKeyToModalityDict: {item} not found," - f" adding {item} to mapping with assumed low_dim modality!") + print( + f"ObservationKeyToModalityDict: {item} not found," + f" adding {item} to mapping with assumed low_dim modality!" + ) self.__setitem__(item, "low_dim") return super(ObservationKeyToModalityDict, self).__getitem__(item) @@ -94,19 +104,29 @@ def obs_encoder_kwargs_from_config(obs_encoder_config): obs_encoder_config.unlock() for obs_modality, encoder_kwargs in obs_encoder_config.items(): # First run some sanity checks and store the classes - for cls_name, cores in zip(("core", "obs_randomizer"), (OBS_ENCODER_CORES, OBS_RANDOMIZERS)): + for cls_name, cores in zip( + ("core", "obs_randomizer"), (OBS_ENCODER_CORES, OBS_RANDOMIZERS) + ): # Make sure the requested encoder for each obs_modality exists cfg_cls = encoder_kwargs[f"{cls_name}_class"] if cfg_cls is not None: - assert cfg_cls in cores, f"No {cls_name} class with name {cfg_cls} found, must register this class before" \ + assert cfg_cls in cores, ( + f"No {cls_name} class with name {cfg_cls} found, must register this class before" f"creating model!" 
+ ) # encoder_kwargs[f"{cls_name}_class"] = cores[cfg_cls] # Process core and randomizer kwargs - encoder_kwargs.core_kwargs = dict() if encoder_kwargs.core_kwargs is None else \ - deepcopy(encoder_kwargs.core_kwargs) - encoder_kwargs.obs_randomizer_kwargs = dict() if encoder_kwargs.obs_randomizer_kwargs is None else \ - deepcopy(encoder_kwargs.obs_randomizer_kwargs) + encoder_kwargs.core_kwargs = ( + dict() + if encoder_kwargs.core_kwargs is None + else deepcopy(encoder_kwargs.core_kwargs) + ) + encoder_kwargs.obs_randomizer_kwargs = ( + dict() + if encoder_kwargs.obs_randomizer_kwargs is None + else deepcopy(encoder_kwargs.obs_randomizer_kwargs) + ) # Re-lock keys obs_encoder_config.lock() @@ -200,12 +220,16 @@ def initialize_obs_utils_with_obs_specs(obs_modality_specs): OBS_KEYS_TO_MODALITIES[obs_key] = obs_modality # otherwise, run sanity check to make sure we don't have conflicting, duplicate entries else: - assert OBS_KEYS_TO_MODALITIES[obs_key] == obs_modality, \ - f"Cannot register obs key {obs_key} with modality {obs_modality}; " \ + assert OBS_KEYS_TO_MODALITIES[obs_key] == obs_modality, ( + f"Cannot register obs key {obs_key} with modality {obs_modality}; " f"already exists with corresponding modality {OBS_KEYS_TO_MODALITIES[obs_key]}" + ) # remove duplicate entries and store in global mapping - OBS_MODALITIES_TO_KEYS = { obs_modality : list(set(obs_modality_mapping[obs_modality])) for obs_modality in obs_modality_mapping } + OBS_MODALITIES_TO_KEYS = { + obs_modality: list(set(obs_modality_mapping[obs_modality])) + for obs_modality in obs_modality_mapping + } print("\n============= Initialized Observation Utils with Obs Spec =============\n") for obs_modality, obs_keys in OBS_MODALITIES_TO_KEYS.items(): @@ -235,14 +259,14 @@ def initialize_obs_utils_with_config(config): """ if config.algo_name == "hbc": obs_modality_specs = [ - config.observation.planner.modalities, + config.observation.planner.modalities, config.observation.actor.modalities, ] obs_encoder_config = config.observation.actor.encoder elif config.algo_name == "iris": obs_modality_specs = [ - config.observation.value_planner.planner.modalities, - config.observation.value_planner.value.modalities, + config.observation.value_planner.planner.modalities, + config.observation.value_planner.value.modalities, config.observation.actor.modalities, ] obs_encoder_config = config.observation.actor.encoder @@ -261,7 +285,9 @@ def key_is_obs_modality(key, obs_modality): key (str): obs key name to check obs_modality (str): observation modality - e.g.: "low_dim", "rgb" """ - assert OBS_KEYS_TO_MODALITIES is not None, "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" + assert ( + OBS_KEYS_TO_MODALITIES is not None + ), "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" return OBS_KEYS_TO_MODALITIES[key] == obs_modality @@ -277,11 +303,11 @@ def center_crop(im, t_h, t_w): Returns: im (np.array or torch.Tensor): center cropped image """ - assert(im.shape[-3] >= t_h and im.shape[-2] >= t_w) - assert(im.shape[-1] in [1, 3]) + assert im.shape[-3] >= t_h and im.shape[-2] >= t_w + assert im.shape[-1] in [1, 3] crop_h = int((im.shape[-3] - t_h) / 2) crop_w = int((im.shape[-2] - t_w) / 2) - return im[..., crop_h:crop_h + t_h, crop_w:crop_w + t_w, :] + return im[..., crop_h : crop_h + t_h, crop_w : crop_w + t_w, :] def batch_image_hwc_to_chw(im): @@ -342,7 +368,9 @@ def process_obs(obs, obs_modality=None, obs_key=None): Returns: processed_obs (np.array or torch.Tensor): processed observation """ - 
assert obs_modality is not None or obs_key is not None, "Either obs_modality or obs_key must be specified!" + assert ( + obs_modality is not None or obs_key is not None + ), "Either obs_modality or obs_key must be specified!" if obs_key is not None: obs_modality = OBS_KEYS_TO_MODALITIES[obs_key] return OBS_MODALITY_CLASSES[obs_modality].process_obs(obs) @@ -359,7 +387,9 @@ def process_obs_dict(obs_dict): Returns: new_dict (dict): dictionary where observation keys have been processed by their corresponding processors """ - return { k : process_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() } # shallow copy + return { + k: process_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() + } # shallow copy def process_frame(frame, channel_dim, scale): @@ -377,7 +407,7 @@ def process_frame(frame, channel_dim, scale): processed_frame (np.array or torch.Tensor): processed frame """ # Channel size should either be 3 (RGB) or 1 (depth) - assert (frame.shape[-1] == channel_dim) + assert frame.shape[-1] == channel_dim frame = TU.to_float(frame) if scale is not None: frame = frame / scale @@ -404,7 +434,9 @@ def unprocess_obs(obs, obs_modality=None, obs_key=None): Returns: unprocessed_obs (np.array or torch.Tensor): unprocessed observation """ - assert obs_modality is not None or obs_key is not None, "Either obs_modality or obs_key must be specified!" + assert ( + obs_modality is not None or obs_key is not None + ), "Either obs_modality or obs_key must be specified!" if obs_key is not None: obs_modality = OBS_KEYS_TO_MODALITIES[obs_key] return OBS_MODALITY_CLASSES[obs_modality].unprocess_obs(obs) @@ -423,7 +455,9 @@ def unprocess_obs_dict(obs_dict): new_dict (dict): dictionary where observation keys have been unprocessed by their respective unprocessor methods """ - return { k : unprocess_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() } # shallow copy + return { + k: unprocess_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() + } # shallow copy def unprocess_frame(frame, channel_dim, scale): @@ -440,7 +474,7 @@ def unprocess_frame(frame, channel_dim, scale): unprocessed_frame (np.array or torch.Tensor): frame passed through inverse operation of @process_frame """ - assert frame.shape[-3] == channel_dim # check for channel dimension + assert frame.shape[-3] == channel_dim # check for channel dimension frame = batch_image_chw_to_hwc(frame) if scale is not None: frame = scale * frame @@ -464,7 +498,7 @@ def get_processed_shape(obs_modality, input_shape): def normalize_obs(obs_dict, obs_normalization_stats): """ - Normalize observations using the provided "mean" and "std" entries + Normalize observations using the provided "mean" and "std" entries for each observation key. The observation dictionary will be modified in-place. @@ -492,10 +526,12 @@ def normalize_obs(obs_dict, obs_normalization_stats): m_num_dims = len(mean.shape) shape_len_diff = len(obs_dict[m].shape) - m_num_dims assert shape_len_diff >= 0, "shape length mismatch in @normalize_obs" - assert obs_dict[m].shape[-m_num_dims:] == mean.shape, "shape mismatch in @normalize_obs" + assert ( + obs_dict[m].shape[-m_num_dims:] == mean.shape + ), "shape mismatch in @normalize_obs" # Obs can have one or more leading batch dims - prepare for broadcasting. - # + # # As an example, if the obs has shape [B, T, D] and our mean / std stats are shape [D] # then we should pad the stats to shape [1, 1, D]. 
reshape_padding = tuple([1] * shape_len_diff) @@ -526,7 +562,7 @@ def repeat_and_stack_observation(obs_dict, n): Given an observation dictionary and a desired repeat value @n, this function will return a new observation dictionary where each modality is repeated @n times and the copies are - stacked in the first dimension. + stacked in the first dimension. For example, if a batch of 3 observations comes in, and n is 2, the output will look like [ob1; ob1; ob2; ob2; ob3; ob3] in @@ -546,7 +582,7 @@ def repeat_and_stack_observation(obs_dict, n): def crop_image_from_indices(images, crop_indices, crop_height, crop_width): """ - Crops images at the locations specified by @crop_indices. Crops will be + Crops images at the locations specified by @crop_indices. Crops will be taken across all channels. Args: @@ -574,7 +610,9 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): assert crop_indices.shape[-1] == 2 ndim_im_shape = len(images.shape) ndim_indices_shape = len(crop_indices.shape) - assert (ndim_im_shape == ndim_indices_shape + 1) or (ndim_im_shape == ndim_indices_shape + 2) + assert (ndim_im_shape == ndim_indices_shape + 1) or ( + ndim_im_shape == ndim_indices_shape + 2 + ) # maybe pad so that @crop_indices is shape [..., N, 2] is_padded = False @@ -604,20 +642,30 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): crop_ind_grid_w = torch.arange(crop_width).to(device) crop_ind_grid_w = TU.unsqueeze_expand_at(crop_ind_grid_w, size=crop_height, dim=0) # combine into shape [CH, CW, 2] - crop_in_grid = torch.cat((crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1) + crop_in_grid = torch.cat( + (crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1 + ) # Add above grid with the offset index of each sampled crop to get 2d indices for each crop. # After broadcasting, this will be shape [..., N, CH, CW, 2] and each crop has a [CH, CW, 2] # shape array that tells us which pixels from the corresponding source image to grab. grid_reshape = [1] * len(crop_indices.shape[:-1]) + [crop_height, crop_width, 2] - all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape(grid_reshape) + all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape( + grid_reshape + ) # For using @torch.gather, convert to flat indices from 2D indices, and also - # repeat across the channel dimension. To get flat index of each pixel to grab for + # repeat across the channel dimension. 
To get flat index of each pixel to grab for # each sampled crop, we just use the mapping: ind = h_ind * @image_w + w_ind - all_crop_inds = all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] # shape [..., N, CH, CW] - all_crop_inds = TU.unsqueeze_expand_at(all_crop_inds, size=image_c, dim=-3) # shape [..., N, C, CH, CW] - all_crop_inds = TU.flatten(all_crop_inds, begin_axis=-2) # shape [..., N, C, CH * CW] + all_crop_inds = ( + all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] + ) # shape [..., N, CH, CW] + all_crop_inds = TU.unsqueeze_expand_at( + all_crop_inds, size=image_c, dim=-3 + ) # shape [..., N, C, CH, CW] + all_crop_inds = TU.flatten( + all_crop_inds, begin_axis=-2 + ) # shape [..., N, C, CH * CW] # Repeat and flatten the source images -> [..., N, C, H * W] and then use gather to index with crop pixel inds images_to_crop = TU.unsqueeze_expand_at(images, size=num_crops, dim=-4) @@ -625,8 +673,12 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): crops = torch.gather(images_to_crop, dim=-1, index=all_crop_inds) # [..., N, C, CH * CW] -> [..., N, C, CH, CW] reshape_axis = len(crops.shape) - 1 - crops = TU.reshape_dimensions(crops, begin_axis=reshape_axis, end_axis=reshape_axis, - target_dims=(crop_height, crop_width)) + crops = TU.reshape_dimensions( + crops, + begin_axis=reshape_axis, + end_axis=reshape_axis, + target_dims=(crop_height, crop_width), + ) if is_padded: # undo padding -> [..., C, CH, CW] @@ -634,7 +686,9 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): return crops -def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_enc=False): +def sample_random_image_crops( + images, crop_height, crop_width, num_crops, pos_enc=False +): """ For each image, randomly sample @num_crops crops of size (@crop_height, @crop_width), from @images. @@ -643,18 +697,18 @@ def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_en images (torch.Tensor): batch of images of shape [..., C, H, W] crop_height (int): height of crop to take - + crop_width (int): width of crop to take num_crops (n): number of crops to sample - pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial + pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial encoding of the original source pixel locations. This means that the - output crops will contain information about where in the source image + output crops will contain information about where in the source image it was sampled from. 
Returns: - crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) + crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) if @pos_enc is False, otherwise (..., @num_crops, C + 2, @crop_height, @crop_width) crop_inds (torch.Tensor): sampled crop indices of shape (..., N, 2) @@ -669,7 +723,7 @@ def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_en pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(device) / float(h) pos_x = pos_x.float().to(device) / float(w) - position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] + position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] # unsqueeze and expand to match leading dimensions -> shape [..., C, H, W] leading_shape = source_im.shape[:-3] @@ -685,20 +739,26 @@ def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_en max_sample_w = image_w - crop_width # Sample crop locations for all tensor dimensions up to the last 3, which are [C, H, W]. - # Each gets @num_crops samples - typically this will just be the batch dimension (B), so + # Each gets @num_crops samples - typically this will just be the batch dimension (B), so # we will sample [B, N] indices, but this supports having more than one leading dimension, # or possibly no leading dimension. # # Trick: sample in [0, 1) with rand, then re-scale to [0, M) and convert to long to get sampled ints - crop_inds_h = (max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() - crop_inds_w = (max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() - crop_inds = torch.cat((crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1) # shape [..., N, 2] + crop_inds_h = ( + max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device) + ).long() + crop_inds_w = ( + max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device) + ).long() + crop_inds = torch.cat( + (crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1 + ) # shape [..., N, 2] crops = crop_image_from_indices( - images=source_im, - crop_indices=crop_inds, - crop_height=crop_height, - crop_width=crop_width, + images=source_im, + crop_indices=crop_inds, + crop_height=crop_height, + crop_width=crop_width, ) return crops, crop_inds @@ -709,6 +769,7 @@ class Modality: Observation Modality class to encapsulate necessary functions needed to process observations of this modality """ + # observation keys to associate with this modality keys = set() @@ -725,7 +786,9 @@ def __init_subclass__(cls, **kwargs): """ Hook method to automatically register all valid subclasses so we can keep track of valid modalities """ - assert cls.name is not None, f"Name of modality {cls.__name__} must be specified!" + assert ( + cls.name is not None + ), f"Name of modality {cls.__name__} must be specified!" 
register_obs_key(cls) @classmethod @@ -820,8 +883,11 @@ def process_obs(cls, obs): Returns: np.array or torch.Tensor: processed observation """ - processor = cls._custom_obs_processor if \ - cls._custom_obs_processor is not None else cls._default_obs_processor + processor = ( + cls._custom_obs_processor + if cls._custom_obs_processor is not None + else cls._default_obs_processor + ) return processor(obs) @classmethod @@ -835,8 +901,11 @@ def unprocess_obs(cls, obs): Returns: np.array or torch.Tensor: unprocessed observation """ - unprocessor = cls._custom_obs_unprocessor if \ - cls._custom_obs_unprocessor is not None else cls._default_obs_unprocessor + unprocessor = ( + cls._custom_obs_unprocessor + if cls._custom_obs_unprocessor is not None + else cls._default_obs_unprocessor + ) return unprocessor(obs) @classmethod @@ -866,6 +935,7 @@ class ImageModality(Modality): """ Modality for RGB image observations """ + name = "rgb" @classmethod @@ -881,7 +951,7 @@ def _default_obs_processor(cls, obs): Returns: processed_obs (np.array or torch.Tensor): processed image """ - return process_frame(frame=obs, channel_dim=3, scale=255.) + return process_frame(frame=obs, channel_dim=3, scale=255.0) @classmethod def _default_obs_unprocessor(cls, obs): @@ -896,13 +966,14 @@ def _default_obs_unprocessor(cls, obs): unprocessed_obs (np.array or torch.Tensor): image passed through inverse operation of @process_frame """ - return TU.to_uint8(unprocess_frame(frame=obs, channel_dim=3, scale=255.)) + return TU.to_uint8(unprocess_frame(frame=obs, channel_dim=3, scale=255.0)) class DepthModality(Modality): """ Modality for depth observations """ + name = "depth" @classmethod @@ -918,7 +989,7 @@ def _default_obs_processor(cls, obs): Returns: processed_obs (np.array or torch.Tensor): processed depth """ - return process_frame(frame=obs, channel_dim=1, scale=1.) + return process_frame(frame=obs, channel_dim=1, scale=1.0) @classmethod def _default_obs_unprocessor(cls, obs): @@ -933,27 +1004,28 @@ def _default_obs_unprocessor(cls, obs): unprocessed_obs (np.array or torch.Tensor): depth passed through inverse operation of @process_depth """ - return unprocess_frame(frame=obs, channel_dim=1, scale=1.) 
+ return unprocess_frame(frame=obs, channel_dim=1, scale=1.0) class ScanModality(Modality): """ Modality for scan observations """ + name = "scan" @classmethod def _default_obs_processor(cls, obs): # Channel swaps ([...,] L, C) --> ([...,] C, L) - + # First, add extra dimension at 2nd to last index to treat this as a frame shape = obs.shape new_shape = [*shape[:-2], 1, *shape[-2:]] obs = obs.reshape(new_shape) - + # Convert shape obs = batch_image_hwc_to_chw(obs) - + # Remove extra dimension (it's the second from last dimension) obs = obs.squeeze(-2) return obs @@ -961,7 +1033,7 @@ def _default_obs_processor(cls, obs): @classmethod def _default_obs_unprocessor(cls, obs): # Channel swaps ([B,] C, L) --> ([B,] L, C) - + # First, add extra dimension at 1st index to treat this as a frame shape = obs.shape new_shape = [*shape[:-2], 1, *shape[-2:]] @@ -979,6 +1051,7 @@ class LowDimModality(Modality): """ Modality for low dimensional observations """ + name = "low_dim" @classmethod diff --git a/robomimic/utils/python_utils.py b/robomimic/utils/python_utils.py index 5bc71bd1..ea8fd40c 100644 --- a/robomimic/utils/python_utils.py +++ b/robomimic/utils/python_utils.py @@ -1,6 +1,7 @@ """ Set of general purpose utility functions for easier interfacing with Python API """ + import inspect from copy import deepcopy import robomimic.macros as Macros @@ -66,8 +67,12 @@ def extract_class_init_kwargs_from_dict(cls, dic, copy=False, verbose=False): keys_not_in_cls = [k for k in dic if k not in cls_keys] keys_not_in_dic = [k for k in cls_keys if k not in list(dic.keys())] if len(keys_not_in_cls) > 0: - print(f"Warning: For class {cls.__name__}, got unknown keys: {keys_not_in_cls} ") + print( + f"Warning: For class {cls.__name__}, got unknown keys: {keys_not_in_cls} " + ) if len(keys_not_in_dic) > 0: - print(f"Warning: For class {cls.__name__}, got missing keys: {keys_not_in_dic} ") + print( + f"Warning: For class {cls.__name__}, got missing keys: {keys_not_in_dic} " + ) - return subdic \ No newline at end of file + return subdic diff --git a/robomimic/utils/tensor_utils.py b/robomimic/utils/tensor_utils.py index ec2063b2..8e720d91 100644 --- a/robomimic/utils/tensor_utils.py +++ b/robomimic/utils/tensor_utils.py @@ -2,6 +2,7 @@ A collection of utilities for working with nested tensor structures consisting of numpy arrays and torch tensors. """ + import collections import numpy as np import torch @@ -9,23 +10,27 @@ def recursive_dict_list_tuple_apply(x, type_func_dict): """ - Recursively apply functions to a nested dictionary or list or tuple, given a dictionary of + Recursively apply functions to a nested dictionary or list or tuple, given a dictionary of {data_type: function_to_apply}. Args: x (dict or list or tuple): a possibly nested dictionary or list or tuple - type_func_dict (dict): a mapping from data types to the functions to be + type_func_dict (dict): a mapping from data types to the functions to be applied for each data type. 
Returns: y (dict or list or tuple): new nested dict-list-tuple """ - assert(list not in type_func_dict) - assert(tuple not in type_func_dict) - assert(dict not in type_func_dict) + assert list not in type_func_dict + assert tuple not in type_func_dict + assert dict not in type_func_dict if isinstance(x, (dict, collections.OrderedDict)): - new_x = collections.OrderedDict() if isinstance(x, collections.OrderedDict) else dict() + new_x = ( + collections.OrderedDict() + if isinstance(x, collections.OrderedDict) + else dict() + ) for k, v in x.items(): new_x[k] = recursive_dict_list_tuple_apply(v, type_func_dict) return new_x @@ -39,8 +44,7 @@ def recursive_dict_list_tuple_apply(x, type_func_dict): if isinstance(x, t): return f(x) else: - raise NotImplementedError( - 'Cannot handle data type %s' % str(type(x))) + raise NotImplementedError("Cannot handle data type %s" % str(type(x))) def map_tensor(x, func): @@ -60,7 +64,7 @@ def map_tensor(x, func): { torch.Tensor: func, type(None): lambda x: x, - } + }, ) @@ -81,13 +85,13 @@ def map_ndarray(x, func): { np.ndarray: func, type(None): lambda x: x, - } + }, ) def map_tensor_ndarray(x, tensor_func, ndarray_func): """ - Apply function @tensor_func to torch.Tensor objects and @ndarray_func to + Apply function @tensor_func to torch.Tensor objects and @ndarray_func to np.ndarray objects in a nested dictionary or list or tuple. Args: @@ -104,7 +108,7 @@ def map_tensor_ndarray(x, tensor_func, ndarray_func): torch.Tensor: tensor_func, np.ndarray: ndarray_func, type(None): lambda x: x, - } + }, ) @@ -125,7 +129,7 @@ def clone(x): torch.Tensor: lambda x: x.clone(), np.ndarray: lambda x: x.copy(), type(None): lambda x: x, - } + }, ) @@ -144,13 +148,13 @@ def detach(x): x, { torch.Tensor: lambda x: x.detach(), - } + }, ) def to_batch(x): """ - Introduces a leading batch dimension of 1 for all torch tensors and numpy + Introduces a leading batch dimension of 1 for all torch tensors and numpy arrays in nested dictionary or list or tuple and returns a new nested structure. Args: @@ -165,13 +169,13 @@ def to_batch(x): torch.Tensor: lambda x: x[None, ...], np.ndarray: lambda x: x[None, ...], type(None): lambda x: x, - } + }, ) def to_sequence(x): """ - Introduces a time dimension of 1 at dimension 1 for all torch tensors and numpy + Introduces a time dimension of 1 at dimension 1 for all torch tensors and numpy arrays in nested dictionary or list or tuple and returns a new nested structure. Args: @@ -186,7 +190,7 @@ def to_sequence(x): torch.Tensor: lambda x: x[:, None, ...], np.ndarray: lambda x: x[:, None, ...], type(None): lambda x: x, - } + }, ) @@ -208,7 +212,7 @@ def index_at_time(x, ind): torch.Tensor: lambda x: x[:, ind, ...], np.ndarray: lambda x: x[:, ind, ...], type(None): lambda x: x, - } + }, ) @@ -230,13 +234,13 @@ def unsqueeze(x, dim): torch.Tensor: lambda x: x.unsqueeze(dim=dim), np.ndarray: lambda x: np.expand_dims(x, axis=dim), type(None): lambda x: x, - } + }, ) def contiguous(x): """ - Makes all torch tensors and numpy arrays contiguous in nested dictionary or + Makes all torch tensors and numpy arrays contiguous in nested dictionary or list or tuple and returns a new nested structure. 
Args: @@ -251,7 +255,7 @@ def contiguous(x): torch.Tensor: lambda x: x.contiguous(), np.ndarray: lambda x: np.ascontiguousarray(x), type(None): lambda x: x, - } + }, ) @@ -272,14 +276,14 @@ def to_device(x, device): { torch.Tensor: lambda x, d=device: x.to(d), type(None): lambda x: x, - } + }, ) def to_tensor(x): """ Converts all numpy arrays in nested dictionary or list or tuple to - torch tensors (and leaves existing torch Tensors as-is), and returns + torch tensors (and leaves existing torch Tensors as-is), and returns a new nested structure. Args: @@ -294,14 +298,14 @@ def to_tensor(x): torch.Tensor: lambda x: x, np.ndarray: lambda x: torch.from_numpy(x), type(None): lambda x: x, - } + }, ) def to_numpy(x): """ Converts all torch tensors in nested dictionary or list or tuple to - numpy (and leaves existing numpy arrays as-is), and returns + numpy (and leaves existing numpy arrays as-is), and returns a new nested structure. Args: @@ -310,24 +314,26 @@ def to_numpy(x): Returns: y (dict or list or tuple): new nested dict-list-tuple """ + def f(tensor): if tensor.is_cuda: return tensor.detach().cpu().numpy() else: return tensor.detach().numpy() + return recursive_dict_list_tuple_apply( x, { torch.Tensor: f, np.ndarray: lambda x: x, type(None): lambda x: x, - } + }, ) def to_list(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to a list, and returns a new nested structure. Useful for json encoding. @@ -337,24 +343,26 @@ def to_list(x): Returns: y (dict or list or tuple): new nested dict-list-tuple """ + def f(tensor): if tensor.is_cuda: return tensor.detach().cpu().numpy().tolist() else: return tensor.detach().numpy().tolist() + return recursive_dict_list_tuple_apply( x, { torch.Tensor: f, np.ndarray: lambda x: x.tolist(), type(None): lambda x: x, - } + }, ) def to_float(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to float type entries, and returns a new nested structure. Args: @@ -369,13 +377,13 @@ def to_float(x): torch.Tensor: lambda x: x.float(), np.ndarray: lambda x: x.astype(np.float32), type(None): lambda x: x, - } + }, ) def to_uint8(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to uint8 type entries, and returns a new nested structure. Args: @@ -390,13 +398,13 @@ def to_uint8(x): torch.Tensor: lambda x: x.byte(), np.ndarray: lambda x: x.astype(np.uint8), type(None): lambda x: x, - } + }, ) def to_torch(x, device): """ - Converts all numpy arrays and torch tensors in nested dictionary or list or tuple to + Converts all numpy arrays and torch tensors in nested dictionary or list or tuple to torch tensors on device @device and returns a new nested structure. Args: @@ -427,7 +435,7 @@ def to_one_hot_single(tensor, num_class): def to_one_hot(tensor, num_class): """ - Convert all tensors in nested dictionary or list or tuple to one-hot representation, + Convert all tensors in nested dictionary or list or tuple to one-hot representation, assuming a certain number of total class labels. 
Args: @@ -471,7 +479,7 @@ def flatten(x, begin_axis=1): x, { torch.Tensor: lambda x, b=begin_axis: flatten_single(x, begin_axis=b), - } + }, ) @@ -489,10 +497,10 @@ def reshape_dimensions_single(x, begin_axis, end_axis, target_dims): Returns: y (torch.Tensor): reshaped tensor """ - assert(begin_axis <= end_axis) - assert(begin_axis >= 0) - assert(end_axis < len(x.shape)) - assert(isinstance(target_dims, (tuple, list))) + assert begin_axis <= end_axis + assert begin_axis >= 0 + assert end_axis < len(x.shape) + assert isinstance(target_dims, (tuple, list)) s = x.shape final_s = [] for i in range(len(s)): @@ -505,9 +513,9 @@ def reshape_dimensions_single(x, begin_axis, end_axis, target_dims): def reshape_dimensions(x, begin_axis, end_axis, target_dims): """ - Reshape selected dimensions for all tensors in nested dictionary or list or tuple + Reshape selected dimensions for all tensors in nested dictionary or list or tuple to a target dimension. - + Args: x (dict or list or tuple): a possibly nested dictionary or list or tuple begin_axis (int): begin dimension @@ -522,11 +530,13 @@ def reshape_dimensions(x, begin_axis, end_axis, target_dims): x, { torch.Tensor: lambda x, b=begin_axis, e=end_axis, t=target_dims: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=t), + x, begin_axis=b, end_axis=e, target_dims=t + ), np.ndarray: lambda x, b=begin_axis, e=end_axis, t=target_dims: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=t), + x, begin_axis=b, end_axis=e, target_dims=t + ), type(None): lambda x: x, - } + }, ) @@ -547,11 +557,13 @@ def join_dimensions(x, begin_axis, end_axis): x, { torch.Tensor: lambda x, b=begin_axis, e=end_axis: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=[-1]), + x, begin_axis=b, end_axis=e, target_dims=[-1] + ), np.ndarray: lambda x, b=begin_axis, e=end_axis: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=[-1]), + x, begin_axis=b, end_axis=e, target_dims=[-1] + ), type(None): lambda x: x, - } + }, ) @@ -660,13 +672,15 @@ def named_reduce(x, reduction, dim): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - return map_tensor(x, func=lambda t, r=reduction, d=dim: named_reduce_single(t, r, d)) + return map_tensor( + x, func=lambda t, r=reduction, d=dim: named_reduce_single(t, r, d) + ) def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): """ This function indexes out a target dimension of a tensor in a structured way, - by allowing a different value to be selected for each member of a flat index + by allowing a different value to be selected for each member of a flat index tensor (@indices) corresponding to a source dimension. This can be interpreted as moving along the source dimension, using the corresponding index value in @indices to select values for all other dimensions outside of the @@ -680,7 +694,7 @@ def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): from the other dimensions indices (torch.Tensor): flat index tensor with same shape as tensor @x along @source_dim - + Returns: y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out """ @@ -705,7 +719,7 @@ def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): def gather_along_dim_with_dim(x, target_dim, source_dim, indices): """ - Apply @gather_along_dim_with_dim_single to all tensors in a nested + Apply @gather_along_dim_with_dim_single to all tensors in a nested dictionary or list or tuple. 
Args: @@ -719,13 +733,17 @@ def gather_along_dim_with_dim(x, target_dim, source_dim, indices): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - return map_tensor(x, - lambda y, t=target_dim, s=source_dim, i=indices: gather_along_dim_with_dim_single(y, t, s, i)) - + return map_tensor( + x, + lambda y, t=target_dim, s=source_dim, i=indices: gather_along_dim_with_dim_single( + y, t, s, i + ), + ) + def gather_sequence_single(seq, indices): """ - Given a tensor with leading dimensions [B, T, ...], gather an element from each sequence in + Given a tensor with leading dimensions [B, T, ...], gather an element from each sequence in the batch given an index for each sequence. Args: @@ -735,7 +753,9 @@ def gather_sequence_single(seq, indices): Return: y (torch.Tensor): indexed tensor of shape [B, ....] """ - return gather_along_dim_with_dim_single(seq, target_dim=1, source_dim=0, indices=indices) + return gather_along_dim_with_dim_single( + seq, target_dim=1, source_dim=0, indices=indices + ) def gather_sequence(seq, indices): @@ -808,12 +828,14 @@ def pad_sequence(seq, padding, batched=False, pad_same=True, pad_values=None): return recursive_dict_list_tuple_apply( seq, { - torch.Tensor: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: - pad_sequence_single(x, p, b, ps, pv), - np.ndarray: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: - pad_sequence_single(x, p, b, ps, pv), + torch.Tensor: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: pad_sequence_single( + x, p, b, ps, pv + ), + np.ndarray: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: pad_sequence_single( + x, p, b, ps, pv + ), type(None): lambda x: x, - } + }, ) @@ -832,7 +854,7 @@ def assert_size_at_dim_single(x, size, dim, msg): def assert_size_at_dim(x, size, dim, msg): """ - Ensure that arrays and tensors in nested dictionary or list or tuple have + Ensure that arrays and tensors in nested dictionary or list or tuple have size @size in dim @dim. Args: @@ -860,7 +882,7 @@ def get_shape(x): torch.Tensor: lambda x: x.shape, np.ndarray: lambda x: x.shape, type(None): lambda x: x, - } + }, ) @@ -886,7 +908,7 @@ def list_of_flat_dict_to_dict_of_list(list_of_dict): return dic -def flatten_nested_dict_list(d, parent_key='', sep='_', item_key=''): +def flatten_nested_dict_list(d, parent_key="", sep="_", item_key=""): """ Flatten a nested dict or list to a list. @@ -926,7 +948,9 @@ def flatten_nested_dict_list(d, parent_key='', sep='_', item_key=''): return [(new_key, d)] -def time_distributed(inputs, op, activation=None, inputs_as_kwargs=False, inputs_as_args=False, **kwargs): +def time_distributed( + inputs, op, activation=None, inputs_as_kwargs=False, inputs_as_args=False, **kwargs +): """ Apply function @op to all tensors in nested dictionary or list or tuple @inputs in both the batch (B) and time (T) dimension, where the tensors are expected to have shape [B, T, ...]. 
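# ---------------------------------------------------------------------------
# [Editor's aside, not part of the patch] An illustrative sketch of the
# batch-time folding pattern that time_distributed implements: the leading
# [B, T] dimensions are merged into a single batch dimension, the op is
# applied once, and the result is reshaped back to [B, T, ...]. The module
# and sizes below are hypothetical stand-ins.
#
#     import torch
#
#     B, T, D = 4, 10, 32
#     x = torch.randn(B, T, D)
#     op = torch.nn.Linear(D, 8)
#
#     y = op(x.reshape(B * T, D))  # fold time into batch: [B*T, D] -> [B*T, 8]
#     y = y.reshape(B, T, 8)       # unfold back to [B, T, 8]
# ---------------------------------------------------------------------------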
@@ -956,5 +980,7 @@ def time_distributed(inputs, op, activation=None, inputs_as_kwargs=False, inputs if activation is not None: outputs = map_tensor(outputs, activation) - outputs = reshape_dimensions(outputs, begin_axis=0, end_axis=0, target_dims=(batch_size, seq_len)) + outputs = reshape_dimensions( + outputs, begin_axis=0, end_axis=0, target_dims=(batch_size, seq_len) + ) return outputs diff --git a/robomimic/utils/test_utils.py b/robomimic/utils/test_utils.py index 86f125e0..0cfdca23 100644 --- a/robomimic/utils/test_utils.py +++ b/robomimic/utils/test_utils.py @@ -1,6 +1,7 @@ """ Utilities for testing algorithm implementations - used mainly by scripts in tests directory. """ + import os import json import shutil @@ -42,7 +43,7 @@ def maybe_remove_file(file_to_remove): def example_dataset_path(): """ Path to dataset to use for testing and example purposes. It should - exist under the tests/assets directory, and will be downloaded + exist under the tests/assets directory, and will be downloaded from a server if it does not exist. """ dataset_folder = os.path.join(robomimic.__path__[0], "../tests/assets/") @@ -51,7 +52,7 @@ def example_dataset_path(): print("\nWARNING: test hdf5 does not exist! Downloading from server...") os.makedirs(dataset_folder, exist_ok=True) FileUtils.download_url( - url="http://downloads.cs.stanford.edu/downloads/rt_benchmark/test_v141.hdf5", + url="http://downloads.cs.stanford.edu/downloads/rt_benchmark/test_v141.hdf5", download_dir=dataset_folder, ) return dataset_path @@ -66,9 +67,14 @@ def example_momart_dataset_path(): dataset_folder = os.path.join(robomimic.__path__[0], "../tests/assets/") dataset_path = os.path.join(dataset_folder, "test_momart.hdf5") if not os.path.exists(dataset_path): - user_response = input("\nWARNING: momart test hdf5 does not exist! We will download sample dataset. " - "This will take 0.6GB space. Proceed? y/n\n") - assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." + user_response = input( + "\nWARNING: momart test hdf5 does not exist! We will download sample dataset. " + "This will take 0.6GB space. Proceed? y/n\n" + ) + assert user_response.lower() in { + "yes", + "y", + }, f"Did not receive confirmation. Aborting download." print("\nDownloading from server...") @@ -111,8 +117,10 @@ def get_base_config(algo_name): """ # we will load and override defaults from template config - base_config_path = os.path.join(robomimic.__path__[0], "exps/templates/{}.json".format(algo_name)) - with open(base_config_path, 'r') as f: + base_config_path = os.path.join( + robomimic.__path__[0], "exps/templates/{}.json".format(algo_name) + ) + with open(base_config_path, "r") as f: config = Config(json.load(f)) # small dataset with a handful of trajectories @@ -189,13 +197,15 @@ def checkpoint_path_from_test_run(): time_dir_names = [f.name for f in os.scandir(exp_dir) if f.is_dir()] assert len(time_dir_names) == 1 path_to_models = os.path.join(exp_dir, time_dir_names[0], "models") - epoch_name = [f.name for f in os.scandir(path_to_models) if f.name.startswith("model")][0] + epoch_name = [ + f.name for f in os.scandir(path_to_models) if f.name.startswith("model") + ][0] return os.path.join(path_to_models, epoch_name) def test_eval_agent_from_checkpoint(ckpt_path, device): """ - Test loading a model from checkpoint and running a rollout with the + Test loading a model from checkpoint and running a rollout with the trained agent for a small number of steps. 
Args: @@ -205,7 +215,9 @@ def test_eval_agent_from_checkpoint(ckpt_path, device): """ # get policy and env from checkpoint - policy, ckpt_dict = FileUtils.policy_from_checkpoint(ckpt_path=ckpt_path, device=device, verbose=True) + policy, ckpt_dict = FileUtils.policy_from_checkpoint( + ckpt_path=ckpt_path, device=device, verbose=True + ) env, _ = FileUtils.env_from_checkpoint(ckpt_dict=ckpt_dict, verbose=True) # run a test rollout @@ -239,7 +251,9 @@ def test_run(base_config, config_modifier): """ try: # get config - config = config_from_modifier(base_config=base_config, config_modifier=config_modifier) + config = config_from_modifier( + base_config=base_config, config_modifier=config_modifier + ) # set torch device device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda) @@ -256,7 +270,9 @@ def test_run(base_config, config_modifier): except Exception as e: # indicate failure by returning error string - ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") + ret = colored( + "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" + ) # make sure model directory is cleaned up before returning from this function maybe_remove_dir(temp_model_dir_path()) diff --git a/robomimic/utils/torch_utils.py b/robomimic/utils/torch_utils.py index 433c8797..2bccf92d 100644 --- a/robomimic/utils/torch_utils.py +++ b/robomimic/utils/torch_utils.py @@ -1,6 +1,7 @@ """ This file contains some PyTorch utilities. """ + import numpy as np import torch import torch.optim as optim @@ -16,9 +17,7 @@ def soft_update(source, target, tau): target (torch.nn.Module): target network to update """ for target_param, param in zip(target.parameters(), source.parameters()): - target_param.copy_( - target_param * (1.0 - tau) + param * tau - ) + target_param.copy_(target_param * (1.0 - tau) + param * tau) def hard_update(source, target): @@ -30,7 +29,7 @@ def hard_update(source, target): target (torch.nn.Module): target network to update parameters for """ for target_param, param in zip(target.parameters(), source.parameters()): - target_param.copy_(param) + target_param.copy_(param) def get_torch_device(try_to_use_cuda): @@ -88,7 +87,7 @@ def reparameterize(mu, logvar): def optimizer_from_optim_params(net_optim_params, net): """ - Helper function to return a torch Optimizer from the optim_params + Helper function to return a torch Optimizer from the optim_params section of the config for a particular network. Args: @@ -120,7 +119,7 @@ def optimizer_from_optim_params(net_optim_params, net): def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): """ - Helper function to return a LRScheduler from the optim_params + Helper function to return a LRScheduler from the optim_params section of the config for a particular network. Returns None if a scheduler is not needed. 
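For context, the optim_params fragment this helper reads typically looks like the sketch below; "scheduler_type" and "epoch_schedule" are the keys consumed here, while the remaining keys are assumptions about the usual robomimic config layout:

    optim_params = {
        "learning_rate": {
            "initial": 1e-4,             # base learning rate (assumed key)
            "decay_factor": 0.1,         # decay multiplier (assumed key)
            "scheduler_type": "linear",  # or "multistep" (the default)
            "epoch_schedule": [100],     # [end_epoch] for "linear"; milestones for "multistep"
        },
    }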
@@ -136,7 +135,9 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): Returns: lr_scheduler (torch.optim.lr_scheduler or None): learning rate scheduler """ - lr_scheduler_type = net_optim_params["learning_rate"].get("scheduler_type", "multistep") + lr_scheduler_type = net_optim_params["learning_rate"].get( + "scheduler_type", "multistep" + ) epoch_schedule = net_optim_params["learning_rate"]["epoch_schedule"] lr_scheduler = None @@ -144,7 +145,7 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): if lr_scheduler_type == "linear": assert len(epoch_schedule) == 1 end_epoch = epoch_schedule[0] - + return optim.lr_scheduler.LinearLR( optimizer, start_factor=1.0, @@ -159,7 +160,7 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): ) else: raise ValueError("Invalid LR scheduler type: {}".format(lr_scheduler_type)) - + return lr_scheduler @@ -192,7 +193,7 @@ def backprop_for_loss(net, optim, loss, max_grad_norm=None, retain_graph=False): torch.nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm) # compute grad norms - grad_norms = 0. + grad_norms = 0.0 for p in net.parameters(): # only clip gradients for parameters for which requires_grad is True if p.grad is not None: @@ -204,13 +205,15 @@ def backprop_for_loss(net, optim, loss, max_grad_norm=None, retain_graph=False): return grad_norms -class dummy_context_mgr(): +class dummy_context_mgr: """ A dummy context manager - useful for having conditional scopes (such as @maybe_no_grad). Nothing happens in this scope. """ + def __enter__(self): return None + def __exit__(self, exc_type, exc_value, traceback): return False diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index 58de759d..91bb0944 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -3,6 +3,7 @@ mainly consists of functions to assist with logging, rollouts, and the @run_epoch function, which is the core training logic for models in this repository. """ + import os import time import datetime @@ -30,14 +31,14 @@ def get_exp_dir(config, auto_remove_exp_dir=False): """ Create experiment directory from config. If an identical experiment directory - exists and @auto_remove_exp_dir is False (default), the function will prompt + exists and @auto_remove_exp_dir is False (default), the function will prompt the user on whether to remove and replace it, or keep the existing one and add a new subdirectory with the new timestamp for the current run. Args: auto_remove_exp_dir (bool): if True, automatically remove the existing experiment folder if it exists at the same path. - + Returns: log_dir (str): path to created log directory (sub-folder in experiment directory) output_dir (str): path to created models directory (sub-folder in experiment directory) @@ -47,7 +48,7 @@ def get_exp_dir(config, auto_remove_exp_dir=False): """ # timestamp for directory names t_now = time.time() - time_str = datetime.datetime.fromtimestamp(t_now).strftime('%Y%m%d%H%M%S') + time_str = datetime.datetime.fromtimestamp(t_now).strftime("%Y%m%d%H%M%S") # create directory for where to dump model parameters, tensorboard logs, and videos base_output_dir = os.path.expanduser(config.train.output_dir) @@ -57,7 +58,11 @@ def get_exp_dir(config, auto_remove_exp_dir=False): base_output_dir = os.path.join(base_output_dir, config.experiment.name) if os.path.exists(base_output_dir): if not auto_remove_exp_dir: - ans = input("WARNING: model directory ({}) already exists! \noverwrite? 
(y/n)\n".format(base_output_dir)) + ans = input( + "WARNING: model directory ({}) already exists! \noverwrite? (y/n)\n".format( + base_output_dir + ) + ) else: ans = "y" if ans == "y": @@ -98,14 +103,23 @@ def load_data_for_training(config, obs_keys): train_filter_by_attribute = config.train.hdf5_filter_key valid_filter_by_attribute = config.train.hdf5_validation_filter_key if valid_filter_by_attribute is not None: - assert config.experiment.validate, "specified validation filter key {}, but config.experiment.validate is not set".format(valid_filter_by_attribute) + assert ( + config.experiment.validate + ), "specified validation filter key {}, but config.experiment.validate is not set".format( + valid_filter_by_attribute + ) # load the dataset into memory if config.experiment.validate: - assert not config.train.hdf5_normalize_obs, "no support for observation normalization with validation data yet" - assert (train_filter_by_attribute is not None) and (valid_filter_by_attribute is not None), \ - "did not specify filter keys corresponding to train and valid split in dataset" \ + assert ( + not config.train.hdf5_normalize_obs + ), "no support for observation normalization with validation data yet" + assert (train_filter_by_attribute is not None) and ( + valid_filter_by_attribute is not None + ), ( + "did not specify filter keys corresponding to train and valid split in dataset" " - please fill config.train.hdf5_filter_key and config.train.hdf5_validation_filter_key" + ) train_demo_keys = FileUtils.get_demos_for_filter_key( hdf5_path=os.path.expanduser(config.train.data), filter_key=train_filter_by_attribute, @@ -114,12 +128,19 @@ def load_data_for_training(config, obs_keys): hdf5_path=os.path.expanduser(config.train.data), filter_key=valid_filter_by_attribute, ) - assert set(train_demo_keys).isdisjoint(set(valid_demo_keys)), "training demonstrations overlap with " \ - "validation demonstrations!" - train_dataset = dataset_factory(config, obs_keys, filter_by_attribute=train_filter_by_attribute) - valid_dataset = dataset_factory(config, obs_keys, filter_by_attribute=valid_filter_by_attribute) + assert set(train_demo_keys).isdisjoint(set(valid_demo_keys)), ( + "training demonstrations overlap with " "validation demonstrations!" 
+ ) + train_dataset = dataset_factory( + config, obs_keys, filter_by_attribute=train_filter_by_attribute + ) + valid_dataset = dataset_factory( + config, obs_keys, filter_by_attribute=valid_filter_by_attribute + ) else: - train_dataset = dataset_factory(config, obs_keys, filter_by_attribute=train_filter_by_attribute) + train_dataset = dataset_factory( + config, obs_keys, filter_by_attribute=train_filter_by_attribute + ) valid_dataset = None return train_dataset, valid_dataset @@ -151,7 +172,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non hdf5_path=dataset_path, obs_keys=obs_keys, dataset_keys=config.train.dataset_keys, - load_next_obs=config.train.hdf5_load_next_obs, # whether to load next observations (s') from dataset + load_next_obs=config.train.hdf5_load_next_obs, # whether to load next observations (s') from dataset frame_stack=config.train.frame_stack, seq_length=config.train.seq_length, pad_frame_stack=config.train.pad_frame_stack, @@ -161,7 +182,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non hdf5_cache_mode=config.train.hdf5_cache_mode, hdf5_use_swmr=config.train.hdf5_use_swmr, hdf5_normalize_obs=config.train.hdf5_normalize_obs, - filter_by_attribute=filter_by_attribute + filter_by_attribute=filter_by_attribute, ) dataset = SequenceDataset(**ds_kwargs) @@ -169,15 +190,15 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non def run_rollout( - policy, - env, - horizon, - use_goals=False, - render=False, - video_writer=None, - video_skip=5, - terminate_on_success=False, - ): + policy, + env, + horizon, + use_goals=False, + render=False, + video_writer=None, + video_skip=5, + terminate_on_success=False, +): """ Runs a rollout in an environment with the current network parameters. @@ -192,7 +213,7 @@ def run_rollout( render (bool): if True, render the rollout to the screen - video_writer (imageio Writer instance): if not None, use video writer object to append frames at + video_writer (imageio Writer instance): if not None, use video writer object to append frames at rate given by @video_skip video_skip (int): how often to write video frame @@ -216,8 +237,8 @@ def run_rollout( results = {} video_count = 0 # video frame counter - total_reward = 0. - success = { k: False for k in env.is_success() } # success metrics + total_reward = 0.0 + success = {k: False for k in env.is_success()} # success metrics try: for step_i in range(horizon): @@ -267,19 +288,19 @@ def run_rollout( def rollout_with_stats( - policy, - envs, - horizon, - use_goals=False, - num_episodes=None, - render=False, - video_dir=None, - video_path=None, - epoch=None, - video_skip=5, - terminate_on_success=False, - verbose=False, - ): + policy, + envs, + horizon, + use_goals=False, + num_episodes=None, + render=False, + video_dir=None, + video_path=None, + epoch=None, + video_skip=5, + terminate_on_success=False, + verbose=False, +): """ A helper function used in the train loop to conduct evaluation rollouts per environment and summarize the results. @@ -312,10 +333,10 @@ def rollout_with_stats( terminate_on_success (bool): if True, terminate episode early as soon as a success is encountered verbose (bool): if True, print results of each rollout - + Returns: - all_rollout_logs (dict): dictionary of rollout statistics (e.g. return, success rate, ...) - averaged across all rollouts + all_rollout_logs (dict): dictionary of rollout statistics (e.g. return, success rate, ...) 
+ averaged across all rollouts video_paths (dict): path to rollout videos for each environment """ @@ -324,20 +345,24 @@ def rollout_with_stats( all_rollout_logs = OrderedDict() # handle paths and create writers for video writing - assert (video_path is None) or (video_dir is None), "rollout_with_stats: can't specify both video path and dir" + assert (video_path is None) or ( + video_dir is None + ), "rollout_with_stats: can't specify both video path and dir" write_video = (video_path is not None) or (video_dir is not None) video_paths = OrderedDict() video_writers = OrderedDict() if video_path is not None: # a single video is written for all envs - video_paths = { k : video_path for k in envs } + video_paths = {k: video_path for k in envs} video_writer = imageio.get_writer(video_path, fps=20) - video_writers = { k : video_writer for k in envs } + video_writers = {k: video_writer for k in envs} if video_dir is not None: # video is written per env - video_str = "_epoch_{}.mp4".format(epoch) if epoch is not None else ".mp4" - video_paths = { k : os.path.join(video_dir, "{}{}".format(k, video_str)) for k in envs } - video_writers = { k : imageio.get_writer(video_paths[k], fps=20) for k in envs } + video_str = "_epoch_{}.mp4".format(epoch) if epoch is not None else ".mp4" + video_paths = { + k: os.path.join(video_dir, "{}{}".format(k, video_str)) for k in envs + } + video_writers = {k: imageio.get_writer(video_paths[k], fps=20) for k in envs} for env_name, env in envs.items(): env_video_writer = None @@ -345,9 +370,14 @@ def rollout_with_stats( print("video writes to " + video_paths[env_name]) env_video_writer = video_writers[env_name] - print("rollout: env={}, horizon={}, use_goals={}, num_episodes={}".format( - env.name, horizon, use_goals, num_episodes, - )) + print( + "rollout: env={}, horizon={}, use_goals={}, num_episodes={}".format( + env.name, + horizon, + use_goals, + num_episodes, + ) + ) rollout_logs = [] iterator = range(num_episodes) if not verbose: @@ -370,7 +400,11 @@ def rollout_with_stats( rollout_logs.append(rollout_info) num_success += rollout_info["Success_Rate"] if verbose: - print("Episode {}, horizon={}, num_success={}".format(ep_i + 1, horizon, num_success)) + print( + "Episode {}, horizon={}, num_success={}".format( + ep_i + 1, horizon, num_success + ) + ) print(json.dumps(rollout_info, sort_keys=True, indent=4)) if video_dir is not None: @@ -378,9 +412,14 @@ def rollout_with_stats( env_video_writer.close() # average metric across all episodes - rollout_logs = dict((k, [rollout_logs[i][k] for i in range(len(rollout_logs))]) for k in rollout_logs[0]) + rollout_logs = dict( + (k, [rollout_logs[i][k] for i in range(len(rollout_logs))]) + for k in rollout_logs[0] + ) rollout_logs_mean = dict((k, np.mean(v)) for k, v in rollout_logs.items()) - rollout_logs_mean["Time_Episode"] = np.sum(rollout_logs["time"]) / 60. 
# total time taken for rollouts in minutes + rollout_logs_mean["Time_Episode"] = ( + np.sum(rollout_logs["time"]) / 60.0 + ) # total time taken for rollouts in minutes all_rollout_logs[env_name] = rollout_logs_mean if video_path is not None: @@ -391,13 +430,13 @@ def rollout_with_stats( def should_save_from_rollout_logs( - all_rollout_logs, - best_return, - best_success_rate, - epoch_ckpt_name, - save_on_best_rollout_return, - save_on_best_rollout_success_rate, - ): + all_rollout_logs, + best_return, + best_success_rate, + epoch_ckpt_name, + save_on_best_rollout_return, + save_on_best_rollout_success_rate, +): """ Helper function used during training to determine whether checkpoints and videos should be saved. It will modify input attributes appropriately (such as updating @@ -417,10 +456,10 @@ def should_save_from_rollout_logs( epoch_ckpt_name (str): what to name the checkpoint file - this name might be modified by this function - save_on_best_rollout_return (bool): if True, should save checkpoints that achieve a + save_on_best_rollout_return (bool): if True, should save checkpoints that achieve a new best rollout return - save_on_best_rollout_success_rate (bool): if True, should save checkpoints that achieve a + save_on_best_rollout_success_rate (bool): if True, should save checkpoints that achieve a new best rollout success rate Returns: @@ -438,7 +477,9 @@ def should_save_from_rollout_logs( best_return[env_name] = rollout_logs["Return"] if save_on_best_rollout_return: # save checkpoint if achieve new best return - epoch_ckpt_name += "_{}_return_{}".format(env_name, best_return[env_name]) + epoch_ckpt_name += "_{}_return_{}".format( + env_name, best_return[env_name] + ) should_save_ckpt = True ckpt_reason = "return" @@ -446,7 +487,9 @@ def should_save_from_rollout_logs( best_success_rate[env_name] = rollout_logs["Success_Rate"] if save_on_best_rollout_success_rate: # save checkpoint if achieve new best success rate - epoch_ckpt_name += "_{}_success_{}".format(env_name, best_success_rate[env_name]) + epoch_ckpt_name += "_{}_success_{}".format( + env_name, best_success_rate[env_name] + ) should_save_ckpt = True ckpt_reason = "success" @@ -460,7 +503,9 @@ def should_save_from_rollout_logs( ) -def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization_stats=None): +def save_model( + model, config, env_meta, shape_meta, ckpt_path, obs_normalization_stats=None +): """ Save model to a torch pth file. @@ -496,6 +541,7 @@ def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) + def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): """ Delete checkpoints in a directory, keeping top @top_n checkpoints based on lowest validation loss. 
    Checkpoints are assumed to be saved in the form "model_epoch_{n}_best_validation_{validation loss}.pth".
@@ -524,17 +570,27 @@ def delete_checkpoints(ckpt_dir, top_n=3, smallest=True):
     for ckpt in all_checkpoints[:-top_n]:
         os.remove(os.path.join(ckpt_dir, ckpt))
 
+
 def get_gpu_usage_mb(index):
     """Returns the GPU usage in MB."""
     h = nvmlDeviceGetHandleByIndex(index)
     info = nvmlDeviceGetMemoryInfo(h)
-    print(f'total : {info.total}')
-    print(f'free : {info.free}')
-    print(f'used : {info.used}')
+    print(f"total : {info.total}")
+    print(f"free : {info.free}")
+    print(f"used : {info.used}")
     return info.used / 1024 / 1024
 
-def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None):
+
+def run_epoch(
+    model,
+    data_loader,
+    epoch,
+    validate=False,
+    num_steps=None,
+    obs_normalization_stats=None,
+    ac_key=None,
+):
     """
     Run an epoch of training or validation.
@@ -560,8 +616,8 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor
         step_log_all (dict): dictionary of logged training metrics averaged across all batches
     """
-    #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0))
-    
+    # print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0))
+
     epoch_timestamp = time.time()
     if validate:
         model.set_eval()
@@ -591,7 +647,9 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor
         # process batch for training
         t = time.time()
         input_batch = model.process_batch_for_training(batch, ac_key=ac_key)
-        input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats)
+        input_batch = model.postprocess_batch_for_training(
+            input_batch, obs_normalization_stats=obs_normalization_stats
+        )
         timing_stats["Process_Batch"].append(time.time() - t)
 
         # forward and backward pass
@@ -617,12 +675,22 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor
     # add in timing stats
     for k in timing_stats:
         # sum across all training steps, and convert from seconds to minutes
-        step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60.
-    step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60.
+        step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60.0
+    step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60.0
 
     return step_log_all
 
-def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None):
+
+def run_epoch_2_dataloaders(
+    model,
+    data_loader,
+    epoch,
+    data_loader_2,
+    validate=False,
+    num_steps=None,
+    obs_normalization_stats=None,
+    ac_key=None,
+):
     """
     Run an epoch of training or validation.
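As a sketch of the dual-dataloader pattern that run_epoch_2_dataloaders supports (a hypothetical helper, not the repository's exact loop), the second and possibly shorter loader can be recycled so that every primary batch gets a partner:

    from itertools import cycle

    def paired_batches(loader, loader_2=None):
        # yields (batch, batch_2); batch_2 is None when no second loader is given
        it_2 = cycle(loader_2) if loader_2 is not None else None
        for batch in loader:
            yield batch, (next(it_2) if it_2 is not None else None)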
@@ -648,7 +716,7 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F step_log_all (dict): dictionary of logged training metrics averaged across all batches """ - #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) + # print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) # breakpoint() epoch_timestamp = time.time() if validate: @@ -685,11 +753,23 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F t = time.time() # breakpoint() input_batch = model.process_batch_for_training(batch, ac_key=ac_key) - input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2, ac_key=ac_key) + input_batch_2 = ( + None + if batch_2 is None + else model.process_batch_for_training(batch_2, ac_key=ac_key) + ) # breakpoint() - input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) - input_batch_2 = None if input_batch_2 is None else model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats) + input_batch = model.postprocess_batch_for_training( + input_batch, obs_normalization_stats=obs_normalization_stats + ) + input_batch_2 = ( + None + if input_batch_2 is None + else model.postprocess_batch_for_training( + input_batch_2, obs_normalization_stats=obs_normalization_stats + ) + ) timing_stats["Process_Batch"].append(time.time() - t) @@ -697,7 +777,9 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F t = time.time() # breakpoint() if input_batch_2 is not None: - info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate) + info = model.train_on_batch( + [input_batch, input_batch_2], epoch, validate=validate + ) else: info = model.train_on_batch(input_batch, epoch, validate=validate) timing_stats["Train_Batch"].append(time.time() - t) @@ -720,16 +802,17 @@ def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=F # add in timing stats for k in timing_stats: # sum across all training steps, and convert from seconds to minutes - step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60. - step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60. + step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60.0 + step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60.0 return step_log_all + def is_every_n_steps(interval, current_step, skip_zero=False): """ - Convenient function to check whether current_step is at the interval. + Convenient function to check whether current_step is at the interval. Returns True if current_step % interval == 0 and asserts a few corner cases (e.g., interval <= 0) - + Args: interval (int): target interval current_step (int): current step diff --git a/robomimic/utils/vis_utils.py b/robomimic/utils/vis_utils.py index 19c73d7a..df6b3956 100644 --- a/robomimic/utils/vis_utils.py +++ b/robomimic/utils/vis_utils.py @@ -2,6 +2,7 @@ This file contains utility functions for visualizing image observations in the training pipeline. These functions can be a useful debugging tool. 
""" + import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm @@ -21,9 +22,7 @@ def image_tensor_to_numpy(image): Returns: image (np.array): converted images of shape [..., H, W, C] and type uint8 """ - return TensorUtils.to_numpy( - ObsUtils.unprocess_image(image) - ).astype(np.uint8) + return TensorUtils.to_numpy(ObsUtils.unprocess_image(image)).astype(np.uint8) def image_to_disk(image, fname): @@ -107,5 +106,5 @@ def depth_to_rgb(depth_map, depth_min=None, depth_max=None): if len(depth_map.shape) == 3: assert depth_map.shape[-1] == 1 depth_map = depth_map[..., 0] - assert len(depth_map.shape) == 2 # [H, W] - return (255. * cm.hot(depth_map, 3)).astype(np.uint8)[..., :3] + assert len(depth_map.shape) == 2 # [H, W] + return (255.0 * cm.hot(depth_map, 3)).astype(np.uint8)[..., :3] diff --git a/setup.py b/setup.py index 0e1c510b..9eb82082 100644 --- a/setup.py +++ b/setup.py @@ -2,13 +2,14 @@ # read the contents of your README file from os import path + this_directory = path.abspath(path.dirname(__file__)) -with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: +with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: lines = f.readlines() # remove images from README -lines = [x for x in lines if (('.png' not in x) and ('.gif' not in x))] -long_description = ''.join(lines) +lines = [x for x in lines if ((".png" not in x) and (".gif" not in x))] +long_description = "".join(lines) setup( name="robomimic", @@ -30,14 +31,14 @@ "torch", "torchvision", ], - eager_resources=['*'], + eager_resources=["*"], include_package_data=True, - python_requires='>=3', + python_requires=">=3", description="robomimic: A Modular Framework for Robot Learning from Demonstration", author="Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Matthew Bronars", url="https://github.com/ARISE-Initiative/robomimic", author_email="amandlek@cs.stanford.edu", version="0.3.0", long_description=long_description, - long_description_content_type='text/markdown' + long_description_content_type="text/markdown", ) diff --git a/tests/test_bc.py b/tests/test_bc.py index adc12501..7f823e66 100644 --- a/tests/test_bc.py +++ b/tests/test_bc.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" + import argparse from collections import OrderedDict @@ -22,9 +23,14 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="bc") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.modalities.obs.rgb = [] # by default, vanilla BC @@ -47,19 +53,33 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + ] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_class = ( + "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -79,9 +99,12 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -279,7 +302,9 @@ def test_bc(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = 
TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, res_str)) @@ -287,7 +312,7 @@ def test_bc(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_bcq.py b/tests/test_bcq.py index b8bd0835..b246a2cb 100644 --- a/tests/test_bcq.py +++ b/tests/test_bcq.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ + import argparse from collections import OrderedDict @@ -22,15 +23,20 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="bcq") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.modalities.obs.rgb = [] # by default, vanilla BCQ - config.algo.actor.enabled = True # perturbation actor - config.algo.critic.distributional.enabled = False # vanilla critic training - config.algo.action_sampler.vae.enabled = True # action sampler is VAE + config.algo.actor.enabled = True # perturbation actor + config.algo.critic.distributional.enabled = False # vanilla critic training + config.algo.action_sampler.vae.enabled = True # action sampler is VAE config.algo.action_sampler.gmm.enabled = False return config @@ -47,19 +53,33 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + ] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for 
"SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_class = ( + "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -79,9 +99,12 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -94,7 +117,7 @@ def bcq_no_actor_modifier(config): @register_mod("bcq-distributional") def bcq_distributional_modifier(config): config.algo.critic.distributional.enabled = True - config.algo.critic.value_bounds = [-100., 100.] + config.algo.critic.value_bounds = [-100.0, 100.0] return config @@ -247,7 +270,9 @@ def test_bcq(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, res_str)) @@ -255,7 +280,7 @@ def test_bcq(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_cql.py b/tests/test_cql.py index a78c4bf2..84811ee5 100644 --- a/tests/test_cql.py +++ b/tests/test_cql.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" + import argparse from collections import OrderedDict @@ -22,15 +23,20 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="cql") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.modalities.obs.rgb = [] # by default, vanilla CQL - config.algo.actor.bc_start_steps = 40 # BC training initially - config.algo.critic.target_q_gap = 5.0 # use automatic cql tuning - config.algo.actor.target_entropy = "default" # use automatic entropy tuning + config.algo.actor.bc_start_steps = 40 # BC training initially + config.algo.critic.target_q_gap = 5.0 # use automatic cql tuning + config.algo.actor.target_entropy = "default" # use automatic entropy tuning # lower batch size to 100 to accomodate small test dataset config.train.batch_size = 100 @@ -49,19 +55,33 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + ] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_class = ( + "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None 
to use no randomization, or 'CropRandomizer' to use crop randomization @@ -81,9 +101,12 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -136,7 +159,9 @@ def test_cql(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, res_str)) @@ -144,7 +169,7 @@ def test_cql(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_examples.py b/tests/test_examples.py index 6696015f..21a3ff33 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -2,6 +2,7 @@ Tests for the provided examples in the repository. Excludes stdout output by default (pass --verbose to see stdout output). """ + import argparse import traceback import os @@ -29,16 +30,24 @@ def test_example_script(script_name, args_string, test_name, silence=True): # run example script stdout = subprocess.DEVNULL if silence else None - path_to_script = os.path.join(robomimic.__path__[0], "../examples/{}".format(script_name)) - example_job = subprocess.Popen("python {} {}".format(path_to_script, args_string), - shell=True, stdout=stdout, stderr=subprocess.PIPE) + path_to_script = os.path.join( + robomimic.__path__[0], "../examples/{}".format(script_name) + ) + example_job = subprocess.Popen( + "python {} {}".format(path_to_script, args_string), + shell=True, + stdout=stdout, + stderr=subprocess.PIPE, + ) example_job.wait() # get stderr output out, err = example_job.communicate() err = err.decode("utf-8") if len(err) > 0: - ret = "maybe failed - stderr output below (if it's only from tqdm, the test passed)\n{}".format(err) + ret = "maybe failed - stderr output below (if it's only from tqdm, the test passed)\n{}".format( + err + ) ret = colored(ret, "red") else: ret = colored("passed", "green") @@ -49,35 +58,35 @@ def test_example_script(script_name, args_string, test_name, silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() test_example_script( - script_name="simple_config.py", + script_name="simple_config.py", args_string="", - test_name="simple-config-example", + test_name="simple-config-example", silence=(not args.verbose), ) test_example_script( - script_name="simple_obs_nets.py", + script_name="simple_obs_nets.py", args_string="", - test_name="simple-obs-nets-example", + test_name="simple-obs-nets-example", silence=(not args.verbose), ) test_example_script( - script_name="simple_train_loop.py", + script_name="simple_train_loop.py", args_string="", - test_name="simple-train-loop-example", + test_name="simple-train-loop-example", silence=(not args.verbose), ) # clear tmp model dir before running script TestUtils.maybe_remove_dir(TestUtils.temp_model_dir_path()) test_example_script( - script_name="train_bc_rnn.py", + script_name="train_bc_rnn.py", args_string="--debug", - test_name="train-bc-rnn-example", + 
test_name="train-bc-rnn-example", silence=(not args.verbose), ) # cleanup diff --git a/tests/test_hbc.py b/tests/test_hbc.py index e5560696..1a1c2946 100644 --- a/tests/test_hbc.py +++ b/tests/test_hbc.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ + import argparse from collections import OrderedDict @@ -21,15 +22,30 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="hbc") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.planner.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.planner.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.planner.modalities.obs.rgb = [] - config.observation.planner.modalities.subgoal.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.planner.modalities.subgoal.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.planner.modalities.subgoal.rgb = [] - config.observation.actor.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.actor.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.actor.modalities.obs.rgb = [] # by default, planner is deterministic prediction @@ -40,9 +56,12 @@ def get_algo_base_config(): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -168,7 +187,9 @@ def test_hbc(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, res_str)) @@ -176,7 +197,7 @@ def test_hbc(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_iql.py b/tests/test_iql.py index e80a8f3b..ebd521bc 100644 --- a/tests/test_iql.py +++ b/tests/test_iql.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" + import argparse from collections import OrderedDict @@ -24,7 +25,12 @@ def get_algo_base_config(): # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.modalities.obs.rgb = [] return config @@ -41,19 +47,33 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + config.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + ] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_class = ( + "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -73,9 +93,12 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -127,7 +150,9 @@ def test_iql(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, 
res_str)) @@ -135,7 +160,7 @@ def test_iql(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_iris.py b/tests/test_iris.py index 126c5c28..44f130dc 100644 --- a/tests/test_iris.py +++ b/tests/test_iris.py @@ -4,6 +4,7 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ + import argparse from collections import OrderedDict @@ -21,18 +22,38 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="iris") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example iris) - config.observation.value_planner.planner.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.value_planner.planner.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.value_planner.planner.modalities.obs.rgb = [] - config.observation.value_planner.planner.modalities.subgoal.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.value_planner.planner.modalities.subgoal.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.value_planner.planner.modalities.subgoal.rgb = [] - config.observation.value_planner.value.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.value_planner.value.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.value_planner.value.modalities.obs.rgb = [] - config.observation.actor.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] + config.observation.actor.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] config.observation.actor.modalities.obs.rgb = [] # by default, basic N(0, 1) prior for both planner VAE and BCQ cVAE @@ -48,9 +69,12 @@ def get_algo_base_config(): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() + + def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier + return decorator @@ -175,9 +199,10 @@ def iris_modifier_11(config): def iris_modifier_12(config): # bcq value function is distributional config.algo.value_planner.value.critic.distributional.enabled = True - config.algo.value_planner.value.critic.value_bounds = [-100., 100.] 
+ config.algo.value_planner.value.critic.value_bounds = [-100.0, 100.0] return config + @register_mod("iris, bcq cVAE Gaussian prior (obs-independent)") def iris_modifier_13(config): # learn parameters of Gaussian prior (obs-independent) @@ -286,7 +311,9 @@ def test_iris(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) + res_str = TestUtils.test_run( + base_config=base_config, config_modifier=MODIFIERS[test_name] + ) print("{}: {}".format(test_name, res_str)) @@ -294,7 +321,7 @@ def test_iris(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_scripts.py b/tests/test_scripts.py index 30ed7f61..8807f3bb 100644 --- a/tests/test_scripts.py +++ b/tests/test_scripts.py @@ -2,6 +2,7 @@ Tests for a handful of scripts. Excludes stdout output by default (pass --verbose to see stdout output). """ + import argparse import traceback import h5py @@ -39,19 +40,33 @@ def image_modifier(conf): conf.train.batch_size = 16 # replace object with rgb modality - conf.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] + conf.observation.modalities.obs.low_dim = [ + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + ] conf.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders conf.observation.encoder.rgb.core_class = "VisualCore" conf.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - conf.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) - conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core - conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - conf.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + conf.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) + conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( + False # kwargs for visual core + ) + conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = ( + False + ) + conf.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( + 32 # Default arguments for "SpatialSoftmax" + ) + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( + False # Default arguments for "SpatialSoftmax" + ) + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( + 1.0 # Default arguments for "SpatialSoftmax" + ) conf.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 
'CropRandomizer' to use crop randomization @@ -59,7 +74,9 @@ def image_modifier(conf): return conf - config = TestUtils.config_from_modifier(base_config=config, config_modifier=image_modifier) + config = TestUtils.config_from_modifier( + base_config=config, config_modifier=image_modifier + ) # run training device = TorchUtils.get_torch_device(try_to_use_cuda=True) @@ -79,15 +96,18 @@ def test_playback_script(silence=True, use_actions=False, use_obs=False): args = argparse.Namespace() args.dataset = TestUtils.example_dataset_path() args.filter_key = None - args.n = 3 # playback 3 demonstrations + args.n = 3 # playback 3 demonstrations args.use_actions = use_actions args.use_obs = use_obs args.render = False - args.video_path = TestUtils.temp_video_path() # dump video + args.video_path = TestUtils.temp_video_path() # dump video args.video_skip = 5 if use_obs: # camera observation names - args.render_image_names = ["agentview_image", "robot0_eye_in_hand_image"] + args.render_image_names = [ + "agentview_image", + "robot0_eye_in_hand_image", + ] else: # camera names args.render_image_names = ["agentview", "robot0_eye_in_hand"] @@ -99,7 +119,9 @@ def test_playback_script(silence=True, use_actions=False, use_obs=False): except Exception as e: # indicate failure by returning error string - ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") + ret = colored( + "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" + ) # delete output video TestUtils.maybe_remove_file(TestUtils.temp_video_path()) @@ -121,14 +143,14 @@ def test_run_agent_script(silence=True): # setup args and run script args = argparse.Namespace() args.agent = ckpt_path - args.n_rollouts = 3 # 3 rollouts - args.horizon = 10 # short rollouts - 10 steps + args.n_rollouts = 3 # 3 rollouts + args.horizon = 10 # short rollouts - 10 steps args.env = None args.render = False - args.video_path = TestUtils.temp_video_path() # dump video + args.video_path = TestUtils.temp_video_path() # dump video args.video_skip = 5 args.camera_names = ["agentview", "robot0_eye_in_hand"] - args.dataset_path = TestUtils.temp_dataset_path() # dump dataset + args.dataset_path = TestUtils.temp_dataset_path() # dump dataset args.dataset_obs = True args.seed = 0 run_trained_agent(args) @@ -144,7 +166,9 @@ def test_run_agent_script(silence=True): except Exception as e: # indicate failure by returning error string - ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") + ret = colored( + "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" + ) # delete trained model directory, output video, and output dataset TestUtils.maybe_remove_dir(TestUtils.temp_model_dir_path()) @@ -159,7 +183,7 @@ def test_run_agent_script(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action='store_true', + action="store_true", help="don't suppress stdout during tests", ) args = parser.parse_args() From 652de8d0796da8390fa8e382b71e3c8247becf82 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Sat, 22 Jun 2024 07:35:30 -0400 Subject: [PATCH 26/44] integrated dual dataloader ee_pose normalization, tested --- robomimic/algo/algo.py | 16 +++--- robomimic/utils/dataset.py | 97 ++++++++++-------------------------- robomimic/utils/obs_utils.py | 91 +++++++++++++++++++++++++++++---- 3 files changed, 113 insertions(+), 91 deletions(-) diff --git a/robomimic/algo/algo.py b/robomimic/algo/algo.py index 4f2950da..39d7d9a7 100644 --- a/robomimic/algo/algo.py +++ 
b/robomimic/algo/algo.py
@@ -222,7 +222,7 @@ def process_batch_for_training(self, batch):
         """
         return batch
 
-    def postprocess_batch_for_training(self, batch, obs_normalization_stats):
+    def postprocess_batch_for_training(self, batch, normalization_stats, normalize_actions=True):
         """
         Does some operations (like channel swap, uint8 to float conversion, normalization)
         after @process_batch_for_training is called, in order to ensure these operations
@@ -243,9 +243,9 @@ def postprocess_batch_for_training(self, batch, obs_normalization_stats):
         """
 
         # ensure obs_normalization_stats are torch Tensors on proper device
-        obs_normalization_stats = TensorUtils.to_float(
+        normalization_stats = TensorUtils.to_float(
             TensorUtils.to_device(
-                TensorUtils.to_tensor(obs_normalization_stats), self.device
+                TensorUtils.to_tensor(normalization_stats), self.device
             )
         )
 
@@ -262,15 +262,15 @@ def recurse_helper(d):
                     # found key - stop search and process observation
                     if d[k] is not None:
                         d[k] = ObsUtils.process_obs_dict(d[k])
-                        if obs_normalization_stats is not None:
-                            d[k] = ObsUtils.normalize_obs(
-                                d[k], obs_normalization_stats=obs_normalization_stats
-                            )
                 elif isinstance(d[k], dict):
                     # search down into dictionary
                     recurse_helper(d[k])
 
         recurse_helper(batch)
+        if normalization_stats is not None:
+            batch = ObsUtils.normalize_batch(
+                batch, normalization_stats=normalization_stats, normalize_actions=normalize_actions
+            )
         return batch
 
     def train_on_batch(self, batch, epoch, validate=False):
@@ -543,7 +543,7 @@ def _prepare_observation(self, ob):
         )
         # limit normalization to obs keys being used, in case environment includes extra keys
         ob = {k: ob[k] for k in self.policy.global_config.all_obs_keys}
-        ob = ObsUtils.normalize_obs(
-            ob, obs_normalization_stats=obs_normalization_stats
-        )
+        ob = ObsUtils.normalize_batch(
+            {"obs": ob}, normalization_stats=obs_normalization_stats, normalize_actions=False
+        )["obs"]
         return ob
diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 322fdea8..b09fcd86 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -353,77 +353,27 @@ def normalize_obs(self):
         Computes a dataset-wide mean and standard deviation for the
         observations (per dimension and per obs key) and returns it.
         """
-
-        def _compute_traj_stats(traj_obs_dict):
-            """
-            Helper function to compute statistics over a single trajectory of observations.
-            """
-            traj_stats = {k: {} for k in traj_obs_dict}
-            for k in traj_obs_dict:
-                traj_stats[k]["n"] = traj_obs_dict[k].shape[0]
-                traj_stats[k]["mean"] = traj_obs_dict[k].mean(
-                    axis=0, keepdims=True
-                )  # [1, ...]
-                traj_stats[k]["sqdiff"] = (
-                    (traj_obs_dict[k] - traj_stats[k]["mean"]) ** 2
-                ).sum(
-                    axis=0, keepdims=True
-                )  # [1, ...]
-            return traj_stats
-
-        def _aggregate_traj_stats(traj_stats_a, traj_stats_b):
-            """
-            Helper function to aggregate trajectory statistics.
-            See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
-            for more information.
-            """
-            merged_stats = {}
-            for k in traj_stats_a:
-                n_a, avg_a, M2_a = (
-                    traj_stats_a[k]["n"],
-                    traj_stats_a[k]["mean"],
-                    traj_stats_a[k]["sqdiff"],
-                )
-                n_b, avg_b, M2_b = (
-                    traj_stats_b[k]["n"],
-                    traj_stats_b[k]["mean"],
-                    traj_stats_b[k]["sqdiff"],
-                )
-                n = n_a + n_b
-                mean = (n_a * avg_a + n_b * avg_b) / n
-                delta = avg_b - avg_a
-                M2 = M2_a + M2_b + (delta**2) * (n_a * n_b) / n
-                merged_stats[k] = dict(n=n, mean=mean, sqdiff=M2)
-            return merged_stats
-
-        # Run through all trajectories. For each one, compute minimal observation statistics, and then aggregate
-        # with the previous statistics.
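
The hunk here drops the streaming statistics path. For reference, the rule the deleted
_aggregate_traj_stats helper implemented is the standard parallel-variance merge: for two
partitions with counts n_a, n_b, means avg_a, avg_b, and summed squared deviations M2_a, M2_b,
the merged statistics are n = n_a + n_b, mean = (n_a * avg_a + n_b * avg_b) / n, and
M2 = M2_a + M2_b + delta^2 * n_a * n_b / n with delta = avg_b - avg_a. A minimal self-contained
numpy sketch of that rule (the helper name below is illustrative, not from the codebase):

    import numpy as np

    def merge_stats(n_a, mean_a, M2_a, n_b, mean_b, M2_b):
        # parallel variance merge: M2 is the summed squared deviation from the mean
        n = n_a + n_b
        mean = (n_a * mean_a + n_b * mean_b) / n
        delta = mean_b - mean_a
        M2 = M2_a + M2_b + (delta ** 2) * (n_a * n_b) / n
        return n, mean, M2

    # sanity check against direct computation on the concatenated data
    a = np.random.randn(100, 3)
    b = np.random.randn(50, 3)
    n, mean, M2 = merge_stats(
        len(a), a.mean(axis=0), ((a - a.mean(axis=0)) ** 2).sum(axis=0),
        len(b), b.mean(axis=0), ((b - b.mean(axis=0)) ** 2).sum(axis=0),
    )
    assert np.allclose(mean, np.concatenate([a, b]).mean(axis=0))
    assert np.allclose(np.sqrt(M2 / n), np.concatenate([a, b]).std(axis=0))

The replacement _calc_helper below instead concatenates every trajectory before taking the
mean and std, which is simpler but holds all of the low-dim data in memory at once.
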
-        ep = self.demos[0]
-        obs_traj = {
-            k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype("float32")
-            for k in self.obs_keys
-        }
-        obs_traj = ObsUtils.process_obs_dict(obs_traj)
-        merged_stats = _compute_traj_stats(obs_traj)
-        print("SequenceDataset: normalizing observations...")
-        for ep in LogUtils.custom_tqdm(self.demos[1:]):
-            obs_traj = {
-                k: self.hdf5_file["data/{}/obs/{}".format(ep, k)][()].astype("float32")
-                for k in self.obs_keys
-            }
-            obs_traj = ObsUtils.process_obs_dict(obs_traj)
-            traj_stats = _compute_traj_stats(obs_traj)
-            merged_stats = _aggregate_traj_stats(merged_stats, traj_stats)
-
-        obs_normalization_stats = {k: {} for k in merged_stats}
-        for k in merged_stats:
-            # note we add a small tolerance of 1e-3 for std
-            obs_normalization_stats[k]["mean"] = merged_stats[k]["mean"].astype(
-                np.float32
-            )
-            obs_normalization_stats[k]["std"] = (
-                np.sqrt(merged_stats[k]["sqdiff"] / merged_stats[k]["n"]) + 1e-3
-            ).astype(np.float32)
+        def _calc_helper(hdf5_key):
+            obs = []
+            demo_keys = [k for k in self.hdf5_file["data"].keys() if "demo" in k]
+            for ep in demo_keys:
+                obs_traj = self.hdf5_file[f"data/{ep}/{hdf5_key}"][()].astype('float32')
+                obs.append(obs_traj)
+            if len(obs) == 0:
+                raise ValueError(f"found no demos containing key {hdf5_key}")
+            obs = np.concatenate(obs, axis=0)
+            mean = obs.mean(axis=0, keepdims=True)
+            std = obs.std(axis=0, keepdims=True) + 1e-3
+            return dict(mean=mean, std=std)
+
+
+        obs_normalization_stats = {}
+        # keys_to_norm = [f"obs/{k}" for k in self.obs_keys if ObsUtils.key_is_obs_modality(k, "low_dim")] + ["actions"]
+        for key in self.obs_keys:
+            if ObsUtils.key_is_obs_modality(key, "low_dim"):
+                obs_normalization_stats[key] = _calc_helper(f"obs/{key}")
+
+        obs_normalization_stats["actions"] = _calc_helper("actions")
         return obs_normalization_stats
 
     def get_obs_normalization_stats(self):
@@ -437,7 +387,10 @@ def get_obs_normalization_stats(self):
             with a "mean" and "std" of shape (1, ...) where ... is the default
             shape for the observation.
         """
-        assert self.hdf5_normalize_obs, "not using observation normalization!"
+        # assert self.hdf5_normalize_obs, "not using observation normalization!"
+        if not self.hdf5_normalize_obs:
+            print("Warning: not using observation normalization!")
+            return None
         return deepcopy(self.obs_normalization_stats)
 
     def get_dataset_for_ep(self, ep, key):
diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py
index 6d05ac0c..31099616 100644
--- a/robomimic/utils/obs_utils.py
+++ b/robomimic/utils/obs_utils.py
@@ -496,7 +496,7 @@ def get_processed_shape(obs_modality, input_shape):
     return list(process_obs(obs=np.zeros(input_shape), obs_modality=obs_modality).shape)
 
 
-def normalize_obs(obs_dict, obs_normalization_stats):
+def normalize_batch(batch, normalization_stats, normalize_actions=True):
     """
     Normalize observations using the provided "mean" and "std" entries
     for each observation key.
The observation dictionary will be @@ -515,19 +515,15 @@ def normalize_obs(obs_dict, obs_normalization_stats): """ # ensure we have statistics for each modality key in the observation - assert set(obs_dict.keys()).issubset(obs_normalization_stats) - - for m in obs_dict: - # get rid of extra dimension - we will pad for broadcasting later - mean = obs_normalization_stats[m]["mean"][0] - std = obs_normalization_stats[m]["std"][0] + # assert set(obs_dict.keys()).issubset(obs_normalization_stats) + def _norm_helper(obs, mean, std): # shape consistency checks m_num_dims = len(mean.shape) - shape_len_diff = len(obs_dict[m].shape) - m_num_dims + shape_len_diff = len(obs.shape) - m_num_dims assert shape_len_diff >= 0, "shape length mismatch in @normalize_obs" assert ( - obs_dict[m].shape[-m_num_dims:] == mean.shape + obs.shape[-m_num_dims:] == mean.shape ), "shape mismatch in @normalize_obs" # Obs can have one or more leading batch dims - prepare for broadcasting. @@ -538,9 +534,82 @@ def normalize_obs(obs_dict, obs_normalization_stats): mean = mean.reshape(reshape_padding + tuple(mean.shape)) std = std.reshape(reshape_padding + tuple(std.shape)) - obs_dict[m] = (obs_dict[m] - mean) / std + return (obs - mean) / std + + for m in batch["obs"]: + if m not in normalization_stats: + continue + # get rid of extra dimension - we will pad for broadcasting later + mean = normalization_stats[m]["mean"][0] + std = normalization_stats[m]["std"][0] + + batch["obs"][m] = _norm_helper(batch["obs"][m], mean, std) + + if normalize_actions: + ac_mean = normalization_stats["actions"]["mean"][0] + ac_std = normalization_stats["actions"]["std"][0] + + batch["actions"] = _norm_helper(batch["actions"], ac_mean, ac_std) + + + return batch + +def unnormalize_batch(batch, normalization_stats): + """ + Unnormalize observations using the provided "mean" and "std" entries + for each observation key. The observation dictionary will be + modified in-place. + + Args: + obs_dict (dict): dictionary mapping observation key to np.array or + torch.Tensor. Can have any number of leading batch dimensions. + + obs_normalization_stats (dict): this should map observation keys to dicts + with a "mean" and "std" of shape (1, ...) where ... is the default + shape for the observation. + + Returns: + obs_dict (dict): obs dict with unnormalized observation arrays + """ + + # ensure we have statistics for each modality key in the observation + # assert set(obs_dict.keys()).issubset(obs_normalization_stats) + + def _unnorm_helper(obs, mean, std): + # shape consistency checks + m_num_dims = len(mean.shape) + shape_len_diff = len(obs.shape) - m_num_dims + assert shape_len_diff >= 0, "shape length mismatch in @normalize_obs" + assert ( + obs.shape[-m_num_dims:] == mean.shape + ), "shape mismatch in @normalize_obs" - return obs_dict + # Obs can have one or more leading batch dims - prepare for broadcasting. + # + # As an example, if the obs has shape [B, T, D] and our mean / std stats are shape [D] + # then we should pad the stats to shape [1, 1, D]. 
+ reshape_padding = tuple([1] * shape_len_diff) + mean = torch.from_numpy(mean.reshape(reshape_padding + tuple(mean.shape))).to(obs.device) + std = torch.from_numpy(std.reshape(reshape_padding + tuple(std.shape))).to(obs.device) + + return (obs * std) + mean + + if "obs" in batch: + for m in batch["obs"]: + if m not in normalization_stats: + continue + # get rid of extra dimension - we will pad for broadcasting later + mean = normalization_stats[m]["mean"][0] + std = normalization_stats[m]["std"][0] + + batch["obs"][m] = _unnorm_helper(batch["obs"][m], mean, std) + + ac_mean = normalization_stats["actions"]["mean"][0] + ac_std = normalization_stats["actions"]["std"][0] + + batch["actions"] = _unnorm_helper(batch["actions"], ac_mean, ac_std) + + return batch def has_modality(modality, obs_keys): From 014e9b602c764cdfa158e376818de6732aed16d7 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Mon, 1 Jul 2024 11:10:55 -0400 Subject: [PATCH 27/44] obs norm takes in ac_key to normalize correct key --- robomimic/utils/dataset.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index b09fcd86..debe6844 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -25,6 +25,7 @@ def __init__( hdf5_path, obs_keys, dataset_keys, + ac_key, frame_stack=1, seq_length=1, pad_frame_stack=True, @@ -90,6 +91,7 @@ def __init__( self.hdf5_use_swmr = hdf5_use_swmr self.hdf5_normalize_obs = hdf5_normalize_obs self._hdf5_file = None + self.ac_key = ac_key assert hdf5_cache_mode in ["all", "low_dim", None] self.hdf5_cache_mode = hdf5_cache_mode @@ -373,7 +375,7 @@ def _calc_helper(hdf5_key): if ObsUtils.key_is_obs_modality(key, "low_dim"): obs_normalization_stats[key] = _calc_helper(f"obs/{key}") - obs_normalization_stats["actions"] = _calc_helper("actions") + obs_normalization_stats["actions"] = _calc_helper(self.ac_key) return obs_normalization_stats def get_obs_normalization_stats(self): From 616e551d98f24c839b42d641acbd95706e24460d Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Fri, 12 Jul 2024 17:14:50 -0400 Subject: [PATCH 28/44] GMM works with prestacked actions --- robomimic/models/base_nets.py | 3 ++- robomimic/utils/dataset.py | 36 +++++++++++++++++++---------------- robomimic/utils/file_utils.py | 5 ++++- robomimic/utils/log_utils.py | 11 ++++++++++- 4 files changed, 36 insertions(+), 19 deletions(-) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index ff91ee3f..c7e7e4e7 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -17,6 +17,7 @@ import robomimic.utils.tensor_utils as TensorUtils from robomimic.models.vit_rein import Reins, LoRAReins, MLPhead +from robomimic.utils.log_utils import bcolors CONV_ACTIVATIONS = { "relu": nn.ReLU, @@ -608,7 +609,7 @@ def __init__( freeze (bool): if True, use a frozen ViT pretrained model. """ super(ViT_Rein, self).__init__() - + print(f"{bcolors.WARNING}BACKBONE FREEZE: {freeze}{bcolors.ENDC}") assert input_channel == 3 assert vit_model_class in [ "vit_b", diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index debe6844..5aea2fa3 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -37,6 +37,7 @@ def __init__( hdf5_normalize_obs=False, filter_by_attribute=None, load_next_obs=True, + prestacked_actions=False, ): """ Dataset class for fetching sequences of experience. 
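
The normalization API in the patches above stores per-key stats with a leading singleton
dimension (shape (1, ...)), which the helpers strip with [0] and then re-pad for broadcasting.
A self-contained numpy sketch of that round trip, with illustrative stats values (the local
function names are not from the codebase):

    import numpy as np

    # stats in the layout _calc_helper produces: a leading singleton dim per key
    stats = {"mean": np.array([[0.5, -1.0]]), "std": np.array([[2.0, 0.1]])}

    def normalize(x, stats):
        # [0] drops the leading singleton; broadcasting then covers any leading batch dims
        return (x - stats["mean"][0]) / stats["std"][0]

    def unnormalize(x, stats):
        return x * stats["std"][0] + stats["mean"][0]

    x = np.random.randn(4, 10, 2)  # (B, T, D) batch of sequences, D matching the stats
    assert np.allclose(unnormalize(normalize(x, stats), stats), x)

Because broadcasting handles any number of leading batch dimensions, the same stats work for a
single observation of shape (D,), a batch (B, D), or a sequence batch (B, T, D).
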
@@ -87,6 +88,8 @@ def __init__( """ super(SequenceDataset, self).__init__() + self.prestacked_actions = prestacked_actions + self.hdf5_path = os.path.expanduser(hdf5_path) self.hdf5_use_swmr = hdf5_use_swmr self.hdf5_normalize_obs = hdf5_normalize_obs @@ -498,30 +501,28 @@ def get_item(self, index): return meta - def interpolate_keys(self, obs, keys, seq_length, seq_length_to_load): - if seq_length == seq_length_to_load: - return - + def interpolate_keys(self, obs, keys, seq_length): for k in keys: v = obs[k] + L = v.shape[0] + if L == seq_length: + continue + if k == "pad_mask": # interpolate it by simply copying each index (seq_length / seq_length_to_load) times - obs[k] = np.repeat(v, seq_length // seq_length_to_load, axis=0) + obs[k] = np.repeat(v, (seq_length // L), axis=0) elif k != "pad_mask": - assert ( - v.shape[0] == seq_length_to_load - ), "low_dim obs should have shape (seq_length, ...)" - assert ( - len(v.shape) == 2 - ), "low_dim obs should have shape (seq_length, ...)" # plot v[:, 3] # plt.plot(v[:, 2]) # plt.savefig('v_3.png') # plt.close() interp = scipy.interpolate.interp1d( - np.linspace(0, 1, seq_length_to_load), v, axis=0 + np.linspace(0, 1, L), v, axis=0 ) - obs[k] = interp(np.linspace(0, 1, seq_length)) + try: + obs[k] = interp(np.linspace(0, 1, seq_length)) + except: + breakpoint() # plt.plot(obs[k][:, 2]) # plt.savefig('v_3_after.png') # plt.close() @@ -642,9 +643,10 @@ def get_obs_sequence_from_demo( obs["pad_mask"] = pad_mask # Interpolate obs - to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")] + # to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")] + to_interp = ["pad_mask"] # t = time.time() - self.interpolate_keys(obs, to_interp, seq_length, seq_length_to_load) + self.interpolate_keys(obs, to_interp, seq_length) # print("Interpolation time: ", time.time() - t) return obs @@ -687,7 +689,9 @@ def get_dataset_sequence_from_demo( # interpolate actions to_interp = [k for k in data] # t = time.time() - self.interpolate_keys(data, to_interp, seq_length, seq_length_to_load) + if data["actions"].shape[0] == 1 and len(data["actions"].shape) == 3: + data["actions"] = data["actions"][0] + self.interpolate_keys(data, to_interp, seq_length) # print("Interpolation time: ", time.time() - t) return data diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index 8968b379..372969dd 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -20,6 +20,7 @@ from robomimic.config import config_factory from robomimic.algo import algo_factory from robomimic.algo import RolloutPolicy +from robomimic.utils.log_utils import bcolors def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): @@ -146,7 +147,9 @@ def get_shape_metadata_from_dataset( demo = f["data/{}".format(demo_id)] # action dimension - shape_meta["ac_dim"] = f[f"data/{demo_id}/{ac_key}"].shape[1] + shape_meta["ac_dim"] = f[f"data/{demo_id}/{ac_key}"].shape[-1] + if len(f[f"data/{demo_id}/{ac_key}"].shape) > 2: + print(f"{bcolors.WARNING}Warning: action shape has more than 2 dims, if these aren't prepacked actions something may be wrong?{bcolors.ENDC}") # observation dimensions all_shapes = OrderedDict() diff --git a/robomimic/utils/log_utils.py b/robomimic/utils/log_utils.py index f3521978..7cc32b49 100644 --- a/robomimic/utils/log_utils.py +++ b/robomimic/utils/log_utils.py @@ -17,7 +17,16 @@ # global list of warning messages can be populated with @log_warning and flushed with @flush_warnings WARNINGS_BUFFER = [] - +class 
bcolors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKCYAN = '\033[96m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' class PrintLogger(object): """ From 691627e08987cf1fae2e7dc6c3be8ff6443ded0a Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Mon, 22 Jul 2024 17:23:23 -0400 Subject: [PATCH 29/44] return patch tokens for ViT instead of linear classifier --- robomimic/models/base_nets.py | 98 +++++++++++++++++++++-------------- robomimic/utils/dataset.py | 98 ++++++++++++++++++++++++----------- 2 files changed, 127 insertions(+), 69 deletions(-) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index c7e7e4e7..488e4695 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -596,6 +596,7 @@ def __init__( lora_dim=16, patch_size=16, freeze=True, + return_key="x_norm_patchtokens" ): """ Using pretrained observation encoder network proposed in Vision Transformers @@ -627,6 +628,9 @@ def __init__( self._lora_dim = lora_dim self._patch_size = patch_size self._out_indices = ([7, 11, 15, 23],) + self.return_key = return_key + if self.return_key not in ["x_norm_patchtokens", "x_norm_clstoken"]: + raise ValueError(f"return_key {self.return_key} not supported") self.preprocess = nn.Sequential( transforms.Resize((294, 294)), @@ -635,20 +639,20 @@ def __init__( try: if self._vit_model_class == "vit_s": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vits14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vits14" ) if self._vit_model_class == "vit_l": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitl14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitl14" ) if self._vit_model_class == "vit_g": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitg14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitg14" ) if self._vit_model_class == "vit_b": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitb14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitb14" ) except ImportError: print("WARNING: could not load Vit") @@ -656,13 +660,13 @@ def __init__( try: self._rein_layers = LoRAReins( lora_dim=self._lora_dim, - num_layers=len(self.nets.backbone.blocks), - embed_dims=self.nets.backbone.patch_embed.proj.out_channels, + num_layers=len(self.nets.blocks), + embed_dims=self.nets.patch_embed.proj.out_channels, patch_size=self._patch_size, ) self._mlp_lora_head = MLPhead( - in_dim=3 * self.nets.backbone.patch_embed.proj.out_channels, - out_dim=5 * self.nets.backbone.patch_embed.proj.out_channels, + in_dim=3 * self.nets.patch_embed.proj.out_channels, + out_dim=5 * self.nets.patch_embed.proj.out_channels, ) except ImportError: print("WARNING: could not load rein layer") @@ -674,8 +678,8 @@ def __init__( def forward(self, inputs): x = self.preprocess(inputs) - x = self.nets.backbone.patch_embed(x) - for idx, blk in enumerate(self.nets.backbone.blocks): + x = self.nets.patch_embed(x) + for idx, blk in enumerate(self.nets.blocks): x = blk(x) x = self._rein_layers.forward( x, @@ -683,18 +687,20 @@ def forward(self, inputs): batch_first=True, has_cls_token=True, ) - + if self.return_key == "x_norm_patchtokens": + return x q_avg = x.mean(dim=1).unsqueeze(1) q_max = torch.max(x, 
1)[0].unsqueeze(1) q_N = x[:, x.shape[1] - 1, :].unsqueeze(1) _q = torch.cat((q_avg, q_max, q_N), dim=1) - x = self.nets.backbone.norm(_q) + x = self.nets.norm(_q) x = x.flatten(-2, -1) x = self._mlp_lora_head(x) - x = self.nets.linear_head(x) - return x + if self.return_key == "x_norm_clstoken": + return x + def output_shape(self, input_shape): """ @@ -708,19 +714,25 @@ def output_shape(self, input_shape): """ assert len(input_shape) == 3 - out_dim = 1000 + C, H, W = input_shape + out_dim = self._mlp_lora_head._out_dim - return [out_dim, 1, 1] + if self.return_key == "x_norm_patchtokens": + return [441, out_dim] + elif self.return_key == "x_norm_clstoken": + return [out_dim] + else: + raise NotImplementedError def __repr__(self): """Pretty print network.""" - print( - "**Number of learnable params:", - sum(p.numel() for p in self.nets.parameters() if p.requires_grad), - " Freeze:", - self._freeze, - ) - print("**Number of params:", sum(p.numel() for p in self.nets.parameters())) + # print( + # "**Number of learnable params:", + # sum(p.numel() for p in self.nets.parameters() if p.requires_grad), + # " Freeze:", + # self._freeze, + # ) + # print("**Number of params:", sum(p.numel() for p in self.nets.parameters())) header = "{}".format(str(self.__class__.__name__)) return ( @@ -739,7 +751,7 @@ class Vit(ConvBase): Vision transformer """ - def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True): + def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return_key="x_norm_patchtokens"): """ Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 @@ -767,6 +779,9 @@ def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True): self._freeze = freeze self._input_coord_conv = False self._pretrained = False + self.return_key = return_key + if self.return_key not in ["x_norm_patchtokens", "x_norm_clstoken"]: + raise ValueError(f"return_key {self.return_key} not supported") self.preprocess = nn.Sequential( transforms.Resize((294, 294)), @@ -775,20 +790,20 @@ def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True): try: if self._vit_model_class == "vit_s": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vits14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vits14" ) if self._vit_model_class == "vit_l": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitl14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitl14" ) if self._vit_model_class == "vit_g": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitg14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitg14" ) if self._vit_model_class == "vit_b": - self.nets = dinov2_vits14_lc = torch.hub.load( - "facebookresearch/dinov2", "dinov2_vitb14_lc" + self.nets = dinov2_vits14 = torch.hub.load( + "facebookresearch/dinov2", "dinov2_vitb14" ) except ImportError: print("WARNING: could not load Vit") @@ -802,7 +817,8 @@ def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True): def forward(self, inputs): x = self.preprocess(inputs) - x = self.nets(x) + # x = self.nets(x) + x = self.nets.forward_features(x)[self.return_key] return x def output_shape(self, input_shape): @@ -817,9 +833,13 @@ def output_shape(self, input_shape): """ assert len(input_shape) == 3 - out_dim = 1000 + C, 
H, W = input_shape
+        out_dim = self.nets.patch_embed.proj.out_channels
 
-        return [out_dim, 1, 1]
+        if self.return_key == "x_norm_patchtokens":
+            return [441, out_dim]
+        elif self.return_key == "x_norm_clstoken":
+            return [out_dim]
 
     def __repr__(self):
         """Pretty print network."""
diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 5aea2fa3..94d0e13b 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -18,6 +18,70 @@
 import scipy
 import matplotlib.pyplot as plt
 
+def interpolate_arr(v, seq_length):
+    """
+    v: (B, T, D)
+    seq_length: int
+    """
+    assert len(v.shape) == 3
+    if v.shape[1] == seq_length:
+        return v
+
+    interpolated = []
+    for i in range(v.shape[0]):
+        index = v[i]
+        if i == 20:
+            plt.plot(index[:, 2])
+            plt.savefig('index.png')
+            plt.close()
+
+        interp = scipy.interpolate.interp1d(
+            np.linspace(0, 1, index.shape[0]), index, axis=0
+        )
+        interpolated.append(interp(np.linspace(0, 1, seq_length)))
+
+        if i == 20:
+            plt.plot(interpolated[-1][:, 2])
+            plt.savefig('interpolated.png')
+            plt.close()
+
+
+    # L = v.shape[0]
+    # if L == seq_length:
+    #     return v
+
+    # interp = scipy.interpolate.interp1d(
+    #     np.linspace(0, 1, L), v, axis=0
+    # )
+    # return interp(np.linspace(0, 1, seq_length))
+
+    return np.array(interpolated)
+
+def interpolate_keys(obs, keys, seq_length):
+    for k in keys:
+        v = obs[k]
+        L = v.shape[0]
+        if L == seq_length:
+            continue
+
+        if k == "pad_mask":
+            # interpolate it by simply copying each index (seq_length / seq_length_to_load) times
+            obs[k] = np.repeat(v, (seq_length // L), axis=0)
+        elif k != "pad_mask":
+            # plot v[:, 3]
+            # plt.plot(v[:, 2])
+            # plt.savefig('v_3.png')
+            # plt.close()
+            interp = scipy.interpolate.interp1d(
+                np.linspace(0, 1, L), v, axis=0
+            )
+            try:
+                obs[k] = interp(np.linspace(0, 1, seq_length))
+            except:
+                breakpoint()
+            # plt.plot(obs[k][:, 2])
+            # plt.savefig('v_3_after.png')
+            # plt.close()
 
 class SequenceDataset(torch.utils.data.Dataset):
     def __init__(
@@ -501,32 +565,6 @@ def get_item(self, index):
 
         return meta
 
-    def interpolate_keys(self, obs, keys, seq_length):
-        for k in keys:
-            v = obs[k]
-            L = v.shape[0]
-            if L == seq_length:
-                continue
-
-            if k == "pad_mask":
-                # interpolate it by simply copying each index (seq_length / seq_length_to_load) times
-                obs[k] = np.repeat(v, (seq_length // L), axis=0)
-            elif k != "pad_mask":
-                # plot v[:, 3]
-                # plt.plot(v[:, 2])
-                # plt.savefig('v_3.png')
-                # plt.close()
-                interp = scipy.interpolate.interp1d(
-                    np.linspace(0, 1, L), v, axis=0
-                )
-                try:
-                    obs[k] = interp(np.linspace(0, 1, seq_length))
-                except:
-                    breakpoint()
-                # plt.plot(obs[k][:, 2])
-                # plt.savefig('v_3_after.png')
-                # plt.close()
-
     def get_sequence_from_demo(
         self,
         demo_id,
@@ -646,7 +684,7 @@ def get_obs_sequence_from_demo(
         # to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")]
         to_interp = ["pad_mask"]
         # t = time.time()
-        self.interpolate_keys(obs, to_interp, seq_length)
+        interpolate_keys(obs, to_interp, seq_length)
         # print("Interpolation time: ", time.time() - t)
 
         return obs
@@ -689,9 +727,9 @@ def get_dataset_sequence_from_demo(
         # interpolate actions
         to_interp = [k for k in data]
         # t = time.time()
-        if data["actions"].shape[0] == 1 and len(data["actions"].shape) == 3:
-            data["actions"] = data["actions"][0]
+        if data[self.ac_key].shape[0] == 1 and len(data[self.ac_key].shape) == 3:
+            data[self.ac_key] = data[self.ac_key][0]
-        self.interpolate_keys(data, to_interp, seq_length)
+        interpolate_keys(data, to_interp, seq_length)
         # print("Interpolation time: ", time.time() - t)
 
        return data
 
From 5783f01740554649c56587554b2ac9cf4777ffc4 Mon Sep 17 00:00:00 2001
From: Simar Kareer
Date: Wed, 31 Jul 2024 10:55:28 -0400
Subject: [PATCH 30/44] removed unintended plt save

---
 robomimic/utils/dataset.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 94d0e13b..2cf7712e 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -30,20 +30,20 @@ def interpolate_arr(v, seq_length):
     interpolated = []
     for i in range(v.shape[0]):
         index = v[i]
-        if i == 20:
-            plt.plot(index[:, 2])
-            plt.savefig('index.png')
-            plt.close()
+        # if i == 20:
+        #     plt.plot(index[:, 2])
+        #     plt.savefig('index.png')
+        #     plt.close()
 
         interp = scipy.interpolate.interp1d(
             np.linspace(0, 1, index.shape[0]), index, axis=0
         )
         interpolated.append(interp(np.linspace(0, 1, seq_length)))
 
-        if i == 20:
-            plt.plot(interpolated[-1][:, 2])
-            plt.savefig('interpolated.png')
-            plt.close()
+        # if i == 20:
+        #     plt.plot(interpolated[-1][:, 2])
+        #     plt.savefig('interpolated.png')
+        #     plt.close()
 
 
     # L = v.shape[0]

From f0dcd7a667a5ff2ac03909d8a126473c74dbfc35 Mon Sep 17 00:00:00 2001
From: rl2aloha
Date: Mon, 12 Aug 2024 17:17:08 -0400
Subject: [PATCH 31/44] move norm stats to cuda if needed

---
 robomimic/utils/obs_utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py
index 31099616..c6db024d 100644
--- a/robomimic/utils/obs_utils.py
+++ b/robomimic/utils/obs_utils.py
@@ -533,6 +533,9 @@ def _norm_helper(obs, mean, std):
         reshape_padding = tuple([1] * shape_len_diff)
         mean = mean.reshape(reshape_padding + tuple(mean.shape))
         std = std.reshape(reshape_padding + tuple(std.shape))
+        if isinstance(obs, torch.Tensor) and isinstance(mean, np.ndarray):
+            mean = torch.from_numpy(mean).to(obs.device)
+            std = torch.from_numpy(std).to(obs.device)
 
         return (obs - mean) / std

From 63ac6cedd10afa8cbfa313002f697f4c647cae41 Mon Sep 17 00:00:00 2001
From: Simar Kareer
Date: Mon, 19 Aug 2024 17:58:41 -0400
Subject: [PATCH 32/44] normalize actions option

---
 robomimic/utils/dataset.py | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 2cf7712e..fe1026bb 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -58,6 +58,11 @@ def interpolate_arr(v, seq_length):
     return np.array(interpolated)
 
 def interpolate_keys(obs, keys, seq_length):
+    """
+    obs: dict with values of shape (T, D)
+    keys: list of keys to interpolate
+    seq_length: int changes shape (T, D) to (seq_length, D)
+    """
     for k in keys:
         v = obs[k]
         L = v.shape[0]
@@ -78,7 +83,7 @@ def interpolate_keys(obs, keys, seq_length):
             try:
                 obs[k] = interp(np.linspace(0, 1, seq_length))
             except:
-                breakpoint()
+                raise ValueError(f"Interpolation failed for key: {k} with shape {v.shape}")
             # plt.plot(obs[k][:, 2])
             # plt.savefig('v_3_after.png')
             # plt.close()
@@ -102,6 +107,7 @@ def __init__(
         filter_by_attribute=None,
         load_next_obs=True,
         prestacked_actions=False,
+        hdf5_normalize_actions=False
     ):
         """
         Dataset class for fetching sequences of experience.
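
The interpolation helpers touched by the last few patches all share one scipy pattern: place
the T source samples on a unit interval and re-evaluate at seq_length evenly spaced points.
A self-contained sketch of that resampling step (names here are illustrative):

    import numpy as np
    import scipy.interpolate

    def resample(traj, seq_length):
        # traj: (T, D) -> (seq_length, D) via linear interpolation along time
        t_src = np.linspace(0, 1, traj.shape[0])
        t_dst = np.linspace(0, 1, seq_length)
        return scipy.interpolate.interp1d(t_src, traj, axis=0)(t_dst)

    traj = np.cumsum(np.random.randn(30, 7), axis=0)  # toy 30-step, 7-dim trajectory
    up = resample(traj, 60)
    assert up.shape == (60, 7)
    # the first and last samples are preserved exactly
    assert np.allclose(up[0], traj[0]) and np.allclose(up[-1], traj[-1])

Note that pad_mask is deliberately excluded from this treatment and is tiled with np.repeat
instead, since linearly interpolating a boolean mask would produce fractional values.
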
@@ -157,6 +163,7 @@ def __init__( self.hdf5_path = os.path.expanduser(hdf5_path) self.hdf5_use_swmr = hdf5_use_swmr self.hdf5_normalize_obs = hdf5_normalize_obs + self.hdf5_normalize_actions = hdf5_normalize_actions self._hdf5_file = None self.ac_key = ac_key @@ -442,7 +449,8 @@ def _calc_helper(hdf5_key): if ObsUtils.key_is_obs_modality(key, "low_dim"): obs_normalization_stats[key] = _calc_helper(f"obs/{key}") - obs_normalization_stats["actions"] = _calc_helper(self.ac_key) + if self.hdf5_normalize_actions: + obs_normalization_stats["actions"] = _calc_helper(self.ac_key) return obs_normalization_stats def get_obs_normalization_stats(self): @@ -727,8 +735,14 @@ def get_dataset_sequence_from_demo( # interpolate actions to_interp = [k for k in data] # t = time.time() - if data[self.ac_key].shape[0] == 1 and len(data[self.ac_key].shape) == 3: - data[self.ac_key] = data[self.ac_key][0] + for k in data: + if k == "pad_mask": + continue + if data[k].shape[0] == 1 and len(data[k].shape) == 3: + data[k] = data[k][0] + if not "actions" in k: + raise ValueError("Interpolating actions, but key is not an action, key: ", k) + interpolate_keys(data, to_interp, seq_length) # print("Interpolation time: ", time.time() - t) return data From 1e01423947a5e912ee69d2ac2b82ae2c4dea48e1 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Fri, 23 Aug 2024 14:51:50 -0400 Subject: [PATCH 33/44] action norm stats --- robomimic/utils/dataset.py | 6 ++++-- robomimic/utils/obs_utils.py | 16 ++++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index fe1026bb..00bac5de 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -449,8 +449,10 @@ def _calc_helper(hdf5_key): if ObsUtils.key_is_obs_modality(key, "low_dim"): obs_normalization_stats[key] = _calc_helper(f"obs/{key}") - if self.hdf5_normalize_actions: - obs_normalization_stats["actions"] = _calc_helper(self.ac_key) + for key in self.dataset_keys: + if "actions" in key: + obs_normalization_stats[key] = _calc_helper(key) + return obs_normalization_stats def get_obs_normalization_stats(self): diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py index c6db024d..6b76373e 100644 --- a/robomimic/utils/obs_utils.py +++ b/robomimic/utils/obs_utils.py @@ -549,10 +549,12 @@ def _norm_helper(obs, mean, std): batch["obs"][m] = _norm_helper(batch["obs"][m], mean, std) if normalize_actions: - ac_mean = normalization_stats["actions"]["mean"][0] - ac_std = normalization_stats["actions"]["std"][0] + for k in batch: + if "actions" in k: + ac_mean = normalization_stats[k]["mean"][0] + ac_std = normalization_stats[k]["std"][0] - batch["actions"] = _norm_helper(batch["actions"], ac_mean, ac_std) + batch[k] = _norm_helper(batch[k], ac_mean, ac_std) return batch @@ -607,10 +609,12 @@ def _unnorm_helper(obs, mean, std): batch["obs"][m] = _unnorm_helper(batch["obs"][m], mean, std) - ac_mean = normalization_stats["actions"]["mean"][0] - ac_std = normalization_stats["actions"]["std"][0] + for k in batch: + if "actions" in k: + ac_mean = normalization_stats[k]["mean"][0] + ac_std = normalization_stats[k]["std"][0] - batch["actions"] = _unnorm_helper(batch["actions"], ac_mean, ac_std) + batch[k] = _unnorm_helper(batch[k], ac_mean, ac_std) return batch From 1dfd46acafed29527cf4dd5e26a1e6a364ab8b64 Mon Sep 17 00:00:00 2001 From: Dhruv2012 Date: Mon, 9 Sep 2024 16:26:57 -0400 Subject: [PATCH 34/44] unnorm case bug fixed --- robomimic/utils/obs_utils.py | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py index 6b76373e..e2047996 100644 --- a/robomimic/utils/obs_utils.py +++ b/robomimic/utils/obs_utils.py @@ -608,9 +608,11 @@ def _unnorm_helper(obs, mean, std): std = normalization_stats[m]["std"][0] batch["obs"][m] = _unnorm_helper(batch["obs"][m], mean, std) - + for k in batch: if "actions" in k: + if normalization_stats is None: + continue ac_mean = normalization_stats[k]["mean"][0] ac_std = normalization_stats[k]["std"][0] From a05e1d09023e69ccf68c57a752b0c1e82c8afb48 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Fri, 18 Oct 2024 14:19:41 -0400 Subject: [PATCH 35/44] revert black formatting for PR --- docs/conf.py | 61 +- examples/add_new_modality.py | 54 +- examples/simple_config.py | 11 +- examples/simple_obs_nets.py | 14 +- examples/simple_train_loop.py | 57 +- examples/train_bc_rnn.py | 365 ++++------- robomimic/__init__.py | 140 +--- robomimic/algo/__init__.py | 13 +- robomimic/algo/algo.py | 77 +-- robomimic/algo/bc.py | 158 ++--- robomimic/algo/bcq.py | 320 +++------ robomimic/algo/cql.py | 301 +++------ robomimic/algo/gl.py | 179 ++--- robomimic/algo/hbc.py | 112 +--- robomimic/algo/iql.py | 90 +-- robomimic/algo/iris.py | 60 +- robomimic/algo/td3_bc.py | 139 ++-- robomimic/config/__init__.py | 1 - robomimic/config/base_config.py | 253 +++---- robomimic/config/bc_config.py | 177 ++--- robomimic/config/bcq_config.py | 130 ++-- robomimic/config/config.py | 96 ++- robomimic/config/cql_config.py | 110 ++-- robomimic/config/gl_config.py | 122 ++-- robomimic/config/hbc_config.py | 62 +- robomimic/config/iql_config.py | 108 ++- robomimic/config/iris_config.py | 57 +- robomimic/config/td3_bc_config.py | 100 +-- robomimic/envs/env_base.py | 37 +- robomimic/envs/env_gym.py | 57 +- robomimic/envs/env_ig_momart.py | 199 ++---- robomimic/envs/env_robosuite.py | 131 ++-- robomimic/envs/wrappers.py | 39 +- robomimic/macros.py | 10 +- robomimic/models/base_nets.py | 419 ++++-------- robomimic/models/distributions.py | 15 +- robomimic/models/obs_core.py | 280 +++----- robomimic/models/obs_nets.py | 325 ++++----- robomimic/models/policy_nets.py | 287 +++----- robomimic/models/transformers.py | 9 +- robomimic/models/vae_nets.py | 317 +++------ robomimic/models/value_nets.py | 32 +- robomimic/models/vit_rein.py | 13 +- robomimic/scripts/config_gen/act_gen.py | 23 +- robomimic/scripts/config_gen/helper.py | 385 ++++++----- robomimic/scripts/conversion/convert_d4rl.py | 33 +- .../scripts/conversion/convert_robosuite.py | 2 +- .../conversion/convert_roboturk_pilot.py | 36 +- robomimic/scripts/dataset_states_to_obs.py | 142 ++-- robomimic/scripts/download_datasets.py | 73 +-- robomimic/scripts/download_momart_datasets.py | 50 +- .../scripts/generate_config_templates.py | 3 +- robomimic/scripts/generate_paper_configs.py | 616 +++++++----------- robomimic/scripts/get_dataset_info.py | 35 +- robomimic/scripts/hyperparam_helper.py | 55 +- robomimic/scripts/playback_dataset.py | 122 ++-- robomimic/scripts/run_trained_agent.py | 102 +-- robomimic/scripts/setup_macros.py | 4 +- robomimic/scripts/split_train_val.py | 29 +- robomimic/scripts/train.py | 131 ++-- robomimic/utils/dataset.py | 198 ++---- robomimic/utils/env_utils.py | 109 ++-- robomimic/utils/file_utils.py | 109 +--- robomimic/utils/hyperparam_utils.py | 79 +-- robomimic/utils/log_utils.py | 84 +-- robomimic/utils/loss_utils.py | 71 +- robomimic/utils/obs_utils.py | 211 ++---- robomimic/utils/python_utils.py | 11 +- 
robomimic/utils/tensor_utils.py | 160 ++--- robomimic/utils/test_utils.py | 40 +- robomimic/utils/torch_utils.py | 25 +- robomimic/utils/train_utils.py | 255 +++----- robomimic/utils/vis_utils.py | 9 +- setup.py | 13 +- tests/test_bc.py | 47 +- tests/test_bcq.py | 55 +- tests/test_cql.py | 53 +- tests/test_examples.py | 35 +- tests/test_hbc.py | 33 +- tests/test_iql.py | 45 +- tests/test_iris.py | 43 +- tests/test_scripts.py | 62 +- 82 files changed, 3155 insertions(+), 5970 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 40bf6e96..59eff968 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,8 +12,7 @@ import os import sys - -sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath('.')) import sphinx_book_theme import robomimic @@ -29,13 +28,13 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - "sphinx.ext.napoleon", - "sphinx_markdown_tables", - "sphinx.ext.mathjax", - "sphinx.ext.githubpages", - "sphinx.ext.autodoc", - "recommonmark", # use Sphinx-1.4 or newer - "nbsphinx", + 'sphinx.ext.napoleon', + 'sphinx_markdown_tables', + 'sphinx.ext.mathjax', + 'sphinx.ext.githubpages', + 'sphinx.ext.autodoc', + 'recommonmark', # use Sphinx-1.4 or newer + 'nbsphinx', ] @@ -45,7 +44,7 @@ # Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +templates_path = ['_templates'] # source_parsers = { # '.md': CommonMarkParser, @@ -53,15 +52,15 @@ # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md", ".ipynb"] +source_suffix = ['.rst', '.md', '.ipynb'] # The master toctree document. -master_doc = "index" +master_doc = 'index' # General information about the project. -project = "robomimic" -copyright = "the robomimic core team, 2023" -author = "the robomimic core team" +project = 'robomimic' +copyright = 'the robomimic core team, 2023' +author = 'the robomimic core team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -84,10 +83,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" +pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -98,7 +97,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "sphinx_book_theme" +html_theme = 'sphinx_book_theme' html_logo = "robomimic_logo.png" # Theme options are theme-specific and customize the look and feel of a theme @@ -110,7 +109,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # html_context = { # 'css_files': [ @@ -121,7 +120,7 @@ # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
-htmlhelp_basename = "robomimicdoc" +htmlhelp_basename = 'robomimicdoc' # -- Options for LaTeX output --------------------------------------------- @@ -130,12 +129,15 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. # # 'preamble': '', + # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -145,7 +147,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, "robomimic.tex", "robomimic Documentation", author, "manual"), + (master_doc, 'robomimic.tex', u'robomimic Documentation', author, 'manual'), ] @@ -153,7 +155,10 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "robomimic", "robomimic Documentation", [author], 1)] +man_pages = [ + (master_doc, 'robomimic', u'robomimic Documentation', + [author], 1) +] # -- Options for Texinfo output ------------------------------------------- @@ -162,13 +167,7 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ( - master_doc, - "robomimic", - "robomimic Documentation", - author, - "robomimic", - "ARISE", - "Miscellaneous", - ), + (master_doc, 'robomimic', u'robomimic Documentation', + author, 'robomimic', 'ARISE', + 'Miscellaneous'), ] diff --git a/examples/add_new_modality.py b/examples/add_new_modality.py index f14ab3f9..cf7fc876 100644 --- a/examples/add_new_modality.py +++ b/examples/add_new_modality.py @@ -45,11 +45,8 @@ def custom_scan_processor(obs): def custom_scan_unprocessor(obs): # Re-add the padding # Note: need to check type - return ( - np.concatenate([np.zeros(1), obs, np.zeros(1)]) - if isinstance(obs, np.ndarray) - else torch.concat([torch.zeros(1), obs, torch.zeros(1)]) - ) + return np.concatenate([np.zeros(1), obs, np.zeros(1)]) if isinstance(obs, np.ndarray) else \ + torch.concat([torch.zeros(1), obs, torch.zeros(1)]) # Override the default functions for ScanModality @@ -61,10 +58,11 @@ def custom_scan_unprocessor(obs): class CustomImageEncoderCore(EncoderCore): # For simplicity, this will be a pass-through with some simple kwargs def __init__( - self, - input_shape, # Required, will be inferred automatically at runtime - # Any args below here you can specify arbitrarily - welcome_str, + self, + input_shape, # Required, will be inferred automatically at runtime + + # Any args below here you can specify arbitrarily + welcome_str, ): # Always need to run super init first and pass in input_shape super().__init__(input_shape=input_shape) @@ -92,7 +90,6 @@ class CustomImageRandomizer(Randomizer): through the network, resulting in outputs corresponding to each copy - we will pool these outputs across the copies with a simple average. """ - def __init__( self, input_shape, @@ -107,7 +104,7 @@ def __init__( """ super(CustomImageRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) self.input_shape = input_shape self.num_rand = num_rand @@ -121,7 +118,7 @@ def output_shape_in(self, input_shape=None): Args: input_shape (iterable of int): shape of input. Does not include batch dimension. 
- Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -141,13 +138,13 @@ def output_shape_out(self, input_shape=None): Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - + # since the @forward_out operation splits [B * N, ...] -> [B, N, ...] # and then pools to result in [B, ...], only the batch dimension changes, # and so the other dimensions retain their shape. @@ -167,7 +164,7 @@ def forward_in(self, inputs): out = TensorUtils.unsqueeze_expand_at(inputs, size=self.num_rand, dim=1) # add random noise to each copy - out = out + self.noise_scale * (2.0 * torch.rand_like(out) - 1.0) + out = out + self.noise_scale * (2. * torch.rand_like(out) - 1.) # reshape [B, N, C, H, W] -> [B * N, C, H, W] to ensure network forward pass is unchanged return TensorUtils.join_dimensions(out, 0, 1) @@ -183,37 +180,26 @@ def forward_out(self, inputs): # note the use of @self.training to ensure no randomization at test-time if self.training: - batch_size = inputs.shape[0] // self.num_rand - out = TensorUtils.reshape_dimensions( - inputs, - begin_axis=0, - end_axis=0, - target_dims=(batch_size, self.num_rand), - ) + batch_size = (inputs.shape[0] // self.num_rand) + out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, + target_dims=(batch_size, self.num_rand)) return out.mean(dim=1) return inputs def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) + header = '{}'.format(str(self.__class__.__name__)) msg = header + "(input_shape={}, num_rand={}, noise_scale={})".format( - self.input_shape, self.num_rand, self.noise_scale - ) + self.input_shape, self.num_rand, self.noise_scale) return msg if __name__ == "__main__": # Now, we can directly reference the classes in our config! config = BCConfig() - config.observation.encoder.custom_image.core_class = ( - "CustomImageEncoderCore" # Custom class, in string form - ) - config.observation.encoder.custom_image.core_kwargs.welcome_str = ( - "hi there!" # Any custom arguments, of any primitive type that is json-able - ) - config.observation.encoder.custom_image.obs_randomizer_class = ( - "CustomImageRandomizer" - ) + config.observation.encoder.custom_image.core_class = "CustomImageEncoderCore" # Custom class, in string form + config.observation.encoder.custom_image.core_kwargs.welcome_str = "hi there!" # Any custom arguments, of any primitive type that is json-able + config.observation.encoder.custom_image.obs_randomizer_class = "CustomImageRandomizer" config.observation.encoder.custom_image.obs_randomizer_kwargs.num_rand = 3 config.observation.encoder.custom_image.obs_randomizer_kwargs.noise_scale = 0.05 diff --git a/examples/simple_config.py b/examples/simple_config.py index 01f274e5..e80c6219 100644 --- a/examples/simple_config.py +++ b/examples/simple_config.py @@ -1,7 +1,6 @@ """ An example for creating and using the custom Config object. 
""" - from robomimic.config.base_config import Config if __name__ == "__main__": @@ -37,10 +36,14 @@ # read external config from a dict ext_config = { - "train": {"learning_rate": 1e-3}, - "algo": {"actor_network_size": [1000, 1000]}, + "train": { + "learning_rate": 1e-3 + }, + "algo": { + "actor_network_size": [1000, 1000] + } } with config.values_unlocked(): config.update(ext_config) - print(config) + print(config) \ No newline at end of file diff --git a/examples/simple_obs_nets.py b/examples/simple_obs_nets.py index c719f65c..236beaa8 100644 --- a/examples/simple_obs_nets.py +++ b/examples/simple_obs_nets.py @@ -33,7 +33,7 @@ def simple_obs_example(): "backbone_class": "ResNet18Conv", # use ResNet18 as the visualcore backbone "backbone_kwargs": {"pretrained": False, "input_coord_conv": False}, "pool_class": "SpatialSoftmax", # use spatial softmax to regularize the model output - "pool_kwargs": {"num_kp": 32}, + "pool_kwargs": {"num_kp": 32} } # register the network for processing the observation key @@ -50,9 +50,7 @@ def simple_obs_example(): camera2_shape = [3, 160, 240] # We could also attach an observation randomizer to perturb the input observation key before sending to the network - image_randomizer = CropRandomizer( - input_shape=camera2_shape, crop_height=140, crop_width=220 - ) + image_randomizer = CropRandomizer(input_shape=camera2_shape, crop_height=140, crop_width=220) # the cropper will alter the input shape net_kwargs["input_shape"] = image_randomizer.output_shape_in(camera2_shape) @@ -88,9 +86,7 @@ def simple_obs_example(): "low_dim": ["proprio"], "rgb": ["camera1", "camera2", "camera3"], } - ObsUtils.initialize_obs_modality_mapping_from_dict( - modality_mapping=obs_modality_mapping - ) + ObsUtils.initialize_obs_modality_mapping_from_dict(modality_mapping=obs_modality_mapping) # Finally, construct the observation encoder obs_encoder.make() @@ -103,7 +99,7 @@ def simple_obs_example(): "camera1": torch.randn(camera1_shape), "camera2": torch.randn(camera2_shape), "camera3": torch.randn(camera3_shape), - "proprio": torch.randn(proprio_shape), + "proprio": torch.randn(proprio_shape) } # Add a batch dimension @@ -123,7 +119,7 @@ def simple_obs_example(): # A convenient wrapper for decoding the feature vector to named output is ObservationDecoder obs_decoder = ObservationDecoder( input_feat_dim=obs_encoder.output_shape()[0], - decode_shapes=OrderedDict({"action": (7,)}), + decode_shapes=OrderedDict({"action": (7,)}) ) # Send to GPU if applicable diff --git a/examples/simple_train_loop.py b/examples/simple_train_loop.py index acaf0985..3e5c3c50 100644 --- a/examples/simple_train_loop.py +++ b/examples/simple_train_loop.py @@ -6,7 +6,6 @@ can interact. This is meant to help others who would like to use our provided datasets and dataset class in other applications. 
""" - import numpy as np import torch @@ -32,28 +31,28 @@ def get_data_loader(dataset_path): """ dataset = SequenceDataset( hdf5_path=dataset_path, - obs_keys=( # observations we want to appear in batches - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + obs_keys=( # observations we want to appear in batches + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", "object", ), - dataset_keys=( # can optionally specify more keys here if they should appear in batches - "actions", - "rewards", + dataset_keys=( # can optionally specify more keys here if they should appear in batches + "actions", + "rewards", "dones", ), load_next_obs=True, frame_stack=1, - seq_length=10, # length-10 temporal sequences + seq_length=10, # length-10 temporal sequences pad_frame_stack=True, - pad_seq_length=True, # pad last obs per trajectory to ensure all sequences are sampled + pad_seq_length=True, # pad last obs per trajectory to ensure all sequences are sampled get_pad_mask=False, goal_mode=None, - hdf5_cache_mode="all", # cache dataset in memory to avoid repeated file i/o + hdf5_cache_mode="all", # cache dataset in memory to avoid repeated file i/o hdf5_use_swmr=True, hdf5_normalize_obs=False, - filter_by_attribute=None, # can optionally provide a filter key here + filter_by_attribute=None, # can optionally provide a filter key here ) print("\n============= Created Dataset =============") print(dataset) @@ -61,11 +60,11 @@ def get_data_loader(dataset_path): data_loader = DataLoader( dataset=dataset, - sampler=None, # no custom sampling logic (uniform sampling) - batch_size=100, # batches of size 100 + sampler=None, # no custom sampling logic (uniform sampling) + batch_size=100, # batches of size 100 shuffle=True, num_workers=0, - drop_last=True, # don't provide last batch in dataset pass if it's less than 100 in size + drop_last=True # don't provide last batch in dataset pass if it's less than 100 in size ) return data_loader @@ -83,15 +82,13 @@ def get_example_model(dataset_path, device): # read dataset to get some metadata for constructing model shape_meta = FileUtils.get_shape_metadata_from_dataset( - dataset_path=dataset_path, - all_obs_keys=sorted( - ( - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ) - ), + dataset_path=dataset_path, + all_obs_keys=sorted(( + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + )), ) # make BC model @@ -111,11 +108,7 @@ def print_batch_info(batch): if k in ["obs", "next_obs"]: print("key {}".format(k)) for obs_key in batch[k]: - print( - " obs key {} with shape {}".format( - obs_key, batch[k][obs_key].shape - ) - ) + print(" obs key {} with shape {}".format(obs_key, batch[k][obs_key].shape)) else: print("key {} with shape {}".format(k, batch[k].shape)) print("") @@ -138,7 +131,7 @@ def run_train_loop(model, data_loader): # ensure model is in train mode model.set_train() - for epoch in range(1, num_epochs + 1): # epoch numbers start at 1 + for epoch in range(1, num_epochs + 1): # epoch numbers start at 1 # iterator for data_loader - it yields batches data_loader_iter = iter(data_loader) @@ -162,9 +155,7 @@ def run_train_loop(model, data_loader): # process batch for training input_batch = model.process_batch_for_training(batch) - input_batch = model.postprocess_batch_for_training( - input_batch, obs_normalization_stats=None - ) + input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=None) # forward and backward pass info = 
model.train_on_batch(batch=input_batch, epoch=epoch, validate=False) diff --git a/examples/train_bc_rnn.py b/examples/train_bc_rnn.py index 21ffa206..b3ec7c51 100644 --- a/examples/train_bc_rnn.py +++ b/examples/train_bc_rnn.py @@ -16,7 +16,6 @@ python train_bc_rnn.py --dataset /path/to/dataset.hdf5 --output /path/to/output_dir """ - import argparse import robomimic @@ -38,62 +37,40 @@ def robosuite_hyperparameters(config): Config: Modified config """ ## save config - if and when to save checkpoints ## - config.experiment.save.enabled = ( - True # whether model saving should be enabled or disabled - ) - config.experiment.save.every_n_seconds = ( - None # save model every n seconds (set to None to disable) - ) - config.experiment.save.every_n_epochs = ( - 50 # save model every n epochs (set to None to disable) - ) - config.experiment.save.epochs = [] # save model on these specific epochs - config.experiment.save.on_best_validation = ( - False # save models that achieve best validation score - ) - config.experiment.save.on_best_rollout_return = ( - False # save models that achieve best rollout return - ) - config.experiment.save.on_best_rollout_success_rate = ( - True # save models that achieve best success rate - ) + config.experiment.save.enabled = True # whether model saving should be enabled or disabled + config.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) + config.experiment.save.every_n_epochs = 50 # save model every n epochs (set to None to disable) + config.experiment.save.epochs = [] # save model on these specific epochs + config.experiment.save.on_best_validation = False # save models that achieve best validation score + config.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return + config.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate # epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps - config.experiment.validation_epoch_every_n_steps = ( - 10 # each validation epoch is 10 gradient steps - ) + config.experiment.epoch_every_n_steps = 100 # each epoch is 100 gradient steps + config.experiment.validation_epoch_every_n_steps = 10 # each validation epoch is 10 gradient steps # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - config.experiment.env = None # no need to set this (unless you want to override) - config.experiment.additional_envs = ( - None # additional environments that should get evaluated - ) + config.experiment.env = None # no need to set this (unless you want to override) + config.experiment.additional_envs = None # additional environments that should get evaluated ## rendering config ## - config.experiment.render = False # render on-screen or not - config.experiment.render_video = True # render evaluation rollouts to videos - config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - config.experiment.video_skip = ( - 5 # render video frame every n environment steps during rollout - ) + config.experiment.render = False # render on-screen or not + config.experiment.render_video = True # render evaluation rollouts to videos + config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + config.experiment.video_skip = 5 
# render video frame every n environment steps during rollout ## evaluation rollout config ## - config.experiment.rollout.enabled = True # enable evaluation rollouts - config.experiment.rollout.n = 50 # number of rollouts per evaluation - config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py) - config.experiment.rollout.rate = 50 # do rollouts every @rate epochs - config.experiment.rollout.warmstart = ( - 0 # number of epochs to wait before starting rollouts - ) - config.experiment.rollout.terminate_on_success = ( - True # end rollout early after task success - ) + config.experiment.rollout.enabled = True # enable evaluation rollouts + config.experiment.rollout.n = 50 # number of rollouts per evaluation + config.experiment.rollout.horizon = 400 # set horizon based on length of demonstrations (can be obtained with scripts/get_dataset_info.py) + config.experiment.rollout.rate = 50 # do rollouts every @rate epochs + config.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts + config.experiment.rollout.terminate_on_success = True # end rollout early after task success ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - config.train.num_data_workers = 0 # assume low-dim dataset + config.train.num_data_workers = 0 # assume low-dim dataset # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set @@ -101,15 +78,15 @@ def robosuite_hyperparameters(config): # You should almost never set this to None, even for large image datasets. config.train.hdf5_cache_mode = "all" - config.train.hdf5_use_swmr = True # used for parallel data loading + config.train.hdf5_use_swmr = True # used for parallel data loading # if true, normalize observations at train and test time, using the global mean and standard deviation # of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs # in utils/dataset.py for more information. 
- config.train.hdf5_normalize_obs = False # no obs normalization + config.train.hdf5_normalize_obs = False # no obs normalization # if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key" - config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split + config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split config.train.hdf5_validation_filter_key = "valid" # fetch sequences of length 10 from dataset for RNN training @@ -123,51 +100,38 @@ def robosuite_hyperparameters(config): ) # one of [None, "last"] - set to "last" to include goal observations in each batch - config.train.goal_mode = None # no need for goal observations + config.train.goal_mode = None # no need for goal observations ## learning config ## - config.train.cuda = True # try to use GPU (if present) or not - config.train.batch_size = 100 # batch size - config.train.num_epochs = 2000 # number of training epochs - config.train.seed = 1 # seed for training + config.train.cuda = True # try to use GPU (if present) or not + config.train.batch_size = 100 # batch size + config.train.num_epochs = 2000 # number of training epochs + config.train.seed = 1 # seed for training + ### Observation Config ### - config.observation.modalities.obs.low_dim = ( - [ # specify low-dim observations for agent - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - ) - config.observation.modalities.obs.rgb = [] # no image observations - config.observation.modalities.goal.low_dim = [] # no low-dim goals - config.observation.modalities.goal.rgb = [] # no image goals + config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + config.observation.modalities.obs.rgb = [] # no image observations + config.observation.modalities.goal.low_dim = [] # no low-dim goals + config.observation.modalities.goal.rgb = [] # no image goals # observation encoder architecture - applies to all networks that take observation dicts as input config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = ( - "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = ( - 0.0 # Default arguments for 
"SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # Default arguments for "SpatialSoftmax" # if you prefer to use pre-trained visual representations, uncomment the following lines # R3M @@ -193,46 +157,34 @@ def robosuite_hyperparameters(config): ### Algo Config ### # optimization parameters - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate - config.algo.optim_params.policy.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - config.algo.optim_params.policy.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - config.algo.optim_params.policy.regularization.L2 = ( - 0.00 # L2 regularization strength - ) + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate + config.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + config.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength # loss weights - config.algo.loss.l2_weight = 1.0 # L2 loss weight - config.algo.loss.l1_weight = 0.0 # L1 loss weight - config.algo.loss.cos_weight = 0.0 # cosine loss weight + config.algo.loss.l2_weight = 1.0 # L2 loss weight + config.algo.loss.l1_weight = 0.0 # L1 loss weight + config.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) - config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output + config.algo.actor_layer_dims = () # empty MLP - go from RNN layer directly to action output # stochastic GMM policy - config.algo.gmm.enabled = ( - True # enable GMM policy - policy outputs GMM action distribution - ) - config.algo.gmm.num_modes = 5 # number of GMM modes - config.algo.gmm.min_std = 0.0001 # minimum std output from network - config.algo.gmm.std_activation = ( - "softplus" # activation to use for std output from policy net - ) - config.algo.gmm.low_noise_eval = True # low-std at test-time + config.algo.gmm.enabled = True # enable GMM policy - policy outputs GMM action distribution + config.algo.gmm.num_modes = 5 # number of GMM modes + config.algo.gmm.min_std = 0.0001 # minimum std output from network + config.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net + config.algo.gmm.low_noise_eval = True # low-std at test-time # rnn policy config - config.algo.rnn.enabled = True # enable RNN policy - config.algo.rnn.horizon = ( - 10 # unroll length for RNN - should usually match train.seq_length - ) - config.algo.rnn.hidden_dim = 400 # hidden dimension size - config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" - config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked - config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden 
state - config.algo.rnn.kwargs.bidirectional = False # rnn kwargs + config.algo.rnn.enabled = True # enable RNN policy + config.algo.rnn.horizon = 10 # unroll length for RNN - should usually match train.seq_length + config.algo.rnn.hidden_dim = 400 # hidden dimension size + config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" + config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked + config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state + config.algo.rnn.kwargs.bidirectional = False # rnn kwargs return config @@ -248,62 +200,40 @@ def momart_hyperparameters(config): Config: Modified config """ ## save config - if and when to save checkpoints ## - config.experiment.save.enabled = ( - True # whether model saving should be enabled or disabled - ) - config.experiment.save.every_n_seconds = ( - None # save model every n seconds (set to None to disable) - ) - config.experiment.save.every_n_epochs = ( - 3 # save model every n epochs (set to None to disable) - ) - config.experiment.save.epochs = [] # save model on these specific epochs - config.experiment.save.on_best_validation = ( - True # save models that achieve best validation score - ) - config.experiment.save.on_best_rollout_return = ( - False # save models that achieve best rollout return - ) - config.experiment.save.on_best_rollout_success_rate = ( - True # save models that achieve best success rate - ) + config.experiment.save.enabled = True # whether model saving should be enabled or disabled + config.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) + config.experiment.save.every_n_epochs = 3 # save model every n epochs (set to None to disable) + config.experiment.save.epochs = [] # save model on these specific epochs + config.experiment.save.on_best_validation = True # save models that achieve best validation score + config.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return + config.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate # epoch definition - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - config.experiment.epoch_every_n_steps = None # each epoch is 100 gradient steps - config.experiment.validation_epoch_every_n_steps = ( - 10 # each validation epoch is 10 gradient steps - ) + config.experiment.epoch_every_n_steps = None # each epoch is 100 gradient steps + config.experiment.validation_epoch_every_n_steps = 10 # each validation epoch is 10 gradient steps # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - config.experiment.env = None # no need to set this (unless you want to override) - config.experiment.additional_envs = ( - None # additional environments that should get evaluated - ) + config.experiment.env = None # no need to set this (unless you want to override) + config.experiment.additional_envs = None # additional environments that should get evaluated ## rendering config ## - config.experiment.render = False # render on-screen or not - config.experiment.render_video = True # render evaluation rollouts to videos - config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - config.experiment.video_skip = ( - 5 # render video frame every n environment steps during rollout - ) + config.experiment.render = False # 
render on-screen or not + config.experiment.render_video = True # render evaluation rollouts to videos + config.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + config.experiment.video_skip = 5 # render video frame every n environment steps during rollout ## evaluation rollout config ## - config.experiment.rollout.enabled = True # enable evaluation rollouts - config.experiment.rollout.n = 30 # number of rollouts per evaluation - config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout - config.experiment.rollout.rate = 3 # do rollouts every @rate epochs - config.experiment.rollout.warmstart = ( - 0 # number of epochs to wait before starting rollouts - ) - config.experiment.rollout.terminate_on_success = ( - True # end rollout early after task success - ) + config.experiment.rollout.enabled = True # enable evaluation rollouts + config.experiment.rollout.n = 30 # number of rollouts per evaluation + config.experiment.rollout.horizon = 1500 # maximum number of env steps per rollout + config.experiment.rollout.rate = 3 # do rollouts every @rate epochs + config.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts + config.experiment.rollout.terminate_on_success = True # end rollout early after task success ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - config.train.num_data_workers = 2 # assume low-dim dataset + config.train.num_data_workers = 2 # assume low-dim dataset # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set @@ -311,15 +241,15 @@ def momart_hyperparameters(config): # You should almost never set this to None, even for large image datasets. config.train.hdf5_cache_mode = "low_dim" - config.train.hdf5_use_swmr = True # used for parallel data loading + config.train.hdf5_use_swmr = True # used for parallel data loading # if true, normalize observations at train and test time, using the global mean and standard deviation # of each observation in each dimension, computed across the training set. See SequenceDataset.normalize_obs # in utils/dataset.py for more information. 
- config.train.hdf5_normalize_obs = False # no obs normalization + config.train.hdf5_normalize_obs = False # no obs normalization # if provided, demonstrations are filtered by the list of demo keys under "mask/@hdf5_filter_key" - config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split + config.train.hdf5_filter_key = "train" # by default, use "train" and "valid" filter keys corresponding to train-valid split config.train.hdf5_validation_filter_key = "valid" # fetch sequences of length 10 from dataset for RNN training @@ -333,20 +263,19 @@ def momart_hyperparameters(config): ) # one of [None, "last"] - set to "last" to include goal observations in each batch - config.train.goal_mode = "last" # no need for goal observations + config.train.goal_mode = "last" # no need for goal observations ## learning config ## - config.train.cuda = True # try to use GPU (if present) or not - config.train.batch_size = 4 # batch size - config.train.num_epochs = 31 # number of training epochs - config.train.seed = 1 # seed for training + config.train.cuda = True # try to use GPU (if present) or not + config.train.batch_size = 4 # batch size + config.train.num_epochs = 31 # number of training epochs + config.train.seed = 1 # seed for training + ### Observation Config ### - config.observation.modalities.obs.low_dim = ( - [ # specify low-dim observations for agent - "proprio", - ] - ) + config.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent + "proprio", + ] config.observation.modalities.obs.rgb = [ "rgb", "rgb_wrist", @@ -359,55 +288,40 @@ def momart_hyperparameters(config): config.observation.modalities.obs.scan = [ "scan", ] - config.observation.modalities.goal.low_dim = [] # no low-dim goals - config.observation.modalities.goal.rgb = [] # no rgb image goals + config.observation.modalities.goal.low_dim = [] # no low-dim goals + config.observation.modalities.goal.rgb = [] # no rgb image goals ### Algo Config ### # optimization parameters - config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate - config.algo.optim_params.policy.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - config.algo.optim_params.policy.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - config.algo.optim_params.policy.regularization.L2 = ( - 0.00 # L2 regularization strength - ) + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate + config.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + config.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + config.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength # loss weights - config.algo.loss.l2_weight = 1.0 # L2 loss weight - config.algo.loss.l1_weight = 0.0 # L1 loss weight - config.algo.loss.cos_weight = 0.0 # cosine loss weight + config.algo.loss.l2_weight = 1.0 # L2 loss weight + config.algo.loss.l1_weight = 0.0 # L1 loss weight + config.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) - config.algo.actor_layer_dims = ( - 300, - 400, - ) # MLP layers between RNN layer and action output + config.algo.actor_layer_dims = (300, 400) # MLP layers between RNN layer and action output # stochastic GMM policy - config.algo.gmm.enabled = ( - True # enable GMM policy 
- policy outputs GMM action distribution - ) - config.algo.gmm.num_modes = 5 # number of GMM modes - config.algo.gmm.min_std = 0.01 # minimum std output from network - config.algo.gmm.std_activation = ( - "softplus" # activation to use for std output from policy net - ) - config.algo.gmm.low_noise_eval = True # low-std at test-time + config.algo.gmm.enabled = True # enable GMM policy - policy outputs GMM action distribution + config.algo.gmm.num_modes = 5 # number of GMM modes + config.algo.gmm.min_std = 0.01 # minimum std output from network + config.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net + config.algo.gmm.low_noise_eval = True # low-std at test-time # rnn policy config - config.algo.rnn.enabled = True # enable RNN policy - config.algo.rnn.horizon = ( - 50 # unroll length for RNN - should usually match train.seq_length - ) - config.algo.rnn.hidden_dim = 1200 # hidden dimension size - config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" - config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked - config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state - config.algo.rnn.kwargs.bidirectional = False # rnn kwargs + config.algo.rnn.enabled = True # enable RNN policy + config.algo.rnn.horizon = 50 # unroll length for RNN - should usually match train.seq_length + config.algo.rnn.hidden_dim = 1200 # hidden dimension size + config.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" + config.algo.rnn.num_layers = 2 # number of RNN layers that are stacked + config.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + hidden state + config.algo.rnn.kwargs.bidirectional = False # rnn kwargs return config @@ -425,9 +339,7 @@ def momart_hyperparameters(config): } -def get_config( - dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False -): +def get_config(dataset_type="robosuite", dataset_path=None, output_dir=None, debug=False): """ Construct config for training. @@ -440,9 +352,8 @@ def get_config( debug (bool): if True, shrink training and rollout times to test a full training run quickly. """ - assert ( - dataset_type in DATASET_TYPES - ), f"Invalid dataset type. Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}" + assert dataset_type in DATASET_TYPES, \ + f"Invalid dataset type. 
Valid options are: {list(DATASET_TYPES.keys())}, got: {dataset_type}" # handle args if dataset_path is None: @@ -457,24 +368,20 @@ def get_config( config = config_factory(algo_name="bc") ### Experiment Config ### - config.experiment.name = ( - f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files - ) - config.experiment.validate = True # whether to do validation or not - config.experiment.logging.terminal_output_to_txt = ( - False # whether to log stdout to txt file - ) - config.experiment.logging.log_tb = True # enable tensorboard logging + config.experiment.name = f"{dataset_type}_bc_rnn_example" # name of experiment used to make log files + config.experiment.validate = True # whether to do validation or not + config.experiment.logging.terminal_output_to_txt = False # whether to log stdout to txt file + config.experiment.logging.log_tb = True # enable tensorboard logging ### Train Config ### - config.train.data = dataset_path # path to hdf5 dataset + config.train.data = dataset_path # path to hdf5 dataset # Write all results to this directory. A new folder with the timestamp will be created # in this directory, and it will contain three subfolders - "log", "models", and "videos". # The "log" directory will contain tensorboard and stdout txt logs. The "models" directory # will contain saved model checkpoints. The "videos" directory contains evaluation rollout # videos. - config.train.output_dir = output_dir # path to output folder + config.train.output_dir = output_dir # path to output folder # Load default hyperparameters based on dataset type config = DATASET_TYPES[dataset_type]["hp"](config) @@ -520,8 +427,8 @@ def get_config( # debug flag for quick training run parser.add_argument( "--debug", - action="store_true", - help="set this flag to run a quick training run for debugging purposes", + action='store_true', + help="set this flag to run a quick training run for debugging purposes" ) # type @@ -531,7 +438,7 @@ def get_config( default="robosuite", choices=list(DATASET_TYPES.keys()), help=f"Dataset type to use. This will determine the default hyperparameter settings to use for training." - f"Valid options are: {list(DATASET_TYPES.keys())}. Default is robosuite.", + f"Valid options are: {list(DATASET_TYPES.keys())}. Default is robosuite." 
) args = parser.parse_args() @@ -545,7 +452,7 @@ def get_config( dataset_type=args.dataset_type, dataset_path=args.dataset, output_dir=args.output, - debug=args.debug, + debug=args.debug ) # set torch device diff --git a/robomimic/__init__.py b/robomimic/__init__.py index a6bfb900..1930630a 100644 --- a/robomimic/__init__.py +++ b/robomimic/__init__.py @@ -56,135 +56,53 @@ def register_all_links(): """ # all proficient human datasets - ph_tasks = [ - "lift", - "can", - "square", - "transport", - "tool_hang", - "lift_real", - "can_real", - "tool_hang_real", - ] + ph_tasks = ["lift", "can", "square", "transport", "tool_hang", "lift_real", "can_real", "tool_hang_real"] ph_horizons = [400, 400, 400, 700, 700, 1000, 1000, 1000] for task, horizon in zip(ph_tasks, ph_horizons): - register_dataset_link( - task=task, - dataset_type="ph", - hdf5_type="raw", - horizon=horizon, + register_dataset_link(task=task, dataset_type="ph", hdf5_type="raw", horizon=horizon, link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/demo{}.hdf5".format( task, "" if "real" in task else "_v141" - ), + ) ) # real world datasets only have demo.hdf5 files which already contain all observation modalities # while sim datasets store raw low-dim mujoco states in the demo.hdf5 if "real" not in task: - register_dataset_link( - task=task, - dataset_type="ph", - hdf5_type="low_dim", - horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/low_dim_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, - dataset_type="ph", - hdf5_type="image", - horizon=horizon, - link=None, - ) + register_dataset_link(task=task, dataset_type="ph", hdf5_type="low_dim", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/ph/low_dim_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="ph", hdf5_type="image", horizon=horizon, + link=None) # all multi human datasets mh_tasks = ["lift", "can", "square", "transport"] mh_horizons = [500, 500, 500, 1100] for task, horizon in zip(mh_tasks, mh_horizons): - register_dataset_link( - task=task, - dataset_type="mh", - hdf5_type="raw", - horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/demo_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, - dataset_type="mh", - hdf5_type="low_dim", - horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/low_dim_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, dataset_type="mh", hdf5_type="image", horizon=horizon, link=None - ) + register_dataset_link(task=task, dataset_type="mh", hdf5_type="raw", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/demo_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="mh", hdf5_type="low_dim", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mh/low_dim_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="mh", hdf5_type="image", horizon=horizon, + link=None) # all machine generated datasets for task, horizon in zip(["lift", "can"], [400, 400]): - register_dataset_link( - task=task, - dataset_type="mg", - hdf5_type="raw", - horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/demo_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, - dataset_type="mg", - hdf5_type="low_dim_sparse", - horizon=horizon, - 
link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_sparse_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, - dataset_type="mg", - hdf5_type="image_sparse", - horizon=horizon, - link=None, - ) - register_dataset_link( - task=task, - dataset_type="mg", - hdf5_type="low_dim_dense", - horizon=horizon, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_dense_v141.hdf5".format( - task - ), - ) - register_dataset_link( - task=task, - dataset_type="mg", - hdf5_type="image_dense", - horizon=horizon, - link=None, - ) + register_dataset_link(task=task, dataset_type="mg", hdf5_type="raw", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/demo_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="mg", hdf5_type="low_dim_sparse", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_sparse_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="mg", hdf5_type="image_sparse", horizon=horizon, + link=None) + register_dataset_link(task=task, dataset_type="mg", hdf5_type="low_dim_dense", horizon=horizon, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/{}/mg/low_dim_dense_v141.hdf5".format(task)) + register_dataset_link(task=task, dataset_type="mg", hdf5_type="image_dense", horizon=horizon, + link=None) # can-paired dataset - register_dataset_link( - task="can", - dataset_type="paired", - hdf5_type="raw", - horizon=400, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/demo_v141.hdf5", - ) - register_dataset_link( - task="can", - dataset_type="paired", - hdf5_type="low_dim", - horizon=400, - link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/low_dim_v141.hdf5", - ) - register_dataset_link( - task="can", dataset_type="paired", hdf5_type="image", horizon=400, link=None - ) + register_dataset_link(task="can", dataset_type="paired", hdf5_type="raw", horizon=400, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/demo_v141.hdf5") + register_dataset_link(task="can", dataset_type="paired", hdf5_type="low_dim", horizon=400, + link="http://downloads.cs.stanford.edu/downloads/rt_benchmark/can/paired/low_dim_v141.hdf5") + register_dataset_link(task="can", dataset_type="paired", hdf5_type="image", horizon=400, + link=None) def register_momart_dataset_link(task, dataset_type, link, dataset_size): diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index c35bbebc..dbe2ea4d 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -1,14 +1,4 @@ -from robomimic.algo.algo import ( - register_algo_factory_func, - algo_name_to_factory_func, - algo_factory, - Algo, - PolicyAlgo, - ValueAlgo, - PlannerAlgo, - HierarchicalAlgo, - RolloutPolicy, -) +from robomimic.algo.algo import register_algo_factory_func, algo_name_to_factory_func, algo_factory, Algo, PolicyAlgo, ValueAlgo, PlannerAlgo, HierarchicalAlgo, RolloutPolicy # note: these imports are needed to register these classes in the global algo registry from robomimic.algo.bc import BC, BC_Gaussian, BC_GMM, BC_VAE, BC_RNN, BC_RNN_GMM @@ -19,5 +9,4 @@ from robomimic.algo.hbc import HBC from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC - # from robomimic.algo.diffusion_policy import DiffusionPolicyUNet diff --git a/robomimic/algo/algo.py b/robomimic/algo/algo.py index 39d7d9a7..27e6c50b 100644 --- a/robomimic/algo/algo.py +++ 
b/robomimic/algo/algo.py
@@ -7,7 +7,6 @@
 @register_algo_factory_func function decorator. This makes it easy for @algo_factory
 to instantiate the correct `Algo` subclass.
 """
-
 import textwrap
 from copy import deepcopy
 from collections import OrderedDict
@@ -31,10 +30,8 @@ def register_algo_factory_func(algo_name):
     Args:
         algo_name (str): the algorithm name to register the algorithm under
     """
-
     def decorator(factory_func):
         REGISTERED_ALGO_FACTORY_FUNCS[algo_name] = factory_func
-
     return decorator
 
 
@@ -90,9 +87,14 @@ class Algo(object):
     a standard API to be used by training functions such as @run_epoch in
     utils/train_utils.py.
     """
-
     def __init__(
-        self, algo_config, obs_config, global_config, obs_key_shapes, ac_dim, device
+        self,
+        algo_config,
+        obs_config,
+        global_config,
+        obs_key_shapes,
+        ac_dim,
+        device
     ):
         """
         Args:
@@ -145,23 +147,11 @@ def _create_shapes(self, obs_keys, obs_key_shapes):
         # We check across all modality groups (obs, goal, subgoal), and see if the inputted observation key exists
         # across all modalities specified in the config. If so, we store its corresponding shape internally
         for k in obs_key_shapes:
-            if "obs" in self.obs_config.modalities and k in [
-                obs_key
-                for modality in self.obs_config.modalities.obs.values()
-                for obs_key in modality
-            ]:
+            if "obs" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.obs.values() for obs_key in modality]:
                 self.obs_shapes[k] = obs_key_shapes[k]
-            if "goal" in self.obs_config.modalities and k in [
-                obs_key
-                for modality in self.obs_config.modalities.goal.values()
-                for obs_key in modality
-            ]:
+            if "goal" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.goal.values() for obs_key in modality]:
                 self.goal_shapes[k] = obs_key_shapes[k]
-            if "subgoal" in self.obs_config.modalities and k in [
-                obs_key
-                for modality in self.obs_config.modalities.subgoal.values()
-                for obs_key in modality
-            ]:
+            if "subgoal" in self.obs_config.modalities and k in [obs_key for modality in self.obs_config.modalities.subgoal.values() for obs_key in modality]:
                 self.subgoal_shapes[k] = obs_key_shapes[k]
 
     def _create_networks(self):
@@ -184,28 +174,18 @@ def _create_optimizers(self):
             if k in self.nets:
                 if isinstance(self.nets[k], nn.ModuleList):
                     self.optimizers[k] = [
-                        TorchUtils.optimizer_from_optim_params(
-                            net_optim_params=self.optim_params[k], net=self.nets[k][i]
-                        )
+                        TorchUtils.optimizer_from_optim_params(net_optim_params=self.optim_params[k], net=self.nets[k][i])
                         for i in range(len(self.nets[k]))
                     ]
                     self.lr_schedulers[k] = [
-                        TorchUtils.lr_scheduler_from_optim_params(
-                            net_optim_params=self.optim_params[k],
-                            net=self.nets[k][i],
-                            optimizer=self.optimizers[k][i],
-                        )
+                        TorchUtils.lr_scheduler_from_optim_params(net_optim_params=self.optim_params[k], net=self.nets[k][i], optimizer=self.optimizers[k][i])
                         for i in range(len(self.nets[k]))
                     ]
                 else:
                     self.optimizers[k] = TorchUtils.optimizer_from_optim_params(
-                        net_optim_params=self.optim_params[k], net=self.nets[k]
-                    )
+                        net_optim_params=self.optim_params[k], net=self.nets[k])
                     self.lr_schedulers[k] = TorchUtils.lr_scheduler_from_optim_params(
-                        net_optim_params=self.optim_params[k],
-                        net=self.nets[k],
-                        optimizer=self.optimizers[k],
-                    )
+                        net_optim_params=self.optim_params[k], net=self.nets[k], optimizer=self.optimizers[k])
 
     def process_batch_for_training(self, batch):
         """
@@ -218,7 +198,7 @@ def process_batch_for_training(self, batch):
 
         Returns:
             input_batch (dict): processed and filtered batch that
-                will be used for
training + will be used for training """ return batch @@ -234,8 +214,8 @@ def postprocess_batch_for_training(self, batch, normalization_stats, normalize_a training will occur (after @process_batch_for_training is called) - obs_normalization_stats (dict or None): if provided, this should map observation - keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the + obs_normalization_stats (dict or None): if provided, this should map observation + keys to dicts with a "mean" and "std" of shape (1, ...) where ... is the default shape for the observation. Returns: @@ -355,11 +335,8 @@ def __repr__(self): """ Pretty print algorithm and network description. """ - return ( - "{} (\n".format(self.__class__.__name__) - + textwrap.indent(self.nets.__repr__(), " ") - + "\n)" - ) + return "{} (\n".format(self.__class__.__name__) + \ + textwrap.indent(self.nets.__repr__(), ' ') + "\n)" def reset(self): """ @@ -372,7 +349,6 @@ class PolicyAlgo(Algo): """ Base class for all algorithms that can be used as policies. """ - def get_action(self, obs_dict, goal_dict=None): """ Get policy action outputs. @@ -391,7 +367,6 @@ class ValueAlgo(Algo): """ Base class for all algorithms that can learn a value function. """ - def get_state_value(self, obs_dict, goal_dict=None): """ Get state value outputs. @@ -425,7 +400,6 @@ class PlannerAlgo(Algo): Base class for all algorithms that can be used for planning subgoals conditioned on current observations and potential goal observations. """ - def get_subgoal_predictions(self, obs_dict, goal_dict=None): """ Get predicted subgoal outputs. @@ -458,7 +432,6 @@ class HierarchicalAlgo(Algo): Base class for all hierarchical algorithms that consist of (1) subgoal planning and (2) subgoal-conditioned policy learning. """ - def get_action(self, obs_dict, goal_dict=None): """ Get policy action outputs. @@ -500,7 +473,6 @@ class RolloutPolicy(object): """ Wraps @Algo object to make it easy to run policies in a rollout loop. """ - def __init__(self, policy, obs_normalization_stats=None): """ Args: @@ -526,7 +498,7 @@ def _prepare_observation(self, ob): Prepare raw observation dict from environment for policy. Args: - ob (dict): single observation dictionary from environment (no batch dimension, + ob (dict): single observation dictionary from environment (no batch dimension, and np.array values for each key) """ ob = TensorUtils.to_tensor(ob) @@ -535,12 +507,7 @@ def _prepare_observation(self, ob): ob = TensorUtils.to_float(ob) if self.obs_normalization_stats is not None: # ensure obs_normalization_stats are torch Tensors on proper device - obs_normalization_stats = TensorUtils.to_float( - TensorUtils.to_device( - TensorUtils.to_tensor(self.obs_normalization_stats), - self.policy.device, - ) - ) + obs_normalization_stats = TensorUtils.to_float(TensorUtils.to_device(TensorUtils.to_tensor(self.obs_normalization_stats), self.policy.device)) # limit normalization to obs keys being used, in case environment includes extra keys ob = {k: ob[k] for k in self.policy.global_config.all_obs_keys} ob = ObsUtils.normalize_batch( @@ -557,7 +524,7 @@ def __call__(self, ob, goal=None): Produce action from raw observation dict (and maybe goal dict) from environment. 
Args: - ob (dict): single observation dictionary from environment (no batch dimension, + ob (dict): single observation dictionary from environment (no batch dimension, and np.array values for each key) goal (dict): goal observation """ diff --git a/robomimic/algo/bc.py b/robomimic/algo/bc.py index e5255d75..02c7dfe8 100644 --- a/robomimic/algo/bc.py +++ b/robomimic/algo/bc.py @@ -1,7 +1,6 @@ """ Implementation of Behavioral Cloning (BC). """ - from collections import OrderedDict import torch @@ -36,15 +35,13 @@ def algo_config_to_class(algo_config): # note: we need the check below because some configs import BCConfig and exclude # some of these options - gaussian_enabled = "gaussian" in algo_config and algo_config.gaussian.enabled - gmm_enabled = "gmm" in algo_config and algo_config.gmm.enabled - vae_enabled = "vae" in algo_config and algo_config.vae.enabled + gaussian_enabled = ("gaussian" in algo_config and algo_config.gaussian.enabled) + gmm_enabled = ("gmm" in algo_config and algo_config.gmm.enabled) + vae_enabled = ("vae" in algo_config and algo_config.vae.enabled) rnn_enabled = algo_config.rnn.enabled # support legacy configs that do not have "transformer" item - transformer_enabled = ( - "transformer" in algo_config - ) and algo_config.transformer.enabled + transformer_enabled = ("transformer" in algo_config) and algo_config.transformer.enabled if gaussian_enabled: if rnn_enabled: @@ -82,7 +79,6 @@ class BC(PolicyAlgo): """ Normal BC training. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -93,9 +89,7 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets = self.nets.float().to(self.device) @@ -110,21 +104,18 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() - # input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["obs"] = { - k: v[:, 0, :] if v.ndim != 1 else v for k, v in batch["obs"].items() - } - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + #input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} + input_batch["obs"] = {k: v[:, 0, :] if v.ndim != 1 else v for k, v in batch['obs'].items()} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) + def train_on_batch(self, batch, epoch, validate=False): """ Training on a single batch of data. @@ -169,9 +160,7 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ predictions = OrderedDict() - actions = self.nets["policy"]( - obs_dict=batch["obs"], goal_dict=batch["goal_obs"] - ) + actions = self.nets["policy"](obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) predictions["actions"] = actions return predictions @@ -266,7 +255,6 @@ class BC_Gaussian(BC): """ BC training with a Gaussian policy. 
""" - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -284,9 +272,7 @@ def _create_networks(self): std_limits=(self.algo_config.gaussian.min_std, 7.5), std_activation=self.algo_config.gaussian.std_activation, low_noise_eval=self.algo_config.gaussian.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets = self.nets.float().to(self.device) @@ -304,7 +290,7 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) @@ -352,7 +338,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -362,7 +348,6 @@ class BC_GMM(BC_Gaussian): """ BC training with a Gaussian Mixture Model policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -379,9 +364,7 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets = self.nets.float().to(self.device) @@ -391,7 +374,6 @@ class BC_VAE(BC): """ BC training with a VAE policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -402,12 +384,10 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, device=self.device, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **VAENets.vae_args_from_config(self.algo_config.vae), ) - + self.nets = self.nets.float().to(self.device) def train_on_batch(self, batch, epoch, validate=False): @@ -415,13 +395,8 @@ def train_on_batch(self, batch, epoch, validate=False): Update from superclass to set categorical temperature, for categorical VAEs. 
""" if self.algo_config.vae.prior.use_categorical: - temperature = ( - self.algo_config.vae.prior.categorical_init_temp - - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step - ) - temperature = max( - temperature, self.algo_config.vae.prior.categorical_min_temp - ) + temperature = self.algo_config.vae.prior.categorical_init_temp - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step + temperature = max(temperature, self.algo_config.vae.prior.categorical_min_temp) self.nets["policy"].set_gumbel_temperature(temperature) return super(BC_VAE, self).train_on_batch(batch, epoch, validate=validate) @@ -499,9 +474,7 @@ def log_info(self, info): if self.algo_config.vae.prior.use_categorical: log["Gumbel_Temperature"] = self.nets["policy"].get_gumbel_temperature() else: - log["Encoder_Variance"] = ( - info["predictions"]["encoder_variance"].mean().item() - ) + log["Encoder_Variance"] = info["predictions"]["encoder_variance"].mean().item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -511,7 +484,6 @@ class BC_RNN(BC): """ BC training with an RNN policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -522,9 +494,7 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **BaseNets.rnn_args_from_config(self.algo_config.rnn), ) @@ -550,9 +520,7 @@ def process_batch_for_training(self, batch): """ input_batch = dict() input_batch["obs"] = batch["obs"] - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"] if self._rnn_is_open_loop: @@ -561,9 +529,7 @@ def process_batch_for_training(self, batch): # on the rnn hidden state. n_steps = batch["actions"].shape[1] obs_seq_start = TensorUtils.index_at_time(batch["obs"], ind=0) - input_batch["obs"] = TensorUtils.unsqueeze_expand_at( - obs_seq_start, size=n_steps, dim=1 - ) + input_batch["obs"] = TensorUtils.unsqueeze_expand_at(obs_seq_start, size=n_steps, dim=1) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -584,9 +550,7 @@ def get_action(self, obs_dict, goal_dict=None): if self._rnn_hidden_state is None or self._rnn_counter % self._rnn_horizon == 0: batch_size = list(obs_dict.values())[0].shape[0] - self._rnn_hidden_state = self.nets["policy"].get_rnn_init_state( - batch_size=batch_size, device=self.device - ) + self._rnn_hidden_state = self.nets["policy"].get_rnn_init_state(batch_size=batch_size, device=self.device) if self._rnn_is_open_loop: # remember the initial observation, and use it instead of the current observation @@ -600,8 +564,7 @@ def get_action(self, obs_dict, goal_dict=None): self._rnn_counter += 1 action, self._rnn_hidden_state = self.nets["policy"].forward_step( - obs_to_use, goal_dict=goal_dict, rnn_state=self._rnn_hidden_state - ) + obs_to_use, goal_dict=goal_dict, rnn_state=self._rnn_hidden_state) return action def reset(self): @@ -616,7 +579,6 @@ class BC_RNN_GMM(BC_RNN): """ BC training with an RNN GMM policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. 
@@ -634,9 +596,7 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **BaseNets.rnn_args_from_config(self.algo_config.rnn), ) @@ -660,13 +620,13 @@ def _forward_training(self, batch): predictions (dict): dictionary containing network outputs """ dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) # make sure that this is a batch of multivariate action distributions, so that # the log probability computation will be correct - assert len(dists.batch_shape) == 2 # [B, T] + assert len(dists.batch_shape) == 2 # [B, T] log_probs = dists.log_prob(batch["actions"]) predictions = OrderedDict( @@ -708,7 +668,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log @@ -718,7 +678,6 @@ class BC_Transformer(BC): """ BC training with a Transformer policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -730,14 +689,12 @@ def _create_networks(self): obs_shapes=self.obs_shapes, goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **BaseNets.transformer_args_from_config(self.algo_config.transformer), ) self._set_params_from_config() self.nets = self.nets.float().to(self.device) - + def _set_params_from_config(self): """ Read specific config variables we need for training / eval. 
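
The next hunk reflows `BC_Transformer.process_batch_for_training`. Its logic, sketched with assumed toy shapes (`h` plays the role of `self.context_length`, and the boolean mirrors the `supervise_all_steps` flag read in `_set_params_from_config`):

    import torch

    B, T, A, h = 8, 10, 7, 10   # illustrative batch / sequence / action dims
    batch = {
        "obs": {"proprio": torch.randn(B, T, 23)},
        "actions": torch.randn(B, T, A),
    }

    obs = {k: batch["obs"][k][:, :h, :] for k in batch["obs"]}  # clip obs to the context window

    supervise_all_steps = False
    if supervise_all_steps:
        actions = batch["actions"][:, :h, :]     # a target action at every timestep
    else:
        actions = batch["actions"][:, h - 1, :]  # only the action at the end of the window
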
@@ -760,20 +717,16 @@ def process_batch_for_training(self, batch): input_batch = dict() h = self.context_length input_batch["obs"] = {k: batch["obs"][k][:, :h, :] for k in batch["obs"]} - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present if self.supervise_all_steps: # supervision on entire sequence (instead of just current timestep) input_batch["actions"] = batch["actions"][:, :h, :] else: # just use current timestep - input_batch["actions"] = batch["actions"][:, h - 1, :] + input_batch["actions"] = batch["actions"][:, h-1, :] - input_batch = TensorUtils.to_device( - TensorUtils.to_float(input_batch), self.device - ) + input_batch = TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device) return input_batch def _forward_training(self, batch, epoch=None): @@ -790,18 +743,14 @@ def _forward_training(self, batch, epoch=None): """ # ensure that transformer context length is consistent with temporal dimension of observations TensorUtils.assert_size_at_dim( - batch["obs"], - size=(self.context_length), - dim=1, - msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format( - self.context_length - ), + batch["obs"], + size=(self.context_length), + dim=1, + msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format(self.context_length), ) predictions = OrderedDict() - predictions["actions"] = self.nets["policy"]( - obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"] - ) + predictions["actions"] = self.nets["policy"](obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"]) if not self.supervise_all_steps: # only supervise final timestep predictions["actions"] = predictions["actions"][:, -1, :] @@ -818,16 +767,13 @@ def get_action(self, obs_dict, goal_dict=None): """ assert not self.nets.training - return self.nets["policy"](obs_dict, actions=None, goal_dict=goal_dict)[ - :, -1, : - ] + return self.nets["policy"](obs_dict, actions=None, goal_dict=goal_dict)[:, -1, :] class BC_Transformer_GMM(BC_Transformer): """ BC training with a Transformer GMM policy. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. 
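
The `BC_Transformer_GMM` hunks that follow (like the `BC_RNN_GMM` ones above) share one objective: build a batch of mixture-of-Gaussians action distributions with batch shape [B, T] and minimize the negative log-likelihood of the demonstrated actions. A self-contained sketch, with made-up parameter tensors in place of the policy network outputs:

    import torch
    import torch.distributions as D

    B, T, M, A = 4, 10, 5, 7               # batch, time, GMM modes, action dim (illustrative)
    logits = torch.randn(B, T, M)          # mixture weights per timestep
    means = torch.randn(B, T, M, A)        # per-mode action means
    scales = torch.nn.functional.softplus(torch.randn(B, T, M, A)) + 1e-4  # positive stds

    component = D.Independent(D.Normal(means, scales), 1)   # event dim = action dim
    dists = D.MixtureSameFamily(D.Categorical(logits=logits), component)
    assert len(dists.batch_shape) == 2     # [B, T], the same check asserted in these hunks

    actions = torch.randn(B, T, A)
    log_probs = dists.log_prob(actions)    # [B, T]
    action_loss = -log_probs.mean()        # the quantity logged as "Loss" / "Log_Likelihood"
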
@@ -844,9 +790,7 @@ def _create_networks(self): min_std=self.algo_config.gmm.min_std, std_activation=self.algo_config.gmm.std_activation, low_noise_eval=self.algo_config.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **BaseNets.transformer_args_from_config(self.algo_config.transformer), ) self._set_params_from_config() @@ -858,16 +802,14 @@ def _forward_training(self, batch, epoch=None): """ # ensure that transformer context length is consistent with temporal dimension of observations TensorUtils.assert_size_at_dim( - batch["obs"], - size=(self.context_length), - dim=1, - msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format( - self.context_length - ), + batch["obs"], + size=(self.context_length), + dim=1, + msg="Error: expect temporal dimension of obs batch to match transformer context length {}".format(self.context_length), ) dists = self.nets["policy"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], actions=None, goal_dict=batch["goal_obs"], low_noise_eval=False, @@ -875,7 +817,7 @@ def _forward_training(self, batch, epoch=None): # make sure that this is a batch of multivariate action distributions, so that # the log probability computation will be correct - assert len(dists.batch_shape) == 2 # [B, T] + assert len(dists.batch_shape) == 2 # [B, T] if not self.supervise_all_steps: # only use final timestep prediction by making a new distribution with only final timestep. @@ -885,9 +827,7 @@ def _forward_training(self, batch, epoch=None): scale=dists.component_distribution.base_dist.scale[:, -1], ) component_distribution = D.Independent(component_distribution, 1) - mixture_distribution = D.Categorical( - logits=dists.mixture_distribution.logits[:, -1] - ) + mixture_distribution = D.Categorical(logits=dists.mixture_distribution.logits[:, -1]) dists = D.MixtureSameFamily( mixture_distribution=mixture_distribution, component_distribution=component_distribution, @@ -930,7 +870,7 @@ def log_info(self, info): """ log = PolicyAlgo.log_info(self, info) log["Loss"] = info["losses"]["action_loss"].item() - log["Log_Likelihood"] = info["losses"]["log_probs"].item() + log["Log_Likelihood"] = info["losses"]["log_probs"].item() if "policy_grad_norms" in info: log["Policy_Grad_Norms"] = info["policy_grad_norms"] return log diff --git a/robomimic/algo/bcq.py b/robomimic/algo/bcq.py index e7fdd7b9..5843ccb5 100644 --- a/robomimic/algo/bcq.py +++ b/robomimic/algo/bcq.py @@ -3,7 +3,6 @@ generative action models (the original paper uses a cVAE). (Paper - https://arxiv.org/abs/1812.02900). 
""" - from collections import OrderedDict import torch @@ -47,7 +46,6 @@ class BCQ(PolicyAlgo, ValueAlgo): Default BCQ training, based on https://arxiv.org/abs/1812.02900 and https://github.com/sfujim/BCQ """ - def __init__(self, **kwargs): PolicyAlgo.__init__(self, **kwargs) @@ -69,13 +67,13 @@ def _create_networks(self): with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.hard_update( - source=self.nets["critic"][critic_ind], + source=self.nets["critic"][critic_ind], target=self.nets["critic_target"][critic_ind], ) if self.algo_config.actor.enabled: TorchUtils.hard_update( - source=self.nets["actor"], + source=self.nets["actor"], target=self.nets["actor_target"], ) @@ -92,9 +90,7 @@ def _create_critics(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) # Q network ensemble and target ensemble @@ -119,9 +115,7 @@ def _create_action_sampler(self): ac_dim=self.ac_dim, device=self.device, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **VAENets.vae_args_from_config(self.algo_config.action_sampler.vae), ) @@ -137,9 +131,7 @@ def _create_actor(self): ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, perturbation_scale=self.algo_config.actor.perturbation_scale, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets["actor"] = actor_class(**actor_args) @@ -153,13 +145,9 @@ def _check_epoch(self, net_name, epoch): net_name (str): name of network in @self.nets and @self.optim_params epoch (int): epoch number """ - epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or ( - epoch >= self.optim_params[net_name]["start_epoch"] - ) - epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or ( - epoch < self.optim_params[net_name]["end_epoch"] - ) - return epoch_start_check and epoch_end_check + epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"]) + epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"]) + return (epoch_start_check and epoch_end_check) def set_discount(self, discount): """ @@ -178,7 +166,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -188,25 +176,19 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = { - k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"] - } - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last 
dimension of 1 to be compatible with model outputs # single timestep reward is discounted sum of intermediate rewards in sequence reward_seq = batch["rewards"][:, :n_step] - discounts = torch.pow( - self.algo_config.discount, torch.arange(n_step).float() - ).unsqueeze(0) + discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # discount rate will be gamma^N for computing n-step returns - new_discount = self.algo_config.discount**n_step + new_discount = (self.algo_config.discount ** n_step) self.set_discount(new_discount) # consider this n-step seqeunce done if any intermediate dones are present @@ -215,13 +197,9 @@ def process_batch_for_training(self, batch): if self.algo_config.infinite_horizon: # scale terminal rewards by 1 / (1 - gamma) for infinite horizon MDPs - done_inds = ( - input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] - ) + done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] if done_inds.shape[0] > 0: - input_batch["rewards"][done_inds] = input_batch["rewards"][ - done_inds - ] * (1.0 / (1.0 - self.discount)) + input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount)) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -250,15 +228,8 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): """ info = OrderedDict() if self.algo_config.action_sampler.vae.prior.use_categorical: - temperature = ( - self.algo_config.action_sampler.vae.prior.categorical_init_temp - - epoch - * self.algo_config.action_sampler.vae.prior.categorical_temp_anneal_step - ) - temperature = max( - temperature, - self.algo_config.action_sampler.vae.prior.categorical_min_temp, - ) + temperature = self.algo_config.action_sampler.vae.prior.categorical_init_temp - epoch * self.algo_config.action_sampler.vae.prior.categorical_temp_anneal_step + temperature = max(temperature, self.algo_config.action_sampler.vae.prior.categorical_min_temp) self.nets["action_sampler"].set_gumbel_temperature(temperature) vae_inputs = dict( @@ -268,9 +239,7 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): ) # maybe freeze encoder weights - if (self.algo_config.action_sampler.freeze_encoder_epoch != -1) and ( - epoch >= self.algo_config.action_sampler.freeze_encoder_epoch - ): + if (self.algo_config.action_sampler.freeze_encoder_epoch != -1) and (epoch >= self.algo_config.action_sampler.freeze_encoder_epoch): vae_inputs["freeze_encoder"] = True # VAE forward @@ -283,9 +252,7 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): info["action_sampler/kl_loss"] = kl_loss if not self.algo_config.action_sampler.vae.prior.use_categorical: with torch.no_grad(): - encoder_variance = torch.exp( - vae_outputs["encoder_params"]["logvar"] - ).mean() + encoder_variance = torch.exp(vae_outputs["encoder_params"]["logvar"]).mean() info["action_sampler/encoder_variance"] = encoder_variance outputs = TensorUtils.detach(vae_outputs) @@ -299,9 +266,7 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): info["action_sampler/grad_norms"] = vae_grad_norms return info, outputs - def _train_critic_on_batch( - self, batch, action_sampler_outputs, epoch, no_backprop=False - ): + def _train_critic_on_batch(self, batch, action_sampler_outputs, epoch, 
no_backprop=False): """ A modular helper function that can be overridden in case subclasses would like to modify training behavior for the @@ -322,7 +287,7 @@ def _train_critic_on_batch( Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging - critic_outputs (dict): dictionary of critic outputs - useful for + critic_outputs (dict): dictionary of critic outputs - useful for logging purposes """ info = OrderedDict() @@ -335,14 +300,14 @@ def _train_critic_on_batch( goal_s_batch = batch["goal_obs"] # 1 if not done, 0 otherwise - done_mask_batch = 1.0 - batch["dones"] + done_mask_batch = 1. - batch["dones"] info["done_masks"] = done_mask_batch # Bellman backup for Q-targets q_targets = self._get_target_values( - next_states=ns_batch, - goal_states=goal_s_batch, - rewards=r_batch, + next_states=ns_batch, + goal_states=goal_s_batch, + rewards=r_batch, dones=done_mask_batch, action_sampler_outputs=action_sampler_outputs, ) @@ -352,10 +317,10 @@ def _train_critic_on_batch( critic_outputs = [] for critic_ind, critic in enumerate(self.nets["critic"]): critic_loss, critic_output = self._compute_critic_loss( - critic=critic, - states=s_batch, - actions=a_batch, - goal_states=goal_s_batch, + critic=critic, + states=s_batch, + actions=a_batch, + goal_states=goal_s_batch, q_targets=q_targets, ) info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss @@ -365,18 +330,14 @@ def _train_critic_on_batch( critic_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["critic"][critic_ind], optim=self.optimizers["critic"][critic_ind], - loss=critic_loss, + loss=critic_loss, max_grad_norm=self.algo_config.critic.max_gradient_norm, ) - info["critic/critic{}_grad_norms".format(critic_ind + 1)] = ( - critic_grad_norms - ) + info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms return info, critic_outputs - def _train_actor_on_batch( - self, batch, action_sampler_outputs, critic_outputs, epoch, no_backprop=False - ): + def _train_actor_on_batch(self, batch, action_sampler_outputs, critic_outputs, epoch, no_backprop=False): """ A modular helper function that can be overridden in case subclasses would like to modify training behavior for the @@ -411,13 +372,9 @@ def _train_actor_on_batch( # sample some actions from action sampler and perturb them, then improve perturbations # where improvement is measured by the critic - sampled_actions = self.nets["action_sampler"]( - s_batch, goal_s_batch - ).detach() # don't backprop into samples + sampled_actions = self.nets["action_sampler"](s_batch, goal_s_batch).detach() # don't backprop into samples perturbed_actions = self.nets["actor"](s_batch, sampled_actions, goal_s_batch) - actor_loss = -( - self.nets["critic"][0](s_batch, perturbed_actions, goal_s_batch) - ).mean() + actor_loss = -(self.nets["critic"][0](s_batch, perturbed_actions, goal_s_batch)).mean() info["actor/loss"] = actor_loss if not no_backprop: @@ -430,9 +387,7 @@ def _train_actor_on_batch( return info - def _get_target_values( - self, next_states, goal_states, rewards, dones, action_sampler_outputs=None - ): + def _get_target_values(self, next_states, goal_states, rewards, dones, action_sampler_outputs=None): """ Helper function to get target values for training Q-function with TD-loss. 
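Stepping back from the re-wrapping: BCQ first collapses each sampled n-step window into one transition, then regresses every critic toward a single Bellman target on that transition. A sketch of the window arithmetic, assuming (B, T) reward/done tensors; this is the math, not a robomimic entry point:

import torch

def n_step_transition(rewards, dones, gamma, n):
    # discounted sum of the first n rewards: r_0 + gamma*r_1 + ... + gamma^(n-1)*r_{n-1}
    discounts = torch.pow(gamma, torch.arange(n).float()).unsqueeze(0)  # (1, n)
    r_n = (rewards[:, :n] * discounts).sum(dim=1, keepdim=True)         # (B, 1)
    # the collapsed transition counts as done if any intermediate step was done
    done_n = (dones[:, :n].sum(dim=1, keepdim=True) > 0).float()        # (B, 1)
    return r_n, done_n

# the TD target then discounts by gamma**n and masks terminal transitions:
# q_target = r_n + (1.0 - done_n) * (gamma ** n) * target_q_next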
@@ -449,17 +404,13 @@ def _get_target_values( """ with torch.no_grad(): - # we need to stack the observations with redundancy @num_action_samples here, then decode + # we need to stack the observations with redundancy @num_action_samples here, then decode # to get all sampled actions. for example, if we generate 2 samples per observation and # the batch size is 3, then ob_tiled = [ob1; ob1; ob2; ob2; ob3; ob3] - next_states_tiled = ObsUtils.repeat_and_stack_observation( - next_states, n=self.algo_config.critic.num_action_samples - ) + next_states_tiled = ObsUtils.repeat_and_stack_observation(next_states, n=self.algo_config.critic.num_action_samples) goal_states_tiled = None if len(self.goal_shapes) > 0: - goal_states_tiled = ObsUtils.repeat_and_stack_observation( - goal_states, n=self.algo_config.critic.num_action_samples - ) + goal_states_tiled = ObsUtils.repeat_and_stack_observation(goal_states, n=self.algo_config.critic.num_action_samples) # sample action proposals next_sampled_actions = self._sample_actions_for_value_maximization( @@ -469,20 +420,18 @@ def _get_target_values( ) q_targets = self._get_target_values_from_sampled_actions( - next_states_tiled=next_states_tiled, - next_sampled_actions=next_sampled_actions, - goal_states_tiled=goal_states_tiled, - rewards=rewards, + next_states_tiled=next_states_tiled, + next_sampled_actions=next_sampled_actions, + goal_states_tiled=goal_states_tiled, + rewards=rewards, dones=dones, ) return q_targets - def _sample_actions_for_value_maximization( - self, states_tiled, goal_states_tiled, for_target_update - ): + def _sample_actions_for_value_maximization(self, states_tiled, goal_states_tiled, for_target_update): """ - Helper function to sample actions for maximization (the "batch-constrained" part of + Helper function to sample actions for maximization (the "batch-constrained" part of batch-constrained q-learning). Args: @@ -502,30 +451,24 @@ def _sample_actions_for_value_maximization( """ with torch.no_grad(): - sampled_actions = self.nets["action_sampler"]( - states_tiled, goal_states_tiled - ) + sampled_actions = self.nets["action_sampler"](states_tiled, goal_states_tiled) if self.algo_config.actor.enabled: actor = self.nets["actor"] if for_target_update: actor = self.nets["actor_target"] # perturb the actions with the policy - sampled_actions = actor( - states_tiled, sampled_actions, goal_states_tiled - ) + sampled_actions = actor(states_tiled, sampled_actions, goal_states_tiled) return sampled_actions - def _get_target_values_from_sampled_actions( - self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones - ): + def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones): """ Helper function to get target values for training Q-function with TD-loss. The function assumes that action candidates to maximize over have already been computed, and that the input states have been tiled (repeated) to be compatible with the sampled actions. Args: - next_states_tiled (dict): next observations to use for sampling actions. Assumes that + next_states_tiled (dict): next observations to use for sampling actions. 
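A tiny sketch of the tiling contract the comments above describe (each observation repeated n times in place, giving [ob1; ob1; ob2; ob2; ...]); this mimics what ObsUtils.repeat_and_stack_observation is used for here, under an assumed dict-of-tensors input:

import torch

def repeat_and_stack(obs, n):
    # tile each batch member n times along dim 0
    return {k: v.repeat_interleave(n, dim=0) for k, v in obs.items()}

obs = {"state": torch.tensor([[1.0], [2.0], [3.0]])}
tiled = repeat_and_stack(obs, n=2)
assert tiled["state"].squeeze(1).tolist() == [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]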
Assumes that tiling has already occurred - so that if the batch size is B, and N samples are desired for each observation in the batch, the leading dimension for each observation in the dict is B * N @@ -545,23 +488,19 @@ def _get_target_values_from_sampled_actions( with torch.no_grad(): # feed tiled observations and sampled actions into the critics and then # reshape to get all Q-values in second dimension per observation in batch. - all_value_targets = self.nets["critic_target"][0]( - next_states_tiled, next_sampled_actions, goal_states_tiled - ).reshape(-1, self.algo_config.critic.num_action_samples) + all_value_targets = self.nets["critic_target"][0](next_states_tiled, next_sampled_actions, goal_states_tiled).reshape( + -1, self.algo_config.critic.num_action_samples) max_value_targets = all_value_targets min_value_targets = all_value_targets # TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates for critic_target in self.nets["critic_target"][1:]: - all_value_targets = critic_target( - next_states_tiled, next_sampled_actions, goal_states_tiled - ).reshape(-1, self.algo_config.critic.num_action_samples) + all_value_targets = critic_target(next_states_tiled, next_sampled_actions, goal_states_tiled).reshape( + -1, self.algo_config.critic.num_action_samples) max_value_targets = torch.max(max_value_targets, all_value_targets) min_value_targets = torch.min(min_value_targets, all_value_targets) - all_value_targets = ( - self.algo_config.critic.ensemble.weight * min_value_targets - + (1.0 - self.algo_config.critic.ensemble.weight) * max_value_targets - ) + all_value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \ + (1. - self.algo_config.critic.ensemble.weight) * max_value_targets # take maximum over all sampled action values per observation and compute targets value_targets = torch.max(all_value_targets, dim=1, keepdim=True)[0] @@ -616,16 +555,12 @@ def train_on_batch(self, batch, epoch, validate=False): info = PolicyAlgo.train_on_batch(self, batch, epoch, validate=validate) # Action Sampler training - no_action_sampler_backprop = validate or ( - not self._check_epoch(net_name="action_sampler", epoch=epoch) - ) + no_action_sampler_backprop = validate or (not self._check_epoch(net_name="action_sampler", epoch=epoch)) with TorchUtils.maybe_no_grad(no_grad=no_action_sampler_backprop): - action_sampler_info, action_sampler_outputs = ( - self._train_action_sampler_on_batch( - batch=batch, - epoch=epoch, - no_backprop=no_action_sampler_backprop, - ) + action_sampler_info, action_sampler_outputs = self._train_action_sampler_on_batch( + batch=batch, + epoch=epoch, + no_backprop=no_action_sampler_backprop, ) info.update(action_sampler_info) @@ -634,29 +569,25 @@ def train_on_batch(self, batch, epoch, validate=False): self.nets["action_sampler"].eval() # Critic training - no_critic_backprop = validate or ( - not self._check_epoch(net_name="critic", epoch=epoch) - ) + no_critic_backprop = validate or (not self._check_epoch(net_name="critic", epoch=epoch)) with TorchUtils.maybe_no_grad(no_grad=no_critic_backprop): critic_info, critic_outputs = self._train_critic_on_batch( - batch=batch, + batch=batch, action_sampler_outputs=action_sampler_outputs, - epoch=epoch, + epoch=epoch, no_backprop=no_critic_backprop, ) info.update(critic_info) if self.algo_config.actor.enabled: # Actor training - no_actor_backprop = validate or ( - not self._check_epoch(net_name="actor", epoch=epoch) - ) + no_actor_backprop = validate or (not 
self._check_epoch(net_name="actor", epoch=epoch)) with TorchUtils.maybe_no_grad(no_grad=no_actor_backprop): actor_info = self._train_actor_on_batch( - batch=batch, - action_sampler_outputs=action_sampler_outputs, - critic_outputs=critic_outputs, - epoch=epoch, + batch=batch, + action_sampler_outputs=action_sampler_outputs, + critic_outputs=critic_outputs, + epoch=epoch, no_backprop=no_actor_backprop, ) info.update(actor_info) @@ -670,8 +601,8 @@ def train_on_batch(self, batch, epoch, validate=False): with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.soft_update( - source=self.nets["critic"][critic_ind], - target=self.nets["critic_target"][critic_ind], + source=self.nets["critic"][critic_ind], + target=self.nets["critic_target"][critic_ind], tau=self.algo_config.target_tau, ) @@ -679,8 +610,8 @@ def train_on_batch(self, batch, epoch, validate=False): if self.algo_config.actor.enabled and (not no_actor_backprop): with torch.no_grad(): TorchUtils.soft_update( - source=self.nets["actor"], - target=self.nets["actor_target"], + source=self.nets["actor"], + target=self.nets["actor_target"], tau=self.algo_config.target_tau, ) @@ -705,22 +636,15 @@ def log_info(self, info): optims = [self.optimizers[k]] if k == "critic": # account for critic having one optimizer per ensemble member - keys = [ - "{}{}".format(k, critic_ind) - for critic_ind in range(len(self.nets["critic"])) - ] + keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))] optims = self.optimizers[k] for kp, optimizer in zip(keys, optims): for i, param_group in enumerate(optimizer.param_groups): loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"] # extract relevant logs for action sampler, critic, and actor - loss_log["Loss"] = 0.0 - for loss_logger in [ - self._log_action_sampler_info, - self._log_critic_info, - self._log_actor_info, - ]: + loss_log["Loss"] = 0. + for loss_logger in [self._log_action_sampler_info, self._log_critic_info, self._log_actor_info]: this_log = loss_logger(info) if "Loss" in this_log: # manually merge total loss @@ -736,18 +660,12 @@ def _log_action_sampler_info(self, info): """ loss_log = OrderedDict() loss_log["Action_Sampler/Loss"] = info["action_sampler/loss"].item() - loss_log["Action_Sampler/Reconsruction_Loss"] = info[ - "action_sampler/recons_loss" - ].item() + loss_log["Action_Sampler/Reconsruction_Loss"] = info["action_sampler/recons_loss"].item() loss_log["Action_Sampler/KL_Loss"] = info["action_sampler/kl_loss"].item() if self.algo_config.action_sampler.vae.prior.use_categorical: - loss_log["Action_Sampler/Gumbel_Temperature"] = self.nets[ - "action_sampler" - ].get_gumbel_temperature() + loss_log["Action_Sampler/Gumbel_Temperature"] = self.nets["action_sampler"].get_gumbel_temperature() else: - loss_log["Action_Sampler/Encoder_Variance"] = info[ - "action_sampler/encoder_variance" - ].item() + loss_log["Action_Sampler/Encoder_Variance"] = info["action_sampler/encoder_variance"].item() if "action_sampler/grad_norms" in info: loss_log["Action_Sampler/Grad_Norms"] = info["action_sampler/grad_norms"] loss_log["Loss"] = loss_log["Action_Sampler/Loss"] @@ -759,20 +677,14 @@ def _log_critic_info(self, info): """ loss_log = OrderedDict() if "done_masks" in info: - loss_log["Critic/Done_Mask_Percentage"] = ( - 100.0 * torch.mean(info["done_masks"]).item() - ) + loss_log["Critic/Done_Mask_Percentage"] = 100. 
* torch.mean(info["done_masks"]).item() if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0.0 + loss_log["Loss"] = 0. for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ - "critic/critic{}_loss".format(critic_ind + 1) - ].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ - "critic/critic{}_grad_norms".format(critic_ind + 1) - ] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] return loss_log @@ -820,10 +732,10 @@ def on_epoch_end(self, epoch): def _get_best_value(self, obs_dict, goal_dict=None): """ - Internal helper function for getting the best value for a given state and - the corresponding best action. Meant to be used at test-time. Key differences - between this and retrieving target values at train-time are that (1) only a - single critic is used for the value estimate and (2) the critic and actor + Internal helper function for getting the best value for a given state and + the corresponding best action. Meant to be used at test-time. Key differences + between this and retrieving target values at train-time are that (1) only a + single critic is used for the value estimate and (2) the critic and actor are used instead of the target critic and target actor. Args: @@ -842,18 +754,16 @@ def _get_best_value(self, obs_dict, goal_dict=None): # number of action proposals from action sampler num_action_samples = self.algo_config.critic.num_action_samples_rollout - # we need to stack the observations with redundancy @num_action_samples here, then decode + # we need to stack the observations with redundancy @num_action_samples here, then decode # to get all sampled actions. for example, if we generate 2 samples per observation and # the batch size is 3, then ob_tiled = [ob1; ob1; ob2; ob2; ob3; ob3] ob_tiled = ObsUtils.repeat_and_stack_observation(obs_dict, n=num_action_samples) goal_tiled = None if len(self.goal_shapes) > 0: - goal_tiled = ObsUtils.repeat_and_stack_observation( - goal_dict, n=num_action_samples - ) + goal_tiled = ObsUtils.repeat_and_stack_observation(goal_dict, n=num_action_samples) sampled_actions = self._sample_actions_for_value_maximization( - states_tiled=ob_tiled, + states_tiled=ob_tiled, goal_states_tiled=goal_tiled, for_target_update=False, ) @@ -861,16 +771,12 @@ def _get_best_value(self, obs_dict, goal_dict=None): # feed tiled observations and perturbed sampled actions into the critic and then # reshape to get all Q-values in second dimension per observation in batch. 
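The selection step completed just below follows a reshape-and-gather pattern worth spelling out once. A sketch with flat inputs as they come out of the tiled critic call; shapes and names are illustrative:

import torch

def pick_best_sampled_action(q_values_flat, actions_flat, batch_size, num_samples):
    # q_values_flat: (B*N, 1) critic scores for tiled observations
    # actions_flat:  (B*N, A) the corresponding action proposals
    q = q_values_flat.reshape(batch_size, num_samples)                # (B, N)
    best_idx = torch.argmax(q, dim=1)                                 # (B,)
    actions = actions_flat.reshape(batch_size, num_samples, -1)       # (B, N, A)
    best_action = actions[torch.arange(batch_size), best_idx]         # (B, A)
    best_value = q[torch.arange(batch_size), best_idx].unsqueeze(1)   # (B, 1)
    return best_value, best_action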
# finally, just take a maximum across that second dimension to take the best sampled action - all_critic_values = self.nets["critic"][0]( - ob_tiled, sampled_actions, goal_tiled - ).reshape(-1, num_action_samples) + all_critic_values = self.nets["critic"][0](ob_tiled, sampled_actions, goal_tiled).reshape(-1, num_action_samples) best_action_index = torch.argmax(all_critic_values, dim=1) all_actions = sampled_actions.reshape(batch_size, num_action_samples, -1) best_action = all_actions[torch.arange(all_actions.shape[0]), best_action_index] - best_value = all_critic_values[ - torch.arange(all_critic_values.shape[0]), best_action_index - ].unsqueeze(1) + best_value = all_critic_values[torch.arange(all_critic_values.shape[0]), best_action_index].unsqueeze(1) return best_value, best_action @@ -928,7 +834,6 @@ class BCQ_GMM(BCQ): A simple modification to BCQ that replaces the VAE used to sample action proposals from the batch with a GMM. """ - def _create_action_sampler(self): """ Called in @_create_networks to make action sampler network. @@ -945,9 +850,7 @@ def _create_action_sampler(self): min_std=self.algo_config.action_sampler.gmm.min_std, std_activation=self.algo_config.action_sampler.gmm.std_activation, low_noise_eval=self.algo_config.action_sampler.gmm.low_noise_eval, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): @@ -974,7 +877,7 @@ def _train_action_sampler_on_batch(self, batch, epoch, no_backprop=False): # GMM forward dists = self.nets["action_sampler"].forward_train( - obs_dict=batch["obs"], + obs_dict=batch["obs"], goal_dict=batch["goal_obs"], ) @@ -1013,7 +916,6 @@ class BCQ_Distributional(BCQ): distributions over a discrete set of values instead of expected returns. Some parts of this implementation were adapted from ACME (https://github.com/deepmind/acme). """ - def _create_critics(self): """ Called in @_create_networks to make critic networks. @@ -1027,9 +929,7 @@ def _create_critics(self): value_bounds=self.algo_config.critic.value_bounds, num_atoms=self.algo_config.critic.distributional.num_atoms, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) # Q network ensemble and target ensemble @@ -1043,15 +943,13 @@ def _create_critics(self): critic_target = critic_class(**critic_args) self.nets["critic_target"].append(critic_target) - def _get_target_values_from_sampled_actions( - self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones - ): + def _get_target_values_from_sampled_actions(self, next_states_tiled, next_sampled_actions, goal_states_tiled, rewards, dones): """ Helper function to get target values for training Q-function with TD-loss. Update from superclass to account for distributional value functions. Args: - next_states_tiled (dict): next observations to use for sampling actions. Assumes that + next_states_tiled (dict): next observations to use for sampling actions. 
Assumes that tiling has already occurred - so that if the batch size is B, and N samples are desired for each observation in the batch, the leading dimension for each observation in the dict is B * N @@ -1072,29 +970,15 @@ def _get_target_values_from_sampled_actions( with torch.no_grad(): # compute expected returns of the sampled actions and maximize to find the best action - all_vds = self.nets["critic_target"][0].forward_train( - next_states_tiled, next_sampled_actions, goal_states_tiled - ) - expected_values = all_vds.mean().reshape( - -1, self.algo_config.critic.num_action_samples - ) + all_vds = self.nets["critic_target"][0].forward_train(next_states_tiled, next_sampled_actions, goal_states_tiled) + expected_values = all_vds.mean().reshape(-1, self.algo_config.critic.num_action_samples) best_action_index = torch.argmax(expected_values, dim=1) - all_actions = next_sampled_actions.reshape( - -1, self.algo_config.critic.num_action_samples, self.ac_dim - ) - best_action = all_actions[ - torch.arange(all_actions.shape[0]), best_action_index - ] + all_actions = next_sampled_actions.reshape(-1, self.algo_config.critic.num_action_samples, self.ac_dim) + best_action = all_actions[torch.arange(all_actions.shape[0]), best_action_index] # get the corresponding probabilities for the categorical distributions corresponding to the best actions - all_vd_probs = all_vds.probs.reshape( - -1, - self.algo_config.critic.num_action_samples, - self.algo_config.critic.distributional.num_atoms, - ) - target_vd_probs = all_vd_probs[ - torch.arange(all_vd_probs.shape[0]), best_action_index - ] + all_vd_probs = all_vds.probs.reshape(-1, self.algo_config.critic.num_action_samples, self.algo_config.critic.distributional.num_atoms) + target_vd_probs = all_vd_probs[torch.arange(all_vd_probs.shape[0]), best_action_index] # bellman backup to get a new grid of values - then project onto the canonical atoms to obtain a # target set of categorical probabilities over the atoms @@ -1134,5 +1018,5 @@ def _compute_critic_loss(self, critic, states, actions, goal_states, q_targets): # this should be the equivalent of softmax with logits from tf vd = critic.forward_train(states, actions, goal_states) log_probs = F.log_softmax(vd.logits, dim=-1) - critic_loss = nn.KLDivLoss(reduction="batchmean")(log_probs, q_targets) + critic_loss = nn.KLDivLoss(reduction='batchmean')(log_probs, q_targets) return critic_loss, None diff --git a/robomimic/algo/cql.py b/robomimic/algo/cql.py index 7eb17106..0c24d50a 100644 --- a/robomimic/algo/cql.py +++ b/robomimic/algo/cql.py @@ -3,7 +3,6 @@ Based off of https://github.com/aviralkumar2907/CQL. (Paper - https://arxiv.org/abs/2006.04779). """ - import numpy as np from collections import OrderedDict @@ -40,66 +39,42 @@ class CQL(PolicyAlgo, ValueAlgo): """ CQL-extension of SAC for the off-policy, offline setting. 
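Since BCQ_Distributional above ranks proposals by the mean of a categorical value distribution, the expectation it maximizes reduces to one line. A sketch with assumed names (probs and atoms are illustrative, not fields of the robomimic critic):

import torch

def expected_value_from_atoms(probs, atoms):
    # E[Z] for a categorical return distribution: sum_i p_i * z_i
    # probs: (B, num_atoms) softmax weights; atoms: (num_atoms,) fixed value grid
    return (probs * atoms.unsqueeze(0)).sum(dim=1)  # (B,)

# ranking N sampled actions per observation then reuses the familiar pattern:
# expected = expected_value_from_atoms(flat_probs, atoms).reshape(B, N)
# best_index = expected.argmax(dim=1)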
See https://arxiv.org/abs/2006.04779 """ - def __init__(self, **kwargs): # Store entropy / cql settings first since the super init call requires them - self.automatic_entropy_tuning = ( - kwargs["algo_config"].actor.target_entropy is not None - ) - self.automatic_cql_tuning = ( - kwargs["algo_config"].critic.target_q_gap is not None - and kwargs["algo_config"].critic.target_q_gap >= 0.0 - ) + self.automatic_entropy_tuning = kwargs["algo_config"].actor.target_entropy is not None + self.automatic_cql_tuning = kwargs["algo_config"].critic.target_q_gap is not None and \ + kwargs["algo_config"].critic.target_q_gap >= 0.0 # Run super init first super().__init__(**kwargs) # Reward settings self.n_step = self.algo_config.n_step - self.discount = self.algo_config.discount**self.n_step + self.discount = self.algo_config.discount ** self.n_step # Now also store additional SAC- and CQL-specific stuff from the config self._num_batch_steps = 0 self.bc_start_steps = self.algo_config.actor.bc_start_steps self.deterministic_backup = self.algo_config.critic.deterministic_backup - self.td_loss_fcn = ( - nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() - ) + self.td_loss_fcn = nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() # Entropy settings - self.target_entropy = ( - -np.prod(self.ac_dim) - if self.algo_config.actor.target_entropy in {None, "default"} - else self.algo_config.actor.target_entropy - ) + self.target_entropy = -np.prod(self.ac_dim) if self.algo_config.actor.target_entropy in {None, "default"} else\ + self.algo_config.actor.target_entropy # CQL settings self.min_q_weight = self.algo_config.critic.min_q_weight - self.target_q_gap = ( - self.algo_config.critic.target_q_gap if self.automatic_cql_tuning else 0.0 - ) + self.target_q_gap = self.algo_config.critic.target_q_gap if self.automatic_cql_tuning else 0.0 @property def log_entropy_weight(self): - return ( - self.nets["log_entropy_weight"]() - if self.automatic_entropy_tuning - else torch.zeros(1, requires_grad=False, device=self.device) - ) + return self.nets["log_entropy_weight"]() if self.automatic_entropy_tuning else\ + torch.zeros(1, requires_grad=False, device=self.device) @property def log_cql_weight(self): - return ( - self.nets["log_cql_weight"]() - if self.automatic_cql_tuning - else torch.log( - torch.tensor( - self.algo_config.critic.cql_weight, - requires_grad=False, - device=self.device, - ) - ) - ) + return self.nets["log_cql_weight"]() if self.automatic_cql_tuning else\ + torch.log(torch.tensor(self.algo_config.critic.cql_weight, requires_grad=False, device=self.device)) def _create_networks(self): """ @@ -120,11 +95,9 @@ def _create_networks(self): actor_args.update(dict(self.algo_config.actor.net.gaussian)) else: # Unsupported actor type! - raise ValueError( - f"Unsupported actor requested. " - f"Requested: {self.algo_config.actor.net.type}, " - f"valid options are: {['gaussian']}" - ) + raise ValueError(f"Unsupported actor requested. 
" + f"Requested: {self.algo_config.actor.net.type}, " + f"valid options are: {['gaussian']}") # Policy self.nets["actor"] = actor_cls( @@ -132,9 +105,7 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **actor_args, ) @@ -149,9 +120,7 @@ def _create_networks(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) net_list.append(critic) @@ -168,9 +137,7 @@ def _create_networks(self): # sync target networks at beginning of training with torch.no_grad(): - for critic, critic_target in zip( - self.nets["critic"], self.nets["critic_target"] - ): + for critic, critic_target in zip(self.nets["critic"], self.nets["critic_target"]): TorchUtils.hard_update( source=critic, target=critic_target, @@ -226,25 +193,19 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = { - k: batch["next_obs"][k][:, self.n_step - 1, :] for k in batch["next_obs"] - } - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["next_obs"] = {k: batch["next_obs"][k][:, self.n_step - 1, :] for k in batch["next_obs"]} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last dimension of 1 to be compatible with model outputs # single timestep reward is discounted sum of intermediate rewards in sequence - reward_seq = batch["rewards"][:, : self.n_step] - discounts = torch.pow( - self.algo_config.discount, torch.arange(self.n_step).float() - ).unsqueeze(0) + reward_seq = batch["rewards"][:, :self.n_step] + discounts = torch.pow(self.algo_config.discount, torch.arange(self.n_step).float()).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # consider this n-step seqeunce done if any intermediate dones are present - done_seq = batch["dones"][:, : self.n_step] + done_seq = batch["dones"][:, :self.n_step] input_batch["dones"] = (done_seq.sum(dim=1) > 0).float().unsqueeze(1) # we move to device first before float conversion because image observation modalities will be uint8 - @@ -322,44 +283,29 @@ def _train_policy_on_batch(self, batch, epoch, validate=False): info = OrderedDict() # Sample actions from policy and get log probs - dist = self.nets["actor"].forward_train( - obs_dict=batch["obs"], goal_dict=batch["goal_obs"] - ) + dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) actions, log_prob = self._get_actions_and_log_prob(dist=dist) # Calculate alpha - entropy_weight_loss = ( - -( - self.log_entropy_weight * (log_prob + self.target_entropy).detach() - ).mean() - if self.automatic_entropy_tuning - else 0.0 - ) + entropy_weight_loss = -(self.log_entropy_weight * (log_prob + self.target_entropy).detach()).mean() if\ + self.automatic_entropy_tuning else 0.0 entropy_weight = self.log_entropy_weight.exp() # Get predicted Q-values for all state, action 
pairs - pred_qs = [ - critic(obs_dict=batch["obs"], acts=actions, goal_dict=batch["goal_obs"]) - for critic in self.nets["critic"] - ] + pred_qs = [critic(obs_dict=batch["obs"], acts=actions, goal_dict=batch["goal_obs"]) + for critic in self.nets["critic"]] # We take the minimum for stability pred_qs, _ = torch.cat(pred_qs, dim=1).min(dim=1, keepdim=True) # Use BC if we're in the beginning of training, otherwise calculate policy loss normally - baseline = ( - dist.log_prob(batch["actions"]).unsqueeze(dim=-1) - if self._num_batch_steps < self.bc_start_steps - else pred_qs - ) + baseline = dist.log_prob(batch["actions"]).unsqueeze(dim=-1) if\ + self._num_batch_steps < self.bc_start_steps else pred_qs policy_loss = (entropy_weight * log_prob - baseline).mean() # Add info info["entropy_weight"] = entropy_weight.item() - info["entropy_weight_loss"] = ( - entropy_weight_loss.item() - if self.automatic_entropy_tuning - else entropy_weight_loss - ) + info["entropy_weight_loss"] = entropy_weight_loss.item() if \ + self.automatic_entropy_tuning else entropy_weight_loss info["actor/loss"] = policy_loss # Take a training step if we're not validating @@ -371,9 +317,7 @@ def _train_policy_on_batch(self, batch, epoch, validate=False): self.optimizers["entropy"].zero_grad() entropy_weight_loss.backward() self.optimizers["entropy"].step() - info["entropy_grad_norms"] = ( - self.log_entropy_weight.grad.data.norm(2).pow(2).item() - ) + info["entropy_grad_norms"] = self.log_entropy_weight.grad.data.norm(2).pow(2).item() # Policy actor_grad_norms = TorchUtils.backprop_for_loss( @@ -443,22 +387,12 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): N = self.algo_config.critic.num_random_actions # Get predicted Q-values from taken actions - q_preds = [ - critic( - obs_dict=batch["obs"], - acts=batch["actions"], - goal_dict=batch["goal_obs"], - ) - for critic in self.nets["critic"] - ] + q_preds = [critic(obs_dict=batch["obs"], acts=batch["actions"], goal_dict=batch["goal_obs"]) + for critic in self.nets["critic"]] # Sample actions at the current and next step - curr_dist = self.nets["actor"].forward_train( - obs_dict=batch["obs"], goal_dict=batch["goal_obs"] - ) - next_dist = self.nets["actor"].forward_train( - obs_dict=batch["next_obs"], goal_dict=batch["goal_obs"] - ) + curr_dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) + next_dist = self.nets["actor"].forward_train(obs_dict=batch["next_obs"], goal_dict=batch["goal_obs"]) next_actions, next_log_prob = self._get_actions_and_log_prob(dist=next_dist) # Don't capture gradients here, since the critic target network doesn't get trained (only soft updated) @@ -466,88 +400,43 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # We take the max over all samples if the number of action samples is > 1 if self.algo_config.critic.num_action_samples > 1: # Generate the target q values, using the backup from the next state - temp_actions = next_dist.rsample( - sample_shape=(self.algo_config.critic.num_action_samples,) - ).permute(1, 0, 2) - target_qs = [ - self._get_qs_from_actions( - obs_dict=batch["next_obs"], - actions=temp_actions, - goal_dict=batch["goal_obs"], - q_net=critic, - ).max(dim=1, keepdim=True)[0] - for critic in self.nets["critic_target"] - ] + temp_actions = next_dist.rsample(sample_shape=(self.algo_config.critic.num_action_samples,)).permute(1, 0, 2) + target_qs = [self._get_qs_from_actions( + obs_dict=batch["next_obs"], actions=temp_actions, goal_dict=batch["goal_obs"], 
q_net=critic) + .max(dim=1, keepdim=True)[0] for critic in self.nets["critic_target"]] else: - target_qs = [ - critic( - obs_dict=batch["next_obs"], - acts=next_actions, - goal_dict=batch["goal_obs"], - ) - for critic in self.nets["critic_target"] - ] + target_qs = [critic(obs_dict=batch["next_obs"], acts=next_actions, goal_dict=batch["goal_obs"]) + for critic in self.nets["critic_target"]] # Take the minimum over all critics target_qs, _ = torch.cat(target_qs, dim=1).min(dim=1, keepdim=True) # If only sampled once from each critic and not using a deterministic backup, subtract the logprob as well - if ( - self.algo_config.critic.num_action_samples == 1 - and not self.deterministic_backup - ): + if self.algo_config.critic.num_action_samples == 1 and not self.deterministic_backup: target_qs = target_qs - self.log_entropy_weight.exp() * next_log_prob # Calculate the q target values - done_mask_batch = 1.0 - batch["dones"] + done_mask_batch = 1. - batch["dones"] info["done_masks"] = done_mask_batch q_target = batch["rewards"] + done_mask_batch * self.discount * target_qs # Calculate CQL stuff - cql_random_actions = ( - torch.FloatTensor(N, B, A).uniform_(-1.0, 1.0).to(self.device) - ) # shape (N, B, A) - cql_random_log_prob = np.log(0.5**A) - cql_curr_actions, cql_curr_log_prob = self._get_actions_and_log_prob( - dist=curr_dist, sample_shape=(N,) - ) # shape (N, B, A) and (N, B, 1) - cql_next_actions, cql_next_log_prob = self._get_actions_and_log_prob( - dist=next_dist, sample_shape=(N,) - ) # shape (N, B, A) and (N, B, 1) - cql_curr_log_prob = ( - cql_curr_log_prob.squeeze(dim=-1).permute(1, 0).detach() - ) # shape (B, N) - cql_next_log_prob = ( - cql_next_log_prob.squeeze(dim=-1).permute(1, 0).detach() - ) # shape (B, N) - q_cats = [] # Each entry shape will be (B, N) + cql_random_actions = torch.FloatTensor(N, B, A).uniform_(-1., 1.).to(self.device) # shape (N, B, A) + cql_random_log_prob = np.log(0.5 ** A) + cql_curr_actions, cql_curr_log_prob = self._get_actions_and_log_prob(dist=curr_dist, sample_shape=(N,)) # shape (N, B, A) and (N, B, 1) + cql_next_actions, cql_next_log_prob = self._get_actions_and_log_prob(dist=next_dist, sample_shape=(N,)) # shape (N, B, A) and (N, B, 1) + cql_curr_log_prob = cql_curr_log_prob.squeeze(dim=-1).permute(1, 0).detach() # shape (B, N) + cql_next_log_prob = cql_next_log_prob.squeeze(dim=-1).permute(1, 0).detach() # shape (B, N) + q_cats = [] # Each entry shape will be (B, N) for critic, q_pred in zip(self.nets["critic"], q_preds): # Compose Q values over all sampled actions (importance sampled) - q_rand = self._get_qs_from_actions( - obs_dict=batch["obs"], - actions=cql_random_actions.permute(1, 0, 2), - goal_dict=batch["goal_obs"], - q_net=critic, - ) - q_curr = self._get_qs_from_actions( - obs_dict=batch["obs"], - actions=cql_curr_actions.permute(1, 0, 2), - goal_dict=batch["goal_obs"], - q_net=critic, - ) - q_next = self._get_qs_from_actions( - obs_dict=batch["obs"], - actions=cql_next_actions.permute(1, 0, 2), - goal_dict=batch["goal_obs"], - q_net=critic, - ) - q_cat = torch.cat( - [ - q_rand - cql_random_log_prob, - q_next - cql_next_log_prob, - q_curr - cql_curr_log_prob, - ], - dim=1, - ) # shape (B, 3 * N) + q_rand = self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_random_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) + q_curr = self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_curr_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) + q_next = 
self._get_qs_from_actions(obs_dict=batch["obs"], actions=cql_next_actions.permute(1, 0, 2), goal_dict=batch["goal_obs"], q_net=critic) + q_cat = torch.cat([ + q_rand - cql_random_log_prob, + q_next - cql_next_log_prob, + q_curr - cql_curr_log_prob, + ], dim=1) # shape (B, 3 * N) q_cats.append(q_cat) # Calculate the losses for all critics @@ -559,11 +448,8 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # Calculate td error loss td_loss = self.td_loss_fcn(q_pred, q_target) # Calculate cql loss - cql_loss = cql_weight * ( - self.min_q_weight - * (torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean()) - - self.target_q_gap - ) + cql_loss = cql_weight * (self.min_q_weight * (torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean()) - + self.target_q_gap) cql_losses.append(cql_loss) # Calculate total loss loss = td_loss + cql_loss @@ -575,26 +461,18 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): # Train CQL weight if tuning automatically if self.automatic_cql_tuning: cql_weight_loss = -torch.stack(cql_losses).mean() - info["critic/cql_weight_loss"] = ( - cql_weight_loss.item() - ) # Make sure to not store computation graph since we retain graph after backward() call + info[ + "critic/cql_weight_loss"] = cql_weight_loss.item() # Make sure to not store computation graph since we retain graph after backward() call self.optimizers["cql"].zero_grad() cql_weight_loss.backward(retain_graph=True) self.optimizers["cql"].step() - info["critic/cql_grad_norms"] = ( - self.log_cql_weight.grad.data.norm(2).pow(2).item() - ) + info["critic/cql_grad_norms"] = self.log_cql_weight.grad.data.norm(2).pow(2).item() # Train critics - for i, (critic_loss, critic, critic_target, optimizer) in enumerate( - zip( - critic_losses, - self.nets["critic"], - self.nets["critic_target"], - self.optimizers["critic"], - ) - ): - retain_graph = i < (len(critic_losses) - 1) + for i, (critic_loss, critic, critic_target, optimizer) in enumerate(zip( + critic_losses, self.nets["critic"], self.nets["critic_target"], self.optimizers["critic"] + )): + retain_graph = (i < (len(critic_losses) - 1)) critic_grad_norms = TorchUtils.backprop_for_loss( net=critic, optim=optimizer, @@ -604,11 +482,7 @@ def _train_critic_on_batch(self, batch, epoch, validate=False): ) info[f"critic/critic{i+1}_grad_norms"] = critic_grad_norms with torch.no_grad(): - TorchUtils.soft_update( - source=critic, - target=critic_target, - tau=self.algo_config.target_tau, - ) + TorchUtils.soft_update(source=critic, target=critic_target, tau=self.algo_config.target_tau) # Return stats return info @@ -628,12 +502,8 @@ def _get_actions_and_log_prob(self, dist, sample_shape=torch.Size()): """ # Process networks with tanh differently than normal distributions if self.algo_config.actor.net.common.use_tanh: - actions, actions_pre_tanh = dist.rsample( - sample_shape=sample_shape, return_pretanh_value=True - ) - log_prob = dist.log_prob( - actions, pre_tanh_value=actions_pre_tanh - ).unsqueeze(dim=-1) + actions, actions_pre_tanh = dist.rsample(sample_shape=sample_shape, return_pretanh_value=True) + log_prob = dist.log_prob(actions, pre_tanh_value=actions_pre_tanh).unsqueeze(dim=-1) else: actions = dist.rsample(sample_shape=sample_shape) log_prob = dist.log_prob(actions) @@ -662,11 +532,7 @@ def _get_qs_from_actions(obs_dict, actions, goal_dict, q_net): goal_dict_stacked = ObsUtils.repeat_and_stack_observation(goal_dict, N) # Pass the obs and (flattened) actions through to get the Q values - qs = q_net( - obs_dict=obs_dict_stacked, - 
acts=actions.reshape(-1, D), - goal_dict=goal_dict_stacked, - ) + qs = q_net(obs_dict=obs_dict_stacked, acts=actions.reshape(-1, D), goal_dict=goal_dict_stacked) # Unflatten output qs = qs.reshape(B, N) @@ -692,17 +558,14 @@ def log_info(self, info): optims = [self.optimizers[k]] if k == "critic": # account for critic having one optimizer per ensemble member - keys = [ - "{}{}".format(k, critic_ind) - for critic_ind in range(len(self.nets["critic"])) - ] + keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))] optims = self.optimizers[k] for kp, optimizer in zip(keys, optims): for i, param_group in enumerate(optimizer.param_groups): loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"] # extract relevant logs for critic, and actor - loss_log["Loss"] = 0.0 + loss_log["Loss"] = 0. for loss_logger in [self._log_critic_info, self._log_actor_info]: this_log = loss_logger(info) if "Loss" in this_log: @@ -719,20 +582,14 @@ def _log_critic_info(self, info): """ loss_log = OrderedDict() if "done_masks" in info: - loss_log["Critic/Done_Mask_Percentage"] = ( - 100.0 * torch.mean(info["done_masks"]).item() - ) + loss_log["Critic/Done_Mask_Percentage"] = 100. * torch.mean(info["done_masks"]).item() if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0.0 + loss_log["Loss"] = 0. for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ - "critic/critic{}_loss".format(critic_ind + 1) - ].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ - "critic/critic{}_grad_norms".format(critic_ind + 1) - ] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] if "critic/cql_weight_loss" in info: loss_log["Critic/CQL_Weight"] = info["critic/cql_weight"] diff --git a/robomimic/algo/gl.py b/robomimic/algo/gl.py index fc6702a4..24ae8008 100644 --- a/robomimic/algo/gl.py +++ b/robomimic/algo/gl.py @@ -1,7 +1,6 @@ """ Subgoal prediction models, used in HBC / IRIS. """ - import numpy as np from collections import OrderedDict from copy import deepcopy @@ -39,9 +38,14 @@ class GL(PlannerAlgo): """ Implements goal prediction component for HBC and IRIS. 
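The conservative term assembled across the preceding critic hunks is easier to read in isolation: scale up a log-sum-exp over Q-values of sampled actions, push down against Q-values of dataset actions, and subtract the target gap. A sketch of just that penalty, assuming q_cat already has the sampling log-probabilities subtracted as in the hunks above:

import torch

def cql_penalty(q_cat, q_pred, min_q_weight, cql_weight, target_q_gap=0.0):
    # q_cat: (B, 3N) importance-corrected Q-values of random/current/next actions
    # q_pred: (B, 1) Q-values of the dataset actions
    gap = torch.logsumexp(q_cat, dim=1).mean() - q_pred.mean()
    return cql_weight * (min_q_weight * gap - target_q_gap)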
""" - def __init__( - self, algo_config, obs_config, global_config, obs_key_shapes, ac_dim, device + self, + algo_config, + obs_config, + global_config, + obs_key_shapes, + ac_dim, + device ): """ Args: @@ -67,7 +71,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) def _create_networks(self): @@ -83,12 +87,10 @@ def _create_networks(self): # deterministic goal prediction network self.nets["goal_network"] = ObsNets.MIMO_MLP( - input_obs_group_shapes=obs_group_shapes, + input_obs_group_shapes=obs_group_shapes, output_shapes=self.subgoal_shapes, layer_dims=self.algo_config.ae.planner_layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets = self.nets.float().to(self.device) @@ -104,29 +106,22 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() # remove temporal batches for all except scalar signals (to be compatible with model outputs) - input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} + input_batch["obs"] = { k: batch["obs"][k][:, 0, :] for k in batch["obs"] } # extract multi-horizon subgoal target - input_batch["subgoals"] = { - k: batch["next_obs"][k][:, self._subgoal_horizon - 1, :] - for k in batch["next_obs"] - } + input_batch["subgoals"] = {k: batch["next_obs"][k][:, self._subgoal_horizon - 1, :] for k in batch["next_obs"]} input_batch["target_subgoals"] = input_batch["subgoals"] - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU return TensorUtils.to_float(TensorUtils.to_device(input_batch, self.device)) - def get_actor_goal_for_training_from_processed_batch( - self, processed_batch, **kwargs - ): + def get_actor_goal_for_training_from_processed_batch(self, processed_batch, **kwargs): """ Retrieve subgoals from processed batch to use for training the actor. Subclasses can modify this function to change the subgoals. @@ -160,18 +155,14 @@ def train_on_batch(self, batch, epoch, validate=False): info = super(GL, self).train_on_batch(batch, epoch, validate=validate) # predict subgoal observations with goal network - pred_subgoals = self.nets["goal_network"]( - obs=batch["obs"], goal=batch["goal_obs"] - ) + pred_subgoals = self.nets["goal_network"](obs=batch["obs"], goal=batch["goal_obs"]) # compute loss as L2 error for each observation key losses = OrderedDict() target_subgoals = batch["target_subgoals"] # targets for network prediction - goal_loss = 0.0 + goal_loss = 0. for k in pred_subgoals: - assert ( - pred_subgoals[k].shape == target_subgoals[k].shape - ), "mismatch in predicted and target subgoals!" + assert pred_subgoals[k].shape == target_subgoals[k].shape, "mismatch in predicted and target subgoals!" 
mode_loss = nn.MSELoss()(pred_subgoals[k], target_subgoals[k]) goal_loss += mode_loss losses["goal_{}_loss".format(k)] = mode_loss @@ -227,7 +218,7 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): """ Sample @num_samples subgoals from the network per observation. - Since this class implements a deterministic subgoal prediction, + Since this class implements a deterministic subgoal prediction, this function returns identical subgoals for each input observation. Args: @@ -247,9 +238,7 @@ def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): # [batch_size * num_samples, ...] goals = self.get_subgoal_predictions(obs_dict=obs_tiled, goal_dict=goal_tiled) # reshape to [batch_size, num_samples, ...] - return TensorUtils.reshape_dimensions( - goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples) - ) + return TensorUtils.reshape_dimensions(goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples)) def get_action(self, obs_dict, goal_dict=None): """ @@ -269,7 +258,6 @@ class GL_VAE(GL): """ Implements goal prediction via VAE. """ - def _create_networks(self): """ Creates networks and places them into @self.nets. @@ -282,9 +270,7 @@ def _create_networks(self): condition_shapes=self.obs_shapes, goal_shapes=self.goal_shapes, device=self.device, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **VAENets.vae_args_from_config(self.algo_config.vae), ) @@ -300,7 +286,7 @@ def get_actor_goal_for_training_from_processed_batch( ): """ Modify from superclass to support a @use_latent_subgoals option. - The VAE can optionally return latent subgoals by passing the subgoal + The VAE can optionally return latent subgoals by passing the subgoal observations in the batch through the encoder. Args: @@ -312,8 +298,8 @@ def get_actor_goal_for_training_from_processed_batch( use_prior_correction (bool): if True, use a "prior correction" trick to choose a latent subgoal sampled from the prior that is close to the - latent from the VAE encoder (posterior). This can help with issues at - test-time where the encoder latent distribution might not match + latent from the VAE encoder (posterior). This can help with issues at + test-time where the encoder latent distribution might not match the prior latent distribution. 
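A compressed sketch of the prior-correction trick just described (the full version follows below in this hunk); latent shapes are illustrative:

import torch

def prior_correction(posterior_z, prior_z_samples):
    # posterior_z: (B, Z) encoder latents; prior_z_samples: (B, M, Z) prior draws
    # pick, per batch member, the prior sample closest in L2 to its posterior latent
    distances = (prior_z_samples - posterior_z.unsqueeze(1)).pow(2).sum(dim=-1)  # (B, M)
    nearest = torch.argmin(distances, dim=1)                                     # (B,)
    return prior_z_samples[torch.arange(posterior_z.shape[0]), nearest]          # (B, Z)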
num_prior_samples (int): number of VAE prior samples to take and choose among, @@ -329,18 +315,16 @@ def get_actor_goal_for_training_from_processed_batch( # batch variables obs = processed_batch["obs"] subgoals = processed_batch["subgoals"] # full subgoal observations - target_subgoals = processed_batch[ - "target_subgoals" - ] # targets for network prediction + target_subgoals = processed_batch["target_subgoals"] # targets for network prediction goal_obs = processed_batch["goal_obs"] with torch.no_grad(): # run VAE forward pass to get samples from posterior for the current observation and subgoal vae_outputs = self.nets["goal_network"]( - inputs=subgoals, # encoder takes full subgoals - outputs=target_subgoals, # reconstruct target subgoals + inputs=subgoals, # encoder takes full subgoals + outputs=target_subgoals, # reconstruct target subgoals goals=goal_obs, - conditions=obs, # condition on observations + conditions=obs, # condition on observations ) posterior_z = vae_outputs["encoder_z"] latent_subgoals = posterior_z @@ -353,14 +337,10 @@ def get_actor_goal_for_training_from_processed_batch( batch_size = obs[random_key].shape[0] # for each batch member, get @num_prior_samples samples from the prior - obs_tiled = ObsUtils.repeat_and_stack_observation( - obs, n=num_prior_samples - ) + obs_tiled = ObsUtils.repeat_and_stack_observation(obs, n=num_prior_samples) goal_tiled = None if len(self.goal_shapes) > 0: - goal_tiled = ObsUtils.repeat_and_stack_observation( - goal_obs, n=num_prior_samples - ) + goal_tiled = ObsUtils.repeat_and_stack_observation(goal_obs, n=num_prior_samples) prior_z_samples = self.nets["goal_network"].sample_prior( conditions=obs_tiled, @@ -371,9 +351,7 @@ def get_actor_goal_for_training_from_processed_batch( # note: every posterior sample in the batch has @num_prior_samples corresponding prior samples # reshape prior samples to (batch_size, num_samples, latent_dim) - prior_z_samples = prior_z_samples.reshape( - batch_size, num_prior_samples, -1 - ) + prior_z_samples = prior_z_samples.reshape(batch_size, num_prior_samples, -1) # reshape posterior latents to (batch_size, 1, latent_dim) posterior_z_expanded = posterior_z.unsqueeze(1) @@ -384,11 +362,9 @@ def get_actor_goal_for_training_from_processed_batch( # then gather the closest prior sample for each posterior sample neighbors = torch.argmin(distances, dim=1) - latent_subgoals = prior_z_samples[ - torch.arange(batch_size).long(), neighbors - ] + latent_subgoals = prior_z_samples[torch.arange(batch_size).long(), neighbors] - return {"latent_subgoal": latent_subgoals} + return { "latent_subgoal" : latent_subgoals } def train_on_batch(self, batch, epoch, validate=False): """ @@ -411,13 +387,8 @@ def train_on_batch(self, batch, epoch, validate=False): info = super(GL, self).train_on_batch(batch, epoch, validate=validate) if self.algo_config.vae.prior.use_categorical: - temperature = ( - self.algo_config.vae.prior.categorical_init_temp - - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step - ) - temperature = max( - temperature, self.algo_config.vae.prior.categorical_min_temp - ) + temperature = self.algo_config.vae.prior.categorical_init_temp - epoch * self.algo_config.vae.prior.categorical_temp_anneal_step + temperature = max(temperature, self.algo_config.vae.prior.categorical_min_temp) self.nets["goal_network"].set_gumbel_temperature(temperature) # batch variables @@ -427,10 +398,10 @@ def train_on_batch(self, batch, epoch, validate=False): goal_obs = batch["goal_obs"] vae_outputs = 
self.nets["goal_network"]( - inputs=subgoals, # encoder takes full subgoals - outputs=target_subgoals, # reconstruct target subgoals + inputs=subgoals, # encoder takes full subgoals + outputs=target_subgoals, # reconstruct target subgoals goals=goal_obs, - conditions=obs, # condition on observations + conditions=obs, # condition on observations ) recons_loss = vae_outputs["reconstruction_loss"] kl_loss = vae_outputs["kl_loss"] @@ -441,9 +412,7 @@ def train_on_batch(self, batch, epoch, validate=False): if not self.algo_config.vae.prior.use_categorical: with torch.no_grad(): - info["encoder_variance"] = torch.exp( - vae_outputs["encoder_params"]["logvar"] - ) + info["encoder_variance"] = torch.exp(vae_outputs["encoder_params"]["logvar"]) # VAE gradient step if not validate: @@ -471,9 +440,7 @@ def log_info(self, info): loss_log["Reconstruction_Loss"] = info["recons_loss"].item() loss_log["KL_Loss"] = info["kl_loss"].item() if self.algo_config.vae.prior.use_categorical: - loss_log["Gumbel_Temperature"] = self.nets[ - "goal_network" - ].get_gumbel_temperature() + loss_log["Gumbel_Temperature"] = self.nets["goal_network"].get_gumbel_temperature() else: loss_log["Encoder_Variance"] = info["encoder_variance"].mean().item() return loss_log @@ -500,10 +467,8 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): return OrderedDict(latent_subgoal=latent_subgoals) # sample a single goal from the VAE - goals = self.sample_subgoals( - obs_dict=obs_dict, goal_dict=goal_dict, num_samples=1 - ) - return {k: goals[k][:, 0, ...] for k in goals} + goals = self.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=1) + return { k : goals[k][:, 0, ...] for k in goals } def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): """ @@ -527,13 +492,9 @@ def sample_subgoals(self, obs_dict, goal_dict=None, num_samples=1): mod = list(obs_tiled.keys())[0] n = obs_tiled[mod].shape[0] # [batch_size * num_samples, ...] - goals = self.nets["goal_network"].decode( - n=n, conditions=obs_tiled, goals=goal_tiled - ) + goals = self.nets["goal_network"].decode(n=n, conditions=obs_tiled, goals=goal_tiled) # reshape to [batch_size, num_samples, ...] - return TensorUtils.reshape_dimensions( - goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples) - ) + return TensorUtils.reshape_dimensions(goals, begin_axis=0, end_axis=0, target_dims=(-1, num_samples)) class ValuePlanner(PlannerAlgo, ValueAlgo): @@ -542,7 +503,6 @@ class ValuePlanner(PlannerAlgo, ValueAlgo): based on (1) a @PlannerAlgo that is used to sample candidate subgoals and (2) a @ValueAlgo that is used to select one of the subgoals. 
""" - def __init__( self, planner_algo_class, @@ -553,6 +513,7 @@ def __init__( obs_key_shapes, ac_dim, device, + ): """ Args: @@ -587,7 +548,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) self.value_net = value_algo_class( @@ -596,7 +557,7 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) self.subgoal_shapes = self.planner.subgoal_shapes @@ -612,7 +573,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -647,14 +608,10 @@ def train_on_batch(self, batch, epoch, validate=False): info = dict(planner=dict(), value_net=dict()) # train planner - info["planner"].update( - self.planner.train_on_batch(batch["planner"], epoch, validate=validate) - ) + info["planner"].update(self.planner.train_on_batch(batch["planner"], epoch, validate=validate)) # train value network - info["value_net"].update( - self.value_net.train_on_batch(batch["value_net"], epoch, validate=validate) - ) + info["value_net"].update(self.value_net.train_on_batch(batch["value_net"], epoch, validate=validate)) return info @@ -669,7 +626,7 @@ def log_info(self, info): Returns: loss_log (dict): name -> summary statistic """ - loss = 0.0 + loss = 0. # planner planner_log = self.planner.log_info(info["planner"]) @@ -739,14 +696,8 @@ def __repr__(self): """ msg = str(self.__class__.__name__) import textwrap - - return ( - msg - + "Planner:\n" - + textwrap.indent(self.planner.__repr__(), " ") - + "\n\nValue Network:\n" - + textwrap.indent(self.value_net.__repr__(), " ") - ) + return msg + "Planner:\n" + textwrap.indent(self.planner.__repr__(), ' ') + \ + "\n\nValue Network:\n" + textwrap.indent(self.value_net.__repr__(), ' ') def get_subgoal_predictions(self, obs_dict, goal_dict=None): """ @@ -763,16 +714,12 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): num_samples = self.algo_config.num_samples # sample subgoals from the planner (shape: [batch_size, num_samples, ...]) - subgoals = self.sample_subgoals( - obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples - ) + subgoals = self.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples) # stack subgoals to get all values in one forward pass (shape [batch_size * num_samples, ...]) k = list(obs_dict.keys())[0] bsize = obs_dict[k].shape[0] - subgoals_tiled = TensorUtils.reshape_dimensions( - subgoals, begin_axis=0, end_axis=1, target_dims=(bsize * num_samples,) - ) + subgoals_tiled = TensorUtils.reshape_dimensions(subgoals, begin_axis=0, end_axis=1, target_dims=(bsize * num_samples,)) # also repeat goals if necessary goal_tiled = None @@ -780,15 +727,11 @@ def get_subgoal_predictions(self, obs_dict, goal_dict=None): goal_tiled = ObsUtils.repeat_and_stack_observation(goal_dict, n=num_samples) # evaluate the value of each subgoal - subgoal_values = self.value_net.get_state_value( - obs_dict=subgoals_tiled, goal_dict=goal_tiled - ).reshape(-1, num_samples) + subgoal_values = self.value_net.get_state_value(obs_dict=subgoals_tiled, goal_dict=goal_tiled).reshape(-1, num_samples) # pick the best subgoal best_index = torch.argmax(subgoal_values, dim=1) - best_subgoal = { - k: subgoals[k][torch.arange(bsize), best_index] for k in subgoals - } + best_subgoal = {k: subgoals[k][torch.arange(bsize), best_index] for k in subgoals} return best_subgoal def 
sample_subgoals(self, obs_dict, goal_dict, num_samples=1): @@ -802,9 +745,7 @@ def sample_subgoals(self, obs_dict, goal_dict, num_samples=1): Returns: subgoals (dict): name -> Tensor [batch_size, num_samples, ...] """ - return self.planner.sample_subgoals( - obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples - ) + return self.planner.sample_subgoals(obs_dict=obs_dict, goal_dict=goal_dict, num_samples=num_samples) def get_state_value(self, obs_dict, goal_dict=None): """ @@ -831,6 +772,4 @@ def get_state_action_value(self, obs_dict, actions, goal_dict=None): Returns: value (torch.Tensor): value tensor """ - return self.value_net.get_state_action_value( - obs_dict=obs_dict, actions=actions, goal_dict=goal_dict - ) + return self.value_net.get_state_action_value(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) diff --git a/robomimic/algo/hbc.py b/robomimic/algo/hbc.py index 6d8f6dc4..543b1fbc 100644 --- a/robomimic/algo/hbc.py +++ b/robomimic/algo/hbc.py @@ -5,7 +5,6 @@ reach them. Largely based on the Generalization Through Imitation (GTI) paper (see https://arxiv.org/abs/2003.06085). """ - import textwrap import numpy as np from collections import OrderedDict @@ -16,12 +15,7 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils from robomimic.config.config import Config -from robomimic.algo import ( - register_algo_factory_func, - algo_name_to_factory_func, - HierarchicalAlgo, - GL_VAE, -) +from robomimic.algo import register_algo_factory_func, algo_name_to_factory_func, HierarchicalAlgo, GL_VAE @register_algo_factory_func("hbc") @@ -45,7 +39,6 @@ class HBC(HierarchicalAlgo): """ Default HBC training, largely based on https://arxiv.org/abs/2003.06085 """ - def __init__( self, planner_algo_class, @@ -84,13 +77,9 @@ def __init__( self.ac_dim = ac_dim self.device = device - self._subgoal_step_count = ( - 0 # current step count for deciding when to update subgoal - ) + self._subgoal_step_count = 0 # current step count for deciding when to update subgoal self._current_subgoal = None # latest subgoal - self._subgoal_update_interval = ( - self.algo_config.subgoal_update_interval - ) # subgoal update frequency + self._subgoal_update_interval = self.algo_config.subgoal_update_interval # subgoal update frequency self._subgoal_horizon = self.algo_config.planner.subgoal_horizon self._actor_horizon = self.algo_config.actor.rnn.horizon @@ -103,16 +92,14 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) # goal-conditional actor follows goals set by the planner self.actor_goal_shapes = self.planner.subgoal_shapes if self.algo_config.latent_subgoal.enabled: assert planner_algo_class == GL_VAE # only VAE supported for now - self.actor_goal_shapes = OrderedDict( - latent_subgoal=(self.planner.algo_config.vae.latent_dim,) - ) + self.actor_goal_shapes = OrderedDict(latent_subgoal=(self.planner.algo_config.vae.latent_dim,)) # only for the actor: override goal modalities and shapes to match the subgoal set by the planner actor_obs_key_shapes = deepcopy(obs_key_shapes) @@ -122,9 +109,7 @@ def __init__( assert actor_obs_key_shapes[k] == self.actor_goal_shapes[k] actor_obs_key_shapes.update(self.actor_goal_shapes) - goal_obs_keys = { - obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys() - } + goal_obs_keys = {obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys()} for k in self.actor_goal_shapes.keys(): 
goal_obs_keys[ObsUtils.OBS_KEYS_TO_MODALITIES[k]].append(k) @@ -152,7 +137,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -162,34 +147,25 @@ def process_batch_for_training(self, batch): if self.algo_config.actor_use_random_subgoals: # optionally use randomly sampled step between [1, seq_length] as policy goal policy_subgoal_indices = torch.randint( - low=0, - high=self.global_config.train.seq_length, - size=(batch["actions"].shape[0],), - ) - goal_obs = TensorUtils.gather_sequence( - batch["next_obs"], policy_subgoal_indices - ) - goal_obs = TensorUtils.to_float( - TensorUtils.to_device(goal_obs, self.device) - ) - input_batch["actor"]["goal_obs"] = ( + low=0, high=self.global_config.train.seq_length, size=(batch["actions"].shape[0],)) + goal_obs = TensorUtils.gather_sequence(batch["next_obs"], policy_subgoal_indices) + goal_obs = TensorUtils.to_float(TensorUtils.to_device(goal_obs, self.device)) + input_batch["actor"]["goal_obs"] = \ self.planner.get_actor_goal_for_training_from_processed_batch( goal_obs, use_latent_subgoals=self.algo_config.latent_subgoal.enabled, use_prior_correction=self.algo_config.latent_subgoal.prior_correction.enabled, num_prior_samples=self.algo_config.latent_subgoal.prior_correction.num_samples, ) - ) else: # otherwise, use planner subgoal target as goal for the policy - input_batch["actor"]["goal_obs"] = ( + input_batch["actor"]["goal_obs"] = \ self.planner.get_actor_goal_for_training_from_processed_batch( input_batch["planner"], use_latent_subgoals=self.algo_config.latent_subgoal.enabled, use_prior_correction=self.algo_config.latent_subgoal.prior_correction.enabled, num_prior_samples=self.algo_config.latent_subgoal.prior_correction.num_samples, ) - ) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -214,34 +190,24 @@ def train_on_batch(self, batch, epoch, validate=False): """ info = dict(planner=dict(), actor=dict()) # train planner - info["planner"].update( - self.planner.train_on_batch(batch["planner"], epoch, validate=validate) - ) + info["planner"].update(self.planner.train_on_batch(batch["planner"], epoch, validate=validate)) # train actor if self._algo_mode == "separate": # train low-level actor by getting subgoals from the dataset - info["actor"].update( - self.actor.train_on_batch(batch["actor"], epoch, validate=validate) - ) + info["actor"].update(self.actor.train_on_batch(batch["actor"], epoch, validate=validate)) elif self._algo_mode == "cascade": # get predictions from the planner with torch.no_grad(): batch["actor"]["goal_obs"] = self.planner.get_subgoal_predictions( - obs_dict=batch["planner"]["obs"], - goal_dict=batch["planner"]["goal_obs"], - ) + obs_dict=batch["planner"]["obs"], goal_dict=batch["planner"]["goal_obs"]) # train actor with the predicted goal - info["actor"].update( - self.actor.train_on_batch(batch["actor"], epoch, validate=validate) - ) + info["actor"].update(self.actor.train_on_batch(batch["actor"], epoch, validate=validate)) else: - raise NotImplementedError( - "algo mode {} is not implemented".format(self._algo_mode) - ) + raise NotImplementedError("algo mode {} is not implemented".format(self._algo_mode)) return info @@ -258,7 +224,7 @@ def log_info(self, info): """ planner_log = dict() actor_log = dict() - loss = 0.0 + loss = 0. 
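The actor_use_random_subgoals branch above relabels goals on the fly: each batch element draws a uniform index in [0, seq_length) and that step of next_obs becomes the actor's goal. A minimal sketch, assuming a single (B, T, D) tensor in place of robomimic's nested observation dicts:

    import torch

    def random_subgoal_relabel(next_obs, seq_length):
        # next_obs: (B, T, D) with T >= seq_length
        B = next_obs.shape[0]
        idx = torch.randint(low=0, high=seq_length, size=(B,))
        return next_obs[torch.arange(B), idx]  # (B, D): one random future step each

TensorUtils.gather_sequence plays the role of the fancy indexing here, applied across every key of the observation dict.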
planner_log = self.planner.log_info(info["planner"]) planner_log = dict(("Planner/" + k, v) for k, v in planner_log.items()) @@ -318,7 +284,7 @@ def current_subgoal(self): """ Return the current subgoal (at rollout time) with shape (batch, ...) """ - return {k: self._current_subgoal[k].clone() for k in self._current_subgoal} + return { k : self._current_subgoal[k].clone() for k in self._current_subgoal } @current_subgoal.setter def current_subgoal(self, sg): @@ -331,7 +297,7 @@ def current_subgoal(self, sg): assert list(v.shape[1:]) == list(self.planner.subgoal_shapes[k]) # subgoal shapes should always match actor goal shapes assert list(v.shape[1:]) == list(self.actor_goal_shapes[k]) - self._current_subgoal = {k: sg[k].clone() for k in sg} + self._current_subgoal = { k : sg[k].clone() for k in sg } def get_action(self, obs_dict, goal_dict=None): """ @@ -344,18 +310,11 @@ def get_action(self, obs_dict, goal_dict=None): Returns: action (torch.Tensor): action tensor """ - if ( - self._current_subgoal is None - or self._subgoal_step_count % self._subgoal_update_interval == 0 - ): + if self._current_subgoal is None or self._subgoal_step_count % self._subgoal_update_interval == 0: # update current subgoal - self.current_subgoal = self.planner.get_subgoal_predictions( - obs_dict=obs_dict, goal_dict=goal_dict - ) + self.current_subgoal = self.planner.get_subgoal_predictions(obs_dict=obs_dict, goal_dict=goal_dict) - action = self.actor.get_action( - obs_dict=obs_dict, goal_dict=self.current_subgoal - ) + action = self.actor.get_action(obs_dict=obs_dict, goal_dict=self.current_subgoal) self._subgoal_step_count += 1 return action @@ -373,20 +332,13 @@ def __repr__(self): Pretty print algorithm and network description. """ msg = str(self.__class__.__name__) - msg += ( - "(subgoal_horizon={}, actor_horizon={}, subgoal_update_interval={}, mode={}, " - "actor_use_random_subgoals={})\n".format( - self._subgoal_horizon, - self._actor_horizon, - self._subgoal_update_interval, - self._algo_mode, - self.algo_config.actor_use_random_subgoals, - ) - ) - return ( - msg - + "Planner:\n" - + textwrap.indent(self.planner.__repr__(), " ") - + "\n\nPolicy:\n" - + textwrap.indent(self.actor.__repr__(), " ") + msg += "(subgoal_horizon={}, actor_horizon={}, subgoal_update_interval={}, mode={}, " \ + "actor_use_random_subgoals={})\n".format( + self._subgoal_horizon, + self._actor_horizon, + self._subgoal_update_interval, + self._algo_mode, + self.algo_config.actor_use_random_subgoals ) + return msg + "Planner:\n" + textwrap.indent(self.planner.__repr__(), ' ') + \ + "\n\nPolicy:\n" + textwrap.indent(self.actor.__repr__(), ' ') diff --git a/robomimic/algo/iql.py b/robomimic/algo/iql.py index cc4239a3..bde522b2 100644 --- a/robomimic/algo/iql.py +++ b/robomimic/algo/iql.py @@ -3,7 +3,6 @@ Based off of https://github.com/rail-berkeley/rlkit/blob/master/rlkit/torch/sac/iql_trainer.py. (Paper - https://arxiv.org/abs/2110.06169). """ - import numpy as np from collections import OrderedDict @@ -57,11 +56,9 @@ def _create_networks(self): actor_args.update(dict(self.algo_config.actor.net.gmm)) else: # Unsupported actor type! - raise ValueError( - f"Unsupported actor requested. " - f"Requested: {self.algo_config.actor.net.type}, " - f"valid options are: {['gaussian', 'gmm']}" - ) + raise ValueError(f"Unsupported actor requested. 
" + f"Requested: {self.algo_config.actor.net.type}, " + f"valid options are: {['gaussian', 'gmm']}") # Actor self.nets["actor"] = actor_cls( @@ -69,9 +66,7 @@ def _create_networks(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), **actor_args, ) @@ -85,9 +80,7 @@ def _create_networks(self): ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.critic.layer_dims, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) net_list.append(critic) @@ -96,9 +89,7 @@ def _create_networks(self): obs_shapes=self.obs_shapes, mlp_layer_dims=self.algo_config.critic.layer_dims, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) # Send networks to appropriate device @@ -106,9 +97,7 @@ def _create_networks(self): # sync target networks at beginning of training with torch.no_grad(): - for critic, critic_target in zip( - self.nets["critic"], self.nets["critic_target"] - ): + for critic, critic_target in zip(self.nets["critic"], self.nets["critic_target"]): TorchUtils.hard_update( source=critic, target=critic_target, @@ -131,12 +120,8 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = { - k: batch["next_obs"][k][:, 0, :] for k in batch["next_obs"] - } - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["next_obs"] = {k: batch["next_obs"][k][:, 0, :] for k in batch["next_obs"]} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] input_batch["dones"] = batch["dones"][:, 0] input_batch["rewards"] = batch["rewards"][:, 0] @@ -175,7 +160,7 @@ def train_on_batch(self, batch, epoch, validate=False): if not validate: # Critic update self._update_critic(critic_losses, vf_loss) - + # Actor update self._update_actor(actor_loss) @@ -210,46 +195,38 @@ def _compute_critic_loss(self, batch): dones = torch.unsqueeze(batch["dones"], 1) # Q predictions - pred_qs = [ - critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) - for critic in self.nets["critic"] - ] + pred_qs = [critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) + for critic in self.nets["critic"]] info["critic/critic1_pred"] = pred_qs[0].mean() # Q target values target_vf_pred = self.nets["vf"](obs_dict=next_obs, goal_dict=goal_obs).detach() - q_target = rewards + (1.0 - dones) * self.algo_config.discount * target_vf_pred + q_target = rewards + (1. 
- dones) * self.algo_config.discount * target_vf_pred q_target = q_target.detach() # Q losses critic_losses = [] - td_loss_fcn = ( - nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() - ) - for i, q_pred in enumerate(pred_qs): + td_loss_fcn = nn.SmoothL1Loss() if self.algo_config.critic.use_huber else nn.MSELoss() + for (i, q_pred) in enumerate(pred_qs): # Calculate td error loss td_loss = td_loss_fcn(q_pred, q_target) info[f"critic/critic{i+1}_loss"] = td_loss critic_losses.append(td_loss) # V predictions - pred_qs = [ - critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) - for critic in self.nets["critic_target"] - ] + pred_qs = [critic(obs_dict=obs, acts=actions, goal_dict=goal_obs) + for critic in self.nets["critic_target"]] q_pred, _ = torch.cat(pred_qs, dim=1).min(dim=1, keepdim=True) q_pred = q_pred.detach() vf_pred = self.nets["vf"](obs) - + # V losses: expectile regression. see section 4.1 in https://arxiv.org/pdf/2110.06169.pdf vf_err = vf_pred - q_pred vf_sign = (vf_err > 0).float() - vf_weight = (1 - vf_sign) * self.algo_config.vf_quantile + vf_sign * ( - 1 - self.algo_config.vf_quantile - ) - vf_loss = (vf_weight * (vf_err**2)).mean() - + vf_weight = (1 - vf_sign) * self.algo_config.vf_quantile + vf_sign * (1 - self.algo_config.vf_quantile) + vf_loss = (vf_weight * (vf_err ** 2)).mean() + # update logs for V loss info["vf/q_pred"] = q_pred info["vf/v_pred"] = vf_pred @@ -268,11 +245,8 @@ def _update_critic(self, critic_losses, vf_loss): """ # update ensemble of critics - for critic_loss, critic, critic_target, optimizer in zip( - critic_losses, - self.nets["critic"], - self.nets["critic_target"], - self.optimizers["critic"], + for (critic_loss, critic, critic_target, optimizer) in zip( + critic_losses, self.nets["critic"], self.nets["critic_target"], self.optimizers["critic"] ): TorchUtils.backprop_for_loss( net=critic, @@ -284,9 +258,7 @@ def _update_critic(self, critic_losses, vf_loss): # update target network with torch.no_grad(): - TorchUtils.soft_update( - source=critic, target=critic_target, tau=self.algo_config.target_tau - ) + TorchUtils.soft_update(source=critic, target=critic_target, tau=self.algo_config.target_tau) # update V function network TorchUtils.backprop_for_loss( @@ -315,9 +287,7 @@ def _compute_actor_loss(self, batch, critic_info): info = OrderedDict() # compute log probability of batch actions - dist = self.nets["actor"].forward_train( - obs_dict=batch["obs"], goal_dict=batch["goal_obs"] - ) + dist = self.nets["actor"].forward_train(obs_dict=batch["obs"], goal_dict=batch["goal_obs"]) log_prob = dist.log_prob(batch["actions"]) info["actor/log_prob"] = log_prob.mean() @@ -326,7 +296,7 @@ def _compute_actor_loss(self, batch, critic_info): q_pred = critic_info["vf/q_pred"] v_pred = critic_info["vf/v_pred"] adv = q_pred - v_pred - + # compute weights weights = self._get_adv_weights(adv) @@ -356,7 +326,7 @@ def _update_actor(self, actor_loss): loss=actor_loss, max_grad_norm=self.algo_config.actor.max_gradient_norm, ) - + def _get_adv_weights(self, adv): """ Helper function for computing advantage weights. 
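The V-function objective above is expectile regression: a squared error whose weight depends on the sign of the residual, so with vf_quantile above 0.5 the network is penalized more for underestimating the Q ensemble than for overestimating it, pushing V toward an upper expectile of Q. A minimal standalone sketch of the same loss:

    import torch

    def expectile_loss(v_pred, q_pred, tau):
        # tau in (0.5, 1) weights underestimation (err < 0) more heavily
        err = v_pred - q_pred.detach()
        sign = (err > 0).float()
        weight = (1 - sign) * tau + sign * (1 - tau)
        return (weight * err ** 2).mean()

With tau = 0.5 this reduces to an ordinary (half-weighted) mean squared error; the IQL paper uses values like 0.7 and 0.9.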
Called by @_compute_actor_loss @@ -368,13 +338,13 @@ def _get_adv_weights(self, adv): weights (torch.Tensor): weights computed based on advantage estimates, in shape (B,) where B is batch size """ - + # clip raw advantage values if self.algo_config.adv.clip_adv_value is not None: adv = adv.clamp(max=self.algo_config.adv.clip_adv_value) # compute weights based on advantage values - beta = self.algo_config.adv.beta # temperature factor + beta = self.algo_config.adv.beta # temperature factor weights = torch.exp(adv / beta) # clip final weights @@ -455,4 +425,4 @@ def get_action(self, obs_dict, goal_dict=None): """ assert not self.nets.training - return self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict) + return self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict) \ No newline at end of file diff --git a/robomimic/algo/iris.py b/robomimic/algo/iris.py index d5f29e39..7b441470 100644 --- a/robomimic/algo/iris.py +++ b/robomimic/algo/iris.py @@ -1,7 +1,6 @@ """ Implementation of IRIS (https://arxiv.org/abs/1911.05321). """ - import numpy as np from collections import OrderedDict from copy import deepcopy @@ -11,14 +10,7 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils from robomimic.config.config import Config -from robomimic.algo import ( - register_algo_factory_func, - algo_name_to_factory_func, - HBC, - ValuePlanner, - ValueAlgo, - GL_VAE, -) +from robomimic.algo import register_algo_factory_func, algo_name_to_factory_func, HBC, ValuePlanner, ValueAlgo, GL_VAE @register_algo_factory_func("iris") @@ -36,18 +28,13 @@ def algo_config_to_class(algo_config): pol_cls, _ = algo_name_to_factory_func("bc")(algo_config.actor) plan_cls, _ = algo_name_to_factory_func("gl")(algo_config.value_planner.planner) value_cls, _ = algo_name_to_factory_func("bcq")(algo_config.value_planner.value) - return IRIS, dict( - policy_algo_class=pol_cls, - planner_algo_class=plan_cls, - value_algo_class=value_cls, - ) + return IRIS, dict(policy_algo_class=pol_cls, planner_algo_class=plan_cls, value_algo_class=value_cls) class IRIS(HBC, ValueAlgo): """ Implementation of IRIS (https://arxiv.org/abs/1911.05321). 
""" - def __init__( self, planner_algo_class, @@ -87,13 +74,9 @@ def __init__( self.ac_dim = ac_dim self.device = device - self._subgoal_step_count = ( - 0 # current step count for deciding when to update subgoal - ) + self._subgoal_step_count = 0 # current step count for deciding when to update subgoal self._current_subgoal = None # latest subgoal - self._subgoal_update_interval = ( - self.algo_config.subgoal_update_interval - ) # subgoal update frequency + self._subgoal_update_interval = self.algo_config.subgoal_update_interval # subgoal update frequency self._subgoal_horizon = self.algo_config.value_planner.planner.subgoal_horizon self._actor_horizon = self.algo_config.actor.rnn.horizon @@ -108,13 +91,11 @@ def __init__( global_config=global_config, obs_key_shapes=obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) self.actor_goal_shapes = self.planner.subgoal_shapes - assert ( - not algo_config.latent_subgoal.enabled - ), "IRIS does not support latent subgoals" + assert not algo_config.latent_subgoal.enabled, "IRIS does not support latent subgoals" # only for the actor: override goal modalities and shapes to match the subgoal set by the planner actor_obs_key_shapes = deepcopy(obs_key_shapes) @@ -124,9 +105,7 @@ def __init__( assert actor_obs_key_shapes[k] == self.actor_goal_shapes[k] actor_obs_key_shapes.update(self.actor_goal_shapes) - goal_modalities = { - obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys() - } + goal_modalities = {obs_modality: [] for obs_modality in ObsUtils.OBS_MODALITY_CLASSES.keys()} for k in self.actor_goal_shapes.keys(): goal_modalities[ObsUtils.OBS_KEYS_TO_MODALITIES[k]].append(k) @@ -140,7 +119,7 @@ def __init__( global_config=global_config, obs_key_shapes=actor_obs_key_shapes, ac_dim=ac_dim, - device=device, + device=device ) def process_batch_for_training(self, batch): @@ -154,7 +133,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -164,22 +143,13 @@ def process_batch_for_training(self, batch): if self.algo_config.actor_use_random_subgoals: # optionally use randomly sampled step between [1, seq_length] as policy goal policy_subgoal_indices = torch.randint( - low=0, - high=self.global_config.train.seq_length, - size=(batch["actions"].shape[0],), - ) - goal_obs = TensorUtils.gather_sequence( - batch["next_obs"], policy_subgoal_indices - ) - goal_obs = TensorUtils.to_float( - TensorUtils.to_device(goal_obs, self.device) - ) + low=0, high=self.global_config.train.seq_length, size=(batch["actions"].shape[0],)) + goal_obs = TensorUtils.gather_sequence(batch["next_obs"], policy_subgoal_indices) + goal_obs = TensorUtils.to_float(TensorUtils.to_device(goal_obs, self.device)) input_batch["actor"]["goal_obs"] = goal_obs else: # otherwise, use planner subgoal target as goal for the policy - input_batch["actor"]["goal_obs"] = input_batch["planner"]["planner"][ - "target_subgoals" - ] + input_batch["actor"]["goal_obs"] = input_batch["planner"]["planner"]["target_subgoals"] # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -210,6 +180,4 @@ def get_state_action_value(self, obs_dict, actions, goal_dict=None): Returns: value (torch.Tensor): value tensor """ - return self.planner.get_state_action_value( - obs_dict=obs_dict, actions=actions, goal_dict=goal_dict - ) + return 
self.planner.get_state_action_value(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) diff --git a/robomimic/algo/td3_bc.py b/robomimic/algo/td3_bc.py index 188a5777..e324c54a 100644 --- a/robomimic/algo/td3_bc.py +++ b/robomimic/algo/td3_bc.py @@ -9,7 +9,6 @@ from the BCQ algo class) to be explicit and have implementation details self-contained in this file. """ - from collections import OrderedDict import torch @@ -49,7 +48,6 @@ class TD3_BC(PolicyAlgo, ValueAlgo): Default TD3_BC training, based on https://arxiv.org/abs/2106.06860 and https://github.com/sfujim/TD3_BC. """ - def __init__(self, **kwargs): PolicyAlgo.__init__(self, **kwargs) @@ -72,12 +70,12 @@ def _create_networks(self): with torch.no_grad(): for critic_ind in range(len(self.nets["critic"])): TorchUtils.hard_update( - source=self.nets["critic"][critic_ind], + source=self.nets["critic"][critic_ind], target=self.nets["critic_target"][critic_ind], ) TorchUtils.hard_update( - source=self.nets["actor"], + source=self.nets["actor"], target=self.nets["actor_target"], ) @@ -96,9 +94,7 @@ def _create_critics(self): mlp_layer_dims=self.algo_config.critic.layer_dims, value_bounds=self.algo_config.critic.value_bounds, goal_shapes=self.goal_shapes, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) # Q network ensemble and target ensemble @@ -121,9 +117,7 @@ def _create_actor(self): goal_shapes=self.goal_shapes, ac_dim=self.ac_dim, mlp_layer_dims=self.algo_config.actor.layer_dims, - encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config( - self.obs_config.encoder - ), + encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder), ) self.nets["actor"] = actor_class(**actor_args) @@ -137,13 +131,9 @@ def _check_epoch(self, net_name, epoch): net_name (str): name of network in @self.nets and @self.optim_params epoch (int): epoch number """ - epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or ( - epoch >= self.optim_params[net_name]["start_epoch"] - ) - epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or ( - epoch < self.optim_params[net_name]["end_epoch"] - ) - return epoch_start_check and epoch_end_check + epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"]) + epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"]) + return (epoch_start_check and epoch_end_check) def set_discount(self, discount): """ @@ -164,7 +154,7 @@ def process_batch_for_training(self, batch): Returns: input_batch (dict): processed and filtered batch that - will be used for training + will be used for training """ input_batch = dict() @@ -174,25 +164,19 @@ def process_batch_for_training(self, batch): # remove temporal batches for all input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]} - input_batch["next_obs"] = { - k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"] - } - input_batch["goal_obs"] = batch.get( - "goal_obs", None - ) # goals may not be present + input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present input_batch["actions"] = batch["actions"][:, 0, :] # note: ensure scalar signals (rewards, done) retain last dimension of 1 to be compatible with model outputs # single 
timestep reward is discounted sum of intermediate rewards in sequence reward_seq = batch["rewards"][:, :n_step] - discounts = torch.pow( - self.algo_config.discount, torch.arange(n_step).float() - ).unsqueeze(0) + discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0) input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1) # discount rate will be gamma^N for computing n-step returns - new_discount = self.algo_config.discount**n_step + new_discount = (self.algo_config.discount ** n_step) self.set_discount(new_discount) # consider this n-step sequence done if any intermediate dones are present @@ -201,13 +185,9 @@ def process_batch_for_training(self, batch): if self.algo_config.infinite_horizon: # scale terminal rewards by 1 / (1 - gamma) for infinite horizon MDPs - done_inds = ( - input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] - ) + done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0] if done_inds.shape[0] > 0: - input_batch["rewards"][done_inds] = input_batch["rewards"][ - done_inds - ] * (1.0 / (1.0 - self.discount)) + input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount)) # we move to device first before float conversion because image observation modalities will be uint8 - # this minimizes the amount of data transferred to GPU @@ -244,14 +224,14 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): goal_s_batch = batch["goal_obs"] # 1 if not done, 0 otherwise - done_mask_batch = 1.0 - batch["dones"] + done_mask_batch = 1. - batch["dones"] info["done_masks"] = done_mask_batch # Bellman backup for Q-targets q_targets = self._get_target_values( - next_states=ns_batch, - goal_states=goal_s_batch, - rewards=r_batch, + next_states=ns_batch, + goal_states=goal_s_batch, + rewards=r_batch, dones=done_mask_batch, ) info["critic/q_targets"] = q_targets @@ -259,10 +239,10 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): # Train all critics using this set of targets for regression for critic_ind, critic in enumerate(self.nets["critic"]): critic_loss = self._compute_critic_loss( - critic=critic, - states=s_batch, - actions=a_batch, - goal_states=goal_s_batch, + critic=critic, + states=s_batch, + actions=a_batch, + goal_states=goal_s_batch, q_targets=q_targets, ) info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss @@ -271,12 +251,10 @@ def _train_critic_on_batch(self, batch, epoch, no_backprop=False): critic_grad_norms = TorchUtils.backprop_for_loss( net=self.nets["critic"][critic_ind], optim=self.optimizers["critic"][critic_ind], - loss=critic_loss, + loss=critic_loss, max_grad_norm=self.algo_config.critic.max_gradient_norm, ) - info["critic/critic{}_grad_norms".format(critic_ind + 1)] = ( - critic_grad_norms - ) + info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms return info @@ -342,27 +320,19 @@ def _get_target_values(self, next_states, goal_states, rewards, dones): next_target_actions = self.nets["actor_target"](next_states, goal_states) noise = ( torch.randn_like(next_target_actions) * self.algo_config.actor.noise_std - ).clamp( - -self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip - ) + ).clamp(-self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip) next_actions = (next_target_actions + noise).clamp(-1.0, 1.0) # TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates - all_value_targets = 
self.nets["critic_target"][0]( - next_states, next_actions, goal_states - ).reshape(-1, 1) + all_value_targets = self.nets["critic_target"][0](next_states, next_actions, goal_states).reshape(-1, 1) max_value_targets = all_value_targets min_value_targets = all_value_targets for critic_target in self.nets["critic_target"][1:]: - all_value_targets = critic_target( - next_states, next_actions, goal_states - ).reshape(-1, 1) + all_value_targets = critic_target(next_states, next_actions, goal_states).reshape(-1, 1) max_value_targets = torch.max(max_value_targets, all_value_targets) min_value_targets = torch.min(min_value_targets, all_value_targets) - value_targets = ( - self.algo_config.critic.ensemble.weight * min_value_targets - + (1.0 - self.algo_config.critic.ensemble.weight) * max_value_targets - ) + value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \ + (1. - self.algo_config.critic.ensemble.weight) * max_value_targets q_targets = rewards + dones * self.discount * value_targets return q_targets @@ -411,13 +381,11 @@ def train_on_batch(self, batch, epoch, validate=False): info = PolicyAlgo.train_on_batch(self, batch, epoch, validate=validate) # Critic training - no_critic_backprop = validate or ( - not self._check_epoch(net_name="critic", epoch=epoch) - ) + no_critic_backprop = validate or (not self._check_epoch(net_name="critic", epoch=epoch)) with TorchUtils.maybe_no_grad(no_grad=no_critic_backprop): critic_info = self._train_critic_on_batch( - batch=batch, - epoch=epoch, + batch=batch, + epoch=epoch, no_backprop=no_critic_backprop, ) info.update(critic_info) @@ -426,39 +394,35 @@ def train_on_batch(self, batch, epoch, validate=False): if not no_critic_backprop: # update counter only on critic training gradient steps self.actor_update_counter += 1 - do_actor_update = ( - self.actor_update_counter % self.algo_config.actor.update_freq == 0 - ) + do_actor_update = (self.actor_update_counter % self.algo_config.actor.update_freq == 0) # Actor training - no_actor_backprop = validate or ( - not self._check_epoch(net_name="actor", epoch=epoch) - ) + no_actor_backprop = validate or (not self._check_epoch(net_name="actor", epoch=epoch)) no_actor_backprop = no_actor_backprop or (not do_actor_update) with TorchUtils.maybe_no_grad(no_grad=no_actor_backprop): actor_info = self._train_actor_on_batch( - batch=batch, - epoch=epoch, + batch=batch, + epoch=epoch, no_backprop=no_actor_backprop, ) info.update(actor_info) if not no_actor_backprop: - # to match original implementation, only update target networks on + # to match original implementation, only update target networks on # actor gradient steps with torch.no_grad(): # update the target critic networks for critic_ind in range(len(self.nets["critic"])): TorchUtils.soft_update( - source=self.nets["critic"][critic_ind], - target=self.nets["critic_target"][critic_ind], + source=self.nets["critic"][critic_ind], + target=self.nets["critic_target"][critic_ind], tau=self.algo_config.target_tau, ) # update target actor network TorchUtils.soft_update( - source=self.nets["actor"], - target=self.nets["actor_target"], + source=self.nets["actor"], + target=self.nets["actor_target"], tau=self.algo_config.target_tau, ) @@ -483,17 +447,14 @@ def log_info(self, info): optims = [self.optimizers[k]] if k == "critic": # account for critic having one optimizer per ensemble member - keys = [ - "{}{}".format(k, critic_ind) - for critic_ind in range(len(self.nets["critic"])) - ] + keys = ["{}{}".format(k, critic_ind) for critic_ind in 
range(len(self.nets["critic"]))] optims = self.optimizers[k] for kp, optimizer in zip(keys, optims): for i, param_group in enumerate(optimizer.param_groups): loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"] # extract relevant logs for critic, and actor - loss_log["Loss"] = 0.0 + loss_log["Loss"] = 0. for loss_logger in [self._log_critic_info, self._log_actor_info]: this_log = loss_logger(info) if "Loss" in this_log: @@ -510,20 +471,14 @@ def _log_critic_info(self, info): """ loss_log = OrderedDict() if "done_masks" in info: - loss_log["Critic/Done_Mask_Percentage"] = ( - 100.0 * torch.mean(info["done_masks"]).item() - ) + loss_log["Critic/Done_Mask_Percentage"] = 100. * torch.mean(info["done_masks"]).item() if "critic/q_targets" in info: loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item() - loss_log["Loss"] = 0.0 + loss_log["Loss"] = 0. for critic_ind in range(len(self.nets["critic"])): - loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info[ - "critic/critic{}_loss".format(critic_ind + 1) - ].item() + loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item() if "critic/critic{}_grad_norms".format(critic_ind + 1) in info: - loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info[ - "critic/critic{}_grad_norms".format(critic_ind + 1) - ] + loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)] loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] return loss_log diff --git a/robomimic/config/__init__.py b/robomimic/config/__init__.py index fb0fae75..b4f857f1 100644 --- a/robomimic/config/__init__.py +++ b/robomimic/config/__init__.py @@ -10,5 +10,4 @@ from robomimic.config.hbc_config import HBCConfig from robomimic.config.iris_config import IRISConfig from robomimic.config.td3_bc_config import TD3_BCConfig - # from robomimic.config.diffusion_policy_config import DiffusionPolicyConfig diff --git a/robomimic/config/base_config.py b/robomimic/config/base_config.py index c71e4c0f..62129cd0 100644 --- a/robomimic/config/base_config.py +++ b/robomimic/config/base_config.py @@ -4,7 +4,7 @@ the correct config class given the algorithm name. """ -import six # preserve metaclass compatibility between python 2 and 3 +import six # preserve metaclass compatibility between python 2 and 3 from copy import deepcopy import robomimic @@ -27,11 +27,8 @@ def config_factory(algo_name, dic=None): a dictionary to instantiate the config from the dictionary. """ if algo_name not in REGISTERED_CONFIGS: - raise Exception( - "Config for algo name {} not found. Make sure it is a registered config among: {}".format( - algo_name, ", ".join(REGISTERED_CONFIGS) - ) - ) + raise Exception("Config for algo name {} not found. Make sure it is a registered config among: {}".format( + algo_name, ', '.join(REGISTERED_CONFIGS))) return REGISTERED_CONFIGS[algo_name](dict_to_load=dic) @@ -40,7 +37,6 @@ class ConfigMeta(type): Define a metaclass for constructing a config class. It registers configs into the global registry. 
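The registry populated above is what makes config_factory work: any BaseConfig subclass is registered under its ALGO_NAME as soon as its module is imported, and can then be constructed by string name. A minimal usage sketch (the values_unlocked context manager is assumed from robomimic's Config class, since configs are locked after construction; the overridden keys are illustrative):

    from robomimic.config import config_factory

    # look up the registered config class for "bc" and instantiate it
    config = config_factory(algo_name="bc")
    with config.values_unlocked():
        config.experiment.name = "bc_test_run"
        config.train.batch_size = 100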
""" - def __new__(meta, name, bases, class_dict): cls = super(ConfigMeta, meta).__new__(meta, name, bases, class_dict) if cls.__name__ != "BaseConfig": @@ -78,87 +74,64 @@ def ALGO_NAME(cls): def experiment_config(self): """ - This function populates the `config.experiment` attribute of the config, - which has several experiment settings such as the name of the training run, - whether to do logging, whether to save models (and how often), whether to render - videos, and whether to do rollouts (and how often). This class has a default + This function populates the `config.experiment` attribute of the config, + which has several experiment settings such as the name of the training run, + whether to do logging, whether to save models (and how often), whether to render + videos, and whether to do rollouts (and how often). This class has a default implementation that usually doesn't need to be overriden. """ - self.experiment.name = "test" # name of experiment used to make log files - self.experiment.validate = False # whether to do validation or not - self.experiment.logging.terminal_output_to_txt = ( - True # whether to log stdout to txt file - ) - self.experiment.logging.log_tb = True # enable tensorboard logging - self.experiment.logging.log_wandb = False # enable wandb logging - self.experiment.logging.wandb_proj_name = "debug" # project name if using wandb + self.experiment.name = "test" # name of experiment used to make log files + self.experiment.validate = False # whether to do validation or not + self.experiment.logging.terminal_output_to_txt = True # whether to log stdout to txt file + self.experiment.logging.log_tb = True # enable tensorboard logging + self.experiment.logging.log_wandb = False # enable wandb logging + self.experiment.logging.wandb_proj_name = "debug" # project name if using wandb + ## save config - if and when to save model checkpoints ## - self.experiment.save.enabled = ( - True # whether model saving should be enabled or disabled - ) - self.experiment.save.every_n_seconds = ( - None # save model every n seconds (set to None to disable) - ) - self.experiment.save.every_n_epochs = ( - 50 # save model every n epochs (set to None to disable) - ) - self.experiment.save.epochs = [] # save model on these specific epochs - self.experiment.save.on_best_validation = ( - False # save models that achieve best validation score - ) - self.experiment.save.on_best_rollout_return = ( - False # save models that achieve best rollout return - ) - self.experiment.save.on_best_rollout_success_rate = ( - True # save models that achieve best success rate - ) + self.experiment.save.enabled = True # whether model saving should be enabled or disabled + self.experiment.save.every_n_seconds = None # save model every n seconds (set to None to disable) + self.experiment.save.every_n_epochs = 50 # save model every n epochs (set to None to disable) + self.experiment.save.epochs = [] # save model on these specific epochs + self.experiment.save.on_best_validation = False # save models that achieve best validation score + self.experiment.save.on_best_rollout_return = False # save models that achieve best rollout return + self.experiment.save.on_best_rollout_success_rate = True # save models that achieve best success rate # epoch definitions - if not None, set an epoch to be this many gradient steps, else the full dataset size will be used - self.experiment.epoch_every_n_steps = ( - 100 # number of gradient steps in train epoch (None for full dataset pass) - ) - 
self.experiment.validation_epoch_every_n_steps = ( - 10 # number of gradient steps in valid epoch (None for full dataset pass) - ) + self.experiment.epoch_every_n_steps = 100 # number of gradient steps in train epoch (None for full dataset pass) + self.experiment.validation_epoch_every_n_steps = 10 # number of gradient steps in valid epoch (None for full dataset pass) # envs to evaluate model on (assuming rollouts are enabled), to override the metadata stored in dataset - self.experiment.env = None # no need to set this (unless you want to override) - self.experiment.additional_envs = ( - None # additional environments that should get evaluated - ) + self.experiment.env = None # no need to set this (unless you want to override) + self.experiment.additional_envs = None # additional environments that should get evaluated + ## rendering config ## - self.experiment.render = False # render on-screen or not - self.experiment.render_video = True # render evaluation rollouts to videos - self.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints - self.experiment.video_skip = ( - 5 # render video frame every n environment steps during rollout - ) + self.experiment.render = False # render on-screen or not + self.experiment.render_video = True # render evaluation rollouts to videos + self.experiment.keep_all_videos = False # save all videos, instead of only saving those for saved model checkpoints + self.experiment.video_skip = 5 # render video frame every n environment steps during rollout + ## evaluation rollout config ## - self.experiment.rollout.enabled = True # enable evaluation rollouts - self.experiment.rollout.n = 50 # number of rollouts per evaluation - self.experiment.rollout.horizon = 400 # maximum number of env steps per rollout - self.experiment.rollout.rate = 50 # do rollouts every @rate epochs - self.experiment.rollout.warmstart = ( - 0 # number of epochs to wait before starting rollouts - ) - self.experiment.rollout.terminate_on_success = ( - True # end rollout early after task success - ) + self.experiment.rollout.enabled = True # enable evaluation rollouts + self.experiment.rollout.n = 50 # number of rollouts per evaluation + self.experiment.rollout.horizon = 400 # maximum number of env steps per rollout + self.experiment.rollout.rate = 50 # do rollouts every @rate epochs + self.experiment.rollout.warmstart = 0 # number of epochs to wait before starting rollouts + self.experiment.rollout.terminate_on_success = True # end rollout early after task success def train_config(self): """ - This function populates the `config.train` attribute of the config, which - has several settings related to the training process, such as the dataset - to use for training, and how the data loader should load the data. This + This function populates the `config.train` attribute of the config, which + has several settings related to the training process, such as the dataset + to use for training, and how the data loader should load the data. This class has a default implementation that usually doesn't need to be overriden. """ # Path to hdf5 dataset to use for training - self.train.data = None + self.train.data = None # Write all results to this directory. A new folder with the timestamp will be created # in this directory, and it will contain three subfolders - "log", "models", and "videos". @@ -167,12 +140,13 @@ class has a default implementation that usually doesn't need to be overriden. # videos. 
self.train.output_dir = "../{}_trained_models".format(self.algo_name) + ## dataset loader config ## # num workers for loading data - generally set to 0 for low-dim datasets, and 2 for image datasets - self.train.num_data_workers = 0 + self.train.num_data_workers = 0 - # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is + # One of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 in memory - this is # by far the fastest for data loading. Set to "low_dim" to cache all non-image data. Set # to None to use no caching - in this case, every batch sample is retrieved via file i/o. # You should almost never set this to None, even for large image datasets. @@ -189,7 +163,7 @@ class has a default implementation that usually doesn't need to be overriden. # in utils/dataset.py for more information. self.train.hdf5_normalize_obs = False - # if provided, use the list of demo keys under the hdf5 group "mask/@hdf5_filter_key" for training, instead + # if provided, use the list of demo keys under the hdf5 group "mask/@hdf5_filter_key" for training, instead # of the full dataset. This provides a convenient way to train on only a subset of the trajectories in a dataset. self.train.hdf5_filter_key = None @@ -207,62 +181,55 @@ class has a default implementation that usually doesn't need to be overriden. # keys from hdf5 to load into each batch, besides "obs" and "next_obs". If algorithms # require additional keys from each trajectory in the hdf5, they should be specified here. self.train.dataset_keys = ( - "actions", - "rewards", + "actions", + "rewards", "dones", ) # one of [None, "last"] - set to "last" to include goal observations in each batch self.train.goal_mode = None + ## learning config ## - self.train.cuda = True # use GPU or not - self.train.batch_size = 100 # batch size - self.train.num_epochs = 2000 # number of training epochs - self.train.seed = 1 # seed for training (for reproducibility) + self.train.cuda = True # use GPU or not + self.train.batch_size = 100 # batch size + self.train.num_epochs = 2000 # number of training epochs + self.train.seed = 1 # seed for training (for reproducibility) def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its - training and test-time behavior should be populated here. This function should be + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its + training and test-time behavior should be populated here. This function should be implemented by every subclass. """ pass def observation_config(self): """ - This function populates the `config.observation` attribute of the config, and is given - to the `Algo` subclass (see `algo/algo.py`) for each algorithm through the `obs_config` - argument to the constructor. This portion of the config is used to specify what - observation modalities should be used by the networks for training, and how the - observation modalities should be encoded by the networks. 
While this class has a - default implementation that usually doesn't need to be overriden, certain algorithm - configs may choose to, in order to have seperate configs for different networks - in the algorithm. + This function populates the `config.observation` attribute of the config, and is given + to the `Algo` subclass (see `algo/algo.py`) for each algorithm through the `obs_config` + argument to the constructor. This portion of the config is used to specify what + observation modalities should be used by the networks for training, and how the + observation modalities should be encoded by the networks. While this class has a + default implementation that usually doesn't need to be overriden, certain algorithm + configs may choose to, in order to have seperate configs for different networks + in the algorithm. """ # observation modalities - self.observation.modalities.obs.low_dim = ( - [ # specify low-dim observations for agent - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - ) - self.observation.modalities.obs.rgb = ( - [] - ) # specify rgb image observations for agent + self.observation.modalities.obs.low_dim = [ # specify low-dim observations for agent + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + self.observation.modalities.obs.rgb = [] # specify rgb image observations for agent self.observation.modalities.obs.depth = [] self.observation.modalities.obs.scan = [] - self.observation.modalities.goal.low_dim = ( - [] - ) # specify low-dim goal observations to condition agent on - self.observation.modalities.goal.rgb = ( - [] - ) # specify rgb image goal observations to condition agent on + self.observation.modalities.goal.low_dim = [] # specify low-dim goal observations to condition agent on + self.observation.modalities.goal.rgb = [] # specify rgb image goal observations to condition agent on self.observation.modalities.goal.depth = [] self.observation.modalities.goal.scan = [] self.observation.modalities.obs.do_not_lock_keys() @@ -273,30 +240,22 @@ def observation_config(self): # =============== Low Dim default encoder (no encoder) =============== self.observation.encoder.low_dim.core_class = None - self.observation.encoder.low_dim.core_kwargs = Config() # No kwargs by default + self.observation.encoder.low_dim.core_kwargs = Config() # No kwargs by default self.observation.encoder.low_dim.core_kwargs.do_not_lock_keys() # Low Dim: Obs Randomizer settings self.observation.encoder.low_dim.obs_randomizer_class = None - self.observation.encoder.low_dim.obs_randomizer_kwargs = ( - Config() - ) # No kwargs by default + self.observation.encoder.low_dim.obs_randomizer_kwargs = Config() # No kwargs by default self.observation.encoder.low_dim.obs_randomizer_kwargs.do_not_lock_keys() # =============== RGB default encoder (ResNet backbone + linear layer output) =============== - self.observation.encoder.rgb.core_class = "VisualCore" # Default VisualCore class combines backbone (like ResNet-18) with pooling operation (like spatial softmax) - self.observation.encoder.rgb.core_kwargs = ( - Config() - ) # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.rgb.core_class = "VisualCore" # Default VisualCore class combines backbone (like ResNet-18) with pooling operation (like spatial softmax) + self.observation.encoder.rgb.core_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.rgb.core_kwargs.do_not_lock_keys() # RGB: Obs 
Randomizer settings - self.observation.encoder.rgb.obs_randomizer_class = ( - None # Can set to 'CropRandomizer' to use crop randomization - ) - self.observation.encoder.rgb.obs_randomizer_kwargs = ( - Config() - ) # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.rgb.obs_randomizer_class = None # Can set to 'CropRandomizer' to use crop randomization + self.observation.encoder.rgb.obs_randomizer_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.rgb.obs_randomizer_kwargs.do_not_lock_keys() # Allow for other custom modalities to be specified @@ -309,40 +268,27 @@ def observation_config(self): self.observation.encoder.scan = deepcopy(self.observation.encoder.rgb) # Scan: Modify the core class + kwargs, otherwise, is same as rgb encoder - self.observation.encoder.scan.core_class = ( - "ScanCore" # Default ScanCore class uses Conv1D to process this modality - ) - self.observation.encoder.scan.core_kwargs = ( - Config() - ) # See models/obs_core.py for important kwargs to set and defaults used + self.observation.encoder.scan.core_class = "ScanCore" # Default ScanCore class uses Conv1D to process this modality + self.observation.encoder.scan.core_kwargs = Config() # See models/obs_core.py for important kwargs to set and defaults used self.observation.encoder.scan.core_kwargs.do_not_lock_keys() def meta_config(self): """ - This function populates the `config.meta` attribute of the config. This portion of the config + This function populates the `config.meta` attribute of the config. This portion of the config is used to specify job information primarily for hyperparameter sweeps. It contains hyperparameter keys and values, which are populated automatically by the hyperparameter config generator (see `utils/hyperparam_utils.py`). These values are read by the wandb logger (see `utils/log_utils.py`) to set job tags. 
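In practice the wandb logger only needs to zip these two lists into readable job tags. A sketch of that idea (the exact tag format used in utils/log_utils.py may differ):

    def hp_tags(hp_keys, hp_values):
        # e.g. ["lr=0.0001", "gmm_modes=5"] for a two-key sweep
        return ["{}={}".format(k, v) for k, v in zip(hp_keys, hp_values)]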
""" - - self.meta.hp_base_config_file = None # base config file in hyperparam sweep - self.meta.hp_keys = [] # relevant keys (swept) in hyperparam sweep - self.meta.hp_values = [] # values corresponding to keys in hyperparam sweep - + + self.meta.hp_base_config_file = None # base config file in hyperparam sweep + self.meta.hp_keys = [] # relevant keys (swept) in hyperparam sweep + self.meta.hp_values = [] # values corresponding to keys in hyperparam sweep + @property def use_goals(self): # whether the agent is goal-conditioned - return ( - len( - [ - obs_key - for modality in self.observation.modalities.goal.values() - for obs_key in modality - ] - ) - > 0 - ) + return len([obs_key for modality in self.observation.modalities.goal.values() for obs_key in modality]) > 0 @property def all_obs_keys(self): @@ -354,18 +300,11 @@ def all_obs_keys(self): n-array: all observation keys used for this model """ # pool all modalities - return sorted( - tuple( - set( - [ - obs_key - for group in [ - self.observation.modalities.obs.values(), - self.observation.modalities.goal.values(), - ] - for modality in group - for obs_key in modality - ] - ) - ) - ) + return sorted(tuple(set([ + obs_key for group in [ + self.observation.modalities.obs.values(), + self.observation.modalities.goal.values() + ] + for modality in group + for obs_key in modality + ]))) diff --git a/robomimic/config/bc_config.py b/robomimic/config/bc_config.py index 8d3c1d0e..1f701c68 100644 --- a/robomimic/config/bc_config.py +++ b/robomimic/config/bc_config.py @@ -17,143 +17,90 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
""" # optimization parameters self.algo.optim_params.policy.optimizer_type = "adam" - self.algo.optim_params.policy.learning_rate.initial = ( - 1e-4 # policy learning rate - ) - self.algo.optim_params.policy.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.policy.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.policy.learning_rate.scheduler_type = ( - "multistep" # learning rate scheduler ("multistep", "linear", etc) - ) - self.algo.optim_params.policy.regularization.L2 = ( - 0.00 # L2 regularization strength - ) + self.algo.optim_params.policy.learning_rate.initial = 1e-4 # policy learning rate + self.algo.optim_params.policy.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.policy.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.policy.learning_rate.scheduler_type = "multistep" # learning rate scheduler ("multistep", "linear", etc) + self.algo.optim_params.policy.regularization.L2 = 0.00 # L2 regularization strength # loss weights - self.algo.loss.l2_weight = 1.0 # L2 loss weight - self.algo.loss.l1_weight = 0.0 # L1 loss weight - self.algo.loss.cos_weight = 0.0 # cosine loss weight + self.algo.loss.l2_weight = 1.0 # L2 loss weight + self.algo.loss.l1_weight = 0.0 # L1 loss weight + self.algo.loss.cos_weight = 0.0 # cosine loss weight # MLP network architecture (layers after observation encoder and RNN, if present) self.algo.actor_layer_dims = (1024, 1024) # stochastic Gaussian policy settings - self.algo.gaussian.enabled = False # whether to train a Gaussian policy - self.algo.gaussian.fixed_std = ( - False # whether to train std output or keep it constant - ) - self.algo.gaussian.init_std = 0.1 # initial standard deviation (or constant) - self.algo.gaussian.min_std = 0.01 # minimum std output from network - self.algo.gaussian.std_activation = ( - "softplus" # activation to use for std output from policy net - ) - self.algo.gaussian.low_noise_eval = True # low-std at test-time + self.algo.gaussian.enabled = False # whether to train a Gaussian policy + self.algo.gaussian.fixed_std = False # whether to train std output or keep it constant + self.algo.gaussian.init_std = 0.1 # initial standard deviation (or constant) + self.algo.gaussian.min_std = 0.01 # minimum std output from network + self.algo.gaussian.std_activation = "softplus" # activation to use for std output from policy net + self.algo.gaussian.low_noise_eval = True # low-std at test-time # stochastic GMM policy settings - self.algo.gmm.enabled = False # whether to train a GMM policy - self.algo.gmm.num_modes = 5 # number of GMM modes - self.algo.gmm.min_std = 0.0001 # minimum std output from network - self.algo.gmm.std_activation = ( - "softplus" # activation to use for std output from policy net - ) - self.algo.gmm.low_noise_eval = True # low-std at test-time + self.algo.gmm.enabled = False # whether to train a GMM policy + self.algo.gmm.num_modes = 5 # number of GMM modes + self.algo.gmm.min_std = 0.0001 # minimum std output from network + self.algo.gmm.std_activation = "softplus" # activation to use for std output from policy net + self.algo.gmm.low_noise_eval = True # low-std at test-time # stochastic VAE policy settings - self.algo.vae.enabled = False # whether to train a VAE policy - self.algo.vae.latent_dim = ( - 14 # VAE latent dimnsion - set to twice the dimensionality of action space - ) - 
self.algo.vae.latent_clip = ( - None # clip latent space when decoding (set to None to disable) - ) - self.algo.vae.kl_weight = 1.0 # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO + self.algo.vae.enabled = False # whether to train a VAE policy + self.algo.vae.latent_dim = 14 # VAE latent dimension - set to twice the dimensionality of action space + self.algo.vae.latent_clip = None # clip latent space when decoding (set to None to disable) + self.algo.vae.kl_weight = 1. # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO # VAE decoder settings - self.algo.vae.decoder.is_conditioned = ( - True # whether decoder should condition on observation - ) - self.algo.vae.decoder.reconstruction_sum_across_elements = ( - False # sum instead of mean for reconstruction loss - ) + self.algo.vae.decoder.is_conditioned = True # whether decoder should condition on observation + self.algo.vae.decoder.reconstruction_sum_across_elements = False # sum instead of mean for reconstruction loss # VAE prior settings - self.algo.vae.prior.learn = ( - False # learn Gaussian / GMM prior instead of N(0, 1) - ) - self.algo.vae.prior.is_conditioned = ( - False # whether to condition prior on observations - ) - self.algo.vae.prior.use_gmm = False # whether to use GMM prior - self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes - self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights - self.algo.vae.prior.use_categorical = False # whether to use categorical prior - self.algo.vae.prior.categorical_dim = ( - 10 # the number of categorical classes for each latent dimension - ) - self.algo.vae.prior.categorical_gumbel_softmax_hard = ( - False # use hard selection in forward pass - ) - self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp - self.algo.vae.prior.categorical_temp_anneal_step = ( - 0.001 # linear temp annealing rate - ) - self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp - - self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions - self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions - self.algo.vae.prior_layer_dims = ( - 300, - 400, - ) # prior MLP layer dimensions (if learning conditioned prior) + self.algo.vae.prior.learn = False # learn Gaussian / GMM prior instead of N(0, 1) + self.algo.vae.prior.is_conditioned = False # whether to condition prior on observations + self.algo.vae.prior.use_gmm = False # whether to use GMM prior + self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes + self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights + self.algo.vae.prior.use_categorical = False # whether to use categorical prior + self.algo.vae.prior.categorical_dim = 10 # the number of categorical classes for each latent dimension + self.algo.vae.prior.categorical_gumbel_softmax_hard = False # use hard selection in forward pass + self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp + self.algo.vae.prior.categorical_temp_anneal_step = 0.001 # linear temp annealing rate + self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp + + self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions + self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions + self.algo.vae.prior_layer_dims = (300, 400) # prior MLP layer dimensions (if learning conditioned prior) # RNN policy settings - self.algo.rnn.enabled = False # whether to train RNN policy -
self.algo.rnn.horizon = ( - 10 # unroll length for RNN - should usually match train.seq_length - ) - self.algo.rnn.hidden_dim = 400 # hidden dimension size - self.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" - self.algo.rnn.num_layers = 2 # number of RNN layers that are stacked - self.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) - self.algo.rnn.kwargs.bidirectional = False # rnn kwargs + self.algo.rnn.enabled = False # whether to train RNN policy + self.algo.rnn.horizon = 10 # unroll length for RNN - should usually match train.seq_length + self.algo.rnn.hidden_dim = 400 # hidden dimension size + self.algo.rnn.rnn_type = "LSTM" # rnn type - one of "LSTM" or "GRU" + self.algo.rnn.num_layers = 2 # number of RNN layers that are stacked + self.algo.rnn.open_loop = False # if True, action predictions are only based on a single observation (not sequence) + self.algo.rnn.kwargs.bidirectional = False # rnn kwargs self.algo.rnn.kwargs.do_not_lock_keys() # Transformer policy settings - self.algo.transformer.enabled = False # whether to train transformer policy - self.algo.transformer.context_length = 10 # length of (s, a) seqeunces to feed to transformer - should usually match train.frame_stack - self.algo.transformer.embed_dim = ( - 512 # dimension for embeddings used by transformer - ) - self.algo.transformer.num_layers = 6 # number of transformer blocks to stack - self.algo.transformer.num_heads = 8 # number of attention heads for each transformer block (should divide embed_dim evenly) - self.algo.transformer.emb_dropout = ( - 0.1 # dropout probability for embedding inputs in transformer - ) - self.algo.transformer.attn_dropout = ( - 0.1 # dropout probability for attention outputs for each transformer block - ) - self.algo.transformer.block_output_dropout = ( - 0.1 # dropout probability for final outputs for each transformer block - ) - self.algo.transformer.sinusoidal_embedding = ( - False # if True, use standard positional encodings (sin/cos) - ) - self.algo.transformer.activation = ( - "gelu" # activation function for MLP in Transformer Block - ) - self.algo.transformer.supervise_all_steps = False # if true, supervise all intermediate actions, otherwise only final one - self.algo.transformer.nn_parameter_for_timesteps = ( - True # if true, use nn.Parameter otherwise use nn.Embedding - ) + self.algo.transformer.enabled = False # whether to train transformer policy + self.algo.transformer.context_length = 10 # length of (s, a) sequences to feed to transformer - should usually match train.frame_stack + self.algo.transformer.embed_dim = 512 # dimension for embeddings used by transformer + self.algo.transformer.num_layers = 6 # number of transformer blocks to stack + self.algo.transformer.num_heads = 8 # number of attention heads for each transformer block (should divide embed_dim evenly) + self.algo.transformer.emb_dropout = 0.1 # dropout probability for embedding inputs in transformer + self.algo.transformer.attn_dropout = 0.1 # dropout probability for attention outputs for each transformer block + self.algo.transformer.block_output_dropout = 0.1 # dropout probability for final outputs for each transformer block + self.algo.transformer.sinusoidal_embedding = False # if True, use standard positional encodings (sin/cos) + self.algo.transformer.activation = "gelu" # activation function for MLP in Transformer Block + self.algo.transformer.supervise_all_steps = False # if true, supervise all intermediate actions,
otherwise only final one + self.algo.transformer.nn_parameter_for_timesteps = True # if true, use nn.Parameter otherwise use nn.Embedding diff --git a/robomimic/config/bcq_config.py b/robomimic/config/bcq_config.py index e250ea63..e28f5ba5 100644 --- a/robomimic/config/bcq_config.py +++ b/robomimic/config/bcq_config.py @@ -11,111 +11,63 @@ class BCQConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ - + # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = ( - 1e-3 # critic learning rate - ) - self.algo.optim_params.critic.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.critic.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - self.algo.optim_params.critic.start_epoch = ( - -1 - ) # number of epochs before starting critic training (-1 means start right away) - self.algo.optim_params.critic.end_epoch = ( - -1 - ) # number of epochs before ending critic training (-1 means start right away) + self.algo.optim_params.critic.learning_rate.initial = 1e-3 # critic learning rate + self.algo.optim_params.critic.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.critic.start_epoch = -1 # number of epochs before starting critic training (-1 means start right away) + self.algo.optim_params.critic.end_epoch = -1 # number of epochs before ending critic training (-1 means never end) - self.algo.optim_params.action_sampler.learning_rate.initial = ( - 1e-3 # action sampler learning rate - ) - self.algo.optim_params.action_sampler.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.action_sampler.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.action_sampler.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - self.algo.optim_params.action_sampler.start_epoch = ( - -1 - ) # number of epochs before starting action sampler training (-1 means start right away) - self.algo.optim_params.action_sampler.end_epoch = ( - -1 - ) # number of epochs before ending action sampler training (-1 means start right away) + self.algo.optim_params.action_sampler.learning_rate.initial = 1e-3 # action sampler learning rate + self.algo.optim_params.action_sampler.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.action_sampler.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.action_sampler.regularization.L2 = 0.00 # L2 regularization strength +
self.algo.optim_params.action_sampler.start_epoch = -1 # number of epochs before starting action sampler training (-1 means start right away) + self.algo.optim_params.action_sampler.end_epoch = -1 # number of epochs before ending action sampler training (-1 means never end) - self.algo.optim_params.actor.learning_rate.initial = 1e-3 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.actor.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - self.algo.optim_params.actor.start_epoch = ( - -1 - ) # number of epochs before starting actor training (-1 means start right away) - self.algo.optim_params.actor.end_epoch = ( - -1 - ) # number of epochs before ending actor training (-1 means start right away) + self.algo.optim_params.actor.learning_rate.initial = 1e-3 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.actor.start_epoch = -1 # number of epochs before starting actor training (-1 means start right away) + self.algo.optim_params.actor.end_epoch = -1 # number of epochs before ending actor training (-1 means never end) # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks - self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon + self.algo.discount = 0.99 # discount factor to use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks + self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = ( - None # L2 gradient clipping for critic (None to use no clipping) - ) - self.algo.critic.value_bounds = ( - None # optional 2-tuple to ensure lower and upper bound on value estimates - ) - self.algo.critic.num_action_samples = 10 # number of actions to sample per training batch to get target critic value - self.algo.critic.num_action_samples_rollout = ( - 100 # number of actions to sample per environment step - ) + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) + self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates + self.algo.critic.num_action_samples = 10 # number of actions to sample per training batch to get target critic value + self.algo.critic.num_action_samples_rollout = 100 # number of actions to sample per environment step # critic ensemble parameters (TD3 trick) -
self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.ensemble.weight = 0.75 # weighting for mixing min and max for target Q value # distributional critic - self.algo.critic.distributional.enabled = ( - False # train distributional critic (C51) - ) - self.algo.critic.distributional.num_atoms = ( - 51 # number of values in categorical distribution - ) + self.algo.critic.distributional.enabled = False # train distributional critic (C51) + self.algo.critic.distributional.num_atoms = 51 # number of values in categorical distribution - self.algo.critic.layer_dims = (300, 400) # size of critic MLP + self.algo.critic.layer_dims = (300, 400) # size of critic MLP # ================== Action Sampler Config =================== self.algo.action_sampler = BCConfig().algo # use VAE by default self.algo.action_sampler.vae.enabled = True # remove unused parts of BCConfig algo config - del ( - self.algo.action_sampler.optim_params - ) # since action sampler optim params specified at top-level + del self.algo.action_sampler.optim_params # since action sampler optim params specified at top-level del self.algo.action_sampler.loss del self.algo.action_sampler.gaussian del self.algo.action_sampler.rnn @@ -126,8 +78,6 @@ def algo_config(self): self.algo.action_sampler.freeze_encoder_epoch = -1 # ================== Actor Network Config =================== - self.algo.actor.enabled = False # whether to use the actor perturbation network - self.algo.actor.perturbation_scale = ( - 0.05 # size of learned action perturbations - ) - self.algo.actor.layer_dims = (300, 400) # size of actor MLP + self.algo.actor.enabled = False # whether to use the actor perturbation network + self.algo.actor.perturbation_scale = 0.05 # size of learned action perturbations + self.algo.actor.layer_dims = (300, 400) # size of actor MLP diff --git a/robomimic/config/config.py b/robomimic/config/config.py index 9fd39bbe..74da6535 100644 --- a/robomimic/config/config.py +++ b/robomimic/config/config.py @@ -14,13 +14,11 @@ class Config(dict): def __init__(__self, *args, **kwargs): - object.__setattr__(__self, "__key_locked", False) # disallow adding new keys - object.__setattr__( - __self, "__all_locked", False - ) # disallow both key and value update - object.__setattr__(__self, "__do_not_lock_keys", False) # cannot be key-locked - object.__setattr__(__self, "__parent", kwargs.pop("__parent", None)) - object.__setattr__(__self, "__key", kwargs.pop("__key", None)) + object.__setattr__(__self, '__key_locked', False) # disallow adding new keys + object.__setattr__(__self, '__all_locked', False) # disallow both key and value update + object.__setattr__(__self, '__do_not_lock_keys', False) # cannot be key-locked + object.__setattr__(__self, '__parent', kwargs.pop('__parent', None)) + object.__setattr__(__self, '__key', kwargs.pop('__key', None)) for arg in args: if not arg: continue @@ -41,9 +39,9 @@ def lock(self): Lock the config. Afterwards, new keys cannot be added to the config, and the values of existing keys cannot be modified. """ - object.__setattr__(self, "__all_locked", True) + object.__setattr__(self, '__all_locked', True) if self.key_lockable: - object.__setattr__(self, "__key_locked", True) + object.__setattr__(self, '__key_locked', True) for k in self: if isinstance(self[k], Config): @@ -54,8 +52,8 @@ def unlock(self): Unlock the config. Afterwards, new keys can be added to the config, and the values of existing keys can be modified. 
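# ---- editor's example (illustrative aside, not part of the patch) ----
# Sketch of the locking semantics documented above, using only methods
# defined in this file.
from robomimic.config.config import Config

c = Config()
c.train.batch_size = 128   # nested keys are created on the fly while unlocked
c.lock()                   # now neither keys nor values may be changed
with c.values_unlocked():  # temporarily allow value (but not key) updates
    c.train.batch_size = 256
c.unlock()                 # fully mutable again
# ----------------------------------------------------------------------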
""" - object.__setattr__(self, "__all_locked", False) - object.__setattr__(self, "__key_locked", False) + object.__setattr__(self, '__all_locked', False) + object.__setattr__(self, '__key_locked', False) for k in self: if isinstance(self[k], Config): @@ -65,10 +63,7 @@ def _get_lock_state_recursive(self): """ Internal helper function to get the lock state of all sub-configs recursively. """ - lock_state = { - "__all_locked": self.is_locked, - "__key_locked": self.is_key_locked, - } + lock_state = {"__all_locked": self.is_locked, "__key_locked": self.is_key_locked} for k in self: if isinstance(self[k], Config): assert k not in ["__all_locked", "__key_locked"] @@ -80,8 +75,8 @@ def _set_lock_state_recursive(self, lock_state): Internal helper function to set the lock state of all sub-configs recursively. """ lock_state = deepcopy(lock_state) - object.__setattr__(self, "__all_locked", lock_state.pop("__all_locked")) - object.__setattr__(self, "__key_locked", lock_state.pop("__key_locked")) + object.__setattr__(self, '__all_locked', lock_state.pop("__all_locked")) + object.__setattr__(self, '__key_locked', lock_state.pop("__key_locked")) for k in lock_state: if isinstance(self[k], Config): self[k]._set_lock_state_recursive(lock_state[k]) @@ -96,7 +91,10 @@ def _get_lock_state(self): a "key_locked" key that is True if only key updates are locked (value updates still allowed) and False otherwise """ - return {"all_locked": self.is_locked, "key_locked": self.is_key_locked} + return { + "all_locked": self.is_locked, + "key_locked": self.is_key_locked + } def _set_lock_state(self, lock_state): """ @@ -129,7 +127,7 @@ def unlocked(self): def values_unlocked(self): """ A context scope for modifying a Config object. Within the scope, - only values can be updated (new keys cannot be created). Upon + only values can be updated (new keys cannot be created). Upon leaving the scope, the initial level of locking is restored. """ lock_state = self._get_lock_state() @@ -144,7 +142,7 @@ def lock_keys(self): """ if not self.key_lockable: return - object.__setattr__(self, "__key_locked", True) + object.__setattr__(self, '__key_locked', True) for k in self: if isinstance(self[k], Config): self[k].lock_keys() @@ -153,7 +151,7 @@ def unlock_keys(self): """ Unlock this config so that new keys can be added. """ - object.__setattr__(self, "__key_locked", False) + object.__setattr__(self, '__key_locked', False) for k in self: if isinstance(self[k], Config): self[k].unlock_keys() @@ -163,55 +161,48 @@ def is_locked(self): """ Returns True if the config is locked (no key or value updates allowed). """ - return object.__getattribute__(self, "__all_locked") + return object.__getattribute__(self, '__all_locked') @property def is_key_locked(self): """ Returns True if the config is key-locked (no key updates allowed). """ - return object.__getattribute__(self, "__key_locked") + return object.__getattribute__(self, '__key_locked') def do_not_lock_keys(self): """ - Calling this function on this config indicates that key updates should be + Calling this function on this config indicates that key updates should be allowed even when this config is key-locked (but not when it is completely locked). This is convenient for attributes that contain kwargs, where there might be a variable type and number of arguments contained in the sub-config. 
""" - object.__setattr__(self, "__do_not_lock_keys", True) + object.__setattr__(self, '__do_not_lock_keys', True) @property def key_lockable(self): """ - Returns true if this config is key-lockable (new keys cannot be inserted in a + Returns true if this config is key-lockable (new keys cannot be inserted in a key-locked lock level). """ - return not object.__getattribute__(self, "__do_not_lock_keys") + return not object.__getattribute__(self, '__do_not_lock_keys') def __setattr__(self, name, value): if self.is_locked: - raise RuntimeError( - "This config has been locked - cannot set attribute '{}' to {}".format( - name, value - ) - ) + raise RuntimeError("This config has been locked - cannot set attribute '{}' to {}".format(name, value)) if hasattr(Config, name): - raise AttributeError( - "'Dict' object attribute " "'{0}' is read-only".format(name) - ) + raise AttributeError("'Dict' object attribute " + "'{0}' is read-only".format(name)) elif not hasattr(self, name) and self.is_key_locked: - raise RuntimeError( - "This config is key-locked - cannot add key '{}'".format(name) - ) + raise RuntimeError("This config is key-locked - cannot add key '{}'".format(name)) else: self[name] = value def __setitem__(self, name, value): super(Config, self).__setitem__(name, value) - p = object.__getattribute__(self, "__parent") - key = object.__getattribute__(self, "__key") + p = object.__getattribute__(self, '__parent') + key = object.__getattribute__(self, '__key') if p is not None: p[key] = self @@ -242,14 +233,8 @@ def __repr__(self): def __getitem__(self, name): if name not in self: - if object.__getattribute__(self, "__all_locked") or object.__getattribute__( - self, "__key_locked" - ): - raise RuntimeError( - "This config has been locked and '{}' is not in this config".format( - name - ) - ) + if object.__getattribute__(self, '__all_locked') or object.__getattribute__(self, '__key_locked'): + raise RuntimeError("This config has been locked and '{}' is not in this config".format(name)) return Config(__parent=self, __key=name) return super(Config, self).__getitem__(name) @@ -263,9 +248,8 @@ def to_dict(self): base[key] = value.to_dict() elif isinstance(value, (list, tuple)): base[key] = type(value)( - item.to_dict() if isinstance(item, type(self)) else item - for item in value - ) + item.to_dict() if isinstance(item, type(self)) else + item for item in value) else: base[key] = value return base @@ -288,7 +272,7 @@ def update(self, *args, **kwargs): Update this config using another config or nested dictionary. 
""" if self.is_locked: - raise RuntimeError("Cannot update - this config has been locked") + raise RuntimeError('Cannot update - this config has been locked') other = {} if args: if len(args) > 1: @@ -297,11 +281,7 @@ def update(self, *args, **kwargs): other.update(kwargs) for k, v in other.items(): if self.is_key_locked and k not in self: - raise RuntimeError( - "Cannot update - this config has been key-locked and key '{}' does not exist".format( - k - ) - ) + raise RuntimeError("Cannot update - this config has been key-locked and key '{}' does not exist".format(k)) if (not isinstance(self[k], dict)) or (not isinstance(v, dict)): self[k] = v else: @@ -339,4 +319,4 @@ def dump(self, filename=None): f = open(filename, "w") f.write(json_string) f.close() - return json_string + return json_string \ No newline at end of file diff --git a/robomimic/config/cql_config.py b/robomimic/config/cql_config.py index 15858e99..26fea048 100644 --- a/robomimic/config/cql_config.py +++ b/robomimic/config/cql_config.py @@ -19,100 +19,64 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = ( - 1e-3 # critic learning rate - ) - self.algo.optim_params.critic.learning_rate.decay_factor = ( - 0.0 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.critic.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - - self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = ( - 0.0 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.actor.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = ( - 0.00 # L2 regularization strength - ) + self.algo.optim_params.critic.learning_rate.initial = 1e-3 # critic learning rate + self.algo.optim_params.critic.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength + + self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks + self.algo.discount = 0.99 # discount factor to 
use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks # ================== Actor Network Config =================== - self.algo.actor.bc_start_steps = ( - 0 # uses BC policy loss for first n-training steps - ) - self.algo.actor.target_entropy = "default" # None is fixed entropy, otherwise is automatically tuned to match target. Can specify "default" as well for default tuning target - self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.actor.bc_start_steps = 0 # uses BC policy loss for first n-training steps + self.algo.actor.target_entropy = "default" # None is fixed entropy, otherwise is automatically tuned to match target. Can specify "default" as well for default tuning target + self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor # Actor network settings - self.algo.actor.net.type = ( - "gaussian" # Options are currently only "gaussian" (no support for GMM yet) - ) + self.algo.actor.net.type = "gaussian" # Options are currently only "gaussian" (no support for GMM yet) # Actor network settings - shared - self.algo.actor.net.common.std_activation = ( - "exp" # Activation to use for std output from policy net - ) - self.algo.actor.net.common.use_tanh = ( - True # Whether to use tanh at output of actor network - ) - self.algo.actor.net.common.low_noise_eval = ( - True # Whether to use deterministic action sampling at eval stage - ) + self.algo.actor.net.common.std_activation = "exp" # Activation to use for std output from policy net + self.algo.actor.net.common.use_tanh = True # Whether to use tanh at output of actor network + self.algo.actor.net.common.low_noise_eval = True # Whether to use deterministic action sampling at eval stage # Actor network settings - gaussian - self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value - self.algo.actor.net.gaussian.init_std = ( - 0.3 # Relative scaling factor for std from policy net - ) - self.algo.actor.net.gaussian.fixed_std = ( - False # Whether to learn std dev or not - ) + self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value + self.algo.actor.net.gaussian.init_std = 0.3 # Relative scaling factor for std from policy net + self.algo.actor.net.gaussian.fixed_std = False # Whether to learn std dev or not - self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions + self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = ( - None # L2 gradient clipping for critic (None to use no clipping) - ) + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) - self.algo.critic.value_bounds = ( - None # optional 2-tuple to ensure lower and upper bound on value estimates - ) + self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates - self.algo.critic.num_action_samples = 1 # number of actions to sample per training batch to get target critic value; use maximum Q value from n random sampled actions when doing TD error backup + 
self.algo.critic.num_action_samples = 1 # number of actions to sample per training batch to get target critic value; use maximum Q value from n random sampled actions when doing TD error backup # cql settings for critic - self.algo.critic.cql_weight = 1.0 # weighting for cql component of critic loss (only used if target_q_gap is < 0 or None) - self.algo.critic.deterministic_backup = ( - True # if not set, subtract weighted logprob of action when doing backup - ) - self.algo.critic.min_q_weight = 1.0 # min q weight (scaling factor) to apply - self.algo.critic.target_q_gap = 5.0 # if set, sets the diff threshold at which Q-values will be penalized more (note: this overrides cql weight above!) Use None or a negative value if not set - self.algo.critic.num_random_actions = ( - 10 # Number of random actions to sample when calculating CQL loss - ) + self.algo.critic.cql_weight = 1.0 # weighting for cql component of critic loss (only used if target_q_gap is < 0 or None) + self.algo.critic.deterministic_backup = True # if not set, subtract weighted logprob of action when doing backup + self.algo.critic.min_q_weight = 1.0 # min q weight (scaling factor) to apply + self.algo.critic.target_q_gap = 5.0 # if set, sets the diff threshold at which Q-values will be penalized more (note: this overrides cql weight above!) Use None or a negative value if not set + self.algo.critic.num_random_actions = 10 # Number of random actions to sample when calculating CQL loss # critic ensemble parameters (TD3 trick) - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions + self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions diff --git a/robomimic/config/gl_config.py b/robomimic/config/gl_config.py index 5e826c5c..939103e6 100644 --- a/robomimic/config/gl_config.py +++ b/robomimic/config/gl_config.py @@ -11,92 +11,63 @@ class GLConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
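# ---- editor's example (illustrative aside, not part of the patch) ----
# Example use of the goal-learning knobs below, assuming "gl" is the algo
# name registered with `config_factory`: shorten the subgoal horizon and fall
# back to the deterministic MLP planner instead of the VAE.
from robomimic.config import config_factory

config = config_factory(algo_name="gl")
with config.values_unlocked():
    config.algo.subgoal_horizon = 5  # predict subgoals 5 steps ahead
    config.algo.vae.enabled = False  # use the deterministic goal network
# ----------------------------------------------------------------------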
""" # optimization parameters - self.algo.optim_params.goal_network.learning_rate.initial = ( - 1e-4 # goal network learning rate - ) - self.algo.optim_params.goal_network.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.goal_network.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs + self.algo.optim_params.goal_network.learning_rate.initial = 1e-4 # goal network learning rate + self.algo.optim_params.goal_network.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.goal_network.learning_rate.epoch_schedule = [] # epochs where LR decay occurs self.algo.optim_params.goal_network.regularization.L2 = 0.00 # subgoal definition: observation that is @subgoal_horizon number of timesteps in future from current observation - self.algo.subgoal_horizon = 10 + self.algo.subgoal_horizon = 10 # MLP size for deterministic goal network (unused if VAE is enabled) self.algo.ae.planner_layer_dims = (300, 400) # ================== VAE config ================== - self.algo.vae.enabled = True # set to true to use VAE network - self.algo.vae.latent_dim = 16 # VAE latent dimension - self.algo.vae.latent_clip = ( - None # clip latent space when decoding (set to None to disable) - ) - self.algo.vae.kl_weight = 1.0 # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO + self.algo.vae.enabled = True # set to true to use VAE network + self.algo.vae.latent_dim = 16 # VAE latent dimension + self.algo.vae.latent_clip = None # clip latent space when decoding (set to None to disable) + self.algo.vae.kl_weight = 1. # beta-VAE weight to scale KL loss relative to reconstruction loss in ELBO # VAE decoder settings - self.algo.vae.decoder.is_conditioned = ( - True # whether decoder should condition on observation - ) - self.algo.vae.decoder.reconstruction_sum_across_elements = ( - False # sum instead of mean for reconstruction loss - ) + self.algo.vae.decoder.is_conditioned = True # whether decoder should condition on observation + self.algo.vae.decoder.reconstruction_sum_across_elements = False # sum instead of mean for reconstruction loss # VAE prior settings - self.algo.vae.prior.learn = ( - False # learn Gaussian / GMM prior instead of N(0, 1) - ) - self.algo.vae.prior.is_conditioned = ( - False # whether to condition prior on observations - ) - self.algo.vae.prior.use_gmm = False # whether to use GMM prior - self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes - self.algo.vae.prior.gmm_learn_weights = False # whether to learn GMM weights - self.algo.vae.prior.use_categorical = False # whether to use categorical prior - self.algo.vae.prior.categorical_dim = ( - 10 # the number of categorical classes for each latent dimension - ) - self.algo.vae.prior.categorical_gumbel_softmax_hard = ( - False # use hard selection in forward pass - ) - self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp - self.algo.vae.prior.categorical_temp_anneal_step = ( - 0.001 # linear temp annealing rate - ) - self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp + self.algo.vae.prior.learn = False # learn Gaussian / GMM prior instead of N(0, 1) + self.algo.vae.prior.is_conditioned = False # whether to condition prior on observations + self.algo.vae.prior.use_gmm = False # whether to use GMM prior + self.algo.vae.prior.gmm_num_modes = 10 # number of GMM modes + self.algo.vae.prior.gmm_learn_weights = False # whether to learn 
GMM weights + self.algo.vae.prior.use_categorical = False # whether to use categorical prior + self.algo.vae.prior.categorical_dim = 10 # the number of categorical classes for each latent dimension + self.algo.vae.prior.categorical_gumbel_softmax_hard = False # use hard selection in forward pass + self.algo.vae.prior.categorical_init_temp = 1.0 # initial gumbel-softmax temp + self.algo.vae.prior.categorical_temp_anneal_step = 0.001 # linear temp annealing rate + self.algo.vae.prior.categorical_min_temp = 0.3 # lowest gumbel-softmax temp - self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions - self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions - self.algo.vae.prior_layer_dims = ( - 300, - 400, - ) # prior MLP layer dimensions (if learning conditioned prior) + self.algo.vae.encoder_layer_dims = (300, 400) # encoder MLP layer dimensions + self.algo.vae.decoder_layer_dims = (300, 400) # decoder MLP layer dimensions + self.algo.vae.prior_layer_dims = (300, 400) # prior MLP layer dimensions (if learning conditioned prior) def observation_config(self): """ Update from superclass to specify subgoal modalities. """ super(GLConfig, self).observation_config() - self.observation.modalities.subgoal.low_dim = ( - [ # specify low-dim subgoal observations for agent to predict - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] - ) - self.observation.modalities.subgoal.rgb = ( - [] - ) # specify rgb image subgoal observations for agent to predict + self.observation.modalities.subgoal.low_dim = [ # specify low-dim subgoal observations for agent to predict + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object", + ] + self.observation.modalities.subgoal.rgb = [] # specify rgb image subgoal observations for agent to predict self.observation.modalities.subgoal.depth = [] self.observation.modalities.subgoal.scan = [] self.observation.modalities.subgoal.do_not_lock_keys() @@ -107,19 +78,12 @@ def all_obs_keys(self): Update from superclass to include subgoals. """ # pool all modalities - return sorted( - tuple( - set( - [ - obs_key - for group in [ - self.observation.modalities.obs.values(), - self.observation.modalities.goal.values(), - self.observation.modalities.subgoal.values(), - ] - for modality in group - for obs_key in modality - ] - ) - ) - ) + return sorted(tuple(set([ + obs_key for group in [ + self.observation.modalities.obs.values(), + self.observation.modalities.goal.values(), + self.observation.modalities.subgoal.values(), + ] + for modality in group + for obs_key in modality + ]))) diff --git a/robomimic/config/hbc_config.py b/robomimic/config/hbc_config.py index 16a1dbdf..ae65c9b8 100644 --- a/robomimic/config/hbc_config.py +++ b/robomimic/config/hbc_config.py @@ -15,15 +15,13 @@ def train_config(self): Update from superclass to change default sequence length to load from dataset. """ super(HBCConfig, self).train_config() - self.train.seq_length = ( - 10 # length of experience sequence to fetch from the buffer - ) + self.train.seq_length = 10 # length of experience sequence to fetch from the buffer def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. 
Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ @@ -33,17 +31,14 @@ def algo_config(self): # on planner subgoal predictions. In "actor_only" mode, only the actor is trained, and in # "planner_only" mode, only the planner is trained. self.algo.mode = "separate" - self.algo.actor_use_random_subgoals = ( - False # whether to sample subgoal index from [1, subgoal_horizon] - ) - self.algo.subgoal_update_interval = ( - 10 # how frequently the subgoal should be updated at test-time - ) + self.algo.actor_use_random_subgoals = False # whether to sample subgoal index from [1, subgoal_horizon] + self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time + # ================== Latent Subgoal Config ================== - self.algo.latent_subgoal.enabled = False # if True, use VAE latent space as subgoals for actor, instead of reconstructions + self.algo.latent_subgoal.enabled = False # if True, use VAE latent space as subgoals for actor, instead of reconstructions - # prior correction trick for actor and value training: instead of using encoder for + # prior correction trick for actor and value training: instead of using encoder for # transforming subgoals to latent subgoals, generate prior samples and choose # the closest one to the encoder output self.algo.latent_subgoal.prior_correction.enabled = False @@ -78,13 +73,9 @@ def use_goals(self): """ Update from superclass - planner goal modalities determine goal-conditioning """ - return ( - len( - self.observation.planner.modalities.goal.low_dim - + self.observation.planner.modalities.goal.rgb - ) - > 0 - ) + return len( + self.observation.planner.modalities.goal.low_dim + + self.observation.planner.modalities.goal.rgb) > 0 @property def all_obs_keys(self): @@ -92,21 +83,14 @@ def all_obs_keys(self): Update from superclass to include modalities from planner and actor. """ # pool all modalities - return sorted( - tuple( - set( - [ - obs_key - for group in [ - self.observation.planner.modalities.obs.values(), - self.observation.planner.modalities.goal.values(), - self.observation.planner.modalities.subgoal.values(), - self.observation.actor.modalities.obs.values(), - self.observation.actor.modalities.goal.values(), - ] - for modality in group - for obs_key in modality - ] - ) - ) - ) + return sorted(tuple(set([ + obs_key for group in [ + self.observation.planner.modalities.obs.values(), + self.observation.planner.modalities.goal.values(), + self.observation.planner.modalities.subgoal.values(), + self.observation.actor.modalities.obs.values(), + self.observation.actor.modalities.goal.values(), + ] + for modality in group + for obs_key in modality + ]))) diff --git a/robomimic/config/iql_config.py b/robomimic/config/iql_config.py index 16bbd2ea..bd603d1a 100644 --- a/robomimic/config/iql_config.py +++ b/robomimic/config/iql_config.py @@ -10,94 +10,64 @@ class IQLConfig(BaseConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. 
Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ super(IQLConfig, self).algo_config() - # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = ( - 1e-4 # critic learning rate - ) - self.algo.optim_params.critic.learning_rate.decay_factor = ( - 0.0 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.critic.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - - self.algo.optim_params.vf.learning_rate.initial = 1e-4 # vf learning rate - self.algo.optim_params.vf.learning_rate.decay_factor = ( - 0.0 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.vf.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.vf.regularization.L2 = 0.00 # L2 regularization strength - - self.algo.optim_params.actor.learning_rate.initial = 1e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = ( - 0.0 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.actor.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = ( - 0.00 # L2 regularization strength - ) + # optimization parameters + self.algo.optim_params.critic.learning_rate.initial = 1e-4 # critic learning rate + self.algo.optim_params.critic.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength + + self.algo.optim_params.vf.learning_rate.initial = 1e-4 # vf learning rate + self.algo.optim_params.vf.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.vf.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.vf.regularization.L2 = 0.00 # L2 regularization strength + + self.algo.optim_params.actor.learning_rate.initial = 1e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = 0.0 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.target_tau = 0.01 # update rate for target networks + self.algo.discount = 0.99 # discount factor to use + self.algo.target_tau = 0.01 # update rate for target networks # ================== Actor Network Config =================== # Actor network settings - self.algo.actor.net.type = ( - "gaussian" # Options are currently ["gaussian", "gmm"] - ) + self.algo.actor.net.type = "gaussian" # Options are currently ["gaussian", "gmm"] # Actor network settings - shared - self.algo.actor.net.common.std_activation = ( - "softplus" # Activation to use for std output from policy net - ) - self.algo.actor.net.common.low_noise_eval = ( - True # 
Whether to use deterministic action sampling at eval stage - ) - self.algo.actor.net.common.use_tanh = ( - False # Whether to use tanh at output of actor network - ) + self.algo.actor.net.common.std_activation = "softplus" # Activation to use for std output from policy net + self.algo.actor.net.common.low_noise_eval = True # Whether to use deterministic action sampling at eval stage + self.algo.actor.net.common.use_tanh = False # Whether to use tanh at output of actor network # Actor network settings - gaussian - self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value - self.algo.actor.net.gaussian.init_std = ( - 0.3 # Relative scaling factor for std from policy net - ) - self.algo.actor.net.gaussian.fixed_std = ( - False # Whether to learn std dev or not - ) + self.algo.actor.net.gaussian.init_last_fc_weight = 0.001 # If set, will override the initialization of the final fc layer to be uniformly sampled limited by this value + self.algo.actor.net.gaussian.init_std = 0.3 # Relative scaling factor for std from policy net + self.algo.actor.net.gaussian.fixed_std = False # Whether to learn std dev or not - self.algo.actor.net.gmm.num_modes = 5 # number of GMM modes - self.algo.actor.net.gmm.min_std = 0.0001 # minimum std output from network + self.algo.actor.net.gmm.num_modes = 5 # number of GMM modes + self.algo.actor.net.gmm.min_std = 0.0001 # minimum std output from network - self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions + self.algo.actor.layer_dims = (300, 400) # actor MLP layer dimensions - self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.actor.max_gradient_norm = None # L2 gradient clipping for actor # ================== Critic Network Config =================== # critic ensemble parameters - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = None # L2 gradient clipping for actor + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.layer_dims = (300, 400) # critic MLP layer dimensions + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = None # L2 gradient clipping for actor # ================== Adv Config ============================== - self.algo.adv.clip_adv_value = None # whether to clip raw advantage estimates - self.algo.adv.beta = 1.0 # temperature for operator - self.algo.adv.use_final_clip = True # whether to clip final weight calculations + self.algo.adv.clip_adv_value = None # whether to clip raw advantage estimates + self.algo.adv.beta = 1.0 # temperature for operator + self.algo.adv.use_final_clip = True # whether to clip final weight calculations - self.algo.vf_quantile = 0.9 # quantile factor in quantile regression + self.algo.vf_quantile = 0.9 # quantile factor in quantile regression diff --git a/robomimic/config/iris_config.py b/robomimic/config/iris_config.py index c16da304..c03328ce 100644 --- a/robomimic/config/iris_config.py +++ b/robomimic/config/iris_config.py @@ -13,9 +13,9 @@ class IRISConfig(HBCConfig): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the 
`algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. """ @@ -26,10 +26,8 @@ def algo_config(self): # "planner_only" mode, only the planner is trained. self.algo.mode = "separate" - self.algo.actor_use_random_subgoals = ( - False # whether to sample subgoal index from [1, subgoal_horizon] - ) - self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time (usually matches train.seq_length) + self.algo.actor_use_random_subgoals = False # whether to sample subgoal index from [1, subgoal_horizon] + self.algo.subgoal_update_interval = 10 # how frequently the subgoal should be updated at test-time (usually matches train.seq_length) # ================== Latent Subgoal Config ================== @@ -49,7 +47,7 @@ def algo_config(self): # The ValuePlanner value component is a BCQ model self.algo.value_planner.value = BCQConfig().algo - self.algo.value_planner.value.actor.enabled = False # ensure no BCQ actor + self.algo.value_planner.value.actor.enabled = False # ensure no BCQ actor # number of subgoal samples to use for value planner self.algo.value_planner.num_samples = 100 @@ -76,13 +74,9 @@ def use_goals(self): """ Update from superclass - value planner goal modalities determine goal-conditioning. """ - return ( - len( - self.observation.value_planner.planner.modalities.goal.low_dim - + self.observation.value_planner.planner.modalities.goal.rgb - ) - > 0 - ) + return len( + self.observation.value_planner.planner.modalities.goal.low_dim + + self.observation.value_planner.planner.modalities.goal.rgb) > 0 @property def all_obs_keys(self): @@ -90,23 +84,16 @@ def all_obs_keys(self): Update from superclass to include modalities from value planner and actor. 
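# ---- editor's example (illustrative aside, not part of the patch) ----
# The pooling idiom used by `all_obs_keys` above, on toy data: flatten every
# modality group, de-duplicate, and sort for a deterministic ordering.
groups = [
    ["robot0_eef_pos", "object"],  # e.g. planner observation keys
    ["object"],                    # e.g. value network keys (overlapping)
    ["robot0_gripper_qpos"],       # e.g. actor observation keys
]
pooled = sorted(tuple(set(k for g in groups for k in g)))
# -> ['object', 'robot0_eef_pos', 'robot0_gripper_qpos']
# ----------------------------------------------------------------------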
""" # pool all modalities - return sorted( - tuple( - set( - [ - obs_key - for group in [ - self.observation.value_planner.planner.modalities.obs.values(), - self.observation.value_planner.planner.modalities.goal.values(), - self.observation.value_planner.planner.modalities.subgoal.values(), - self.observation.value_planner.value.modalities.obs.values(), - self.observation.value_planner.value.modalities.goal.values(), - self.observation.actor.modalities.obs.values(), - self.observation.actor.modalities.goal.values(), - ] - for modality in group - for obs_key in modality - ] - ) - ) - ) + return sorted(tuple(set([ + obs_key for group in [ + self.observation.value_planner.planner.modalities.obs.values(), + self.observation.value_planner.planner.modalities.goal.values(), + self.observation.value_planner.planner.modalities.subgoal.values(), + self.observation.value_planner.value.modalities.obs.values(), + self.observation.value_planner.value.modalities.goal.values(), + self.observation.actor.modalities.obs.values(), + self.observation.actor.modalities.goal.values(), + ] + for modality in group + for obs_key in modality + ]))) diff --git a/robomimic/config/td3_bc_config.py b/robomimic/config/td3_bc_config.py index e52879b2..036a2591 100644 --- a/robomimic/config/td3_bc_config.py +++ b/robomimic/config/td3_bc_config.py @@ -19,7 +19,7 @@ def experiment_config(self): self.experiment.render_video = False # save 10 checkpoints throughout training - self.experiment.save.every_n_epochs = 20 + self.experiment.save.every_n_epochs = 20 # save models that achieve best rollout return instead of best success rate self.experiment.save.on_best_rollout_return = True @@ -30,9 +30,9 @@ def experiment_config(self): # evaluate with normal environment rollouts self.experiment.rollout.enabled = True - self.experiment.rollout.n = 50 # paper uses 10, but we can afford to do 50 + self.experiment.rollout.n = 50 # paper uses 10, but we can afford to do 50 self.experiment.rollout.horizon = 1000 - self.experiment.rollout.rate = 1 # rollout every epoch to match paper + self.experiment.rollout.rate = 1 # rollout every epoch to match paper def train_config(self): """ @@ -41,7 +41,7 @@ def train_config(self): super(TD3_BCConfig, self).train_config() # update to normalize observations - self.train.hdf5_normalize_obs = True + self.train.hdf5_normalize_obs = True # increase batch size to 256 self.train.batch_size = 256 @@ -51,74 +51,46 @@ def train_config(self): def algo_config(self): """ - This function populates the `config.algo` attribute of the config, and is given to the - `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` - argument to the constructor. Any parameter that an algorithm needs to determine its + This function populates the `config.algo` attribute of the config, and is given to the + `Algo` subclass (see `algo/algo.py`) for each algorithm through the `algo_config` + argument to the constructor. Any parameter that an algorithm needs to determine its training and test-time behavior should be populated here. 
""" # optimization parameters - self.algo.optim_params.critic.learning_rate.initial = ( - 3e-4 # critic learning rate - ) - self.algo.optim_params.critic.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.critic.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.critic.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - self.algo.optim_params.critic.start_epoch = ( - -1 - ) # number of epochs before starting critic training (-1 means start right away) - self.algo.optim_params.critic.end_epoch = ( - -1 - ) # number of epochs before ending critic training (-1 means start right away) - - self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate - self.algo.optim_params.actor.learning_rate.decay_factor = ( - 0.1 # factor to decay LR by (if epoch schedule non-empty) - ) - self.algo.optim_params.actor.learning_rate.epoch_schedule = ( - [] - ) # epochs where LR decay occurs - self.algo.optim_params.actor.regularization.L2 = ( - 0.00 # L2 regularization strength - ) - self.algo.optim_params.actor.start_epoch = ( - -1 - ) # number of epochs before starting actor training (-1 means start right away) - self.algo.optim_params.actor.end_epoch = ( - -1 - ) # number of epochs before ending actor training (-1 means start right away) + self.algo.optim_params.critic.learning_rate.initial = 3e-4 # critic learning rate + self.algo.optim_params.critic.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.critic.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.critic.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.critic.start_epoch = -1 # number of epochs before starting critic training (-1 means start right away) + self.algo.optim_params.critic.end_epoch = -1 # number of epochs before ending critic training (-1 means start right away) + + self.algo.optim_params.actor.learning_rate.initial = 3e-4 # actor learning rate + self.algo.optim_params.actor.learning_rate.decay_factor = 0.1 # factor to decay LR by (if epoch schedule non-empty) + self.algo.optim_params.actor.learning_rate.epoch_schedule = [] # epochs where LR decay occurs + self.algo.optim_params.actor.regularization.L2 = 0.00 # L2 regularization strength + self.algo.optim_params.actor.start_epoch = -1 # number of epochs before starting actor training (-1 means start right away) + self.algo.optim_params.actor.end_epoch = -1 # number of epochs before ending actor training (-1 means start right away) # alpha value - for weighting critic loss vs. 
BC loss self.algo.alpha = 2.5 # target network related parameters - self.algo.discount = 0.99 # discount factor to use - self.algo.n_step = 1 # for using n-step returns in TD-updates - self.algo.target_tau = 0.005 # update rate for target networks - self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon + self.algo.discount = 0.99 # discount factor to use + self.algo.n_step = 1 # for using n-step returns in TD-updates + self.algo.target_tau = 0.005 # update rate for target networks + self.algo.infinite_horizon = False # if True, scale terminal rewards by 1 / (1 - discount) to treat as infinite horizon # ================== Critic Network Config =================== - self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic - self.algo.critic.max_gradient_norm = ( - None # L2 gradient clipping for critic (None to use no clipping) - ) - self.algo.critic.value_bounds = ( - None # optional 2-tuple to ensure lower and upper bound on value estimates - ) + self.algo.critic.use_huber = False # Huber Loss instead of L2 for critic + self.algo.critic.max_gradient_norm = None # L2 gradient clipping for critic (None to use no clipping) + self.algo.critic.value_bounds = None # optional 2-tuple to ensure lower and upper bound on value estimates # critic ensemble parameters (TD3 trick) - self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble - self.algo.critic.ensemble.weight = ( - 1.0 # weighting for mixing min and max for target Q value - ) + self.algo.critic.ensemble.n = 2 # number of Q networks in the ensemble + self.algo.critic.ensemble.weight = 1.0 # weighting for mixing min and max for target Q value - self.algo.critic.layer_dims = (256, 256) # size of critic MLP + self.algo.critic.layer_dims = (256, 256) # size of critic MLP # ================== Actor Network Config =================== @@ -126,14 +98,10 @@ def algo_config(self): self.algo.actor.update_freq = 2 # exploration noise used to form target action for Q-update - clipped Gaussian noise - self.algo.actor.noise_std = ( - 0.2 # zero-mean gaussian noise with this std is applied to actions - ) - self.algo.actor.noise_clip = ( - 0.5 # noise is clipped in each dimension to (-noise_clip, noise_clip) - ) - - self.algo.actor.layer_dims = (256, 256) # size of actor MLP + self.algo.actor.noise_std = 0.2 # zero-mean gaussian noise with this std is applied to actions + self.algo.actor.noise_clip = 0.5 # noise is clipped in each dimension to (-noise_clip, noise_clip) + + self.algo.actor.layer_dims = (256, 256) # size of actor MLP def observation_config(self): """ diff --git a/robomimic/envs/env_base.py b/robomimic/envs/env_base.py index 58f44cec..9634db01 100644 --- a/robomimic/envs/env_base.py +++ b/robomimic/envs/env_base.py @@ -3,7 +3,6 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. """ - import abc @@ -12,7 +11,6 @@ class EnvType: Holds environment types - one per environment class. These act as identifiers for different environments. 
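The alpha, target_tau, noise_std, and noise_clip settings above map onto standard TD3+BC machinery. A hedged sketch of the pieces (shapes and networks here are assumptions, not robomimic's internal API):

    import torch
    import torch.nn.functional as F

    def td3_bc_actor_loss(q_values, policy_actions, dataset_actions, alpha=2.5):
        # TD3+BC actor objective (Fujimoto & Gu, 2021): maximize Q while staying close
        # to dataset actions; lam normalizes the Q term so alpha trades off the two losses.
        lam = alpha / q_values.abs().mean().detach()
        return -lam * q_values.mean() + F.mse_loss(policy_actions, dataset_actions)

    def smoothed_target_action(target_action, noise_std=0.2, noise_clip=0.5):
        # TD3 target-policy smoothing: clipped zero-mean Gaussian noise on the target action
        noise = (torch.randn_like(target_action) * noise_std).clamp(-noise_clip, noise_clip)
        return (target_action + noise).clamp(-1.0, 1.0)

    @torch.no_grad()
    def soft_update(target, source, target_tau=0.005):
        # Polyak averaging of target networks: target <- (1 - tau) * target + tau * source
        for tp, sp in zip(target.parameters(), source.parameters()):
            tp.mul_(1.0 - target_tau).add_(sp, alpha=target_tau)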
""" - ROBOSUITE_TYPE = 1 GYM_TYPE = 2 IG_MOMART_TYPE = 3 @@ -20,16 +18,15 @@ class EnvType: class EnvBase(abc.ABC): """A base class method for environments used by this repo.""" - @abc.abstractmethod def __init__( self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -89,7 +86,7 @@ def reset_to(self, state): Args: state (dict): current simulator state - + Returns: observation (dict): observation dictionary after setting the simulator state """ @@ -192,21 +189,21 @@ def serialize(self): @classmethod @abc.abstractmethod def create_for_data_processing( - cls, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ Create environment for processing datasets, which includes extracting observations, labeling dense / sparse rewards, and annotating dones in - transitions. + transitions. Args: camera_names ([str]): list of camera names that correspond to image observations diff --git a/robomimic/envs/env_gym.py b/robomimic/envs/env_gym.py index 6e9f659c..7b56d1eb 100644 --- a/robomimic/envs/env_gym.py +++ b/robomimic/envs/env_gym.py @@ -3,13 +3,11 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. """ - import json import numpy as np from copy import deepcopy import gym - try: import d4rl except: @@ -21,15 +19,14 @@ class EnvGym(EB.EnvBase): """Wrapper class for gym""" - def __init__( self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -91,14 +88,14 @@ def reset_to(self, state): Args: state (dict): current simulator state that contains: - states (np.ndarray): initial state of the mujoco environment - + Returns: observation (dict): observation dictionary after setting the simulator state """ if hasattr(self.env.unwrapped.sim, "set_state_from_flattened"): self.env.unwrapped.sim.set_state_from_flattened(state["states"]) self.env.unwrapped.sim.forward() - return {"flat": self.env.unwrapped._get_obs()} + return { "flat" : self.env.unwrapped._get_obs() } else: raise NotImplementedError @@ -111,7 +108,7 @@ def render(self, mode="human", height=None, width=None, camera_name=None, **kwar height (int): height of image to render - only used if mode is "rgb_array" width (int): width of image to render - only used if mode is "rgb_array" """ - if mode == "human": + if mode =="human": return self.env.render(mode=mode, **kwargs) if mode == "rgb_array": return self.env.render(mode="rgb_array", height=height, width=width) @@ -129,15 +126,15 @@ def get_observation(self, obs=None): if obs is None: assert self._current_obs is not None obs = self._current_obs - return {"flat": np.copy(obs)} + return { "flat" : np.copy(obs) } def get_state(self): """ Get current environment simulator state as a dictionary. Should be compatible with @reset_to. """ # NOTE: assumes MuJoCo gym task! 
- xml = self.env.sim.model.get_xml() # model xml file - state = np.array(self.env.sim.get_state().flatten()) # simulator state + xml = self.env.sim.model.get_xml() # model xml file + state = np.array(self.env.sim.get_state().flatten()) # simulator state return dict(model=xml, states=state) def get_reward(self): @@ -176,7 +173,7 @@ def is_success(self): return self.env.unwrapped._check_success() # gym envs generally don't check task success - we only compare returns - return {"task": False} + return { "task" : False } @property def action_dimension(self): @@ -206,22 +203,20 @@ def serialize(self): This is the same as @env_meta - environment metadata stored in hdf5 datasets, and used in utils/env_utils.py. """ - return dict( - env_name=self.name, type=self.type, env_kwargs=deepcopy(self._init_kwargs) - ) + return dict(env_name=self.name, type=self.type, env_kwargs=deepcopy(self._init_kwargs)) @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ @@ -269,6 +264,4 @@ def __repr__(self): """ Pretty-print env description. """ - return ( - self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) - ) + return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) diff --git a/robomimic/envs/env_ig_momart.py b/robomimic/envs/env_ig_momart.py index 14fdf094..951eedf3 100644 --- a/robomimic/envs/env_ig_momart.py +++ b/robomimic/envs/env_ig_momart.py @@ -31,21 +31,20 @@ class EnvGibsonMOMART(EB.EnvBase): Wrapper class for gibson environments (https://github.com/StanfordVL/iGibson) specifically compatible with MoMaRT datasets """ - def __init__( - self, - env_name, - ig_config, - postprocess_visual_obs=True, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - image_height=None, - image_width=None, - physics_timestep=1.0 / 240.0, - action_timestep=1.0 / 20.0, - **kwargs, + self, + env_name, + ig_config, + postprocess_visual_obs=True, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + image_height=None, + image_width=None, + physics_timestep=1./240., + action_timestep=1./20., + **kwargs, ): """ Args: @@ -93,9 +92,7 @@ def __init__( # Warn user that iG always uses a renderer if (not render) and (not render_offscreen): - print( - "WARNING: iGibson always uses a renderer -- using headless by default." 
- ) + print("WARNING: iGibson always uses a renderer -- using headless by default.") # Update ig config for k, v in kwargs.items(): @@ -103,30 +100,19 @@ def __init__( self.ig_config[k] = v # Set rendering values - self.obs_img_height = ( - image_height - if image_height is not None - else self.ig_config.get("obs_image_height", 120) - ) - self.obs_img_width = ( - image_width - if image_width is not None - else self.ig_config.get("obs_image_width", 120) - ) + self.obs_img_height = image_height if image_height is not None else self.ig_config.get("obs_image_height", 120) + self.obs_img_width = image_width if image_width is not None else self.ig_config.get("obs_image_width", 120) # Get class to create envClass = ENV_MAPPING.get(self._env_name, None) # Make sure we have a valid environment class - assert ( - envClass is not None - ), "No valid environment for the requested task was found!" + assert envClass is not None, "No valid environment for the requested task was found!" # Set device idx for rendering # ensure that we select the correct GPU device for rendering by testing for EGL rendering # NOTE: this package should be installed from this link (https://github.com/StanfordVL/egl_probe) import egl_probe - device_idx = 0 valid_gpu_devices = egl_probe.get_available_devices() if len(valid_gpu_devices) > 0: @@ -142,14 +128,10 @@ def __init__( ) # If we have a viewer, make sure to remove all bodies belonging to the visual markers - self.exclude_body_ids = [] # Bodies to exclude when saving state + self.exclude_body_ids = [] # Bodies to exclude when saving state if self.env.simulator.viewer is not None: - self.exclude_body_ids.append( - self.env.simulator.viewer.constraint_marker.body_id - ) - self.exclude_body_ids.append( - self.env.simulator.viewer.constraint_marker2.body_id - ) + self.exclude_body_ids.append(self.env.simulator.viewer.constraint_marker.body_id) + self.exclude_body_ids.append(self.env.simulator.viewer.constraint_marker2.body_id) def step(self, action): """ @@ -207,37 +189,27 @@ def render(self, mode="human", camera_name="rgb", height=None, width=None): array or None: If rendering to frame, returns the rendered frame. Otherwise, returns None """ # Only robotview camera is currently supported - assert camera_name in { - "rgb", - "rgb_wrist", - }, f"Only rgb, rgb_wrist cameras currently supported, got {camera_name}." + assert camera_name in {"rgb", "rgb_wrist"}, \ + f"Only rgb, rgb_wrist cameras currently supported, got {camera_name}." if mode == "human": assert self.render_onscreen, "Rendering has not been enabled for onscreen!" self.env.simulator.sync() else: - assert ( - self.env.simulator.renderer is not None - ), "No renderer enabled for this env!" + assert self.env.simulator.renderer is not None, "No renderer enabled for this env!" 
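One subtlety in the cv2.resize calls used here and in resize_obs_frame below: OpenCV takes dsize as (width, height), the reverse of numpy's (height, width) shape order. A quick check:

    import cv2
    import numpy as np

    frame = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)   # H x W x C
    small = cv2.resize(frame, dsize=(120, 240), interpolation=cv2.INTER_CUBIC)
    assert small.shape == (240, 120, 3)   # dsize=(W, H) -> array shape (H, W, C)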
frame = self.env.sensors["vision"].get_obs(self.env)[camera_name] # Reshape all frames if height is not None and width is not None: - frame = cv2.resize( - frame, dsize=(height, width), interpolation=cv2.INTER_CUBIC - ) + frame = cv2.resize(frame, dsize=(height, width), interpolation=cv2.INTER_CUBIC) return frame def resize_obs_frame(self, frame): """ Resizes frame to be internal height and width values """ - return cv2.resize( - frame, - dsize=(self.obs_img_width, self.obs_img_height), - interpolation=cv2.INTER_CUBIC, - ) + return cv2.resize(frame, dsize=(self.obs_img_width, self.obs_img_height), interpolation=cv2.INTER_CUBIC) def get_observation(self, di=None): """Get environment observation""" @@ -250,9 +222,7 @@ def get_observation(self, di=None): ret[k] = di[k] # ret[k] = np.transpose(di[k], (2, 0, 1)) if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs( - obs=self.resize_obs_frame(ret[k]), obs_key=k - ) + ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k]), obs_key=k) # Depth images elif "depth" in k: @@ -260,17 +230,13 @@ def get_observation(self, di=None): # Values can be corrupted (negative or > 1.0, so we clip values) ret[k] = np.clip(di[k], 0.0, 1.0) if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs( - obs=self.resize_obs_frame(ret[k])[..., None], obs_key=k - ) + ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k])[..., None], obs_key=k) # Segmentation Images elif "seg" in k: ret[k] = di[k][..., None] if self.postprocess_visual_obs: - ret[k] = ObsUtils.process_obs( - obs=self.resize_obs_frame(ret[k]), obs_key=k - ) + ret[k] = ObsUtils.process_obs(obs=self.resize_obs_frame(ret[k]), obs_key=k) # Scans elif "scan" in k: @@ -283,38 +249,30 @@ def get_observation(self, di=None): lin_vel = np.linalg.norm(proprio_obs["base_lin_vel"][:2]) ang_vel = proprio_obs["base_ang_vel"][2] - ret["proprio"] = np.concatenate( - [ - proprio_obs["head_joint_pos"], - proprio_obs["grasped"], - proprio_obs["eef_pos"], - proprio_obs["eef_quat"], - ] - ) + ret["proprio"] = np.concatenate([ + proprio_obs["head_joint_pos"], + proprio_obs["grasped"], + proprio_obs["eef_pos"], + proprio_obs["eef_quat"], + ]) # Proprio info that's only relevant for navigation - ret["proprio_nav"] = np.concatenate( - [ - [lin_vel], - [ang_vel], - ] - ) + ret["proprio_nav"] = np.concatenate([ + [lin_vel], + [ang_vel], + ]) # Compose task obs - ret["object"] = np.concatenate( - [ - np.array(di["task_obs"]["object-state"]), - ] - ) + ret["object"] = np.concatenate([ + np.array(di["task_obs"]["object-state"]), + ]) # Add ground truth navigational state - ret["gt_nav"] = np.concatenate( - [ - proprio_obs["base_pos"][:2], - [np.sin(proprio_obs["base_rpy"][2])], - [np.cos(proprio_obs["base_rpy"][2])], - ] - ) + ret["gt_nav"] = np.concatenate([ + proprio_obs["base_pos"][:2], + [np.sin(proprio_obs["base_rpy"][2])], + [np.cos(proprio_obs["base_rpy"][2])], + ]) return ret @@ -338,9 +296,7 @@ def set_task_conditions(self, task_conditions): def get_state(self): """Get iG flattened state""" - return { - "states": PBU.WorldSaver(exclude_body_ids=self.exclude_body_ids).serialize() - } + return {"states": PBU.WorldSaver(exclude_body_ids=self.exclude_body_ids).serialize()} def get_reward(self): return self.env.task.get_reward(self.env)[0] @@ -370,21 +326,21 @@ def is_success(self): if isinstance(succ, dict): assert "task" in succ return succ - return {"task": succ} + return { "task" : succ } @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - 
camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, - **kwargs, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, + **kwargs, ): """ Create environment for processing datasets, which includes extracting @@ -401,16 +357,14 @@ def create_for_data_processing( render_offscreen (bool or None): optionally override rendering behavior use_image_obs (bool or None): optionally override rendering behavior """ - has_camera = len(camera_names) > 0 + has_camera = (len(camera_names) > 0) # note that @postprocess_visual_obs is False since this env's images will be written to a dataset return cls( env_name=env_name, - render=(False if render is None else render), - render_offscreen=( - has_camera if render_offscreen is None else render_offscreen - ), - use_image_obs=(has_camera if use_image_obs is None else use_image_obs), + render=(False if render is None else render), + render_offscreen=(has_camera if render_offscreen is None else render_offscreen), + use_image_obs=(has_camera if use_image_obs is None else use_image_obs), postprocess_visual_obs=False, image_height=camera_height, image_width=camera_width, @@ -434,27 +388,19 @@ def type(self): def serialize(self): """Serialize to dictionary""" - return dict( - env_name=self.name, - type=self.type, - ig_config=self.ig_config, - env_kwargs=deepcopy(self._init_kwargs), - ) + return dict(env_name=self.name, type=self.type, + ig_config=self.ig_config, + env_kwargs=deepcopy(self._init_kwargs)) @classmethod def deserialize(cls, info, postprocess_visual_obs=True): """Create environment with external info""" - return cls( - env_name=info["env_name"], - ig_config=info["ig_config"], - postprocess_visual_obs=postprocess_visual_obs, - **info["env_kwargs"], - ) + return cls(env_name=info["env_name"], ig_config=info["ig_config"], postprocess_visual_obs=postprocess_visual_obs, **info["env_kwargs"]) @property def rollout_exceptions(self): """Return tuple of exceptions to except when doing rollouts""" - return RuntimeError + return (RuntimeError) @property def base_env(self): @@ -464,10 +410,5 @@ def base_env(self): return self.env def __repr__(self): - return ( - self.name - + "\n" - + json.dumps(self._init_kwargs, sort_keys=True, indent=4) - + "\niGibson Config: \n" - + json.dumps(self.ig_config, sort_keys=True, indent=4) - ) + return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) + \ + "\niGibson Config: \n" + json.dumps(self.ig_config, sort_keys=True, indent=4) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 7ff20044..942cb623 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -3,14 +3,12 @@ to provide a standardized environment API for training policies and interacting with metadata present in datasets. 
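The serialize / deserialize pair above exists so that environment metadata can travel with a dataset and be reconstructed later; a hedged sketch of the round trip using the EnvGibsonMOMART methods defined above:

    env_meta = env.serialize()   # plain dict: env_name, type, ig_config, env_kwargs
    # ... store env_meta (e.g. as a JSON string in an hdf5 attribute), then later:
    env2 = type(env).deserialize(env_meta, postprocess_visual_obs=True)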
""" - import json import numpy as np from copy import deepcopy import robosuite import robosuite.utils.transform_utils as T - try: # this is needed for ensuring robosuite can find the additional mimicgen environments (see https://mimicgen.github.io) import mimicgen_envs @@ -24,7 +22,6 @@ # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: import mujoco_py - MUJOCO_EXCEPTIONS = [mujoco_py.builder.MujocoException] except ImportError: MUJOCO_EXCEPTIONS = [] @@ -32,15 +29,14 @@ class EnvRobosuite(EB.EnvBase): """Wrapper class for robosuite environments (https://github.com/ARISE-Initiative/robosuite)""" - def __init__( - self, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, - postprocess_visual_obs=True, + self, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, + postprocess_visual_obs=True, **kwargs, ): """ @@ -69,11 +65,9 @@ def __init__( self.use_depth_obs = use_depth_obs # robosuite version check - self._is_v1 = robosuite.__version__.split(".")[0] == "1" + self._is_v1 = (robosuite.__version__.split(".")[0] == "1") if self._is_v1: - assert ( - int(robosuite.__version__.split(".")[1]) >= 2 - ), "only support robosuite v0.3 and v1.2+" + assert (int(robosuite.__version__.split(".")[1]) >= 2), "only support robosuite v0.3 and v1.2+" kwargs = deepcopy(kwargs) @@ -93,7 +87,6 @@ def __init__( # ensure that we select the correct GPU device for rendering by testing for EGL rendering # NOTE: this package should be installed from this link (https://github.com/StanfordVL/egl_probe) import egl_probe - valid_gpu_devices = egl_probe.get_available_devices() if len(valid_gpu_devices) > 0: kwargs["render_gpu_device_id"] = valid_gpu_devices[0] @@ -101,7 +94,7 @@ def __init__( # make sure gripper visualization is turned off (we almost always want this for learning) kwargs["gripper_visualization"] = False del kwargs["camera_depths"] - kwargs["camera_depth"] = use_depth_obs # rename kwarg + kwargs["camera_depth"] = use_depth_obs # rename kwarg self._env_name = env_name self._init_kwargs = deepcopy(kwargs) @@ -111,9 +104,7 @@ def __init__( # Make sure joint position observations and eef vel observations are active for ob_name in self.env.observation_names: if ("joint_pos" in ob_name) or ("eef_vel" in ob_name): - self.env.modify_observable( - observable_name=ob_name, attribute="active", modifier=True - ) + self.env.modify_observable(observable_name=ob_name, attribute="active", modifier=True) def step(self, action): """ @@ -150,7 +141,7 @@ def reset_to(self, state): state (dict): current simulator state that contains one or more of: - states (np.ndarray): initial state of the mujoco environment - model (str): mujoco scene xml - + Returns: observation (dict): observation dictionary after setting the simulator state (only if "states" is in @state) @@ -165,12 +156,8 @@ def reset_to(self, state): self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model - self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array( - [0.0, 0.0, 0.0, 0.0] - ) - self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array( - [0.0, 0.0, 0.0, 0.0] - ) + self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array([0., 0., 0., 0.]) + self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array([0., 0., 0., 0.]) if "states" in state: self.env.sim.set_state_from_flattened(state["states"]) self.env.sim.forward() @@ -198,9 +185,7 @@ def 
render(self, mode="human", height=None, width=None, camera_name="agentview") self.env.viewer.set_camera(cam_id) return self.env.render() elif mode == "rgb_array": - im = self.env.sim.render( - height=height, width=width, camera_name=camera_name - ) + im = self.env.sim.render(height=height, width=width, camera_name=camera_name) if self.use_depth_obs: # render() returns a tuple when self.use_depth_obs=True return im[0][::-1] @@ -213,32 +198,24 @@ def get_observation(self, di=None): Get current environment observation dictionary. Args: - di (dict): current raw observation dictionary from robosuite to wrap and provide + di (dict): current raw observation dictionary from robosuite to wrap and provide as a dictionary. If not provided, will be queried from robosuite. """ if di is None: - di = ( - self.env._get_observations(force_update=True) - if self._is_v1 - else self.env._get_observation() - ) + di = self.env._get_observations(force_update=True) if self._is_v1 else self.env._get_observation() ret = {} for k in di: - if (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality( - key=k, obs_modality="rgb" - ): + if (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="rgb"): # by default images from mujoco are flipped in height ret[k] = di[k][::-1] if self.postprocess_visual_obs: ret[k] = ObsUtils.process_obs(obs=ret[k], obs_key=k) - elif ( - k in ObsUtils.OBS_KEYS_TO_MODALITIES - ) and ObsUtils.key_is_obs_modality(key=k, obs_modality="depth"): + elif (k in ObsUtils.OBS_KEYS_TO_MODALITIES) and ObsUtils.key_is_obs_modality(key=k, obs_modality="depth"): # by default depth images from mujoco are flipped in height ret[k] = di[k][::-1] if len(ret[k].shape) == 2: - ret[k] = ret[k][..., None] # (H, W, 1) - assert len(ret[k].shape) == 3 + ret[k] = ret[k][..., None] # (H, W, 1) + assert len(ret[k].shape) == 3 # scale entries in depth map to correspond to real distance. ret[k] = self.get_real_depth_map(ret[k]) if self.postprocess_visual_obs: @@ -253,11 +230,8 @@ def get_observation(self, di=None): # ensures that we don't accidentally add robot wrist images a second time pf = robot.robot_model.naming_prefix for k in di: - if ( - k.startswith(pf) - and (k not in ret) - and (not k.endswith("proprio-state")) - ): + if k.startswith(pf) and (k not in ret) and \ + (not k.endswith("proprio-state")): ret[k] = np.array(di[k]) else: # minimal proprioception for older versions of robosuite @@ -316,12 +290,7 @@ def get_camera_extrinsic_matrix(self, camera_name): # IMPORTANT! This is a correction so that the camera axis is set up along the viewpoint correctly. camera_axis_correction = np.array( - [ - [1.0, 0.0, 0.0, 0.0], - [0.0, -1.0, 0.0, 0.0], - [0.0, 0.0, -1.0, 0.0], - [0.0, 0.0, 0.0, 1.0], - ] + [[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] ) R = R @ camera_axis_correction return R @@ -338,9 +307,7 @@ def get_camera_transform_matrix(self, camera_name, camera_height, camera_width): """ R = self.get_camera_extrinsic_matrix(camera_name=camera_name) K = self.get_camera_intrinsic_matrix( - camera_name=camera_name, - camera_height=camera_height, - camera_width=camera_width, + camera_name=camera_name, camera_height=camera_height, camera_width=camera_width ) K_exp = np.eye(4) K_exp[:3, :3] = K @@ -352,8 +319,8 @@ def get_state(self): """ Get current environment simulator state as a dictionary. Should be compatible with @reset_to. 
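Assuming the 4x4 matrix from get_camera_transform_matrix above maps homogeneous world coordinates to pixel coordinates, projecting a point reduces to a matrix product and a perspective divide (a sketch, not a robomimic utility):

    import numpy as np

    def project_to_pixels(T_world_to_pixel, point_world):
        p = T_world_to_pixel @ np.append(np.asarray(point_world, dtype=float), 1.0)
        return p[:2] / p[2]   # perspective divide yields (u, v) pixel coordinates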
""" - xml = self.env.sim.model.get_xml() # model xml file - state = np.array(self.env.sim.get_state().flatten()) # simulator state + xml = self.env.sim.model.get_xml() # model xml file + state = np.array(self.env.sim.get_state().flatten()) # simulator state return dict(model=xml, states=state) def get_reward(self): @@ -392,7 +359,7 @@ def is_success(self): if isinstance(succ, dict): assert "task" in succ return succ - return {"task": succ} + return { "task" : succ } @property def action_dimension(self): @@ -433,27 +400,27 @@ def serialize(self): env_name=self.name, env_version=self.version, type=self.type, - env_kwargs=deepcopy(self._init_kwargs), + env_kwargs=deepcopy(self._init_kwargs) ) @classmethod def create_for_data_processing( - cls, - env_name, - camera_names, - camera_height, - camera_width, - reward_shaping, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + cls, + env_name, + camera_names, + camera_height, + camera_width, + reward_shaping, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, **kwargs, ): """ Create environment for processing datasets, which includes extracting observations, labeling dense / sparse rewards, and annotating dones in - transitions. + transitions. Args: env_name (str): name of environment @@ -468,8 +435,8 @@ def create_for_data_processing( @camera_names is non-empty, False otherwise. use_depth_obs (bool): if True, use depth observations """ - is_v1 = robosuite.__version__.split(".")[0] == "1" - has_camera = len(camera_names) > 0 + is_v1 = (robosuite.__version__.split(".")[0] == "1") + has_camera = (len(camera_names) > 0) new_kwargs = { "reward_shaping": reward_shaping, @@ -502,7 +469,7 @@ def create_for_data_processing( depth_modalities = ["depth"] obs_modality_specs = { "obs": { - "low_dim": [], # technically unused, so we don't have to specify all of them + "low_dim": [], # technically unused, so we don't have to specify all of them "rgb": image_modalities, } } @@ -513,11 +480,9 @@ def create_for_data_processing( # note that @postprocess_visual_obs is False since this env's images will be written to a dataset return cls( env_name=env_name, - render=(False if render is None else render), - render_offscreen=( - has_camera if render_offscreen is None else render_offscreen - ), - use_image_obs=(has_camera if use_image_obs is None else use_image_obs), + render=(False if render is None else render), + render_offscreen=(has_camera if render_offscreen is None else render_offscreen), + use_image_obs=(has_camera if use_image_obs is None else use_image_obs), use_depth_obs=use_depth_obs, postprocess_visual_obs=False, **kwargs, @@ -543,6 +508,4 @@ def __repr__(self): """ Pretty-print env description. """ - return ( - self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) - ) + return self.name + "\n" + json.dumps(self._init_kwargs, sort_keys=True, indent=4) \ No newline at end of file diff --git a/robomimic/envs/wrappers.py b/robomimic/envs/wrappers.py index 1df0f54e..9936f9de 100644 --- a/robomimic/envs/wrappers.py +++ b/robomimic/envs/wrappers.py @@ -1,7 +1,6 @@ """ A collection of useful environment wrappers. """ - from copy import deepcopy import textwrap import numpy as np @@ -14,7 +13,6 @@ class EnvWrapper(object): """ Base class for all environment wrappers in robomimic. 
""" - def __init__(self, env): """ Args: @@ -61,20 +59,20 @@ def unwrapped(self): def _to_string(self): """ - Subclasses should override this method to print out info about the + Subclasses should override this method to print out info about the wrapper (such as arguments passed to it). """ - return "" + return '' def __repr__(self): """Pretty print environment.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 - if self._to_string() != "": + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 + if self._to_string() != '': msg += textwrap.indent("\n" + self._to_string(), indent) msg += textwrap.indent("\nenv={}".format(self.env), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg # this method is a fallback option on any methods the original env might support @@ -102,7 +100,6 @@ class FrameStackWrapper(EnvWrapper): receives a sequence of past observations instead of a single observation when it calls @env.reset, @env.reset_to, or @env.step in the rollout loop. """ - def __init__(self, env, num_frames): """ Args: @@ -111,11 +108,7 @@ def __init__(self, env, num_frames): to stack together. Must be greater than 1 (otherwise this wrapper would be a no-op). """ - assert ( - num_frames > 1 - ), "error: FrameStackWrapper must have num_frames > 1 but got num_frames of {}".format( - num_frames - ) + assert num_frames > 1, "error: FrameStackWrapper must have num_frames > 1 but got num_frames of {}".format(num_frames) super(FrameStackWrapper, self).__init__(env=env) self.num_frames = num_frames @@ -135,21 +128,19 @@ def _get_initial_obs_history(self, init_obs): obs_history = {} for k in init_obs: obs_history[k] = deque( - [init_obs[k][None] for _ in range(self.num_frames)], + [init_obs[k][None] for _ in range(self.num_frames)], maxlen=self.num_frames, ) return obs_history def _get_stacked_obs_from_history(self): """ - Helper method to convert internal variable @self.obs_history to a + Helper method to convert internal variable @self.obs_history to a stacked observation where each key is a numpy array with leading dimension @self.num_frames. """ # concatenate all frames per key so we return a numpy array per key - return { - k: np.concatenate(self.obs_history[k], axis=0) for k in self.obs_history - } + return { k : np.concatenate(self.obs_history[k], axis=0) for k in self.obs_history } def cache_obs_history(self): self.obs_history_cache = deepcopy(self.obs_history) @@ -160,7 +151,7 @@ def uncache_obs_history(self): def reset(self): """ - Modify to return frame stacked observation which is @self.num_frames copies of + Modify to return frame stacked observation which is @self.num_frames copies of the initial observation. Returns: @@ -176,7 +167,7 @@ def reset(self): def reset_to(self, state): """ - Modify to return frame stacked observation which is @self.num_frames copies of + Modify to return frame stacked observation which is @self.num_frames copies of the initial observation. 
Returns: @@ -217,7 +208,7 @@ def step(self, action): def update_obs(self, obs, action=None, reset=False): obs["timesteps"] = np.array([self.timestep]) - + if reset: obs["actions"] = np.zeros(self.env.action_dimension) else: @@ -226,4 +217,4 @@ def update_obs(self, obs, action=None, reset=False): def _to_string(self): """Info to pretty print.""" - return "num_frames={}".format(self.num_frames) + return "num_frames={}".format(self.num_frames) \ No newline at end of file diff --git a/robomimic/macros.py b/robomimic/macros.py index 9f7bb00d..3b6c0503 100644 --- a/robomimic/macros.py +++ b/robomimic/macros.py @@ -1,7 +1,6 @@ """ Set of global variables shared across robomimic """ - # Sets debugging mode. Should be set at top-level script so that internal # debugging functionalities are made active DEBUG = False @@ -21,11 +20,8 @@ except ImportError: from robomimic.utils.log_utils import log_warning import robomimic - log_warning( - "No private macro file found!" - "\nIt is recommended to use a private macro file" - "\nTo setup, run: python {}/scripts/setup_macros.py".format( - robomimic.__path__[0] - ) + "No private macro file found!"\ + "\nIt is recommended to use a private macro file"\ + "\nTo setup, run: python {}/scripts/setup_macros.py".format(robomimic.__path__[0]) ) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index 488e4695..b9654d37 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -57,7 +57,7 @@ def transformer_args_from_config(transformer_config): transformer_activation=transformer_config.activation, transformer_nn_parameter_for_timesteps=transformer_config.nn_parameter_for_timesteps, ) - + if "num_layers" in transformer_config: transformer_args["transformer_num_layers"] = transformer_config.num_layers @@ -69,15 +69,14 @@ class Module(torch.nn.Module): Base class for networks. The only difference from torch.nn.Module is that it requires implementing @output_shape. """ - @abc.abstractmethod def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -90,8 +89,7 @@ class Sequential(torch.nn.Sequential, Module): """ Compose multiple Modules together (defined above). """ - - def __init__(self, *args, has_output_shape=True): + def __init__(self, *args, has_output_shape = True): """ Args: has_output_shape (bool, optional): indicates whether output_shape can be called on the Sequential module. @@ -108,11 +106,11 @@ def __init__(self, *args, has_output_shape=True): def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -140,7 +138,6 @@ class Parameter(Module): A class that is a thin wrapper around a torch.nn.Parameter to make for easy saving and optimization. 
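The output_shape contract above lets shapes be derived without running a forward pass: each module maps its input shape to its output shape, so shapes can be folded through a chain of modules. A sketch of that fold under the stated contract:

    def chained_output_shape(modules, input_shape):
        # fold an input shape through each module's output_shape in turn
        out = list(input_shape)
        for m in modules:
            out = m.output_shape(out)
        return out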
""" - def __init__(self, init_tensor): """ Args: @@ -151,11 +148,11 @@ def __init__(self, init_tensor): def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -174,18 +171,13 @@ class Unsqueeze(Module): """ Trivial class that unsqueezes the input. Useful for including in a nn.Sequential network """ - def __init__(self, dim): super(Unsqueeze, self).__init__() self.dim = dim def output_shape(self, input_shape=None): assert input_shape is not None - return ( - input_shape + [1] - if self.dim == -1 - else input_shape[: self.dim + 1] + [1] + input_shape[self.dim + 1 :] - ) + return input_shape + [1] if self.dim == -1 else input_shape[:self.dim + 1] + [1] + input_shape[self.dim + 1:] def forward(self, x): return x.unsqueeze(dim=self.dim) @@ -202,11 +194,7 @@ def __init__(self, dim): def output_shape(self, input_shape=None): assert input_shape is not None - return ( - input_shape[: self.dim] + input_shape[self.dim + 1 :] - if input_shape[self.dim] == 1 - else input_shape - ) + return input_shape[:self.dim] + input_shape[self.dim+1:] if input_shape[self.dim] == 1 else input_shape def forward(self, x): return x.squeeze(dim=self.dim) @@ -216,7 +204,6 @@ class MLP(Module): """ Base class for simple Multi-Layer Perceptrons. """ - def __init__( self, input_dim, @@ -256,13 +243,13 @@ def __init__( if layer_func_kwargs is None: layer_func_kwargs = dict() if dropouts is not None: - assert len(dropouts) == len(layer_dims) + assert(len(dropouts) == len(layer_dims)) for i, l in enumerate(layer_dims): layers.append(layer_func(dim, l, **layer_func_kwargs)) if normalization: layers.append(nn.LayerNorm(l)) layers.append(activation()) - if dropouts is not None and dropouts[i] > 0.0: + if dropouts is not None and dropouts[i] > 0.: layers.append(nn.Dropout(dropouts[i])) dim = l layers.append(layer_func(dim, output_dim)) @@ -281,11 +268,11 @@ def __init__( def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. 
Returns: @@ -305,18 +292,13 @@ def __repr__(self): act = None if self._act is None else self._act.__name__ output_act = None if self._output_act is None else self._output_act.__name__ - indent = " " * 4 + indent = ' ' * 4 msg = "input_dim={}\noutput_dim={}\nlayer_dims={}\nlayer_func={}\ndropout={}\nact={}\noutput_act={}".format( - self._input_dim, - self._output_dim, - self._layer_dims, - self._layer_func.__name__, - self._dropouts, - act, - output_act, + self._input_dim, self._output_dim, self._layer_dims, + self._layer_func.__name__, self._dropouts, act, output_act ) msg = textwrap.indent(msg, indent) - msg = header + "(\n" + msg + "\n)" + msg = header + '(\n' + msg + '\n)' return msg @@ -324,7 +306,6 @@ class RNN_Base(Module): """ A wrapper class for a multi-step RNN and a per-step network. """ - def __init__( self, input_dim, @@ -351,9 +332,7 @@ def __init__( super(RNN_Base, self).__init__() self.per_step_net = per_step_net if per_step_net is not None: - assert isinstance( - per_step_net, Module - ), "RNN_Base: per_step_net is not instance of Module" + assert isinstance(per_step_net, Module), "RNN_Base: per_step_net is not instance of Module" assert rnn_type in ["LSTM", "GRU"] rnn_cls = nn.LSTM if rnn_type == "LSTM" else nn.GRU @@ -371,9 +350,7 @@ def __init__( self._hidden_dim = rnn_hidden_dim self._num_layers = rnn_num_layers self._rnn_type = rnn_type - self._num_directions = ( - int(rnn_is_bidirectional) + 1 - ) # 2 if bidirectional, 1 otherwise + self._num_directions = int(rnn_is_bidirectional) + 1 # 2 if bidirectional, 1 otherwise @property def rnn_type(self): @@ -391,24 +368,20 @@ def get_rnn_init_state(self, batch_size, device): hidden_state (torch.Tensor or tuple): returns hidden state tensor or tuple of hidden state tensors depending on the RNN type """ - h_0 = torch.zeros( - self._num_layers * self._num_directions, batch_size, self._hidden_dim - ).to(device) + h_0 = torch.zeros(self._num_layers * self._num_directions, batch_size, self._hidden_dim).to(device) if self._rnn_type == "LSTM": - c_0 = torch.zeros( - self._num_layers * self._num_directions, batch_size, self._hidden_dim - ).to(device) + c_0 = torch.zeros(self._num_layers * self._num_directions, batch_size, self._hidden_dim).to(device) return h_0, c_0 else: return h_0 def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -484,24 +457,21 @@ def forward_step(self, inputs, rnn_state): Visual Backbone Networks ================================================ """ - - class ConvBase(Module): """ Base class for ConvNets. """ - def __init__(self): super(ConvBase, self).__init__() # dirty hack - re-implement to pass the buck onto subclasses from ABC parent def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. 
- Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -512,21 +482,15 @@ def output_shape(self, input_shape): def forward(self, inputs): x = self.nets(inputs) if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]: - raise ValueError( - "Size mismatch: expect size %s, but got size %s" - % ( - str(self.output_shape(list(inputs.shape)[1:])), - str(list(x.shape)[1:]), - ) + raise ValueError('Size mismatch: expect size %s, but got size %s' % ( + str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:])) ) return x - class ResNet18Conv(ConvBase): """ A ResNet18 block that can be used to process input images. """ - def __init__( self, input_channel=3, @@ -546,13 +510,9 @@ def __init__( net = vision_models.resnet18(pretrained=pretrained) if input_coord_conv: - net.conv1 = CoordConv2d( - input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False - ) + net.conv1 = CoordConv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False) elif input_channel != 3: - net.conv1 = nn.Conv2d( - input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False - ) + net.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=False) # cut the last fc layer self._input_coord_conv = input_coord_conv @@ -561,34 +521,30 @@ def __init__( def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. 
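The shape rule implemented just below follows from ResNet-18's total stride of 32; for example:

    import math

    H, W = 84, 84
    out_shape = [512, math.ceil(H / 32.0), math.ceil(W / 32.0)]
    assert out_shape == [512, 3, 3]   # an 84x84 input yields 512 channels of 3x3 features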
Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert len(input_shape) == 3 - out_h = int(math.ceil(input_shape[1] / 32.0)) - out_w = int(math.ceil(input_shape[2] / 32.0)) + assert(len(input_shape) == 3) + out_h = int(math.ceil(input_shape[1] / 32.)) + out_w = int(math.ceil(input_shape[2] / 32.)) return [512, out_h, out_w] def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - return header + "(input_channel={}, input_coord_conv={})".format( - self._input_channel, self._input_coord_conv - ) - - + header = '{}'.format(str(self.__class__.__name__)) + return header + '(input_channel={}, input_coord_conv={})'.format(self._input_channel, self._input_coord_conv) + class ViT_Rein(ConvBase): """ ViT LoRA using Rein method """ - def __init__( self, input_channel=3, @@ -599,7 +555,7 @@ def __init__( return_key="x_norm_patchtokens" ): """ - Using pretrained observation encoder network proposed in Vision Transformers + Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 pip install -r requirements.txt Args: @@ -633,10 +589,10 @@ def __init__( raise ValueError(f"return_key {self.return_key} not supported") self.preprocess = nn.Sequential( - transforms.Resize((294, 294)), + transforms.Resize((294,294)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) - + try: if self._vit_model_class == "vit_s": self.nets = dinov2_vits14 = torch.hub.load( @@ -671,6 +627,7 @@ def __init__( except ImportError: print("WARNING: could not load rein layer") + if self._freeze: for param in self.nets.parameters(): param.requires_grad = False @@ -690,8 +647,8 @@ def forward(self, inputs): if self.return_key == "x_norm_patchtokens": return x q_avg = x.mean(dim=1).unsqueeze(1) - q_max = torch.max(x, 1)[0].unsqueeze(1) - q_N = x[:, x.shape[1] - 1, :].unsqueeze(1) + q_max = torch.max(x,1)[0].unsqueeze(1) + q_N = x[:,x.shape[1]-1,:].unsqueeze(1) _q = torch.cat((q_avg, q_max, q_N), dim=1) @@ -712,7 +669,7 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert len(input_shape) == 3 + assert(len(input_shape) == 3) C, H, W = input_shape out_dim = self._mlp_lora_head._out_dim @@ -745,6 +702,9 @@ def __repr__(self): ) ) + header = '{}'.format(str(self.__class__.__name__)) + return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) + class Vit(ConvBase): """ @@ -753,7 +713,7 @@ class Vit(ConvBase): def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return_key="x_norm_patchtokens"): """ - Using pretrained observation encoder network proposed in Vision Transformers + Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 pip install -r requirements.txt Args: @@ -765,13 +725,8 @@ def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return """ super(Vit, self).__init__() - assert input_channel == 3 - assert vit_model_class in [ - "vit_b", - "vit_l", - "vit_g", - "vit_s", - ] # make sure the selected vit model do exist + assert input_channel == 3 + assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist # cut the last fc layer self._input_channel = input_channel @@ -784,10 +739,10 @@ def __init__(self, 
input_channel=3, vit_model_class="vit_b", freeze=True, return raise ValueError(f"return_key {self.return_key} not supported") self.preprocess = nn.Sequential( - transforms.Resize((294, 294)), + transforms.Resize((294,294)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) - + try: if self._vit_model_class == "vit_s": self.nets = dinov2_vits14 = torch.hub.load( @@ -831,7 +786,7 @@ def output_shape(self, input_shape): Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert len(input_shape) == 3 + assert(len(input_shape) == 3) C, H, W = input_shape out_dim = self.nets.patch_embed.proj.out_channels @@ -843,35 +798,20 @@ def output_shape(self, input_shape): def __repr__(self): """Pretty print network.""" - print( - "**Number of learnable params:", - sum(p.numel() for p in self.nets.parameters() if p.requires_grad), - " Freeze:", - self._freeze, - ) - print("**Number of params:", sum(p.numel() for p in self.nets.parameters())) - - header = "{}".format(str(self.__class__.__name__)) - return ( - header - + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format( - self._input_channel, - self._input_coord_conv, - self._pretrained, - self._freeze, - ) - ) + print("**Number of learnable params:",sum(p.numel() for p in self.nets.parameters() if p.requires_grad)," Freeze:",self._freeze) + print("**Number of params:",sum(p.numel() for p in self.nets.parameters())) + header = '{}'.format(str(self.__class__.__name__)) + return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze) class R3MConv(ConvBase): """ Base class for ConvNets pretrained with R3M (https://arxiv.org/abs/2203.12601) """ - def __init__( self, input_channel=3, - r3m_model_class="resnet18", + r3m_model_class='resnet18', freeze=True, ): """ @@ -888,18 +828,12 @@ def __init__( try: from r3m import load_r3m except ImportError: - print( - "WARNING: could not load r3m library! Please follow https://github.com/facebookresearch/r3m to install R3M" - ) + print("WARNING: could not load r3m library! 
Please follow https://github.com/facebookresearch/r3m to install R3M")

        net = load_r3m(r3m_model_class)

-        assert input_channel == 3  # R3M only support input image with channel size 3
-        assert r3m_model_class in [
-            "resnet18",
-            "resnet34",
-            "resnet50",
-        ]  # make sure the selected r3m model do exist
+        assert input_channel == 3 # R3M only supports input images with channel size 3
+        assert r3m_model_class in ["resnet18", "resnet34", "resnet50"] # make sure the selected r3m model does exist

         # cut the last fc layer
         self._input_channel = input_channel
@@ -913,16 +847,11 @@ def __init__(
             transforms.CenterCrop(224),
             transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
         )
-        self.nets = Sequential(
-            *([preprocess] + list(net.module.convnet.children())),
-            has_output_shape=False,
-        )
+        self.nets = Sequential(*([preprocess] + list(net.module.convnet.children())), has_output_shape = False)
         if freeze:
             self.nets.freeze()

-        self.weight_sum = np.sum(
-            [param.cpu().data.numpy().sum() for param in self.nets.parameters()]
-        )
+        self.weight_sum = np.sum([param.cpu().data.numpy().sum() for param in self.nets.parameters()])
         if freeze:
             for param in self.nets.parameters():
                 param.requires_grad = False
@@ -939,9 +868,9 @@ def output_shape(self, input_shape):
         Returns:
             out_shape ([int]): list of integers corresponding to output shape
         """
-        assert len(input_shape) == 3
+        assert(len(input_shape) == 3)

-        if self._r3m_model_class == "resnet50":
+        if self._r3m_model_class == 'resnet50':
             out_dim = 2048
         else:
             out_dim = 512
@@ -950,27 +879,18 @@ def output_shape(self, input_shape):

     def __repr__(self):
         """Pretty print network."""
-        header = "{}".format(str(self.__class__.__name__))
-        return (
-            header
-            + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format(
-                self._input_channel,
-                self._input_coord_conv,
-                self._pretrained,
-                self._freeze,
-            )
-        )
+        header = '{}'.format(str(self.__class__.__name__))
+        return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze)


 class MVPConv(ConvBase):
     """
     Base class for ConvNets pretrained with MVP (https://arxiv.org/abs/2203.06173)
     """
-
     def __init__(
         self,
         input_channel=3,
-        mvp_model_class="vitb-mae-egosoup",
+        mvp_model_class='vitb-mae-egosoup',
         freeze=True,
     ):
         """
@@ -987,22 +907,14 @@ def __init__(
         try:
             import mvp
         except ImportError:
-            print(
-                "WARNING: could not load mvp library! Please follow https://github.com/ir413/mvp to install MVP."
-            )
+            print("WARNING: could not load mvp library! Please follow https://github.com/ir413/mvp to install MVP.")

         self.nets = mvp.load(mvp_model_class)
         if freeze:
             self.nets.freeze()

-        assert input_channel == 3  # MVP only support input image with channel size 3
-        assert mvp_model_class in [
-            "vits-mae-hoi",
-            "vits-mae-in",
-            "vits-sup-in",
-            "vitb-mae-egosoup",
-            "vitl-256-mae-egosoup",
-        ]  # make sure the selected r3m model do exist
+        assert input_channel == 3 # MVP only supports input images with channel size 3
+        assert mvp_model_class in ["vits-mae-hoi", "vits-mae-in", "vits-sup-in", "vitb-mae-egosoup", "vitl-256-mae-egosoup"] # make sure the selected mvp model does exist

         self._input_channel = input_channel
         self._freeze = freeze
@@ -1010,22 +922,20 @@ def __init__(
         self._input_coord_conv = False
         self._pretrained = True

-        if "256" in mvp_model_class:
+        if '256' in mvp_model_class:
             input_img_size = 256
         else:
             input_img_size = 224
-        self.preprocess = nn.Sequential(transforms.Resize(input_img_size))
+        self.preprocess = nn.Sequential(
+            transforms.Resize(input_img_size)
+        )

     def forward(self, inputs):
         x = self.preprocess(inputs)
         x = self.nets(x)
         if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]:
-            raise ValueError(
-                "Size mismatch: expect size %s, but got size %s"
-                % (
-                    str(self.output_shape(list(inputs.shape)[1:])),
-                    str(list(x.shape)[1:]),
-                )
+            raise ValueError('Size mismatch: expect size %s, but got size %s' % (
+                str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:]))
             )
         return x

@@ -1039,10 +949,10 @@ def output_shape(self, input_shape):
         Returns:
             out_shape ([int]): list of integers corresponding to output shape
         """
-        assert len(input_shape) == 3
-        if "vitb" in self._mvp_model_class:
+        assert(len(input_shape) == 3)
+        if 'vitb' in self._mvp_model_class:
             output_shape = [768]
-        elif "vitl" in self._mvp_model_class:
+        elif 'vitl' in self._mvp_model_class:
             output_shape = [1024]
         else:
             output_shape = [384]
@@ -1050,16 +960,8 @@ def output_shape(self, input_shape):

     def __repr__(self):
         """Pretty print network."""
-        header = "{}".format(str(self.__class__.__name__))
-        return (
-            header
-            + "(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})".format(
-                self._input_channel,
-                self._input_coord_conv,
-                self._pretrained,
-                self._freeze,
-            )
-        )
+        header = '{}'.format(str(self.__class__.__name__))
+        return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze)


 class CoordConv2d(nn.Conv2d, Module):
     """
     2D Coordinate Convolution

     Source: An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution
     https://arxiv.org/abs/1807.03247
     (e.g. adds 2 channels per input feature map corresponding to (x, y) location on map)
     """
-
     def __init__(
         self,
         in_channels,
         out_channels,
         kernel_size,
         stride=1,
         padding=0,
         dilation=1,
         groups=1,
         bias=True,
-        padding_mode="zeros",
-        coord_encoding="position",
+        padding_mode='zeros',
+        coord_encoding='position',
     ):
         """
         Args:
             coord_encoding: type of coordinate encoding. 
currently only 'position' is implemented """ - assert coord_encoding in ["position"] + assert(coord_encoding in ['position']) self.coord_encoding = coord_encoding - if coord_encoding == "position": + if coord_encoding == 'position': in_channels += 2 # two extra channel for positional encoding self._position_enc = None # position encoding else: - raise Exception( - "CoordConv2d: coord encoding {} not implemented".format( - self.coord_encoding - ) - ) + raise Exception("CoordConv2d: coord encoding {} not implemented".format(self.coord_encoding)) nn.Conv2d.__init__( self, in_channels=in_channels, @@ -1119,16 +1016,16 @@ def __init__( dilation=dilation, groups=groups, bias=bias, - padding_mode=padding_mode, + padding_mode=padding_mode ) def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -1140,7 +1037,7 @@ def output_shape(self, input_shape): def forward(self, input): b, c, h, w = input.shape - if self.coord_encoding == "position": + if self.coord_encoding == 'position': if self._position_enc is None: pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(input.device) / float(h) @@ -1155,7 +1052,6 @@ class ShallowConv(ConvBase): """ A shallow convolutional encoder from https://rll.berkeley.edu/dsae/dsae.pdf """ - def __init__(self, input_channel=3, output_channel=32): super(ShallowConv, self).__init__() self._input_channel = input_channel @@ -1172,20 +1068,20 @@ def __init__(self, input_channel=3, output_channel=32): def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert len(input_shape) == 3 - assert input_shape[0] == self._input_channel - out_h = int(math.floor(input_shape[1] / 2.0)) - out_w = int(math.floor(input_shape[2] / 2.0)) + assert(len(input_shape) == 3) + assert(input_shape[0] == self._input_channel) + out_h = int(math.floor(input_shape[1] / 2.)) + out_w = int(math.floor(input_shape[2] / 2.)) return [self._output_channel, out_h, out_w] @@ -1204,7 +1100,6 @@ class Conv1dBase(Module): argument to be passed to the ith Conv1D layer. See https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html for specific possible arguments. 
""" - def __init__( self, input_channel=1, @@ -1218,7 +1113,7 @@ def __init__( # Get activation requested activation = CONV_ACTIVATIONS[activation] - + # Add layer kwargs conv_kwargs["out_channels"] = out_channels conv_kwargs["kernel_size"] = kernel_size @@ -1229,12 +1124,12 @@ def __init__( layers = OrderedDict() for i in range(self.n_layers): layer_kwargs = {k: v[i] for k, v in conv_kwargs.items()} - layers[f"conv{i}"] = nn.Conv1d( + layers[f'conv{i}'] = nn.Conv1d( in_channels=input_channel, **layer_kwargs, ) if activation is not None: - layers[f"act{i}"] = activation() + layers[f'act{i}'] = activation() input_channel = layer_kwargs["out_channels"] # Store network @@ -1256,29 +1151,14 @@ def output_shape(self, input_shape): for i in range(self.n_layers): net = getattr(self.nets, f"conv{i}") channels = net.out_channels - length = ( - int( - ( - length - + 2 * net.padding[0] - - net.dilation[0] * (net.kernel_size[0] - 1) - - 1 - ) - / net.stride[0] - ) - + 1 - ) + length = int((length + 2 * net.padding[0] - net.dilation[0] * (net.kernel_size[0] - 1) - 1) / net.stride[0]) + 1 return [channels, length] def forward(self, inputs): x = self.nets(inputs) if list(self.output_shape(list(inputs.shape)[1:])) != list(x.shape)[1:]: - raise ValueError( - "Size mismatch: expect size %s, but got size %s" - % ( - str(self.output_shape(list(inputs.shape)[1:])), - str(list(x.shape)[1:]), - ) + raise ValueError('Size mismatch: expect size %s, but got size %s' % ( + str(self.output_shape(list(inputs.shape)[1:])), str(list(x.shape)[1:])) ) return x @@ -1288,8 +1168,6 @@ def forward(self, inputs): Pooling Networks ================================================ """ - - class SpatialSoftmax(ConvBase): """ Spatial Softmax Layer. @@ -1297,12 +1175,11 @@ class SpatialSoftmax(ConvBase): Based on Deep Spatial Autoencoders for Visuomotor Learning by Finn et al. 
https://rll.berkeley.edu/dsae/dsae.pdf """ - def __init__( self, input_shape, num_kp=32, - temperature=1.0, + temperature=1., learnable_temperature=False, output_variance=False, noise_std=0.0, @@ -1318,7 +1195,7 @@ def __init__( """ super(SpatialSoftmax, self).__init__() assert len(input_shape) == 3 - self._in_c, self._in_h, self._in_w = input_shape # (C, H, W) + self._in_c, self._in_h, self._in_w = input_shape # (C, H, W) if num_kp is not None: self.nets = torch.nn.Conv2d(self._in_c, num_kp, kernel_size=1) @@ -1332,55 +1209,51 @@ def __init__( if self.learnable_temperature: # temperature will be learned - temperature = torch.nn.Parameter( - torch.ones(1) * temperature, requires_grad=True - ) - self.register_parameter("temperature", temperature) + temperature = torch.nn.Parameter(torch.ones(1) * temperature, requires_grad=True) + self.register_parameter('temperature', temperature) else: # temperature held constant after initialization - temperature = torch.nn.Parameter( - torch.ones(1) * temperature, requires_grad=False - ) - self.register_buffer("temperature", temperature) + temperature = torch.nn.Parameter(torch.ones(1) * temperature, requires_grad=False) + self.register_buffer('temperature', temperature) pos_x, pos_y = np.meshgrid( - np.linspace(-1.0, 1.0, self._in_w), np.linspace(-1.0, 1.0, self._in_h) - ) + np.linspace(-1., 1., self._in_w), + np.linspace(-1., 1., self._in_h) + ) pos_x = torch.from_numpy(pos_x.reshape(1, self._in_h * self._in_w)).float() pos_y = torch.from_numpy(pos_y.reshape(1, self._in_h * self._in_w)).float() - self.register_buffer("pos_x", pos_x) - self.register_buffer("pos_y", pos_y) + self.register_buffer('pos_x', pos_x) + self.register_buffer('pos_y', pos_y) self.kps = None def __repr__(self): """Pretty print network.""" header = format(str(self.__class__.__name__)) - return header + "(num_kp={}, temperature={}, noise={})".format( - self._num_kp, self.temperature.item(), self.noise_std - ) + return header + '(num_kp={}, temperature={}, noise={})'.format( + self._num_kp, self.temperature.item(), self.noise_std) def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - assert len(input_shape) == 3 - assert input_shape[0] == self._in_c + assert(len(input_shape) == 3) + assert(input_shape[0] == self._in_c) return [self._num_kp, 2] def forward(self, feature): """ - Forward pass through spatial softmax layer. For each keypoint, a 2D spatial - probability distribution is created using a softmax, where the support is the - pixel locations. This distribution is used to compute the expected value of + Forward pass through spatial softmax layer. For each keypoint, a 2D spatial + probability distribution is created using a softmax, where the support is the + pixel locations. This distribution is used to compute the expected value of the pixel location, which becomes a keypoint of dimension 2. K such keypoints are created. 
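For intuition, here is a minimal standalone sketch of the expected-keypoint computation performed in this forward pass (shapes assumed [B, K, H, W]; the meshgrid indexing argument assumes PyTorch >= 1.10):

import torch

def spatial_softmax_keypoints(feature, temperature=1.0):
    # feature: [B, K, H, W] -> keypoints: [B, K, 2] in normalized [-1, 1] coordinates
    B, K, H, W = feature.shape
    pos_x, pos_y = torch.meshgrid(
        torch.linspace(-1.0, 1.0, W),
        torch.linspace(-1.0, 1.0, H),
        indexing="xy",
    )  # both [H, W], matching the pos_x / pos_y buffers registered above
    attention = torch.softmax(feature.reshape(B * K, H * W) / temperature, dim=-1)
    expected_x = torch.sum(pos_x.reshape(1, H * W) * attention, dim=1, keepdim=True)
    expected_y = torch.sum(pos_y.reshape(1, H * W) * attention, dim=1, keepdim=True)
    return torch.cat([expected_x, expected_y], dim=1).reshape(B, K, 2)

Each keypoint is the softmax-weighted average of pixel coordinates, so the output stays differentiable in the feature map.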
@@ -1389,9 +1262,9 @@ def forward(self, feature):
             keypoint variance of shape [B, K, 2, 2] corresponding to the covariance under the
             2D spatial softmax distribution
         """
-        assert feature.shape[1] == self._in_c
-        assert feature.shape[2] == self._in_h
-        assert feature.shape[3] == self._in_w
+        assert(feature.shape[1] == self._in_c)
+        assert(feature.shape[2] == self._in_h)
+        assert(feature.shape[3] == self._in_w)
         if self.nets is not None:
             feature = self.nets(feature)

@@ -1413,22 +1286,14 @@ def forward(self, feature):

         if self.output_variance:
             # treat attention as a distribution, and compute second-order statistics to return
-            expected_xx = torch.sum(
-                self.pos_x * self.pos_x * attention, dim=1, keepdim=True
-            )
-            expected_yy = torch.sum(
-                self.pos_y * self.pos_y * attention, dim=1, keepdim=True
-            )
-            expected_xy = torch.sum(
-                self.pos_x * self.pos_y * attention, dim=1, keepdim=True
-            )
+            expected_xx = torch.sum(self.pos_x * self.pos_x * attention, dim=1, keepdim=True)
+            expected_yy = torch.sum(self.pos_y * self.pos_y * attention, dim=1, keepdim=True)
+            expected_xy = torch.sum(self.pos_x * self.pos_y * attention, dim=1, keepdim=True)
             var_x = expected_xx - expected_x * expected_x
             var_y = expected_yy - expected_y * expected_y
             var_xy = expected_xy - expected_x * expected_y
             # stack to [B * K, 4] and then reshape to [B, K, 2, 2] where last 2 dims are covariance matrix
-            feature_covar = torch.cat([var_x, var_xy, var_xy, var_y], 1).reshape(
-                -1, self._num_kp, 2, 2
-            )
+            feature_covar = torch.cat([var_x, var_xy, var_xy, var_y], 1).reshape(-1, self._num_kp, 2, 2)
             feature_keypoints = (feature_keypoints, feature_covar)

         if isinstance(feature_keypoints, tuple):
@@ -1443,25 +1308,24 @@ class SpatialMeanPool(Module):
     Module that averages inputs across all spatial dimensions (dimension 2 and after),
     leaving only the batch and channel dimensions.
     """
-
     def __init__(self, input_shape):
         super(SpatialMeanPool, self).__init__()
-        assert len(input_shape) == 3  # [C, H, W]
+        assert len(input_shape) == 3 # [C, H, W]
        self.in_shape = input_shape

     def output_shape(self, input_shape=None):
         """
-        Function to compute output shape from inputs to this module.
+        Function to compute output shape from inputs to this module. 

         Args:
             input_shape (iterable of int): shape of input. Does not include batch dimension.
-                Some modules may not need this argument, if their output does not depend
+                Some modules may not need this argument, if their output does not depend 
                 on the size of the input, or if they assume fixed size input.

         Returns:
             out_shape ([int]): list of integers corresponding to output shape
         """
-        return list(self.in_shape[:1])  # [C, H, W] -> [C]
+        return list(self.in_shape[:1]) # [C, H, W] -> [C]

     def forward(self, inputs):
         """Forward pass - average across all dimensions except batch and channel."""
@@ -1470,12 +1334,11 @@ def forward(self, inputs):

 class FeatureAggregator(Module):
     """
-    Helpful class for aggregating features across a dimension. This is useful in
+    Helpful class for aggregating features across a dimension. This is useful in 
     practice when training models that break an input image up into several patches
-    since features can be extraced per-patch using the same encoder and then
+    since features can be extracted per-patch using the same encoder and then 
     aggregated using this module.
""" - def __init__(self, dim=1, agg_type="avg"): super(FeatureAggregator, self).__init__() self.dim = dim @@ -1491,18 +1354,18 @@ def clear_weight(self): def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: out_shape ([int]): list of integers corresponding to output shape """ - # aggregates on @self.dim, so it is removed from the output shape - return list(input_shape[: self.dim]) + list(input_shape[self.dim + 1 :]) + # aggregates on @self.dim, so it is removed from the output shape + return list(input_shape[:self.dim]) + list(input_shape[self.dim+1:]) def forward(self, x): """Forward pooling pass.""" diff --git a/robomimic/models/distributions.py b/robomimic/models/distributions.py index cd33a94c..411efb1a 100644 --- a/robomimic/models/distributions.py +++ b/robomimic/models/distributions.py @@ -2,7 +2,6 @@ Contains distribution models used as parts of other networks. These classes usually inherit or emulate torch distributions. """ - import torch import torch.nn as nn import torch.nn.functional as F @@ -16,7 +15,6 @@ class TanhWrappedDistribution(D.Distribution): Tanh Normal distribution - adapted from rlkit and CQL codebase (https://github.com/aviralkumar2907/CQL/blob/d67dbe9cf5d2b96e3b462b6146f249b3d6569796/d4rl/rlkit/torch/distributions.py#L6). """ - def __init__(self, base_dist, scale=1.0, epsilon=1e-6): """ Args: @@ -37,17 +35,13 @@ def log_prob(self, value, pre_tanh_value=None): """ value = value / self.scale if pre_tanh_value is None: - one_plus_x = (1.0 + value).clamp(min=self.tanh_epsilon) - one_minus_x = (1.0 - value).clamp(min=self.tanh_epsilon) + one_plus_x = (1. + value).clamp(min=self.tanh_epsilon) + one_minus_x = (1. - value).clamp(min=self.tanh_epsilon) pre_tanh_value = 0.5 * torch.log(one_plus_x / one_minus_x) lp = self.base_dist.log_prob(pre_tanh_value) tanh_lp = torch.log(1 - value * value + self.tanh_epsilon) # In case the base dist already sums up the log probs, make sure we do the same - return ( - lp - tanh_lp - if len(lp.shape) == len(tanh_lp.shape) - else lp - tanh_lp.sum(-1) - ) + return lp - tanh_lp if len(lp.shape) == len(tanh_lp.shape) else lp - tanh_lp.sum(-1) def sample(self, sample_shape=torch.Size(), return_pretanh_value=False): """ @@ -87,7 +81,6 @@ class DiscreteValueDistribution(object): of the support (categorical values, or in this case, value atoms). This is used for distributional value networks. """ - def __init__(self, values, probs=None, logits=None): """ Creates a categorical distribution parameterized by either @probs or @@ -121,7 +114,7 @@ def variance(self): """ dist_squared = (self.mean().unsqueeze(-1) - self.values).pow(2) return (self._categorical_dist.probs * dist_squared).sum(dim=-1) - + def sample(self, sample_shape=torch.Size()): """ Sample from the distribution. Make sure to return value atoms, not categorical class indices. 
diff --git a/robomimic/models/obs_core.py b/robomimic/models/obs_core.py index 0d037efa..81f12b66 100644 --- a/robomimic/models/obs_core.py +++ b/robomimic/models/obs_core.py @@ -28,18 +28,16 @@ import matplotlib.pyplot as plt + """ ================================================ Encoder Core Networks (Abstract class) ================================================ """ - - class EncoderCore(BaseNets.Module): """ Abstract class used to categorize all cores used to encode observations """ - def __init__(self, input_shape): self.input_shape = input_shape super(EncoderCore, self).__init__() @@ -64,14 +62,11 @@ def __init_subclass__(cls, **kwargs): Visual Core Networks (Backbone + Pool) ================================================ """ - - class VisualCore(EncoderCore, BaseNets.ConvBase): """ A network block that combines a visual backbone network with optional pooling and linear layers. """ - def __init__( self, input_shape, @@ -106,9 +101,7 @@ def __init__( backbone_kwargs["input_channel"] = input_shape[0] # extract only relevant kwargs for this specific backbone - backbone_kwargs = extract_class_init_kwargs_from_dict( - cls=eval(backbone_class), dic=backbone_kwargs, copy=True - ) + backbone_kwargs = extract_class_init_kwargs_from_dict(cls=eval(backbone_class), dic=backbone_kwargs, copy=True) # visual backbone assert isinstance(backbone_class, str) @@ -127,9 +120,7 @@ def __init__( pool_kwargs = dict() # extract only relevant kwargs for this specific backbone pool_kwargs["input_shape"] = feat_shape - pool_kwargs = extract_class_init_kwargs_from_dict( - cls=eval(pool_class), dic=pool_kwargs, copy=True - ) + pool_kwargs = extract_class_init_kwargs_from_dict(cls=eval(pool_class), dic=pool_kwargs, copy=True) self.pool = eval(pool_class)(**pool_kwargs) assert isinstance(self.pool, BaseNets.Module) @@ -153,11 +144,11 @@ def __init__( def output_shape(self, input_shape): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -186,18 +177,14 @@ def forward(self, inputs): def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 2 + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 2 msg += textwrap.indent( - "\ninput_shape={}\noutput_shape={}".format( - self.input_shape, self.output_shape(self.input_shape) - ), - indent, - ) + "\ninput_shape={}\noutput_shape={}".format(self.input_shape, self.output_shape(self.input_shape)), indent) msg += textwrap.indent("\nbackbone_net={}".format(self.backbone), indent) msg += textwrap.indent("\npool_net={}".format(self.pool), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg @@ -206,14 +193,11 @@ def __repr__(self): Scan Core Networks (Conv1D Sequential + Pool) ================================================ """ - - class ScanCore(EncoderCore, BaseNets.ConvBase): """ A network block that combines a Conv1D backbone network with optional pooling and linear layers. 
""" - def __init__( self, input_shape, @@ -321,18 +305,14 @@ def forward(self, inputs): def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 2 + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 2 msg += textwrap.indent( - "\ninput_shape={}\noutput_shape={}".format( - self.input_shape, self.output_shape(self.input_shape) - ), - indent, - ) + "\ninput_shape={}\noutput_shape={}".format(self.input_shape, self.output_shape(self.input_shape)), indent) msg += textwrap.indent("\nbackbone_net={}".format(self.backbone), indent) msg += textwrap.indent("\npool_net={}".format(self.pool), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg @@ -341,8 +321,6 @@ def __repr__(self): Observation Randomizer Networks ================================================ """ - - class Randomizer(BaseNets.Module): """ Base class for randomizer networks. Each randomizer should implement the @output_shape_in, @@ -351,7 +329,6 @@ class Randomizer(BaseNets.Module): (usually processed by a @VisualCore instance). Note that the self.training property can be used to change the randomizer's behavior at train vs. test time. """ - def __init__(self): super(Randomizer, self).__init__() @@ -417,11 +394,7 @@ def forward_in(self, inputs): randomized_inputs = self._forward_in(inputs=inputs) if VISUALIZE_RANDOMIZER: num_samples_to_visualize = min(4, inputs.shape[0]) - self._visualize( - inputs, - randomized_inputs, - num_samples_to_visualize=num_samples_to_visualize, - ) + self._visualize(inputs, randomized_inputs, num_samples_to_visualize=num_samples_to_visualize) return randomized_inputs else: return self._forward_in_eval(inputs) @@ -462,9 +435,7 @@ def _forward_out_eval(self, inputs): return inputs @abc.abstractmethod - def _visualize( - self, pre_random_input, randomized_input, num_samples_to_visualize=2 - ): + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): """ Visualize the original input and the randomized input for _forward_in for debugging purposes. """ @@ -475,7 +446,6 @@ class CropRandomizer(Randomizer): """ Randomly sample crops at input, and then average across crop features at output. """ - def __init__( self, input_shape, @@ -495,7 +465,7 @@ def __init__( """ super(CropRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) assert crop_height < input_shape[1] assert crop_width < input_shape[2] @@ -551,7 +521,7 @@ def _forward_in(self, inputs): Samples N random crops for each input in the batch, and then reshapes inputs to [B * N, ...]. 
""" - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions out, _ = ObsUtils.sample_random_image_crops( images=inputs, crop_height=self.crop_height, @@ -566,17 +536,10 @@ def _forward_in_eval(self, inputs): """ Do center crops during eval """ - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions - inputs = inputs.permute( - *range(inputs.dim() - 3), - inputs.dim() - 2, - inputs.dim() - 1, - inputs.dim() - 3, - ) + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + inputs = inputs.permute(*range(inputs.dim()-3), inputs.dim()-2, inputs.dim()-1, inputs.dim()-3) out = ObsUtils.center_crop(inputs, self.crop_height, self.crop_width) - out = out.permute( - *range(out.dim() - 3), out.dim() - 1, out.dim() - 3, out.dim() - 2 - ) + out = out.permute(*range(out.dim()-3), out.dim()-1, out.dim()-3, out.dim()-2) return out def _forward_out(self, inputs): @@ -585,47 +548,37 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. """ - batch_size = inputs.shape[0] // self.num_crops - out = TensorUtils.reshape_dimensions( - inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_crops) - ) + batch_size = (inputs.shape[0] // self.num_crops) + out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, + target_dims=(batch_size, self.num_crops)) return out.mean(dim=1) - def _visualize( - self, pre_random_input, randomized_input, num_samples_to_visualize=2 - ): + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint( - 0, batch_size, size=(num_samples_to_visualize,) - ) + random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_crops), + target_dims=(batch_size, self.num_crops) ) # [B * N, ...] -> [B, N, ...] 
randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose( - (0, 2, 3, 1) - ) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose( - (0, 1, 3, 4, 2) - ) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name="{}".format(str(self.__class__.__name__)), + randomizer_name='{}'.format(str(self.__class__.__name__)) ) def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) + header = '{}'.format(str(self.__class__.__name__)) msg = header + "(input_shape={}, crop_size=[{}, {}], num_crops={})".format( - self.input_shape, self.crop_height, self.crop_width, self.num_crops - ) + self.input_shape, self.crop_height, self.crop_width, self.num_crops) return msg @@ -633,7 +586,6 @@ class CropResizeRandomizer(Randomizer): """ Randomly sample crop, then resize to specified size """ - def __init__( self, input_shape, @@ -656,7 +608,7 @@ def __init__( """ super(CropResizeRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) # assert crop_height < input_shape[1] # assert crop_width < input_shape[2] @@ -667,12 +619,7 @@ def __init__( self.num_crops = num_crops self.pos_enc = pos_enc - self.resize_crop = RandomResizedCrop( - size=size, - scale=scale, - ratio=ratio, - interpolation=TVF.InterpolationMode.BILINEAR, - ) + self.resize_crop = RandomResizedCrop(size=size, scale=scale, ratio=ratio, interpolation=TVF.InterpolationMode.BILINEAR) def output_shape_in(self, input_shape=None): shape = [self.input_shape[0], self.size[0], self.size[1]] @@ -681,27 +628,21 @@ def output_shape_in(self, input_shape=None): def output_shape_out(self, input_shape=None): return list(input_shape) - def _visualize( - self, pre_random_input, randomized_input, num_samples_to_visualize=2 - ): + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): """ pre_random_input: (B, C, H, W) randomized_input: (B, C, H, W) - num_samples_to_visualize: + num_samples_to_visualize: Use plt.imsave to save a plot with the original input and the randomized input side by side. Save it to debug/augIms/ with a unique name. 
""" - fig, axes = plt.subplots( - num_samples_to_visualize, 2, figsize=(10, 5 * num_samples_to_visualize) - ) + fig, axes = plt.subplots(num_samples_to_visualize, 2, figsize=(10, 5*num_samples_to_visualize)) for i in range(num_samples_to_visualize): axes[i, 0].imshow(pre_random_input[i].permute(1, 2, 0).cpu().numpy()) axes[i, 0].set_title("Original Input") axes[i, 1].imshow(randomized_input[i].permute(1, 2, 0).cpu().numpy()) axes[i, 1].set_title("Randomized Input") plt.tight_layout() - plt.savefig( - f"debug/augIms/sample_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.png" - ) + plt.savefig(f"debug/augIms/sample_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.png") plt.close(fig) # plt.close(fig) # fig, axes = plt.subplots(1, 2) @@ -742,9 +683,8 @@ def _forward_in_eval(self, inputs): # return out # just resize - return TVF.resize( - inputs, size=self.size, interpolation=TVF.InterpolationMode.BILINEAR - ) + return TVF.resize(inputs, size=self.size, interpolation=TVF.InterpolationMode.BILINEAR) + def _forward_out(self, inputs): """ @@ -756,13 +696,11 @@ def _forward_out(self, inputs): """ return inputs - - + class CropResizeColorRandomizer(CropResizeRandomizer): """ Does the same thing as CropResizeRandomizer, but additionally performs color jitter """ - def __init__( self, input_shape, @@ -778,7 +716,7 @@ def __init__( saturation_min=1.0, saturation_max=1.0, hue_min=0.0, - hue_max=0.0, + hue_max=0.0 ): super(CropResizeColorRandomizer, self).__init__( input_shape=input_shape, @@ -788,29 +726,24 @@ def __init__( num_crops=num_crops, pos_enc=pos_enc, ) - self.color_jitter = TT.ColorJitter( - brightness=(brightness_min, brightness_max), - contrast=(contrast_min, contrast_max), - saturation=(saturation_min, saturation_max), - hue=(hue_min, hue_max), - ) - + self.color_jitter = TT.ColorJitter(brightness=(brightness_min, brightness_max), contrast=(contrast_min, contrast_max), saturation=(saturation_min, saturation_max), hue=(hue_min, hue_max)) + def _forward_in(self, inputs): out = super(CropResizeColorRandomizer, self)._forward_in(inputs) out = self.color_jitter(out) # self._visualize(inputs, out) return out - + def _forward_in_eval(self, inputs): out = super(CropResizeColorRandomizer, self)._forward_in_eval(inputs) return out + class ColorRandomizer(Randomizer): """ Randomly sample color jitter at input, and then average across color jtters at output. 
""" - def __init__( self, input_shape, @@ -838,24 +771,12 @@ def __init__( """ super(ColorRandomizer, self).__init__() - assert len(input_shape) == 3 # (C, H, W) + assert len(input_shape) == 3 # (C, H, W) self.input_shape = input_shape - self.brightness = ( - [max(0, 1 - brightness), 1 + brightness] - if type(brightness) in {float, int} - else brightness - ) - self.contrast = ( - [max(0, 1 - contrast), 1 + contrast] - if type(contrast) in {float, int} - else contrast - ) - self.saturation = ( - [max(0, 1 - saturation), 1 + saturation] - if type(saturation) in {float, int} - else saturation - ) + self.brightness = [max(0, 1 - brightness), 1 + brightness] if type(brightness) in {float, int} else brightness + self.contrast = [max(0, 1 - contrast), 1 + contrast] if type(contrast) in {float, int} else contrast + self.saturation = [max(0, 1 - saturation), 1 + saturation] if type(saturation) in {float, int} else saturation self.hue = [-hue, hue] if type(hue) in {float, int} else hue self.num_samples = num_samples @@ -876,21 +797,15 @@ def get_transform(self): if self.brightness is not None: brightness_factor = random.uniform(self.brightness[0], self.brightness[1]) - transforms.append( - Lambda(lambda img: TVF.adjust_brightness(img, brightness_factor)) - ) + transforms.append(Lambda(lambda img: TVF.adjust_brightness(img, brightness_factor))) if self.contrast is not None: contrast_factor = random.uniform(self.contrast[0], self.contrast[1]) - transforms.append( - Lambda(lambda img: TVF.adjust_contrast(img, contrast_factor)) - ) + transforms.append(Lambda(lambda img: TVF.adjust_contrast(img, contrast_factor))) if self.saturation is not None: saturation_factor = random.uniform(self.saturation[0], self.saturation[1]) - transforms.append( - Lambda(lambda img: TVF.adjust_saturation(img, saturation_factor)) - ) + transforms.append(Lambda(lambda img: TVF.adjust_saturation(img, saturation_factor))) if self.hue is not None: hue_factor = random.uniform(self.hue[0], self.hue[1]) @@ -914,11 +829,7 @@ def get_batch_transform(self, N): each sub-set of samples along batch dimension, assumed to be the FIRST dimension in the inputted tensor Note: This function will MULTIPLY the first dimension by N """ - return Lambda( - lambda x: torch.stack( - [self.get_transform()(x_) for x_ in x for _ in range(N)] - ) - ) + return Lambda(lambda x: torch.stack([self.get_transform()(x_) for x_ in x for _ in range(N)])) def output_shape_in(self, input_shape=None): # outputs are same shape as inputs @@ -935,7 +846,7 @@ def _forward_in(self, inputs): Samples N random color jitters for each input in the batch, and then reshapes inputs to [B * N, ...]. """ - assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions + assert len(inputs.shape) >= 3 # must have at least (C, H, W) dimensions # Make sure shape is exactly 4 if len(inputs.shape) == 3: @@ -952,49 +863,37 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. 
""" - batch_size = inputs.shape[0] // self.num_samples - out = TensorUtils.reshape_dimensions( - inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_samples) - ) + batch_size = (inputs.shape[0] // self.num_samples) + out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, + target_dims=(batch_size, self.num_samples)) return out.mean(dim=1) - def _visualize( - self, pre_random_input, randomized_input, num_samples_to_visualize=2 - ): + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint( - 0, batch_size, size=(num_samples_to_visualize,) - ) + random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples), + target_dims=(batch_size, self.num_samples) ) # [B * N, ...] -> [B, N, ...] randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose( - (0, 2, 3, 1) - ) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose( - (0, 1, 3, 4, 2) - ) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name="{}".format(str(self.__class__.__name__)), + randomizer_name='{}'.format(str(self.__class__.__name__)) ) def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = ( - header - + f"(input_shape={self.input_shape}, brightness={self.brightness}, contrast={self.contrast}, " - f"saturation={self.saturation}, hue={self.hue}, num_samples={self.num_samples})" - ) + header = '{}'.format(str(self.__class__.__name__)) + msg = header + f"(input_shape={self.input_shape}, brightness={self.brightness}, contrast={self.contrast}, " \ + f"saturation={self.saturation}, hue={self.hue}, num_samples={self.num_samples})" return msg @@ -1002,7 +901,6 @@ class GaussianNoiseRandomizer(Randomizer): """ Randomly sample gaussian noise at input, and then average across noises at output. """ - def __init__( self, input_shape, @@ -1045,11 +943,7 @@ def _forward_in(self, inputs): out = TensorUtils.repeat_by_expand_at(inputs, repeats=self.num_samples, dim=0) # Sample noise across all samples - out = ( - torch.rand(size=out.shape).to(inputs.device) * self.noise_std - + self.noise_mean - + out - ) + out = torch.rand(size=out.shape).to(inputs.device) * self.noise_std + self.noise_mean + out # Possibly clamp if self.limits is not None: @@ -1063,47 +957,35 @@ def _forward_out(self, inputs): to result in shape [B, ...] to make sure the network output is consistent with what would have happened if there were no randomization. 
""" - batch_size = inputs.shape[0] // self.num_samples - out = TensorUtils.reshape_dimensions( - inputs, begin_axis=0, end_axis=0, target_dims=(batch_size, self.num_samples) - ) + batch_size = (inputs.shape[0] // self.num_samples) + out = TensorUtils.reshape_dimensions(inputs, begin_axis=0, end_axis=0, + target_dims=(batch_size, self.num_samples)) return out.mean(dim=1) - def _visualize( - self, pre_random_input, randomized_input, num_samples_to_visualize=2 - ): + def _visualize(self, pre_random_input, randomized_input, num_samples_to_visualize=2): batch_size = pre_random_input.shape[0] - random_sample_inds = torch.randint( - 0, batch_size, size=(num_samples_to_visualize,) - ) + random_sample_inds = torch.randint(0, batch_size, size=(num_samples_to_visualize,)) pre_random_input_np = TensorUtils.to_numpy(pre_random_input)[random_sample_inds] randomized_input = TensorUtils.reshape_dimensions( randomized_input, begin_axis=0, end_axis=0, - target_dims=(batch_size, self.num_samples), + target_dims=(batch_size, self.num_samples) ) # [B * N, ...] -> [B, N, ...] randomized_input_np = TensorUtils.to_numpy(randomized_input[random_sample_inds]) - pre_random_input_np = pre_random_input_np.transpose( - (0, 2, 3, 1) - ) # [B, C, H, W] -> [B, H, W, C] - randomized_input_np = randomized_input_np.transpose( - (0, 1, 3, 4, 2) - ) # [B, N, C, H, W] -> [B, N, H, W, C] + pre_random_input_np = pre_random_input_np.transpose((0, 2, 3, 1)) # [B, C, H, W] -> [B, H, W, C] + randomized_input_np = randomized_input_np.transpose((0, 1, 3, 4, 2)) # [B, N, C, H, W] -> [B, N, H, W, C] visualize_image_randomizer( pre_random_input_np, randomized_input_np, - randomizer_name="{}".format(str(self.__class__.__name__)), + randomizer_name='{}'.format(str(self.__class__.__name__)) ) def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = ( - header - + f"(input_shape={self.input_shape}, noise_mean={self.noise_mean}, noise_std={self.noise_std}, " - f"limits={self.limits}, num_samples={self.num_samples})" - ) + header = '{}'.format(str(self.__class__.__name__)) + msg = header + f"(input_shape={self.input_shape}, noise_mean={self.noise_mean}, noise_std={self.noise_std}, " \ + f"limits={self.limits}, num_samples={self.num_samples})" return msg diff --git a/robomimic/models/obs_nets.py b/robomimic/models/obs_nets.py index f1fc7a79..4a0b9483 100644 --- a/robomimic/models/obs_nets.py +++ b/robomimic/models/obs_nets.py @@ -7,7 +7,6 @@ As an example, an observation could consist of a flat "robot0_eef_pos" observation key, and a 3-channel RGB "agentview_image" observation key. 
""" - import sys import numpy as np import textwrap @@ -22,25 +21,18 @@ from robomimic.utils.python_utils import extract_class_init_kwargs_from_dict import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.obs_utils as ObsUtils -from robomimic.models.base_nets import ( - Module, - Sequential, - MLP, - RNN_Base, - ResNet18Conv, - SpatialSoftmax, - FeatureAggregator, -) +from robomimic.models.base_nets import Module, Sequential, MLP, RNN_Base, ResNet18Conv, SpatialSoftmax, \ + FeatureAggregator from robomimic.models.obs_core import VisualCore, Randomizer from robomimic.models.transformers import PositionalEncoding, GPT_Backbone from robomimic.models.base_nets import Vit def obs_encoder_factory( - obs_shapes, - feature_activation=nn.ReLU, - encoder_kwargs=None, -): + obs_shapes, + feature_activation=nn.ReLU, + encoder_kwargs=None, + ): """ Utility function to create an @ObservationEncoder from kwargs specified in config. @@ -71,16 +63,11 @@ def obs_encoder_factory( enc = ObservationEncoder(feature_activation=feature_activation) for k, obs_shape in obs_shapes.items(): obs_modality = ObsUtils.OBS_KEYS_TO_MODALITIES[k] - enc_kwargs = ( - deepcopy(ObsUtils.DEFAULT_ENCODER_KWARGS[obs_modality]) - if encoder_kwargs is None - else deepcopy(encoder_kwargs[obs_modality]) - ) + enc_kwargs = deepcopy(ObsUtils.DEFAULT_ENCODER_KWARGS[obs_modality]) if encoder_kwargs is None else \ + deepcopy(encoder_kwargs[obs_modality]) - for obs_module, cls_mapping in zip( - ("core", "obs_randomizer"), - (ObsUtils.OBS_ENCODER_CORES, ObsUtils.OBS_RANDOMIZERS), - ): + for obs_module, cls_mapping in zip(("core", "obs_randomizer"), + (ObsUtils.OBS_ENCODER_CORES, ObsUtils.OBS_RANDOMIZERS)): # Sanity check for kwargs in case they don't exist / are None if enc_kwargs.get(f"{obs_module}_kwargs", None) is None: enc_kwargs[f"{obs_module}_kwargs"] = {} @@ -88,22 +75,15 @@ def obs_encoder_factory( enc_kwargs[f"{obs_module}_kwargs"]["input_shape"] = obs_shape # If group class is specified, then make sure corresponding kwargs only contain relevant kwargs if enc_kwargs[f"{obs_module}_class"] is not None: - enc_kwargs[f"{obs_module}_kwargs"] = ( - extract_class_init_kwargs_from_dict( - cls=cls_mapping[enc_kwargs[f"{obs_module}_class"]], - dic=enc_kwargs[f"{obs_module}_kwargs"], - copy=False, - ) + enc_kwargs[f"{obs_module}_kwargs"] = extract_class_init_kwargs_from_dict( + cls=cls_mapping[enc_kwargs[f"{obs_module}_class"]], + dic=enc_kwargs[f"{obs_module}_kwargs"], + copy=False, ) # Add in input shape info - randomizer = ( - None - if enc_kwargs["obs_randomizer_class"] is None - else ObsUtils.OBS_RANDOMIZERS[enc_kwargs["obs_randomizer_class"]]( - **enc_kwargs["obs_randomizer_kwargs"] - ) - ) + randomizer = None if enc_kwargs["obs_randomizer_class"] is None else \ + ObsUtils.OBS_RANDOMIZERS[enc_kwargs["obs_randomizer_class"]](**enc_kwargs["obs_randomizer_kwargs"]) enc.register_obs_key( name=k, @@ -124,7 +104,6 @@ class ObservationEncoder(Module): Call @register_obs_key to register observation keys with the encoder and then finally call @make to create the encoder networks. """ - def __init__(self, feature_activation=nn.ReLU): """ Args: @@ -169,22 +148,13 @@ def register_obs_key( as another observation key. This observation key must already exist in this encoder. 
Warning: Note that this does not share the observation key randomizer """ - assert ( - not self._locked - ), "ObservationEncoder: @register_obs_key called after @make" - assert ( - name not in self.obs_shapes - ), "ObservationEncoder: modality {} already exists".format(name) + assert not self._locked, "ObservationEncoder: @register_obs_key called after @make" + assert name not in self.obs_shapes, "ObservationEncoder: modality {} already exists".format(name) if net is not None: - assert isinstance( - net, Module - ), "ObservationEncoder: @net must be instance of Module class" - assert ( - (net_class is None) - and (net_kwargs is None) - and (share_net_from is None) - ), "ObservationEncoder: @net provided - ignore other net creation options" + assert isinstance(net, Module), "ObservationEncoder: @net must be instance of Module class" + assert (net_class is None) and (net_kwargs is None) and (share_net_from is None), \ + "ObservationEncoder: @net provided - ignore other net creation options" if share_net_from is not None: # share processing with another modality @@ -222,9 +192,7 @@ def _create_layers(self): for k in self.obs_shapes: if self.obs_nets_classes[k] is not None: # create net to process this modality - self.obs_nets[k] = ObsUtils.OBS_ENCODER_CORES[self.obs_nets_classes[k]]( - **self.obs_nets_kwargs[k] - ) + self.obs_nets[k] = ObsUtils.OBS_ENCODER_CORES[self.obs_nets_classes[k]](**self.obs_nets_kwargs[k]) elif self.obs_share_mods[k] is not None: # make sure net is shared with another modality self.obs_nets[k] = self.obs_nets[self.obs_share_mods[k]] @@ -252,9 +220,7 @@ def forward(self, obs_dict): assert self._locked, "ObservationEncoder: @make has not been called yet" # ensure all modalities that the encoder handles are present - assert set(self.obs_shapes.keys()).issubset( - obs_dict - ), "ObservationEncoder: {} does not contain all modalities {}".format( + assert set(self.obs_shapes.keys()).issubset(obs_dict), "ObservationEncoder: {} does not contain all modalities {}".format( list(obs_dict.keys()), list(self.obs_shapes.keys()) ) @@ -300,27 +266,19 @@ def __repr__(self): """ Pretty print the encoder. 
""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" + header = '{}'.format(str(self.__class__.__name__)) + msg = '' for k in self.obs_shapes: - msg += textwrap.indent("\nKey(\n", " " * 4) - indent = " " * 8 - msg += textwrap.indent( - "name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent - ) - msg += textwrap.indent( - "modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent - ) - msg += textwrap.indent( - "randomizer={}\n".format(self.obs_randomizers[k]), indent - ) + msg += textwrap.indent('\nKey(\n', ' ' * 4) + indent = ' ' * 8 + msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent) + msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent) + msg += textwrap.indent("randomizer={}\n".format(self.obs_randomizers[k]), indent) msg += textwrap.indent("net={}\n".format(self.obs_nets[k]), indent) - msg += textwrap.indent( - "sharing_from={}\n".format(self.obs_share_mods[k]), indent - ) - msg += textwrap.indent(")", " " * 4) - msg += textwrap.indent("\noutput_shape={}".format(self.output_shape()), " " * 4) - msg = header + "(" + msg + "\n)" + msg += textwrap.indent("sharing_from={}\n".format(self.obs_share_mods[k]), indent) + msg += textwrap.indent(")", ' ' * 4) + msg += textwrap.indent("\noutput_shape={}".format(self.output_shape()), ' ' * 4) + msg = header + '(' + msg + '\n)' return msg @@ -332,7 +290,6 @@ class ObservationDecoder(Module): module in order to implement more complex schemes for generating each modality. """ - def __init__( self, decode_shapes, @@ -371,7 +328,7 @@ def output_shape(self, input_shape=None): Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return {k: list(self.obs_shapes[k]) for k in self.obs_shapes} + return { k : list(self.obs_shapes[k]) for k in self.obs_shapes } def forward(self, feats): """ @@ -385,20 +342,16 @@ def forward(self, feats): def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" + header = '{}'.format(str(self.__class__.__name__)) + msg = '' for k in self.obs_shapes: - msg += textwrap.indent("\nKey(\n", " " * 4) - indent = " " * 8 - msg += textwrap.indent( - "name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent - ) - msg += textwrap.indent( - "modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent - ) + msg += textwrap.indent('\nKey(\n', ' ' * 4) + indent = ' ' * 8 + msg += textwrap.indent("name={}\nshape={}\n".format(k, self.obs_shapes[k]), indent) + msg += textwrap.indent("modality={}\n".format(ObsUtils.OBS_KEYS_TO_MODALITIES[k]), indent) msg += textwrap.indent("net=({})\n".format(self.nets[k]), indent) - msg += textwrap.indent(")", " " * 4) - msg = header + "(" + msg + "\n)" + msg += textwrap.indent(")", ' ' * 4) + msg = header + '(' + msg + '\n)' return msg @@ -413,7 +366,6 @@ class ObservationGroupEncoder(Module): and each OrderedDict should be a map between modalities and expected input shapes (e.g. { 'image' : (3, 120, 160) }). 
""" - def __init__( self, observation_group_shapes, @@ -451,12 +403,7 @@ def __init__( # type checking assert isinstance(observation_group_shapes, OrderedDict) - assert np.all( - [ - isinstance(observation_group_shapes[k], OrderedDict) - for k in observation_group_shapes - ] - ) + assert np.all([isinstance(observation_group_shapes[k], OrderedDict) for k in observation_group_shapes]) self.observation_group_shapes = observation_group_shapes @@ -487,9 +434,7 @@ def forward(self, **inputs): """ # ensure all observation groups we need are present - assert set(self.observation_group_shapes.keys()).issubset( - inputs - ), "{} does not contain all observation groups {}".format( + assert set(self.observation_group_shapes.keys()).issubset(inputs), "{} does not contain all observation groups {}".format( list(inputs.keys()), list(self.observation_group_shapes.keys()) ) @@ -497,7 +442,9 @@ def forward(self, **inputs): # Deterministic order since self.observation_group_shapes is OrderedDict for obs_group in self.observation_group_shapes: # pass through encoder - outputs.append(self.nets[obs_group].forward(inputs[obs_group])) + outputs.append( + self.nets[obs_group].forward(inputs[obs_group]) + ) return torch.cat(outputs, dim=-1) @@ -513,36 +460,35 @@ def output_shape(self): def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" + header = '{}'.format(str(self.__class__.__name__)) + msg = '' for k in self.observation_group_shapes: - msg += "\n" - indent = " " * 4 + msg += '\n' + indent = ' ' * 4 msg += textwrap.indent("group={}\n{}".format(k, self.nets[k]), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg class MIMO_MLP(Module): """ Extension to MLP to accept multiple observation dictionaries as input and - to output dictionaries of tensors. Inputs are specified as a dictionary of + to output dictionaries of tensors. Inputs are specified as a dictionary of observation dictionaries, with each key corresponding to an observation group. This module utilizes @ObservationGroupEncoder to process the multiple input dictionaries and @ObservationDecoder to generate tensor dictionaries. The default behavior for encoding the inputs is to process visual inputs with a learned CNN and concatenating - the flat encodings with the other flat inputs. The default behavior for generating + the flat encodings with the other flat inputs. The default behavior for generating outputs is to use a linear layer branch to produce each modality separately (including visual outputs). 
""" - def __init__( self, input_obs_group_shapes, output_shapes, layer_dims, - layer_func=nn.Linear, + layer_func=nn.Linear, activation=nn.ReLU, encoder_kwargs=None, ): @@ -582,12 +528,7 @@ def __init__( super(MIMO_MLP, self).__init__() assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all( - [ - isinstance(input_obs_group_shapes[k], OrderedDict) - for k in input_obs_group_shapes - ] - ) + assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes @@ -611,7 +552,7 @@ def __init__( layer_dims=layer_dims[:-1], layer_func=layer_func, activation=activation, - output_activation=activation, # make sure non-linearity is applied before decoder + output_activation=activation, # make sure non-linearity is applied before decoder ) # decoder for output modalities @@ -625,7 +566,7 @@ def output_shape(self, input_shape=None): Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return {k: list(self.output_shapes[k]) for k in self.output_shapes} + return { k : list(self.output_shapes[k]) for k in self.output_shapes } def forward(self, return_latent=False, **inputs): """ @@ -651,22 +592,21 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return "" + return '' def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 - if self._to_string() != "": + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 + if self._to_string() != '': msg += textwrap.indent("\n" + self._to_string() + "\n", indent) msg += textwrap.indent("\nencoder={}".format(self.nets["encoder"]), indent) msg += textwrap.indent("\n\nmlp={}".format(self.nets["mlp"]), indent) msg += textwrap.indent("\n\ndecoder={}".format(self.nets["decoder"]), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg - class RNN_MIMO_MLP(Module): """ A wrapper class for a multi-step RNN and a per-step MLP and a decoder. @@ -674,9 +614,8 @@ class RNN_MIMO_MLP(Module): Structure: [encoder -> rnn -> mlp -> decoder] All temporal inputs are processed by a shared @ObservationGroupEncoder, - followed by an RNN, and then a per-step multi-output MLP. + followed by an RNN, and then a per-step multi-output MLP. """ - def __init__( self, input_obs_group_shapes, @@ -710,7 +649,7 @@ def __init__( rnn_kwargs (dict): kwargs for the rnn model per_step (bool): if True, apply the MLP and observation decoder into @output_shapes - at every step of the RNN. Otherwise, apply them to the final hidden state of the + at every step of the RNN. Otherwise, apply them to the final hidden state of the RNN. encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. 
Otherwise, should @@ -732,12 +671,7 @@ def __init__( """ super(RNN_MIMO_MLP, self).__init__() assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all( - [ - isinstance(input_obs_group_shapes[k], OrderedDict) - for k in input_obs_group_shapes - ] - ) + assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes self.output_shapes = output_shapes @@ -756,20 +690,18 @@ def __init__( # bidirectional RNNs mean that the output of RNN will be twice the hidden dimension rnn_is_bidirectional = rnn_kwargs.get("bidirectional", False) - num_directions = ( - int(rnn_is_bidirectional) + 1 - ) # 2 if bidirectional, 1 otherwise + num_directions = int(rnn_is_bidirectional) + 1 # 2 if bidirectional, 1 otherwise rnn_output_dim = num_directions * rnn_hidden_dim per_step_net = None - self._has_mlp = len(mlp_layer_dims) > 0 + self._has_mlp = (len(mlp_layer_dims) > 0) if self._has_mlp: self.nets["mlp"] = MLP( input_dim=rnn_output_dim, output_dim=mlp_layer_dims[-1], layer_dims=mlp_layer_dims[:-1], output_activation=mlp_activation, - layer_func=mlp_layer_func, + layer_func=mlp_layer_func ) self.nets["decoder"] = ObservationDecoder( decode_shapes=self.output_shapes, @@ -792,7 +724,7 @@ def __init__( rnn_num_layers=rnn_num_layers, rnn_type=rnn_type, per_step_net=per_step_net, - rnn_kwargs=rnn_kwargs, + rnn_kwargs=rnn_kwargs ) def get_rnn_init_state(self, batch_size, device): @@ -825,14 +757,10 @@ def output_shape(self, input_shape): obs_group = list(self.input_obs_group_shapes.keys())[0] mod = list(self.input_obs_group_shapes[obs_group].keys())[0] T = input_shape[obs_group][mod][0] - TensorUtils.assert_size_at_dim( - input_shape, - size=T, - dim=0, - msg="RNN_MIMO_MLP: input_shape inconsistent in temporal dimension", - ) + TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, + msg="RNN_MIMO_MLP: input_shape inconsistent in temporal dimension") # returns a dictionary instead of list since outputs are dictionaries - return {k: [T] + list(self.output_shapes[k]) for k in self.output_shapes} + return { k : [T] + list(self.output_shapes[k]) for k in self.output_shapes } def forward(self, rnn_init_state=None, return_state=False, **inputs): """ @@ -857,30 +785,20 @@ def forward(self, rnn_init_state=None, return_state=False, **inputs): for obs_group in self.input_obs_group_shapes: for k in self.input_obs_group_shapes[obs_group]: # first two dimensions should be [B, T] for inputs - assert inputs[obs_group][k].ndim - 2 == len( - self.input_obs_group_shapes[obs_group][k] - ) + assert inputs[obs_group][k].ndim - 2 == len(self.input_obs_group_shapes[obs_group][k]) # use encoder to extract flat rnn inputs - rnn_inputs = TensorUtils.time_distributed( - inputs, self.nets["encoder"], inputs_as_kwargs=True - ) + rnn_inputs = TensorUtils.time_distributed(inputs, self.nets["encoder"], inputs_as_kwargs=True) assert rnn_inputs.ndim == 3 # [B, T, D] if self.per_step: - return self.nets["rnn"].forward( - inputs=rnn_inputs, - rnn_init_state=rnn_init_state, - return_state=return_state, - ) - + return self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state) + # apply MLP + decoder to last RNN output - outputs = self.nets["rnn"].forward( - inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state - ) + outputs = self.nets["rnn"].forward(inputs=rnn_inputs, rnn_init_state=rnn_init_state, return_state=return_state) if 
return_state: outputs, rnn_state = outputs - assert outputs.ndim == 3 # [B, T, D] + assert outputs.ndim == 3 # [B, T, D] if self._has_mlp: outputs = self.nets["decoder"](self.nets["mlp"](outputs[:, -1])) else: @@ -896,7 +814,7 @@ def forward_step(self, rnn_state, **inputs): Args: inputs (dict): expects same modalities as @self.input_shapes, with - additional batch dimension (but NOT time), since this is a + additional batch dimension (but NOT time), since this is a single time step. rnn_state (torch.Tensor): rnn hidden state @@ -907,14 +825,12 @@ def forward_step(self, rnn_state, **inputs): rnn_state: return the new rnn state """ - # ensure that the only extra dimension is batch dim, not temporal dim - assert np.all( - [inputs[k].ndim - 1 == len(self.input_shapes[k]) for k in self.input_shapes] - ) + # ensure that the only extra dimension is batch dim, not temporal dim + assert np.all([inputs[k].ndim - 1 == len(self.input_shapes[k]) for k in self.input_shapes]) inputs = TensorUtils.to_sequence(inputs) outputs, rnn_state = self.forward( - inputs, + inputs, rnn_init_state=rnn_state, return_state=True, ) @@ -927,33 +843,32 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return "" + return '' def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 msg += textwrap.indent("\n" + self._to_string(), indent) msg += textwrap.indent("\n\nencoder={}".format(self.nets["encoder"]), indent) msg += textwrap.indent("\n\nrnn={}".format(self.nets["rnn"]), indent) - msg = header + "(" + msg + "\n)" + msg = header + '(' + msg + '\n)' return msg class MIMO_Transformer(Module): """ - Extension to Transformer (based on GPT architecture) to accept multiple observation - dictionaries as input and to output dictionaries of tensors. Inputs are specified as + Extension to Transformer (based on GPT architecture) to accept multiple observation + dictionaries as input and to output dictionaries of tensors. Inputs are specified as a dictionary of observation dictionaries, with each key corresponding to an observation group. This module utilizes @ObservationGroupEncoder to process the multiple input dictionaries and @ObservationDecoder to generate tensor dictionaries. The default behavior for encoding the inputs is to process visual inputs with a learned CNN and concatenating - the flat encodings with the other flat inputs. The default behavior for generating + the flat encodings with the other flat inputs. The default behavior for generating outputs is to use a linear layer branch to produce each modality separately (including visual outputs). """ - def __init__( self, input_obs_group_shapes, @@ -981,7 +896,7 @@ def __init__( transformer_embed_dim (int): dimension for embeddings used by transformer transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. Self-attention is computed over this many partitions of the embedding dimension separately. 
transformer_context_length (int): expected length of input sequences transformer_activation: non-linearity for input and output layers used in transformer @@ -991,14 +906,9 @@ def __init__( encoder_kwargs (dict): observation encoder config """ super(MIMO_Transformer, self).__init__() - + assert isinstance(input_obs_group_shapes, OrderedDict) - assert np.all( - [ - isinstance(input_obs_group_shapes[k], OrderedDict) - for k in input_obs_group_shapes - ] - ) + assert np.all([isinstance(input_obs_group_shapes[k], OrderedDict) for k in input_obs_group_shapes]) assert isinstance(output_shapes, OrderedDict) self.input_obs_group_shapes = input_obs_group_shapes @@ -1033,13 +943,11 @@ def __init__( torch.zeros(1, max_timestep, transformer_embed_dim) ) else: - self.nets["embed_timestep"] = nn.Embedding( - max_timestep, transformer_embed_dim - ) + self.nets["embed_timestep"] = nn.Embedding(max_timestep, transformer_embed_dim) # layer norm for embeddings self.nets["embed_ln"] = nn.LayerNorm(transformer_embed_dim) - + # dropout for input embeddings self.nets["embed_drop"] = nn.Dropout(transformer_emb_dropout) @@ -1063,16 +971,14 @@ def __init__( self.transformer_context_length = transformer_context_length self.transformer_embed_dim = transformer_embed_dim self.transformer_sinusoidal_embedding = transformer_sinusoidal_embedding - self.transformer_nn_parameter_for_timesteps = ( - transformer_nn_parameter_for_timesteps - ) + self.transformer_nn_parameter_for_timesteps = transformer_nn_parameter_for_timesteps def output_shape(self, input_shape=None): """ Returns output shape for this module, which is a dictionary instead of a list since outputs are dictionaries. """ - return {k: list(self.output_shapes[k]) for k in self.output_shapes} + return { k : list(self.output_shapes[k]) for k in self.output_shapes } def embed_timesteps(self, embeddings): """ @@ -1106,9 +1012,7 @@ def embed_timesteps(self, embeddings): ) # these are NOT fed into transformer, only added to the inputs. # compute how many modalities were combined into embeddings, replicate time embeddings that many times num_replicates = embeddings.shape[-1] // self.transformer_embed_dim - time_embeddings = torch.cat( - [time_embeddings for _ in range(num_replicates)], -1 - ) + time_embeddings = torch.cat([time_embeddings for _ in range(num_replicates)], -1) assert ( embeddings.shape == time_embeddings.shape ), f"{embeddings.shape}, {time_embeddings.shape}" @@ -1134,6 +1038,7 @@ def input_embedding( return embeddings + def forward(self, **inputs): """ Process each set of inputs in its own observation group. 
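        Inputs arrive keyed by observation group, e.g. (a sketch with
        illustrative names, matching how the actor networks below call this):

            outputs = model(obs=obs_dict, goal=goal_dict)

        where each group value is a dict of [B, T, ...] tensors per modality.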
@@ -1153,9 +1058,7 @@ def forward(self, **inputs): # first two dimensions should be [B, T] for inputs if inputs[obs_group][k] is None: continue - assert inputs[obs_group][k].ndim - 2 == len( - self.input_obs_group_shapes[obs_group][k] - ) + assert inputs[obs_group][k].ndim - 2 == len(self.input_obs_group_shapes[obs_group][k]) inputs = inputs.copy() @@ -1168,9 +1071,7 @@ def forward(self, **inputs): if transformer_encoder_outputs is None: transformer_embeddings = self.input_embedding(transformer_inputs) # pass encoded sequences through transformer - transformer_encoder_outputs = self.nets["transformer"].forward( - transformer_embeddings - ) + transformer_encoder_outputs = self.nets["transformer"].forward(transformer_embeddings) transformer_outputs = transformer_encoder_outputs # apply decoder to each timestep of sequence to get a dictionary of outputs @@ -1184,19 +1085,17 @@ def _to_string(self): """ Subclasses should override this method to print out info about network / policy. """ - return "" + return '' def __repr__(self): """Pretty print network.""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 - if self._to_string() != "": + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 + if self._to_string() != '': msg += textwrap.indent("\n" + self._to_string() + "\n", indent) msg += textwrap.indent("\nencoder={}".format(self.nets["encoder"]), indent) - msg += textwrap.indent( - "\n\ntransformer={}".format(self.nets["transformer"]), indent - ) + msg += textwrap.indent("\n\ntransformer={}".format(self.nets["transformer"]), indent) msg += textwrap.indent("\n\ndecoder={}".format(self.nets["decoder"]), indent) - msg = header + "(" + msg + "\n)" - return msg + msg = header + '(' + msg + '\n)' + return msg \ No newline at end of file diff --git a/robomimic/models/policy_nets.py b/robomimic/models/policy_nets.py index b45dcbc5..8dba1d93 100644 --- a/robomimic/models/policy_nets.py +++ b/robomimic/models/policy_nets.py @@ -6,7 +6,6 @@ are assumed to lie in [-1, 1], and most networks will have a final tanh activation to help ensure this range. """ - import textwrap import numpy as np from collections import OrderedDict @@ -19,12 +18,7 @@ import robomimic.utils.tensor_utils as TensorUtils from robomimic.models.base_nets import Module from robomimic.models.transformers import GPT_Backbone -from robomimic.models.obs_nets import ( - MIMO_MLP, - RNN_MIMO_MLP, - MIMO_Transformer, - ObservationDecoder, -) +from robomimic.models.obs_nets import MIMO_MLP, RNN_MIMO_MLP, MIMO_Transformer, ObservationDecoder from robomimic.models.vae_nets import VAE from robomimic.models.distributions import TanhWrappedDistribution @@ -34,7 +28,6 @@ class ActorNetwork(MIMO_MLP): A basic policy network that predicts actions from observations. Can optionally be goal conditioned on future observations. """ - def __init__( self, obs_shapes, @@ -109,9 +102,7 @@ def output_shape(self, input_shape=None): return [self.ac_dim] def forward(self, obs_dict, goal_dict=None): - actions = super(ActorNetwork, self).forward(obs=obs_dict, goal=goal_dict)[ - "action" - ] + actions = super(ActorNetwork, self).forward(obs=obs_dict, goal=goal_dict)["action"] # apply tanh squashing to ensure actions are in [-1, 1] return torch.tanh(actions) @@ -125,7 +116,6 @@ class PerturbationActorNetwork(ActorNetwork): An action perturbation network - primarily used in BCQ. It takes states and actions and returns action perturbations. 
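    A sketch of intended usage (illustrative names, not defined in this diff):

        # perturb candidate actions from a generative model, as in BCQ
        perturbed_actions = perturbation_actor(obs_dict, candidate_actions)

    The output equals the input actions plus a tanh-squashed correction in
    +/- perturbation_scale, clipped back to [-1, 1].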
""" - def __init__( self, obs_shapes, @@ -144,8 +134,8 @@ def __init__( mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. - perturbation_scale (float): the perturbation network output is always squashed to - lie in +/- @perturbation_scale. The final action output is equal to the original + perturbation_scale (float): the perturbation network output is always squashed to + lie in +/- @perturbation_scale. The final action output is equal to the original input action added to the output perturbation (and clipped to lie in [-1, 1]). goal_shapes (OrderedDict): a dictionary that maps modality to @@ -197,9 +187,7 @@ def forward(self, obs_dict, acts, goal_dict=None): def _to_string(self): """Info to pretty print.""" - return "action_dim={}, perturbation_scale={}".format( - self.ac_dim, self.perturbation_scale - ) + return "action_dim={}, perturbation_scale={}".format(self.ac_dim, self.perturbation_scale) class GaussianActorNetwork(ActorNetwork): @@ -207,7 +195,6 @@ class GaussianActorNetwork(ActorNetwork): Variant of actor network that learns a diagonal unimodal Gaussian distribution over actions. """ - def __init__( self, obs_shapes, @@ -300,11 +287,8 @@ def softplus_scaled(x): "softplus": softplus_scaled, "exp": torch.exp, } - assert ( - std_activation in self.activations - ), "std_activation must be one of: {}; instead got: {}".format( - self.activations.keys(), std_activation - ) + assert std_activation in self.activations, \ + "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) self.std_activation = std_activation if not self.fixed_std else None self.low_noise_eval = low_noise_eval @@ -322,12 +306,8 @@ def softplus_scaled(x): if init_last_fc_weight is not None: with torch.no_grad(): for name, layer in self.nets["decoder"].nets.items(): - torch.nn.init.uniform_( - layer.weight, -init_last_fc_weight, init_last_fc_weight - ) - torch.nn.init.uniform_( - layer.bias, -init_last_fc_weight, init_last_fc_weight - ) + torch.nn.init.uniform_(layer.weight, -init_last_fc_weight, init_last_fc_weight) + torch.nn.init.uniform_(layer.bias, -init_last_fc_weight, init_last_fc_weight) def _get_output_shapes(self): """ @@ -335,14 +315,14 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of Gaussian distribution. """ return OrderedDict( - mean=(self.ac_dim,), + mean=(self.ac_dim,), scale=(self.ac_dim,), ) def forward_train(self, obs_dict, goal_dict=None): """ Return full Gaussian distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. 
Args: @@ -355,11 +335,7 @@ def forward_train(self, obs_dict, goal_dict=None): out = MIMO_MLP.forward(self, obs=obs_dict, goal=goal_dict) mean = out["mean"] # Use either constant std or learned std depending on setting - scale = ( - out["scale"] - if not self.fixed_std - else torch.ones_like(mean) * self.init_std - ) + scale = out["scale"] if not self.fixed_std else torch.ones_like(mean) * self.init_std # Clamp the mean mean = torch.clamp(mean, min=self.mean_limits[0], max=self.mean_limits[1]) @@ -378,15 +354,16 @@ def forward_train(self, obs_dict, goal_dict=None): # Clamp the scale scale = torch.clamp(scale, min=self.std_limits[0], max=self.std_limits[1]) + # the Independent call will make it so that `batch_shape` for dist will be equal to batch size - # while `event_shape` will be equal to action dimension - ensuring that log-probability + # while `event_shape` will be equal to action dimension - ensuring that log-probability # computations are summed across the action dimension dist = D.Normal(loc=mean, scale=scale) dist = D.Independent(dist, 1) if self.use_tanh: # Wrap distribution with Tanh - dist = TanhWrappedDistribution(base_dist=dist, scale=1.0) + dist = TanhWrappedDistribution(base_dist=dist, scale=1.) return dist @@ -413,14 +390,7 @@ def forward(self, obs_dict, goal_dict=None): def _to_string(self): """Info to pretty print.""" msg = "action_dim={}\nfixed_std={}\nstd_activation={}\ninit_std={}\nmean_limits={}\nstd_limits={}\nlow_noise_eval={}".format( - self.ac_dim, - self.fixed_std, - self.std_activation, - self.init_std, - self.mean_limits, - self.std_limits, - self.low_noise_eval, - ) + self.ac_dim, self.fixed_std, self.std_activation, self.init_std, self.mean_limits, self.std_limits, self.low_noise_eval) return msg @@ -429,7 +399,6 @@ class GMMActorNetwork(ActorNetwork): Variant of actor network that learns a multimodal Gaussian mixture distribution over actions. """ - def __init__( self, obs_shapes, @@ -499,11 +468,8 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert ( - std_activation in self.activations - ), "std_activation must be one of: {}; instead got: {}".format( - self.activations.keys(), std_activation - ) + assert std_activation in self.activations, \ + "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) self.std_activation = std_activation super(GMMActorNetwork, self).__init__( @@ -520,15 +486,15 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. """ return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) def forward_train(self, obs_dict, goal_dict=None): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: @@ -570,7 +536,7 @@ def forward_train(self, obs_dict, goal_dict=None): if self.use_tanh: # Wrap distribution with Tanh - dist = TanhWrappedDistribution(base_dist=dist, scale=1.0) + dist = TanhWrappedDistribution(base_dist=dist, scale=1.) 
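        # how a training loop typically consumes this distribution (a sketch;
        # `actor` and `batch` are illustrative names, not part of this diff):
        #   dist = actor.forward_train(batch["obs"], goal_dict=batch.get("goal_obs"))
        #   loss = -dist.log_prob(batch["actions"]).mean()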
return dist @@ -591,19 +557,13 @@ def forward(self, obs_dict, goal_dict=None): def _to_string(self): """Info to pretty print.""" return "action_dim={}\nnum_modes={}\nmin_std={}\nstd_activation={}\nlow_noise_eval={}".format( - self.ac_dim, - self.num_modes, - self.min_std, - self.std_activation, - self.low_noise_eval, - ) + self.ac_dim, self.num_modes, self.min_std, self.std_activation, self.low_noise_eval) class RNNActorNetwork(RNN_MIMO_MLP): """ An RNN policy network that predicts actions from observations. """ - def __init__( self, obs_shapes, @@ -699,17 +659,11 @@ def output_shape(self, input_shape): # infers temporal dimension from input shape mod = list(self.obs_shapes.keys())[0] T = input_shape[mod][0] - TensorUtils.assert_size_at_dim( - input_shape, - size=T, - dim=0, - msg="RNNActorNetwork: input_shape inconsistent in temporal dimension", - ) + TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, + msg="RNNActorNetwork: input_shape inconsistent in temporal dimension") return [T, self.ac_dim] - def forward( - self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False - ): + def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): """ Forward a sequence of inputs through the RNN and the per-step network. @@ -728,23 +682,17 @@ def forward( assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at( - goal_dict, size=obs_dict[mod].shape[1], dim=1 - ) + goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) outputs = super(RNNActorNetwork, self).forward( - obs=obs_dict, - goal=goal_dict, - rnn_init_state=rnn_init_state, - return_state=return_state, - ) + obs=obs_dict, goal=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) if return_state: actions, state = outputs else: actions = outputs state = None - + # apply tanh squashing to ensure actions are in [-1, 1] actions = torch.tanh(actions["action"]) @@ -769,8 +717,7 @@ def forward_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) action, state = self.forward( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True - ) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) return action[:, 0], state def _to_string(self): @@ -782,7 +729,6 @@ class RNNGMMActorNetwork(RNNActorNetwork): """ An RNN GMM policy network that predicts sequences of action distributions from observation sequences. """ - def __init__( self, obs_shapes, @@ -855,11 +801,8 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert ( - std_activation in self.activations - ), "std_activation must be one of: {}; instead got: {}".format( - self.activations.keys(), std_activation - ) + assert std_activation in self.activations, \ + "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) self.std_activation = std_activation super(RNNGMMActorNetwork, self).__init__( @@ -880,17 +823,15 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. 
""" return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) - def forward_train( - self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False - ): + def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: @@ -907,23 +848,16 @@ def forward_train( assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at( - goal_dict, size=obs_dict[mod].shape[1], dim=1 - ) + goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) outputs = RNN_MIMO_MLP.forward( - self, - obs=obs_dict, - goal=goal_dict, - rnn_init_state=rnn_init_state, - return_state=return_state, - ) + self, obs=obs_dict, goal=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) if return_state: outputs, state = outputs else: state = None - + means = outputs["mean"] scales = outputs["scale"] logits = outputs["logits"] @@ -942,9 +876,7 @@ def forward_train( # mixture components - make sure that `batch_shape` for the distribution is equal # to (batch_size, timesteps, num_modes) since MixtureSameFamily expects this shape component_distribution = D.Normal(loc=means, scale=scales) - component_distribution = D.Independent( - component_distribution, 1 - ) # shift action dim to event shape + component_distribution = D.Independent(component_distribution, 1) # shift action dim to event shape # unnormalized logits to categorical distribution for mixing the modes mixture_distribution = D.Categorical(logits=logits) @@ -956,16 +888,14 @@ def forward_train( if self.use_tanh: # Wrap distribution with Tanh - dists = TanhWrappedDistribution(base_dist=dists, scale=1.0) + dists = TanhWrappedDistribution(base_dist=dists, scale=1.) if return_state: return dists, state else: return dists - def forward( - self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False - ): + def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False): """ Samples actions from the policy distribution. @@ -976,12 +906,7 @@ def forward( Returns: action (torch.Tensor): batch of actions from policy distribution """ - out = self.forward_train( - obs_dict=obs_dict, - goal_dict=goal_dict, - rnn_init_state=rnn_init_state, - return_state=return_state, - ) + out = self.forward_train(obs_dict=obs_dict, goal_dict=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state) if return_state: ad, state = out return ad.sample(), state @@ -989,8 +914,8 @@ def forward( def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): """ - Unroll RNN over single timestep to get action GMM distribution, which - is useful for computing quantities necessary at train-time, like + Unroll RNN over single timestep to get action GMM distribution, which + is useful for computing quantities necessary at train-time, like log-likelihood, KL divergence, etc. 
Args: @@ -1005,8 +930,7 @@ def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) ad, state = self.forward_train( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True - ) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) # to squeeze time dimension, make another action distribution assert ad.component_distribution.base_dist.loc.shape[1] == 1 @@ -1017,9 +941,7 @@ def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None): scale=ad.component_distribution.base_dist.scale.squeeze(1), ) component_distribution = D.Independent(component_distribution, 1) - mixture_distribution = D.Categorical( - logits=ad.mixture_distribution.logits.squeeze(1) - ) + mixture_distribution = D.Categorical(logits=ad.mixture_distribution.logits.squeeze(1)) ad = D.MixtureSameFamily( mixture_distribution=mixture_distribution, component_distribution=component_distribution, @@ -1042,20 +964,14 @@ def forward_step(self, obs_dict, goal_dict=None, rnn_state=None): """ obs_dict = TensorUtils.to_sequence(obs_dict) acts, state = self.forward( - obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True - ) + obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True) assert acts.shape[1] == 1 return acts[:, 0], state def _to_string(self): """Info to pretty print.""" msg = "action_dim={}, std_activation={}, low_noise_eval={}, num_nodes={}, min_std={}".format( - self.ac_dim, - self.std_activation, - self.low_noise_eval, - self.num_modes, - self.min_std, - ) + self.ac_dim, self.std_activation, self.low_noise_eval, self.num_modes, self.min_std) return msg @@ -1064,7 +980,6 @@ class TransformerActorNetwork(MIMO_Transformer): An Transformer policy network that predicts actions from observation sequences (assumed to be frame stacked from previous observations) and possible from previous actions as well (in an autoregressive manner). """ - def __init__( self, obs_shapes, @@ -1087,7 +1002,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. - + ac_dim (int): dimension of action space. transformer_embed_dim (int): dimension for embeddings used by transformer @@ -1095,9 +1010,9 @@ def __init__( transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. Self-attention is computed over this many partitions of the embedding dimension separately. - + transformer_context_length (int): expected length of input sequences transformer_embedding_dropout (float): dropout probability for embedding inputs in transformer @@ -1105,10 +1020,10 @@ def __init__( transformer_attn_dropout (float): dropout probability for attention outputs for each transformer block transformer_block_output_dropout (float): dropout probability for final outputs for each transformer block - + goal_shapes (OrderedDict): a dictionary that maps modality to expected shapes for goal observations. - + encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should be nested dictionary containing relevant per-modality information for encoder networks. 
Should be of form: @@ -1131,9 +1046,7 @@ def __init__( assert isinstance(obs_shapes, OrderedDict) self.obs_shapes = obs_shapes - self.transformer_nn_parameter_for_timesteps = ( - transformer_nn_parameter_for_timesteps - ) + self.transformer_nn_parameter_for_timesteps = transformer_nn_parameter_for_timesteps # set up different observation groups for @RNN_MIMO_MLP observation_group_shapes = OrderedDict() @@ -1162,6 +1075,7 @@ def __init__( transformer_sinusoidal_embedding=transformer_sinusoidal_embedding, transformer_activation=transformer_activation, transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, + encoder_kwargs=encoder_kwargs, ) @@ -1179,12 +1093,8 @@ def output_shape(self, input_shape): # infers temporal dimension from input shape mod = list(self.obs_shapes.keys())[0] T = input_shape[mod][0] - TensorUtils.assert_size_at_dim( - input_shape, - size=T, - dim=0, - msg="TransformerActorNetwork: input_shape inconsistent in temporal dimension", - ) + TensorUtils.assert_size_at_dim(input_shape, size=T, dim=0, + msg="TransformerActorNetwork: input_shape inconsistent in temporal dimension") return [T, self.ac_dim] def forward(self, obs_dict, actions=None, goal_dict=None): @@ -1203,9 +1113,7 @@ def forward(self, obs_dict, actions=None, goal_dict=None): assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at( - goal_dict, size=obs_dict[mod].shape[1], dim=1 - ) + goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) forward_kwargs = dict(obs=obs_dict, goal=goal_dict) outputs = super(TransformerActorNetwork, self).forward(**forward_kwargs) @@ -1213,7 +1121,7 @@ def forward(self, obs_dict, actions=None, goal_dict=None): # apply tanh squashing to ensure actions are in [-1, 1] outputs["action"] = torch.tanh(outputs["action"]) - return outputs["action"] # only action sequences + return outputs["action"] # only action sequences def _to_string(self): """Info to pretty print.""" @@ -1222,10 +1130,9 @@ def _to_string(self): class TransformerGMMActorNetwork(TransformerActorNetwork): """ - A Transformer GMM policy network that predicts sequences of action distributions from observation + A Transformer GMM policy network that predicts sequences of action distributions from observation sequences (assumed to be frame stacked from previous observations). """ - def __init__( self, obs_shapes, @@ -1253,7 +1160,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. - + ac_dim (int): dimension of action space. transformer_embed_dim (int): dimension for embeddings used by transformer @@ -1261,9 +1168,9 @@ def __init__( transformer_num_layers (int): number of transformer blocks to stack transformer_num_heads (int): number of attention heads for each - transformer block - must divide @transformer_embed_dim evenly. Self-attention is + transformer block - must divide @transformer_embed_dim evenly. Self-attention is computed over this many partitions of the embedding dimension separately. - + transformer_context_length (int): expected length of input sequences transformer_embedding_dropout (float): dropout probability for embedding inputs in transformer @@ -1304,7 +1211,7 @@ def __init__( obs_modality2: dict ... 
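        Example (a minimal sketch; dims are illustrative and `obs_shapes` is
        assumed to come from the dataset spec):

            actor = TransformerGMMActorNetwork(
                obs_shapes=obs_shapes,
                ac_dim=7,
                transformer_embed_dim=512,
                transformer_num_layers=6,
                transformer_num_heads=8,
                transformer_context_length=10,
                num_modes=5,
            )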
""" - + # parameters specific to GMM actor self.num_modes = num_modes self.min_std = min_std @@ -1316,11 +1223,8 @@ def __init__( "softplus": F.softplus, "exp": torch.exp, } - assert ( - std_activation in self.activations - ), "std_activation must be one of: {}; instead got: {}".format( - self.activations.keys(), std_activation - ) + assert std_activation in self.activations, \ + "std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation) self.std_activation = std_activation super(TransformerGMMActorNetwork, self).__init__( @@ -1335,7 +1239,7 @@ def __init__( transformer_block_output_dropout=transformer_block_output_dropout, transformer_sinusoidal_embedding=transformer_sinusoidal_embedding, transformer_activation=transformer_activation, - transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, + transformer_nn_parameter_for_timesteps=transformer_nn_parameter_for_timesteps, encoder_kwargs=encoder_kwargs, goal_shapes=goal_shapes, ) @@ -1346,17 +1250,15 @@ def _get_output_shapes(self): at the last layer. Network outputs parameters of GMM distribution. """ return OrderedDict( - mean=(self.num_modes, self.ac_dim), - scale=(self.num_modes, self.ac_dim), + mean=(self.num_modes, self.ac_dim), + scale=(self.num_modes, self.ac_dim), logits=(self.num_modes,), ) - def forward_train( - self, obs_dict, actions=None, goal_dict=None, low_noise_eval=None - ): + def forward_train(self, obs_dict, actions=None, goal_dict=None, low_noise_eval=None): """ Return full GMM distribution, which is useful for computing - quantities necessary at train-time, like log-likelihood, KL + quantities necessary at train-time, like log-likelihood, KL divergence, etc. Args: obs_dict (dict): batch of observations @@ -1369,14 +1271,12 @@ def forward_train( assert goal_dict is not None # repeat the goal observation in time to match dimension with obs_dict mod = list(obs_dict.keys())[0] - goal_dict = TensorUtils.unsqueeze_expand_at( - goal_dict, size=obs_dict[mod].shape[1], dim=1 - ) + goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1) forward_kwargs = dict(obs=obs_dict, goal=goal_dict) outputs = MIMO_Transformer.forward(self, **forward_kwargs) - + means = outputs["mean"] scales = outputs["scale"] logits = outputs["logits"] @@ -1397,9 +1297,7 @@ def forward_train( # mixture components - make sure that `batch_shape` for the distribution is equal # to (batch_size, timesteps, num_modes) since MixtureSameFamily expects this shape component_distribution = D.Normal(loc=means, scale=scales) - component_distribution = D.Independent( - component_distribution, 1 - ) # shift action dim to event shape + component_distribution = D.Independent(component_distribution, 1) # shift action dim to event shape # unnormalized logits to categorical distribution for mixing the modes mixture_distribution = D.Categorical(logits=logits) @@ -1411,7 +1309,7 @@ def forward_train( if self.use_tanh: # Wrap distribution with Tanh - dists = TanhWrappedDistribution(base_dist=dists, scale=1.0) + dists = TanhWrappedDistribution(base_dist=dists, scale=1.) 
return dists @@ -1425,20 +1323,13 @@ def forward(self, obs_dict, actions=None, goal_dict=None): Returns: action (torch.Tensor): batch of actions from policy distribution """ - out = self.forward_train( - obs_dict=obs_dict, actions=actions, goal_dict=goal_dict - ) + out = self.forward_train(obs_dict=obs_dict, actions=actions, goal_dict=goal_dict) return out.sample() def _to_string(self): """Info to pretty print.""" msg = "action_dim={}, std_activation={}, low_noise_eval={}, num_nodes={}, min_std={}".format( - self.ac_dim, - self.std_activation, - self.low_noise_eval, - self.num_modes, - self.min_std, - ) + self.ac_dim, self.std_activation, self.low_noise_eval, self.num_modes, self.min_std) return msg @@ -1447,7 +1338,6 @@ class VAEActor(Module): A VAE that models a distribution of actions conditioned on observations. The VAE prior and decoder are used at test-time as the policy. """ - def __init__( self, obs_shapes, @@ -1505,8 +1395,8 @@ def __init__( action_shapes = OrderedDict(action=(self.ac_dim,)) # ensure VAE decoder will squash actions into [-1, 1] - output_squash = ["action"] - output_scales = OrderedDict(action=1.0) + output_squash = ['action'] + output_scales = OrderedDict(action=1.) self._vae = VAE( input_shapes=action_shapes, @@ -1540,7 +1430,7 @@ def encode(self, actions, obs_dict, goal_dict=None): actions (torch.Tensor): a batch of actions obs_dict (dict): a dictionary that maps modalities to torch.Tensor - batches. These should correspond to the observation modalities + batches. These should correspond to the observation modalities used for conditioning in either the decoder or the prior (or both). goal_dict (dict): a dictionary that maps modalities to torch.Tensor @@ -1571,7 +1461,7 @@ def decode(self, obs_dict=None, goal_dict=None, z=None, n=None): z (torch.Tensor): if provided, these latents are used to generate reconstructions from the VAE, and the prior is not sampled. - n (int): this argument is used to specify the number of samples to + n (int): this argument is used to specify the number of samples to generate from the prior. Only required if @z is None - i.e. sampling takes place @@ -1617,7 +1507,7 @@ def get_gumbel_temperature(self): def output_shape(self, input_shape=None): """ - This implementation is required by the Module superclass, but is unused since we + This implementation is required by the Module superclass, but is unused since we never chain this module to other ones. """ return [self.ac_dim] @@ -1631,7 +1521,7 @@ def forward_train(self, actions, obs_dict, goal_dict=None, freeze_encoder=False) actions (torch.Tensor): a batch of actions obs_dict (dict): a dictionary that maps modalities to torch.Tensor - batches. These should correspond to the observation modalities + batches. These should correspond to the observation modalities used for conditioning in either the decoder or the prior (or both). 
goal_dict (dict): a dictionary that maps modalities to torch.Tensor @@ -1653,12 +1543,11 @@ def forward_train(self, actions, obs_dict, goal_dict=None, freeze_encoder=False) """ action_inputs = OrderedDict(action=actions) return self._vae.forward( - inputs=action_inputs, - outputs=action_inputs, - conditions=obs_dict, + inputs=action_inputs, + outputs=action_inputs, + conditions=obs_dict, goals=goal_dict, - freeze_encoder=freeze_encoder, - ) + freeze_encoder=freeze_encoder) def forward(self, obs_dict, goal_dict=None, z=None): """ diff --git a/robomimic/models/transformers.py b/robomimic/models/transformers.py index 3b891b80..309bff30 100644 --- a/robomimic/models/transformers.py +++ b/robomimic/models/transformers.py @@ -15,7 +15,6 @@ import robomimic.utils.tensor_utils as TensorUtils import robomimic.utils.torch_utils as TorchUtils - class GEGLU(nn.Module): """ References: @@ -121,9 +120,7 @@ def __init__( assert ( embed_dim % num_heads == 0 - ), "num_heads: {} does not divide embed_dim: {} exactly".format( - num_heads, embed_dim - ) + ), "num_heads: {} does not divide embed_dim: {} exactly".format(num_heads, embed_dim) self.embed_dim = embed_dim self.num_heads = num_heads @@ -280,7 +277,7 @@ def __init__( nn.Linear(embed_dim, 4 * embed_dim * mult), activation, nn.Linear(4 * embed_dim, embed_dim), - nn.Dropout(output_dropout), + nn.Dropout(output_dropout) ) # layer normalization for inputs to self-attention module and MLP @@ -426,4 +423,4 @@ def forward(self, inputs): assert inputs.shape[1:] == (self.context_length, self.embed_dim), inputs.shape x = self.nets["transformer"](inputs) transformer_output = self.nets["output_ln"](x) - return transformer_output + return transformer_output \ No newline at end of file diff --git a/robomimic/models/vae_nets.py b/robomimic/models/vae_nets.py index a8a7985a..91b4e7f0 100644 --- a/robomimic/models/vae_nets.py +++ b/robomimic/models/vae_nets.py @@ -2,7 +2,6 @@ Contains an implementation of Variational Autoencoder (VAE) and other variants, including other priors, and RNN-VAEs. """ - import textwrap import numpy as np from copy import deepcopy @@ -48,11 +47,10 @@ def vae_args_from_config(vae_config): class Prior(Module): """ Base class for VAE priors. It's basically the same as a @MIMO_MLP network (it - instantiates one) but it supports additional methods such as KL loss computation - and sampling, and also may learn prior parameters as observation-independent + instantiates one) but it supports additional methods such as KL loss computation + and sampling, and also may learn prior parameters as observation-independent torch Parameters instead of observation-dependent mappings. """ - def __init__( self, param_shapes, @@ -70,7 +68,7 @@ def __init__( param_obs_dependent (OrderedDict): a dictionary with boolean values consistent with @param_shapes which determines whether - to learn parameters as part of the (obs-dependent) network or + to learn parameters as part of the (obs-dependent) network or directly as learnable parameters. 
obs_shapes (OrderedDict): a dictionary that maps modality to @@ -100,9 +98,7 @@ def __init__( """ super(Prior, self).__init__() - assert isinstance(param_shapes, OrderedDict) and isinstance( - param_obs_dependent, OrderedDict - ) + assert isinstance(param_shapes, OrderedDict) and isinstance(param_obs_dependent, OrderedDict) assert set(param_shapes.keys()) == set(param_obs_dependent.keys()) self.param_shapes = param_shapes self.param_obs_dependent = param_obs_dependent @@ -129,9 +125,7 @@ def _create_layers(self, net_kwargs): mlp_output_shapes[pp] = self.param_shapes[pp] else: # learnable prior parameters independent of observation - param_init = torch.randn(*self.param_shapes[pp]) / np.sqrt( - np.prod(self.param_shapes[pp]) - ) + param_init = torch.randn(*self.param_shapes[pp]) / np.sqrt(np.prod(self.param_shapes[pp])) self.prior_params[pp] = torch.nn.Parameter(param_init) # only make networks if we have obs-dependent prior parameters @@ -176,7 +170,7 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes sample-based KL divergence loss between the Gaussian distribution - given by @mu, @logvar and the prior distribution. + given by @mu, @logvar and the prior distribution. Args: posterior_params (dict): dictionary with keys "mu" and "logvar" corresponding @@ -203,7 +197,7 @@ def output_shape(self, input_shape=None): """ if self.prior_module is not None: return self.prior_module.output_shape(input_shape) - return {k: list(self.param_shapes[k]) for k in self.param_shapes} + return { k : list(self.param_shapes[k]) for k in self.param_shapes } def forward(self, batch_size, obs_dict=None, goal_dict=None): """ @@ -231,17 +225,11 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): for pp in self.param_shapes: if not self.param_obs_dependent[pp]: # ensure leading dimension will be consistent with other params - prior_params[pp] = TensorUtils.expand_at( - self.prior_params[pp], size=batch_size, dim=0 - ) + prior_params[pp] = TensorUtils.expand_at(self.prior_params[pp], size=batch_size, dim=0) # ensure leading dimensions are all consistent - TensorUtils.assert_size_at_dim( - prior_params, - size=batch_size, - dim=0, - msg="prior params dim 0 mismatch in forward", - ) + TensorUtils.assert_size_at_dim(prior_params, size=batch_size, dim=0, + msg="prior params dim 0 mismatch in forward") return prior_params @@ -251,7 +239,6 @@ class GaussianPrior(Prior): A class that holds functionality for learning both unimodal Gaussian priors and multimodal Gaussian Mixture Model priors for use in VAEs. """ - def __init__( self, latent_dim, @@ -291,7 +278,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. If provided, assumes that - the prior should depend on observation inputs, and networks + the prior should depend on observation inputs, and networks will be created to output prior parameters. 
mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes @@ -337,14 +324,8 @@ def __init__( # network will generate mean and logvar param_shapes = OrderedDict( - mean=( - self.num_modes, - self.latent_dim, - ), - logvar=( - self.num_modes, - self.latent_dim, - ), + mean=(self.num_modes, self.latent_dim,), + logvar=(self.num_modes, self.latent_dim,), ) param_obs_dependent = OrderedDict(mean=True, logvar=True) @@ -402,19 +383,14 @@ def sample(self, n, obs_dict=None, goal_dict=None): # check consistency between n and obs_dict if self._input_dependent: - TensorUtils.assert_size_at_dim( - obs_dict, size=n, dim=0, msg="obs dict and n mismatch in @sample" - ) + TensorUtils.assert_size_at_dim(obs_dict, size=n, dim=0, + msg="obs dict and n mismatch in @sample") if self.learnable: # forward to get parameters out = self.forward(batch_size=n, obs_dict=obs_dict, goal_dict=goal_dict) - prior_means, prior_logvars, prior_logweights = ( - out["means"], - out["logvars"], - out["logweights"], - ) + prior_means, prior_logvars, prior_logweights = out["means"], out["logvars"], out["logweights"] if prior_logweights is not None: prior_weights = torch.exp(prior_logweights) @@ -424,28 +400,19 @@ def sample(self, n, obs_dict=None, goal_dict=None): # make uniform weights (in the case that weights were not learned) if not self.gmm_learn_weights: - prior_weights = ( - torch.ones(n, self.num_modes).to(prior_means.device) - / self.num_modes - ) + prior_weights = torch.ones(n, self.num_modes).to(prior_means.device) / self.num_modes # sample modes gmm_mode_indices = D.Categorical(prior_weights).sample() - + # get GMM centers and sample using reparametrization trick - selected_means = TensorUtils.gather_sequence( - prior_means, indices=gmm_mode_indices - ) - selected_logvars = TensorUtils.gather_sequence( - prior_logvars, indices=gmm_mode_indices - ) + selected_means = TensorUtils.gather_sequence(prior_means, indices=gmm_mode_indices) + selected_logvars = TensorUtils.gather_sequence(prior_logvars, indices=gmm_mode_indices) z = TorchUtils.reparameterize(selected_means, selected_logvars) else: # learned unimodal Gaussian - remove mode dim and sample from Gaussian using reparametrization trick - z = TorchUtils.reparameterize( - prior_means[:, 0, :], prior_logvars[:, 0, :] - ) + z = TorchUtils.reparameterize(prior_means[:, 0, :], prior_logvars[:, 0, :]) else: # sample from N(0, 1) @@ -459,7 +426,7 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes sample-based KL divergence loss between the Gaussian distribution - given by @mu, @logvar and the prior distribution. + given by @mu, @logvar and the prior distribution. 
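        In the unimodal case this reduces to a closed-form Gaussian KL; with a
        GMM prior, a single-sample estimate is used instead:

            KL(q || p) ~= log q(z | mu, var) - log p(z)

        where z is the reparameterized encoder sample and p is the mixture.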
Args: posterior_params (dict): dictionary with keys "mu" and "logvar" corresponding @@ -485,32 +452,25 @@ def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): return LossUtils.KLD_0_1_loss(mu=mu, logvar=logvar) # forward to get parameters - out = self.forward( - batch_size=mu.shape[0], obs_dict=obs_dict, goal_dict=goal_dict - ) - prior_means, prior_logvars, prior_logweights = ( - out["means"], - out["logvars"], - out["logweights"], - ) + out = self.forward(batch_size=mu.shape[0], obs_dict=obs_dict, goal_dict=goal_dict) + prior_means, prior_logvars, prior_logweights = out["means"], out["logvars"], out["logweights"] if not self.use_gmm: # collapse mode dimension and compute Gaussian KL in closed-form prior_means = prior_means[:, 0, :] prior_logvars = prior_logvars[:, 0, :] return LossUtils.KLD_gaussian_loss( - mu_1=mu, - logvar_1=logvar, - mu_2=prior_means, + mu_1=mu, + logvar_1=logvar, + mu_2=prior_means, logvar_2=prior_logvars, ) # GMM KL loss computation - var = torch.exp(logvar.clamp(-8, 30)) # clamp for numerical stability + var = torch.exp(logvar.clamp(-8, 30)) # clamp for numerical stability prior_vars = torch.exp(prior_logvars.clamp(-8, 30)) - kl_loss = LossUtils.log_normal(x=z, m=mu, v=var) - LossUtils.log_normal_mixture( - x=z, m=prior_means, v=prior_vars, log_w=prior_logweights - ) + kl_loss = LossUtils.log_normal(x=z, m=mu, v=var) \ + - LossUtils.log_normal_mixture(x=z, m=prior_means, v=prior_vars, log_w=prior_logweights) return kl_loss.mean() def forward(self, batch_size, obs_dict=None, goal_dict=None): @@ -532,8 +492,7 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): """ assert self.learnable prior_params = super(GaussianPrior, self).forward( - batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict - ) + batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict) if self.use_gmm and self.gmm_learn_weights: # normalize learned weight outputs to sum to 1 @@ -542,39 +501,27 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): logweights = None assert "weight" not in prior_params - out = dict( - means=prior_params["mean"], - logvars=prior_params["logvar"], - logweights=logweights, - ) + out = dict(means=prior_params["mean"], logvars=prior_params["logvar"], logweights=logweights) return out def __repr__(self): """Pretty print network""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 msg += textwrap.indent("latent_dim={}\n".format(self.latent_dim), indent) msg += textwrap.indent("latent_clip={}\n".format(self.latent_clip), indent) msg += textwrap.indent("learnable={}\n".format(self.learnable), indent) - msg += textwrap.indent( - "input_dependent={}\n".format(self._input_dependent), indent - ) + msg += textwrap.indent("input_dependent={}\n".format(self._input_dependent), indent) msg += textwrap.indent("use_gmm={}\n".format(self.use_gmm), indent) if self.use_gmm: msg += textwrap.indent("gmm_num_nodes={}\n".format(self.num_modes), indent) - msg += textwrap.indent( - "gmm_learn_weights={}\n".format(self.gmm_learn_weights), indent - ) + msg += textwrap.indent("gmm_learn_weights={}\n".format(self.gmm_learn_weights), indent) if self.learnable: if self.prior_module is not None: - msg += textwrap.indent( - "\nprior_module={}\n".format(self.prior_module), indent - ) - msg += textwrap.indent( - "prior_params={}\n".format(self.prior_params), indent - ) - msg = header + "(\n" + msg + ")" + msg += 
textwrap.indent("\nprior_module={}\n".format(self.prior_module), indent) + msg += textwrap.indent("prior_params={}\n".format(self.prior_params), indent) + msg = header + '(\n' + msg + ')' return msg @@ -583,7 +530,6 @@ class CategoricalPrior(Prior): A class that holds functionality for learning categorical priors for use in VAEs. """ - def __init__( self, latent_dim, @@ -594,6 +540,7 @@ def __init__( mlp_layer_dims=(), goal_shapes=None, encoder_kwargs=None, + ): """ Args: @@ -609,7 +556,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps modality to expected shapes for observations. If provided, assumes that - the prior should depend on observation inputs, and networks + the prior should depend on observation inputs, and networks will be created to output prior parameters. mlp_layer_dims ([int]): sequence of integers for the MLP hidden layer sizes @@ -647,10 +594,7 @@ def __init__( # network will generate logits for categorical distributions param_shapes = OrderedDict( - logit=( - self.latent_dim, - self.categorical_dim, - ) + logit=(self.latent_dim, self.categorical_dim,) ) param_obs_dependent = OrderedDict(logit=True) else: @@ -697,9 +641,8 @@ def sample(self, n, obs_dict=None, goal_dict=None): # check consistency between n and obs_dict if self._input_dependent: - TensorUtils.assert_size_at_dim( - obs_dict, size=n, dim=0, msg="obs dict and n mismatch in @sample" - ) + TensorUtils.assert_size_at_dim(obs_dict, size=n, dim=0, + msg="obs dict and n mismatch in @sample") if self.learnable: @@ -715,19 +658,10 @@ def sample(self, n, obs_dict=None, goal_dict=None): # try to include a categorical sample for each class if possible (ensuring rough uniformity) if (self.latent_dim == 1) and (self.categorical_dim <= n): # include samples [0, 1, ..., C - 1] and then repeat until batch is filled - dist_samples = ( - torch.arange(n) - .remainder(self.categorical_dim) - .unsqueeze(-1) - .to(self.device) - ) + dist_samples = torch.arange(n).remainder(self.categorical_dim).unsqueeze(-1).to(self.device) else: # sample one-hot latents from uniform categorical distribution for each latent dimension - probs = ( - torch.ones(n, self.latent_dim, self.categorical_dim) - .float() - .to(self.device) - ) + probs = torch.ones(n, self.latent_dim, self.categorical_dim).float().to(self.device) dist_samples = D.Categorical(probs=probs).sample() z = TensorUtils.to_one_hot(dist_samples, num_class=self.categorical_dim) @@ -738,11 +672,11 @@ def sample(self, n, obs_dict=None, goal_dict=None): def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): """ Computes KL divergence loss between the Categorical distribution - given by the unnormalized logits @logits and the prior distribution. + given by the unnormalized logits @logits and the prior distribution. 
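        With a non-learned prior the target is uniform over C classes, so per
        latent dimension this is

            KL(q || p) = sum_c q(c) * (log q(c) - log(1 / C))

        computed from the normalized posterior logits.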
Args: posterior_params (dict): dictionary with key "logits" corresponding - to torch.Tensor batch of unnormalized logits of shape [B, D * C] + to torch.Tensor batch of unnormalized logits of shape [B, D * C] that corresponds to the posterior categorical distribution z (torch.Tensor): samples from encoder - unused for this prior @@ -755,19 +689,13 @@ def kl_loss(self, posterior_params, z=None, obs_dict=None, goal_dict=None): Returns: kl_loss (torch.Tensor): KL divergence loss """ - logits = posterior_params["logit"].reshape( - -1, self.latent_dim, self.categorical_dim - ) + logits = posterior_params["logit"].reshape(-1, self.latent_dim, self.categorical_dim) if not self.learnable: # prior logits correspond to uniform categorical distribution prior_logits = torch.zeros_like(logits) else: # forward to get parameters - out = self.forward( - batch_size=posterior_params["logit"].shape[0], - obs_dict=obs_dict, - goal_dict=goal_dict, - ) + out = self.forward(batch_size=posterior_params["logit"].shape[0], obs_dict=obs_dict, goal_dict=goal_dict) prior_logits = out["logit"] prior_dist = D.Categorical(logits=prior_logits) @@ -797,31 +725,22 @@ def forward(self, batch_size, obs_dict=None, goal_dict=None): """ assert self.learnable return super(CategoricalPrior, self).forward( - batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict - ) + batch_size=batch_size, obs_dict=obs_dict, goal_dict=goal_dict) def __repr__(self): """Pretty print network""" - header = "{}".format(str(self.__class__.__name__)) - msg = "" - indent = " " * 4 + header = '{}'.format(str(self.__class__.__name__)) + msg = '' + indent = ' ' * 4 msg += textwrap.indent("latent_dim={}\n".format(self.latent_dim), indent) - msg += textwrap.indent( - "categorical_dim={}\n".format(self.categorical_dim), indent - ) + msg += textwrap.indent("categorical_dim={}\n".format(self.categorical_dim), indent) msg += textwrap.indent("learnable={}\n".format(self.learnable), indent) - msg += textwrap.indent( - "input_dependent={}\n".format(self._input_dependent), indent - ) + msg += textwrap.indent("input_dependent={}\n".format(self._input_dependent), indent) if self.learnable: if self.prior_module is not None: - msg += textwrap.indent( - "\nprior_module={}\n".format(self.prior_module), indent - ) - msg += textwrap.indent( - "prior_params={}\n".format(self.prior_params), indent - ) - msg = header + "(\n" + msg + ")" + msg += textwrap.indent("\nprior_module={}\n".format(self.prior_module), indent) + msg += textwrap.indent("prior_params={}\n".format(self.prior_params), indent) + msg = header + '(\n' + msg + ')' return msg @@ -838,21 +757,20 @@ class VAE(torch.nn.Module): expected reconstructions - this allows for asymmetric reconstruction (for example, reconstructing low-resolution images). - This implementation supports learning conditional distributions as well (cVAE). + This implementation supports learning conditional distributions as well (cVAE). The conditioning variable Y is specified through the @condition_shapes argument, which is also a map between modalities (strings) and expected shapes. In this way, - variables with multiple kinds of data (e.g. image and flat-dimensional) can - jointly be conditioned on. By default, the decoder takes the conditioning + variables with multiple kinds of data (e.g. image and flat-dimensional) can + jointly be conditioned on. By default, the decoder takes the conditioning variable Y as input. 
To force the decoder to reconstruct from just the latent, set @decoder_is_conditioned to False (in this case, the prior must be conditioned). The implementation also supports learning expressive priors instead of using the usual N(0, 1) prior. There are three kinds of priors supported - Gaussian, - Gaussian Mixture Model (GMM), and Categorical. For each prior, the parameters can + Gaussian Mixture Model (GMM), and Categorical. For each prior, the parameters can be learned as independent parameters, or be learned as functions of the conditioning variable Y (by setting @prior_is_conditioned). """ - def __init__( self, input_shapes, @@ -886,13 +804,13 @@ def __init__( expected shapes for all encoder-specific inputs. This corresponds to the variable X whose distribution we are learning. - output_shapes (OrderedDict): a dictionary that maps modality to + output_shapes (OrderedDict): a dictionary that maps modality to expected shape for outputs to reconstruct. Usually, this is the same as @input_shapes but this argument allows for asymmetries, such as reconstructing low-resolution images. - encoder_layer_dims ([int]): sequence of integers for the encoder hidden + encoder_layer_dims ([int]): sequence of integers for the encoder hidden layer sizes. decoder_layer_dims ([int]): sequence of integers for the decoder hidden @@ -919,7 +837,7 @@ def __init__( latent_clip (float): if provided, clip all latents sampled at test-time in each dimension to (-@latent_clip, @latent_clip) - output_squash ([str]): an iterable of modalities that should be + output_squash ([str]): an iterable of modalities that should be a subset of @output_shapes. The decoder outputs for these modalities will be squashed into a symmetric range [-a, a] by using a tanh layer and then scaling the output with the @@ -932,20 +850,20 @@ def __init__( when output_ranges is specified (not None), output_scales should be None prior_learn (bool): if True, the prior distribution parameters - are also learned through the KL-divergence loss (instead + are also learned through the KL-divergence loss (instead of being constrained to a N(0, 1) Gaussian distribution). If @prior_is_conditioned is True, a global set of parameters - are learned, otherwise, a prior network that maps between - modalities in @condition_shapes and prior parameters is - learned. By default, a Gaussian prior is learned, unless - @prior_use_gmm is True, in which case a Gaussian Mixture + are learned, otherwise, a prior network that maps between + modalities in @condition_shapes and prior parameters is + learned. By default, a Gaussian prior is learned, unless + @prior_use_gmm is True, in which case a Gaussian Mixture Model (GMM) prior is learned. prior_is_conditioned (bool): whether to condition the prior on the conditioning variables. False by default. Only used if @condition_shapes is not empty. If this is set to True, @prior_learn must be True. - + prior_layer_dims ([int]): sequence of integers for the prior hidden layer sizes. Only used for learned priors that take condition variables as input (i.e. when @prior_learn and @prior_is_conditioned are set to True, @@ -969,7 +887,7 @@ def __init__( prior_categorical_dim (int): categorical dimension - each latent sampled from the prior will be of shape (@latent_dim, @prior_categorical_dim) - and will be "one-hot" in the latter dimension. Only used if + and will be "one-hot" in the latter dimension. Only used if @prior_use_categorical is True. 
prior_categorical_gumbel_softmax_hard (bool): if True, use the "hard" version of @@ -1012,30 +930,21 @@ def __init__( # check for conditioning (cVAE) self._is_cvae = False - self.condition_shapes = ( - deepcopy(condition_shapes) - if condition_shapes is not None - else OrderedDict() - ) + self.condition_shapes = deepcopy(condition_shapes) if condition_shapes is not None else OrderedDict() if len(self.condition_shapes) > 0: # this is a cVAE - we learn a conditional distribution p(X | Y) assert isinstance(self.condition_shapes, OrderedDict) self._is_cvae = True self.decoder_is_conditioned = decoder_is_conditioned self.prior_is_conditioned = prior_is_conditioned - assert ( - self.decoder_is_conditioned or self.prior_is_conditioned - ), "cVAE must be conditioned in decoder and/or prior" + assert self.decoder_is_conditioned or self.prior_is_conditioned, \ + "cVAE must be conditioned in decoder and/or prior" if self.prior_is_conditioned: - assert ( - prior_learn - ), "to pass conditioning inputs to prior, prior must be learned" + assert prior_learn, "to pass conditioning inputs to prior, prior must be learned" # check for goal conditioning self._is_goal_conditioned = False - self.goal_shapes = ( - deepcopy(goal_shapes) if goal_shapes is not None else OrderedDict() - ) + self.goal_shapes = deepcopy(goal_shapes) if goal_shapes is not None else OrderedDict() if len(self.goal_shapes) > 0: assert self._is_cvae, "to condition VAE on goals, it must be a cVAE" assert isinstance(self.goal_shapes, OrderedDict) @@ -1047,20 +956,14 @@ def __init__( # determines whether outputs are squashed with tanh and if so, to what scaling assert not (output_scales is not None and output_ranges is not None) self.output_squash = output_squash - self.output_scales = ( - output_scales if output_scales is not None else OrderedDict() - ) - self.output_ranges = ( - output_ranges if output_ranges is not None else OrderedDict() - ) + self.output_scales = output_scales if output_scales is not None else OrderedDict() + self.output_ranges = output_ranges if output_ranges is not None else OrderedDict() assert set(self.output_squash) == set(self.output_scales.keys()) assert set(self.output_squash).issubset(set(self.output_shapes)) # decoder settings - self.decoder_reconstruction_sum_across_elements = ( - decoder_reconstruction_sum_across_elements - ) + self.decoder_reconstruction_sum_across_elements = decoder_reconstruction_sum_across_elements # prior parameters self.prior_learn = prior_learn @@ -1070,9 +973,7 @@ def __init__( self.prior_gmm_learn_weights = prior_gmm_learn_weights self.prior_use_categorical = prior_use_categorical self.prior_categorical_dim = prior_categorical_dim - self.prior_categorical_gumbel_softmax_hard = ( - prior_categorical_gumbel_softmax_hard - ) + self.prior_categorical_gumbel_softmax_hard = prior_categorical_gumbel_softmax_hard assert np.sum([self.prior_use_gmm, self.prior_use_categorical]) <= 1 # for obs core @@ -1115,7 +1016,7 @@ def _create_encoder(self): encoder_obs_group_shapes["condition"] = OrderedDict(self.condition_shapes) if self._is_goal_conditioned: encoder_obs_group_shapes["goal"] = OrderedDict(self.goal_shapes) - + # encoder outputs posterior distribution parameters if self.prior_use_categorical: encoder_output_shapes = OrderedDict( @@ -1123,13 +1024,13 @@ def _create_encoder(self): ) else: encoder_output_shapes = OrderedDict( - mean=(self.latent_dim,), + mean=(self.latent_dim,), logvar=(self.latent_dim,), ) self.nets["encoder"] = MIMO_MLP( input_obs_group_shapes=encoder_obs_group_shapes, - 
output_shapes=encoder_output_shapes, + output_shapes=encoder_output_shapes, layer_dims=self.encoder_layer_dims, encoder_kwargs=self._encoder_kwargs, ) @@ -1152,7 +1053,7 @@ def _create_decoder(self): self.nets["decoder"] = MIMO_MLP( input_obs_group_shapes=decoder_obs_group_shapes, - output_shapes=self.output_shapes, + output_shapes=self.output_shapes, layer_dims=self.decoder_layer_dims, encoder_kwargs=self._encoder_kwargs, ) @@ -1229,9 +1130,7 @@ def reparameterize(self, posterior_params): """ if self.prior_use_categorical: # reshape to [B, D, C] to take softmax across categorical classes - logits = posterior_params["logit"].reshape( - -1, self.latent_dim, self.prior_categorical_dim - ) + logits = posterior_params["logit"].reshape(-1, self.latent_dim, self.prior_categorical_dim) z = F.gumbel_softmax( logits=logits, tau=self._gumbel_temperature, @@ -1242,7 +1141,7 @@ def reparameterize(self, posterior_params): return TensorUtils.flatten(z) return TorchUtils.reparameterize( - mu=posterior_params["mean"], + mu=posterior_params["mean"], logvar=posterior_params["logvar"], ) @@ -1264,7 +1163,7 @@ def decode(self, conditions=None, goals=None, z=None, n=None): z (torch.Tensor): if provided, these latents are used to generate reconstructions from the VAE, and the prior is not sampled. - n (int): this argument is used to specify the number of samples to + n (int): this argument is used to specify the number of samples to generate from the prior. Only required if @z is None - i.e. sampling takes place @@ -1277,11 +1176,11 @@ def decode(self, conditions=None, goals=None, z=None, n=None): assert n is not None z = self.sample_prior(n=n, conditions=conditions, goals=goals) - # decoder takes latents as input, and maybe condition variables + # decoder takes latents as input, and maybe condition variables # and goal variables inputs = dict( - input=dict(latent=z), - condition=conditions, + input=dict(latent=z), + condition=conditions, goal=goals, ) @@ -1294,9 +1193,7 @@ def decode(self, conditions=None, goals=None, z=None, n=None): for k, v_range in self.output_ranges.items(): assert v_range[1] > v_range[0] - recons[k] = ( - torch.sigmoid(recons[k]) * (v_range[1] - v_range[0]) + v_range[0] - ) + recons[k] = torch.sigmoid(recons[k]) * (v_range[1] - v_range[0]) + v_range[0] return recons def sample_prior(self, n, conditions=None, goals=None): @@ -1343,7 +1240,7 @@ def kl_loss(self, posterior_params, encoder_z=None, conditions=None, goals=None) return self.nets["prior"].kl_loss( posterior_params=posterior_params, z=encoder_z, - obs_dict=conditions, + obs_dict=conditions, goal_dict=goals, ) @@ -1354,7 +1251,7 @@ def reconstruction_loss(self, reconstructions, targets): The beta term for weighting between reconstruction and kl losses will need to be tuned in practice for each situation (see - https://twitter.com/memotv/status/973323454350090240 for more + https://twitter.com/memotv/status/973323454350090240 for more discussion). Args: @@ -1387,9 +1284,7 @@ def reconstruction_loss(self, reconstructions, targets): loss /= num_mods return loss - def forward( - self, inputs, outputs, conditions=None, goals=None, freeze_encoder=False - ): + def forward(self, inputs, outputs, conditions=None, goals=None, freeze_encoder=False): """ A full pass through the VAE network to construct KL and reconstruction losses. 
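Between these hunks it may help to recall the computation the reformatted forward() performs: encode to (mu, logvar), reparameterize, decode, then combine a reconstruction loss with a beta-weighted KL term. A minimal self-contained sketch, assuming the standard Gaussian case; this is illustrative, not the MIMO_MLP-based implementation, and beta is a hypothetical weight that would be tuned per task:

    import torch
    import torch.nn.functional as F

    def reparameterize(mu, logvar):
        # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable
        std = torch.exp(0.5 * logvar)
        return mu + std * torch.randn_like(std)

    mu, logvar = torch.zeros(8, 16), torch.zeros(8, 16)  # stand-in encoder outputs
    target = torch.zeros(8, 16)                          # stand-in reconstruction target
    z = reparameterize(mu, logvar)
    recon = z                                            # stand-in decoder
    # analytic KL( N(mu, sigma^2) || N(0, I) ), summed over latent dims
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1).mean()
    beta = 1e-4                                          # hypothetical weighting
    loss = F.mse_loss(recon, target) + beta * kl
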
@@ -1434,7 +1329,7 @@ def forward( # mu, logvar <- Enc(X, Y) posterior_params = self.encode( - inputs=inputs, + inputs=inputs, conditions=conditions, goals=goals, ) @@ -1447,11 +1342,11 @@ def forward( # hat(X) = Dec(z, Y) reconstructions = self.decode( - conditions=conditions, + conditions=conditions, goals=goals, z=encoder_z, ) - + # this will also train prior network z ~ Prior(z | Y) kl_loss = self.kl_loss( posterior_params=posterior_params, @@ -1461,16 +1356,16 @@ def forward( ) reconstruction_loss = self.reconstruction_loss( - reconstructions=reconstructions, + reconstructions=reconstructions, targets=outputs, ) return { - "encoder_params": posterior_params, - "encoder_z": encoder_z, - "decoder_outputs": reconstructions, - "kl_loss": kl_loss, - "reconstruction_loss": reconstruction_loss, + "encoder_params" : posterior_params, + "encoder_z" : encoder_z, + "decoder_outputs" : reconstructions, + "kl_loss" : kl_loss, + "reconstruction_loss" : reconstruction_loss, } def set_gumbel_temperature(self, temperature): diff --git a/robomimic/models/value_nets.py b/robomimic/models/value_nets.py index a7e958f1..c98fa7e4 100644 --- a/robomimic/models/value_nets.py +++ b/robomimic/models/value_nets.py @@ -4,7 +4,6 @@ such as subgoal or goal dictionaries) and produce value or action-value estimates or distributions. """ - import numpy as np from collections import OrderedDict @@ -23,7 +22,6 @@ class ValueNetwork(MIMO_MLP): A basic value network that predicts values from observations. Can optionally be goal conditioned on future observations. """ - def __init__( self, obs_shapes, @@ -37,7 +35,7 @@ def __init__( obs_shapes (OrderedDict): a dictionary that maps observation keys to expected shapes for observations. - mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. + mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return that the network should be possible of generating. The network will rescale outputs @@ -66,12 +64,8 @@ def __init__( self.value_bounds = value_bounds if self.value_bounds is not None: # convert [lb, ub] to a scale and offset for the tanh output, which is in [-1, 1] - self._value_scale = ( - float(self.value_bounds[1]) - float(self.value_bounds[0]) - ) / 2.0 - self._value_offset = ( - float(self.value_bounds[1]) + float(self.value_bounds[0]) - ) / 2.0 + self._value_scale = (float(self.value_bounds[1]) - float(self.value_bounds[0])) / 2. + self._value_offset = (float(self.value_bounds[1]) + float(self.value_bounds[0])) / 2. assert isinstance(obs_shapes, OrderedDict) self.obs_shapes = obs_shapes @@ -107,11 +101,11 @@ def _get_output_shapes(self): def output_shape(self, input_shape=None): """ - Function to compute output shape from inputs to this module. + Function to compute output shape from inputs to this module. Args: input_shape (iterable of int): shape of input. Does not include batch dimension. - Some modules may not need this argument, if their output does not depend + Some modules may not need this argument, if their output does not depend on the size of the input, or if they assume fixed size input. Returns: @@ -123,9 +117,7 @@ def forward(self, obs_dict, goal_dict=None): """ Forward through value network, and then optionally use tanh scaling. 
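The _value_scale and _value_offset computed in __init__ above implement a simple bounding trick: squash the raw value head through tanh, then rescale into [lb, ub]. A standalone sketch with illustrative bounds, not the robomimic module itself:

    import torch

    lb, ub = -75.0, 100.0                      # hypothetical value_bounds
    scale = (ub - lb) / 2.0                    # mirrors the _value_scale computation
    offset = (ub + lb) / 2.0                   # mirrors the _value_offset computation
    raw = torch.randn(8)                       # unbounded value-head outputs
    values = offset + scale * torch.tanh(raw)  # rescaled into (lb, ub)
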
""" - values = super(ValueNetwork, self).forward(obs=obs_dict, goal=goal_dict)[ - "value" - ] + values = super(ValueNetwork, self).forward(obs=obs_dict, goal=goal_dict)["value"] if self.value_bounds is not None: values = self._value_offset + self._value_scale * torch.tanh(values) return values @@ -139,7 +131,6 @@ class ActionValueNetwork(ValueNetwork): A basic Q (action-value) network that predicts values from observations and actions. Can optionally be goal conditioned on future observations. """ - def __init__( self, obs_shapes, @@ -156,7 +147,7 @@ def __init__( ac_dim (int): dimension of action space. - mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. + mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return that the network should be possible of generating. The network will rescale outputs @@ -212,10 +203,9 @@ def _to_string(self): class DistributionalActionValueNetwork(ActionValueNetwork): """ Distributional Q (action-value) network that outputs a categorical distribution over - a discrete grid of value atoms. See https://arxiv.org/pdf/1707.06887.pdf for + a discrete grid of value atoms. See https://arxiv.org/pdf/1707.06887.pdf for more details. """ - def __init__( self, obs_shapes, @@ -233,7 +223,7 @@ def __init__( ac_dim (int): dimension of action space. - mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. + mlp_layer_dims ([int]): sequence of integers for the MLP hidden layers sizes. value_bounds (tuple): a 2-tuple corresponding to the lowest and highest possible return that the network should be possible of generating. This defines the support @@ -325,6 +315,4 @@ def forward(self, obs_dict, acts, goal_dict=None): return vd.mean() def _to_string(self): - return "action_dim={}\nvalue_bounds={}\nnum_atoms={}".format( - self.ac_dim, self.value_bounds, self.num_atoms - ) + return "action_dim={}\nvalue_bounds={}\nnum_atoms={}".format(self.ac_dim, self.value_bounds, self.num_atoms) \ No newline at end of file diff --git a/robomimic/models/vit_rein.py b/robomimic/models/vit_rein.py index b2a6d07d..e73f9327 100644 --- a/robomimic/models/vit_rein.py +++ b/robomimic/models/vit_rein.py @@ -2,7 +2,6 @@ Contains torch Modules for implementation of rein method for domain adaptation of DINOv2 """ - import torch import torch.nn as nn import torch.nn.functional as F @@ -11,20 +10,21 @@ from operator import mul from torch import Tensor - class MLPhead(nn.Module): - def __init__(self, in_dim: int, out_dim: int, **kwargs) -> None: + def __init__(self, + in_dim: int, + out_dim: int, + **kwargs) -> None: super().__init__(**kwargs) self._in_dim = in_dim self._out_dim = out_dim - + self._mlp = nn.Linear(self._in_dim, self._out_dim) def forward(self, x: Tensor) -> Tensor: x = self._mlp.forward(x) return x - class Reins(nn.Module): def __init__( self, @@ -131,7 +131,6 @@ def forward_delta_feat(self, feats: Tensor, tokens: Tensor, layers: int) -> Tens delta_f = self.mlp_delta_f(delta_f + feats) return delta_f - class LoRAReins(Reins): def __init__(self, lora_dim=16, **kwargs): self.lora_dim = lora_dim @@ -160,4 +159,4 @@ def get_tokens(self, layer): if layer == -1: return self.learnable_tokens_a @ self.learnable_tokens_b else: - return self.learnable_tokens_a[layer] @ self.learnable_tokens_b[layer] + return self.learnable_tokens_a[layer] @ self.learnable_tokens_b[layer] \ No newline at end of file diff --git 
a/robomimic/scripts/config_gen/act_gen.py b/robomimic/scripts/config_gen/act_gen.py index a54b532e..8962941d 100644 --- a/robomimic/scripts/config_gen/act_gen.py +++ b/robomimic/scripts/config_gen/act_gen.py @@ -1,11 +1,10 @@ from robomimic.scripts.config_gen.helper import * - def make_generator_helper(args): algo_name_short = "act" generator = get_generator( algo_name="act", - config_file=os.path.join(base_path, "robomimic/exps/templates/act.json"), + config_file=os.path.join(base_path, 'robomimic/exps/templates/act.json'), args=args, algo_name_short=algo_name_short, pt=True, @@ -13,6 +12,7 @@ def make_generator_helper(args): if args.ckpt_mode is None: args.ckpt_mode = "off" + generator.add_param( key="train.num_epochs", name="", @@ -40,12 +40,7 @@ def make_generator_helper(args): name="ds", group=2, values=[ - [ - {"path": p} - for p in scan_datasets( - "~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5" - ) - ], + [{"path": p} for p in scan_datasets("~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5")], ], value_names=[ "pen-in-cup", @@ -75,7 +70,7 @@ def make_generator_helper(args): group=2, values=[ [ - {"path": "TODO.hdf5"}, # replace with your own path + {"path": "TODO.hdf5"}, # replace with your own path ], ], value_names=[ @@ -88,9 +83,11 @@ def make_generator_helper(args): key="experiment.env_meta_update_dict", name="", group=-1, - values=[{"env_kwargs": {"controller_configs": {"control_delta": False}}}], + values=[ + {"env_kwargs": {"controller_configs": {"control_delta": False}}} + ], ) - + generator.add_param( key="train.action_keys", name="ac_keys", @@ -108,9 +105,10 @@ def make_generator_helper(args): ], ) + else: raise ValueError - + generator.add_param( key="train.output_dir", name="", @@ -126,7 +124,6 @@ def make_generator_helper(args): return generator - if __name__ == "__main__": parser = get_argparser() diff --git a/robomimic/scripts/config_gen/helper.py b/robomimic/scripts/config_gen/helper.py index 7735b7f6..48a3af07 100644 --- a/robomimic/scripts/config_gen/helper.py +++ b/robomimic/scripts/config_gen/helper.py @@ -6,10 +6,7 @@ import robomimic import robomimic.utils.hyperparam_utils as HyperparamUtils -base_path = os.path.abspath( - os.path.join(os.path.dirname(robomimic.__file__), os.pardir) -) - +base_path = os.path.abspath(os.path.join(os.path.dirname(robomimic.__file__), os.pardir)) def scan_datasets(folder, postfix=".h5"): dataset_paths = [] @@ -28,23 +25,14 @@ def get_generator(algo_name, config_file, args, algo_name_short=None, pt=False): args.env, args.mod, ] - args.wandb_proj_name = "_".join([str(s) for s in strings if s is not None]) + args.wandb_proj_name = '_'.join([str(s) for s in strings if s is not None]) if args.script is not None: generated_config_dir = os.path.join(os.path.dirname(args.script), "json") else: - curr_time = datetime.datetime.fromtimestamp(time.time()).strftime( - "%m-%d-%y-%H-%M-%S" - ) - generated_config_dir = os.path.join( - "~/", - "tmp/autogen_configs/ril", - algo_name, - args.env, - args.mod, - args.name, - curr_time, - "json", + curr_time = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-%y-%H-%M-%S') + generated_config_dir=os.path.join( + '~/', 'tmp/autogen_configs/ril', algo_name, args.env, args.mod, args.name, curr_time, "json", ) generator = HyperparamUtils.ConfigGenerator( @@ -67,7 +55,9 @@ def set_env_settings(generator, args): key="experiment.rollout.enabled", name="", group=-1, - values=[False], + values=[ + False + ], ) generator.add_param( key="experiment.save.every_n_epochs", @@ 
-122,8 +112,7 @@ def set_env_settings(generator, args): values=[ [ "camera/image/hand_camera_left_image", - "camera/image/varied_camera_1_left_image", - "camera/image/varied_camera_2_left_image", # uncomment to use all 3 cameras + "camera/image/varied_camera_1_left_image", "camera/image/varied_camera_2_left_image" # uncomment to use all 3 cameras ] ], ) @@ -134,7 +123,7 @@ def set_env_settings(generator, args): values=[ # "CropRandomizer", # crop only # "ColorRandomizer", # jitter only - ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop + ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop ], hidename=True, ) @@ -145,48 +134,44 @@ def set_env_settings(generator, args): values=[ # {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}, # crop only # {}, # jitter only - [ - {}, - { - "crop_height": 116, - "crop_width": 116, - "num_crops": 1, - "pos_enc": False, - }, - ], # jitter, followed by crop + [{}, {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}], # jitter, followed by crop ], hidename=True, ) - if ( - "observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters - ) and ( - "observation.encoder.rgb.obs_randomizer_kwargs.crop_height" - not in generator.parameters - ): + if ("observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters) and \ + ("observation.encoder.rgb.obs_randomizer_kwargs.crop_height" not in generator.parameters): generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[116], + values=[ + 116 + ], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[116], + values=[ + 116 + ], ) # remove spatial softmax by default for r2d2 dataset generator.add_param( key="observation.encoder.rgb.core_kwargs.pool_class", name="", group=-1, - values=[None], + values=[ + None + ], ) generator.add_param( key="observation.encoder.rgb.core_kwargs.pool_kwargs", name="", group=-1, - values=[None], + values=[ + None + ], ) # specify dataset type is r2d2 rather than default robomimic @@ -194,9 +179,11 @@ def set_env_settings(generator, args): key="train.data_format", name="", group=-1, - values=["r2d2"], + values=[ + "r2d2" + ], ) - + # here, we list how each action key should be treated (normalized etc) generator.add_param( key="train.action_config", @@ -204,40 +191,40 @@ def set_env_settings(generator, args): group=-1, values=[ { - "action/cartesian_position": { + "action/cartesian_position":{ "normalization": "min_max", }, - "action/abs_pos": { + "action/abs_pos":{ "normalization": "min_max", }, - "action/abs_rot_6d": { + "action/abs_rot_6d":{ "normalization": "min_max", "format": "rot_6d", "convert_at_runtime": "rot_euler", }, - "action/abs_rot_euler": { + "action/abs_rot_euler":{ "normalization": "min_max", "format": "rot_euler", }, - "action/gripper_position": { + "action/gripper_position":{ "normalization": "min_max", }, - "action/cartesian_velocity": { + "action/cartesian_velocity":{ "normalization": None, }, - "action/rel_pos": { + "action/rel_pos":{ "normalization": None, }, - "action/rel_rot_6d": { + "action/rel_rot_6d":{ "format": "rot_6d", "normalization": None, "convert_at_runtime": "rot_euler", }, - "action/rel_rot_euler": { + "action/rel_rot_euler":{ "format": "rot_euler", "normalization": None, }, - "action/gripper_velocity": { + "action/gripper_velocity":{ "normalization": None, }, } @@ -270,24 +257,20 @@ def set_env_settings(generator, args): 
key="train.shuffled_obs_key_groups", name="", group=-1, - values=[ - [ - [ - ( - "camera/image/varied_camera_1_left_image", - "camera/image/varied_camera_1_right_image", - "camera/extrinsics/varied_camera_1_left", - "camera/extrinsics/varied_camera_1_right", - ), - ( - "camera/image/varied_camera_2_left_image", - "camera/image/varied_camera_2_right_image", - "camera/extrinsics/varied_camera_2_left", - "camera/extrinsics/varied_camera_2_right", - ), - ] - ] - ], + values=[[[ + ( + "camera/image/varied_camera_1_left_image", + "camera/image/varied_camera_1_right_image", + "camera/extrinsics/varied_camera_1_left", + "camera/extrinsics/varied_camera_1_right", + ), + ( + "camera/image/varied_camera_2_left_image", + "camera/image/varied_camera_2_right_image", + "camera/extrinsics/varied_camera_2_left", + "camera/extrinsics/varied_camera_2_right", + ), + ]]], ) elif args.env == "kitchen": generator.add_param( @@ -296,51 +279,51 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions": { + "actions":{ "normalization": None, }, - "action_dict/abs_pos": {"normalization": "min_max"}, + "action_dict/abs_pos": { + "normalization": "min_max" + }, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/gripper": { "normalization": None, }, "action_dict/base_mode": { "normalization": None, - }, + } } ], ) - - if args.mod == "im": + + if args.mod == 'im': generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_base_pos", - "robot0_gripper_qpos", - ] + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_base_pos", + "robot0_gripper_qpos"] ], ) generator.add_param( @@ -348,11 +331,9 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "robot0_agentview_left_image", - "robot0_agentview_right_image", - "robot0_eye_in_hand_image", - ] + ["robot0_agentview_left_image", + "robot0_agentview_right_image", + "robot0_eye_in_hand_image"] ], ) else: @@ -361,16 +342,15 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot0_base_pos", - "object", + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot0_base_pos", + "object", ] ], ) - elif args.env in ["square", "lift", "place_close"]: + elif args.env in ['square', 'lift', 'place_close']: # # set videos off # args.no_video = True @@ -380,48 +360,57 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions": { + "actions":{ "normalization": None, }, - "action_dict/abs_pos": {"normalization": "min_max"}, + "action_dict/abs_pos": { + "normalization": "min_max" + }, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/rel_rot_6d": { 
"normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/gripper": { "normalization": None, - }, + } } ], ) - if args.mod == "im": + if args.mod == 'im': generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, - values=[["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"]], + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos"] + ], ) generator.add_param( key="observation.modalities.obs.rgb", name="", group=-1, - values=[["agentview_image", "robot0_eye_in_hand_image"]], + values=[ + ["agentview_image", + "robot0_eye_in_hand_image"] + ], ) else: generator.add_param( @@ -429,15 +418,13 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object"] ], ) - elif args.env == "transport": + elif args.env == 'transport': # set videos off args.no_video = True @@ -448,50 +435,50 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions": { + "actions":{ "normalization": None, }, - "action_dict/abs_pos": {"normalization": "min_max"}, + "action_dict/abs_pos": { + "normalization": "min_max" + }, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/gripper": { "normalization": None, - }, + } } ], ) - if args.mod == "im": + if args.mod == 'im': generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", - ] + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos"] ], ) generator.add_param( @@ -499,12 +486,10 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "shouldercamera0_image", - "robot0_eye_in_hand_image", - "shouldercamera1_image", - "robot1_eye_in_hand_image", - ] + ["shouldercamera0_image", + "robot0_eye_in_hand_image", + "shouldercamera1_image", + "robot1_eye_in_hand_image"] ], ) else: @@ -513,15 +498,13 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", - "object", - ] + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", + "object"] ], ) @@ -531,7 +514,7 @@ def set_env_settings(generator, args): group=-1, values=[700], ) - elif args.env == "tool_hang": + elif args.env == 'tool_hang': # set videos off args.no_video = True @@ -541,72 +524,89 @@ def set_env_settings(generator, args): group=-1, values=[ { - "actions": { + "actions":{ "normalization": None, }, - "action_dict/abs_pos": {"normalization": "min_max"}, + "action_dict/abs_pos": { + "normalization": "min_max" + }, "action_dict/abs_rot_axis_angle": { "normalization": "min_max", - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, 
"action_dict/abs_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/rel_pos": { "normalization": None, }, "action_dict/rel_rot_axis_angle": { "normalization": None, - "format": "rot_axis_angle", + "format": "rot_axis_angle" }, "action_dict/rel_rot_6d": { "normalization": None, - "format": "rot_6d", + "format": "rot_6d" }, "action_dict/gripper": { "normalization": None, - }, + } } ], ) - if args.mod == "im": + if args.mod == 'im': generator.add_param( key="observation.modalities.obs.low_dim", name="", group=-1, - values=[["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"]], + values=[ + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos"] + ], ) generator.add_param( key="observation.modalities.obs.rgb", name="", group=-1, - values=[["sideview_image", "robot0_eye_in_hand_image"]], + values=[ + ["sideview_image", + "robot0_eye_in_hand_image"] + ], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[216], + values=[ + 216 + ], ) generator.add_param( key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[216], + values=[ + 216 + ], ) generator.add_param( key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_height", name="", group=-1, - values=[216], + values=[ + 216 + ], ) generator.add_param( key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_width", name="", group=-1, - values=[216], + values=[ + 216 + ], ) else: generator.add_param( @@ -614,12 +614,10 @@ def set_env_settings(generator, args): name="", group=-1, values=[ - [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + ["robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "object"] ], ) @@ -634,15 +632,17 @@ def set_env_settings(generator, args): def set_mod_settings(generator, args): - if args.mod == "ld": + if args.mod == 'ld': if "experiment.save.epochs" not in generator.parameters: generator.add_param( key="experiment.save.epochs", name="", group=-1, - values=[[2000]], + values=[ + [2000] + ], ) - elif args.mod == "im": + elif args.mod == 'im': if "experiment.save.every_n_epochs" not in generator.parameters: generator.add_param( key="experiment.save.every_n_epochs", @@ -830,14 +830,14 @@ def get_argparser(): parser.add_argument( "--env", type=str, - default="r2d2", + default='r2d2', ) parser.add_argument( - "--mod", + '--mod', type=str, - choices=["ld", "im"], - default="im", + choices=['ld', 'im'], + default='im', ) parser.add_argument( @@ -847,32 +847,55 @@ def get_argparser(): default=None, ) - parser.add_argument("--script", type=str, default=None) + parser.add_argument( + "--script", + type=str, + default=None + ) - parser.add_argument("--wandb_proj_name", type=str, default=None) + parser.add_argument( + "--wandb_proj_name", + type=str, + default=None + ) parser.add_argument( "--debug", action="store_true", ) - parser.add_argument("--no_video", action="store_true") + parser.add_argument( + '--no_video', + action='store_true' + ) parser.add_argument( "--tmplog", action="store_true", ) - parser.add_argument("--nr", type=int, default=-1) + parser.add_argument( + "--nr", + type=int, + default=-1 + ) parser.add_argument( "--no_wandb", action="store_true", ) - parser.add_argument("--n_seeds", type=int, default=None) + parser.add_argument( + "--n_seeds", + type=int, + default=None + ) - parser.add_argument("--num_cmd_groups", type=int, default=None) + parser.add_argument( + "--num_cmd_groups", + type=int, + 
default=None + ) return parser @@ -881,7 +904,7 @@ def make_generator(args, make_generator_helper): if args.tmplog or args.debug and args.name is None: args.name = "debug" else: - time_str = datetime.datetime.fromtimestamp(time.time()).strftime("%m-%d-") + time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') args.name = time_str + str(args.name) if args.debug or args.tmplog: @@ -894,7 +917,7 @@ def make_generator(args, make_generator_helper): pass if (args.debug or args.tmplog) and (args.wandb_proj_name is None): - args.wandb_proj_name = "debug" + args.wandb_proj_name = 'debug' if not args.debug: assert args.name is not None diff --git a/robomimic/scripts/conversion/convert_d4rl.py b/robomimic/scripts/conversion/convert_d4rl.py index 537c8a7f..99fc1d93 100644 --- a/robomimic/scripts/conversion/convert_d4rl.py +++ b/robomimic/scripts/conversion/convert_d4rl.py @@ -75,26 +75,24 @@ write_folder = os.path.join(base_folder, "converted") if not os.path.exists(write_folder): os.makedirs(write_folder) - output_path = os.path.join( - base_folder, "converted", "{}.hdf5".format(args.env.replace("-", "_")) - ) + output_path = os.path.join(base_folder, "converted", "{}.hdf5".format(args.env.replace("-", "_"))) f_sars = h5py.File(output_path, "w") f_sars_grp = f_sars.create_group("data") # code to split D4RL data into trajectories # (modified from https://github.com/aviralkumar2907/d4rl_evaluations/blob/bear_intergrate/bear/examples/bear_hdf5_d4rl.py#L18) - all_obs = ds["observations"] - all_act = ds["actions"] + all_obs = ds['observations'] + all_act = ds['actions'] N = all_obs.shape[0] - obs = all_obs[: N - 1] - actions = all_act[: N - 1] + obs = all_obs[:N-1] + actions = all_act[:N-1] next_obs = all_obs[1:] - rewards = np.squeeze(ds["rewards"][: N - 1]) - dones = np.squeeze(ds["terminals"][: N - 1]).astype(np.int32) + rewards = np.squeeze(ds['rewards'][:N-1]) + dones = np.squeeze(ds['terminals'][:N-1]).astype(np.int32) - assert "timeouts" in ds - timeouts = ds["timeouts"][:] + assert 'timeouts' in ds + timeouts = ds['timeouts'][:] ctr = 0 total_samples = 0 @@ -134,19 +132,12 @@ ctr = 0 traj = dict(obs=[], next_obs=[], actions=[], rewards=[], dones=[]) - print( - "\nExcluding {} samples at end of file due to no trajectory truncation.".format( - len(traj["actions"]) - ) - ) - print( - "Wrote {} trajectories to new converted hdf5 at {}\n".format( - num_traj, output_path - ) - ) + print("\nExcluding {} samples at end of file due to no trajectory truncation.".format(len(traj["actions"]))) + print("Wrote {} trajectories to new converted hdf5 at {}\n".format(num_traj, output_path)) # metadata f_sars_grp.attrs["total"] = total_samples f_sars_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) f_sars.close() + diff --git a/robomimic/scripts/conversion/convert_robosuite.py b/robomimic/scripts/conversion/convert_robosuite.py index 8ebdbb2e..88258698 100644 --- a/robomimic/scripts/conversion/convert_robosuite.py +++ b/robomimic/scripts/conversion/convert_robosuite.py @@ -33,7 +33,7 @@ ) args = parser.parse_args() - f = h5py.File(args.dataset, "a") # edit mode + f = h5py.File(args.dataset, "a") # edit mode # store env meta env_name = f["data"].attrs["env"] diff --git a/robomimic/scripts/conversion/convert_roboturk_pilot.py b/robomimic/scripts/conversion/convert_roboturk_pilot.py index e289e802..21059804 100644 --- a/robomimic/scripts/conversion/convert_roboturk_pilot.py +++ b/robomimic/scripts/conversion/convert_roboturk_pilot.py @@ -70,13 +70,11 @@ def 
convert_rt_pilot_hdf5(ref_folder): actions = np.concatenate([jvels, gripper_acts], axis=1) # IMPORTANT: clip actions to -1, 1, since this is expected by the codebase - actions = np.clip(actions, -1.0, 1.0) + actions = np.clip(actions, -1., 1.) ep_data_grp.create_dataset("actions", data=actions) # store model xml directly in the new hdf5 file - model_path = os.path.join( - ref_folder, "models", f["data/{}".format(ep)].attrs["model_file"] - ) + model_path = os.path.join(ref_folder, "models", f["data/{}".format(ep)].attrs["model_file"]) f_model = open(model_path, "r") model_xml = f_model.read() f_model.close() @@ -84,9 +82,7 @@ def convert_rt_pilot_hdf5(ref_folder): # store num samples for this ep num_samples = actions.shape[0] - ep_data_grp.attrs["num_samples"] = ( - num_samples # number of transitions in this episode - ) + ep_data_grp.attrs["num_samples"] = num_samples # number of transitions in this episode num_samples_arr.append(num_samples) # write dataset attributes (metadata) @@ -95,7 +91,7 @@ def convert_rt_pilot_hdf5(ref_folder): # construct and save env metadata env_meta = dict() env_meta["type"] = EB.EnvType.ROBOSUITE_TYPE - env_meta["env_name"] = f["data"].attrs["env"] + "Teleop" + env_meta["env_name"] = (f["data"].attrs["env"] + "Teleop") # hardcode robosuite v0.3 args robosuite_args = { "has_renderer": False, @@ -112,7 +108,7 @@ def convert_rt_pilot_hdf5(ref_folder): "control_freq": 100, } env_meta["env_kwargs"] = robosuite_args - f_new_grp.attrs["env_args"] = json.dumps(env_meta, indent=4) # environment info + f_new_grp.attrs["env_args"] = json.dumps(env_meta, indent=4) # environment info print("\n====== Added env meta ======") print(f_new_grp.attrs["env_args"]) @@ -148,14 +144,10 @@ def split_fastest_from_hdf5(hdf5_path, n): # create filter key name = "fastest_{}".format(n) - lengths = create_hdf5_filter_key( - hdf5_path=hdf5_path, demo_keys=filtered_demos, key_name=name - ) + lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=filtered_demos, key_name=name) print("Total number of samples in fastest {} demos: {}".format(n, np.sum(lengths))) - print( - "Average number of samples in fastest {} demos: {}".format(n, np.mean(lengths)) - ) + print("Average number of samples in fastest {} demos: {}".format(n, np.mean(lengths))) if __name__ == "__main__": @@ -185,14 +177,8 @@ def split_fastest_from_hdf5(hdf5_path, n): print("\nCreating filter key for fastest {} trajectories...".format(args.n)) split_fastest_from_hdf5(hdf5_path=hdf5_path, n=args.n) - print( - "\nCreating 90-10 train-validation split for fastest {} trajectories...".format( - args.n - ) - ) - split_train_val_from_hdf5( - hdf5_path=hdf5_path, val_ratio=0.1, filter_key="fastest_{}".format(args.n) - ) + print("\nCreating 90-10 train-validation split for fastest {} trajectories...".format(args.n)) + split_train_val_from_hdf5(hdf5_path=hdf5_path, val_ratio=0.1, filter_key="fastest_{}".format(args.n)) print( "\nWARNING: new dataset has replaced old one in demo.hdf5 file. " @@ -202,7 +188,5 @@ def split_fastest_from_hdf5(hdf5_path, n): print( "\nNOTE: the new dataset also contains a fastest_{} filter key, for an easy way " "to train on the fastest trajectories. Just set config.train.hdf5_filter to train on this " - "subset. A common choice is 225 when training on the bins-Can dataset.\n".format( - args.n - ) + "subset. 
A common choice is 225 when training on the bins-Can dataset.\n".format(args.n) ) diff --git a/robomimic/scripts/dataset_states_to_obs.py b/robomimic/scripts/dataset_states_to_obs.py index e2664c20..e08f4eb9 100644 --- a/robomimic/scripts/dataset_states_to_obs.py +++ b/robomimic/scripts/dataset_states_to_obs.py @@ -47,7 +47,6 @@ python dataset_states_to_obs.py --dataset /path/to/demo.hdf5 --output_name image_dense_done_1.hdf5 \ --done_mode 1 --dense --camera_names agentview robot0_eye_in_hand --camera_height 84 --camera_width 84 """ - import os import json import h5py @@ -63,13 +62,13 @@ def extract_trajectory( - env, - initial_state, - states, + env, + initial_state, + states, actions, done_mode, - camera_names=None, - camera_height=84, + camera_names=None, + camera_height=84, camera_width=84, ): """ @@ -81,8 +80,8 @@ def extract_trajectory( initial_state (dict): initial simulation state to load states (np.array): array of simulation states to load to extract information actions (np.array): array of actions - done_mode (int): how to write done signal. If 0, done is 1 whenever s' is a - success state. If 1, done is 1 at the end of each trajectory. + done_mode (int): how to write done signal. If 0, done is 1 whenever s' is a + success state. If 1, done is 1 at the end of each trajectory. If 2, do both. """ assert isinstance(env, EnvBase) @@ -98,18 +97,18 @@ def extract_trajectory( if is_robosuite_env: camera_info = get_camera_info( env=env, - camera_names=camera_names, - camera_height=camera_height, + camera_names=camera_names, + camera_height=camera_height, camera_width=camera_width, ) traj = dict( - obs=[], - next_obs=[], - rewards=[], - dones=[], - actions=np.array(actions), - states=np.array(states), + obs=[], + next_obs=[], + rewards=[], + dones=[], + actions=np.array(actions), + states=np.array(states), initial_state_dict=initial_state, ) traj_len = states.shape[0] @@ -122,7 +121,7 @@ def extract_trajectory( next_obs, _, _, _ = env.step(actions[t - 1]) else: # reset to simulator state to get observation - next_obs = env.reset_to({"states": states[t]}) + next_obs = env.reset_to({"states" : states[t]}) # infer reward signal # note: our tasks use reward r(s'), reward AFTER transition, so this is @@ -167,8 +166,8 @@ def extract_trajectory( def get_camera_info( env, - camera_names=None, - camera_height=84, + camera_names=None, + camera_height=84, camera_width=84, ): """ @@ -183,27 +182,15 @@ def get_camera_info( camera_info = dict() for cam_name in camera_names: - K = env.get_camera_intrinsic_matrix( - camera_name=cam_name, camera_height=camera_height, camera_width=camera_width - ) - R = env.get_camera_extrinsic_matrix( - camera_name=cam_name - ) # camera pose in world frame + K = env.get_camera_intrinsic_matrix(camera_name=cam_name, camera_height=camera_height, camera_width=camera_width) + R = env.get_camera_extrinsic_matrix(camera_name=cam_name) # camera pose in world frame if "eye_in_hand" in cam_name: # convert extrinsic matrix to be relative to robot eef control frame assert cam_name.startswith("robot0") eef_site_name = env.base_env.robots[0].controller.eef_name - eef_pos = np.array( - env.base_env.sim.data.site_xpos[ - env.base_env.sim.model.site_name2id(eef_site_name) - ] - ) - eef_rot = np.array( - env.base_env.sim.data.site_xmat[ - env.base_env.sim.model.site_name2id(eef_site_name) - ].reshape([3, 3]) - ) - eef_pose = np.zeros((4, 4)) # eef pose in world frame + eef_pos = np.array(env.base_env.sim.data.site_xpos[env.base_env.sim.model.site_name2id(eef_site_name)]) + eef_rot = 
np.array(env.base_env.sim.data.site_xmat[env.base_env.sim.model.site_name2id(eef_site_name)].reshape([3, 3])) + eef_pose = np.zeros((4, 4)) # eef pose in world frame eef_pose[:3, :3] = eef_rot eef_pose[:3, 3] = eef_pos eef_pose[3, 3] = 1.0 @@ -211,7 +198,7 @@ def get_camera_info( eef_pose_inv[:3, :3] = eef_pose[:3, :3].T eef_pose_inv[:3, 3] = -eef_pose_inv[:3, :3].dot(eef_pose[:3, 3]) eef_pose_inv[3, 3] = 1.0 - R = R.dot(eef_pose_inv) # T_E^W * T_W^C = T_E^C + R = R.dot(eef_pose_inv) # T_E^W * T_W^C = T_E^C camera_info[cam_name] = dict( intrinsics=K.tolist(), extrinsics=R.tolist(), @@ -227,9 +214,9 @@ def dataset_states_to_obs(args): env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset) env = EnvUtils.create_env_for_data_processing( env_meta=env_meta, - camera_names=args.camera_names, - camera_height=args.camera_height, - camera_width=args.camera_width, + camera_names=args.camera_names, + camera_height=args.camera_height, + camera_width=args.camera_width, reward_shaping=args.shaped, use_depth_obs=args.depth, ) @@ -249,7 +236,7 @@ def dataset_states_to_obs(args): # maybe reduce the number of demonstrations to playback if args.n is not None: - demos = demos[: args.n] + demos = demos[:args.n] # output file in same directory as input file output_path = os.path.join(os.path.dirname(args.dataset), args.output_name) @@ -271,13 +258,13 @@ def dataset_states_to_obs(args): # extract obs, rewards, dones actions = f["data/{}/actions".format(ep)][()] traj, camera_info = extract_trajectory( - env=env, - initial_state=initial_state, - states=states, + env=env, + initial_state=initial_state, + states=states, actions=actions, done_mode=args.done_mode, - camera_names=args.camera_names, - camera_height=args.camera_height, + camera_names=args.camera_names, + camera_height=args.camera_height, camera_width=args.camera_width, ) @@ -298,35 +285,19 @@ def dataset_states_to_obs(args): ep_data_grp.create_dataset("dones", data=np.array(traj["dones"])) for k in traj["obs"]: if args.compress: - ep_data_grp.create_dataset( - "obs/{}".format(k), - data=np.array(traj["obs"][k]), - compression="gzip", - ) + ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k]), compression="gzip") else: - ep_data_grp.create_dataset( - "obs/{}".format(k), data=np.array(traj["obs"][k]) - ) + ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k])) if not args.exclude_next_obs: if args.compress: - ep_data_grp.create_dataset( - "next_obs/{}".format(k), - data=np.array(traj["next_obs"][k]), - compression="gzip", - ) + ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k]), compression="gzip") else: - ep_data_grp.create_dataset( - "next_obs/{}".format(k), data=np.array(traj["next_obs"][k]) - ) + ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k])) # episode metadata if is_robosuite_env: - ep_data_grp.attrs["model_file"] = traj["initial_state_dict"][ - "model" - ] # model xml for this episode - ep_data_grp.attrs["num_samples"] = traj["actions"].shape[ - 0 - ] # number of transitions in this episode + ep_data_grp.attrs["model_file"] = traj["initial_state_dict"]["model"] # model xml for this episode + ep_data_grp.attrs["num_samples"] = traj["actions"].shape[0] # number of transitions in this episode if camera_info is not None: assert is_robosuite_env @@ -334,15 +305,14 @@ def dataset_states_to_obs(args): total_samples += traj["actions"].shape[0] + # copy over all filter keys that exist in the original hdf5 if "mask" 
in f: f.copy("mask", f_out) # global metadata data_grp.attrs["total"] = total_samples - data_grp.attrs["env_args"] = json.dumps( - env.serialize(), indent=4 - ) # environment info + data_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) # environment info print("Wrote {} trajectories to {}".format(len(demos), output_path)) f.close() @@ -376,8 +346,8 @@ def dataset_states_to_obs(args): # flag for reward shaping parser.add_argument( - "--shaped", - action="store_true", + "--shaped", + action='store_true', help="(optional) use shaped rewards", ) @@ -385,7 +355,7 @@ def dataset_states_to_obs(args): parser.add_argument( "--camera_names", type=str, - nargs="+", + nargs='+', default=[], help="(optional) camera name(s) to use for image observations. Leave out to not use image observations.", ) @@ -406,13 +376,13 @@ def dataset_states_to_obs(args): # flag for including depth observations per camera parser.add_argument( - "--depth", - action="store_true", + "--depth", + action='store_true', help="(optional) use depth observations for each camera", ) - # specifies how the "done" signal is written. If "0", then the "done" signal is 1 wherever - # the transition (s, a, s') has s' in a task completion state. If "1", the "done" signal + # specifies how the "done" signal is written. If "0", then the "done" signal is 1 wherever + # the transition (s, a, s') has s' in a task completion state. If "1", the "done" signal # is one at the end of every trajectory. If "2", the "done" signal is 1 at task completion # states for successful trajectories and 1 at the end of all trajectories. parser.add_argument( @@ -425,29 +395,29 @@ def dataset_states_to_obs(args): # flag for copying rewards from source file instead of re-writing them parser.add_argument( - "--copy_rewards", - action="store_true", + "--copy_rewards", + action='store_true', help="(optional) copy rewards from source file instead of inferring them", ) # flag for copying dones from source file instead of re-writing them parser.add_argument( - "--copy_dones", - action="store_true", + "--copy_dones", + action='store_true', help="(optional) copy dones from source file instead of inferring them", ) # flag to exclude next obs in dataset parser.add_argument( - "--exclude-next-obs", - action="store_true", + "--exclude-next-obs", + action='store_true', help="(optional) exclude next obs in dataset", ) # flag to compress observations with gzip option in hdf5 parser.add_argument( - "--compress", - action="store_true", + "--compress", + action='store_true', help="(optional) compress observations with gzip option in hdf5", ) diff --git a/robomimic/scripts/download_datasets.py b/robomimic/scripts/download_datasets.py index c620902e..caf3a280 100644 --- a/robomimic/scripts/download_datasets.py +++ b/robomimic/scripts/download_datasets.py @@ -43,7 +43,6 @@ # download all real robot datasets python download_datasets.py --tasks real """ - import os import argparse @@ -51,26 +50,9 @@ import robomimic.utils.file_utils as FileUtils from robomimic import DATASET_REGISTRY -ALL_TASKS = [ - "lift", - "can", - "square", - "transport", - "tool_hang", - "lift_real", - "can_real", - "tool_hang_real", -] +ALL_TASKS = ["lift", "can", "square", "transport", "tool_hang", "lift_real", "can_real", "tool_hang_real"] ALL_DATASET_TYPES = ["ph", "mh", "mg", "paired"] -ALL_HDF5_TYPES = [ - "raw", - "low_dim", - "image", - "low_dim_sparse", - "low_dim_dense", - "image_sparse", - "image_dense", -] +ALL_HDF5_TYPES = ["raw", "low_dim", "image", "low_dim_sparse", "low_dim_dense", 
"image_sparse", "image_dense"] if __name__ == "__main__": @@ -88,7 +70,7 @@ parser.add_argument( "--tasks", type=str, - nargs="+", + nargs='+', default=["lift"], help="Tasks to download datasets for. Defaults to lift task. Pass 'all' to download all tasks (sim + real)\ 'sim' to download all sim tasks, 'real' to download all real tasks, or directly specify the list of\ @@ -99,7 +81,7 @@ parser.add_argument( "--dataset_types", type=str, - nargs="+", + nargs='+', default=["ph"], help="Dataset types to download datasets for (e.g. ph, mh, mg). Defaults to ph. Pass 'all' to download \ datasets for all available dataset types per task, or directly specify the list of dataset types.", @@ -109,7 +91,7 @@ parser.add_argument( "--hdf5_types", type=str, - nargs="+", + nargs='+', default=["low_dim"], help="hdf5 types to download datasets for (e.g. raw, low_dim, image). Defaults to raw. Pass 'all' \ to download datasets for all available hdf5 types per task and dataset, or directly specify the list\ @@ -119,8 +101,8 @@ # dry run - don't actually download datasets, but print which datasets would be downloaded parser.add_argument( "--dry_run", - action="store_true", - help="set this flag to do a dry run to only print which datasets would be downloaded", + action='store_true', + help="set this flag to do a dry run to only print which datasets would be downloaded" ) args = parser.parse_args() @@ -133,35 +115,23 @@ # load args download_tasks = args.tasks if "all" in download_tasks: - assert ( - len(download_tasks) == 1 - ), "all should be only tasks argument but got: {}".format(args.tasks) + assert len(download_tasks) == 1, "all should be only tasks argument but got: {}".format(args.tasks) download_tasks = ALL_TASKS elif "sim" in download_tasks: - assert ( - len(download_tasks) == 1 - ), "sim should be only tasks argument but got: {}".format(args.tasks) + assert len(download_tasks) == 1, "sim should be only tasks argument but got: {}".format(args.tasks) download_tasks = [task for task in ALL_TASKS if "real" not in task] elif "real" in download_tasks: - assert ( - len(download_tasks) == 1 - ), "real should be only tasks argument but got: {}".format(args.tasks) + assert len(download_tasks) == 1, "real should be only tasks argument but got: {}".format(args.tasks) download_tasks = [task for task in ALL_TASKS if "real" in task] download_dataset_types = args.dataset_types if "all" in download_dataset_types: - assert ( - len(download_dataset_types) == 1 - ), "all should be only dataset_types argument but got: {}".format( - args.dataset_types - ) + assert len(download_dataset_types) == 1, "all should be only dataset_types argument but got: {}".format(args.dataset_types) download_dataset_types = ALL_DATASET_TYPES download_hdf5_types = args.hdf5_types if "all" in download_hdf5_types: - assert ( - len(download_hdf5_types) == 1 - ), "all should be only hdf5_types argument but got: {}".format(args.hdf5_types) + assert len(download_hdf5_types) == 1, "all should be only hdf5_types argument but got: {}".format(args.hdf5_types) download_hdf5_types = ALL_HDF5_TYPES # download requested datasets @@ -171,20 +141,13 @@ if dataset_type in download_dataset_types: for hdf5_type in DATASET_REGISTRY[task][dataset_type]: if hdf5_type in download_hdf5_types: - download_dir = os.path.abspath( - os.path.join(default_base_dir, task, dataset_type) - ) - print( - "\nDownloading dataset:\n task: {}\n dataset type: {}\n hdf5 type: {}\n download path: {}".format( - task, dataset_type, hdf5_type, download_dir - ) - ) + download_dir = 
os.path.abspath(os.path.join(default_base_dir, task, dataset_type)) + print("\nDownloading dataset:\n task: {}\n dataset type: {}\n hdf5 type: {}\n download path: {}" + .format(task, dataset_type, hdf5_type, download_dir)) url = DATASET_REGISTRY[task][dataset_type][hdf5_type]["url"] if url is None: print( - "Skipping {}-{}-{}, no url for dataset exists.".format( - task, dataset_type, hdf5_type - ) + "Skipping {}-{}-{}, no url for dataset exists.".format(task, dataset_type, hdf5_type) + " Create this dataset locally by running the appropriate command from robomimic/scripts/extract_obs_from_raw_datasets.sh." ) continue @@ -194,9 +157,7 @@ # Make sure path exists and create if it doesn't os.makedirs(download_dir, exist_ok=True) FileUtils.download_url( - url=DATASET_REGISTRY[task][dataset_type][hdf5_type][ - "url" - ], + url=DATASET_REGISTRY[task][dataset_type][hdf5_type]["url"], download_dir=download_dir, ) print("") diff --git a/robomimic/scripts/download_momart_datasets.py b/robomimic/scripts/download_momart_datasets.py index fc230e3b..affecf11 100644 --- a/robomimic/scripts/download_momart_datasets.py +++ b/robomimic/scripts/download_momart_datasets.py @@ -45,7 +45,6 @@ # download all datasets python download_datasets.py --tasks all --dataset_types all """ - import os import argparse @@ -83,28 +82,28 @@ parser.add_argument( "--tasks", type=str, - nargs="+", + nargs='+', default=["table_setup_from_dishwasher"], help="Tasks to download datasets for. Defaults to table_setup_from_dishwasher task. Pass 'all' to download all" - f"5 tasks, or directly specify the list of tasks. Options are any of: {ALL_TASKS}", + f"5 tasks, or directly specify the list of tasks. Options are any of: {ALL_TASKS}", ) # dataset types to download datasets for parser.add_argument( "--dataset_types", type=str, - nargs="+", + nargs='+', default=["expert"], help="Dataset types to download datasets for (e.g. expert, suboptimal). Defaults to expert. Pass 'all' to " - "download datasets for all available dataset types per task, or directly specify the list of dataset " - f"types. Options are any of: {ALL_DATASET_TYPES}", + "download datasets for all available dataset types per task, or directly specify the list of dataset " + f"types. 
Options are any of: {ALL_DATASET_TYPES}", ) # dry run - don't actually download datasets, but print which datasets would be downloaded parser.add_argument( "--dry_run", - action="store_true", - help="set this flag to do a dry run to only print which datasets would be downloaded", + action='store_true', + help="set this flag to do a dry run to only print which datasets would be downloaded" ) args = parser.parse_args() @@ -117,18 +116,12 @@ # load args download_tasks = args.tasks if "all" in download_tasks: - assert ( - len(download_tasks) == 1 - ), "all should be only tasks argument but got: {}".format(args.tasks) + assert len(download_tasks) == 1, "all should be only tasks argument but got: {}".format(args.tasks) download_tasks = ALL_TASKS download_dataset_types = args.dataset_types if "all" in download_dataset_types: - assert ( - len(download_dataset_types) == 1 - ), "all should be only dataset_types argument but got: {}".format( - args.dataset_types - ) + assert len(download_dataset_types) == 1, "all should be only dataset_types argument but got: {}".format(args.dataset_types) download_dataset_types = ALL_DATASET_TYPES # Run sanity check first to warn user if they're about to download a huge amount of data @@ -141,13 +134,8 @@ # Verify user acknowledgement if we're not doing a dry run if not args.dry_run: - user_response = input( - f"Warning: requested datasets will take a total of {total_size}GB. Proceed? y/n\n" - ) - assert user_response.lower() in { - "yes", - "y", - }, f"Did not receive confirmation. Aborting download." + user_response = input(f"Warning: requested datasets will take a total of {total_size}GB. Proceed? y/n\n") + assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." # download requested datasets for task in MOMART_DATASET_REGISTRY: @@ -155,16 +143,12 @@ for dataset_type in MOMART_DATASET_REGISTRY[task]: if dataset_type in download_dataset_types: dataset_info = MOMART_DATASET_REGISTRY[task][dataset_type] - download_dir = os.path.abspath( - os.path.join(default_base_dir, task, dataset_type) - ) - print( - f"\nDownloading dataset:\n" - f" task: {task}\n" - f" dataset type: {dataset_type}\n" - f" dataset size: {dataset_info['size']}GB\n" - f" download path: {download_dir}" - ) + download_dir = os.path.abspath(os.path.join(default_base_dir, task, dataset_type)) + print(f"\nDownloading dataset:\n" + f" task: {task}\n" + f" dataset type: {dataset_type}\n" + f" dataset size: {dataset_info['size']}GB\n" + f" download path: {download_dir}") if args.dry_run: print("\ndry run: skip download") else: diff --git a/robomimic/scripts/generate_config_templates.py b/robomimic/scripts/generate_config_templates.py index 43523d92..56e1d871 100644 --- a/robomimic/scripts/generate_config_templates.py +++ b/robomimic/scripts/generate_config_templates.py @@ -2,7 +2,6 @@ Helpful script to generate example config files for each algorithm. These should be re-generated when new config options are added, or when default settings in the config classes are modified. """ - import os import json @@ -25,5 +24,5 @@ def main(): c.dump(filename=json_path) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/robomimic/scripts/generate_paper_configs.py b/robomimic/scripts/generate_paper_configs.py index 6ebf676d..52ed7d5b 100644 --- a/robomimic/scripts/generate_paper_configs.py +++ b/robomimic/scripts/generate_paper_configs.py @@ -18,20 +18,11 @@ # Specify where datasets exist, and specify where configs should be generated. 
python generate_paper_configs.py --config_dir /tmp/configs --dataset_dir /tmp/datasets --output_dir /tmp/experiment_results """ - import os import argparse import robomimic from robomimic import DATASET_REGISTRY -from robomimic.config import ( - Config, - BCConfig, - BCQConfig, - CQLConfig, - HBCConfig, - IRISConfig, - config_factory, -) +from robomimic.config import Config, BCConfig, BCQConfig, CQLConfig, HBCConfig, IRISConfig, config_factory def modify_config_for_default_low_dim_exp(config): @@ -72,11 +63,11 @@ def modify_config_for_default_low_dim_exp(config): with config.observation.values_unlocked(): # default observation is eef pose, gripper finger position, and object information, - # all of which are low-dim. + # all of which are low-dim. default_low_dim_obs = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", "object", ] # handle hierarchical observation configs @@ -140,13 +131,14 @@ def modify_config_for_default_image_exp(config): config.train.batch_size = 16 config.train.num_epochs = 600 + with config.observation.values_unlocked(): # default low-dim observation is eef pose, gripper finger position # default image observation is external camera and wrist camera config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", ] config.observation.modalities.obs.rgb = [ "agentview_image", @@ -158,23 +150,13 @@ def modify_config_for_default_image_exp(config): # default image encoder architecture is ResNet with spatial softmax config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = ( - False - ) - config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 
0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -189,9 +171,7 @@ def modify_config_for_default_image_exp(config): return config -def modify_config_for_dataset( - config, task_name, dataset_type, hdf5_type, base_dataset_dir, filter_key=None -): +def modify_config_for_dataset(config, task_name, dataset_type, hdf5_type, base_dataset_dir, filter_key=None): """ Modifies a Config object with experiment, training, and observation settings to correspond to experiment settings for the dataset collected on @task_name with @@ -205,7 +185,7 @@ def modify_config_for_dataset( dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). base_dataset_dir (str): path to directory where datasets are on disk. Directory structure is expected to be consistent with the output @@ -214,19 +194,12 @@ def modify_config_for_dataset( filter_key (str): if not None, use the provided filter key to select a subset of the provided dataset """ - assert ( - task_name in DATASET_REGISTRY - ), "task {} not found in dataset registry!".format(task_name) - assert ( - dataset_type in DATASET_REGISTRY[task_name] - ), "dataset type {} not found for task {} in dataset registry!".format( - dataset_type, task_name - ) - assert ( - hdf5_type in DATASET_REGISTRY[task_name][dataset_type] - ), "hdf5 type {} not found for dataset type {} and task {} in dataset registry!".format( - hdf5_type, dataset_type, task_name - ) + assert task_name in DATASET_REGISTRY, \ + "task {} not found in dataset registry!".format(task_name) + assert dataset_type in DATASET_REGISTRY[task_name], \ + "dataset type {} not found for task {} in dataset registry!".format(dataset_type, task_name) + assert hdf5_type in DATASET_REGISTRY[task_name][dataset_type], \ + "hdf5 type {} not found for dataset type {} and task {} in dataset registry!".format(hdf5_type, dataset_type, task_name) is_real_dataset = "real" in task_name if is_real_dataset: @@ -237,9 +210,7 @@ def modify_config_for_dataset( with config.experiment.values_unlocked(): # look up rollout evaluation horizon in registry and set it - config.experiment.rollout.horizon = DATASET_REGISTRY[task_name][dataset_type][ - hdf5_type - ]["horizon"] + config.experiment.rollout.horizon = DATASET_REGISTRY[task_name][dataset_type][hdf5_type]["horizon"] if dataset_type == "mg": # machine-generated datasets did not use validation @@ -265,19 +236,13 @@ def modify_config_for_dataset( raise ValueError("Unknown dataset type") else: file_name = url.split("/")[-1] - config.train.data = os.path.join( - base_dataset_dir, task_name, dataset_type, file_name - ) + config.train.data = os.path.join(base_dataset_dir, task_name, dataset_type, file_name) config.train.hdf5_filter_key = None if filter_key is None else filter_key config.train.hdf5_validation_filter_key = None if config.experiment.validate: # set train and valid keys for validation - config.train.hdf5_filter_key = ( - "train" if filter_key is None else "{}_train".format(filter_key) - ) - config.train.hdf5_validation_filter_key = ( - "valid" if filter_key is None else "{}_valid".format(filter_key) - ) + config.train.hdf5_filter_key = "train" if filter_key is None else "{}_train".format(filter_key) + config.train.hdf5_validation_filter_key = "valid" if filter_key is None else "{}_valid".format(filter_key) with config.observation.values_unlocked(): # maybe 
modify observation names and randomization sizes (since image size might be different) @@ -285,8 +250,8 @@ def modify_config_for_dataset( if is_real_dataset: # modify observation names for real robot datasets config.observation.modalities.obs.low_dim = [ - "ee_pose", - "gripper_position", + "ee_pose", + "gripper_position", ] if task_name == "tool_hang_real": @@ -312,12 +277,12 @@ def modify_config_for_dataset( if task_name == "transport": # robot proprioception per arm config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", ] # shoulder and wrist cameras per arm @@ -341,12 +306,12 @@ def modify_config_for_dataset( if task_name == "transport": # robot proprioception per arm default_low_dim_obs = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", + "robot0_eef_pos", + "robot0_eef_quat", + "robot0_gripper_qpos", + "robot1_eef_pos", + "robot1_eef_quat", + "robot1_gripper_qpos", "object", ] # handle hierarchical observation configs @@ -376,7 +341,7 @@ def modify_config_for_dataset( def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -387,7 +352,7 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). """ assert isinstance(config, BCConfig), "must be BCConfig" assert config.algo_name == "bc", "must be BCConfig" @@ -398,20 +363,16 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified - config.algo.optim_params.policy.learning_rate.initial = ( - 1e-4 # learning rate 1e-4 - ) - config.algo.actor_layer_dims = (1024, 1024) # MLP size (1024, 1024) - config.algo.gmm.enabled = True # enable GMM + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # learning rate 1e-4 + config.algo.actor_layer_dims = (1024, 1024) # MLP size (1024, 1024) + config.algo.gmm.enabled = True # enable GMM if dataset_type == "mg": # machine-generated datasets don't use GMM - config.algo.gmm.enabled = False # disable GMM + config.algo.gmm.enabled = False # disable GMM if hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # low-dim mg uses LR 1e-3 - config.algo.optim_params.policy.learning_rate.initial = ( - 1e-3 # learning rate 1e-3 - ) + config.algo.optim_params.policy.learning_rate.initial = 1e-3 # learning rate 1e-3 return config @@ -419,7 +380,7 @@ def modify_bc_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCConfig object for training on a particular kind of dataset. 
This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -430,7 +391,7 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). """ assert isinstance(config, BCConfig), "must be BCConfig" assert config.algo_name == "bc", "must be BCConfig" @@ -449,24 +410,22 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) config.algo.rnn.horizon = 10 # base parameters that may get modified - config.algo.optim_params.policy.learning_rate.initial = ( - 1e-4 # learning rate 1e-4 - ) - config.algo.actor_layer_dims = () # no MLP layers between rnn layer and output - config.algo.gmm.enabled = True # enable GMM - config.algo.rnn.hidden_dim = 400 # rnn dim 400 + config.algo.optim_params.policy.learning_rate.initial = 1e-4 # learning rate 1e-4 + config.algo.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.gmm.enabled = True # enable GMM + config.algo.rnn.hidden_dim = 400 # rnn dim 400 if dataset_type == "mg": # update hyperparams for machine-generated datasets - config.algo.gmm.enabled = False # disable GMM + config.algo.gmm.enabled = False # disable GMM if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # image datasets use RNN dim 1000 - config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 + config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 else: # update hyperparams for all other dataset types (ph, mh, paired) if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # image datasets use RNN dim 1000 - config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 + config.algo.rnn.hidden_dim = 1000 # rnn dim 1000 return config @@ -474,7 +433,7 @@ def modify_bc_rnn_config_for_dataset(config, task_name, dataset_type, hdf5_type) def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a BCQConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -485,7 +444,7 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
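As a quick usage sketch (illustrative, not part of the patch): these modifier functions are meant to be layered onto a freshly built config. The snippet below assumes it runs inside this script, where modify_bc_rnn_config_for_dataset is defined; the asserted values are exactly the ones that modifier sets above for a proficient-human low-dim dataset.

    from robomimic.config import config_factory

    # start from the default BC config, then apply the dataset-specific modifier
    config = config_factory(algo_name="bc")
    config = modify_bc_rnn_config_for_dataset(
        config, task_name="square", dataset_type="ph", hdf5_type="low_dim"
    )
    assert config.algo.rnn.enabled and config.algo.rnn.hidden_dim == 400
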
""" assert isinstance(config, BCQConfig), "must be BCQConfig" assert config.algo_name == "bcq", "must be BCQConfig" @@ -497,37 +456,31 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.optim_params.critic.learning_rate.initial = ( - 1e-4 # all learning rates 1e-3 - ) + config.algo.optim_params.critic.learning_rate.initial = 1e-4 # all learning rates 1e-3 config.algo.optim_params.action_sampler.learning_rate.initial = 1e-4 config.algo.optim_params.actor.learning_rate.initial = 1e-3 - config.algo.actor.enabled = False # disable actor by default - config.algo.action_sampler.vae.enabled = True # use VAE action sampler + config.algo.actor.enabled = False # disable actor by default + config.algo.action_sampler.vae.enabled = True # use VAE action sampler config.algo.action_sampler.gmm.enabled = False - config.algo.action_sampler.vae.kl_weight = 0.05 # beta 0.05 for VAE - config.algo.action_sampler.vae.latent_dim = 14 # latent dim 14 - config.algo.action_sampler.vae.prior.learn = False # N(0, 1) prior - config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) + config.algo.action_sampler.vae.kl_weight = 0.05 # beta 0.05 for VAE + config.algo.action_sampler.vae.latent_dim = 14 # latent dim 14 + config.algo.action_sampler.vae.prior.learn = False # N(0, 1) prior + config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) config.algo.action_sampler.vae.encoder_layer_dims = (300, 400) config.algo.action_sampler.vae.decoder_layer_dims = (300, 400) config.algo.actor.layer_dims = (300, 400) - config.algo.target_tau = 5e-4 # tau 5e-4 - config.algo.discount = 0.99 # discount 0.99 - config.algo.critic.num_action_samples = ( - 10 # number of action sampler samples at train and test - ) + config.algo.target_tau = 5e-4 # tau 5e-4 + config.algo.discount = 0.99 # discount 0.99 + config.algo.critic.num_action_samples = 10 # number of action sampler samples at train and test config.algo.critic.num_action_samples_rollout = 100 if dataset_type == "mg": # update hyperparams for machine-generated datasets - config.algo.optim_params.critic.learning_rate.initial = ( - 1e-3 # all learning rates 1e-3 - ) + config.algo.optim_params.critic.learning_rate.initial = 1e-3 # all learning rates 1e-3 config.algo.optim_params.action_sampler.learning_rate.initial = 1e-3 config.algo.optim_params.actor.learning_rate.initial = 1e-3 - config.algo.action_sampler.vae.kl_weight = 0.5 # beta 0.5 for VAE - config.algo.target_tau = 5e-3 # tau 5e-3 + config.algo.action_sampler.vae.kl_weight = 0.5 # beta 0.5 for VAE + config.algo.target_tau = 5e-3 # tau 5e-3 if hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"]: # enable actor only on low-dim @@ -566,7 +519,7 @@ def modify_bcq_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a CQLConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -577,7 +530,7 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. 
raw, low_dim, image). """ assert isinstance(config, CQLConfig), "must be CQLConfig" assert config.algo_name == "cql", "must be CQLConfig" @@ -596,17 +549,15 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.optim_params.critic.learning_rate.initial = 1e-3 # learning rates + config.algo.optim_params.critic.learning_rate.initial = 1e-3 # learning rates config.algo.optim_params.actor.learning_rate.initial = 3e-4 - config.algo.actor.target_entropy = ( - "default" # use automatic entropy tuning to default target value - ) - config.algo.critic.deterministic_backup = True # deterministic Q-backup - config.algo.critic.target_q_gap = 5.0 # use Lagrange, with threshold 5.0 + config.algo.actor.target_entropy = "default" # use automatic entropy tuning to default target value + config.algo.critic.deterministic_backup = True # deterministic Q-backup + config.algo.critic.target_q_gap = 5.0 # use Lagrange, with threshold 5.0 config.algo.critic.min_q_weight = 1.0 - config.algo.target_tau = 5e-3 # tau 5e-3 - config.algo.discount = 0.99 # discount 0.99 - config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) + config.algo.target_tau = 5e-3 # tau 5e-3 + config.algo.discount = 0.99 # discount 0.99 + config.algo.critic.layer_dims = (300, 400) # all MLP sizes at (300, 400) config.algo.actor.layer_dims = (300, 400) if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: @@ -619,7 +570,7 @@ def modify_cql_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a HBCConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -630,40 +581,34 @@ def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
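A note on the with config.algo.values_unlocked() blocks used by every modifier in this file: configs come out of config_factory locked, so value edits outside an unlock context raise an error, which keeps a mistyped key from silently becoming a dead hyperparameter. A minimal sketch, assuming the standard robomimic locking behavior:

    from robomimic.config import config_factory

    config = config_factory(algo_name="hbc")
    with config.algo.values_unlocked():
        config.algo.actor.rnn.hidden_dim = 400  # fine: existing key, unlocked context
    # without the context manager, the same assignment would raise
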
""" assert isinstance(config, HBCConfig), "must be HBCConfig" assert config.algo_name == "hbc", "must be HBCConfig" assert dataset_type in ["ph", "mh", "mg", "paired"], "invalid dataset type" - assert hdf5_type in [ - "low_dim", - "low_dim_sparse", - "low_dim_dense", - ], "HBC only runs on low-dim" + assert hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"], "HBC only runs on low-dim" is_real_dataset = "real" in task_name assert not is_real_dataset, "we only ran BC-RNN on real robot" with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.actor.optim_params.policy.learning_rate.initial = ( - 1e-3 # learning rates - ) + config.algo.actor.optim_params.policy.learning_rate.initial = 1e-3 # learning rates config.algo.planner.optim_params.goal_network.learning_rate.initial = 1e-3 - config.algo.planner.vae.enabled = True # goal VAE settings - config.algo.planner.vae.kl_weight = 5e-4 # beta 5e-4 - config.algo.planner.vae.latent_dim = 16 # latent dim 16 - config.algo.planner.vae.prior.learn = True # learn GMM prior with 10 modes + config.algo.planner.vae.enabled = True # goal VAE settings + config.algo.planner.vae.kl_weight = 5e-4 # beta 5e-4 + config.algo.planner.vae.latent_dim = 16 # latent dim 16 + config.algo.planner.vae.prior.learn = True # learn GMM prior with 10 modes config.algo.planner.vae.prior.is_conditioned = True config.algo.planner.vae.prior.use_gmm = True config.algo.planner.vae.prior.gmm_learn_weights = True config.algo.planner.vae.prior.gmm_num_modes = 10 - config.algo.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes + config.algo.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes config.algo.planner.vae.decoder_layer_dims = (1024, 1024) config.algo.planner.vae.prior_layer_dims = (1024, 1024) - config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim - config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim + config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output if dataset_type == "mg": # update hyperparams for machine-generated datasets @@ -676,7 +621,7 @@ def modify_hbc_config_for_dataset(config, task_name, dataset_type, hdf5_type): def modify_iris_config_for_dataset(config, task_name, dataset_type, hdf5_type): """ Modifies a IRISConfig object for training on a particular kind of dataset. This function - just sets algorithm hyperparameters in the algo config depending on the kind of + just sets algorithm hyperparameters in the algo config depending on the kind of dataset. Args: @@ -687,84 +632,65 @@ def modify_iris_config_for_dataset(config, task_name, dataset_type, hdf5_type): dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). 
""" assert isinstance(config, IRISConfig), "must be IRISConfig" assert config.algo_name == "iris", "must be IRISConfig" assert dataset_type in ["ph", "mh", "mg", "paired"], "invalid dataset type" - assert hdf5_type in [ - "low_dim", - "low_dim_sparse", - "low_dim_dense", - ], "IRIS only runs on low-dim" + assert hdf5_type in ["low_dim", "low_dim_sparse", "low_dim_dense"], "IRIS only runs on low-dim" is_real_dataset = "real" in task_name assert not is_real_dataset, "we only ran BC-RNN on real robot" with config.algo.values_unlocked(): # base parameters that may get modified further - config.algo.actor.optim_params.policy.learning_rate.initial = ( - 1e-3 # learning rates - ) - config.algo.value_planner.planner.optim_params.goal_network.learning_rate.initial = ( - 1e-3 - ) + config.algo.actor.optim_params.policy.learning_rate.initial = 1e-3 # learning rates + config.algo.value_planner.planner.optim_params.goal_network.learning_rate.initial = 1e-3 config.algo.value_planner.value.optim_params.critic.learning_rate.initial = 1e-3 - config.algo.value_planner.value.optim_params.action_sampler.learning_rate.initial = ( - 1e-4 - ) + config.algo.value_planner.value.optim_params.action_sampler.learning_rate.initial = 1e-4 - config.algo.value_planner.planner.vae.enabled = True # goal VAE settings - config.algo.value_planner.planner.vae.kl_weight = 5e-4 # beta 5e-4 - config.algo.value_planner.planner.vae.latent_dim = 14 # latent dim 14 - config.algo.value_planner.planner.vae.prior.learn = ( - True # learn GMM prior with 10 modes - ) + config.algo.value_planner.planner.vae.enabled = True # goal VAE settings + config.algo.value_planner.planner.vae.kl_weight = 5e-4 # beta 5e-4 + config.algo.value_planner.planner.vae.latent_dim = 14 # latent dim 14 + config.algo.value_planner.planner.vae.prior.learn = True # learn GMM prior with 10 modes config.algo.value_planner.planner.vae.prior.is_conditioned = True config.algo.value_planner.planner.vae.prior.use_gmm = True config.algo.value_planner.planner.vae.prior.gmm_learn_weights = True config.algo.value_planner.planner.vae.prior.gmm_num_modes = 10 - config.algo.value_planner.planner.vae.encoder_layer_dims = ( - 1024, - 1024, - ) # VAE network sizes + config.algo.value_planner.planner.vae.encoder_layer_dims = (1024, 1024) # VAE network sizes config.algo.value_planner.planner.vae.decoder_layer_dims = (1024, 1024) config.algo.value_planner.planner.vae.prior_layer_dims = (1024, 1024) - config.algo.value_planner.value.target_tau = 5e-4 # Value tau - config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.5 # Value KL + config.algo.value_planner.value.target_tau = 5e-4 # Value tau + config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.5 # Value KL config.algo.value_planner.value.action_sampler.vae.latent_dim = 16 config.algo.value_planner.value.action_sampler.actor_layer_dims = (300, 400) - config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim - config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output + config.algo.actor.rnn.hidden_dim = 400 # actor RNN dim + config.algo.actor.actor_layer_dims = () # no MLP layers between rnn layer and output if dataset_type in ["mh", "paired"]: # value LR 1e-4, KL weight is 0.05 for multi-human datasets - config.algo.value_planner.value.optim_params.critic.learning_rate.initial = ( - 1e-4 - ) + config.algo.value_planner.value.optim_params.critic.learning_rate.initial = 1e-4 config.algo.value_planner.value.action_sampler.vae.kl_weight = 0.05 if dataset_type in ["mg"]: # Enable value actor 
and set larger target tau config.algo.value_planner.value.actor.enabled = True - config.algo.value_planner.value.optim_params.actor.learning_rate.initial = ( - 1e-3 - ) + config.algo.value_planner.value.optim_params.actor.learning_rate.initial = 1e-3 config.algo.value_planner.value.target_tau = 5e-3 return config def generate_experiment_config( - base_exp_name, - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_name, - algo_config_modifier, - task_name, - dataset_type, + base_exp_name, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_name, + algo_config_modifier, + task_name, + dataset_type, hdf5_type, filter_key=None, additional_name=None, @@ -795,7 +721,7 @@ def generate_experiment_config( dataset_type (str): dataset type for this dataset (e.g. ph, mh, mg, paired). - hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). + hdf5_type (str): hdf5 type for this dataset (e.g. raw, low_dim, image). filter_key (str): if not None, use the provided filter key to select a subset of the provided dataset @@ -820,18 +746,18 @@ def generate_experiment_config( config = modifier_for_obs(config) # add in config based on the dataset config = modify_config_for_dataset( - config=config, - task_name=task_name, - dataset_type=dataset_type, - hdf5_type=hdf5_type, + config=config, + task_name=task_name, + dataset_type=dataset_type, + hdf5_type=hdf5_type, base_dataset_dir=base_dataset_dir, filter_key=filter_key, ) # add in algo hypers based on dataset config = algo_config_modifier( - config=config, - task_name=task_name, - dataset_type=dataset_type, + config=config, + task_name=task_name, + dataset_type=dataset_type, hdf5_type=hdf5_type, ) if additional_config_modifier is not None: @@ -840,47 +766,23 @@ def generate_experiment_config( # account for filter key in experiment naming and directory naming filter_key_str = "_{}".format(filter_key) if filter_key is not None else "" - dataset_type_dir = ( - "{}/{}".format(dataset_type, filter_key) - if filter_key is not None - else dataset_type - ) + dataset_type_dir = "{}/{}".format(dataset_type, filter_key) if filter_key is not None else dataset_type # account for @additional_name - additional_name_str = ( - "_{}".format(additional_name) if additional_name is not None else "" - ) + additional_name_str = "_{}".format(additional_name) if additional_name is not None else "" json_name = "{}{}".format(algo_name, additional_name_str) # set experiment name with config.experiment.values_unlocked(): - config.experiment.name = "{}_{}_{}_{}{}_{}{}".format( - base_exp_name, - algo_name, - task_name, - dataset_type, - filter_key_str, - hdf5_type, - additional_name_str, - ) + config.experiment.name = "{}_{}_{}_{}{}_{}{}".format(base_exp_name, algo_name, task_name, dataset_type, filter_key_str, hdf5_type, additional_name_str) # set output folder with config.train.values_unlocked(): if base_output_dir is None: base_output_dir = config.train.output_dir - config.train.output_dir = os.path.join( - base_output_dir, - base_exp_name, - algo_name, - task_name, - dataset_type_dir, - hdf5_type, - "trained_models", - ) - + config.train.output_dir = os.path.join(base_output_dir, base_exp_name, algo_name, task_name, dataset_type_dir, hdf5_type, "trained_models") + # save config to json file - dir_to_save = os.path.join( - base_config_dir, base_exp_name, task_name, dataset_type_dir, hdf5_type - ) + dir_to_save = os.path.join(base_config_dir, base_exp_name, task_name, dataset_type_dir, hdf5_type) os.makedirs(dir_to_save, exist_ok=True) json_path = 
os.path.join(dir_to_save, "{}.json".format(json_name)) config.dump(filename=json_path) @@ -889,10 +791,10 @@ def generate_experiment_config( def generate_core_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for core set of experiments. @@ -907,18 +809,18 @@ def generate_core_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ - core_json_paths = Config() # use for convenient nested dict + core_json_paths = Config() # use for convenient nested dict for task in DATASET_REGISTRY: for dataset_type in DATASET_REGISTRY[task]: for hdf5_type in DATASET_REGISTRY[task][dataset_type]: # if not real robot dataset, skip raw hdf5 - is_real_dataset = "real" in task + is_real_dataset = ("real" in task) if not is_real_dataset and hdf5_type == "raw": continue - + # get list of algorithms to generate configs for, for this hdf5 dataset algos_to_generate = ["bc", "bc_rnn", "bcq", "cql", "hbc", "iris"] if hdf5_type not in ["low_dim", "low_dim_sparse", "low_dim_dense"]: @@ -936,26 +838,24 @@ def generate_core_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, ) # save json path into dict - core_json_paths[task][dataset_type][hdf5_type][ - algo_name - ] = json_path + core_json_paths[task][dataset_type][hdf5_type][algo_name] = json_path return core_json_paths def generate_subopt_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the suboptimal human subsets of the multi-human datasets. @@ -972,10 +872,10 @@ def generate_subopt_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
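The core_json_paths = Config() trick above works because Config behaves like an auto-vivifying dictionary: missing intermediate keys are created on first access, so a four-level assignment needs no setdefault chain. The path in this sketch is hypothetical:

    from robomimic.config import Config

    paths = Config()  # plain Config objects start unlocked
    paths["lift"]["ph"]["low_dim"]["bc"] = "/tmp/configs/bc.json"  # hypothetical path
    assert paths["lift"]["ph"]["low_dim"]["bc"].endswith("bc.json")
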
""" - subopt_json_paths = Config() # use for convenient nested dict + subopt_json_paths = Config() # use for convenient nested dict for task in ["lift", "can", "square", "transport"]: # only generate configs for multi-human data subsets for dataset_type in ["mh"]: @@ -990,14 +890,7 @@ def generate_subopt_configs( for algo_name in algos_to_generate: - for fk in [ - "worse", - "okay", - "better", - "worse_okay", - "worse_better", - "okay_better", - ]: + for fk in ["worse", "okay", "better", "worse_okay", "worse_better", "okay_better"]: # generate config for this experiment config, json_path = generate_experiment_config( @@ -1005,28 +898,26 @@ def generate_subopt_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, filter_key=fk, ) # save json path into dict dataset_type_dir = "{}/{}".format(dataset_type, fk) - subopt_json_paths[task][dataset_type_dir][hdf5_type][ - algo_name - ] = json_path + subopt_json_paths[task][dataset_type_dir][hdf5_type][algo_name] = json_path return subopt_json_paths def generate_dataset_size_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the dataset size ablation experiments, where BC-RNN models @@ -1042,10 +933,10 @@ def generate_dataset_size_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
""" - size_ablation_json_paths = Config() # use for convenient nested dict + size_ablation_json_paths = Config() # use for convenient nested dict for task in ["lift", "can", "square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: @@ -1060,28 +951,26 @@ def generate_dataset_size_configs( base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, filter_key=fk, ) # save json path into dict dataset_type_dir = "{}/{}".format(dataset_type, fk) - size_ablation_json_paths[task][dataset_type_dir][hdf5_type][ - algo_name - ] = json_path + size_ablation_json_paths[task][dataset_type_dir][hdf5_type][algo_name] = json_path return size_ablation_json_paths def generate_obs_ablation_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for the observation ablation experiments, where BC and BC-RNN models @@ -1097,7 +986,7 @@ def generate_obs_ablation_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. 
""" @@ -1105,35 +994,25 @@ def generate_obs_ablation_configs( def add_eef_vel(config): with config.observation.values_unlocked(): old_low_dim_mods = list(config.observation.modalities.obs.low_dim) - old_low_dim_mods.extend( - ["robot0_eef_vel_lin", "robot0_eef_vel_ang", "robot0_gripper_qvel"] - ) + old_low_dim_mods.extend(["robot0_eef_vel_lin", "robot0_eef_vel_ang", "robot0_gripper_qvel"]) if "robot1_eef_pos" in old_low_dim_mods: - old_low_dim_mods.extend( - ["robot1_eef_vel_lin", "robot1_eef_vel_ang", "robot1_gripper_qvel"] - ) + old_low_dim_mods.extend(["robot1_eef_vel_lin", "robot1_eef_vel_ang", "robot1_gripper_qvel"]) config.observation.modalities.obs.low_dim = old_low_dim_mods return config def add_proprio(config): with config.observation.values_unlocked(): old_low_dim_mods = list(config.observation.modalities.obs.low_dim) - old_low_dim_mods.extend( - ["robot0_joint_pos_cos", "robot0_joint_pos_sin", "robot0_joint_vel"] - ) + old_low_dim_mods.extend(["robot0_joint_pos_cos", "robot0_joint_pos_sin", "robot0_joint_vel"]) if "robot1_eef_pos" in old_low_dim_mods: - old_low_dim_mods.extend( - ["robot1_joint_pos_cos", "robot1_joint_pos_sin", "robot1_joint_vel"] - ) + old_low_dim_mods.extend(["robot1_joint_pos_cos", "robot1_joint_pos_sin", "robot1_joint_vel"]) config.observation.modalities.obs.low_dim = old_low_dim_mods return config def remove_wrist(config): with config.observation.values_unlocked(): old_image_mods = list(config.observation.modalities.obs.rgb) - config.observation.modalities.obs.rgb = [ - m for m in old_image_mods if "eye_in_hand" not in m - ] + config.observation.modalities.obs.rgb = [m for m in old_image_mods if "eye_in_hand" not in m] return config def remove_rand(config): @@ -1141,7 +1020,7 @@ def remove_rand(config): config.observation.encoder.rgb.obs_randomizer_class = None return config - obs_ablation_json_paths = Config() # use for convenient nested dict + obs_ablation_json_paths = Config() # use for convenient nested dict for task in ["square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: @@ -1150,12 +1029,7 @@ def remove_rand(config): if hdf5_type == "low_dim": obs_modifiers = [add_eef_vel, add_proprio] else: - obs_modifiers = [ - add_eef_vel, - add_proprio, - remove_wrist, - remove_rand, - ] + obs_modifiers = [add_eef_vel, add_proprio, remove_wrist, remove_rand] # only bc and bc-rnn algos_to_generate = ["bc", "bc_rnn"] @@ -1167,10 +1041,10 @@ def remove_rand(config): base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, additional_name=obs_modifier.__name__, additional_config_modifier=obs_modifier, @@ -1178,21 +1052,19 @@ def remove_rand(config): # save json path into dict algo_name_str = "{}_{}".format(algo_name, obs_modifier.__name__) - obs_ablation_json_paths[task][dataset_type][hdf5_type][ - algo_name_str - ] = json_path + obs_ablation_json_paths[task][dataset_type][hdf5_type][algo_name_str] = json_path return obs_ablation_json_paths def generate_hyper_ablation_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ - Helper function to generate all configs for the 
hyperparameter sensitivity experiments, + Helper function to generate all configs for the hyperparameter sensitivity experiments, where BC-RNN models were trained on different ablations. Args: @@ -1205,7 +1077,7 @@ def generate_hyper_ablation_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. - algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ @@ -1222,12 +1094,12 @@ def change_gmm(config): def change_mlp(config): with config.algo.values_unlocked(): - config.algo.actor_layer_dims = (1024, 1024) + config.algo.actor_layer_dims = (1024, 1024) return config def change_conv(config): with config.observation.values_unlocked(): - config.observation.encoder.rgb.core_class = "ShallowConv" + config.observation.encoder.rgb.core_class = 'ShallowConv' config.observation.encoder.rgb.core_kwargs = Config() return config @@ -1241,26 +1113,16 @@ def change_rnnd_image(config): config.algo.rnn.hidden_dim = 400 return config - hyper_ablation_json_paths = Config() # use for convenient nested dict + hyper_ablation_json_paths = Config() # use for convenient nested dict for task in ["square", "transport"]: for dataset_type in ["ph", "mh"]: for hdf5_type in ["low_dim", "image"]: # observation modifiers to apply if hdf5_type == "low_dim": - hyper_modifiers = [ - change_lr, - change_gmm, - change_mlp, - change_rnnd_low_dim, - ] + hyper_modifiers = [change_lr, change_gmm, change_mlp, change_rnnd_low_dim] else: - hyper_modifiers = [ - change_lr, - change_gmm, - change_conv, - change_rnnd_image, - ] + hyper_modifiers = [change_lr, change_gmm, change_conv, change_rnnd_image] # only bc and bc-rnn algo_name = "bc_rnn" @@ -1271,10 +1133,10 @@ def change_rnnd_image(config): base_config_dir=base_config_dir, base_dataset_dir=base_dataset_dir, base_output_dir=base_output_dir, - algo_name=algo_name, - algo_config_modifier=algo_to_config_modifier[algo_name], - task_name=task, - dataset_type=dataset_type, + algo_name=algo_name, + algo_config_modifier=algo_to_config_modifier[algo_name], + task_name=task, + dataset_type=dataset_type, hdf5_type=hdf5_type, additional_name=hyper_modifier.__name__, additional_config_modifier=hyper_modifier, @@ -1282,18 +1144,16 @@ def change_rnnd_image(config): # save json path into dict algo_name_str = "{}_{}".format(algo_name, hyper_modifier.__name__) - hyper_ablation_json_paths[task][dataset_type][hdf5_type][ - algo_name_str - ] = json_path + hyper_ablation_json_paths[task][dataset_type][hdf5_type][algo_name_str] = json_path return hyper_ablation_json_paths def generate_d4rl_configs( - base_config_dir, - base_dataset_dir, - base_output_dir, - algo_to_config_modifier, + base_config_dir, + base_dataset_dir, + base_output_dir, + algo_to_config_modifier, ): """ Helper function to generate all configs for reproducing BCQ, CQL, and TD3-BC runs on some D4RL @@ -1309,7 +1169,7 @@ def generate_d4rl_configs( base_output_dir (str): directory to save training results to. If None, will use the directory from the default algorithm configs. 
- algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs + algo_to_config_modifier (dict): dictionary that maps algo name to a function that modifies configs to add algo hyperparameter settings, given the task, dataset, and hdf5 types. """ @@ -1328,19 +1188,15 @@ def cql_algo_config_modifier(config): # taken from TD3-BC settings described in their paper config.algo.optim_params.critic.learning_rate.initial = 3e-4 config.algo.optim_params.actor.learning_rate.initial = 3e-5 - config.algo.actor.bc_start_steps = 40000 # pre-training steps for actor - config.algo.critic.target_q_gap = ( - None # no Lagrange, and fixed weight of 10.0 - ) + config.algo.actor.bc_start_steps = 40000 # pre-training steps for actor + config.algo.critic.target_q_gap = None # no Lagrange, and fixed weight of 10.0 config.algo.critic.cql_weight = 10.0 - config.algo.critic.min_q_weight = 1.0 - config.algo.critic.deterministic_backup = ( - True # deterministic backup (no entropy in Q-target) - ) - config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes + config.algo.critic.min_q_weight = 1.0 + config.algo.critic.deterministic_backup = True # deterministic backup (no entropy in Q-target) + config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes config.algo.critic.layer_dims = (256, 256, 256) return config - + def iql_algo_config_modifier(config): with config.algo.values_unlocked(): # taken from IQL settings described in their paper @@ -1350,7 +1206,7 @@ def iql_algo_config_modifier(config): config.algo.optim_params.critic.learning_rate.initial = 3e-4 config.algo.optim_params.vf.learning_rate.initial = 3e-4 config.algo.optim_params.actor.learning_rate.initial = 3e-4 - config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes + config.algo.actor.layer_dims = (256, 256, 256) # MLP sizes config.algo.critic.layer_dims = (256, 256, 256) return config @@ -1371,7 +1227,7 @@ def iql_algo_config_modifier(config): # "hopper-medium-replay-v2", # "walker2d-medium-replay-v2", ] - d4rl_json_paths = Config() # use for convenient nested dict + d4rl_json_paths = Config() # use for convenient nested dict for task_name in d4rl_tasks: for algo_name in ["bcq", "cql", "td3_bc", "iql"]: config = config_factory(algo_name=algo_name) @@ -1383,9 +1239,7 @@ def iql_algo_config_modifier(config): config.experiment = ref_config.experiment config.train = ref_config.train config.observation = ref_config.observation - config.train.hdf5_normalize_obs = ( - False # only TD3-BC uses observation normalization - ) + config.train.hdf5_normalize_obs = False # only TD3-BC uses observation normalization # modify algo section for d4rl defaults if algo_name == "bcq": @@ -1404,19 +1258,9 @@ def iql_algo_config_modifier(config): base_output_dir_for_algo = "../{}_trained_models".format(algo_name) else: base_output_dir_for_algo = base_output_dir - config.train.output_dir = os.path.join( - base_output_dir_for_algo, - "d4rl", - algo_name, - task_name, - "trained_models", - ) - config.train.data = os.path.join( - base_dataset_dir, - "d4rl", - "converted", - "{}.hdf5".format(task_name.replace("-", "_")), - ) + config.train.output_dir = os.path.join(base_output_dir_for_algo, "d4rl", algo_name, task_name, "trained_models") + config.train.data = os.path.join(base_dataset_dir, "d4rl", "converted", + "{}.hdf5".format(task_name.replace("-", "_"))) # save config to json file dir_to_save = os.path.join(base_config_dir, "d4rl", task_name) @@ -1472,7 +1316,7 @@ def iql_algo_config_modifier(config): # algo to modifier algo_to_modifier = 
dict( - bc=modify_bc_config_for_dataset, + bc=modify_bc_config_for_dataset, bc_rnn=modify_bc_rnn_config_for_dataset, bcq=modify_bcq_config_for_dataset, cql=modify_cql_config_for_dataset, @@ -1491,13 +1335,13 @@ def iql_algo_config_modifier(config): ) # generate configs for each experiment name - config_json_paths = Config() # use for convenient nested dict + config_json_paths = Config() # use for convenient nested dict for exp_name in exp_name_to_generator: config_json_paths[exp_name] = exp_name_to_generator[exp_name]( - base_config_dir=generated_configs_base_dir, - base_dataset_dir=datasets_base_dir, - base_output_dir=output_base_dir, - algo_to_config_modifier=algo_to_modifier, + base_config_dir=generated_configs_base_dir, + base_dataset_dir=datasets_base_dir, + base_output_dir=output_base_dir, + algo_to_config_modifier=algo_to_modifier, ) # write output shell scripts @@ -1517,15 +1361,9 @@ def iql_algo_config_modifier(config): f.write("# dataset type: {}\n".format(dataset_type)) if len(hdf5_type) > 0: f.write("# hdf5 type: {}\n".format(hdf5_type)) - for algo_name in config_json_paths[exp_name][task][ - dataset_type - ][hdf5_type]: + for algo_name in config_json_paths[exp_name][task][dataset_type][hdf5_type]: # f.write("# {}\n".format(algo_name)) - exp_json_path = config_json_paths[exp_name][task][ - dataset_type - ][hdf5_type][algo_name] - cmd = "python {} --config {}\n".format( - train_script_loc, exp_json_path - ) + exp_json_path = config_json_paths[exp_name][task][dataset_type][hdf5_type][algo_name] + cmd = "python {} --config {}\n".format(train_script_loc, exp_json_path) f.write(cmd) f.write("\n") diff --git a/robomimic/scripts/get_dataset_info.py b/robomimic/scripts/get_dataset_info.py index 3fc88e00..9349ed8a 100644 --- a/robomimic/scripts/get_dataset_info.py +++ b/robomimic/scripts/get_dataset_info.py @@ -22,7 +22,6 @@ # run script only on validation data python get_dataset_info.py --dataset ../../tests/assets/test.hdf5 --filter_key valid """ - import h5py import json import argparse @@ -44,7 +43,7 @@ ) parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="verbose output", ) args = parser.parse_args() @@ -56,9 +55,7 @@ if filter_key is not None: # use the demonstrations from the filter key instead print("NOTE: using filter key {}".format(filter_key)) - demos = sorted( - [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])] - ) + demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]) else: # use all demonstrations demos = sorted(list(f["data"].keys())) @@ -67,9 +64,7 @@ if "mask" in f: all_filter_keys = {} for fk in f["mask"]: - fk_demos = sorted( - [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])] - ) + fk_demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(fk)])]) all_filter_keys[fk] = fk_demos # put demonstration list in increasing episode order @@ -108,11 +103,7 @@ if all_filter_keys is not None: print("==== Filter Key Contents ====") for fk in all_filter_keys: - print( - "filter_key {} with {} demos: {}".format( - fk, len(all_filter_keys[fk]), all_filter_keys[fk] - ) - ) + print("filter_key {} with {} demos: {}".format(fk, len(all_filter_keys[fk]), all_filter_keys[fk])) print("") env_meta = json.loads(f["data"].attrs["env_args"]) print("==== Env Meta ====") @@ -121,19 +112,13 @@ print("==== Dataset Structure ====") for ep in demos: - print( - "episode {} with {} transitions".format( - ep, f["data/{}".format(ep)].attrs["num_samples"] - ) - ) + 
print("episode {} with {} transitions".format(ep, f["data/{}".format(ep)].attrs["num_samples"])) for k in f["data/{}".format(ep)]: if k in ["obs", "next_obs"]: print(" key: {}".format(k)) for obs_k in f["data/{}/{}".format(ep, k)]: shape = f["data/{}/{}/{}".format(ep, k, obs_k)].shape - print( - " observation key {} with shape {}".format(obs_k, shape) - ) + print(" observation key {} with shape {}".format(obs_k, shape)) elif isinstance(f["data/{}/{}".format(ep, k)], h5py.Dataset): key_shape = f["data/{}/{}".format(ep, k)].shape print(" key: {} with shape {}".format(k, key_shape)) @@ -145,9 +130,5 @@ # maybe display error message print("") - if (action_min < -1.0) or (action_max > 1.0): - raise Exception( - "Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format( - action_min, action_max - ) - ) + if (action_min < -1.) or (action_max > 1.): + raise Exception("Dataset should have actions in [-1., 1.] but got bounds [{}, {}]".format(action_min, action_max)) diff --git a/robomimic/scripts/hyperparam_helper.py b/robomimic/scripts/hyperparam_helper.py index e9fb437b..870c739e 100644 --- a/robomimic/scripts/hyperparam_helper.py +++ b/robomimic/scripts/hyperparam_helper.py @@ -35,7 +35,6 @@ # assumes that /tmp/gen_configs/base.json has already been created (see quickstart section of docs for an example) python hyperparam_helper.py --config /tmp/gen_configs/base.json --script /tmp/gen_configs/out.sh """ - import argparse import robomimic @@ -53,58 +52,58 @@ def make_generator(config_file, script_file): # use RNN with horizon 10 generator.add_param( key="algo.rnn.enabled", - name="", - group=0, + name="", + group=0, values=[True], ) generator.add_param( - key="train.seq_length", - name="", - group=0, - values=[10], + key="train.seq_length", + name="", + group=0, + values=[10], ) generator.add_param( key="algo.rnn.horizon", - name="", - group=0, - values=[10], + name="", + group=0, + values=[10], ) # LR - 1e-3, 1e-4 generator.add_param( - key="algo.optim_params.policy.learning_rate.initial", - name="plr", - group=1, - values=[1e-3, 1e-4], + key="algo.optim_params.policy.learning_rate.initial", + name="plr", + group=1, + values=[1e-3, 1e-4], ) # GMM y / n generator.add_param( - key="algo.gmm.enabled", - name="gmm", - group=2, - values=[True, False], + key="algo.gmm.enabled", + name="gmm", + group=2, + values=[True, False], value_names=["t", "f"], ) # RNN dim 400 + MLP dims (1024, 1024) vs. RNN dim 1000 + empty MLP dims () generator.add_param( - key="algo.rnn.hidden_dim", - name="rnnd", - group=3, + key="algo.rnn.hidden_dim", + name="rnnd", + group=3, values=[ - 400, + 400, 1000, - ], + ], ) generator.add_param( - key="algo.actor_layer_dims", - name="mlp", - group=3, + key="algo.actor_layer_dims", + name="mlp", + group=3, values=[ - [1024, 1024], + [1024, 1024], [], - ], + ], value_names=["1024", "0"], ) diff --git a/robomimic/scripts/playback_dataset.py b/robomimic/scripts/playback_dataset.py index 148af36f..96cef1f2 100644 --- a/robomimic/scripts/playback_dataset.py +++ b/robomimic/scripts/playback_dataset.py @@ -85,19 +85,19 @@ def playback_trajectory_with_env( - env, - initial_state, - states, - actions=None, - render=False, - video_writer=None, - video_skip=5, + env, + initial_state, + states, + actions=None, + render=False, + video_writer=None, + video_skip=5, camera_names=None, first=False, ): """ Helper function to playback a single trajectory using the simulator environment. - If @actions are not None, it will play them open-loop after loading the initial state. 
+ If @actions are not None, it will play them open-loop after loading the initial state. Otherwise, @states are loaded one by one. Args: @@ -114,7 +114,7 @@ def playback_trajectory_with_env( """ assert isinstance(env, EnvBase) - write_video = video_writer is not None + write_video = (video_writer is not None) video_count = 0 assert not (render and write_video) @@ -123,7 +123,7 @@ def playback_trajectory_with_env( env.reset_to(initial_state) traj_len = states.shape[0] - action_playback = actions is not None + action_playback = (actions is not None) if action_playback: assert states.shape[0] == actions.shape[0] @@ -137,7 +137,7 @@ def playback_trajectory_with_env( err = np.linalg.norm(states[i + 1] - state_playback) print("warning: playback diverged by {} at step {}".format(err, i)) else: - env.reset_to({"states": states[i]}) + env.reset_to({"states" : states[i]}) # on-screen render if render: @@ -148,17 +148,8 @@ def playback_trajectory_with_env( if video_count % video_skip == 0: video_img = [] for cam_name in camera_names: - video_img.append( - env.render( - mode="rgb_array", - height=512, - width=512, - camera_name=cam_name, - ) - ) - video_img = np.concatenate( - video_img, axis=1 - ) # concatenate horizontally + video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name)) + video_img = np.concatenate(video_img, axis=1) # concatenate horizontally video_writer.append_data(video_img) video_count += 1 @@ -168,8 +159,8 @@ def playback_trajectory_with_env( def playback_trajectory_with_obs( traj_grp, - video_writer, - video_skip=5, + video_writer, + video_skip=5, image_names=None, depth_names=None, first=False, @@ -187,33 +178,20 @@ def playback_trajectory_with_obs( depth_names (list): determines which depth observations are used for rendering (if any). first (bool): if True, only use the first frame of each episode. 
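depth_to_rgb is imported from robomimic's utilities and its implementation is not shown in this patch. A stand-in consistent with how the function body below calls it (per-key trajectory-wide min/max, tiled to three channels so depth frames can be concatenated with RGB frames) might look like the following; this is an assumption, not the library code:

    import numpy as np

    def depth_to_rgb_sketch(depth, depth_min, depth_max):
        # normalize into [0, 1] with the precomputed trajectory-wide bounds
        norm = np.clip((depth - depth_min) / max(float(depth_max - depth_min), 1e-8), 0.0, 1.0)
        img = (255.0 * norm).astype(np.uint8)
        # tile the single channel to 3 so depth frames concatenate with rgb frames
        return np.repeat(img.reshape(img.shape[0], img.shape[1], 1), 3, axis=2)
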
""" - assert ( - image_names is not None - ), "error: must specify at least one image observation to use in @image_names" + assert image_names is not None, "error: must specify at least one image observation to use in @image_names" video_count = 0 if depth_names is not None: # compute min and max depth value across trajectory for normalization - depth_min = {k: traj_grp["obs/{}".format(k)][:].min() for k in depth_names} - depth_max = {k: traj_grp["obs/{}".format(k)][:].max() for k in depth_names} + depth_min = { k : traj_grp["obs/{}".format(k)][:].min() for k in depth_names } + depth_max = { k : traj_grp["obs/{}".format(k)][:].max() for k in depth_names } traj_len = traj_grp["actions"].shape[0] for i in range(traj_len): if video_count % video_skip == 0: # concatenate image obs together im = [traj_grp["obs/{}".format(k)][i] for k in image_names] - depth = ( - [ - depth_to_rgb( - traj_grp["obs/{}".format(k)][i], - depth_min=depth_min[k], - depth_max=depth_max[k], - ) - for k in depth_names - ] - if depth_names is not None - else [] - ) + depth = [depth_to_rgb(traj_grp["obs/{}".format(k)][i], depth_min=depth_min[k], depth_max=depth_max[k]) for k in depth_names] if depth_names is not None else [] frame = np.concatenate(im + depth, axis=1) video_writer.append_data(frame) video_count += 1 @@ -224,8 +202,8 @@ def playback_trajectory_with_obs( def playback_dataset(args): # some arg checking - write_video = args.video_path is not None - assert not (args.render and write_video) # either on-screen or video but not both + write_video = (args.video_path is not None) + assert not (args.render and write_video) # either on-screen or video but not both # Auto-fill camera rendering info if not specified if args.render_image_names is None: @@ -240,31 +218,25 @@ def playback_dataset(args): if args.use_obs: assert write_video, "playback with observations can only write to video" - assert ( - not args.use_actions - ), "playback with observations is offline and does not support action playback" + assert not args.use_actions, "playback with observations is offline and does not support action playback" if args.render_depth_names is not None: - assert ( - args.use_obs - ), "depth observations can only be visualized from observations currently" + assert args.use_obs, "depth observations can only be visualized from observations currently" # create environment only if not playing back with observations if not args.use_obs: - # need to make sure ObsUtils knows which observations are images, but it doesn't matter + # need to make sure ObsUtils knows which observations are images, but it doesn't matter # for playback since observations are unused. Pass a dummy spec here. 
dummy_spec = dict( obs=dict( - low_dim=["robot0_eef_pos"], - rgb=[], - ), + low_dim=["robot0_eef_pos"], + rgb=[], + ), ) ObsUtils.initialize_obs_utils_with_obs_specs(obs_modality_specs=dummy_spec) env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=args.dataset) - env = EnvUtils.create_env_from_metadata( - env_meta=env_meta, render=args.render, render_offscreen=write_video - ) + env = EnvUtils.create_env_from_metadata(env_meta=env_meta, render=args.render, render_offscreen=write_video) # some operations for playback are robosuite-specific, so determine if this environment is a robosuite env is_robosuite_env = EnvUtils.is_robosuite_env(env_meta) @@ -274,10 +246,7 @@ def playback_dataset(args): # list of all demonstration episodes (sorted in increasing number order) if args.filter_key is not None: print("using filter key: {}".format(args.filter_key)) - demos = [ - elem.decode("utf-8") - for elem in np.array(f["mask/{}".format(args.filter_key)]) - ] + demos = [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(args.filter_key)])] else: demos = list(f["data"].keys()) inds = np.argsort([int(elem[5:]) for elem in demos]) @@ -285,7 +254,7 @@ def playback_dataset(args): # maybe reduce the number of demonstrations to playback if args.n is not None: - demos = demos[: args.n] + demos = demos[:args.n] # maybe dump video video_writer = None @@ -298,8 +267,8 @@ def playback_dataset(args): if args.use_obs: playback_trajectory_with_obs( - traj_grp=f["data/{}".format(ep)], - video_writer=video_writer, + traj_grp=f["data/{}".format(ep)], + video_writer=video_writer, video_skip=args.video_skip, image_names=args.render_image_names, depth_names=args.render_depth_names, @@ -319,12 +288,11 @@ def playback_dataset(args): actions = f["data/{}/actions".format(ep)][()] playback_trajectory_with_env( - env=env, - initial_state=initial_state, - states=states, - actions=actions, - render=args.render, - video_writer=video_writer, + env=env, + initial_state=initial_state, + states=states, actions=actions, + render=args.render, + video_writer=video_writer, video_skip=args.video_skip, camera_names=args.render_image_names, first=args.first, @@ -360,21 +328,21 @@ def playback_dataset(args): # Use image observations instead of doing playback using the simulator env. parser.add_argument( "--use-obs", - action="store_true", + action='store_true', help="visualize trajectories with dataset image observations instead of simulator", ) # Playback stored dataset actions open-loop instead of loading from simulation states. parser.add_argument( "--use-actions", - action="store_true", + action='store_true', help="use open-loop action playback instead of loading sim states", ) # Whether to render playback to screen parser.add_argument( "--render", - action="store_true", + action='store_true', help="on-screen rendering", ) @@ -398,25 +366,25 @@ def playback_dataset(args): parser.add_argument( "--render_image_names", type=str, - nargs="+", + nargs='+', default=None, help="(optional) camera name(s) / image observation(s) to use for rendering on-screen or to video. 
Default is" - "None, which corresponds to a predefined camera for each env type", + "None, which corresponds to a predefined camera for each env type", ) # depth observations to use for writing to video parser.add_argument( "--render_depth_names", type=str, - nargs="+", + nargs='+', default=None, - help="(optional) depth observation(s) to use for rendering to video", + help="(optional) depth observation(s) to use for rendering to video" ) # Only use the first frame of each episode parser.add_argument( "--first", - action="store_true", + action='store_true', help="use first frame of each episode", ) diff --git a/robomimic/scripts/run_trained_agent.py b/robomimic/scripts/run_trained_agent.py index 22698487..95bddb21 100644 --- a/robomimic/scripts/run_trained_agent.py +++ b/robomimic/scripts/run_trained_agent.py @@ -51,7 +51,6 @@ --n_rollouts 50 --horizon 400 --seed 0 \ --dataset_path /path/to/output.hdf5 """ - import argparse import json import h5py @@ -70,18 +69,9 @@ from robomimic.algo import RolloutPolicy -def rollout( - policy, - env, - horizon, - render=False, - video_writer=None, - video_skip=5, - return_obs=False, - camera_names=None, -): +def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5, return_obs=False, camera_names=None): """ - Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video, + Helper function to carry out rollouts. Supports on-screen rendering, off-screen rendering to a video, and returns the rollout trajectory. Args: @@ -91,9 +81,9 @@ def rollout( render (bool): whether to render rollout on-screen video_writer (imageio writer): if provided, use to write rollout to video video_skip (int): how often to write video frames - return_obs (bool): if True, return possibly high-dimensional observations along the trajectoryu. - They are excluded by default because the low-dimensional simulation states should be a minimal - representation of the environment. + return_obs (bool): if True, return possibly high-dimensional observations along the trajectoryu. + They are excluded by default because the low-dimensional simulation states should be a minimal + representation of the environment. camera_names (list): determines which camera(s) are used for rendering. Pass more than one to output a video with multiple camera views concatenated horizontally. @@ -114,10 +104,8 @@ def rollout( results = {} video_count = 0 # video frame counter - total_reward = 0.0 - traj = dict( - actions=[], rewards=[], dones=[], states=[], initial_state_dict=state_dict - ) + total_reward = 0. 
+    traj = dict(actions=[], rewards=[], dones=[], states=[], initial_state_dict=state_dict)
     if return_obs:
         # store observations too
         traj.update(dict(obs=[], next_obs=[]))
@@ -141,17 +129,8 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5,
                 if video_count % video_skip == 0:
                     video_img = []
                     for cam_name in camera_names:
-                        video_img.append(
-                            env.render(
-                                mode="rgb_array",
-                                height=512,
-                                width=512,
-                                camera_name=cam_name,
-                            )
-                        )
-                    video_img = np.concatenate(
-                        video_img, axis=1
-                    )  # concatenate horizontally
+                        video_img.append(env.render(mode="rgb_array", height=512, width=512, camera_name=cam_name))
+                    video_img = np.concatenate(video_img, axis=1)  # concatenate horizontally
                     video_writer.append_data(video_img)
                 video_count += 1

@@ -183,9 +162,7 @@ def rollout(policy, env, horizon, render=False, video_writer=None, video_skip=5,
     if return_obs:
         # convert list of dict to dict of list for obs dictionaries (for convenient writes to hdf5 dataset)
         traj["obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list(traj["obs"])
-        traj["next_obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list(
-            traj["next_obs"]
-        )
+        traj["next_obs"] = TensorUtils.list_of_flat_dict_to_dict_of_list(traj["next_obs"])

     # list to numpy array
     for k in traj:
@@ -202,8 +179,8 @@ def run_trained_agent(args):
     # some arg checking
-    write_video = args.video_path is not None
-    assert not (args.render and write_video)  # either on-screen or video but not both
+    write_video = (args.video_path is not None)
+    assert not (args.render and write_video) # either on-screen or video but not both
     if args.render:
         # on-screen rendering can only support one camera
         assert len(args.camera_names) == 1
@@ -215,9 +192,7 @@
     device = TorchUtils.get_torch_device(try_to_use_cuda=True)

     # restore policy
-    policy, ckpt_dict = FileUtils.policy_from_checkpoint(
-        ckpt_path=ckpt_path, device=device, verbose=True
-    )
+    policy, ckpt_dict = FileUtils.policy_from_checkpoint(ckpt_path=ckpt_path, device=device, verbose=True)

     # read rollout settings
     rollout_num_episodes = args.n_rollouts
@@ -229,10 +204,10 @@
     # create environment from saved checkpoint
     env, _ = FileUtils.env_from_checkpoint(
-        ckpt_dict=ckpt_dict,
-        env_name=args.env,
-        render=args.render,
-        render_offscreen=(args.video_path is not None),
+        ckpt_dict=ckpt_dict, 
+        env_name=args.env, 
+        render=args.render, 
+        render_offscreen=(args.video_path is not None), 
         verbose=True,
     )

@@ -247,7 +222,7 @@
         video_writer = imageio.get_writer(args.video_path, fps=20)

     # maybe open hdf5 to write rollouts
-    write_dataset = args.dataset_path is not None
+    write_dataset = (args.dataset_path is not None)
     if write_dataset:
         data_writer = h5py.File(args.dataset_path, "w")
         data_grp = data_writer.create_group("data")
@@ -256,12 +231,12 @@
     rollout_stats = []
     for i in range(rollout_num_episodes):
         stats, traj = rollout(
-            policy=policy,
-            env=env,
-            horizon=rollout_horizon,
-            render=args.render,
-            video_writer=video_writer,
-            video_skip=args.video_skip,
+            policy=policy, 
+            env=env, 
+            horizon=rollout_horizon, 
+            render=args.render, 
+            video_writer=video_writer, 
+            video_skip=args.video_skip, 
             return_obs=(write_dataset and args.dataset_obs),
             camera_names=args.camera_names,
         )
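The rendering loop above tiles one frame per camera into a single video frame by concatenating along the width axis. A toy version of the same tiling:

```python
import numpy as np

cam_frames = [np.zeros((512, 512, 3), dtype=np.uint8) for _ in range(2)]  # two fake cameras
video_img = np.concatenate(cam_frames, axis=1)  # side-by-side along the width
print(video_img.shape)  # (512, 1024, 3)
```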
ep_data_grp.create_dataset("obs/{}".format(k), data=np.array(traj["obs"][k])) + ep_data_grp.create_dataset("next_obs/{}".format(k), data=np.array(traj["next_obs"][k])) # episode metadata if "model" in traj["initial_state_dict"]: - ep_data_grp.attrs["model_file"] = traj["initial_state_dict"][ - "model" - ] # model xml for this episode - ep_data_grp.attrs["num_samples"] = traj["actions"].shape[ - 0 - ] # number of transitions in this episode + ep_data_grp.attrs["model_file"] = traj["initial_state_dict"]["model"] # model xml for this episode + ep_data_grp.attrs["num_samples"] = traj["actions"].shape[0] # number of transitions in this episode total_samples += traj["actions"].shape[0] rollout_stats = TensorUtils.list_of_flat_dict_to_dict_of_list(rollout_stats) - avg_rollout_stats = {k: np.mean(rollout_stats[k]) for k in rollout_stats} + avg_rollout_stats = { k : np.mean(rollout_stats[k]) for k in rollout_stats } avg_rollout_stats["Num_Success"] = np.sum(rollout_stats["Success_Rate"]) print("Average Rollout Stats") print(json.dumps(avg_rollout_stats, indent=4)) @@ -305,9 +272,7 @@ def run_trained_agent(args): if write_dataset: # global metadata data_grp.attrs["total"] = total_samples - data_grp.attrs["env_args"] = json.dumps( - env.serialize(), indent=4 - ) # environment info + data_grp.attrs["env_args"] = json.dumps(env.serialize(), indent=4) # environment info data_writer.close() print("Wrote dataset trajectories to {}".format(args.dataset_path)) @@ -351,7 +316,7 @@ def run_trained_agent(args): # Whether to render rollouts to screen parser.add_argument( "--render", - action="store_true", + action='store_true', help="on-screen rendering", ) @@ -375,7 +340,7 @@ def run_trained_agent(args): parser.add_argument( "--camera_names", type=str, - nargs="+", + nargs='+', default=["agentview"], help="(optional) camera name(s) to use for rendering on-screen or to video", ) @@ -391,7 +356,7 @@ def run_trained_agent(args): # If True and @dataset_path is supplied, will write possibly high-dimensional observations to dataset. parser.add_argument( "--dataset_obs", - action="store_true", + action='store_true', help="include possibly high-dimensional observations in output dataset hdf5 file (by default,\ observations are excluded and only simulator states are saved)", ) @@ -406,3 +371,4 @@ def run_trained_agent(args): args = parser.parse_args() run_trained_agent(args) + diff --git a/robomimic/scripts/setup_macros.py b/robomimic/scripts/setup_macros.py index 5ae57d92..92c47271 100644 --- a/robomimic/scripts/setup_macros.py +++ b/robomimic/scripts/setup_macros.py @@ -21,9 +21,7 @@ print("{} does not exist! Aborting...".format(macros_path)) if os.path.exists(macros_private_path): - ans = input( - "{} already exists! \noverwrite? (y/n)\n".format(macros_private_path) - ) + ans = input("{} already exists! \noverwrite? 
(y/n)\n".format(macros_private_path)) if ans == "y": print("REMOVING") diff --git a/robomimic/scripts/split_train_val.py b/robomimic/scripts/split_train_val.py index 41d06e41..9d0502ea 100644 --- a/robomimic/scripts/split_train_val.py +++ b/robomimic/scripts/split_train_val.py @@ -40,9 +40,7 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): f = h5py.File(hdf5_path, "r") if filter_key is not None: print("using filter key: {}".format(filter_key)) - demos = sorted( - [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])] - ) + demos = sorted([elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)])]) else: demos = sorted(list(f["data"].keys())) num_demos = len(demos) @@ -52,18 +50,14 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): num_demos = len(demos) num_val = int(val_ratio * num_demos) mask = np.zeros(num_demos) - mask[:num_val] = 1.0 + mask[:num_val] = 1. np.random.shuffle(mask) mask = mask.astype(int) train_inds = (1 - mask).nonzero()[0] valid_inds = mask.nonzero()[0] train_keys = [demos[i] for i in train_inds] valid_keys = [demos[i] for i in valid_inds] - print( - "{} validation demonstrations out of {} total demonstrations.".format( - num_val, num_demos - ) - ) + print("{} validation demonstrations out of {} total demonstrations.".format(num_val, num_demos)) # pass mask to generate split name_1 = "train" @@ -72,12 +66,8 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): name_1 = "{}_{}".format(filter_key, name_1) name_2 = "{}_{}".format(filter_key, name_2) - train_lengths = create_hdf5_filter_key( - hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1 - ) - valid_lengths = create_hdf5_filter_key( - hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2 - ) + train_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=train_keys, key_name=name_1) + valid_lengths = create_hdf5_filter_key(hdf5_path=hdf5_path, demo_keys=valid_keys, key_name=name_2) print("Total number of train samples: {}".format(np.sum(train_lengths))) print("Average number of train samples {}".format(np.mean(train_lengths))) @@ -102,13 +92,14 @@ def split_train_val_from_hdf5(hdf5_path, val_ratio=0.1, filter_key=None): splitting the full set of trajectories", ) parser.add_argument( - "--ratio", type=float, default=0.1, help="validation ratio, in (0, 1)" + "--ratio", + type=float, + default=0.1, + help="validation ratio, in (0, 1)" ) args = parser.parse_args() # seed to make sure results are consistent np.random.seed(0) - split_train_val_from_hdf5( - args.dataset, val_ratio=args.ratio, filter_key=args.filter_key - ) + split_train_val_from_hdf5(args.dataset, val_ratio=args.ratio, filter_key=args.filter_key) diff --git a/robomimic/scripts/train.py b/robomimic/scripts/train.py index 1b603740..210b4172 100644 --- a/robomimic/scripts/train.py +++ b/robomimic/scripts/train.py @@ -60,7 +60,7 @@ def train(config, device): if config.experiment.logging.terminal_output_to_txt: # log stdout and stderr to a text file - logger = PrintLogger(os.path.join(log_dir, "log.txt")) + logger = PrintLogger(os.path.join(log_dir, 'log.txt')) sys.stdout = logger sys.stderr = logger @@ -76,17 +76,14 @@ def train(config, device): print("\n============= Loaded Environment Metadata =============") env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=config.train.data) shape_meta = FileUtils.get_shape_metadata_from_dataset( - dataset_path=config.train.data, all_obs_keys=config.all_obs_keys, verbose=True 
diff --git a/robomimic/scripts/train.py b/robomimic/scripts/train.py
index 1b603740..210b4172 100644
--- a/robomimic/scripts/train.py
+++ b/robomimic/scripts/train.py
@@ -60,7 +60,7 @@ def train(config, device):

     if config.experiment.logging.terminal_output_to_txt:
         # log stdout and stderr to a text file
-        logger = PrintLogger(os.path.join(log_dir, "log.txt"))
+        logger = PrintLogger(os.path.join(log_dir, 'log.txt'))
         sys.stdout = logger
         sys.stderr = logger

@@ -76,17 +76,14 @@ def train(config, device):
     print("\n============= Loaded Environment Metadata =============")
     env_meta = FileUtils.get_env_metadata_from_dataset(dataset_path=config.train.data)
     shape_meta = FileUtils.get_shape_metadata_from_dataset(
-        dataset_path=config.train.data, all_obs_keys=config.all_obs_keys, verbose=True
+        dataset_path=config.train.data,
+        all_obs_keys=config.all_obs_keys,
+        verbose=True
     )

     if config.experiment.env is not None:
         env_meta["env_name"] = config.experiment.env
-        print(
-            "=" * 30
-            + "\n"
-            + "Replacing Env to {}\n".format(env_meta["env_name"])
-            + "=" * 30
-        )
+        print("=" * 30 + "\n" + "Replacing Env to {}\n".format(env_meta["env_name"]) + "=" * 30)

     # create environment
     envs = OrderedDict()
@@ -101,15 +98,13 @@ def train(config, device):
         for env_name in env_names:
             env = EnvUtils.create_env_from_metadata(
                 env_meta=env_meta,
-                env_name=env_name,
-                render=False,
+                env_name=env_name, 
+                render=False, 
                 render_offscreen=config.experiment.render_video,
                 use_image_obs=shape_meta["use_images"],
                 use_depth_obs=shape_meta["use_depths"],
             )
-            env = EnvUtils.wrap_env_from_config(
-                env, config=config
-            )  # apply environment wrapper, if applicable
+            env = EnvUtils.wrap_env_from_config(env, config=config)  # apply environment wrapper, if applicable
             envs[env.name] = env
             print(envs[env.name])

@@ -129,9 +124,9 @@ def train(config, device):
         ac_dim=shape_meta["ac_dim"],
         device=device,
     )
-
+    
     # save the config as a json file
-    with open(os.path.join(log_dir, "..", "config.json"), "w") as outfile:
+    with open(os.path.join(log_dir, '..', 'config.json'), 'w') as outfile:
         json.dump(config, outfile, indent=4)

     print("\n============= Model Summary =============")
@@ -140,8 +135,7 @@ def train(config, device):

     # load training data
     trainset, validset = TrainUtils.load_data_for_training(
-        config, obs_keys=shape_meta["all_obs_keys"]
-    )
+        config, obs_keys=shape_meta["all_obs_keys"])
     train_sampler = trainset.get_dataset_sampler()
     print("\n============= Training Dataset =============")
     print(trainset)
@@ -163,7 +157,7 @@ def train(config, device):
         batch_size=config.train.batch_size,
         shuffle=(train_sampler is None),
         num_workers=config.train.num_data_workers,
-        drop_last=True,
+        drop_last=True
     )

     if config.experiment.validate:
@@ -176,35 +170,29 @@ def train(config, device):
             batch_size=config.train.batch_size,
             shuffle=(valid_sampler is None),
             num_workers=num_workers,
-            drop_last=True,
+            drop_last=True
         )
     else:
         valid_loader = None

     # print all warnings before training begins
     print("*" * 50)
-    print(
-        "Warnings generated by robomimic have been duplicated here (from above) for convenience. Please check them carefully."
-    )
+    print("Warnings generated by robomimic have been duplicated here (from above) for convenience. Please check them carefully.")
     flush_warnings()
     print("*" * 50)
     print("")

     # main training loop
     best_valid_loss = None
-    best_return = (
-        {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None
-    )
-    best_success_rate = (
-        {k: -1.0 for k in envs} if config.experiment.rollout.enabled else None
-    )
+    best_return = {k: -np.inf for k in envs} if config.experiment.rollout.enabled else None
+    best_success_rate = {k: -1. for k in envs} if config.experiment.rollout.enabled else None
     last_ckpt_time = time.time()

     # number of learning steps per epoch (defaults to a full dataset pass)
     train_num_steps = config.experiment.epoch_every_n_steps
     valid_num_steps = config.experiment.validation_epoch_every_n_steps

-    for epoch in range(1, config.train.num_epochs + 1):  # epoch numbers start at 1
+    for epoch in range(1, config.train.num_epochs + 1): # epoch numbers start at 1
         step_log = TrainUtils.run_epoch(
             model=model,
             data_loader=train_loader,
             epoch=epoch,
             num_steps=train_num_steps,
@@ -220,16 +208,12 @@ def train(config, device):
         # check for recurring checkpoint saving conditions
         should_save_ckpt = False
         if config.experiment.save.enabled:
-            time_check = (config.experiment.save.every_n_seconds is not None) and (
-                time.time() - last_ckpt_time > config.experiment.save.every_n_seconds
-            )
-            epoch_check = (
-                (config.experiment.save.every_n_epochs is not None)
-                and (epoch > 0)
-                and (epoch % config.experiment.save.every_n_epochs == 0)
-            )
-            epoch_list_check = epoch in config.experiment.save.epochs
-            should_save_ckpt = time_check or epoch_check or epoch_list_check
+            time_check = (config.experiment.save.every_n_seconds is not None) and \
+                (time.time() - last_ckpt_time > config.experiment.save.every_n_seconds)
+            epoch_check = (config.experiment.save.every_n_epochs is not None) and \
+                (epoch > 0) and (epoch % config.experiment.save.every_n_epochs == 0)
+            epoch_list_check = (epoch in config.experiment.save.epochs)
+            should_save_ckpt = (time_check or epoch_check or epoch_list_check)
         ckpt_reason = None
         if should_save_ckpt:
             last_ckpt_time = time.time()
@@ -246,13 +230,7 @@ def train(config, device):
         # Evaluate the model on validation set
         if config.experiment.validate:
             with torch.no_grad():
-                step_log = TrainUtils.run_epoch(
-                    model=model,
-                    data_loader=valid_loader,
-                    epoch=epoch,
-                    validate=True,
-                    num_steps=valid_num_steps,
-                )
+                step_log = TrainUtils.run_epoch(model=model, data_loader=valid_loader, epoch=epoch, validate=True, num_steps=valid_num_steps)
             for k, v in step_log.items():
                 if k.startswith("Time_"):
                     data_logger.record("Timing_Stats/Valid_{}".format(k[5:]), v, epoch)
@@ -264,14 +242,9 @@ def train(config, device):

             # save checkpoint if we achieve a new best validation loss
             valid_check = "Loss" in step_log
-            if valid_check and (
-                best_valid_loss is None or (step_log["Loss"] <= best_valid_loss)
-            ):
+            if valid_check and (best_valid_loss is None or (step_log["Loss"] <= best_valid_loss)):
                 best_valid_loss = step_log["Loss"]
-                if (
-                    config.experiment.save.enabled
-                    and config.experiment.save.on_best_validation
-                ):
+                if config.experiment.save.enabled and config.experiment.save.on_best_validation:
                     epoch_ckpt_name += "_best_validation_{}".format(best_valid_loss)
                     should_save_ckpt = True
                     ckpt_reason = "valid" if ckpt_reason is None else ckpt_reason
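Worked example of the three recurring-save triggers above, with stand-in values in place of robomimic's `config.experiment.save.*` entries:

```python
import time

every_n_seconds, every_n_epochs, epochs = None, 50, [100]  # stand-ins for config values
last_ckpt_time = time.time()
epoch = 100

time_check = (every_n_seconds is not None) and (time.time() - last_ckpt_time > every_n_seconds)
epoch_check = (every_n_epochs is not None) and (epoch > 0) and (epoch % every_n_epochs == 0)
epoch_list_check = (epoch in epochs)
print(time_check or epoch_check or epoch_list_check)  # True: epoch 100 trips both epoch checks
```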
@@ -280,19 +253,11 @@ def train(config, device):

         # do rollouts at fixed rate or if it's time to save a new ckpt
         video_paths = None
-        rollout_check = (epoch % config.experiment.rollout.rate == 0) or (
-            should_save_ckpt and ckpt_reason == "time"
-        )
-        if (
-            config.experiment.rollout.enabled
-            and (epoch > config.experiment.rollout.warmstart)
-            and rollout_check
-        ):
+        rollout_check = (epoch % config.experiment.rollout.rate == 0) or (should_save_ckpt and ckpt_reason == "time")
+        if config.experiment.rollout.enabled and (epoch > config.experiment.rollout.warmstart) and rollout_check:
             # wrap model as a RolloutPolicy to prepare for rollouts
-            rollout_model = RolloutPolicy(
-                model, obs_normalization_stats=obs_normalization_stats
-            )
+            rollout_model = RolloutPolicy(model, obs_normalization_stats=obs_normalization_stats)

             num_episodes = config.experiment.rollout.n
             all_rollout_logs, video_paths = TrainUtils.rollout_with_stats(
@@ -313,25 +278,12 @@ def train(config, device):
                 rollout_logs = all_rollout_logs[env_name]
                 for k, v in rollout_logs.items():
                     if k.startswith("Time_"):
-                        data_logger.record(
-                            "Timing_Stats/Rollout_{}_{}".format(env_name, k[5:]),
-                            v,
-                            epoch,
-                        )
+                        data_logger.record("Timing_Stats/Rollout_{}_{}".format(env_name, k[5:]), v, epoch)
                     else:
-                        data_logger.record(
-                            "Rollout/{}/{}".format(k, env_name),
-                            v,
-                            epoch,
-                            log_stats=True,
-                        )
-
-                print(
-                    "\nEpoch {} Rollouts took {}s (avg) with results:".format(
-                        epoch, rollout_logs["time"]
-                    )
-                )
-                print("Env: {}".format(env_name))
+                        data_logger.record("Rollout/{}/{}".format(k, env_name), v, epoch, log_stats=True)
+
+                print("\nEpoch {} Rollouts took {}s (avg) with results:".format(epoch, rollout_logs["time"]))
+                print('Env: {}'.format(env_name))
                 print(json.dumps(rollout_logs, sort_keys=True, indent=4))

             # checkpoint and video saving logic
@@ -346,16 +298,12 @@ def train(config, device):
                 best_return = updated_stats["best_return"]
                 best_success_rate = updated_stats["best_success_rate"]
                 epoch_ckpt_name = updated_stats["epoch_ckpt_name"]
-                should_save_ckpt = (
-                    config.experiment.save.enabled and updated_stats["should_save_ckpt"]
-                ) or should_save_ckpt
+                should_save_ckpt = (config.experiment.save.enabled and updated_stats["should_save_ckpt"]) or should_save_ckpt
                 if updated_stats["ckpt_reason"] is not None:
                     ckpt_reason = updated_stats["ckpt_reason"]

         # Only keep saved videos if the ckpt should be saved (but not because of validation score)
-        should_save_video = (
-            should_save_ckpt and (ckpt_reason != "valid")
-        ) or config.experiment.keep_all_videos
+        should_save_video = (should_save_ckpt and (ckpt_reason != "valid")) or config.experiment.keep_all_videos
         if video_paths is not None and not should_save_video:
             for env_name in video_paths:
                 os.remove(video_paths[env_name])
@@ -384,7 +332,7 @@ def train(config, device):

 def main(args):
     if args.config is not None:
-        ext_cfg = json.load(open(args.config, "r"))
+        ext_cfg = json.load(open(args.config, 'r'))
         config = config_factory(ext_cfg["algo_name"])
         # update config with external json - this will throw errors if
         # the external config has keys not present in the base algo config
@@ -471,9 +419,10 @@ def main(args):
     # debug mode
     parser.add_argument(
         "--debug",
-        action="store_true",
-        help="set this flag to run a quick training run for debugging purposes",
+        action='store_true',
+        help="set this flag to run a quick training run for debugging purposes"
     )

     args = parser.parse_args()
     main(args)
+
diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py
index 00bac5de..643fcbf7 100644
--- a/robomimic/utils/dataset.py
+++ b/robomimic/utils/dataset.py
@@ -2,7 +2,6 @@
 This file contains Dataset classes that are used by torch dataloaders
 to fetch batches from hdf5 files.
 """
-
 import os
 import h5py
 import numpy as np
@@ -138,10 +137,10 @@ def __init__(

             goal_mode (str): either "last" or None. Defaults to None, which is to not fetch goals

-            hdf5_cache_mode (str): one of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5
-                in memory - this is by far the fastest for data loading. Set to "low_dim" to cache all
-                non-image data. Set to None to use no caching - in this case, every batch sample is
-                retrieved via file i/o. You should almost never set this to None, even for large
+            hdf5_cache_mode (str): one of ["all", "low_dim", or None]. Set to "all" to cache entire hdf5 
+                in memory - this is by far the fastest for data loading. Set to "low_dim" to cache all 
+                non-image data. Set to None to use no caching - in this case, every batch sample is 
+                retrieved via file i/o. You should almost never set this to None, even for large 
                 image datasets.

             hdf5_use_swmr (bool): whether to use swmr feature when opening the hdf5 file. This ensures
@@ -216,16 +215,14 @@ def __init__(
                 hdf5_file=self.hdf5_file,
                 obs_keys=self.obs_keys_in_memory,
                 dataset_keys=self.dataset_keys,
-                load_next_obs=self.load_next_obs,
+                load_next_obs=self.load_next_obs
             )

             if self.hdf5_cache_mode == "all":
                 # cache getitem calls for even more speedup. We don't do this for
                 # "low-dim" since image observations require calls to getitem anyways.
                 print("SequenceDataset: caching get_item calls...")
-                self.getitem_cache = [
-                    self.get_item(i) for i in LogUtils.custom_tqdm(range(len(self)))
-                ]
+                self.getitem_cache = [self.get_item(i) for i in LogUtils.custom_tqdm(range(len(self)))]

                 # don't need the previous cache anymore
                 del self.hdf5_cache
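A compact sketch of the two-level caching above: raw hdf5 arrays are cached first, and in "all" mode the assembled `get_item` outputs are additionally memoized so `__getitem__` becomes a list lookup. Class and field names here are illustrative, not the real `SequenceDataset`:

```python
class CachedDataset:
    def __init__(self, n):
        self.n = n
        self.getitem_cache = [self.get_item(i) for i in range(n)]  # precompute once

    def __len__(self):
        return self.n

    def get_item(self, i):
        return {"index": i}  # stands in for hdf5 reads plus sequence assembly

    def __getitem__(self, i):
        return self.getitem_cache[i]  # cheap lookup at training time

print(CachedDataset(3)[2])  # {'index': 2}
```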
"\tnum_demos={}\n\tnum_sequences={}\n)" - filter_key_str = ( - self.filter_by_attribute if self.filter_by_attribute is not None else "none" - ) + filter_key_str = self.filter_by_attribute if self.filter_by_attribute is not None else "none" goal_mode_str = self.goal_mode if self.goal_mode is not None else "none" - cache_mode_str = ( - self.hdf5_cache_mode if self.hdf5_cache_mode is not None else "none" - ) - msg = msg.format( - self.hdf5_path, - self.obs_keys, - self.seq_length, - filter_key_str, - self.n_frame_stack, - self.pad_seq_length, - self.pad_frame_stack, - goal_mode_str, - cache_mode_str, - self.n_demos, - self.total_num_sequences, - ) + cache_mode_str = self.hdf5_cache_mode if self.hdf5_cache_mode is not None else "none" + msg = msg.format(self.hdf5_path, self.obs_keys, self.seq_length, filter_key_str, self.n_frame_stack, + self.pad_seq_length, self.pad_frame_stack, goal_mode_str, cache_mode_str, + self.n_demos, self.total_num_sequences) return msg def __len__(self): """ - Ensure that the torch dataloader will do a complete pass through all sequences in + Ensure that the torch dataloader will do a complete pass through all sequences in the dataset before starting a new iteration. """ return self.total_num_sequences - def load_dataset_in_memory( - self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs - ): + def load_dataset_in_memory(self, demo_list, hdf5_file, obs_keys, dataset_keys, load_next_obs): """ Loads the hdf5 dataset into memory, preserving the structure of the file. Note that this differs from `self.getitem_cache`, which, if active, actually caches the outputs of the @@ -394,39 +362,26 @@ def load_dataset_in_memory( for ep in LogUtils.custom_tqdm(demo_list): all_data[ep] = {} all_data[ep]["attrs"] = {} - all_data[ep]["attrs"]["num_samples"] = hdf5_file[ - "data/{}".format(ep) - ].attrs["num_samples"] + all_data[ep]["attrs"]["num_samples"] = hdf5_file["data/{}".format(ep)].attrs["num_samples"] # get obs - all_data[ep]["obs"] = { - k: hdf5_file["data/{}/obs/{}".format(ep, k)][()] for k in obs_keys - } + all_data[ep]["obs"] = {k: hdf5_file["data/{}/obs/{}".format(ep, k)][()] for k in obs_keys} if load_next_obs: - all_data[ep]["next_obs"] = { - k: hdf5_file["data/{}/next_obs/{}".format(ep, k)][()] - for k in obs_keys - } + all_data[ep]["next_obs"] = {k: hdf5_file["data/{}/next_obs/{}".format(ep, k)][()] for k in obs_keys} # get other dataset keys for k in dataset_keys: if k in hdf5_file["data/{}".format(ep)]: - all_data[ep][k] = hdf5_file["data/{}/{}".format(ep, k)][()].astype( - "float32" - ) + all_data[ep][k] = hdf5_file["data/{}/{}".format(ep, k)][()].astype('float32') else: - all_data[ep][k] = np.zeros( - (all_data[ep]["attrs"]["num_samples"], 1), dtype=np.float32 - ) + all_data[ep][k] = np.zeros((all_data[ep]["attrs"]["num_samples"], 1), dtype=np.float32) if "model_file" in hdf5_file["data/{}".format(ep)].attrs: - all_data[ep]["attrs"]["model_file"] = hdf5_file[ - "data/{}".format(ep) - ].attrs["model_file"] + all_data[ep]["attrs"]["model_file"] = hdf5_file["data/{}".format(ep)].attrs["model_file"] return all_data def normalize_obs(self): """ - Computes a dataset-wide mean and standard deviation for the observations + Computes a dataset-wide mean and standard deviation for the observations (per dimension and per obs key) and returns it. 
""" def _calc_helper(hdf5_key): @@ -479,20 +434,20 @@ def get_dataset_for_ep(self, ep, key): """ # check if this key should be in memory - key_should_be_in_memory = self.hdf5_cache_mode in ["all", "low_dim"] + key_should_be_in_memory = (self.hdf5_cache_mode in ["all", "low_dim"]) if key_should_be_in_memory: # if key is an observation, it may not be in memory - if "/" in key: - key1, key2 = key.split("/") - assert key1 in ["obs", "next_obs"] + if '/' in key: + key1, key2 = key.split('/') + assert(key1 in ['obs', 'next_obs']) if key2 not in self.obs_keys_in_memory: key_should_be_in_memory = False if key_should_be_in_memory: # read cache - if "/" in key: - key1, key2 = key.split("/") - assert key1 in ["obs", "next_obs"] + if '/' in key: + key1, key2 = key.split('/') + assert(key1 in ['obs', 'next_obs']) ret = self.hdf5_cache[ep][key1][key2] else: ret = self.hdf5_cache[ep][key] @@ -531,9 +486,8 @@ def get_item(self, index): demo_id, index_in_demo=index_in_demo, keys=self.dataset_keys, - num_frames_to_stack=self.n_frame_stack - - 1, # note: need to decrement self.n_frame_stack by one - seq_length=self.seq_length, + num_frames_to_stack=self.n_frame_stack - 1, # note: need to decrement self.n_frame_stack by one + seq_length=self.seq_length ) # determine goal index @@ -547,7 +501,7 @@ def get_item(self, index): keys=self.obs_keys, num_frames_to_stack=self.n_frame_stack - 1, seq_length=self.seq_length, - prefix="obs", + prefix="obs" ) if self.load_next_obs: @@ -557,7 +511,7 @@ def get_item(self, index): keys=self.obs_keys, num_frames_to_stack=self.n_frame_stack - 1, seq_length=self.seq_length, - prefix="next_obs", + prefix="next_obs" ) if goal_index is not None: @@ -569,9 +523,7 @@ def get_item(self, index): seq_length=1, prefix="next_obs", ) - meta["goal_obs"] = { - k: goal[k][0] for k in goal - } # remove sequence dimension for goal + meta["goal_obs"] = {k: goal[k][0] for k in goal} # remove sequence dimension for goal return meta @@ -611,12 +563,8 @@ def get_sequence_from_demo( seq_end_index = min(demo_length, index_in_demo + seq_length) # determine sequence padding - seq_begin_pad = max( - 0, num_frames_to_stack - index_in_demo - ) # pad for frame stacking - seq_end_pad = max( - 0, index_in_demo + seq_length - demo_length - ) # pad for sequence length + seq_begin_pad = max(0, num_frames_to_stack - index_in_demo) # pad for frame stacking + seq_end_pad = max(0, index_in_demo + seq_length - demo_length) # pad for sequence length # make sure we are not padding if specified. 

         # make sure we are not padding if specified.
         if not self.pad_frame_stack:
@@ -629,38 +577,18 @@ def get_sequence_from_demo(

         for k in keys:
             t = time.time()
             data = self.get_dataset_for_ep(demo_id, k)
-            true_end_index = (
-                seq_begin_index + 1
-                if k.split("/")[-1] in dont_load_fut
-                else seq_end_index
-            )
-            seq[k] = data[seq_begin_index:true_end_index]
+            true_end_index = seq_begin_index + 1 if k.split("/")[-1] in dont_load_fut else seq_end_index
+            seq[k] = data[seq_begin_index: true_end_index]

         for k in seq:
             if k.split("/")[-1] not in dont_load_fut:
-                seq[k] = TensorUtils.pad_sequence(
-                    seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True
-                )
-        pad_mask = np.array(
-            [0] * seq_begin_pad
-            + [1] * (seq_end_index - seq_begin_index)
-            + [0] * seq_end_pad
-        )
+                seq[k] = TensorUtils.pad_sequence(seq[k], padding=(seq_begin_pad, seq_end_pad), pad_same=True)
+        pad_mask = np.array([0] * seq_begin_pad + [1] * (seq_end_index - seq_begin_index) + [0] * seq_end_pad)
         pad_mask = pad_mask[:, None].astype(bool)

         return seq, pad_mask
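Continuing the example, the `pad_mask` marks which of the returned steps are real data (1) versus padding (0):

```python
import numpy as np

seq_begin_pad, seq_end_pad = 0, 3
seq_begin_index, seq_end_index = 6, 10
pad_mask = np.array([0] * seq_begin_pad + [1] * (seq_end_index - seq_begin_index) + [0] * seq_end_pad)
print(pad_mask)  # [1 1 1 1 0 0 0]
```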

-    def get_obs_sequence_from_demo(
-        self,
-        demo_id,
-        index_in_demo,
-        keys,
-        num_frames_to_stack=0,
-        seq_length=1,
-        prefix="obs",
-        dont_load_fut=False,
-        seq_length_to_load=None,
-    ):
+    def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False, seq_length_to_load=None):
         """
         Extract a (sub)sequence of observation items from a demo given the @keys of the items.

@@ -681,15 +609,15 @@
         obs, pad_mask = self.get_sequence_from_demo(
             demo_id,
             index_in_demo=index_in_demo,
-            keys=tuple("{}/{}".format(prefix, k) for k in keys),
+            keys=tuple('{}/{}'.format(prefix, k) for k in keys),
             num_frames_to_stack=num_frames_to_stack,
             seq_length=seq_length_to_load,
-            dont_load_fut=dont_load_fut,
+            dont_load_fut=dont_load_fut
         )
-        obs = {k.split("/")[1]: obs[k] for k in obs}  # strip the prefix
+        obs = {k.split('/')[1]: obs[k] for k in obs}  # strip the prefix
         if self.get_pad_mask:
             obs["pad_mask"] = pad_mask
-
+        
         # Interpolate obs
         # to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")]
         to_interp = ["pad_mask"]

         return obs

-    def get_dataset_sequence_from_demo(
-        self,
-        demo_id,
-        index_in_demo,
-        keys,
-        num_frames_to_stack=0,
-        seq_length=1,
-        seq_length_to_load=None,
-    ):
+    def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, seq_length_to_load=None):
         """
         Extract a (sub)sequence of dataset items from a demo given the @keys of the items (e.g., states, actions).
-
+        
         Args:
             demo_id (str): id of the demo, e.g., demo_0
             index_in_demo (int): beginning index of the sequence wrt the demo
@@ -733,7 +653,7 @@ def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frame
         )
         if self.get_pad_mask:
             data["pad_mask"] = pad_mask
-
+        
         # interpolate actions
         to_interp = [k for k in data]
         # t = time.time()
@@ -761,12 +681,14 @@ def get_trajectory_at_index(self, index):
             demo_id,
             index_in_demo=0,
             keys=self.dataset_keys,
-            num_frames_to_stack=self.n_frame_stack
-            - 1,  # note: need to decrement self.n_frame_stack by one
-            seq_length=demo_length,
+            num_frames_to_stack=self.n_frame_stack - 1,  # note: need to decrement self.n_frame_stack by one
+            seq_length=demo_length
         )
         meta["obs"] = self.get_obs_sequence_from_demo(
-            demo_id, index_in_demo=0, keys=self.obs_keys, seq_length=demo_length
+            demo_id,
+            index_in_demo=0,
+            keys=self.obs_keys,
+            seq_length=demo_length
         )
         if self.load_next_obs:
             meta["next_obs"] = self.get_obs_sequence_from_demo(
                 demo_id,
                 index_in_demo=0,
                 keys=self.obs_keys,
                 seq_length=demo_length,
-                prefix="next_obs",
+                prefix="next_obs"
             )
         meta["ep"] = demo_id

diff --git a/robomimic/utils/env_utils.py b/robomimic/utils/env_utils.py
index f7b511e6..b656ea64 100644
--- a/robomimic/utils/env_utils.py
+++ b/robomimic/utils/env_utils.py
@@ -3,7 +3,6 @@
 wrappers provided by the repository, and with environment metadata saved
 in dataset files.
 """
-
 from copy import deepcopy
 import robomimic.envs.env_base as EB
 from robomimic.utils.log_utils import log_warning
@@ -35,15 +34,12 @@ def get_env_class(env_meta=None, env_type=None, env=None):
     env_type = get_env_type(env_meta=env_meta, env_type=env_type, env=env)
     if env_type == EB.EnvType.ROBOSUITE_TYPE:
         from robomimic.envs.env_robosuite import EnvRobosuite
-
         return EnvRobosuite
     elif env_type == EB.EnvType.GYM_TYPE:
         from robomimic.envs.env_gym import EnvGym
-
         return EnvGym
     elif env_type == EB.EnvType.IG_MOMART_TYPE:
         from robomimic.envs.env_ig_momart import EnvGibsonMOMART
-
         return EnvGibsonMOMART
     raise Exception("code should never reach this point")

@@ -97,7 +93,7 @@ def check_env_type(type_to_check, env_meta=None, env_type=None, env=None):
         env (instance of EB.EnvBase): environment instance
     """
     env_type = get_env_type(env_meta=env_meta, env_type=env_type, env=env)
-    return env_type == type_to_check
+    return (env_type == type_to_check)


 def check_env_version(env, env_meta):
@@ -119,13 +115,13 @@
     if env_meta_version is None:
         log_warning(
-            "No environment version found in dataset!"
-            "\nCannot verify if dataset and installed environment versions match"
+            "No environment version found in dataset!"\
+            "\nCannot verify if dataset and installed environment versions match"\
         )
     elif env_system_version != env_meta_version:
         log_warning(
-            "Dataset and installed environment version mismatch!"
-            "\nDataset environment version: {meta}"
+            "Dataset and installed environment version mismatch!"\
+            "\nDataset environment version: {meta}"\
             "\nInstalled environment version: {sys}".format(
                 sys=env_system_version,
                 meta=env_meta_version,
@@ -139,21 +135,16 @@ def is_robosuite_env(env_meta=None, env_type=None, env=None):
     either env_meta, env_type, or env.
""" return False - return check_env_type( - type_to_check=EB.EnvType.ROBOSUITE_TYPE, - env_meta=env_meta, - env_type=env_type, - env=env, - ) + return check_env_type(type_to_check=EB.EnvType.ROBOSUITE_TYPE, env_meta=env_meta, env_type=env_type, env=env) def create_env( env_type, - env_name, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, + env_name, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, **kwargs, ): """ @@ -182,9 +173,9 @@ def create_env( # note: pass @postprocess_visual_obs True, to make sure images are processed for network inputs env_class = get_env_class(env_type=env_type) env = env_class( - env_name=env_name, - render=render, - render_offscreen=render_offscreen, + env_name=env_name, + render=render, + render_offscreen=render_offscreen, use_image_obs=use_image_obs, use_depth_obs=use_depth_obs, postprocess_visual_obs=True, @@ -197,11 +188,11 @@ def create_env( def create_env_from_metadata( env_meta, - env_name=None, - render=False, - render_offscreen=False, - use_image_obs=False, - use_depth_obs=False, + env_name=None, + render=False, + render_offscreen=False, + use_image_obs=False, + use_depth_obs=False, ): """ Create environment. @@ -238,11 +229,11 @@ def create_env_from_metadata( env = create_env( env_type=env_type, - env_name=env_name, - render=render, - render_offscreen=render_offscreen, - use_image_obs=use_image_obs, - use_depth_obs=use_depth_obs, + env_name=env_name, + render=render, + render_offscreen=render_offscreen, + use_image_obs=use_image_obs, + use_depth_obs=use_depth_obs, **env_kwargs, ) check_env_version(env, env_meta) @@ -251,15 +242,15 @@ def create_env_from_metadata( def create_env_for_data_processing( env_meta, - camera_names, - camera_height, - camera_width, + camera_names, + camera_height, + camera_width, reward_shaping, env_class=None, - render=None, - render_offscreen=None, - use_image_obs=None, - use_depth_obs=None, + render=None, + render_offscreen=None, + use_image_obs=None, + use_depth_obs=None, ): """ Creates environment for processing dataset observations and rewards. @@ -308,14 +299,14 @@ def create_env_for_data_processing( env_kwargs.pop("use_depth_obs", None) env = env_class.create_for_data_processing( - env_name=env_name, - camera_names=camera_names, - camera_height=camera_height, - camera_width=camera_width, - reward_shaping=reward_shaping, - render=render, - render_offscreen=render_offscreen, - use_image_obs=use_image_obs, + env_name=env_name, + camera_names=camera_names, + camera_height=camera_height, + camera_width=camera_width, + reward_shaping=reward_shaping, + render=render, + render_offscreen=render_offscreen, + use_image_obs=use_image_obs, use_depth_obs=use_depth_obs, **env_kwargs, ) @@ -330,20 +321,13 @@ def set_env_specific_obs_processing(env_meta=None, env_type=None, env=None): processing normalizes and clips all values to [0, 1]. 
""" if is_robosuite_env(env_meta=env_meta, env_type=env_type, env=env): - from robomimic.utils.obs_utils import ( - DepthModality, - process_frame, - unprocess_frame, - ) - - DepthModality.set_obs_processor( - processor=(lambda obs: process_frame(frame=obs, channel_dim=1, scale=None)) - ) - DepthModality.set_obs_unprocessor( - unprocessor=( - lambda obs: unprocess_frame(frame=obs, channel_dim=1, scale=None) - ) - ) + from robomimic.utils.obs_utils import DepthModality, process_frame, unprocess_frame + DepthModality.set_obs_processor(processor=( + lambda obs: process_frame(frame=obs, channel_dim=1, scale=None) + )) + DepthModality.set_obs_unprocessor(unprocessor=( + lambda obs: unprocess_frame(frame=obs, channel_dim=1, scale=None) + )) def wrap_env_from_config(env, config): @@ -353,7 +337,6 @@ def wrap_env_from_config(env, config): """ if ("frame_stack" in config.train) and (config.train.frame_stack > 1): from robomimic.envs.wrappers import FrameStackWrapper - env = FrameStackWrapper(env, num_frames=config.train.frame_stack) return env diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index 372969dd..65519c5a 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -2,7 +2,6 @@ A collection of utility functions for working with files, such as reading metadata from demonstration datasets, loading model checkpoints, or downloading dataset files. """ - import os import h5py import json @@ -37,7 +36,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): Args: hdf5_path (str): path to hdf5 file demo_keys ([str]): list of demonstration keys which should - correspond to this filter key. For example, ["demo_0", + correspond to this filter key. For example, ["demo_0", "demo_1"]. key_name (str): name of filter key to create @@ -45,7 +44,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): ep_lengths ([int]): list of episode lengths that corresponds to each demonstration in the new filter key """ - f = h5py.File(hdf5_path, "a") + f = h5py.File(hdf5_path, "a") demos = sorted(list(f["data"].keys())) # collect episode lengths for the keys of interest @@ -59,7 +58,7 @@ def create_hdf5_filter_key(hdf5_path, demo_keys, key_name): k = "mask/{}".format(key_name) if k in f: del f[k] - f[k] = np.array(demo_keys, dtype="S") + f[k] = np.array(demo_keys, dtype='S') f.close() return ep_lengths @@ -75,13 +74,11 @@ def get_demos_for_filter_key(hdf5_path, filter_key): Returns: demo_keys ([str]): list of demonstration keys that - correspond to this filter key. For example, ["demo_0", + correspond to this filter key. For example, ["demo_0", "demo_1"]. """ f = h5py.File(hdf5_path, "r") - demo_keys = [ - elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)][:]) - ] + demo_keys = [elem.decode("utf-8") for elem in np.array(f["mask/{}".format(filter_key)][:])] f.close() return demo_keys @@ -95,7 +92,7 @@ def get_env_metadata_from_dataset(dataset_path, set_env_specific_obs_processors= set_env_specific_obs_processors (bool): environment might have custom rules for how to process observations - if this flag is true, make sure ObsUtils will use these custom settings. This - is a good place to do this operation to make sure it happens before loading data, running a + is a good place to do this operation to make sure it happens before loading data, running a trained model, etc. 

     Returns:
         env_meta (dict): environment metadata. Contains 3 keys:
@@ -115,9 +112,7 @@
     return env_meta


-def get_shape_metadata_from_dataset(
-    dataset_path, all_obs_keys=None, verbose=False, ac_key="actions"
-):
+def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=False, ac_key="actions"):
     """
     Retrieves shape metadata from dataset.

@@ -170,10 +165,10 @@
     f.close()

-    shape_meta["all_shapes"] = all_shapes
-    shape_meta["all_obs_keys"] = all_obs_keys
-    shape_meta["use_images"] = ObsUtils.has_modality("rgb", all_obs_keys)
-    shape_meta["use_depths"] = ObsUtils.has_modality("depth", all_obs_keys)
+    shape_meta['all_shapes'] = all_shapes
+    shape_meta['all_obs_keys'] = all_obs_keys
+    shape_meta['use_images'] = ObsUtils.has_modality("rgb", all_obs_keys)
+    shape_meta['use_depths'] = ObsUtils.has_modality("depth", all_obs_keys)

     return shape_meta

@@ -181,7 +176,7 @@
 def load_dict_from_checkpoint(ckpt_path):
     """
     Load checkpoint dictionary from a checkpoint file.
-
+    
     Args:
         ckpt_path (str): Path to checkpoint file.

@@ -287,41 +282,22 @@ def find_obs_dicts_recursively(dic):
     }

     if "visual_feature_dimension" in old_encoder_cfg:
-        rgb_encoder_cfg["core_kwargs"]["feature_dimension"] = old_encoder_cfg[
-            "visual_feature_dimension"
-        ]
+        rgb_encoder_cfg["core_kwargs"]["feature_dimension"] = old_encoder_cfg["visual_feature_dimension"]

     if "visual_core" in old_encoder_cfg:
-        rgb_encoder_cfg["core_kwargs"]["backbone_class"] = old_encoder_cfg[
-            "visual_core"
-        ]
+        rgb_encoder_cfg["core_kwargs"]["backbone_class"] = old_encoder_cfg["visual_core"]

     for kwarg in ("pretrained", "input_coord_conv"):
-        if (
-            "visual_core_kwargs" in old_encoder_cfg
-            and kwarg in old_encoder_cfg["visual_core_kwargs"]
-        ):
-            rgb_encoder_cfg["core_kwargs"]["backbone_kwargs"][kwarg] = (
-                old_encoder_cfg["visual_core_kwargs"][kwarg]
-            )
+        if "visual_core_kwargs" in old_encoder_cfg and kwarg in old_encoder_cfg["visual_core_kwargs"]:
+            rgb_encoder_cfg["core_kwargs"]["backbone_kwargs"][kwarg] = old_encoder_cfg["visual_core_kwargs"][kwarg]

     # Optionally add pooling info too
     if old_encoder_cfg.get("use_spatial_softmax", True):
         rgb_encoder_cfg["core_kwargs"]["pool_class"] = "SpatialSoftmax"

-        for kwarg in (
-            "num_kp",
-            "learnable_temperature",
-            "temperature",
-            "noise_std",
-        ):
-            if (
-                "spatial_softmax_kwargs" in old_encoder_cfg
-                and kwarg in old_encoder_cfg["spatial_softmax_kwargs"]
-            ):
-                rgb_encoder_cfg["core_kwargs"]["pool_kwargs"][kwarg] = (
-                    old_encoder_cfg["spatial_softmax_kwargs"][kwarg]
-                )
+        for kwarg in ("num_kp", "learnable_temperature", "temperature", "noise_std"):
+            if "spatial_softmax_kwargs" in old_encoder_cfg and kwarg in old_encoder_cfg["spatial_softmax_kwargs"]:
+                rgb_encoder_cfg["core_kwargs"]["pool_kwargs"][kwarg] = old_encoder_cfg["spatial_softmax_kwargs"][kwarg]

     # Update obs randomizer as well
     for kwarg in ("obs_randomizer_class", "obs_randomizer_kwargs"):
@@ -343,9 +319,7 @@
     }


-def config_from_checkpoint(
-    algo_name=None, ckpt_path=None, ckpt_dict=None, verbose=False
-):
+def config_from_checkpoint(algo_name=None, ckpt_path=None, ckpt_dict=None, verbose=False):
     """
     Helper function to restore config from a checkpoint file or loaded model dictionary.
@@ -369,7 +343,7 @@ def config_from_checkpoint(
         algo_name, _ = algo_name_from_checkpoint(ckpt_dict=ckpt_dict)

     # restore config from loaded model dictionary
-    config_dict = json.loads(ckpt_dict["config"])
+    config_dict = json.loads(ckpt_dict['config'])
     update_config(cfg=config_dict)

     if verbose:
@@ -410,9 +384,7 @@ def policy_from_checkpoint(device=None, ckpt_path=None, ckpt_dict=None, verbose=

     # algo name and config from model dict
     algo_name, _ = algo_name_from_checkpoint(ckpt_dict=ckpt_dict)
-    config, _ = config_from_checkpoint(
-        algo_name=algo_name, ckpt_dict=ckpt_dict, verbose=verbose
-    )
+    config, _ = config_from_checkpoint(algo_name=algo_name, ckpt_dict=ckpt_dict, verbose=verbose)

     # read config to set up metadata for observation modalities (e.g. detecting rgb observations)
     ObsUtils.initialize_obs_utils_with_config(config)
@@ -449,14 +421,7 @@
     return model, ckpt_dict


-def env_from_checkpoint(
-    ckpt_path=None,
-    ckpt_dict=None,
-    env_name=None,
-    render=False,
-    render_offscreen=False,
-    verbose=False,
-):
+def env_from_checkpoint(ckpt_path=None, ckpt_dict=None, env_name=None, render=False, render_offscreen=False, verbose=False):
     """
     Creates an environment using the metadata saved in a checkpoint.

@@ -486,19 +451,15 @@
     # create env from saved metadata
     env = EnvUtils.create_env_from_metadata(
-        env_meta=env_meta,
-        env_name=env_name,
-        render=render,
+        env_meta=env_meta, 
+        env_name=env_name, 
+        render=render, 
         render_offscreen=render_offscreen,
         use_image_obs=shape_meta.get("use_images", False),
         use_depth_obs=shape_meta.get("use_depths", False),
     )
-    config, _ = config_from_checkpoint(
-        algo_name=ckpt_dict["algo_name"], ckpt_dict=ckpt_dict, verbose=False
-    )
-    env = EnvUtils.wrap_env_from_config(
-        env, config=config
-    )  # apply environment wrapper, if applicable
+    config, _ = config_from_checkpoint(algo_name=ckpt_dict["algo_name"], ckpt_dict=ckpt_dict, verbose=False)
+    env = EnvUtils.wrap_env_from_config(env, config=config)  # apply environment wrapper, if applicable
     if verbose:
         print("============= Loaded Environment =============")
         print(env)
@@ -524,7 +485,7 @@ def url_is_alive(url):
         is_alive (bool): True if url is reachable, False otherwise
     """
     request = urllib.request.Request(url)
-    request.get_method = lambda: "HEAD"
+    request.get_method = lambda: 'HEAD'

     try:
         urllib.request.urlopen(request)
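Hedged usage sketch of the two checkpoint helpers above (the path is hypothetical; both functions appear in this file with these signatures, and `env_from_checkpoint` reuses the metadata stored inside the checkpoint dict):

```python
import robomimic.utils.file_utils as FileUtils

ckpt_path = "/path/to/model.pth"  # hypothetical checkpoint path
policy, ckpt_dict = FileUtils.policy_from_checkpoint(ckpt_path=ckpt_path, verbose=False)
env, _ = FileUtils.env_from_checkpoint(ckpt_dict=ckpt_dict, render=False)
```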
@@ -560,13 +521,9 @@
     # If we're checking overwrite and the path already exists,
     # we ask the user to verify that they want to overwrite the file
     if check_overwrite and os.path.exists(file_to_write):
-        user_response = input(
-            f"Warning: file {file_to_write} already exists. Overwrite? y/n\n"
-        )
-        assert user_response.lower() in {
-            "yes",
-            "y",
-        }, f"Did not receive confirmation. Aborting download."
+        user_response = input(f"Warning: file {file_to_write} already exists. Overwrite? y/n\n")
+        assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download."

-    with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=fname) as t:
+    with DownloadProgressBar(unit='B', unit_scale=True,
+                             miniters=1, desc=fname) as t:
         urllib.request.urlretrieve(url, filename=file_to_write, reporthook=t.update_to)

diff --git a/robomimic/utils/hyperparam_utils.py b/robomimic/utils/hyperparam_utils.py
index 6c54a4ac..267536d3 100644
--- a/robomimic/utils/hyperparam_utils.py
+++ b/robomimic/utils/hyperparam_utils.py
@@ -1,7 +1,6 @@
 """
 A collection of utility functions and classes for generating config jsons for hyperparameter sweeps.
 """
-
 import argparse
 import os
 import json
@@ -17,14 +16,7 @@ class ConfigGenerator(object):
     Useful class to keep track of hyperparameters to sweep, and to generate
     the json configs for each experiment run.
     """
-
-    def __init__(
-        self,
-        base_config_file,
-        wandb_proj_name="debug",
-        script_file=None,
-        generated_config_dir=None,
-    ):
+    def __init__(self, base_config_file, wandb_proj_name="debug", script_file=None, generated_config_dir=None):
         """
         Args:
             base_config_file (str): path to a base json config to use as a starting point
@@ -40,7 +32,7 @@ def __init__(
         self.generated_config_dir = generated_config_dir
         assert script_file is None or isinstance(script_file, str)
         if script_file is None:
-            self.script_file = os.path.join("~", "tmp/tmpp.sh")
+            self.script_file = os.path.join('~', 'tmp/tmpp.sh')
         else:
             self.script_file = script_file
         self.script_file = os.path.expanduser(self.script_file)
@@ -71,10 +63,10 @@ def add_param(self, key, name, group, values, value_names=None):
         if value_names is not None:
             assert len(values) == len(value_names)
         self.parameters[key] = argparse.Namespace(
-            key=key,
-            name=name,
-            group=group,
-            values=values,
+            key=key, 
+            name=name, 
+            group=group, 
+            values=values, 
             value_names=value_names,
         )
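A plausible usage sketch of `ConfigGenerator` and `add_param` (file paths and config keys here are illustrative, and `generate()` is assumed to be the entry point that writes the jsons and run script, matching robomimic's hyperparameter-scan workflow):

```python
from robomimic.utils.hyperparam_utils import ConfigGenerator

generator = ConfigGenerator(base_config_file="/path/to/base.json", script_file="/tmp/run.sh")

# two values in group 0 crossed with two values in group 1 -> 4 generated configs
generator.add_param(key="train.batch_size", name="bs", group=0, values=[128, 256])
generator.add_param(key="train.seq_length", name="seq", group=1, values=[10, 20])
generator.generate()
```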
""" - # mapping from group id to list of indices to grab from each parameter's list + # mapping from group id to list of indices to grab from each parameter's list # of values in the parameter group parameter_group_indices = OrderedDict() for k in self.parameters: @@ -153,22 +143,21 @@ def _get_parameter_ranges(self): if group_id not in parameter_group_indices: parameter_group_indices[group_id] = list(range(num_param_values)) else: - assert ( - len(parameter_group_indices[group_id]) == num_param_values - ), "error: inconsistent number of parameter values in group with id {}".format( - group_id - ) + assert len(parameter_group_indices[group_id]) == num_param_values, \ + "error: inconsistent number of parameter values in group with id {}".format(group_id) keys = list(parameter_group_indices.keys()) inds = list(parameter_group_indices.values()) - new_parameter_group_indices = OrderedDict({k: [] for k in keys}) + new_parameter_group_indices = OrderedDict( + { k : [] for k in keys } + ) # get all combinations of the different parameter group indices # and then use these indices to determine the new parameter ranges # per member of each parameter group. # # e.g. with two parameter groups, one with two values, and another with three values # we have [0, 1] x [0, 1, 2] = [0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2] - # so the corresponding parameter group indices are [0, 0, 0, 1, 1, 1] and + # so the corresponding parameter group indices are [0, 0, 0, 1, 1, 1] and # [0, 1, 2, 0, 1, 2], and all parameters in each parameter group are indexed # together using these indices, to get each parameter range. for comb in itertools.product(*inds): @@ -197,9 +186,7 @@ def _get_parameter_ranges(self): first_key = list(parameter_ranges.keys())[0] num_settings = len(parameter_ranges[first_key]) for k in parameter_ranges: - assert ( - len(parameter_ranges[k]) == num_settings - ), "inconsistent number of values" + assert len(parameter_ranges[k]) == num_settings, "inconsistent number of values" return parameter_ranges, parameter_names @@ -224,7 +211,7 @@ def _generate_jsons(self): base_config = load_json(self.base_config_file, verbose=False) # base exp name from this base config - base_exp_name = base_config["experiment"]["name"] + base_exp_name = base_config['experiment']['name'] # use base json to determine the parameter ranges parameter_ranges, parameter_names = self._get_parameter_ranges() @@ -238,7 +225,7 @@ def _generate_jsons(self): for i in range(num_settings): # the specific parameter setting for this experiment - setting = {k: parameter_ranges[k][i] for k in parameter_ranges} + setting = { k : parameter_ranges[k][i] for k in parameter_ranges } maybe_parameter_names = OrderedDict() for k in parameter_names: maybe_parameter_names[k] = None @@ -247,14 +234,14 @@ def _generate_jsons(self): # experiment name from setting exp_name = self._name_for_experiment( - base_name=base_exp_name, - parameter_values=setting, + base_name=base_exp_name, + parameter_values=setting, parameter_value_names=maybe_parameter_names, ) # copy old json, but override name, and parameter values json_dict = deepcopy(base_config) - json_dict["experiment"]["name"] = exp_name + json_dict['experiment']['name'] = exp_name for k in parameter_ranges: set_value_for_key(json_dict, k, v=parameter_ranges[k][i]) @@ -276,7 +263,7 @@ def _generate_jsons(self): value_name = maybe_parameter_names[k] else: value_name = setting[k] - + json_dict["meta"]["hp_keys"].append(key_name) json_dict["meta"]["hp_values"].append(value_name) @@ -294,12 +281,12 @@ 
def _script_from_jsons(self, json_paths): Generates a bash script to run the experiments that correspond to the input jsons. """ - with open(self.script_file, "w") as f: + with open(self.script_file, 'w') as f: f.write("#!/bin/bash\n\n") for path in json_paths: # write python command to file cmd = "python train.py --config {}\n".format(path) - + print() print(cmd) f.write(cmd) @@ -316,12 +303,12 @@ def load_json(json_file, verbose=True): Returns: config (dict): json dictionary """ - with open(json_file, "r") as f: + with open(json_file, 'r') as f: config = json.load(f) if verbose: - print("loading external config: =================") + print('loading external config: =================') print(json.dumps(config, indent=4)) - print("==========================================") + print('==========================================') return config @@ -333,7 +320,7 @@ def save_json(config, json_file): config (dict): dictionary to save json_file (str): path to json file to write """ - with open(json_file, "w") as f: + with open(json_file, 'w') as f: # preserve original key ordering json.dump(config, f, sort_keys=False, indent=4) @@ -353,7 +340,7 @@ def get_value_for_key(dic, k): val: the nested dictionary value for the provided key """ val = dic - subkeys = re.split("/|\.", k) + subkeys = re.split('/|\.', k) for s in subkeys[:-1]: val = val[s] return val[subkeys[-1]] @@ -371,7 +358,7 @@ def set_value_for_key(dic, k, v): v: the value to set at the provided key """ val = dic - subkeys = re.split("/|\.", k) # k.split('/') + subkeys = re.split('/|\.', k) #k.split('/') for s in subkeys[:-1]: val = val[s] val[subkeys[-1]] = v diff --git a/robomimic/utils/log_utils.py b/robomimic/utils/log_utils.py index 7cc32b49..1f8aa6ae 100644 --- a/robomimic/utils/log_utils.py +++ b/robomimic/utils/log_utils.py @@ -2,7 +2,6 @@ This file contains utility classes and functions for logging to stdout, stderr, and to tensorboard. """ - import os import sys import numpy as np @@ -32,10 +31,9 @@ class PrintLogger(object): """ This class redirects print statements to both console and a file. """ - def __init__(self, log_file): self.terminal = sys.stdout - print("STDOUT will be forked to %s" % log_file) + print('STDOUT will be forked to %s' % log_file) self.log_file = open(log_file, "a") def write(self, message): @@ -54,7 +52,6 @@ class DataLogger(object): """ Logging class to log metrics to tensorboard and/or retrieve running statistics about logged data. """ - def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): """ Args: @@ -63,29 +60,25 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): """ self._tb_logger = None self._wandb_logger = None - self._data = dict() # store all the scalar data logged so far + self._data = dict() # store all the scalar data logged so far if log_tb: from tensorboardX import SummaryWriter - - self._tb_logger = SummaryWriter(os.path.join(log_dir, "tb")) + self._tb_logger = SummaryWriter(os.path.join(log_dir, 'tb')) + if log_wandb: import wandb import robomimic.macros as Macros - + # set up wandb api key if specified in macros if Macros.WANDB_API_KEY is not None: os.environ["WANDB_API_KEY"] = Macros.WANDB_API_KEY - assert Macros.WANDB_ENTITY is not None, ( - "WANDB_ENTITY macro is set to None." 
- "\nSet this macro in {base_path}/macros_private.py" - "\nIf this file does not exist, first run python {base_path}/scripts/setup_macros.py".format( - base_path=robomimic.__path__[0] - ) - ) - + assert Macros.WANDB_ENTITY is not None, "WANDB_ENTITY macro is set to None." \ + "\nSet this macro in {base_path}/macros_private.py" \ + "\nIf this file does not exist, first run python {base_path}/scripts/setup_macros.py".format(base_path=robomimic.__path__[0]) + # attempt to set up wandb 10 times. If unsuccessful after these trials, don't use wandb num_attempts = 10 for attempt in range(num_attempts): @@ -102,12 +95,8 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): ) # set up info for identifying experiment - wandb_config = { - k: v - for (k, v) in config.meta.items() - if k not in ["hp_keys", "hp_values"] - } - for k, v in zip(config.meta["hp_keys"], config.meta["hp_values"]): + wandb_config = {k: v for (k, v) in config.meta.items() if k not in ["hp_keys", "hp_values"]} + for (k, v) in zip(config.meta["hp_keys"], config.meta["hp_values"]): wandb_config[k] = v if "algo" not in wandb_config: wandb_config["algo"] = config.algo_name @@ -115,15 +104,11 @@ def __init__(self, log_dir, config, log_tb=True, log_wandb=False, uid=None): break except Exception as e: - log_warning( - "wandb initialization error (attempt #{}): {}".format( - attempt + 1, e - ) - ) + log_warning("wandb initialization error (attempt #{}): {}".format(attempt + 1, e)) self._wandb_logger = None time.sleep(30) - def record(self, k, v, epoch, data_type="scalar", log_stats=False): + def record(self, k, v, epoch, data_type='scalar', log_stats=False): """ Record data with logger. Args: @@ -134,42 +119,36 @@ def record(self, k, v, epoch, data_type="scalar", log_stats=False): log_stats (bool): whether to store the mean/max/min/std for all data logged so far with key k """ - assert data_type in ["scalar", "image"] + assert data_type in ['scalar', 'image'] - if data_type == "scalar": + if data_type == 'scalar': # maybe update internal cache if logging stats for this key - if ( - log_stats or k in self._data - ): # any key that we're logging or previously logged + if log_stats or k in self._data: # any key that we're logging or previously logged if k not in self._data: self._data[k] = [] self._data[k].append(v) # maybe log to tensorboard if self._tb_logger is not None: - if data_type == "scalar": + if data_type == 'scalar': self._tb_logger.add_scalar(k, v, epoch) if log_stats: stats = self.get_stats(k) - for stat_k, stat_v in stats.items(): - stat_k_name = "{}-{}".format(k, stat_k) + for (stat_k, stat_v) in stats.items(): + stat_k_name = '{}-{}'.format(k, stat_k) self._tb_logger.add_scalar(stat_k_name, stat_v, epoch) - elif data_type == "image": - self._tb_logger.add_images( - k, img_tensor=v, global_step=epoch, dataformats="NHWC" - ) + elif data_type == 'image': + self._tb_logger.add_images(k, img_tensor=v, global_step=epoch, dataformats="NHWC") if self._wandb_logger is not None: try: - if data_type == "scalar": + if data_type == 'scalar': self._wandb_logger.log({k: v}, step=epoch) if log_stats: stats = self.get_stats(k) - for stat_k, stat_v in stats.items(): - self._wandb_logger.log( - {"{}/{}".format(k, stat_k): stat_v}, step=epoch - ) - elif data_type == "image": + for (stat_k, stat_v) in stats.items(): + self._wandb_logger.log({"{}/{}".format(k, stat_k): stat_v}, step=epoch) + elif data_type == 'image': raise NotImplementedError except Exception as e: log_warning("wandb logging: {}".format(e)) @@ -183,10 
+162,10 @@ def get_stats(self, k): stats (dict): dictionary of statistics """ stats = dict() - stats["mean"] = np.mean(self._data[k]) - stats["std"] = np.std(self._data[k]) - stats["min"] = np.min(self._data[k]) - stats["max"] = np.max(self._data[k]) + stats['mean'] = np.mean(self._data[k]) + stats['std'] = np.std(self._data[k]) + stats['min'] = np.min(self._data[k]) + stats['max'] = np.max(self._data[k]) return stats def close(self): @@ -206,7 +185,6 @@ class custom_tqdm(tqdm): By default tqdm writes to stderr. Instead, we change it to write to stdout. """ - def __init__(self, *args, **kwargs): assert "file" not in kwargs super(custom_tqdm, self).__init__(*args, file=sys.stdout, **kwargs) @@ -242,9 +220,7 @@ def log_warning(message, color="yellow", print_now=True): addition to adding it to the global warning buffer """ global WARNINGS_BUFFER - buffer_message = colored( - "ROBOMIMIC WARNING(\n{}\n)".format(textwrap.indent(message, " ")), color - ) + buffer_message = colored("ROBOMIMIC WARNING(\n{}\n)".format(textwrap.indent(message, " ")), color) WARNINGS_BUFFER.append(buffer_message) if print_now: print(buffer_message) diff --git a/robomimic/utils/loss_utils.py b/robomimic/utils/loss_utils.py index 511d4909..b3f5bf22 100644 --- a/robomimic/utils/loss_utils.py +++ b/robomimic/utils/loss_utils.py @@ -25,7 +25,7 @@ def cosine_loss(preds, labels): def KLD_0_1_loss(mu, logvar): """ - KL divergence loss. Computes D_KL( N(mu, sigma) || N(0, 1) ). Note that + KL divergence loss. Computes D_KL( N(mu, sigma) || N(0, 1) ). Note that this function averages across the batch dimension, but sums across dimension. Args: @@ -36,12 +36,12 @@ def KLD_0_1_loss(mu, logvar): loss (torch.Tensor): KL divergence loss between the input gaussian distribution and N(0, 1) """ - return -0.5 * (1.0 + logvar - mu.pow(2) - logvar.exp()).sum(dim=1).mean() + return -0.5 * (1. + logvar - mu.pow(2) - logvar.exp()).sum(dim=1).mean() def KLD_gaussian_loss(mu_1, logvar_1, mu_2, logvar_2): """ - KL divergence loss between two Gaussian distributions. This function + KL divergence loss between two Gaussian distributions. This function computes the average loss across the batch. Args: @@ -53,18 +53,11 @@ def KLD_gaussian_loss(mu_1, logvar_1, mu_2, logvar_2): Returns: loss (torch.Tensor): KL divergence loss between the two gaussian distributions """ - return ( - -0.5 - * ( - 1.0 - + logvar_1 - - logvar_2 - - ((mu_2 - mu_1).pow(2) / logvar_2.exp()) - - (logvar_1.exp() / logvar_2.exp()) - ) - .sum(dim=1) - .mean() - ) + return -0.5 * (1. + \ + logvar_1 - logvar_2 \ + - ((mu_2 - mu_1).pow(2) / logvar_2.exp()) \ + - (logvar_1.exp() / logvar_2.exp()) \ + ).sum(dim=1).mean() def log_normal(x, m, v): @@ -89,18 +82,18 @@ def log_normal(x, m, v): def log_normal_mixture(x, m, v, w=None, log_w=None): """ - Log probability of tensor x under a uniform mixture of Gaussians. + Log probability of tensor x under a uniform mixture of Gaussians. Adapted from CS 236 at Stanford. 
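
    With uniform weights, the mixture log-likelihood computed here reduces to a
    log-mean-exp over the per-component Gaussian log-densities. A minimal sketch
    of that identity (hypothetical helper name, assuming PyTorch):

        import math
        import torch

        def log_normal_mixture_sketch(x, m, v):
            # x: (B, D); m, v: (B, M, D). Per-component log N(x | m_i, v_i), summed over D.
            log_prob = -0.5 * (((x.unsqueeze(1) - m) ** 2) / v + v.log() + math.log(2 * math.pi)).sum(-1)
            # uniform mixture weights: log mean_i exp(.) = logsumexp_i(.) - log M
            return torch.logsumexp(log_prob, dim=1) - math.log(m.shape[1])
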
Args: x (torch.Tensor): tensor with shape (B, D) - m (torch.Tensor): means tensor with shape (B, M, D) or (1, M, D), where + m (torch.Tensor): means tensor with shape (B, M, D) or (1, M, D), where M is number of mixture components - v (torch.Tensor): variances tensor with shape (B, M, D) or (1, M, D) where + v (torch.Tensor): variances tensor with shape (B, M, D) or (1, M, D) where M is number of mixture components - w (torch.Tensor): weights tensor - if provided, should be + w (torch.Tensor): weights tensor - if provided, should be shape (B, M) or (1, M) - log_w (torch.Tensor): log-weights tensor - if provided, should be + log_w (torch.Tensor): log-weights tensor - if provided, should be shape (B, M) or (1, M) Returns: @@ -119,10 +112,10 @@ def log_normal_mixture(x, m, v, w=None, log_w=None): log_prob += log_w # then compute log sum_i exp [log(w_i * N(x | m_i, v_i))] # (B, M) -> (B,) - log_prob = log_sum_exp(log_prob, dim=1) + log_prob = log_sum_exp(log_prob , dim=1) else: # (B, M) -> (B,) - log_prob = log_mean_exp(log_prob, dim=1) # mean accounts for uniform weights + log_prob = log_mean_exp(log_prob , dim=1) # mean accounts for uniform weights return log_prob @@ -132,7 +125,7 @@ def log_mean_exp(x, dim): Adapted from CS 236 at Stanford. Args: - x (torch.Tensor): a tensor + x (torch.Tensor): a tensor dim (int): dimension along which mean is computed Returns: @@ -147,7 +140,7 @@ def log_sum_exp(x, dim=0): Adapted from CS 236 at Stanford. Args: - x (torch.Tensor): a tensor + x (torch.Tensor): a tensor dim (int): dimension along which sum is computed Returns: @@ -164,16 +157,16 @@ def project_values_onto_atoms(values, probabilities, atoms): grid of values given by @values onto a grid of values given by @atoms. This is useful when computing a bellman backup where the backed up values from the original grid will not be in the original support, - requiring L2 projection. + requiring L2 projection. Each value in @values has a corresponding probability in @probabilities - this probability mass is shifted to the closest neighboring grid points in @atoms in proportion. For example, if the value in question is 0.2, and the - neighboring atoms are 0 and 1, then 0.8 of the probability weight goes to + neighboring atoms are 0 and 1, then 0.8 of the probability weight goes to atom 0 and 0.2 of the probability weight will go to 1. 
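
    The example just described can be written out as a toy check. A hedged
    sketch, assuming PyTorch and the projection function defined in this file:

        import torch

        atoms = torch.tensor([0.0, 1.0])
        values = torch.tensor([[0.2, 0.2]])  # (batch, n_atoms); second entry carries no mass
        probs = torch.tensor([[1.0, 0.0]])   # all probability mass on the value 0.2
        projected = project_values_onto_atoms(values, probs, atoms)
        # expected: tensor([[0.8, 0.2]]) -- 0.8 of the mass to atom 0, 0.2 to atom 1
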
Adapted from https://github.com/deepmind/acme/blob/master/acme/tf/losses/distributional.py#L42 - + Args: values: value grid to project, of shape (batch_size, n_atoms) probabilities: probabilities for categorical distribution on @values, shape (batch_size, n_atoms) @@ -194,28 +187,22 @@ def project_values_onto_atoms(values, probabilities, atoms): d_neg = torch.cat([vmax[None], atoms], dim=0)[:-1] # ensure that @values grid is within the support of @atoms - clipped_values = values.clamp(min=vmin, max=vmax)[ - :, None, : - ] # (batch_size, 1, n_atoms) - clipped_atoms = atoms[None, :, None] # (1, n_atoms, 1) + clipped_values = values.clamp(min=vmin, max=vmax)[:, None, :] # (batch_size, 1, n_atoms) + clipped_atoms = atoms[None, :, None] # (1, n_atoms, 1) # distance between atom values in support - d_pos = (d_pos - atoms)[ - None, :, None - ] # atoms[i + 1] - atoms[i], shape (1, n_atoms, 1) - d_neg = (atoms - d_neg)[ - None, :, None - ] # atoms[i] - atoms[i - 1], shape (1, n_atoms, 1) + d_pos = (d_pos - atoms)[None, :, None] # atoms[i + 1] - atoms[i], shape (1, n_atoms, 1) + d_neg = (atoms - d_neg)[None, :, None] # atoms[i] - atoms[i - 1], shape (1, n_atoms, 1) # distances between all pairs of grid values - deltas = clipped_values - clipped_atoms # (batch_size, n_atoms, n_atoms) + deltas = clipped_values - clipped_atoms # (batch_size, n_atoms, n_atoms) # computes eqn (7) in distributional RL paper by doing the following - for each # output atom in @atoms, consider values that are close enough, and weight their - # probability mass contribution by the normalized distance in [0, 1] given + # probability mass contribution by the normalized distance in [0, 1] given # by (1. - (z_j - z_i) / (delta_z)). - d_sign = (deltas >= 0.0).float() - delta_hat = (d_sign * deltas / d_pos) - ((1.0 - d_sign) * deltas / d_neg) - delta_hat = (1.0 - delta_hat).clamp(min=0.0, max=1.0) + d_sign = (deltas >= 0.).float() + delta_hat = (d_sign * deltas / d_pos) - ((1. - d_sign) * deltas / d_neg) + delta_hat = (1. - delta_hat).clamp(min=0., max=1.) probabilities = probabilities[:, None, :] return (delta_hat * probabilities).sum(dim=2) diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py index e2047996..3313b631 100644 --- a/robomimic/utils/obs_utils.py +++ b/robomimic/utils/obs_utils.py @@ -2,7 +2,6 @@ A collection of utilities for working with observation dictionaries and different kinds of modalities such as images. """ - import numpy as np from copy import deepcopy from collections import OrderedDict @@ -13,7 +12,7 @@ import robomimic.utils.tensor_utils as TU # MACRO FOR VALID IMAGE CHANNEL SIZES -VALID_IMAGE_CHANNEL_DIMS = {1, 3} # depth, rgb +VALID_IMAGE_CHANNEL_DIMS = {1, 3} # depth, rgb # DO NOT MODIFY THIS! # This keeps track of observation types (modalities) - and is populated on call to @initialize_obs_utils_with_obs_specs. @@ -42,28 +41,22 @@ # in their config, without having to manually register their class internally. # This also future-proofs us for any additional encoder / randomizer classes we would # like to add ourselves. -OBS_ENCODER_CORES = {"None": None} # Include default None -OBS_RANDOMIZERS = {"None": None} # Include default None +OBS_ENCODER_CORES = {"None": None} # Include default None +OBS_RANDOMIZERS = {"None": None} # Include default None def register_obs_key(target_class): - assert ( - target_class not in OBS_MODALITY_CLASSES - ), f"Already registered modality {target_class}!" + assert target_class not in OBS_MODALITY_CLASSES, f"Already registered modality {target_class}!" 
OBS_MODALITY_CLASSES[target_class.name] = target_class def register_encoder_core(target_class): - assert ( - target_class not in OBS_ENCODER_CORES - ), f"Already registered obs encoder core {target_class}!" + assert target_class not in OBS_ENCODER_CORES, f"Already registered obs encoder core {target_class}!" OBS_ENCODER_CORES[target_class.__name__] = target_class def register_randomizer(target_class): - assert ( - target_class not in OBS_RANDOMIZERS - ), f"Already registered obs randomizer {target_class}!" + assert target_class not in OBS_RANDOMIZERS, f"Already registered obs randomizer {target_class}!" OBS_RANDOMIZERS[target_class.__name__] = target_class @@ -75,14 +68,11 @@ class ObservationKeyToModalityDict(dict): config. Thus, this dictionary will automatically handle those keys by implicitly associating them with the low_dim modality. """ - def __getitem__(self, item): # If a key doesn't already exist, warn the user and add default mapping if item not in self.keys(): - print( - f"ObservationKeyToModalityDict: {item} not found," - f" adding {item} to mapping with assumed low_dim modality!" - ) + print(f"ObservationKeyToModalityDict: {item} not found," + f" adding {item} to mapping with assumed low_dim modality!") self.__setitem__(item, "low_dim") return super(ObservationKeyToModalityDict, self).__getitem__(item) @@ -104,29 +94,19 @@ def obs_encoder_kwargs_from_config(obs_encoder_config): obs_encoder_config.unlock() for obs_modality, encoder_kwargs in obs_encoder_config.items(): # First run some sanity checks and store the classes - for cls_name, cores in zip( - ("core", "obs_randomizer"), (OBS_ENCODER_CORES, OBS_RANDOMIZERS) - ): + for cls_name, cores in zip(("core", "obs_randomizer"), (OBS_ENCODER_CORES, OBS_RANDOMIZERS)): # Make sure the requested encoder for each obs_modality exists cfg_cls = encoder_kwargs[f"{cls_name}_class"] if cfg_cls is not None: - assert cfg_cls in cores, ( - f"No {cls_name} class with name {cfg_cls} found, must register this class before" + assert cfg_cls in cores, f"No {cls_name} class with name {cfg_cls} found, must register this class before" \ f"creating model!" 
- ) # encoder_kwargs[f"{cls_name}_class"] = cores[cfg_cls] # Process core and randomizer kwargs - encoder_kwargs.core_kwargs = ( - dict() - if encoder_kwargs.core_kwargs is None - else deepcopy(encoder_kwargs.core_kwargs) - ) - encoder_kwargs.obs_randomizer_kwargs = ( - dict() - if encoder_kwargs.obs_randomizer_kwargs is None - else deepcopy(encoder_kwargs.obs_randomizer_kwargs) - ) + encoder_kwargs.core_kwargs = dict() if encoder_kwargs.core_kwargs is None else \ + deepcopy(encoder_kwargs.core_kwargs) + encoder_kwargs.obs_randomizer_kwargs = dict() if encoder_kwargs.obs_randomizer_kwargs is None else \ + deepcopy(encoder_kwargs.obs_randomizer_kwargs) # Re-lock keys obs_encoder_config.lock() @@ -220,16 +200,12 @@ def initialize_obs_utils_with_obs_specs(obs_modality_specs): OBS_KEYS_TO_MODALITIES[obs_key] = obs_modality # otherwise, run sanity check to make sure we don't have conflicting, duplicate entries else: - assert OBS_KEYS_TO_MODALITIES[obs_key] == obs_modality, ( - f"Cannot register obs key {obs_key} with modality {obs_modality}; " + assert OBS_KEYS_TO_MODALITIES[obs_key] == obs_modality, \ + f"Cannot register obs key {obs_key} with modality {obs_modality}; " \ f"already exists with corresponding modality {OBS_KEYS_TO_MODALITIES[obs_key]}" - ) # remove duplicate entries and store in global mapping - OBS_MODALITIES_TO_KEYS = { - obs_modality: list(set(obs_modality_mapping[obs_modality])) - for obs_modality in obs_modality_mapping - } + OBS_MODALITIES_TO_KEYS = { obs_modality : list(set(obs_modality_mapping[obs_modality])) for obs_modality in obs_modality_mapping } print("\n============= Initialized Observation Utils with Obs Spec =============\n") for obs_modality, obs_keys in OBS_MODALITIES_TO_KEYS.items(): @@ -259,14 +235,14 @@ def initialize_obs_utils_with_config(config): """ if config.algo_name == "hbc": obs_modality_specs = [ - config.observation.planner.modalities, + config.observation.planner.modalities, config.observation.actor.modalities, ] obs_encoder_config = config.observation.actor.encoder elif config.algo_name == "iris": obs_modality_specs = [ - config.observation.value_planner.planner.modalities, - config.observation.value_planner.value.modalities, + config.observation.value_planner.planner.modalities, + config.observation.value_planner.value.modalities, config.observation.actor.modalities, ] obs_encoder_config = config.observation.actor.encoder @@ -285,9 +261,7 @@ def key_is_obs_modality(key, obs_modality): key (str): obs key name to check obs_modality (str): observation modality - e.g.: "low_dim", "rgb" """ - assert ( - OBS_KEYS_TO_MODALITIES is not None - ), "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" + assert OBS_KEYS_TO_MODALITIES is not None, "error: must call ObsUtils.initialize_obs_utils_with_obs_config first" return OBS_KEYS_TO_MODALITIES[key] == obs_modality @@ -303,11 +277,11 @@ def center_crop(im, t_h, t_w): Returns: im (np.array or torch.Tensor): center cropped image """ - assert im.shape[-3] >= t_h and im.shape[-2] >= t_w - assert im.shape[-1] in [1, 3] + assert(im.shape[-3] >= t_h and im.shape[-2] >= t_w) + assert(im.shape[-1] in [1, 3]) crop_h = int((im.shape[-3] - t_h) / 2) crop_w = int((im.shape[-2] - t_w) / 2) - return im[..., crop_h : crop_h + t_h, crop_w : crop_w + t_w, :] + return im[..., crop_h:crop_h + t_h, crop_w:crop_w + t_w, :] def batch_image_hwc_to_chw(im): @@ -368,9 +342,7 @@ def process_obs(obs, obs_modality=None, obs_key=None): Returns: processed_obs (np.array or torch.Tensor): processed observation """ - 
assert ( - obs_modality is not None or obs_key is not None - ), "Either obs_modality or obs_key must be specified!" + assert obs_modality is not None or obs_key is not None, "Either obs_modality or obs_key must be specified!" if obs_key is not None: obs_modality = OBS_KEYS_TO_MODALITIES[obs_key] return OBS_MODALITY_CLASSES[obs_modality].process_obs(obs) @@ -387,9 +359,7 @@ def process_obs_dict(obs_dict): Returns: new_dict (dict): dictionary where observation keys have been processed by their corresponding processors """ - return { - k: process_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() - } # shallow copy + return { k : process_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() } # shallow copy def process_frame(frame, channel_dim, scale): @@ -407,7 +377,7 @@ def process_frame(frame, channel_dim, scale): processed_frame (np.array or torch.Tensor): processed frame """ # Channel size should either be 3 (RGB) or 1 (depth) - assert frame.shape[-1] == channel_dim + assert (frame.shape[-1] == channel_dim) frame = TU.to_float(frame) if scale is not None: frame = frame / scale @@ -434,9 +404,7 @@ def unprocess_obs(obs, obs_modality=None, obs_key=None): Returns: unprocessed_obs (np.array or torch.Tensor): unprocessed observation """ - assert ( - obs_modality is not None or obs_key is not None - ), "Either obs_modality or obs_key must be specified!" + assert obs_modality is not None or obs_key is not None, "Either obs_modality or obs_key must be specified!" if obs_key is not None: obs_modality = OBS_KEYS_TO_MODALITIES[obs_key] return OBS_MODALITY_CLASSES[obs_modality].unprocess_obs(obs) @@ -455,9 +423,7 @@ def unprocess_obs_dict(obs_dict): new_dict (dict): dictionary where observation keys have been unprocessed by their respective unprocessor methods """ - return { - k: unprocess_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() - } # shallow copy + return { k : unprocess_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() } # shallow copy def unprocess_frame(frame, channel_dim, scale): @@ -474,7 +440,7 @@ def unprocess_frame(frame, channel_dim, scale): unprocessed_frame (np.array or torch.Tensor): frame passed through inverse operation of @process_frame """ - assert frame.shape[-3] == channel_dim # check for channel dimension + assert frame.shape[-3] == channel_dim # check for channel dimension frame = batch_image_chw_to_hwc(frame) if scale is not None: frame = scale * frame @@ -498,7 +464,7 @@ def get_processed_shape(obs_modality, input_shape): def normalize_batch(batch, normalization_stats, normalize_actions=True): """ - Normalize observations using the provided "mean" and "std" entries + Normalize observations using the provided "mean" and "std" entries for each observation key. The observation dictionary will be modified in-place. @@ -527,7 +493,7 @@ def _norm_helper(obs, mean, std): ), "shape mismatch in @normalize_obs" # Obs can have one or more leading batch dims - prepare for broadcasting. - # + # # As an example, if the obs has shape [B, T, D] and our mean / std stats are shape [D] # then we should pad the stats to shape [1, 1, D]. reshape_padding = tuple([1] * shape_len_diff) @@ -640,7 +606,7 @@ def repeat_and_stack_observation(obs_dict, n): Given an observation dictionary and a desired repeat value @n, this function will return a new observation dictionary where each modality is repeated @n times and the copies are - stacked in the first dimension. + stacked in the first dimension. 
For example, if a batch of 3 observations comes in, and n is 2, the output will look like [ob1; ob1; ob2; ob2; ob3; ob3] in @@ -660,7 +626,7 @@ def repeat_and_stack_observation(obs_dict, n): def crop_image_from_indices(images, crop_indices, crop_height, crop_width): """ - Crops images at the locations specified by @crop_indices. Crops will be + Crops images at the locations specified by @crop_indices. Crops will be taken across all channels. Args: @@ -688,9 +654,7 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): assert crop_indices.shape[-1] == 2 ndim_im_shape = len(images.shape) ndim_indices_shape = len(crop_indices.shape) - assert (ndim_im_shape == ndim_indices_shape + 1) or ( - ndim_im_shape == ndim_indices_shape + 2 - ) + assert (ndim_im_shape == ndim_indices_shape + 1) or (ndim_im_shape == ndim_indices_shape + 2) # maybe pad so that @crop_indices is shape [..., N, 2] is_padded = False @@ -720,30 +684,20 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): crop_ind_grid_w = torch.arange(crop_width).to(device) crop_ind_grid_w = TU.unsqueeze_expand_at(crop_ind_grid_w, size=crop_height, dim=0) # combine into shape [CH, CW, 2] - crop_in_grid = torch.cat( - (crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1 - ) + crop_in_grid = torch.cat((crop_ind_grid_h.unsqueeze(-1), crop_ind_grid_w.unsqueeze(-1)), dim=-1) # Add above grid with the offset index of each sampled crop to get 2d indices for each crop. # After broadcasting, this will be shape [..., N, CH, CW, 2] and each crop has a [CH, CW, 2] # shape array that tells us which pixels from the corresponding source image to grab. grid_reshape = [1] * len(crop_indices.shape[:-1]) + [crop_height, crop_width, 2] - all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape( - grid_reshape - ) + all_crop_inds = crop_indices.unsqueeze(-2).unsqueeze(-2) + crop_in_grid.reshape(grid_reshape) # For using @torch.gather, convert to flat indices from 2D indices, and also - # repeat across the channel dimension. To get flat index of each pixel to grab for + # repeat across the channel dimension. 
To get flat index of each pixel to grab for # each sampled crop, we just use the mapping: ind = h_ind * @image_w + w_ind - all_crop_inds = ( - all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] - ) # shape [..., N, CH, CW] - all_crop_inds = TU.unsqueeze_expand_at( - all_crop_inds, size=image_c, dim=-3 - ) # shape [..., N, C, CH, CW] - all_crop_inds = TU.flatten( - all_crop_inds, begin_axis=-2 - ) # shape [..., N, C, CH * CW] + all_crop_inds = all_crop_inds[..., 0] * image_w + all_crop_inds[..., 1] # shape [..., N, CH, CW] + all_crop_inds = TU.unsqueeze_expand_at(all_crop_inds, size=image_c, dim=-3) # shape [..., N, C, CH, CW] + all_crop_inds = TU.flatten(all_crop_inds, begin_axis=-2) # shape [..., N, C, CH * CW] # Repeat and flatten the source images -> [..., N, C, H * W] and then use gather to index with crop pixel inds images_to_crop = TU.unsqueeze_expand_at(images, size=num_crops, dim=-4) @@ -751,12 +705,8 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): crops = torch.gather(images_to_crop, dim=-1, index=all_crop_inds) # [..., N, C, CH * CW] -> [..., N, C, CH, CW] reshape_axis = len(crops.shape) - 1 - crops = TU.reshape_dimensions( - crops, - begin_axis=reshape_axis, - end_axis=reshape_axis, - target_dims=(crop_height, crop_width), - ) + crops = TU.reshape_dimensions(crops, begin_axis=reshape_axis, end_axis=reshape_axis, + target_dims=(crop_height, crop_width)) if is_padded: # undo padding -> [..., C, CH, CW] @@ -764,9 +714,7 @@ def crop_image_from_indices(images, crop_indices, crop_height, crop_width): return crops -def sample_random_image_crops( - images, crop_height, crop_width, num_crops, pos_enc=False -): +def sample_random_image_crops(images, crop_height, crop_width, num_crops, pos_enc=False): """ For each image, randomly sample @num_crops crops of size (@crop_height, @crop_width), from @images. @@ -775,18 +723,18 @@ def sample_random_image_crops( images (torch.Tensor): batch of images of shape [..., C, H, W] crop_height (int): height of crop to take - + crop_width (int): width of crop to take num_crops (n): number of crops to sample - pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial + pos_enc (bool): if True, also add 2 channels to the outputs that gives a spatial encoding of the original source pixel locations. This means that the - output crops will contain information about where in the source image + output crops will contain information about where in the source image it was sampled from. Returns: - crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) + crops (torch.Tensor): crops of shape (..., @num_crops, C, @crop_height, @crop_width) if @pos_enc is False, otherwise (..., @num_crops, C + 2, @crop_height, @crop_width) crop_inds (torch.Tensor): sampled crop indices of shape (..., N, 2) @@ -801,7 +749,7 @@ def sample_random_image_crops( pos_y, pos_x = torch.meshgrid(torch.arange(h), torch.arange(w)) pos_y = pos_y.float().to(device) / float(h) pos_x = pos_x.float().to(device) / float(w) - position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] + position_enc = torch.stack((pos_y, pos_x)) # shape [C, H, W] # unsqueeze and expand to match leading dimensions -> shape [..., C, H, W] leading_shape = source_im.shape[:-3] @@ -817,26 +765,20 @@ def sample_random_image_crops( max_sample_w = image_w - crop_width # Sample crop locations for all tensor dimensions up to the last 3, which are [C, H, W]. 
- # Each gets @num_crops samples - typically this will just be the batch dimension (B), so + # Each gets @num_crops samples - typically this will just be the batch dimension (B), so # we will sample [B, N] indices, but this supports having more than one leading dimension, # or possibly no leading dimension. # # Trick: sample in [0, 1) with rand, then re-scale to [0, M) and convert to long to get sampled ints - crop_inds_h = ( - max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device) - ).long() - crop_inds_w = ( - max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device) - ).long() - crop_inds = torch.cat( - (crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1 - ) # shape [..., N, 2] + crop_inds_h = (max_sample_h * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() + crop_inds_w = (max_sample_w * torch.rand(*source_im.shape[:-3], num_crops).to(device)).long() + crop_inds = torch.cat((crop_inds_h.unsqueeze(-1), crop_inds_w.unsqueeze(-1)), dim=-1) # shape [..., N, 2] crops = crop_image_from_indices( - images=source_im, - crop_indices=crop_inds, - crop_height=crop_height, - crop_width=crop_width, + images=source_im, + crop_indices=crop_inds, + crop_height=crop_height, + crop_width=crop_width, ) return crops, crop_inds @@ -847,7 +789,6 @@ class Modality: Observation Modality class to encapsulate necessary functions needed to process observations of this modality """ - # observation keys to associate with this modality keys = set() @@ -864,9 +805,7 @@ def __init_subclass__(cls, **kwargs): """ Hook method to automatically register all valid subclasses so we can keep track of valid modalities """ - assert ( - cls.name is not None - ), f"Name of modality {cls.__name__} must be specified!" + assert cls.name is not None, f"Name of modality {cls.__name__} must be specified!" register_obs_key(cls) @classmethod @@ -961,11 +900,8 @@ def process_obs(cls, obs): Returns: np.array or torch.Tensor: processed observation """ - processor = ( - cls._custom_obs_processor - if cls._custom_obs_processor is not None - else cls._default_obs_processor - ) + processor = cls._custom_obs_processor if \ + cls._custom_obs_processor is not None else cls._default_obs_processor return processor(obs) @classmethod @@ -979,11 +915,8 @@ def unprocess_obs(cls, obs): Returns: np.array or torch.Tensor: unprocessed observation """ - unprocessor = ( - cls._custom_obs_unprocessor - if cls._custom_obs_unprocessor is not None - else cls._default_obs_unprocessor - ) + unprocessor = cls._custom_obs_unprocessor if \ + cls._custom_obs_unprocessor is not None else cls._default_obs_unprocessor return unprocessor(obs) @classmethod @@ -1013,7 +946,6 @@ class ImageModality(Modality): """ Modality for RGB image observations """ - name = "rgb" @classmethod @@ -1029,7 +961,7 @@ def _default_obs_processor(cls, obs): Returns: processed_obs (np.array or torch.Tensor): processed image """ - return process_frame(frame=obs, channel_dim=3, scale=255.0) + return process_frame(frame=obs, channel_dim=3, scale=255.) 
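
    # A minimal sketch of the round trip this processor implies -- a sanity
    # check under the assumption that ImageModality is imported from
    # robomimic.utils.obs_utils:
    #
    #     import numpy as np
    #     rgb = np.random.randint(0, 256, size=(84, 84, 3), dtype=np.uint8)
    #     processed = ImageModality.process_obs(rgb)         # float, CHW, in [0, 1]
    #     restored = ImageModality.unprocess_obs(processed)  # uint8, HWC again
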
@classmethod def _default_obs_unprocessor(cls, obs): @@ -1044,14 +976,13 @@ def _default_obs_unprocessor(cls, obs): unprocessed_obs (np.array or torch.Tensor): image passed through inverse operation of @process_frame """ - return TU.to_uint8(unprocess_frame(frame=obs, channel_dim=3, scale=255.0)) + return TU.to_uint8(unprocess_frame(frame=obs, channel_dim=3, scale=255.)) class DepthModality(Modality): """ Modality for depth observations """ - name = "depth" @classmethod @@ -1067,7 +998,7 @@ def _default_obs_processor(cls, obs): Returns: processed_obs (np.array or torch.Tensor): processed depth """ - return process_frame(frame=obs, channel_dim=1, scale=1.0) + return process_frame(frame=obs, channel_dim=1, scale=1.) @classmethod def _default_obs_unprocessor(cls, obs): @@ -1082,28 +1013,27 @@ def _default_obs_unprocessor(cls, obs): unprocessed_obs (np.array or torch.Tensor): depth passed through inverse operation of @process_depth """ - return unprocess_frame(frame=obs, channel_dim=1, scale=1.0) + return unprocess_frame(frame=obs, channel_dim=1, scale=1.) class ScanModality(Modality): """ Modality for scan observations """ - name = "scan" @classmethod def _default_obs_processor(cls, obs): # Channel swaps ([...,] L, C) --> ([...,] C, L) - + # First, add extra dimension at 2nd to last index to treat this as a frame shape = obs.shape new_shape = [*shape[:-2], 1, *shape[-2:]] obs = obs.reshape(new_shape) - + # Convert shape obs = batch_image_hwc_to_chw(obs) - + # Remove extra dimension (it's the second from last dimension) obs = obs.squeeze(-2) return obs @@ -1111,7 +1041,7 @@ def _default_obs_processor(cls, obs): @classmethod def _default_obs_unprocessor(cls, obs): # Channel swaps ([B,] C, L) --> ([B,] L, C) - + # First, add extra dimension at 1st index to treat this as a frame shape = obs.shape new_shape = [*shape[:-2], 1, *shape[-2:]] @@ -1129,7 +1059,6 @@ class LowDimModality(Modality): """ Modality for low dimensional observations """ - name = "low_dim" @classmethod diff --git a/robomimic/utils/python_utils.py b/robomimic/utils/python_utils.py index ea8fd40c..5bc71bd1 100644 --- a/robomimic/utils/python_utils.py +++ b/robomimic/utils/python_utils.py @@ -1,7 +1,6 @@ """ Set of general purpose utility functions for easier interfacing with Python API """ - import inspect from copy import deepcopy import robomimic.macros as Macros @@ -67,12 +66,8 @@ def extract_class_init_kwargs_from_dict(cls, dic, copy=False, verbose=False): keys_not_in_cls = [k for k in dic if k not in cls_keys] keys_not_in_dic = [k for k in cls_keys if k not in list(dic.keys())] if len(keys_not_in_cls) > 0: - print( - f"Warning: For class {cls.__name__}, got unknown keys: {keys_not_in_cls} " - ) + print(f"Warning: For class {cls.__name__}, got unknown keys: {keys_not_in_cls} ") if len(keys_not_in_dic) > 0: - print( - f"Warning: For class {cls.__name__}, got missing keys: {keys_not_in_dic} " - ) + print(f"Warning: For class {cls.__name__}, got missing keys: {keys_not_in_dic} ") - return subdic + return subdic \ No newline at end of file diff --git a/robomimic/utils/tensor_utils.py b/robomimic/utils/tensor_utils.py index 8e720d91..ec2063b2 100644 --- a/robomimic/utils/tensor_utils.py +++ b/robomimic/utils/tensor_utils.py @@ -2,7 +2,6 @@ A collection of utilities for working with nested tensor structures consisting of numpy arrays and torch tensors. 
""" - import collections import numpy as np import torch @@ -10,27 +9,23 @@ def recursive_dict_list_tuple_apply(x, type_func_dict): """ - Recursively apply functions to a nested dictionary or list or tuple, given a dictionary of + Recursively apply functions to a nested dictionary or list or tuple, given a dictionary of {data_type: function_to_apply}. Args: x (dict or list or tuple): a possibly nested dictionary or list or tuple - type_func_dict (dict): a mapping from data types to the functions to be + type_func_dict (dict): a mapping from data types to the functions to be applied for each data type. Returns: y (dict or list or tuple): new nested dict-list-tuple """ - assert list not in type_func_dict - assert tuple not in type_func_dict - assert dict not in type_func_dict + assert(list not in type_func_dict) + assert(tuple not in type_func_dict) + assert(dict not in type_func_dict) if isinstance(x, (dict, collections.OrderedDict)): - new_x = ( - collections.OrderedDict() - if isinstance(x, collections.OrderedDict) - else dict() - ) + new_x = collections.OrderedDict() if isinstance(x, collections.OrderedDict) else dict() for k, v in x.items(): new_x[k] = recursive_dict_list_tuple_apply(v, type_func_dict) return new_x @@ -44,7 +39,8 @@ def recursive_dict_list_tuple_apply(x, type_func_dict): if isinstance(x, t): return f(x) else: - raise NotImplementedError("Cannot handle data type %s" % str(type(x))) + raise NotImplementedError( + 'Cannot handle data type %s' % str(type(x))) def map_tensor(x, func): @@ -64,7 +60,7 @@ def map_tensor(x, func): { torch.Tensor: func, type(None): lambda x: x, - }, + } ) @@ -85,13 +81,13 @@ def map_ndarray(x, func): { np.ndarray: func, type(None): lambda x: x, - }, + } ) def map_tensor_ndarray(x, tensor_func, ndarray_func): """ - Apply function @tensor_func to torch.Tensor objects and @ndarray_func to + Apply function @tensor_func to torch.Tensor objects and @ndarray_func to np.ndarray objects in a nested dictionary or list or tuple. Args: @@ -108,7 +104,7 @@ def map_tensor_ndarray(x, tensor_func, ndarray_func): torch.Tensor: tensor_func, np.ndarray: ndarray_func, type(None): lambda x: x, - }, + } ) @@ -129,7 +125,7 @@ def clone(x): torch.Tensor: lambda x: x.clone(), np.ndarray: lambda x: x.copy(), type(None): lambda x: x, - }, + } ) @@ -148,13 +144,13 @@ def detach(x): x, { torch.Tensor: lambda x: x.detach(), - }, + } ) def to_batch(x): """ - Introduces a leading batch dimension of 1 for all torch tensors and numpy + Introduces a leading batch dimension of 1 for all torch tensors and numpy arrays in nested dictionary or list or tuple and returns a new nested structure. Args: @@ -169,13 +165,13 @@ def to_batch(x): torch.Tensor: lambda x: x[None, ...], np.ndarray: lambda x: x[None, ...], type(None): lambda x: x, - }, + } ) def to_sequence(x): """ - Introduces a time dimension of 1 at dimension 1 for all torch tensors and numpy + Introduces a time dimension of 1 at dimension 1 for all torch tensors and numpy arrays in nested dictionary or list or tuple and returns a new nested structure. 
Args: @@ -190,7 +186,7 @@ def to_sequence(x): torch.Tensor: lambda x: x[:, None, ...], np.ndarray: lambda x: x[:, None, ...], type(None): lambda x: x, - }, + } ) @@ -212,7 +208,7 @@ def index_at_time(x, ind): torch.Tensor: lambda x: x[:, ind, ...], np.ndarray: lambda x: x[:, ind, ...], type(None): lambda x: x, - }, + } ) @@ -234,13 +230,13 @@ def unsqueeze(x, dim): torch.Tensor: lambda x: x.unsqueeze(dim=dim), np.ndarray: lambda x: np.expand_dims(x, axis=dim), type(None): lambda x: x, - }, + } ) def contiguous(x): """ - Makes all torch tensors and numpy arrays contiguous in nested dictionary or + Makes all torch tensors and numpy arrays contiguous in nested dictionary or list or tuple and returns a new nested structure. Args: @@ -255,7 +251,7 @@ def contiguous(x): torch.Tensor: lambda x: x.contiguous(), np.ndarray: lambda x: np.ascontiguousarray(x), type(None): lambda x: x, - }, + } ) @@ -276,14 +272,14 @@ def to_device(x, device): { torch.Tensor: lambda x, d=device: x.to(d), type(None): lambda x: x, - }, + } ) def to_tensor(x): """ Converts all numpy arrays in nested dictionary or list or tuple to - torch tensors (and leaves existing torch Tensors as-is), and returns + torch tensors (and leaves existing torch Tensors as-is), and returns a new nested structure. Args: @@ -298,14 +294,14 @@ def to_tensor(x): torch.Tensor: lambda x: x, np.ndarray: lambda x: torch.from_numpy(x), type(None): lambda x: x, - }, + } ) def to_numpy(x): """ Converts all torch tensors in nested dictionary or list or tuple to - numpy (and leaves existing numpy arrays as-is), and returns + numpy (and leaves existing numpy arrays as-is), and returns a new nested structure. Args: @@ -314,26 +310,24 @@ def to_numpy(x): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - def f(tensor): if tensor.is_cuda: return tensor.detach().cpu().numpy() else: return tensor.detach().numpy() - return recursive_dict_list_tuple_apply( x, { torch.Tensor: f, np.ndarray: lambda x: x, type(None): lambda x: x, - }, + } ) def to_list(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to a list, and returns a new nested structure. Useful for json encoding. @@ -343,26 +337,24 @@ def to_list(x): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - def f(tensor): if tensor.is_cuda: return tensor.detach().cpu().numpy().tolist() else: return tensor.detach().numpy().tolist() - return recursive_dict_list_tuple_apply( x, { torch.Tensor: f, np.ndarray: lambda x: x.tolist(), type(None): lambda x: x, - }, + } ) def to_float(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to float type entries, and returns a new nested structure. Args: @@ -377,13 +369,13 @@ def to_float(x): torch.Tensor: lambda x: x.float(), np.ndarray: lambda x: x.astype(np.float32), type(None): lambda x: x, - }, + } ) def to_uint8(x): """ - Converts all torch tensors and numpy arrays in nested dictionary or list + Converts all torch tensors and numpy arrays in nested dictionary or list or tuple to uint8 type entries, and returns a new nested structure. 
Args: @@ -398,13 +390,13 @@ def to_uint8(x): torch.Tensor: lambda x: x.byte(), np.ndarray: lambda x: x.astype(np.uint8), type(None): lambda x: x, - }, + } ) def to_torch(x, device): """ - Converts all numpy arrays and torch tensors in nested dictionary or list or tuple to + Converts all numpy arrays and torch tensors in nested dictionary or list or tuple to torch tensors on device @device and returns a new nested structure. Args: @@ -435,7 +427,7 @@ def to_one_hot_single(tensor, num_class): def to_one_hot(tensor, num_class): """ - Convert all tensors in nested dictionary or list or tuple to one-hot representation, + Convert all tensors in nested dictionary or list or tuple to one-hot representation, assuming a certain number of total class labels. Args: @@ -479,7 +471,7 @@ def flatten(x, begin_axis=1): x, { torch.Tensor: lambda x, b=begin_axis: flatten_single(x, begin_axis=b), - }, + } ) @@ -497,10 +489,10 @@ def reshape_dimensions_single(x, begin_axis, end_axis, target_dims): Returns: y (torch.Tensor): reshaped tensor """ - assert begin_axis <= end_axis - assert begin_axis >= 0 - assert end_axis < len(x.shape) - assert isinstance(target_dims, (tuple, list)) + assert(begin_axis <= end_axis) + assert(begin_axis >= 0) + assert(end_axis < len(x.shape)) + assert(isinstance(target_dims, (tuple, list))) s = x.shape final_s = [] for i in range(len(s)): @@ -513,9 +505,9 @@ def reshape_dimensions_single(x, begin_axis, end_axis, target_dims): def reshape_dimensions(x, begin_axis, end_axis, target_dims): """ - Reshape selected dimensions for all tensors in nested dictionary or list or tuple + Reshape selected dimensions for all tensors in nested dictionary or list or tuple to a target dimension. - + Args: x (dict or list or tuple): a possibly nested dictionary or list or tuple begin_axis (int): begin dimension @@ -530,13 +522,11 @@ def reshape_dimensions(x, begin_axis, end_axis, target_dims): x, { torch.Tensor: lambda x, b=begin_axis, e=end_axis, t=target_dims: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=t - ), + x, begin_axis=b, end_axis=e, target_dims=t), np.ndarray: lambda x, b=begin_axis, e=end_axis, t=target_dims: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=t - ), + x, begin_axis=b, end_axis=e, target_dims=t), type(None): lambda x: x, - }, + } ) @@ -557,13 +547,11 @@ def join_dimensions(x, begin_axis, end_axis): x, { torch.Tensor: lambda x, b=begin_axis, e=end_axis: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=[-1] - ), + x, begin_axis=b, end_axis=e, target_dims=[-1]), np.ndarray: lambda x, b=begin_axis, e=end_axis: reshape_dimensions_single( - x, begin_axis=b, end_axis=e, target_dims=[-1] - ), + x, begin_axis=b, end_axis=e, target_dims=[-1]), type(None): lambda x: x, - }, + } ) @@ -672,15 +660,13 @@ def named_reduce(x, reduction, dim): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - return map_tensor( - x, func=lambda t, r=reduction, d=dim: named_reduce_single(t, r, d) - ) + return map_tensor(x, func=lambda t, r=reduction, d=dim: named_reduce_single(t, r, d)) def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): """ This function indexes out a target dimension of a tensor in a structured way, - by allowing a different value to be selected for each member of a flat index + by allowing a different value to be selected for each member of a flat index tensor (@indices) corresponding to a source dimension. 
This can be interpreted as moving along the source dimension, using the corresponding index value in @indices to select values for all other dimensions outside of the @@ -694,7 +680,7 @@ def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): from the other dimensions indices (torch.Tensor): flat index tensor with same shape as tensor @x along @source_dim - + Returns: y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out """ @@ -719,7 +705,7 @@ def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): def gather_along_dim_with_dim(x, target_dim, source_dim, indices): """ - Apply @gather_along_dim_with_dim_single to all tensors in a nested + Apply @gather_along_dim_with_dim_single to all tensors in a nested dictionary or list or tuple. Args: @@ -733,17 +719,13 @@ def gather_along_dim_with_dim(x, target_dim, source_dim, indices): Returns: y (dict or list or tuple): new nested dict-list-tuple """ - return map_tensor( - x, - lambda y, t=target_dim, s=source_dim, i=indices: gather_along_dim_with_dim_single( - y, t, s, i - ), - ) - + return map_tensor(x, + lambda y, t=target_dim, s=source_dim, i=indices: gather_along_dim_with_dim_single(y, t, s, i)) + def gather_sequence_single(seq, indices): """ - Given a tensor with leading dimensions [B, T, ...], gather an element from each sequence in + Given a tensor with leading dimensions [B, T, ...], gather an element from each sequence in the batch given an index for each sequence. Args: @@ -753,9 +735,7 @@ def gather_sequence_single(seq, indices): Return: y (torch.Tensor): indexed tensor of shape [B, ....] """ - return gather_along_dim_with_dim_single( - seq, target_dim=1, source_dim=0, indices=indices - ) + return gather_along_dim_with_dim_single(seq, target_dim=1, source_dim=0, indices=indices) def gather_sequence(seq, indices): @@ -828,14 +808,12 @@ def pad_sequence(seq, padding, batched=False, pad_same=True, pad_values=None): return recursive_dict_list_tuple_apply( seq, { - torch.Tensor: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: pad_sequence_single( - x, p, b, ps, pv - ), - np.ndarray: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: pad_sequence_single( - x, p, b, ps, pv - ), + torch.Tensor: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: + pad_sequence_single(x, p, b, ps, pv), + np.ndarray: lambda x, p=padding, b=batched, ps=pad_same, pv=pad_values: + pad_sequence_single(x, p, b, ps, pv), type(None): lambda x: x, - }, + } ) @@ -854,7 +832,7 @@ def assert_size_at_dim_single(x, size, dim, msg): def assert_size_at_dim(x, size, dim, msg): """ - Ensure that arrays and tensors in nested dictionary or list or tuple have + Ensure that arrays and tensors in nested dictionary or list or tuple have size @size in dim @dim. Args: @@ -882,7 +860,7 @@ def get_shape(x): torch.Tensor: lambda x: x.shape, np.ndarray: lambda x: x.shape, type(None): lambda x: x, - }, + } ) @@ -908,7 +886,7 @@ def list_of_flat_dict_to_dict_of_list(list_of_dict): return dic -def flatten_nested_dict_list(d, parent_key="", sep="_", item_key=""): +def flatten_nested_dict_list(d, parent_key='', sep='_', item_key=''): """ Flatten a nested dict or list to a list. 
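
    gather_sequence picks one timestep per batch element. A small sketch,
    assuming PyTorch and the functions defined above:

        import torch

        seq = torch.arange(2 * 4 * 3).reshape(2, 4, 3)  # [B=2, T=4, D=3]
        indices = torch.tensor([1, 3])                  # one timestep index per sequence
        picked = gather_sequence(seq, indices)          # shape [2, 3]
        # picked[0] == seq[0, 1] and picked[1] == seq[1, 3]
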
@@ -948,9 +926,7 @@ def flatten_nested_dict_list(d, parent_key="", sep="_", item_key=""): return [(new_key, d)] -def time_distributed( - inputs, op, activation=None, inputs_as_kwargs=False, inputs_as_args=False, **kwargs -): +def time_distributed(inputs, op, activation=None, inputs_as_kwargs=False, inputs_as_args=False, **kwargs): """ Apply function @op to all tensors in nested dictionary or list or tuple @inputs in both the batch (B) and time (T) dimension, where the tensors are expected to have shape [B, T, ...]. @@ -980,7 +956,5 @@ def time_distributed( if activation is not None: outputs = map_tensor(outputs, activation) - outputs = reshape_dimensions( - outputs, begin_axis=0, end_axis=0, target_dims=(batch_size, seq_len) - ) + outputs = reshape_dimensions(outputs, begin_axis=0, end_axis=0, target_dims=(batch_size, seq_len)) return outputs diff --git a/robomimic/utils/test_utils.py b/robomimic/utils/test_utils.py index 0cfdca23..86f125e0 100644 --- a/robomimic/utils/test_utils.py +++ b/robomimic/utils/test_utils.py @@ -1,7 +1,6 @@ """ Utilities for testing algorithm implementations - used mainly by scripts in tests directory. """ - import os import json import shutil @@ -43,7 +42,7 @@ def maybe_remove_file(file_to_remove): def example_dataset_path(): """ Path to dataset to use for testing and example purposes. It should - exist under the tests/assets directory, and will be downloaded + exist under the tests/assets directory, and will be downloaded from a server if it does not exist. """ dataset_folder = os.path.join(robomimic.__path__[0], "../tests/assets/") @@ -52,7 +51,7 @@ def example_dataset_path(): print("\nWARNING: test hdf5 does not exist! Downloading from server...") os.makedirs(dataset_folder, exist_ok=True) FileUtils.download_url( - url="http://downloads.cs.stanford.edu/downloads/rt_benchmark/test_v141.hdf5", + url="http://downloads.cs.stanford.edu/downloads/rt_benchmark/test_v141.hdf5", download_dir=dataset_folder, ) return dataset_path @@ -67,14 +66,9 @@ def example_momart_dataset_path(): dataset_folder = os.path.join(robomimic.__path__[0], "../tests/assets/") dataset_path = os.path.join(dataset_folder, "test_momart.hdf5") if not os.path.exists(dataset_path): - user_response = input( - "\nWARNING: momart test hdf5 does not exist! We will download sample dataset. " - "This will take 0.6GB space. Proceed? y/n\n" - ) - assert user_response.lower() in { - "yes", - "y", - }, f"Did not receive confirmation. Aborting download." + user_response = input("\nWARNING: momart test hdf5 does not exist! We will download sample dataset. " + "This will take 0.6GB space. Proceed? y/n\n") + assert user_response.lower() in {"yes", "y"}, f"Did not receive confirmation. Aborting download." 
print("\nDownloading from server...") @@ -117,10 +111,8 @@ def get_base_config(algo_name): """ # we will load and override defaults from template config - base_config_path = os.path.join( - robomimic.__path__[0], "exps/templates/{}.json".format(algo_name) - ) - with open(base_config_path, "r") as f: + base_config_path = os.path.join(robomimic.__path__[0], "exps/templates/{}.json".format(algo_name)) + with open(base_config_path, 'r') as f: config = Config(json.load(f)) # small dataset with a handful of trajectories @@ -197,15 +189,13 @@ def checkpoint_path_from_test_run(): time_dir_names = [f.name for f in os.scandir(exp_dir) if f.is_dir()] assert len(time_dir_names) == 1 path_to_models = os.path.join(exp_dir, time_dir_names[0], "models") - epoch_name = [ - f.name for f in os.scandir(path_to_models) if f.name.startswith("model") - ][0] + epoch_name = [f.name for f in os.scandir(path_to_models) if f.name.startswith("model")][0] return os.path.join(path_to_models, epoch_name) def test_eval_agent_from_checkpoint(ckpt_path, device): """ - Test loading a model from checkpoint and running a rollout with the + Test loading a model from checkpoint and running a rollout with the trained agent for a small number of steps. Args: @@ -215,9 +205,7 @@ def test_eval_agent_from_checkpoint(ckpt_path, device): """ # get policy and env from checkpoint - policy, ckpt_dict = FileUtils.policy_from_checkpoint( - ckpt_path=ckpt_path, device=device, verbose=True - ) + policy, ckpt_dict = FileUtils.policy_from_checkpoint(ckpt_path=ckpt_path, device=device, verbose=True) env, _ = FileUtils.env_from_checkpoint(ckpt_dict=ckpt_dict, verbose=True) # run a test rollout @@ -251,9 +239,7 @@ def test_run(base_config, config_modifier): """ try: # get config - config = config_from_modifier( - base_config=base_config, config_modifier=config_modifier - ) + config = config_from_modifier(base_config=base_config, config_modifier=config_modifier) # set torch device device = TorchUtils.get_torch_device(try_to_use_cuda=config.train.cuda) @@ -270,9 +256,7 @@ def test_run(base_config, config_modifier): except Exception as e: # indicate failure by returning error string - ret = colored( - "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" - ) + ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") # make sure model directory is cleaned up before returning from this function maybe_remove_dir(temp_model_dir_path()) diff --git a/robomimic/utils/torch_utils.py b/robomimic/utils/torch_utils.py index 2bccf92d..433c8797 100644 --- a/robomimic/utils/torch_utils.py +++ b/robomimic/utils/torch_utils.py @@ -1,7 +1,6 @@ """ This file contains some PyTorch utilities. 
""" - import numpy as np import torch import torch.optim as optim @@ -17,7 +16,9 @@ def soft_update(source, target, tau): target (torch.nn.Module): target network to update """ for target_param, param in zip(target.parameters(), source.parameters()): - target_param.copy_(target_param * (1.0 - tau) + param * tau) + target_param.copy_( + target_param * (1.0 - tau) + param * tau + ) def hard_update(source, target): @@ -29,7 +30,7 @@ def hard_update(source, target): target (torch.nn.Module): target network to update parameters for """ for target_param, param in zip(target.parameters(), source.parameters()): - target_param.copy_(param) + target_param.copy_(param) def get_torch_device(try_to_use_cuda): @@ -87,7 +88,7 @@ def reparameterize(mu, logvar): def optimizer_from_optim_params(net_optim_params, net): """ - Helper function to return a torch Optimizer from the optim_params + Helper function to return a torch Optimizer from the optim_params section of the config for a particular network. Args: @@ -119,7 +120,7 @@ def optimizer_from_optim_params(net_optim_params, net): def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): """ - Helper function to return a LRScheduler from the optim_params + Helper function to return a LRScheduler from the optim_params section of the config for a particular network. Returns None if a scheduler is not needed. @@ -135,9 +136,7 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): Returns: lr_scheduler (torch.optim.lr_scheduler or None): learning rate scheduler """ - lr_scheduler_type = net_optim_params["learning_rate"].get( - "scheduler_type", "multistep" - ) + lr_scheduler_type = net_optim_params["learning_rate"].get("scheduler_type", "multistep") epoch_schedule = net_optim_params["learning_rate"]["epoch_schedule"] lr_scheduler = None @@ -145,7 +144,7 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): if lr_scheduler_type == "linear": assert len(epoch_schedule) == 1 end_epoch = epoch_schedule[0] - + return optim.lr_scheduler.LinearLR( optimizer, start_factor=1.0, @@ -160,7 +159,7 @@ def lr_scheduler_from_optim_params(net_optim_params, net, optimizer): ) else: raise ValueError("Invalid LR scheduler type: {}".format(lr_scheduler_type)) - + return lr_scheduler @@ -193,7 +192,7 @@ def backprop_for_loss(net, optim, loss, max_grad_norm=None, retain_graph=False): torch.nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm) # compute grad norms - grad_norms = 0.0 + grad_norms = 0. for p in net.parameters(): # only clip gradients for parameters for which requires_grad is True if p.grad is not None: @@ -205,15 +204,13 @@ def backprop_for_loss(net, optim, loss, max_grad_norm=None, retain_graph=False): return grad_norms -class dummy_context_mgr: +class dummy_context_mgr(): """ A dummy context manager - useful for having conditional scopes (such as @maybe_no_grad). Nothing happens in this scope. """ - def __enter__(self): return None - def __exit__(self, exc_type, exc_value, traceback): return False diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index 91bb0944..58de759d 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -3,7 +3,6 @@ mainly consists of functions to assist with logging, rollouts, and the @run_epoch function, which is the core training logic for models in this repository. """ - import os import time import datetime @@ -31,14 +30,14 @@ def get_exp_dir(config, auto_remove_exp_dir=False): """ Create experiment directory from config. 
If an identical experiment directory - exists and @auto_remove_exp_dir is False (default), the function will prompt + exists and @auto_remove_exp_dir is False (default), the function will prompt the user on whether to remove and replace it, or keep the existing one and add a new subdirectory with the new timestamp for the current run. Args: auto_remove_exp_dir (bool): if True, automatically remove the existing experiment folder if it exists at the same path. - + Returns: log_dir (str): path to created log directory (sub-folder in experiment directory) output_dir (str): path to created models directory (sub-folder in experiment directory) @@ -48,7 +47,7 @@ def get_exp_dir(config, auto_remove_exp_dir=False): """ # timestamp for directory names t_now = time.time() - time_str = datetime.datetime.fromtimestamp(t_now).strftime("%Y%m%d%H%M%S") + time_str = datetime.datetime.fromtimestamp(t_now).strftime('%Y%m%d%H%M%S') # create directory for where to dump model parameters, tensorboard logs, and videos base_output_dir = os.path.expanduser(config.train.output_dir) @@ -58,11 +57,7 @@ def get_exp_dir(config, auto_remove_exp_dir=False): base_output_dir = os.path.join(base_output_dir, config.experiment.name) if os.path.exists(base_output_dir): if not auto_remove_exp_dir: - ans = input( - "WARNING: model directory ({}) already exists! \noverwrite? (y/n)\n".format( - base_output_dir - ) - ) + ans = input("WARNING: model directory ({}) already exists! \noverwrite? (y/n)\n".format(base_output_dir)) else: ans = "y" if ans == "y": @@ -103,23 +98,14 @@ def load_data_for_training(config, obs_keys): train_filter_by_attribute = config.train.hdf5_filter_key valid_filter_by_attribute = config.train.hdf5_validation_filter_key if valid_filter_by_attribute is not None: - assert ( - config.experiment.validate - ), "specified validation filter key {}, but config.experiment.validate is not set".format( - valid_filter_by_attribute - ) + assert config.experiment.validate, "specified validation filter key {}, but config.experiment.validate is not set".format(valid_filter_by_attribute) # load the dataset into memory if config.experiment.validate: - assert ( - not config.train.hdf5_normalize_obs - ), "no support for observation normalization with validation data yet" - assert (train_filter_by_attribute is not None) and ( - valid_filter_by_attribute is not None - ), ( - "did not specify filter keys corresponding to train and valid split in dataset" + assert not config.train.hdf5_normalize_obs, "no support for observation normalization with validation data yet" + assert (train_filter_by_attribute is not None) and (valid_filter_by_attribute is not None), \ + "did not specify filter keys corresponding to train and valid split in dataset" \ " - please fill config.train.hdf5_filter_key and config.train.hdf5_validation_filter_key" - ) train_demo_keys = FileUtils.get_demos_for_filter_key( hdf5_path=os.path.expanduser(config.train.data), filter_key=train_filter_by_attribute, @@ -128,19 +114,12 @@ def load_data_for_training(config, obs_keys): hdf5_path=os.path.expanduser(config.train.data), filter_key=valid_filter_by_attribute, ) - assert set(train_demo_keys).isdisjoint(set(valid_demo_keys)), ( - "training demonstrations overlap with " "validation demonstrations!" 
- ) - train_dataset = dataset_factory( - config, obs_keys, filter_by_attribute=train_filter_by_attribute - ) - valid_dataset = dataset_factory( - config, obs_keys, filter_by_attribute=valid_filter_by_attribute - ) + assert set(train_demo_keys).isdisjoint(set(valid_demo_keys)), "training demonstrations overlap with " \ + "validation demonstrations!" + train_dataset = dataset_factory(config, obs_keys, filter_by_attribute=train_filter_by_attribute) + valid_dataset = dataset_factory(config, obs_keys, filter_by_attribute=valid_filter_by_attribute) else: - train_dataset = dataset_factory( - config, obs_keys, filter_by_attribute=train_filter_by_attribute - ) + train_dataset = dataset_factory(config, obs_keys, filter_by_attribute=train_filter_by_attribute) valid_dataset = None return train_dataset, valid_dataset @@ -172,7 +151,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non hdf5_path=dataset_path, obs_keys=obs_keys, dataset_keys=config.train.dataset_keys, - load_next_obs=config.train.hdf5_load_next_obs, # whether to load next observations (s') from dataset + load_next_obs=config.train.hdf5_load_next_obs, # whether to load next observations (s') from dataset frame_stack=config.train.frame_stack, seq_length=config.train.seq_length, pad_frame_stack=config.train.pad_frame_stack, @@ -182,7 +161,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non hdf5_cache_mode=config.train.hdf5_cache_mode, hdf5_use_swmr=config.train.hdf5_use_swmr, hdf5_normalize_obs=config.train.hdf5_normalize_obs, - filter_by_attribute=filter_by_attribute, + filter_by_attribute=filter_by_attribute ) dataset = SequenceDataset(**ds_kwargs) @@ -190,15 +169,15 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non def run_rollout( - policy, - env, - horizon, - use_goals=False, - render=False, - video_writer=None, - video_skip=5, - terminate_on_success=False, -): + policy, + env, + horizon, + use_goals=False, + render=False, + video_writer=None, + video_skip=5, + terminate_on_success=False, + ): """ Runs a rollout in an environment with the current network parameters. @@ -213,7 +192,7 @@ def run_rollout( render (bool): if True, render the rollout to the screen - video_writer (imageio Writer instance): if not None, use video writer object to append frames at + video_writer (imageio Writer instance): if not None, use video writer object to append frames at rate given by @video_skip video_skip (int): how often to write video frame @@ -237,8 +216,8 @@ def run_rollout( results = {} video_count = 0 # video frame counter - total_reward = 0.0 - success = {k: False for k in env.is_success()} # success metrics + total_reward = 0. + success = { k: False for k in env.is_success() } # success metrics try: for step_i in range(horizon): @@ -288,19 +267,19 @@ def run_rollout( def rollout_with_stats( - policy, - envs, - horizon, - use_goals=False, - num_episodes=None, - render=False, - video_dir=None, - video_path=None, - epoch=None, - video_skip=5, - terminate_on_success=False, - verbose=False, -): + policy, + envs, + horizon, + use_goals=False, + num_episodes=None, + render=False, + video_dir=None, + video_path=None, + epoch=None, + video_skip=5, + terminate_on_success=False, + verbose=False, + ): """ A helper function used in the train loop to conduct evaluation rollouts per environment and summarize the results. 
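For reference, a minimal sketch (not part of this patch) of driving the `run_rollout` helper above on its own; `policy` and `env` are assumed to be a robomimic RolloutPolicy and EnvBase-style wrapper, and the video path is illustrative. The returned stats keys ("Return", "Success_Rate") are the same ones consumed by the logging code below:

    import imageio

    video_writer = imageio.get_writer("/tmp/rollout.mp4", fps=20)  # optional video dump
    stats = run_rollout(
        policy=policy,
        env=env,
        horizon=400,
        video_writer=video_writer,
        video_skip=5,                # append every 5th frame to the video
        terminate_on_success=True,   # stop early once env.is_success() reports success
    )
    video_writer.close()
    print(stats["Return"], stats["Success_Rate"])
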
@@ -333,10 +312,10 @@ def rollout_with_stats( terminate_on_success (bool): if True, terminate episode early as soon as a success is encountered verbose (bool): if True, print results of each rollout - + Returns: - all_rollout_logs (dict): dictionary of rollout statistics (e.g. return, success rate, ...) - averaged across all rollouts + all_rollout_logs (dict): dictionary of rollout statistics (e.g. return, success rate, ...) + averaged across all rollouts video_paths (dict): path to rollout videos for each environment """ @@ -345,24 +324,20 @@ def rollout_with_stats( all_rollout_logs = OrderedDict() # handle paths and create writers for video writing - assert (video_path is None) or ( - video_dir is None - ), "rollout_with_stats: can't specify both video path and dir" + assert (video_path is None) or (video_dir is None), "rollout_with_stats: can't specify both video path and dir" write_video = (video_path is not None) or (video_dir is not None) video_paths = OrderedDict() video_writers = OrderedDict() if video_path is not None: # a single video is written for all envs - video_paths = {k: video_path for k in envs} + video_paths = { k : video_path for k in envs } video_writer = imageio.get_writer(video_path, fps=20) - video_writers = {k: video_writer for k in envs} + video_writers = { k : video_writer for k in envs } if video_dir is not None: # video is written per env - video_str = "_epoch_{}.mp4".format(epoch) if epoch is not None else ".mp4" - video_paths = { - k: os.path.join(video_dir, "{}{}".format(k, video_str)) for k in envs - } - video_writers = {k: imageio.get_writer(video_paths[k], fps=20) for k in envs} + video_str = "_epoch_{}.mp4".format(epoch) if epoch is not None else ".mp4" + video_paths = { k : os.path.join(video_dir, "{}{}".format(k, video_str)) for k in envs } + video_writers = { k : imageio.get_writer(video_paths[k], fps=20) for k in envs } for env_name, env in envs.items(): env_video_writer = None @@ -370,14 +345,9 @@ def rollout_with_stats( print("video writes to " + video_paths[env_name]) env_video_writer = video_writers[env_name] - print( - "rollout: env={}, horizon={}, use_goals={}, num_episodes={}".format( - env.name, - horizon, - use_goals, - num_episodes, - ) - ) + print("rollout: env={}, horizon={}, use_goals={}, num_episodes={}".format( + env.name, horizon, use_goals, num_episodes, + )) rollout_logs = [] iterator = range(num_episodes) if not verbose: @@ -400,11 +370,7 @@ def rollout_with_stats( rollout_logs.append(rollout_info) num_success += rollout_info["Success_Rate"] if verbose: - print( - "Episode {}, horizon={}, num_success={}".format( - ep_i + 1, horizon, num_success - ) - ) + print("Episode {}, horizon={}, num_success={}".format(ep_i + 1, horizon, num_success)) print(json.dumps(rollout_info, sort_keys=True, indent=4)) if video_dir is not None: @@ -412,14 +378,9 @@ def rollout_with_stats( env_video_writer.close() # average metric across all episodes - rollout_logs = dict( - (k, [rollout_logs[i][k] for i in range(len(rollout_logs))]) - for k in rollout_logs[0] - ) + rollout_logs = dict((k, [rollout_logs[i][k] for i in range(len(rollout_logs))]) for k in rollout_logs[0]) rollout_logs_mean = dict((k, np.mean(v)) for k, v in rollout_logs.items()) - rollout_logs_mean["Time_Episode"] = ( - np.sum(rollout_logs["time"]) / 60.0 - ) # total time taken for rollouts in minutes + rollout_logs_mean["Time_Episode"] = np.sum(rollout_logs["time"]) / 60. 
# total time taken for rollouts in minutes all_rollout_logs[env_name] = rollout_logs_mean if video_path is not None: @@ -430,13 +391,13 @@ def rollout_with_stats( def should_save_from_rollout_logs( - all_rollout_logs, - best_return, - best_success_rate, - epoch_ckpt_name, - save_on_best_rollout_return, - save_on_best_rollout_success_rate, -): + all_rollout_logs, + best_return, + best_success_rate, + epoch_ckpt_name, + save_on_best_rollout_return, + save_on_best_rollout_success_rate, + ): """ Helper function used during training to determine whether checkpoints and videos should be saved. It will modify input attributes appropriately (such as updating @@ -456,10 +417,10 @@ def should_save_from_rollout_logs( epoch_ckpt_name (str): what to name the checkpoint file - this name might be modified by this function - save_on_best_rollout_return (bool): if True, should save checkpoints that achieve a + save_on_best_rollout_return (bool): if True, should save checkpoints that achieve a new best rollout return - save_on_best_rollout_success_rate (bool): if True, should save checkpoints that achieve a + save_on_best_rollout_success_rate (bool): if True, should save checkpoints that achieve a new best rollout success rate Returns: @@ -477,9 +438,7 @@ def should_save_from_rollout_logs( best_return[env_name] = rollout_logs["Return"] if save_on_best_rollout_return: # save checkpoint if achieve new best return - epoch_ckpt_name += "_{}_return_{}".format( - env_name, best_return[env_name] - ) + epoch_ckpt_name += "_{}_return_{}".format(env_name, best_return[env_name]) should_save_ckpt = True ckpt_reason = "return" @@ -487,9 +446,7 @@ def should_save_from_rollout_logs( best_success_rate[env_name] = rollout_logs["Success_Rate"] if save_on_best_rollout_success_rate: # save checkpoint if achieve new best success rate - epoch_ckpt_name += "_{}_success_{}".format( - env_name, best_success_rate[env_name] - ) + epoch_ckpt_name += "_{}_success_{}".format(env_name, best_success_rate[env_name]) should_save_ckpt = True ckpt_reason = "success" @@ -503,9 +460,7 @@ def should_save_from_rollout_logs( ) -def save_model( - model, config, env_meta, shape_meta, ckpt_path, obs_normalization_stats=None -): +def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization_stats=None): """ Save model to a torch pth file. @@ -541,7 +496,6 @@ def save_model( torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) - def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): """ Delete checkpoints in a directory, keeping top @top_n checkpoints based on lowest validation loss. Where checkpoints are saved in the form "model_epoch_{n}_best_validation_{validation loss}.pth @@ -570,27 +524,17 @@ def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): for ckpt in all_checkpoints[:-top_n]: os.remove(os.path.join(ckpt_dir, ckpt)) - def get_gpu_usage_mb(index): """Returns the GPU usage in B.""" h = nvmlDeviceGetHandleByIndex(index) info = nvmlDeviceGetMemoryInfo(h) - print(f"total : {info.total}") - print(f"free : {info.free}") - print(f"used : {info.used}") + print(f'total : {info.total}') + print(f'free : {info.free}') + print(f'used : {info.used}') return info.used / 1024 / 1024 - -def run_epoch( - model, - data_loader, - epoch, - validate=False, - num_steps=None, - obs_normalization_stats=None, - ac_key=None, -): +def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None): """ Run an epoch of training or validation. 
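A quick usage sketch for the new `delete_checkpoints` helper above, which ranks checkpoints by the validation loss parsed out of filenames of the form model_epoch_{n}_best_validation_{loss}.pth; the directory path here is illustrative:

    import robomimic.utils.train_utils as TrainUtils

    # keep the three checkpoints with the lowest validation loss, delete the rest
    TrainUtils.delete_checkpoints(
        ckpt_dir="/tmp/experiment/models",
        top_n=3,
        smallest=True,  # smallest=True keeps the lowest validation losses
    )
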
@@ -616,8 +560,8 @@ def run_epoch( step_log_all (dict): dictionary of logged training metrics averaged across all batches """ - # print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) - + #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) + epoch_timestamp = time.time() if validate: model.set_eval() @@ -647,9 +591,7 @@ def run_epoch( # process batch for training t = time.time() input_batch = model.process_batch_for_training(batch, ac_key=ac_key) - input_batch = model.postprocess_batch_for_training( - input_batch, obs_normalization_stats=obs_normalization_stats - ) + input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) timing_stats["Process_Batch"].append(time.time() - t) # forward and backward pass @@ -675,22 +617,12 @@ def run_epoch( # add in timing stats for k in timing_stats: # sum across all training steps, and convert from seconds to minutes - step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60.0 - step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60.0 + step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60. + step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60. return step_log_all - -def run_epoch_2_dataloaders( - model, - data_loader, - epoch, - data_loader_2, - validate=False, - num_steps=None, - obs_normalization_stats=None, - ac_key=None, -): +def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None): """ Run an epoch of training or validation. 
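`run_epoch_2_dataloaders` below consumes the two loaders in lockstep each training step, passing both processed batches to `model.train_on_batch` as a pair (or just the first batch when the second loader yields nothing). A sketch of that pairing pattern, under the assumption that the shorter loader simply wraps around (the patched code instead passes None through when the second batch is absent):

    iter_1 = iter(data_loader)
    iter_2 = iter(data_loader_2)
    for _ in range(num_steps):
        batch = next(iter_1)
        try:
            batch_2 = next(iter_2)
        except StopIteration:
            iter_2 = iter(data_loader_2)  # restart the exhausted second loader
            batch_2 = next(iter_2)
        # process both batches, then train on the pair:
        # model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate)
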
@@ -716,7 +648,7 @@ def run_epoch_2_dataloaders( step_log_all (dict): dictionary of logged training metrics averaged across all batches """ - # print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) + #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) # breakpoint() epoch_timestamp = time.time() if validate: @@ -753,23 +685,11 @@ def run_epoch_2_dataloaders( t = time.time() # breakpoint() input_batch = model.process_batch_for_training(batch, ac_key=ac_key) - input_batch_2 = ( - None - if batch_2 is None - else model.process_batch_for_training(batch_2, ac_key=ac_key) - ) + input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2, ac_key=ac_key) # breakpoint() - input_batch = model.postprocess_batch_for_training( - input_batch, obs_normalization_stats=obs_normalization_stats - ) - input_batch_2 = ( - None - if input_batch_2 is None - else model.postprocess_batch_for_training( - input_batch_2, obs_normalization_stats=obs_normalization_stats - ) - ) + input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) + input_batch_2 = None if input_batch_2 is None else model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats) timing_stats["Process_Batch"].append(time.time() - t) @@ -777,9 +697,7 @@ def run_epoch_2_dataloaders( t = time.time() # breakpoint() if input_batch_2 is not None: - info = model.train_on_batch( - [input_batch, input_batch_2], epoch, validate=validate - ) + info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate) else: info = model.train_on_batch(input_batch, epoch, validate=validate) timing_stats["Train_Batch"].append(time.time() - t) @@ -802,17 +720,16 @@ def run_epoch_2_dataloaders( # add in timing stats for k in timing_stats: # sum across all training steps, and convert from seconds to minutes - step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60.0 - step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60.0 + step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60. + step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60. return step_log_all - def is_every_n_steps(interval, current_step, skip_zero=False): """ - Convenient function to check whether current_step is at the interval. + Convenient function to check whether current_step is at the interval. Returns True if current_step % interval == 0 and asserts a few corner cases (e.g., interval <= 0) - + Args: interval (int): target interval current_step (int): current step diff --git a/robomimic/utils/vis_utils.py b/robomimic/utils/vis_utils.py index df6b3956..19c73d7a 100644 --- a/robomimic/utils/vis_utils.py +++ b/robomimic/utils/vis_utils.py @@ -2,7 +2,6 @@ This file contains utility functions for visualizing image observations in the training pipeline. These functions can be a useful debugging tool. 
""" - import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm @@ -22,7 +21,9 @@ def image_tensor_to_numpy(image): Returns: image (np.array): converted images of shape [..., H, W, C] and type uint8 """ - return TensorUtils.to_numpy(ObsUtils.unprocess_image(image)).astype(np.uint8) + return TensorUtils.to_numpy( + ObsUtils.unprocess_image(image) + ).astype(np.uint8) def image_to_disk(image, fname): @@ -106,5 +107,5 @@ def depth_to_rgb(depth_map, depth_min=None, depth_max=None): if len(depth_map.shape) == 3: assert depth_map.shape[-1] == 1 depth_map = depth_map[..., 0] - assert len(depth_map.shape) == 2 # [H, W] - return (255.0 * cm.hot(depth_map, 3)).astype(np.uint8)[..., :3] + assert len(depth_map.shape) == 2 # [H, W] + return (255. * cm.hot(depth_map, 3)).astype(np.uint8)[..., :3] diff --git a/setup.py b/setup.py index 9eb82082..0e1c510b 100644 --- a/setup.py +++ b/setup.py @@ -2,14 +2,13 @@ # read the contents of your README file from os import path - this_directory = path.abspath(path.dirname(__file__)) -with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: +with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: lines = f.readlines() # remove images from README -lines = [x for x in lines if ((".png" not in x) and (".gif" not in x))] -long_description = "".join(lines) +lines = [x for x in lines if (('.png' not in x) and ('.gif' not in x))] +long_description = ''.join(lines) setup( name="robomimic", @@ -31,14 +30,14 @@ "torch", "torchvision", ], - eager_resources=["*"], + eager_resources=['*'], include_package_data=True, - python_requires=">=3", + python_requires='>=3', description="robomimic: A Modular Framework for Robot Learning from Demonstration", author="Ajay Mandlekar, Danfei Xu, Josiah Wong, Soroush Nasiriany, Chen Wang, Matthew Bronars", url="https://github.com/ARISE-Initiative/robomimic", author_email="amandlek@cs.stanford.edu", version="0.3.0", long_description=long_description, - long_description_content_type="text/markdown", + long_description_content_type='text/markdown' ) diff --git a/tests/test_bc.py b/tests/test_bc.py index 7f823e66..adc12501 100644 --- a/tests/test_bc.py +++ b/tests/test_bc.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" - import argparse from collections import OrderedDict @@ -23,14 +22,9 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="bc") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.modalities.obs.rgb = [] # by default, vanilla BC @@ -53,33 +47,19 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = ( - "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -99,12 +79,9 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -302,9 +279,7 @@ def test_bc(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = 
TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, res_str)) @@ -312,7 +287,7 @@ def test_bc(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_bcq.py b/tests/test_bcq.py index b246a2cb..b8bd0835 100644 --- a/tests/test_bcq.py +++ b/tests/test_bcq.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ - import argparse from collections import OrderedDict @@ -23,20 +22,15 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="bcq") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.modalities.obs.rgb = [] # by default, vanilla BCQ - config.algo.actor.enabled = True # perturbation actor - config.algo.critic.distributional.enabled = False # vanilla critic training - config.algo.action_sampler.vae.enabled = True # action sampler is VAE + config.algo.actor.enabled = True # perturbation actor + config.algo.critic.distributional.enabled = False # vanilla critic training + config.algo.action_sampler.vae.enabled = True # action sampler is VAE config.algo.action_sampler.gmm.enabled = False return config @@ -53,33 +47,19 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = ( - "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 
1.0 # Default arguments for "SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -99,12 +79,9 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -117,7 +94,7 @@ def bcq_no_actor_modifier(config): @register_mod("bcq-distributional") def bcq_distributional_modifier(config): config.algo.critic.distributional.enabled = True - config.algo.critic.value_bounds = [-100.0, 100.0] + config.algo.critic.value_bounds = [-100., 100.] return config @@ -270,9 +247,7 @@ def test_bcq(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, res_str)) @@ -280,7 +255,7 @@ def test_bcq(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_cql.py b/tests/test_cql.py index 84811ee5..a78c4bf2 100644 --- a/tests/test_cql.py +++ b/tests/test_cql.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" - import argparse from collections import OrderedDict @@ -23,20 +22,15 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="cql") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.modalities.obs.rgb = [] # by default, vanilla CQL - config.algo.actor.bc_start_steps = 40 # BC training initially - config.algo.critic.target_q_gap = 5.0 # use automatic cql tuning - config.algo.actor.target_entropy = "default" # use automatic entropy tuning + config.algo.actor.bc_start_steps = 40 # BC training initially + config.algo.critic.target_q_gap = 5.0 # use automatic cql tuning + config.algo.actor.target_entropy = "default" # use automatic entropy tuning # lower batch size to 100 to accomodate small test dataset config.train.batch_size = 100 @@ -55,33 +49,19 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = ( - "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None 
to use no randomization, or 'CropRandomizer' to use crop randomization @@ -101,12 +81,9 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -159,9 +136,7 @@ def test_cql(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, res_str)) @@ -169,7 +144,7 @@ def test_cql(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_examples.py b/tests/test_examples.py index 21a3ff33..6696015f 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -2,7 +2,6 @@ Tests for the provided examples in the repository. Excludes stdout output by default (pass --verbose to see stdout output). """ - import argparse import traceback import os @@ -30,24 +29,16 @@ def test_example_script(script_name, args_string, test_name, silence=True): # run example script stdout = subprocess.DEVNULL if silence else None - path_to_script = os.path.join( - robomimic.__path__[0], "../examples/{}".format(script_name) - ) - example_job = subprocess.Popen( - "python {} {}".format(path_to_script, args_string), - shell=True, - stdout=stdout, - stderr=subprocess.PIPE, - ) + path_to_script = os.path.join(robomimic.__path__[0], "../examples/{}".format(script_name)) + example_job = subprocess.Popen("python {} {}".format(path_to_script, args_string), + shell=True, stdout=stdout, stderr=subprocess.PIPE) example_job.wait() # get stderr output out, err = example_job.communicate() err = err.decode("utf-8") if len(err) > 0: - ret = "maybe failed - stderr output below (if it's only from tqdm, the test passed)\n{}".format( - err - ) + ret = "maybe failed - stderr output below (if it's only from tqdm, the test passed)\n{}".format(err) ret = colored(ret, "red") else: ret = colored("passed", "green") @@ -58,35 +49,35 @@ def test_example_script(script_name, args_string, test_name, silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() test_example_script( - script_name="simple_config.py", + script_name="simple_config.py", args_string="", - test_name="simple-config-example", + test_name="simple-config-example", silence=(not args.verbose), ) test_example_script( - script_name="simple_obs_nets.py", + script_name="simple_obs_nets.py", args_string="", - test_name="simple-obs-nets-example", + test_name="simple-obs-nets-example", silence=(not args.verbose), ) test_example_script( - script_name="simple_train_loop.py", + script_name="simple_train_loop.py", args_string="", - test_name="simple-train-loop-example", + test_name="simple-train-loop-example", silence=(not args.verbose), ) # clear tmp model dir before running script TestUtils.maybe_remove_dir(TestUtils.temp_model_dir_path()) test_example_script( - script_name="train_bc_rnn.py", + script_name="train_bc_rnn.py", args_string="--debug", - test_name="train-bc-rnn-example", + 
test_name="train-bc-rnn-example", silence=(not args.verbose), ) # cleanup diff --git a/tests/test_hbc.py b/tests/test_hbc.py index 1a1c2946..e5560696 100644 --- a/tests/test_hbc.py +++ b/tests/test_hbc.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ - import argparse from collections import OrderedDict @@ -22,30 +21,15 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="hbc") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.planner.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.planner.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.planner.modalities.obs.rgb = [] - config.observation.planner.modalities.subgoal.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.planner.modalities.subgoal.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.planner.modalities.subgoal.rgb = [] - config.observation.actor.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.actor.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.actor.modalities.obs.rgb = [] # by default, planner is deterministic prediction @@ -56,12 +40,9 @@ def get_algo_base_config(): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -187,9 +168,7 @@ def test_hbc(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, res_str)) @@ -197,7 +176,7 @@ def test_hbc(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_iql.py b/tests/test_iql.py index ebd521bc..e80a8f3b 100644 --- a/tests/test_iql.py +++ b/tests/test_iql.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). 
""" - import argparse from collections import OrderedDict @@ -25,12 +24,7 @@ def get_algo_base_config(): # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example HBC) - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.modalities.obs.rgb = [] return config @@ -47,33 +41,19 @@ def convert_config_for_images(config): config.train.batch_size = 16 # replace object with rgb modality - config.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - ] + config.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] config.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders config.observation.encoder.rgb.core_class = "VisualCore" config.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - config.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) + config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False - config.observation.encoder.rgb.core_kwargs.pool_class = ( - "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) + config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -93,12 +73,9 @@ def make_image_modifier(config_modifier): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -150,9 +127,7 @@ def test_iql(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, 
res_str)) @@ -160,7 +135,7 @@ def test_iql(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_iris.py b/tests/test_iris.py index 44f130dc..126c5c28 100644 --- a/tests/test_iris.py +++ b/tests/test_iris.py @@ -4,7 +4,6 @@ the model. Excludes stdout output by default (pass --verbose to see stdout output). """ - import argparse from collections import OrderedDict @@ -22,38 +21,18 @@ def get_algo_base_config(): # config with basic settings for quick training run config = TestUtils.get_base_config(algo_name="iris") - # low-level obs (note that we define it here because @observation structure might vary per algorithm, + # low-level obs (note that we define it here because @observation structure might vary per algorithm, # for example iris) - config.observation.value_planner.planner.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.value_planner.planner.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.value_planner.planner.modalities.obs.rgb = [] - config.observation.value_planner.planner.modalities.subgoal.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.value_planner.planner.modalities.subgoal.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.value_planner.planner.modalities.subgoal.rgb = [] - config.observation.value_planner.value.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.value_planner.value.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.value_planner.value.modalities.obs.rgb = [] - config.observation.actor.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object", - ] + config.observation.actor.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos", "object"] config.observation.actor.modalities.obs.rgb = [] # by default, basic N(0, 1) prior for both planner VAE and BCQ cVAE @@ -69,12 +48,9 @@ def get_algo_base_config(): # mapping from test name to config modifier functions MODIFIERS = OrderedDict() - - def register_mod(test_name): def decorator(config_modifier): MODIFIERS[test_name] = config_modifier - return decorator @@ -199,10 +175,9 @@ def iris_modifier_11(config): def iris_modifier_12(config): # bcq value function is distributional config.algo.value_planner.value.critic.distributional.enabled = True - config.algo.value_planner.value.critic.value_bounds = [-100.0, 100.0] + config.algo.value_planner.value.critic.value_bounds = [-100., 100.] 
return config - @register_mod("iris, bcq cVAE Gaussian prior (obs-independent)") def iris_modifier_13(config): # learn parameters of Gaussian prior (obs-independent) @@ -311,9 +286,7 @@ def test_iris(silence=True): context = silence_stdout() if silence else dummy_context_mgr() with context: base_config = get_algo_base_config() - res_str = TestUtils.test_run( - base_config=base_config, config_modifier=MODIFIERS[test_name] - ) + res_str = TestUtils.test_run(base_config=base_config, config_modifier=MODIFIERS[test_name]) print("{}: {}".format(test_name, res_str)) @@ -321,7 +294,7 @@ def test_iris(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() diff --git a/tests/test_scripts.py b/tests/test_scripts.py index 8807f3bb..30ed7f61 100644 --- a/tests/test_scripts.py +++ b/tests/test_scripts.py @@ -2,7 +2,6 @@ Tests for a handful of scripts. Excludes stdout output by default (pass --verbose to see stdout output). """ - import argparse import traceback import h5py @@ -40,33 +39,19 @@ def image_modifier(conf): conf.train.batch_size = 16 # replace object with rgb modality - conf.observation.modalities.obs.low_dim = [ - "robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - ] + conf.observation.modalities.obs.low_dim = ["robot0_eef_pos", "robot0_eef_quat", "robot0_gripper_qpos"] conf.observation.modalities.obs.rgb = ["agentview_image"] # set up visual encoders conf.observation.encoder.rgb.core_class = "VisualCore" conf.observation.encoder.rgb.core_kwargs.feature_dimension = 64 - conf.observation.encoder.rgb.core_kwargs.backbone_class = "ResNet18Conv" # ResNet backbone for image observations (unused if no image observations) - conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = ( - False # kwargs for visual core - ) - conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = ( - False - ) - conf.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = ( - 32 # Default arguments for "SpatialSoftmax" - ) - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = ( - False # Default arguments for "SpatialSoftmax" - ) - conf.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = ( - 1.0 # Default arguments for "SpatialSoftmax" - ) + conf.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv' # ResNet backbone for image observations (unused if no image observations) + conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False # kwargs for visual core + conf.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False + conf.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax" # Alternate options are "SpatialMeanPool" or None (no pooling) + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32 # Default arguments for "SpatialSoftmax" + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False # Default arguments for "SpatialSoftmax" + conf.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0 # Default arguments for "SpatialSoftmax" conf.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0 # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization @@ -74,9 +59,7 @@ def 
image_modifier(conf): return conf - config = TestUtils.config_from_modifier( - base_config=config, config_modifier=image_modifier - ) + config = TestUtils.config_from_modifier(base_config=config, config_modifier=image_modifier) # run training device = TorchUtils.get_torch_device(try_to_use_cuda=True) @@ -96,18 +79,15 @@ def test_playback_script(silence=True, use_actions=False, use_obs=False): args = argparse.Namespace() args.dataset = TestUtils.example_dataset_path() args.filter_key = None - args.n = 3 # playback 3 demonstrations + args.n = 3 # playback 3 demonstrations args.use_actions = use_actions args.use_obs = use_obs args.render = False - args.video_path = TestUtils.temp_video_path() # dump video + args.video_path = TestUtils.temp_video_path() # dump video args.video_skip = 5 if use_obs: # camera observation names - args.render_image_names = [ - "agentview_image", - "robot0_eye_in_hand_image", - ] + args.render_image_names = ["agentview_image", "robot0_eye_in_hand_image"] else: # camera names args.render_image_names = ["agentview", "robot0_eye_in_hand"] @@ -119,9 +99,7 @@ def test_playback_script(silence=True, use_actions=False, use_obs=False): except Exception as e: # indicate failure by returning error string - ret = colored( - "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" - ) + ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") # delete output video TestUtils.maybe_remove_file(TestUtils.temp_video_path()) @@ -143,14 +121,14 @@ def test_run_agent_script(silence=True): # setup args and run script args = argparse.Namespace() args.agent = ckpt_path - args.n_rollouts = 3 # 3 rollouts - args.horizon = 10 # short rollouts - 10 steps + args.n_rollouts = 3 # 3 rollouts + args.horizon = 10 # short rollouts - 10 steps args.env = None args.render = False - args.video_path = TestUtils.temp_video_path() # dump video + args.video_path = TestUtils.temp_video_path() # dump video args.video_skip = 5 args.camera_names = ["agentview", "robot0_eye_in_hand"] - args.dataset_path = TestUtils.temp_dataset_path() # dump dataset + args.dataset_path = TestUtils.temp_dataset_path() # dump dataset args.dataset_obs = True args.seed = 0 run_trained_agent(args) @@ -166,9 +144,7 @@ def test_run_agent_script(silence=True): except Exception as e: # indicate failure by returning error string - ret = colored( - "failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red" - ) + ret = colored("failed with error:\n{}\n\n{}".format(e, traceback.format_exc()), "red") # delete trained model directory, output video, and output dataset TestUtils.maybe_remove_dir(TestUtils.temp_model_dir_path()) @@ -183,7 +159,7 @@ def test_run_agent_script(silence=True): parser = argparse.ArgumentParser() parser.add_argument( "--verbose", - action="store_true", + action='store_true', help="don't suppress stdout during tests", ) args = parser.parse_args() From d6eed8606ee95aded934c03db8001b71ee9da85e Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Fri, 18 Oct 2024 14:24:20 -0400 Subject: [PATCH 36/44] revert readme changes --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 063eac13..bdc3556f 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,6 @@ - [05/23/2022] **v0.2.1**: Updated website and documentation to feature more tutorials :notebook_with_decorative_cover: - [12/16/2021] **v0.2.0**: Modular observation modalities and encoders :wrench:, support for [MOMART](https://sites.google.com/view/il-for-mm/home) datasets 
:open_file_folder: [[release notes]](https://github.com/ARISE-Initiative/robomimic/releases/tag/v0.2.0) [[documentation]](https://robomimic.github.io/docs/v0.2/introduction/overview.html) - [08/09/2021] **v0.1.0**: Initial code and paper release -## Installation -1. Clone the repo with the `--recurse-submodules` flag. -2. (if applicable) switch to `r2d2` branch -3. Run `pip install -e .` in `robomimic` -4. Run `pip install -e .` in `robomimic/act/detr` ------- From ffca93b8e1b7abdeeeb49e00f53834d9a764c35e Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Mon, 21 Oct 2024 13:57:38 -0400 Subject: [PATCH 37/44] cleanup unused changes --- robomimic/algo/__init__.py | 1 - robomimic/config/__init__.py | 3 +- robomimic/scripts/config_gen/act_gen.py | 131 ---- robomimic/scripts/config_gen/helper.py | 954 ------------------------ robomimic/utils/env_utils.py | 1 - robomimic/utils/train_utils.py | 146 +--- 6 files changed, 2 insertions(+), 1234 deletions(-) delete mode 100644 robomimic/scripts/config_gen/act_gen.py delete mode 100644 robomimic/scripts/config_gen/helper.py diff --git a/robomimic/algo/__init__.py b/robomimic/algo/__init__.py index dbe2ea4d..68d70a57 100644 --- a/robomimic/algo/__init__.py +++ b/robomimic/algo/__init__.py @@ -9,4 +9,3 @@ from robomimic.algo.hbc import HBC from robomimic.algo.iris import IRIS from robomimic.algo.td3_bc import TD3_BC -# from robomimic.algo.diffusion_policy import DiffusionPolicyUNet diff --git a/robomimic/config/__init__.py b/robomimic/config/__init__.py index b4f857f1..fa60a2f5 100644 --- a/robomimic/config/__init__.py +++ b/robomimic/config/__init__.py @@ -9,5 +9,4 @@ from robomimic.config.gl_config import GLConfig from robomimic.config.hbc_config import HBCConfig from robomimic.config.iris_config import IRISConfig -from robomimic.config.td3_bc_config import TD3_BCConfig -# from robomimic.config.diffusion_policy_config import DiffusionPolicyConfig +from robomimic.config.td3_bc_config import TD3_BCConfig \ No newline at end of file diff --git a/robomimic/scripts/config_gen/act_gen.py b/robomimic/scripts/config_gen/act_gen.py deleted file mode 100644 index 8962941d..00000000 --- a/robomimic/scripts/config_gen/act_gen.py +++ /dev/null @@ -1,131 +0,0 @@ -from robomimic.scripts.config_gen.helper import * - -def make_generator_helper(args): - algo_name_short = "act" - generator = get_generator( - algo_name="act", - config_file=os.path.join(base_path, 'robomimic/exps/templates/act.json'), - args=args, - algo_name_short=algo_name_short, - pt=True, - ) - if args.ckpt_mode is None: - args.ckpt_mode = "off" - - - generator.add_param( - key="train.num_epochs", - name="", - group=-1, - values=[1000], - ) - - generator.add_param( - key="train.batch_size", - name="", - group=-1, - values=[64], - ) - - generator.add_param( - key="train.max_grad_norm", - name="", - group=-1, - values=[100.0], - ) - - if args.env == "r2d2": - generator.add_param( - key="train.data", - name="ds", - group=2, - values=[ - [{"path": p} for p in scan_datasets("~/Downloads/example_pen_in_cup", postfix="trajectory_im128.h5")], - ], - value_names=[ - "pen-in-cup", - ], - ) - generator.add_param( - key="train.action_keys", - name="ac_keys", - group=-1, - values=[ - [ - "action/abs_pos", - "action/abs_rot_6d", - "action/gripper_position", - ], - ], - value_names=[ - "abs", - ], - ) - elif args.env == "kitchen": - raise NotImplementedError - elif args.env == "square": - generator.add_param( - key="train.data", - name="ds", - group=2, - values=[ - [ - {"path": "TODO.hdf5"}, # replace with your own 
path - ], - ], - value_names=[ - "square", - ], - ) - - # update env config to use absolute action control - generator.add_param( - key="experiment.env_meta_update_dict", - name="", - group=-1, - values=[ - {"env_kwargs": {"controller_configs": {"control_delta": False}}} - ], - ) - - generator.add_param( - key="train.action_keys", - name="ac_keys", - group=-1, - values=[ - [ - "action_dict/abs_pos", - "action_dict/abs_rot_6d", - "action_dict/gripper", - # "actions", - ], - ], - value_names=[ - "abs", - ], - ) - - - else: - raise ValueError - - generator.add_param( - key="train.output_dir", - name="", - group=-1, - values=[ - "~/expdata/{env}/{mod}/{algo_name_short}".format( - env=args.env, - mod=args.mod, - algo_name_short=algo_name_short, - ) - ], - ) - - return generator - -if __name__ == "__main__": - parser = get_argparser() - - args = parser.parse_args() - make_generator(args, make_generator_helper) diff --git a/robomimic/scripts/config_gen/helper.py b/robomimic/scripts/config_gen/helper.py deleted file mode 100644 index 48a3af07..00000000 --- a/robomimic/scripts/config_gen/helper.py +++ /dev/null @@ -1,954 +0,0 @@ -import argparse -import os -import time -import datetime - -import robomimic -import robomimic.utils.hyperparam_utils as HyperparamUtils - -base_path = os.path.abspath(os.path.join(os.path.dirname(robomimic.__file__), os.pardir)) - -def scan_datasets(folder, postfix=".h5"): - dataset_paths = [] - for root, dirs, files in os.walk(os.path.expanduser(folder)): - for f in files: - if f.endswith(postfix): - dataset_paths.append(os.path.join(root, f)) - return dataset_paths - - -def get_generator(algo_name, config_file, args, algo_name_short=None, pt=False): - if args.wandb_proj_name is None: - strings = [ - algo_name_short if (algo_name_short is not None) else algo_name, - args.name, - args.env, - args.mod, - ] - args.wandb_proj_name = '_'.join([str(s) for s in strings if s is not None]) - - if args.script is not None: - generated_config_dir = os.path.join(os.path.dirname(args.script), "json") - else: - curr_time = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-%y-%H-%M-%S') - generated_config_dir=os.path.join( - '~/', 'tmp/autogen_configs/ril', algo_name, args.env, args.mod, args.name, curr_time, "json", - ) - - generator = HyperparamUtils.ConfigGenerator( - base_config_file=config_file, - generated_config_dir=generated_config_dir, - wandb_proj_name=args.wandb_proj_name, - script_file=args.script, - ) - - args.algo_name = algo_name - args.pt = pt - - return generator - - -def set_env_settings(generator, args): - if args.env in ["r2d2"]: - assert args.mod == "im" - generator.add_param( - key="experiment.rollout.enabled", - name="", - group=-1, - values=[ - False - ], - ) - generator.add_param( - key="experiment.save.every_n_epochs", - name="", - group=-1, - values=[50], - ) - generator.add_param( - key="experiment.mse.enabled", - name="", - group=-1, - values=[True], - ), - generator.add_param( - key="experiment.mse.every_n_epochs", - name="", - group=-1, - values=[50], - ), - generator.add_param( - key="experiment.mse.on_save_ckpt", - name="", - group=-1, - values=[True], - ), - generator.add_param( - key="experiment.mse.num_samples", - name="", - group=-1, - values=[20], - ), - generator.add_param( - key="experiment.mse.visualize", - name="", - group=-1, - values=[True], - ), - if "observation.modalities.obs.low_dim" not in generator.parameters: - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - 
["robot_state/cartesian_position", "robot_state/gripper_position"] - ], - ) - if "observation.modalities.obs.rgb" not in generator.parameters: - generator.add_param( - key="observation.modalities.obs.rgb", - name="", - group=-1, - values=[ - [ - "camera/image/hand_camera_left_image", - "camera/image/varied_camera_1_left_image", "camera/image/varied_camera_2_left_image" # uncomment to use all 3 cameras - ] - ], - ) - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_class", - name="obsrand", - group=-1, - values=[ - # "CropRandomizer", # crop only - # "ColorRandomizer", # jitter only - ["ColorRandomizer", "CropRandomizer"], # jitter, followed by crop - ], - hidename=True, - ) - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_kwargs", - name="obsrandargs", - group=-1, - values=[ - # {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}, # crop only - # {}, # jitter only - [{}, {"crop_height": 116, "crop_width": 116, "num_crops": 1, "pos_enc": False}], # jitter, followed by crop - ], - hidename=True, - ) - if ("observation.encoder.rgb.obs_randomizer_kwargs" not in generator.parameters) and \ - ("observation.encoder.rgb.obs_randomizer_kwargs.crop_height" not in generator.parameters): - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", - name="", - group=-1, - values=[ - 116 - ], - ) - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", - name="", - group=-1, - values=[ - 116 - ], - ) - # remove spatial softmax by default for r2d2 dataset - generator.add_param( - key="observation.encoder.rgb.core_kwargs.pool_class", - name="", - group=-1, - values=[ - None - ], - ) - generator.add_param( - key="observation.encoder.rgb.core_kwargs.pool_kwargs", - name="", - group=-1, - values=[ - None - ], - ) - - # specify dataset type is r2d2 rather than default robomimic - generator.add_param( - key="train.data_format", - name="", - group=-1, - values=[ - "r2d2" - ], - ) - - # here, we list how each action key should be treated (normalized etc) - generator.add_param( - key="train.action_config", - name="", - group=-1, - values=[ - { - "action/cartesian_position":{ - "normalization": "min_max", - }, - "action/abs_pos":{ - "normalization": "min_max", - }, - "action/abs_rot_6d":{ - "normalization": "min_max", - "format": "rot_6d", - "convert_at_runtime": "rot_euler", - }, - "action/abs_rot_euler":{ - "normalization": "min_max", - "format": "rot_euler", - }, - "action/gripper_position":{ - "normalization": "min_max", - }, - "action/cartesian_velocity":{ - "normalization": None, - }, - "action/rel_pos":{ - "normalization": None, - }, - "action/rel_rot_6d":{ - "format": "rot_6d", - "normalization": None, - "convert_at_runtime": "rot_euler", - }, - "action/rel_rot_euler":{ - "format": "rot_euler", - "normalization": None, - }, - "action/gripper_velocity":{ - "normalization": None, - }, - } - ], - ) - generator.add_param( - key="train.dataset_keys", - name="", - group=-1, - values=[[]], - ) - if "train.action_keys" not in generator.parameters: - generator.add_param( - key="train.action_keys", - name="ac_keys", - group=-1, - values=[ - [ - "action/rel_pos", - "action/rel_rot_euler", - "action/gripper_velocity", - ], - ], - value_names=[ - "rel", - ], - ) - # observation key groups to swap - generator.add_param( - key="train.shuffled_obs_key_groups", - name="", - group=-1, - values=[[[ - ( - "camera/image/varied_camera_1_left_image", - "camera/image/varied_camera_1_right_image", - 
"camera/extrinsics/varied_camera_1_left", - "camera/extrinsics/varied_camera_1_right", - ), - ( - "camera/image/varied_camera_2_left_image", - "camera/image/varied_camera_2_right_image", - "camera/extrinsics/varied_camera_2_left", - "camera/extrinsics/varied_camera_2_right", - ), - ]]], - ) - elif args.env == "kitchen": - generator.add_param( - key="train.action_config", - name="", - group=-1, - values=[ - { - "actions":{ - "normalization": None, - }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, - "action_dict/abs_rot_axis_angle": { - "normalization": "min_max", - "format": "rot_axis_angle" - }, - "action_dict/abs_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/rel_pos": { - "normalization": None, - }, - "action_dict/rel_rot_axis_angle": { - "normalization": None, - "format": "rot_axis_angle" - }, - "action_dict/rel_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/gripper": { - "normalization": None, - }, - "action_dict/base_mode": { - "normalization": None, - } - } - ], - ) - - if args.mod == 'im': - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_base_pos", - "robot0_gripper_qpos"] - ], - ) - generator.add_param( - key="observation.modalities.obs.rgb", - name="", - group=-1, - values=[ - ["robot0_agentview_left_image", - "robot0_agentview_right_image", - "robot0_eye_in_hand_image"] - ], - ) - else: - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot0_base_pos", - "object", - ] - ], - ) - elif args.env in ['square', 'lift', 'place_close']: - # # set videos off - # args.no_video = True - - generator.add_param( - key="train.action_config", - name="", - group=-1, - values=[ - { - "actions":{ - "normalization": None, - }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, - "action_dict/abs_rot_axis_angle": { - "normalization": "min_max", - "format": "rot_axis_angle" - }, - "action_dict/abs_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/rel_pos": { - "normalization": None, - }, - "action_dict/rel_rot_axis_angle": { - "normalization": None, - "format": "rot_axis_angle" - }, - "action_dict/rel_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/gripper": { - "normalization": None, - } - } - ], - ) - - if args.mod == 'im': - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos"] - ], - ) - generator.add_param( - key="observation.modalities.obs.rgb", - name="", - group=-1, - values=[ - ["agentview_image", - "robot0_eye_in_hand_image"] - ], - ) - else: - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object"] - ], - ) - elif args.env == 'transport': - # set videos off - args.no_video = True - - # TODO: fix 2 robot case - generator.add_param( - key="train.action_config", - name="", - group=-1, - values=[ - { - "actions":{ - "normalization": None, - }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, - "action_dict/abs_rot_axis_angle": { - "normalization": "min_max", - "format": "rot_axis_angle" - }, - "action_dict/abs_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - 
"action_dict/rel_pos": { - "normalization": None, - }, - "action_dict/rel_rot_axis_angle": { - "normalization": None, - "format": "rot_axis_angle" - }, - "action_dict/rel_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/gripper": { - "normalization": None, - } - } - ], - ) - - if args.mod == 'im': - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos"] - ], - ) - generator.add_param( - key="observation.modalities.obs.rgb", - name="", - group=-1, - values=[ - ["shouldercamera0_image", - "robot0_eye_in_hand_image", - "shouldercamera1_image", - "robot1_eye_in_hand_image"] - ], - ) - else: - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "robot1_eef_pos", - "robot1_eef_quat", - "robot1_gripper_qpos", - "object"] - ], - ) - - generator.add_param( - key="experiment.rollout.horizon", - name="", - group=-1, - values=[700], - ) - elif args.env == 'tool_hang': - # set videos off - args.no_video = True - - generator.add_param( - key="train.action_config", - name="", - group=-1, - values=[ - { - "actions":{ - "normalization": None, - }, - "action_dict/abs_pos": { - "normalization": "min_max" - }, - "action_dict/abs_rot_axis_angle": { - "normalization": "min_max", - "format": "rot_axis_angle" - }, - "action_dict/abs_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/rel_pos": { - "normalization": None, - }, - "action_dict/rel_rot_axis_angle": { - "normalization": None, - "format": "rot_axis_angle" - }, - "action_dict/rel_rot_6d": { - "normalization": None, - "format": "rot_6d" - }, - "action_dict/gripper": { - "normalization": None, - } - } - ], - ) - - if args.mod == 'im': - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos"] - ], - ) - generator.add_param( - key="observation.modalities.obs.rgb", - name="", - group=-1, - values=[ - ["sideview_image", - "robot0_eye_in_hand_image"] - ], - ) - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_kwargs.crop_height", - name="", - group=-1, - values=[ - 216 - ], - ) - generator.add_param( - key="observation.encoder.rgb.obs_randomizer_kwargs.crop_width", - name="", - group=-1, - values=[ - 216 - ], - ) - generator.add_param( - key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_height", - name="", - group=-1, - values=[ - 216 - ], - ) - generator.add_param( - key="observation.encoder.rgb2.obs_randomizer_kwargs.crop_width", - name="", - group=-1, - values=[ - 216 - ], - ) - else: - generator.add_param( - key="observation.modalities.obs.low_dim", - name="", - group=-1, - values=[ - ["robot0_eef_pos", - "robot0_eef_quat", - "robot0_gripper_qpos", - "object"] - ], - ) - - generator.add_param( - key="experiment.rollout.horizon", - name="", - group=-1, - values=[700], - ) - else: - raise ValueError - - -def set_mod_settings(generator, args): - if args.mod == 'ld': - if "experiment.save.epochs" not in generator.parameters: - generator.add_param( - key="experiment.save.epochs", - name="", - group=-1, - values=[ - [2000] - ], - ) - elif args.mod == 'im': - if "experiment.save.every_n_epochs" not in generator.parameters: - generator.add_param( - key="experiment.save.every_n_epochs", - 
name="", - group=-1, - values=[40], - ) - - generator.add_param( - key="experiment.epoch_every_n_steps", - name="", - group=-1, - values=[500], - ) - if "train.num_data_workers" not in generator.parameters: - generator.add_param( - key="train.num_data_workers", - name="", - group=-1, - values=[4], - ) - generator.add_param( - key="train.hdf5_cache_mode", - name="", - group=-1, - values=["low_dim"], - ) - if "train.batch_size" not in generator.parameters: - generator.add_param( - key="train.batch_size", - name="", - group=-1, - values=[16], - ) - if "train.num_epochs" not in generator.parameters: - generator.add_param( - key="train.num_epochs", - name="", - group=-1, - values=[600], - ) - if "experiment.rollout.rate" not in generator.parameters: - generator.add_param( - key="experiment.rollout.rate", - name="", - group=-1, - values=[40], - ) - - -def set_debug_mode(generator, args): - if not args.debug: - return - - generator.add_param( - key="experiment.mse.every_n_epochs", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="experiment.mse.visualize", - name="", - group=-1, - values=[True], - value_names=[""], - ) - generator.add_param( - key="experiment.rollout.n", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="experiment.rollout.horizon", - name="", - group=-1, - values=[30], - value_names=[""], - ) - generator.add_param( - key="experiment.rollout.rate", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="experiment.epoch_every_n_steps", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="experiment.save.every_n_epochs", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="experiment.validation_epoch_every_n_steps", - name="", - group=-1, - values=[2], - value_names=[""], - ) - generator.add_param( - key="train.num_epochs", - name="", - group=-1, - values=[2], - value_names=[""], - ) - if args.name is None: - generator.add_param( - key="experiment.name", - name="", - group=-1, - values=["debug"], - value_names=[""], - ) - generator.add_param( - key="experiment.save.enabled", - name="", - group=-1, - values=[False], - value_names=[""], - ) - generator.add_param( - key="train.hdf5_cache_mode", - name="", - group=-1, - values=["low_dim"], - value_names=[""], - ) - generator.add_param( - key="train.num_data_workers", - name="", - group=-1, - values=[3], - ) - - -def set_output_dir(generator, args): - assert args.name is not None - - vals = generator.parameters["train.output_dir"].values - - for i in range(len(vals)): - vals[i] = os.path.join(vals[i], args.name) - - -def set_wandb_mode(generator, args): - generator.add_param( - key="experiment.logging.log_wandb", - name="", - group=-1, - values=[not args.no_wandb], - ) - - -def set_num_seeds(generator, args): - if args.n_seeds is not None and "train.seed" not in generator.parameters: - generator.add_param( - key="train.seed", - name="seed", - group=-10, - values=[i + 1 for i in range(args.n_seeds)], - prepend=True, - ) - - -def get_argparser(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--name", - type=str, - ) - - parser.add_argument( - "--env", - type=str, - default='r2d2', - ) - - parser.add_argument( - '--mod', - type=str, - choices=['ld', 'im'], - default='im', - ) - - parser.add_argument( - "--ckpt_mode", - type=str, - choices=["off", "all", "best_only"], - default=None, - ) - - parser.add_argument( - "--script", - 
type=str, - default=None - ) - - parser.add_argument( - "--wandb_proj_name", - type=str, - default=None - ) - - parser.add_argument( - "--debug", - action="store_true", - ) - - parser.add_argument( - '--no_video', - action='store_true' - ) - - parser.add_argument( - "--tmplog", - action="store_true", - ) - - parser.add_argument( - "--nr", - type=int, - default=-1 - ) - - parser.add_argument( - "--no_wandb", - action="store_true", - ) - - parser.add_argument( - "--n_seeds", - type=int, - default=None - ) - - parser.add_argument( - "--num_cmd_groups", - type=int, - default=None - ) - - return parser - - -def make_generator(args, make_generator_helper): - if args.tmplog or args.debug and args.name is None: - args.name = "debug" - else: - time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') - args.name = time_str + str(args.name) - - if args.debug or args.tmplog: - args.no_wandb = True - - if args.wandb_proj_name is not None: - # prepend data to wandb name - # time_str = datetime.datetime.fromtimestamp(time.time()).strftime('%m-%d-') - # args.wandb_proj_name = time_str + args.wandb_proj_name - pass - - if (args.debug or args.tmplog) and (args.wandb_proj_name is None): - args.wandb_proj_name = 'debug' - - if not args.debug: - assert args.name is not None - - # make config generator - generator = make_generator_helper(args) - - if args.ckpt_mode is None: - if args.pt: - args.ckpt_mode = "all" - else: - args.ckpt_mode = "best_only" - - set_env_settings(generator, args) - set_mod_settings(generator, args) - set_output_dir(generator, args) - set_num_seeds(generator, args) - set_wandb_mode(generator, args) - - # set the debug settings last, to override previous setting changes - set_debug_mode(generator, args) - - """ misc settings """ - generator.add_param( - key="experiment.validate", - name="", - group=-1, - values=[ - False, - ], - ) - - # generate jsons and script - generator.generate(override_base_name=True) diff --git a/robomimic/utils/env_utils.py b/robomimic/utils/env_utils.py index b656ea64..465b5091 100644 --- a/robomimic/utils/env_utils.py +++ b/robomimic/utils/env_utils.py @@ -134,7 +134,6 @@ def is_robosuite_env(env_meta=None, env_type=None, env=None): Determines whether the environment is a robosuite environment. Accepts either env_meta, env_type, or env. """ - return False return check_env_type(type_to_check=EB.EnvType.ROBOSUITE_TYPE, env_meta=env_meta, env_type=env_type, env=env) diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index 58de759d..5aa00e89 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -156,7 +156,7 @@ def dataset_factory(config, obs_keys, filter_by_attribute=None, dataset_path=Non seq_length=config.train.seq_length, pad_frame_stack=config.train.pad_frame_stack, pad_seq_length=config.train.pad_seq_length, - get_pad_mask=True, + get_pad_mask=False, goal_mode=config.train.goal_mode, hdf5_cache_mode=config.train.hdf5_cache_mode, hdf5_use_swmr=config.train.hdf5_use_swmr, @@ -496,44 +496,6 @@ def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) -def delete_checkpoints(ckpt_dir, top_n=3, smallest=True): - """ - Delete checkpoints in a directory, keeping top @top_n checkpoints based on lowest validation loss. 
Where checkpoints are saved in the form "model_epoch_{n}_best_validation_{validation loss}.pth - """ - # get all checkpoints - all_checkpoints = [] - for filename in os.listdir(ckpt_dir): - if filename.endswith(".pth"): - all_checkpoints.append(filename) - all_checkpoints = sorted(all_checkpoints) - - # get validation losses - validation_losses = [] - for ckpt in all_checkpoints: - val_loss = float(ckpt.split("best_validation_")[1].split(".pth")[0]) - - validation_losses.append((val_loss, ckpt)) - # validation_losses = np.array(validation_losses) - validation_losses = sorted(validation_losses, key=lambda x: x[0]) - - # delete checkpoints - if smallest: - for ckpt in all_checkpoints[top_n:]: - os.remove(os.path.join(ckpt_dir, ckpt)) - else: - for ckpt in all_checkpoints[:-top_n]: - os.remove(os.path.join(ckpt_dir, ckpt)) - -def get_gpu_usage_mb(index): - """Returns the GPU usage in B.""" - h = nvmlDeviceGetHandleByIndex(index) - info = nvmlDeviceGetMemoryInfo(h) - print(f'total : {info.total}') - print(f'free : {info.free}') - print(f'used : {info.used}') - - return info.used / 1024 / 1024 - def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None): """ Run an epoch of training or validation. @@ -559,9 +521,6 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor Returns: step_log_all (dict): dictionary of logged training metrics averaged across all batches """ - - #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) - epoch_timestamp = time.time() if validate: model.set_eval() @@ -622,109 +581,6 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor return step_log_all -def run_epoch_2_dataloaders(model, data_loader, epoch, data_loader_2, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None): - """ - Run an epoch of training or validation. - - Args: - model (Algo instance): model to train - - data_loader (DataLoader instance): data loader that will be used to serve batches of data - to the model - - epoch (int): epoch number - - validate (bool): whether this is a training epoch or validation epoch. This tells the model - whether to do gradient steps or purely do forward passes. - - num_steps (int): if provided, this epoch lasts for a fixed number of batches (gradient steps), - otherwise the epoch is a complete pass through the training dataset - - obs_normalization_stats (dict or None): if provided, this should map observation keys to dicts - with a "mean" and "std" of shape (1, ...) where ... is the default - shape for the observation. 
- - Returns: - step_log_all (dict): dictionary of logged training metrics averaged across all batches - """ - - #print("LOCAL RANK:",int(os.environ.get("LOCAL_RANK"))," USAGE:",get_gpu_usage_mb(int(os.environ.get(" LOCAL_RANK: ",os.environ.get("SLURM_LOCAL_ID",0))))," SLURM_LOCAL_ID: ",os.environ.get("SLURM_LOCAL_ID",0)) - # breakpoint() - epoch_timestamp = time.time() - if validate: - model.set_eval() - else: - model.set_train() - if num_steps is None: - num_steps = len(data_loader) - - step_log_all = [] - timing_stats = dict(Data_Loading=[], Process_Batch=[], Train_Batch=[], Log_Info=[]) - start_time = time.time() - - data_loader_iter = iter(data_loader) - data_loader_2_iter = None if data_loader_2 is None else iter(data_loader_2) - # breakpoint() - for _ in LogUtils.custom_tqdm(range(num_steps)): - - # load next batch from data loader - try: - t = time.time() - batch = next(data_loader_iter) - batch_2 = None if data_loader_2_iter is None else next(data_loader_2_iter) - except StopIteration: - # reset for next dataset pass - data_loader_iter = iter(data_loader) - data_loader_2_iter = None if data_loader_2 is None else iter(data_loader_2) - t = time.time() - batch = next(data_loader_iter) - batch_2 = None if data_loader_2_iter is None else next(data_loader_2_iter) - timing_stats["Data_Loading"].append(time.time() - t) - - # process batch for training - t = time.time() - # breakpoint() - input_batch = model.process_batch_for_training(batch, ac_key=ac_key) - input_batch_2 = None if batch_2 is None else model.process_batch_for_training(batch_2, ac_key=ac_key) - - # breakpoint() - input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) - input_batch_2 = None if input_batch_2 is None else model.postprocess_batch_for_training(input_batch_2, obs_normalization_stats=obs_normalization_stats) - - timing_stats["Process_Batch"].append(time.time() - t) - - # forward and backward pass - t = time.time() - # breakpoint() - if input_batch_2 is not None: - info = model.train_on_batch([input_batch, input_batch_2], epoch, validate=validate) - else: - info = model.train_on_batch(input_batch, epoch, validate=validate) - timing_stats["Train_Batch"].append(time.time() - t) - - # tensorboard logging - t = time.time() - step_log = model.log_info(info) - step_log_all.append(step_log) - timing_stats["Log_Info"].append(time.time() - t) - - # flatten and take the mean of the metrics - step_log_dict = {} - for i in range(len(step_log_all)): - for k in step_log_all[i]: - if k not in step_log_dict: - step_log_dict[k] = [] - step_log_dict[k].append(step_log_all[i][k]) - step_log_all = dict((k, float(np.mean(v))) for k, v in step_log_dict.items()) - - # add in timing stats - for k in timing_stats: - # sum across all training steps, and convert from seconds to minutes - step_log_all["Time_{}".format(k)] = np.sum(timing_stats[k]) / 60. - step_log_all["Time_Epoch"] = (time.time() - epoch_timestamp) / 60. - - return step_log_all - def is_every_n_steps(interval, current_step, skip_zero=False): """ Convenient function to check whether current_step is at the interval. 
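
Note on the helper this patch keeps: with the ad-hoc `delete_checkpoints` pruning removed, periodic work (checkpoint saves, rollouts, MSE eval) is gated by the retained `is_every_n_steps` utility in `robomimic/utils/train_utils.py`. A minimal usage sketch of that pattern, assuming the standard semantics (returns True when `current_step` is a multiple of `interval`, with `skip_zero` suppressing step 0); the interval value below is hypothetical:

    from robomimic.utils.train_utils import is_every_n_steps

    SAVE_EVERY = 50  # hypothetical checkpoint interval, in epochs

    for epoch in range(1, 201):
        # ... run_epoch(...) for training would execute here ...
        if is_every_n_steps(SAVE_EVERY, epoch, skip_zero=True):
            print("epoch {}: run periodic checkpoint/rollout work".format(epoch))
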
From 16a47f5bd1cc8e5747557766aa4ce713a0e76364 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Tue, 22 Oct 2024 16:48:31 -0400 Subject: [PATCH 38/44] removed seq_length_to_load --- requirements.txt | 8 +-- robomimic/envs/env_robosuite.py | 12 +++-- robomimic/utils/dataset.py | 86 +++------------------------------ robomimic/utils/train_utils.py | 5 +- 4 files changed, 18 insertions(+), 93 deletions(-) diff --git a/requirements.txt b/requirements.txt index 752fb152..79837098 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,10 +10,4 @@ imageio-ffmpeg matplotlib egl_probe>=1.0.1 torch -torchvision -wandb -pytorch_lightning -ipython -cv2 -scipy -pytorch-kinematics \ No newline at end of file +torchvision \ No newline at end of file diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 942cb623..7cb6f624 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -149,10 +149,14 @@ def reset_to(self, state): should_ret = False if "model" in state: self.reset() - # ----- loading LIBERO model xml ---- - model_xml = state["model"] - model_xml = postprocess_model_xml(model_xml, {}) - self.env.reset_from_xml_string(model_xml) + robosuite_version_id = int(robosuite.__version__.split(".")[1]) + if robosuite_version_id <= 3: + from robosuite.utils.mjcf_utils import postprocess_model_xml + xml = postprocess_model_xml(state["model"]) + else: + # v1.4 and above use the class-based edit_model_xml function + xml = self.env.edit_model_xml(state["model"]) + self.env.reset_from_xml_string(xml) self.env.sim.reset() if not self._is_v1: # hide teleop visualization after restoring from model diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index 643fcbf7..4dc47118 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -17,76 +17,6 @@ import scipy import matplotlib.pyplot as plt -def interpolate_arr(v, seq_length): - """ - v: (B, T, D) - seq_length: int - """ - assert len(v.shape) == 3 - if v.shape[1] == seq_length: - return - - interpolated = [] - for i in range(v.shape[0]): - index = v[i] - # if i == 20: - # plt.plot(index[:, 2]) - # plt.savefig('index.png') - # plt.close() - - interp = scipy.interpolate.interp1d( - np.linspace(0, 1, index.shape[0]), index, axis=0 - ) - interpolated.append(interp(np.linspace(0, 1, seq_length))) - - # if i == 20: - # plt.plot(interpolated[-1][:, 2]) - # plt.savefig('interpolated.png') - # plt.close() - - - # L = v.shape[0] - # if L == seq_length: - # return v - - # interp = scipy.interpolate.interp1d( - # np.linspace(0, 1, L), v, axis=0 - # ) - # return interp(np.linspace(0, 1, seq_length)) - - return np.array(interpolated) - -def interpolate_keys(obs, keys, seq_length): - """ - obs: dict with values of shape (T, D) - keys: list of keys to interpolate - seq_length: int changes shape (T, D) to (seq_length, D) - """ - for k in keys: - v = obs[k] - L = v.shape[0] - if L == seq_length: - continue - - if k == "pad_mask": - # interpolate it by simply copying each index (seq_length / seq_length_to_load) times - obs[k] = np.repeat(v, (seq_length // L), axis=0) - elif k != "pad_mask": - # plot v[:, 3] - # plt.plot(v[:, 2]) - # plt.savefig('v_3.png') - # plt.close() - interp = scipy.interpolate.interp1d( - np.linspace(0, 1, L), v, axis=0 - ) - try: - obs[k] = interp(np.linspace(0, 1, seq_length)) - except: - raise ValueError(f"Interpolation failed for key: {k} with shape{k.shape}") - # plt.plot(obs[k][:, 2]) - # plt.savefig('v_3_after.png') - # plt.close() - class 
SequenceDataset(torch.utils.data.Dataset): def __init__( self, @@ -588,7 +518,7 @@ def get_sequence_from_demo( return seq, pad_mask - def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False, seq_length_to_load=None): + def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, prefix="obs", dont_load_fut=False): """ Extract a (sub)sequence of observation items from a demo given the @keys of the items. @@ -603,9 +533,7 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to Returns: a dictionary of extracted items. """ - if seq_length_to_load is None: - seq_length_to_load = seq_length - + seq_length_to_load = 1 if self.prestacked_actions else seq_length obs, pad_mask = self.get_sequence_from_demo( demo_id, index_in_demo=index_in_demo, @@ -622,12 +550,12 @@ def get_obs_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to # to_interp = [k for k in obs if ObsUtils.key_is_obs_modality(k, "low_dim")] to_interp = ["pad_mask"] # t = time.time() - interpolate_keys(obs, to_interp, seq_length) + obs["pad_mask"] = np.repeat(obs["pad_mask"], seq_length, axis=0) # print("Interpolation time: ", time.time() - t) return obs - def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1, seq_length_to_load=None): + def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frames_to_stack=0, seq_length=1): """ Extract a (sub)sequence of dataset items from a demo given the @keys of the items (e.g., states, actions). @@ -641,9 +569,7 @@ def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frame Returns: a dictionary of extracted items. """ - if seq_length_to_load is None: - seq_length_to_load = seq_length - + seq_length_to_load = 1 if self.prestacked_actions else seq_length data, pad_mask = self.get_sequence_from_demo( demo_id, index_in_demo=index_in_demo, @@ -665,7 +591,7 @@ def get_dataset_sequence_from_demo(self, demo_id, index_in_demo, keys, num_frame if not "actions" in k: raise ValueError("Interpolating actions, but key is not an action, key: ", k) - interpolate_keys(data, to_interp, seq_length) + data["pad_mask"] = np.repeat(data["pad_mask"], seq_length, axis=0) # print("Interpolation time: ", time.time() - t) return data diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index 5aa00e89..c9c7850d 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -496,7 +496,7 @@ def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) -def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None, ac_key=None): +def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None): """ Run an epoch of training or validation. 
@@ -549,7 +549,7 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor # process batch for training t = time.time() - input_batch = model.process_batch_for_training(batch, ac_key=ac_key) + input_batch = model.process_batch_for_training(batch) input_batch = model.postprocess_batch_for_training(input_batch, obs_normalization_stats=obs_normalization_stats) timing_stats["Process_Batch"].append(time.time() - t) @@ -581,6 +581,7 @@ def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_nor return step_log_all + def is_every_n_steps(interval, current_step, skip_zero=False): """ Convenient function to check whether current_step is at the interval. From a70b19a8a29bd4f9ee6374dbefc79b3344309c08 Mon Sep 17 00:00:00 2001 From: Simar Kareer Date: Tue, 22 Oct 2024 16:53:25 -0400 Subject: [PATCH 39/44] formatting differences --- robomimic/envs/env_robosuite.py | 1 - robomimic/utils/train_utils.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/robomimic/envs/env_robosuite.py b/robomimic/envs/env_robosuite.py index 7cb6f624..7d398ff2 100644 --- a/robomimic/envs/env_robosuite.py +++ b/robomimic/envs/env_robosuite.py @@ -17,7 +17,6 @@ import robomimic.utils.obs_utils as ObsUtils import robomimic.envs.env_base as EB -from libero.libero.utils.utils import postprocess_model_xml # protect against missing mujoco-py module, since robosuite might be using mujoco-py or DM backend try: diff --git a/robomimic/utils/train_utils.py b/robomimic/utils/train_utils.py index c9c7850d..b5fb1e48 100644 --- a/robomimic/utils/train_utils.py +++ b/robomimic/utils/train_utils.py @@ -496,6 +496,7 @@ def save_model(model, config, env_meta, shape_meta, ckpt_path, obs_normalization torch.save(params, ckpt_path) print("save checkpoint to {}".format(ckpt_path)) + def run_epoch(model, data_loader, epoch, validate=False, num_steps=None, obs_normalization_stats=None): """ Run an epoch of training or validation. From 0932f807cc1bc7154cb55526d689edfae7397cb4 Mon Sep 17 00:00:00 2001 From: ryanthecreator Date: Tue, 26 Nov 2024 13:11:11 -0500 Subject: [PATCH 40/44] changes to add imagenet normalize --- robomimic/utils/dataset.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index 4dc47118..df9fe345 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -36,7 +36,8 @@ def __init__( filter_by_attribute=None, load_next_obs=True, prestacked_actions=False, - hdf5_normalize_actions=False + hdf5_normalize_actions=False, + imagenet_normalize_images=False, ): """ Dataset class for fetching sequences of experience. @@ -84,6 +85,8 @@ def __init__( demonstrations to load load_next_obs (bool): whether to load next_obs from the dataset + + imagenet_normalize_images (bool): if True, normalize images using ImageNet mean and std """ super(SequenceDataset, self).__init__() @@ -92,6 +95,7 @@ def __init__( self.hdf5_path = os.path.expanduser(hdf5_path) self.hdf5_use_swmr = hdf5_use_swmr self.hdf5_normalize_obs = hdf5_normalize_obs + self.imagenet_normalize_images = imagenet_normalize_images self.hdf5_normalize_actions = hdf5_normalize_actions self._hdf5_file = None self.ac_key = ac_key @@ -480,6 +484,8 @@ def get_sequence_from_demo( Returns: a dictionary of extracted items. 
""" + + breakpoint() if dont_load_fut is None: dont_load_fut = [] assert num_frames_to_stack >= 0 From 1d16a966f7121328d99791ccfb1ade45f7686e9c Mon Sep 17 00:00:00 2001 From: ryanthecreator Date: Wed, 27 Nov 2024 14:43:48 -0500 Subject: [PATCH 41/44] imagenet normalization --- robomimic/algo/algo.py | 2 +- robomimic/models/base_nets.py | 6 +++--- robomimic/utils/dataset.py | 4 ---- robomimic/utils/obs_utils.py | 26 +++++++++++++++++--------- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/robomimic/algo/algo.py b/robomimic/algo/algo.py index 27e6c50b..330b065e 100644 --- a/robomimic/algo/algo.py +++ b/robomimic/algo/algo.py @@ -241,7 +241,7 @@ def recurse_helper(d): if k in obs_keys: # found key - stop search and process observation if d[k] is not None: - d[k] = ObsUtils.process_obs_dict(d[k]) + d[k] = ObsUtils.process_obs_dict(d[k], imagenet_normalize=self.global_config.train.imagenet_normalize_images) elif isinstance(d[k], dict): # search down into dictionary recurse_helper(d[k]) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index b9654d37..151a4fac 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -590,7 +590,7 @@ def __init__( self.preprocess = nn.Sequential( transforms.Resize((294,294)), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) try: @@ -740,7 +740,7 @@ def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return self.preprocess = nn.Sequential( transforms.Resize((294,294)), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) try: @@ -845,7 +845,7 @@ def __init__( preprocess = nn.Sequential( transforms.Resize(256), transforms.CenterCrop(224), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) self.nets = Sequential(*([preprocess] + list(net.module.convnet.children())), has_output_shape = False) if freeze: diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index df9fe345..467bc591 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -37,7 +37,6 @@ def __init__( load_next_obs=True, prestacked_actions=False, hdf5_normalize_actions=False, - imagenet_normalize_images=False, ): """ Dataset class for fetching sequences of experience. @@ -95,7 +94,6 @@ def __init__( self.hdf5_path = os.path.expanduser(hdf5_path) self.hdf5_use_swmr = hdf5_use_swmr self.hdf5_normalize_obs = hdf5_normalize_obs - self.imagenet_normalize_images = imagenet_normalize_images self.hdf5_normalize_actions = hdf5_normalize_actions self._hdf5_file = None self.ac_key = ac_key @@ -428,7 +426,6 @@ def get_item(self, index): goal_index = None if self.goal_mode == "last": goal_index = end_index_in_demo - 1 - meta["obs"] = self.get_obs_sequence_from_demo( demo_id, index_in_demo=index_in_demo, @@ -485,7 +482,6 @@ def get_sequence_from_demo( a dictionary of extracted items. 
""" - breakpoint() if dont_load_fut is None: dont_load_fut = [] assert num_frames_to_stack >= 0 diff --git a/robomimic/utils/obs_utils.py b/robomimic/utils/obs_utils.py index 3313b631..d05ae229 100644 --- a/robomimic/utils/obs_utils.py +++ b/robomimic/utils/obs_utils.py @@ -325,7 +325,7 @@ def batch_image_chw_to_hwc(im): return im.permute(start_dims + [s + 2, s + 3, s + 1]) -def process_obs(obs, obs_modality=None, obs_key=None): +def process_obs(obs, obs_modality=None, obs_key=None, imagenet_normalize=False): """ Process observation @obs corresponding to @obs_modality modality (or implicitly inferred from @obs_key) to prepare for network input. @@ -345,10 +345,10 @@ def process_obs(obs, obs_modality=None, obs_key=None): assert obs_modality is not None or obs_key is not None, "Either obs_modality or obs_key must be specified!" if obs_key is not None: obs_modality = OBS_KEYS_TO_MODALITIES[obs_key] - return OBS_MODALITY_CLASSES[obs_modality].process_obs(obs) + return OBS_MODALITY_CLASSES[obs_modality].process_obs(obs, imagenet_normalize=imagenet_normalize) -def process_obs_dict(obs_dict): +def process_obs_dict(obs_dict, imagenet_normalize=False): """ Process observations in observation dictionary to prepare for network input. @@ -359,10 +359,11 @@ def process_obs_dict(obs_dict): Returns: new_dict (dict): dictionary where observation keys have been processed by their corresponding processors """ - return { k : process_obs(obs=obs, obs_key=k) for k, obs in obs_dict.items() } # shallow copy + return { k : process_obs(obs=obs, obs_key=k, imagenet_normalize=imagenet_normalize) for k, obs in obs_dict.items() } # shallow copy -def process_frame(frame, channel_dim, scale): + +def process_frame(frame, channel_dim, scale, imagenet_normalize=False): """ Given frame fetched from dataset, process for network input. Converts array to float (from uint8), normalizes pixels from range [0, @scale] to [0, 1], and channel swaps @@ -382,6 +383,10 @@ def process_frame(frame, channel_dim, scale): if scale is not None: frame = frame / scale frame = frame.clip(0.0, 1.0) + if imagenet_normalize: + mean = np.array([0.485, 0.456, 0.406]) + std = np.array([0.229, 0.224, 0.225]) + frame = (frame - mean) / std frame = batch_image_hwc_to_chw(frame) return frame @@ -890,7 +895,7 @@ def _default_obs_unprocessor(cls, obs): raise NotImplementedError @classmethod - def process_obs(cls, obs): + def process_obs(cls, obs, imagenet_normalize=False): """ Prepares an observation @obs of this modality for network input. @@ -902,7 +907,10 @@ def process_obs(cls, obs): """ processor = cls._custom_obs_processor if \ cls._custom_obs_processor is not None else cls._default_obs_processor - return processor(obs) + if isinstance(cls, ImageModality): + return processor(obs, imagenet_normalize=imagenet_normalize) + else: + return processor(obs) @classmethod def unprocess_obs(cls, obs): @@ -949,7 +957,7 @@ class ImageModality(Modality): name = "rgb" @classmethod - def _default_obs_processor(cls, obs): + def _default_obs_processor(cls, obs, imagenet_normalize=False): """ Given image fetched from dataset, process for network input. Converts array to float (from uint8), normalizes pixels from range [0, 255] to [0, 1], and channel swaps @@ -961,7 +969,7 @@ def _default_obs_processor(cls, obs): Returns: processed_obs (np.array or torch.Tensor): processed image """ - return process_frame(frame=obs, channel_dim=3, scale=255.) 
+ return process_frame(frame=obs, channel_dim=3, scale=255., imagenet_normalize=imagenet_normalize) @classmethod def _default_obs_unprocessor(cls, obs): From 647e4fa66a9bae61ddc3e2e66cc0babb040211c7 Mon Sep 17 00:00:00 2001 From: ryanthecreator Date: Sun, 1 Dec 2024 13:02:02 -0500 Subject: [PATCH 42/44] hpt fixes for robomimic --- robomimic/utils/dataset.py | 6 ++++-- robomimic/utils/file_utils.py | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/robomimic/utils/dataset.py b/robomimic/utils/dataset.py index 467bc591..065c442c 100644 --- a/robomimic/utils/dataset.py +++ b/robomimic/utils/dataset.py @@ -333,7 +333,8 @@ def _calc_helper(hdf5_key): obs_normalization_stats = {} # keys_to_norm = [f"obs/{k}" for k in self.obs_keys if ObsUtils.key_is_obs_modality(k, "low_dim")] + ["actions"] for key in self.obs_keys: - if ObsUtils.key_is_obs_modality(key, "low_dim"): + # hardcoded language key not normalized for now + if ObsUtils.key_is_obs_modality(key, "low_dim") and "lang" not in key: obs_normalization_stats[key] = _calc_helper(f"obs/{key}") for key in self.dataset_keys: @@ -364,6 +365,7 @@ def get_dataset_for_ep(self, ep, key): Helper utility to get a dataset for a specific demonstration. Takes into account whether the dataset has been loaded into memory. """ + # check if this key should be in memory key_should_be_in_memory = (self.hdf5_cache_mode in ["all", "low_dim"]) @@ -374,7 +376,7 @@ def get_dataset_for_ep(self, ep, key): assert(key1 in ['obs', 'next_obs']) if key2 not in self.obs_keys_in_memory: key_should_be_in_memory = False - + if key_should_be_in_memory: # read cache if '/' in key: diff --git a/robomimic/utils/file_utils.py b/robomimic/utils/file_utils.py index 65519c5a..a79e5efc 100644 --- a/robomimic/utils/file_utils.py +++ b/robomimic/utils/file_utils.py @@ -154,6 +154,10 @@ def get_shape_metadata_from_dataset(dataset_path, all_obs_keys=None, verbose=Fal all_obs_keys = [k for k in demo["obs"]] for k in sorted(all_obs_keys): + if k not in demo["obs"]: + if verbose: + print(f"Warning: {k} not in some demos['obs']") + continue initial_shape = demo["obs/{}".format(k)].shape[1:] if verbose: print("obs key {} with shape {}".format(k, initial_shape)) From 53f9d69f2c97ecd4337e365c788212b3ed4af650 Mon Sep 17 00:00:00 2001 From: ryanthecreator Date: Fri, 20 Dec 2024 19:28:28 -0500 Subject: [PATCH 43/44] added radio to vit class, cleaned up some things and added peft lora --- robomimic/models/base_nets.py | 60 ++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 11 deletions(-) diff --git a/robomimic/models/base_nets.py b/robomimic/models/base_nets.py index 151a4fac..611a0dd7 100644 --- a/robomimic/models/base_nets.py +++ b/robomimic/models/base_nets.py @@ -19,6 +19,8 @@ from robomimic.models.vit_rein import Reins, LoRAReins, MLPhead from robomimic.utils.log_utils import bcolors +from peft import LoraConfig, get_peft_model + CONV_ACTIVATIONS = { "relu": nn.ReLU, "None": None, @@ -708,10 +710,10 @@ def __repr__(self): class Vit(ConvBase): """ - Vision transformer + Vision transformer with optional peft lora """ - def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return_key="x_norm_patchtokens"): + def __init__(self, input_channel=3, vit_model_class="vit_b", freeze=True, return_key="x_norm_patchtokens", use_lora=False, **kwargs): """ Using pretrained observation encoder network proposed in Vision Transformers git clone https://github.com/facebookresearch/dinov2 @@ -720,60 +722,96 @@ def __init__(self, input_channel=3, 
vit_model_class="vit_b", freeze=True, return input_channel (int): number of input channels for input images to the network. If not equal to 3, modifies first conv layer to handle the number of input channels. - vit_model_class (str): select one of the vit pretrained model "vit_b", "vit_l", "vit_s" or "vit_g" + vit_model_class (str): select one of the vit pretrained model "vit_b", "vit_l", "vit_s", "vit_g" or "radio" freeze (bool): if True, use a frozen ViT pretrained model. """ super(Vit, self).__init__() assert input_channel == 3 - assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s"] # make sure the selected vit model do exist + assert vit_model_class in ["vit_b", "vit_l" ,"vit_g", "vit_s", "radio"] # make sure the selected vit model do exist # cut the last fc layer self._input_channel = input_channel self._vit_model_class = vit_model_class + + self._model_version = kwargs.get("model_version", None) + self._freeze = freeze self._input_coord_conv = False self._pretrained = False self.return_key = return_key if self.return_key not in ["x_norm_patchtokens", "x_norm_clstoken"]: raise ValueError(f"return_key {self.return_key} not supported") + + self.use_lora = use_lora self.preprocess = nn.Sequential( - transforms.Resize((294,294)), + transforms.Resize((224, 224)), # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ) + try: if self._vit_model_class == "vit_s": self.nets = dinov2_vits14 = torch.hub.load( "facebookresearch/dinov2", "dinov2_vits14" ) + self.patch_size = self.nets.patch_embed.patch_size if self._vit_model_class == "vit_l": self.nets = dinov2_vits14 = torch.hub.load( "facebookresearch/dinov2", "dinov2_vitl14" ) + self.patch_size = self.nets.patch_embed.patch_size if self._vit_model_class == "vit_g": self.nets = dinov2_vits14 = torch.hub.load( "facebookresearch/dinov2", "dinov2_vitg14" ) + self.patch_size = self.nets.patch_embed.patch_size if self._vit_model_class == "vit_b": self.nets = dinov2_vits14 = torch.hub.load( "facebookresearch/dinov2", "dinov2_vitb14" ) + self.patch_size = self.nets.patch_embed.patch_size + if self._vit_model_class == "radio": + radio_model_version = self._model_version if self._model_version is not None else "radio_v2.5-l" + self.nets = torch.hub.load( + 'NVlabs/RADIO', 'radio_model', version=radio_model_version, progress=True, skip_validation=True + ) + self.preprocess = nn.Sequential( + transforms.Resize((224, 224)), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ) + self.patch_size = self.nets.patch_size + + except ImportError: print("WARNING: could not load Vit") - if freeze: + if self.use_lora: + lora_config = LoraConfig( + r=8, + lora_alpha=32, + target_modules=["qkv", "query", "key", "value"], + lora_dropout=0.1, + bias="none", + task_type="SEQ_2_SEQ_LM" + ) + self.nets = get_peft_model(self.nets, lora_config) + + if self._freeze and not self.use_lora: for param in self.nets.parameters(): param.requires_grad = False - - if self._freeze: self.nets.eval() def forward(self, inputs): + x = self.preprocess(inputs) # x = self.nets(x) - x = self.nets.forward_features(x)[self.return_key] + if "vit" in self._vit_model_class: + x = self.nets.forward_features(x)[self.return_key] + else: + summary, x = self.nets(x) + return x def output_shape(self, input_shape): @@ -792,7 +830,7 @@ def output_shape(self, input_shape): out_dim = self.nets.patch_embed.proj.out_channels if self.return_key == "x_norm_patchtokens": - return [441, out_dim] + return [(H / self.patch_size) * (W / 
self.patch_size), out_dim]
        elif self.return_key == "x_norm_clstoken":
            return [out_dim]
@@ -802,7 +840,7 @@
        print("**Number of params:",sum(p.numel() for p in self.nets.parameters()))
        header = '{}'.format(str(self.__class__.__name__))
-        return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze)
+        return header + '(input_channel={}, input_coord_conv={}, pretrained={}, freeze={}, use_lora={})'.format(self._input_channel, self._input_coord_conv, self._pretrained, self._freeze, self.use_lora)

class R3MConv(ConvBase):
    """

From 3d65f03e76dfbc2f2b813b2ed1d04ba4e7afa3a6 Mon Sep 17 00:00:00 2001
From: ryanthecreator
Date: Mon, 23 Dec 2024 15:16:32 -0500
Subject: [PATCH 44/44] added diffusion policy support for our robomimic stuff

---
 robomimic/algo/diffusion_policy.py | 693 +++++++++++++++++++++++++++++
 1 file changed, 693 insertions(+)
 create mode 100644 robomimic/algo/diffusion_policy.py

diff --git a/robomimic/algo/diffusion_policy.py b/robomimic/algo/diffusion_policy.py
new file mode 100644
index 00000000..f1ad2610
--- /dev/null
+++ b/robomimic/algo/diffusion_policy.py
@@ -0,0 +1,693 @@
+"""
+Implementation of Diffusion Policy https://diffusion-policy.cs.columbia.edu/ by Cheng Chi
+"""
+from typing import Callable, Union
+import math
+from collections import OrderedDict, deque
+from packaging.version import parse as parse_version
+import copy
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+# requires diffusers==0.11.1
+from diffusers.schedulers.scheduling_ddpm import DDPMScheduler
+from diffusers.schedulers.scheduling_ddim import DDIMScheduler
+from diffusers.training_utils import EMAModel
+
+import robomimic.models.obs_nets as ObsNets
+import robomimic.utils.tensor_utils as TensorUtils
+import robomimic.utils.torch_utils as TorchUtils
+import robomimic.utils.obs_utils as ObsUtils
+
+from robomimic.algo import register_algo_factory_func, PolicyAlgo
+
+@register_algo_factory_func("diffusion_policy")
+def algo_config_to_class(algo_config):
+    """
+    Maps algo config to the DiffusionPolicyUNet algo class to instantiate, along with additional algo kwargs.
+
+    Args:
+        algo_config (Config instance): algo config
+
+    Returns:
+        algo_class: subclass of Algo
+        algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm
+    """
+
+    if algo_config.unet.enabled:
+        return DiffusionPolicyUNet, {}
+    elif algo_config.transformer.enabled:
+        raise NotImplementedError()
+    else:
+        raise RuntimeError()
+
+class DiffusionPolicyUNet(PolicyAlgo):
+    def _create_networks(self):
+        """
+        Creates networks and places them into @self.nets.
+        """
+        # set up different observation groups for @MIMO_MLP
+        observation_group_shapes = OrderedDict()
+        observation_group_shapes["obs"] = OrderedDict(self.obs_shapes)
+        encoder_kwargs = ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder)
+
+        obs_encoder = ObsNets.ObservationGroupEncoder(
+            observation_group_shapes=observation_group_shapes,
+            encoder_kwargs=encoder_kwargs,
+        )
+        # IMPORTANT!
+        # replace all BatchNorm with GroupNorm to work with EMA
+        # performance will tank if you forget to do this!
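+        # (descriptive note) replace_bn_with_gn is expected to recursively swap each
+        # nn.BatchNorm2d in the encoder for an nn.GroupNorm over the same channel
+        # count; GroupNorm statistics are batch-independent, so the EMA-averaged
+        # weights stay valid at eval time.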
+ obs_encoder = replace_bn_with_gn(obs_encoder) + + obs_dim = obs_encoder.output_shape()[0] + + # create network object + noise_pred_net = ConditionalUnet1D( + input_dim=self.ac_dim, + global_cond_dim=obs_dim*self.algo_config.horizon.observation_horizon + ) + + # the final arch has 2 parts + nets = nn.ModuleDict({ + 'policy': nn.ModuleDict({ + 'obs_encoder': obs_encoder, + 'noise_pred_net': noise_pred_net + }) + }) + + nets = nets.float().to(self.device) + + # setup noise scheduler + noise_scheduler = None + if self.algo_config.ddpm.enabled: + noise_scheduler = DDPMScheduler( + num_train_timesteps=self.algo_config.ddpm.num_train_timesteps, + beta_schedule=self.algo_config.ddpm.beta_schedule, + clip_sample=self.algo_config.ddpm.clip_sample, + prediction_type=self.algo_config.ddpm.prediction_type + ) + elif self.algo_config.ddim.enabled: + noise_scheduler = DDIMScheduler( + num_train_timesteps=self.algo_config.ddim.num_train_timesteps, + beta_schedule=self.algo_config.ddim.beta_schedule, + clip_sample=self.algo_config.ddim.clip_sample, + set_alpha_to_one=self.algo_config.ddim.set_alpha_to_one, + steps_offset=self.algo_config.ddim.steps_offset, + prediction_type=self.algo_config.ddim.prediction_type + ) + else: + raise RuntimeError() + + # setup EMA + ema = None + if self.algo_config.ema.enabled: + ema = EMAModel(parameters=nets.parameters(), power=self.algo_config.ema.power) + + # set attrs + self.nets = nets + self._shadow_nets = copy.deepcopy(self.nets).eval() + self._shadow_nets.requires_grad_(False) + self.noise_scheduler = noise_scheduler + self.ema = ema + self.action_check_done = False + self.obs_queue = None + self.action_queue = None + + def process_batch_for_training(self, batch): + """ + Processes input batch from a data loader to filter out + relevant information and prepare the batch for training. + + Args: + batch (dict): dictionary with torch.Tensors sampled + from a data loader + + Returns: + input_batch (dict): processed and filtered batch that + will be used for training + """ + To = self.algo_config.horizon.observation_horizon + Ta = self.algo_config.horizon.action_horizon + Tp = self.algo_config.horizon.prediction_horizon + + input_batch = dict() + input_batch["obs"] = {k: batch["obs"][k][:, :To, :] for k in batch["obs"]} + input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present + input_batch["actions"] = batch["actions"][:, :Tp, :] + + # check if actions are normalized to [-1,1] + if not self.action_check_done: + actions = input_batch["actions"] + in_range = (-1 <= actions) & (actions <= 1) + all_in_range = torch.all(in_range).item() + if not all_in_range: + raise ValueError('"actions" must be in range [-1,1] for Diffusion Policy! Check if hdf5_normalize_action is enabled.') + self.action_check_done = True + + return TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device) + + def train_on_batch(self, batch, epoch, validate=False): + """ + Training on a single batch of data. + + Args: + batch (dict): dictionary with torch.Tensors sampled + from a data loader and filtered by @process_batch_for_training + + epoch (int): epoch number - required by some Algos that need + to perform staged training and early stopping + + validate (bool): if True, don't perform any learning updates. 
+ + Returns: + info (dict): dictionary of relevant inputs, outputs, and losses + that might be relevant for logging + """ + To = self.algo_config.horizon.observation_horizon + Ta = self.algo_config.horizon.action_horizon + Tp = self.algo_config.horizon.prediction_horizon + action_dim = self.ac_dim + B = batch['actions'].shape[0] + + + with TorchUtils.maybe_no_grad(no_grad=validate): + info = super(DiffusionPolicyUNet, self).train_on_batch(batch, epoch, validate=validate) + actions = batch['actions'] + + # encode obs + inputs = { + 'obs': batch["obs"], + 'goal': batch["goal_obs"] + } + for k in self.obs_shapes: + # first two dimensions should be [B, T] for inputs + assert inputs['obs'][k].ndim - 2 == len(self.obs_shapes[k]) + + obs_features = TensorUtils.time_distributed(inputs, self.nets['policy']['obs_encoder'], inputs_as_kwargs=True) + assert obs_features.ndim == 3 # [B, T, D] + + obs_cond = obs_features.flatten(start_dim=1) + + # sample noise to add to actions + noise = torch.randn(actions.shape, device=self.device) + + # sample a diffusion iteration for each data point + timesteps = torch.randint( + 0, self.noise_scheduler.config.num_train_timesteps, + (B,), device=self.device + ).long() + + # add noise to the clean actions according to the noise magnitude at each diffusion iteration + # (this is the forward diffusion process) + noisy_actions = self.noise_scheduler.add_noise( + actions, noise, timesteps) + + # predict the noise residual + noise_pred = self.nets['policy']['noise_pred_net']( + noisy_actions, timesteps, global_cond=obs_cond) + + # L2 loss + loss = F.mse_loss(noise_pred, noise) + + # logging + losses = { + 'l2_loss': loss + } + info["losses"] = TensorUtils.detach(losses) + + if not validate: + # gradient step + policy_grad_norms = TorchUtils.backprop_for_loss( + net=self.nets, + optim=self.optimizers["policy"], + loss=loss, + ) + + # update Exponential Moving Average of the model weights + if self.ema is not None: + self.ema.step(self.nets.parameters()) + + step_info = { + 'policy_grad_norms': policy_grad_norms + } + info.update(step_info) + + return info + + def log_info(self, info): + """ + Process info dictionary from @train_on_batch to summarize + information to pass to tensorboard for logging. + + Args: + info (dict): dictionary of info + + Returns: + loss_log (dict): name -> summary statistic + """ + log = super(DiffusionPolicyUNet, self).log_info(info) + log["Loss"] = info["losses"]["l2_loss"].item() + if "policy_grad_norms" in info: + log["Policy_Grad_Norms"] = info["policy_grad_norms"] + return log + + def reset(self): + """ + Reset algo state to prepare for environment rollouts. + """ + # setup inference queues + To = self.algo_config.horizon.observation_horizon + Ta = self.algo_config.horizon.action_horizon + obs_queue = deque(maxlen=To) + action_queue = deque(maxlen=Ta) + self.obs_queue = obs_queue + self.action_queue = action_queue + + def get_action(self, obs_dict, goal_dict=None): + """ + Get policy action outputs. 
+
+        Args:
+            obs_dict (dict): current observation [1, Do]
+            goal_dict (dict): (optional) goal
+
+        Returns:
+            action (torch.Tensor): action tensor [1, Da]
+        """
+        # obs_dict: key: [1,D]
+        To = self.algo_config.horizon.observation_horizon
+        Ta = self.algo_config.horizon.action_horizon
+
+        # TODO: obs_queue already handled by frame_stack
+        # make sure we have at least To observations in obs_queue
+        # if not enough, repeat
+        # if already full, append one to the obs_queue
+        # n_repeats = max(To - len(self.obs_queue), 1)
+        # self.obs_queue.extend([obs_dict] * n_repeats)
+
+        if len(self.action_queue) == 0:
+            # no actions left, run inference
+            # turn obs_queue into dict of tensors (concat at T dim)
+            # obs_dict_list = TensorUtils.list_of_flat_dict_to_dict_of_list(list(self.obs_queue))
+            # obs_dict_tensor = dict((k, torch.cat(v, dim=0).unsqueeze(0)) for k,v in obs_dict_list.items())
+
+            # run inference
+            # [1,T,Da]
+            action_sequence = self._get_action_trajectory(obs_dict=obs_dict)
+
+            # put actions into the queue
+            self.action_queue.extend(action_sequence[0])
+
+        # has action, execute from left to right
+        # [Da]
+        action = self.action_queue.popleft()
+
+        # [1,Da]
+        action = action.unsqueeze(0)
+        return action
+
+    def _get_action_trajectory(self, obs_dict, goal_dict=None):
+        assert not self.nets.training
+        To = self.algo_config.horizon.observation_horizon
+        Ta = self.algo_config.horizon.action_horizon
+        Tp = self.algo_config.horizon.prediction_horizon
+        action_dim = self.ac_dim
+        if self.algo_config.ddpm.enabled is True:
+            num_inference_timesteps = self.algo_config.ddpm.num_inference_timesteps
+        elif self.algo_config.ddim.enabled is True:
+            num_inference_timesteps = self.algo_config.ddim.num_inference_timesteps
+        else:
+            raise ValueError("either ddpm or ddim must be enabled for inference")
+
+        # select network
+        nets = self.nets
+        if self.ema is not None:
+            self.ema.copy_to(parameters=self._shadow_nets.parameters())
+            nets = self._shadow_nets
+
+        # encode obs
+        inputs = {
+            'obs': obs_dict,
+            'goal': goal_dict
+        }
+        for k in self.obs_shapes:
+            # first two dimensions should be [B, T] for inputs
+            assert inputs['obs'][k].ndim - 2 == len(self.obs_shapes[k])
+        # use the (possibly EMA) weights selected above for the encoder as well
+        obs_features = TensorUtils.time_distributed(inputs, nets['policy']['obs_encoder'], inputs_as_kwargs=True)
+        assert obs_features.ndim == 3  # [B, T, D]
+        B = obs_features.shape[0]
+
+        # reshape observation to (B,obs_horizon*obs_dim)
+        obs_cond = obs_features.flatten(start_dim=1)
+
+        # initialize action from Gaussian noise
+        noisy_action = torch.randn(
+            (B, Tp, action_dim), device=self.device)
+        naction = noisy_action
+
+        # init scheduler
+        self.noise_scheduler.set_timesteps(num_inference_timesteps)
+
+        for k in self.noise_scheduler.timesteps:
+            # predict noise
+            noise_pred = nets['policy']['noise_pred_net'](
+                sample=naction,
+                timestep=k,
+                global_cond=obs_cond
+            )
+
+            # inverse diffusion step (remove noise)
+            naction = self.noise_scheduler.step(
+                model_output=noise_pred,
+                timestep=k,
+                sample=naction
+            ).prev_sample
+
+        # process action using Ta
+        start = To - 1
+        end = start + Ta
+        action = naction[:,start:end]
+        return action
+
+    def serialize(self):
+        """
+        Get dictionary of current model parameters.
+        """
+        return {
+            "nets": self.nets.state_dict(),
+            "ema": self.ema.state_dict() if self.ema is not None else None,
+        }
+
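+    # note: the "ema" entry holds the averaged weights from diffusers'
+    # EMAModel.state_dict(); it is None when EMA is disabled, and
+    # deserialize() below tolerates both cases
+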
+    def deserialize(self, model_dict):
+        """
+        Load model from a checkpoint.
+
+        Args:
+            model_dict (dict): a dictionary saved by self.serialize() that contains
+                the same keys as @self.network_classes
+        """
+        self.nets.load_state_dict(model_dict["nets"])
+        if model_dict.get("ema", None) is not None:
+            self.ema.load_state_dict(model_dict["ema"])
+
+
+# =================== Vision Encoder Utils =====================
+def replace_submodules(
+        root_module: nn.Module,
+        predicate: Callable[[nn.Module], bool],
+        func: Callable[[nn.Module], nn.Module]) -> nn.Module:
+    """
+    Replace all submodules selected by the predicate with
+    the output of func.
+
+    predicate: Return true if the module is to be replaced.
+    func: Return new module to use.
+    """
+    if predicate(root_module):
+        return func(root_module)
+
+    if parse_version(torch.__version__) < parse_version('1.9.0'):
+        raise ImportError('This function requires pytorch >= 1.9.0')
+
+    bn_list = [k.split('.') for k, m
+        in root_module.named_modules(remove_duplicate=True)
+        if predicate(m)]
+    for *parent, k in bn_list:
+        parent_module = root_module
+        if len(parent) > 0:
+            parent_module = root_module.get_submodule('.'.join(parent))
+        if isinstance(parent_module, nn.Sequential):
+            src_module = parent_module[int(k)]
+        else:
+            src_module = getattr(parent_module, k)
+        tgt_module = func(src_module)
+        if isinstance(parent_module, nn.Sequential):
+            parent_module[int(k)] = tgt_module
+        else:
+            setattr(parent_module, k, tgt_module)
+    # verify that all modules are replaced
+    bn_list = [k.split('.') for k, m
+        in root_module.named_modules(remove_duplicate=True)
+        if predicate(m)]
+    assert len(bn_list) == 0
+    return root_module
+
+def replace_bn_with_gn(
+    root_module: nn.Module,
+    features_per_group: int=16) -> nn.Module:
+    """
+    Replace all BatchNorm layers with GroupNorm.
+    """
+    replace_submodules(
+        root_module=root_module,
+        predicate=lambda x: isinstance(x, nn.BatchNorm2d),
+        func=lambda x: nn.GroupNorm(
+            num_groups=x.num_features//features_per_group,
+            num_channels=x.num_features)
+    )
+    return root_module
+
+# =================== UNet for Diffusion ==============
+
+class SinusoidalPosEmb(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, x):
+        device = x.device
+        half_dim = self.dim // 2
+        emb = math.log(10000) / (half_dim - 1)
+        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
+        emb = x[:, None] * emb[None, :]
+        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+        return emb
+
+
+class Downsample1d(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.conv = nn.Conv1d(dim, dim, 3, 2, 1)
+
+    def forward(self, x):
+        return self.conv(x)
+
+class Upsample1d(nn.Module):
+    def __init__(self, dim):
+        super().__init__()
+        self.conv = nn.ConvTranspose1d(dim, dim, 4, 2, 1)
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class Conv1dBlock(nn.Module):
+    '''
+    Conv1d --> GroupNorm --> Mish
+    '''
+
+    def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):
+        super().__init__()
+
+        self.block = nn.Sequential(
+            nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2),
+            nn.GroupNorm(n_groups, out_channels),
+            nn.Mish(),
+        )
+
+    def forward(self, x):
+        return self.block(x)
+
+
+class ConditionalResidualBlock1D(nn.Module):
+    def __init__(self,
+            in_channels,
+            out_channels,
+            cond_dim,
+            kernel_size=3,
+            n_groups=8):
+        super().__init__()
+
+        self.blocks = nn.ModuleList([
+            Conv1dBlock(in_channels, out_channels, kernel_size, n_groups=n_groups),
+            Conv1dBlock(out_channels, out_channels, kernel_size, n_groups=n_groups),
+        ])
+
+        # FiLM modulation https://arxiv.org/abs/1709.07871
+        # predicts per-channel scale and bias
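+        # (e.g. with out_channels=256, cond_encoder maps cond to 2*256 numbers
+        #  that forward() reshapes into a (B, 2, 256, 1) scale/bias pair)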
+        cond_channels = out_channels * 2
+        self.out_channels = out_channels
+        self.cond_encoder = nn.Sequential(
+            nn.Mish(),
+            nn.Linear(cond_dim, cond_channels),
+            nn.Unflatten(-1, (-1, 1))
+        )
+
+        # make sure dimensions compatible
+        self.residual_conv = nn.Conv1d(in_channels, out_channels, 1) \
+            if in_channels != out_channels else nn.Identity()
+
+    def forward(self, x, cond):
+        '''
+        x : [ batch_size x in_channels x horizon ]
+        cond : [ batch_size x cond_dim]
+
+        returns:
+        out : [ batch_size x out_channels x horizon ]
+        '''
+        out = self.blocks[0](x)
+        embed = self.cond_encoder(cond)
+
+        embed = embed.reshape(
+            embed.shape[0], 2, self.out_channels, 1)
+        scale = embed[:,0,...]
+        bias = embed[:,1,...]
+        out = scale * out + bias
+
+        out = self.blocks[1](out)
+        out = out + self.residual_conv(x)
+        return out
+
+
+class ConditionalUnet1D(nn.Module):
+    def __init__(self,
+        input_dim,
+        global_cond_dim,
+        diffusion_step_embed_dim=256,
+        down_dims=[256,512,1024],
+        kernel_size=5,
+        n_groups=8
+        ):
+        """
+        input_dim: Dim of actions.
+        global_cond_dim: Dim of global conditioning applied with FiLM
+          in addition to diffusion step embedding. This is usually obs_horizon * obs_dim
+        diffusion_step_embed_dim: Size of positional encoding for diffusion iteration k
+        down_dims: Channel size for each UNet level.
+          The length of this array determines number of levels.
+        kernel_size: Conv kernel size
+        n_groups: Number of groups for GroupNorm
+        """
+
+        super().__init__()
+        all_dims = [input_dim] + list(down_dims)
+        start_dim = down_dims[0]
+
+        dsed = diffusion_step_embed_dim
+        diffusion_step_encoder = nn.Sequential(
+            SinusoidalPosEmb(dsed),
+            nn.Linear(dsed, dsed * 4),
+            nn.Mish(),
+            nn.Linear(dsed * 4, dsed),
+        )
+        cond_dim = dsed + global_cond_dim
+
+        in_out = list(zip(all_dims[:-1], all_dims[1:]))
+        mid_dim = all_dims[-1]
+        self.mid_modules = nn.ModuleList([
+            ConditionalResidualBlock1D(
+                mid_dim, mid_dim, cond_dim=cond_dim,
+                kernel_size=kernel_size, n_groups=n_groups
+            ),
+            ConditionalResidualBlock1D(
+                mid_dim, mid_dim, cond_dim=cond_dim,
+                kernel_size=kernel_size, n_groups=n_groups
+            ),
+        ])
+
+        down_modules = nn.ModuleList([])
+        for ind, (dim_in, dim_out) in enumerate(in_out):
+            is_last = ind >= (len(in_out) - 1)
+            down_modules.append(nn.ModuleList([
+                ConditionalResidualBlock1D(
+                    dim_in, dim_out, cond_dim=cond_dim,
+                    kernel_size=kernel_size, n_groups=n_groups),
+                ConditionalResidualBlock1D(
+                    dim_out, dim_out, cond_dim=cond_dim,
+                    kernel_size=kernel_size, n_groups=n_groups),
+                Downsample1d(dim_out) if not is_last else nn.Identity()
+            ]))
+
+        up_modules = nn.ModuleList([])
+        for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])):
+            is_last = ind >= (len(in_out) - 1)
+            up_modules.append(nn.ModuleList([
+                ConditionalResidualBlock1D(
+                    dim_out*2, dim_in, cond_dim=cond_dim,
+                    kernel_size=kernel_size, n_groups=n_groups),
+                ConditionalResidualBlock1D(
+                    dim_in, dim_in, cond_dim=cond_dim,
+                    kernel_size=kernel_size, n_groups=n_groups),
+                Upsample1d(dim_in) if not is_last else nn.Identity()
+            ]))
+
+        final_conv = nn.Sequential(
+            Conv1dBlock(start_dim, start_dim, kernel_size=kernel_size),
+            nn.Conv1d(start_dim, input_dim, 1),
+        )
+
+        self.diffusion_step_encoder = diffusion_step_encoder
+        self.up_modules = up_modules
+        self.down_modules = down_modules
+        self.final_conv = final_conv
+
+        print("number of parameters: {:e}".format(
+            sum(p.numel() for p in self.parameters()))
+        )
+
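+    # forward() follows the standard U-Net pattern along the time axis: each
+    # encoder level pushes its activation onto h, and each decoder level pops
+    # one and concatenates it along channels (the skip connection) before its
+    # residual blocks.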
+    def forward(self,
+            sample: torch.Tensor,
+            timestep: Union[torch.Tensor, float, int],
+            global_cond=None):
+        """
+        sample: (B,T,input_dim)
+        timestep: (B,) or int, diffusion step
+        global_cond: (B,global_cond_dim)
+        output: (B,T,input_dim)
+        """
+        # (B,T,C)
+        sample = sample.moveaxis(-1,-2)
+        # (B,C,T)
+
+        # 1. time
+        timesteps = timestep
+        if not torch.is_tensor(timesteps):
+            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
+        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
+            timesteps = timesteps[None].to(sample.device)
+        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+        timesteps = timesteps.expand(sample.shape[0])
+
+        global_feature = self.diffusion_step_encoder(timesteps)
+
+        if global_cond is not None:
+            global_feature = torch.cat([
+                global_feature, global_cond
+            ], dim=-1)
+
+        x = sample
+        h = []
+        for idx, (resnet, resnet2, downsample) in enumerate(self.down_modules):
+            x = resnet(x, global_feature)
+            x = resnet2(x, global_feature)
+            h.append(x)
+            x = downsample(x)
+
+        for mid_module in self.mid_modules:
+            x = mid_module(x, global_feature)
+
+        for idx, (resnet, resnet2, upsample) in enumerate(self.up_modules):
+            x = torch.cat((x, h.pop()), dim=1)
+            x = resnet(x, global_feature)
+            x = resnet2(x, global_feature)
+            x = upsample(x)
+
+        x = self.final_conv(x)
+
+        # (B,C,T)
+        x = x.moveaxis(-1,-2)
+        # (B,T,C)
+        return x
\ No newline at end of file
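
The horizon bookkeeping in get_action() / _get_action_trajectory() is easy to misread, so here is a small standalone sketch of it; the horizon values below are illustrative assumptions, not the config defaults:

    # hypothetical horizons (illustrative only; real values come from algo_config.horizon)
    To, Ta, Tp = 2, 8, 16  # observation / action / prediction horizons

    # _get_action_trajectory() denoises a full Tp-step trajectory, then keeps the
    # Ta steps starting at the current timestep (index To - 1):
    start = To - 1       # -> 1
    end = start + Ta     # -> 9
    # executed slice: naction[:, 1:9], i.e. 8 actions per denoising pass

    # get_action() pops one action per environment step from action_queue (a deque
    # with maxlen=Ta) and re-runs the reverse-diffusion loop only when the queue
    # is empty, so one inference call is amortized over Ta environment steps.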

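As a quick sanity check of the BatchNorm-to-GroupNorm swap the policy depends on, the helper can be exercised on any BatchNorm-bearing network. A minimal sketch, using torchvision's resnet18 purely as a stand-in for the observation encoder (torchvision is not otherwise used by this patch):

    import torch.nn as nn
    from torchvision.models import resnet18

    from robomimic.algo.diffusion_policy import replace_bn_with_gn

    net = resnet18()  # a module tree containing BatchNorm2d layers
    net = replace_bn_with_gn(net, features_per_group=16)

    # the predicate-driven rewrite should leave no BatchNorm2d behind
    assert not any(isinstance(m, nn.BatchNorm2d) for m in net.modules())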
diff --git a/act/assets/vx300s_8_gripper_prop.stl b/act/assets/vx300s_8_gripper_prop.stl
deleted file mode 100644
index 36099b4221155b491a2ad61fb799c7fd7d443b77..0000000000000000000000000000000000000000
Binary files a/act/assets/vx300s_8_gripper_prop.stl and /dev/null differ

diff --git a/act/assets/vx300s_9_gripper_bar.stl b/act/assets/vx300s_9_gripper_bar.stl
deleted file mode 100644
index eba3caa21990c28559553fe0003ae379514e7215..0000000000000000000000000000000000000000
Binary files a/act/assets/vx300s_9_gripper_bar.stl and /dev/null differ
zAS9;jV$Yx~I_KbwElglHQ&({;vZf$hMIdg0_dG*l*huS3syP2u&*NYT{ zs|bw{Q186w84^F3VSS?9an|UJEleOjKv%*v4n(9NTty&SfTIXP;;!|qZ#3JtUEWa21i5 zcaC8SiC5P?)xSUFqvK|^c*MdZ86K~Q6ojjY#Kt&0k_ibNAve7b9aFb-9Nk}x&j(OO z6tj+Z_3sluLe~29B3Om8jf0iYF}3&Mbc8&gRXX;y`7X*&Q1j;rJ)?P%% z)Yc9iA@9yA9s8OOAKkPn0>@$kWg7?U(REC1K6HdUhE+QDH6K-MRBr+nCQ!CySM90+TLr2IBR_T3ZK73Trst6p536yP2tv>K` zng5gTYTj4n=f?C-ZxSO|#dl>yC}xC@plkbk*U#egI$PP#wh*(eyEWw(BOQWOdNqxx zVz#>w!pCAluZPi?6@*{1XjMLBo&RVrJ~Dtuj$=Kkvno>QBL4uL4v zH21ODh}ll_D$2VY0gw;JD#VDV6iH5mhy3+$+;Sdq;5(Ky?|jG7Ay~!Zhu#ERn7}tQ zseMDEhp*06@vRK%%k7AWvR1-ZO7C#HYJQ%{?K2-G;aG)!7qiBmL~xfd5%#o-vXufEs%=s2ZF~}i0e!#UbGx1D&Ptqj#Y>@)seTxZ1ZRk9?Dv=g$W*` zh6G!f;QQE!POVslsB)c6c4E)FJcGi=VuGLluvXkB@}0qFJ**Y0@O_N#>F8H<5usRo z1)1QxC-6~xC4_5>*!(p2Uj{`!TVi0n`E>RW(S-B(QX?nPOJZx3|l-T9`0dg@Ng z`B+SFpKmSi5Qs2M?Rzd>#}C)ry}WEWVmVWb|3~bx&QkQd0DcP0M@cwVA#z!FvpTWP z81tm64jPr`o_*EtpwJ;ztV z?+IS5;~pcd6{~pdttSz@qnHR+)#&UBTXj1oFcI#nu@8Qpb*yD~eieIOuE`MLScS;-H1`ql++(QTerP#cnBd+O z@}lLARfxdXw^}Doe^os-6a1DaDpmA8{AS@jT7DCSwPF?Txb`H9ueR`3V}h?u=z~?f z;~Ei);q0=7iRhYl2!6kE-NRjdFE4$tiX(F)qT_=tOoY)1?1NPtvl|f|A8cVFjK)=Y zhu|1Nj^*PcL0;a8tm4SLi0JrW6-UiQge!fpieuv3S!R_E^voI=*YE^ud!ZBWm#o{BG4_BTmj^*N* ztBB}YD<;DDEAvu3wS13qWH+CYpjN^vj=GMBPOaF&L>Ph1wc-|XY-Jb&=^d2kid)E$ zV0>jGaa|GN-PlZ+ElluzY(6>!w~%AO`HUFhN*}D^$nJ>f_+SeY{M-aS9G^vw z_~iD51gkiHGa@>*Vha=etk_z3(<8bT-rx}c%C;`v$M5-W1gjDs6Hp_pC1_5@YnJhf zC%y}T)Yr;i6IP)N6@PCcGUeD4tsR?8ySKshMH#i?uj@L5)CpGc=tJb$Espd)Wa*q& z8?1`I48%C78vzRwJVwz7y6ImsANTGwtjVhQdqs?cx)HE25srOk|9VgJ@$%rUnyljQ zc2`C4Y#U$U^PE~_*5f;xkDa&NwaLegzZ1q>y&C}w6Fl=4nY8I9=41HS{hF+bzhuVj zz8e7x6XA^hw1L&l$E(L5(qt7@rgVQ;*T_~yVC{_wUe^-Y=bnSS4|m)b)@kxw24&=N z;#H$=1gjETjS7jSFC1Y$ZkhR7gDoh72v?(Gf>p>T6B0kX0MFnR`8^G`pbUanKSg4K zRmgG^5*yB(XzjRf!9@+WpbUc7NJV0TRmh$c5)V9VyM1ol(YrR-f-;D3?KLJ?g=|eB zG50gr?OXo2E61<}We~igr*TUW!K%bo2SZ}rzhJj-_nH5g#}<@9@QR*DLAZ)wm3|?u zxkW-^`1Sa@=B-q`! 
zRmj{D679RbY0vp9pSZcj7L-Bou9HYXxQbvEvVeue-lN8wkEw6}sl^tQLGb#6NI|%Y zU{zv!ULkS!#)q1Z7p@zeu?1xiyka3z5UwIvmDuV>Nc`Z6{mjQB_m9Zff-(qRvCt?B z5UwIvmDqYoNGyG9wE6hqHalf(K^X+kclE6d!c_#T5?e0`iAx7>X+E;=@0GCyWe_~y z6)6Z;5v)QEvXI#Fn6=Euvewv)EhvNF`L4zef^ZeVDr8s)i9dYmV+}tpdG_qd#rp$q z1AdqAzM)7jf>kId#jfSRis{~m``L;!o2=qp#IA%?1dhc7%E`n6J;zjR?k_o55JRRrb(P$i}ciO;3y zn2)Ld9MfV86Pq8tr`{K7{_^oa9pU8+^a(3l?SF+ zJI2rWj|N+qxN7gA=3~ZTC#a8ta20{$LY3rR6%uFs_AK+U?6apf*uuo3c?;(G>oIir zMDN(( zT$V3su!V`am#?K7{Vx6XM12&5s|XwyD$OUPs*qUY8;6>Yb+#YeWD66AE}URK4n69- z>Z2fBMc}wlCAn9H#EnnyXFhH}Wki!LOx(ELEc3D8?wRVNAY4V@xKJgzSB1nILw7YF zvzPDKWD65#p1aI^ymi~f>Z2fBMc}wlCAn9H#Jk&XX+91)X|E<*nE2FHA0F`6~^ZZo*IGno3Tx{FrjZ}^P#zr)JH+MiokK9viEsAB=qo3|I?pq z@`?xdkKyWvm*T&Y;G7&>rWFhvSy(9)hVf(^Ok` zQmm|B(bWj9JNLo79#ps}YQ-wL&=2?)>KFEHU3<5(ehJ^!6}_ha zd;M}UY=`6X!_P`|u3UL`S;cpLPr~Pc>?_YZ6S}8of3F#7QLBy*R`H$RlMuErp?gK< zL%+MLwOrS!2YjtGvq@W5pslPKNLF&L_`LJGgs+nnKir9|;x|K2BDm&E@HsRWuDl+s z;x|J?D2BV8EllupQ#7L>_&LzM!ajfA^sCP{KQ~6W(g&+_=g$cJhBGDb7MaAf4G9W5^u-V90_gRwX(A2 zRf~PNvR16py{XKfQO!&c6H*fVr0UL(`Ous`G12kCD&0LXLh~HPMCl`FITN~*VzlPo z3HuV^zE<}sOr^OWV;`<;2di{H$_UN%5ff6LUA`Vn=-!Lbn#H45E4JuWkATI|D> zwPKa-u^6FQHDW@#)rtwe7gVhBlF)rF8*OXGgSPH-**&J23EH}Qruh{tLqhHOV3qF5 z*Z=(`f1B~9DWi(nP^b|RWr&HM0q z)tanAKbY9~p)29@suhIgRckX5_M)0s&3tHnN>=T$2R%d6IAf}b?Yt7bklOIm|f zc)AjM2D=hIuUbJ^UbQw8{A^lYHS?ia(i*J7GpIS@^{MSj_`GTbf!=@#eu70bubTPL zENKl^;R)7NReiR*5iOnGemY*5c!0RAYIsgZXENkczOpYHcQXOyutm|8q_MKVI47 z6$H$qYHI+V5X?X9Usow+@rlocQ_MfGV&QfEXFddT0L!7@#@rzB8DWaq1lCa?FA44$ zxF^EMAPHyNRx8Xua4hzL89IaXo5llWXXD`^KGiwQm>){e5}{@(@Y^Iv-UIWpG} zb87n+V=w}6<@I0{uXsg7#|K-Oz+BXFz|?%OidXI;qV!R$GvHWE@Y=1z&vtQhn@y?t;W#l2lbROia(&gcP{;Qlx~S6E>|AB7bJ%XQ!TIrq8l z`}}?7*D0|ckoc@+y$Ru-fe9Wh2etCK?#+ikF1NNPA#7oSN6zNMm3Jbm_zv$$2tUuf zHxcy=9UrXX9g~RAs^N{p7AE*92;0Fb-ua1$(nqn@hqo6K{4|@7l5qS)@l(X-Fsv1; zFe}!mr_KsXw|5jDiwS-ru-_FPQ}Y-;YPl<)4_5KaA|lFm1XqxW@Lj99aWh4HZrqH=gWLy)*Sr_O zDwHh)uC`pW>3Sbdv*~86itno$Ar%1&6DV5-T=k*Zbj^om)6G~F-}yB{DgqWJP__)X z>O-^Xnh(vUo3Sc>9&3bD1T0LTY#DIXhi20?ADT@!V^#d#tr0%|Zb87p1j_oAxxU*i zn{LZr56z~Vv5I?~u7uCOTM#%F6DZr)5$Z#;>6#DCrkk-U?%`^LR0J$cpo}$Z%cg5S zG@EY5Dt;cj5>gR37859223&22X45qvnoT!j6+hcu38@GiiwRzt6wz$Dx)Li+v*~86 ziu)4FnX9!b2%mqqAYfqv<#K&jv+0iUJ~TIO#;Ults}Vk%Zb87p1j?2HS8Ju&bQ|7> z=Eluf75Bk4!e`Sh2w0dv*)rg&56z})J~TIO#wvbOcO`r_-Gacem_S+g*lZ11v+0@- z&5fI}iu>BGgwLj15I7bSD3^P}noZYyXl~q$Rq+V0M)+*H1px~aC|d?xJy)7dx9P8k z=Eluf6^~?VgwLj15U?eHx&;9X6TJUv*>uf^=Eluf z#WU=#gj9rO(`_?>GWJz1o38oL+_)L5;u(F7@Y!?=0v0AvwhXv>t~8sj`Ow_B8LQ$o zhZ^Ct=@tYmOrUHTaMg!q(={KO8@I(OUUTS5_-wibfnza&vSq+kADT_qd}wam7OQy8 zp)29D=@tZz#RSTlB!R`EQcE8(;076gvP1j=PZ$_+ar3)Ij5cu0#a VOkg%qW)~|6R}onK;5kP~{2$;m(f0rV diff --git a/act/assets/vx300s_9_gripper_bar.stl b/act/assets/vx300s_9_gripper_bar.stl deleted file mode 100644 index eba3caa21990c28559553fe0003ae379514e7215..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 379484 zcmb@vdAyF*_s6{@Boc~9nxsiean5nXxzACdG^m8;Zz{)3QX#{ICR9QSjY_jn=E}Lx zeMM0eissUwnM$M-p7q)LUYF0lu5*6R^T%^uufEpzUhBQr+T%6t!I=Ml|7JDR^n$cV zfiX8^)|`KPcutLKZAa{#8bYpUo-ESejcJjwgjhO)tyFeaE~SV-@+r%Tj|)b$u|AM4 zFy^EiPi(n5(kH*8A>S##VjSI-+}@I8iiXUdaXNYf^_4E+}Z|9@%gvZWgt$zIdO~no zkhOGuqP`GU39%)$pnx@T0(7Ur2NW68SBT4ncu9zlp6C=R!9)s5lLo$nSOr;87i?$NQb8a4IZ%(Ht>v*mXCH}7Xo8D?Vp@ia&yTw zsqO^}3fStnimNlb4{a6>`*BQ>Pux6hUdd$fF;;xAhKYISt;zg*QM0hgyJN)1r;pAn z`Ad8>mz0m-Y%Td!RTh+%Tnx+k!D)Np9wOys@SEBs(;aahevk*HGHvWmAW&J(vDV}25%j`$cI%Q=WP z^K--9A3Wi;N@IQ#;t(P35Q5WU;;{YKW~S}R4c|UwrbP9P)OlkeawH#Ix6M<1!sG9| zF>HU-U9naYijUqxRF<-0t9iS7hZXW}3_sj)M~wJXh=D@9Cw0#4iirGU0F1!!rz&&Qu}an^Hs4L 
zM33qsKDJ5D*=lwEKbfMA?ZaA^$BDb82A2u(sFW_(ITKw6@5;;>-#+Yf%__;qn8)Xp z%$NSLNeFJEjjQwyf05Q+;oixy{&B4296m5Iu+^5w^g%D~;JE^)aIK zn$%tS3ktXtx2)56IG0gRUM>T4l>m0rtZ_#f@oW`?%U3opFz zsn}R@j@0=RQXe-->9W;DFRslTI6W^sGI%;hd@lWCfcRJ~t%S!CCh9d^m$`mxUij5B zvm~mM#78|LS_!dTYN_oR#o=LJOf5R$#p3XRcE#aoFHViM-5ufsImfumR^RU^4xj5* z95x*gCmLmY?K47f+hv0LjWHdi*M1^C+DOi;zx{ru=;dZ%`ou9sldoNyshn;Wp7cw+ zpLcjEnBU*xKB7IuQ)s-FHY1E9|wz% zfw5l8#Jj!vgv*a94!?NhK4~TQ2ywI!&q+S`TD0ore>2y;**=_dR{W}lduoi-i==ef z>Z5P|%cQrr57!+KC)x^u`}X)5LuE9O^Jn85E2yvkh zY}NVC^_hP6=Y+GGJY3`xz2zD;Ponxm^1&J=THg0wX6u+{;rn_;F~N6g#{%bkm z{iShY*^~21u9woym+^}8!Ng}jtj#Q2l@l(xc!t#AO%hdWiK>N^E?*}Q;lu4(hF#xy zCRT%OWb}Dfh_2aus1-_j&XX;};ib>Uh{Zw-7vdEmxCWU(3w`O|mf@he&q-8=Pm;SO z$@$BYbB-5RwHcq~gcp84BUV=DJtgCnuO?=>5 z%Vos`uEB3r$_R-5=o=BZlego8d`5zG1GLf~#`i4dOEkZqYs zz0V2r=Drmp?w7JcAKW1M;Iev{2<~a}Y#?_T6NGq3d~m!yj#-=er(te5X2GMeoL85! zI!emwZ1KTXxJNmmWp4P)C6C33kvwrgIrcj@KdpStETVa>Q0|z-*Vusn#u?N z{&MLFr;m(&cW>3QL{OGhlBj=v6Z@WTWkEr@X6xpX*1)jsp2JEZ|MlCoR-U^2jBxMX z0pYH*Yn2+4B?2Yb3NoyGqRr5{*2hx~E=scovJ!W+bw2*Qs+#!75`hwI1sPU8@yp)5 z@>bB8FXmsJW({N|+Aluc=3`lx%HktS1WK?KWLWvc)YMw**pPn~!Q8_sDzTED@xD^Ok`GoXZ>jS-(HIS9KZfO&n4>y)% z3ER)(Rv=*I6EgZpdo%{44{IPRaZ#T0;l{))VaKbu6$n`Qgp8ur2S!oWKvrTy`ed6A zjK_A|1!2eCxD^Ok`2_Ot=l(e<`7hmiS0>YOTVc2%cX(05HDTtM!G+=I(?%A(yZW`v zn$-ni&mOoJmA^mgwxEJSkAe0 z*=pDLw==t^qWe`yfa``YZ`S6u~4Mqgl+RrVQXI8FBg@bMgVo~)9>My>sa!zVO z#hf%--L`CbrtV{@@Z%M6;;5SM<(@A&e?)T5Q8D4yV55`Y&n=PCJxNM;!5O8QCC{`C z8~3`UsKHC6nT6HchNrE$Ef&>?7n|pPEJO_<*y^6MGMW8uFAD!TWLS(q-O^5rI}~9_X{^(@j$GVO#J+Z=9Q#feJ$;Ztxl3W4Y|Bucz%niF=EV=8X-Oz(?Br0e^jyYLiiAS=Pqy8GRa?;D>lSI*SS4P8`Mp803# z2Q`oro&QdR0b9(gw)aXyz%eSAND^W43Fp(UeWyTQjS)hV4tsKeRO+& zL@I7obV8>PdGd+V9&2TZaf@#Eat|7n!8cHe4H-A*C$I3C^D_t7TeN zH3wI=Wrf_n^Je9!tT%%9c&OsQ|Icp#*!TGa&utfXd<~A-W^rBbCRod4FDO$Nh z?ls9qg|-{F(zeU_lbIaV2iU04KUf1<3C^D#uS#^h+W)2fZCPQ&nf~Yg(Rjtt8YBJO zMumPJx6*#@6Ec?As4$kW2C@>IKRdRUEbQmT_LWDus4zNTe#8FJ*v`?)Y^qM%ZB%3b z`Jc3Q-%7_spOA6aMul;gHIS9y{28-#MT7LM)vC4n?)Z+WMUPdCu1jOTs2E)bIa*@| zRH$X2akl+^Nh)ro*AJgKI#UURxw-unDb_$%g7asu&X&N{nF&OQ>p8Bq9If}{W%Hp{ zXCChe+yRtH7NVrK;u0;=s6-= zBG9s8E2THB`Moefs1L&$CbZ>)wmuTXu@W!S4?eu!Mm2NBU9qT6UVpsJ`Kad#mp$-_ zlTm{jZItss^TAe1uh}-dAVEY?v4)B2Z#aMdRbEx#=Y!+j^X)nt`-I>7N2QDWSwf>S zY*pc0M|}F&g-IfcO3MdnG0|Y{gwY^D zXjB1f$|5k@c|M}jz2oHVg?a~A&}w&~)@(FhMfqSWy*qnn_P`&!?JUB6o@rNF(w_rX?rB|AvQ-2@?6vY#`d z*F;={Js(D`P|xZQtt8y>yG%B{z*zq1NK?r7Z9dB>J ziV{Q=6@Sg(?_8V`f{LOF*lPb4?#r0z??-3}K=z8mX)(bOS{Km>AOF&TXD2-U@;8Kt z2;c}Bwt|dr6FzaHye$BCw|pBu;lEv(Lry(E?DJu@3}g_HFaF?Pm(r^T$-m_hYz2AR zqK;t~ORSZ*9*Ahqkh*EMy5xcXY`RapcChu4AdC{KVFL19EjosvWj&63VSta>@~xh& z>YcvZ`gq`nTA4BlqlBi#1mtsHI4=Zk%*S8!vOe}JT3fG6|!Ero{y0V8pp0Xk(7-In4U#@=fOw zwyJ-H^HEx@ex^*qD4}UF0lCel4k2h`ev~)!$VXoNjmy|-(e2Jh=LromWfDdSO^XT0 z-3mK|pe6O}SOp<&g|<7O#*vva3802GN-Ht>iT0M3m0{U@d{=K{8@AFs?SKE#)<=Rc zN~nej$g}sh3qi}Q$!vW*_f6*lw$i$(S+S8Vs{~<`Pz@81dv$LYg7(@~0b6Msd!Owql9Xhfc()bXN90`Badb$jGFp>X682qVY=0rBFH-b zrccd%vLyoPOle50VM1l3WX$&LyWMT_FV^;*`}DP8uN%h{A`)CnCRRDvt_DPZ#P2@A zR+^6_ao+QzY($MezI2&>)qi`}v{Jn$;@z}f?c96n|7xCYi4NbkjArlP4Xw`0nO|#| zl@S$W`R+XHED*tw4H`$Ycc9U^I3xBST{dBq(6pF zdZ+c5AdC{KVFEIK2W?Efr@C7oFCV_OgspVu$i(gAO3NgS5}FnhknuZcW3Ft`-}-1Y z@%0k6()lHo-n+lFOu{IkX)ysAzk`-Oxbs%)?1f@WA6XOMiqQeIwZyELe||K$|MX3U*i*?-A)L{zT!rP)4uJ{rpvK;cIG19 zG(*PxM98n7!xT-fz5`e)<9OG<#*19`?{1R0wvfAGOTjU>I9uYm=sfG!?BLiLb(;YN=2iSG<$<`n4yF4O#x>F4k zdS?mRyL*kE?x4{-*^SdrwEl4a=n>J=ooblSJ7mz_U3m0#2aVp5=j0x5{oy{?Bci7} z)i9xV`k>2xx`RfaEKbNYvi@*C?-9|{ooblSCl=6UKixs2Pe@aWjL8CeUw%`csBSAz@cdB6m zavOOP2JO{H^mGS}*5Jlf^{u}If%>q7YM6kGr*P256ldQWEXY2qp?sjxmYCb)FzYWt 
z7$sE01Z2ET0PXdU=;;m`?NJL3uVeisi0J7~HB3OpTM^J@Kixs2BSW~Qw)K}FqNh96 zFaa5FcR-i@bO()&aKkp&wEhxA^mL~hCLrT273i{`?x4|e7tc6l64BG0ro{wgeWpSi zckdD*VOGdrzYVY}6nKV+{*L>ghzLXq;#O!euqu-P8u?JGIDzLY*zN1J>VpZLGlOUy zL3d=!_`Z$r=k#97eTj<`SUCkBdUpmZpU^w9W`%W2Si^+gYq?n#aRNS&uHK!&$|v-W z?EW=F?A;x%$Z7=|vfgXq+tj{$I3?H$GOT<;@5ovXpKb5%;6tsH(0i>-c3wuDU@OS5 z@`*RxJzTK;$4~9u9ek*j5_+$N@9M_H3ATa^E1!7D-NOavJyOx$-NA=iDWUgT_sW-e zW8wr`L57u29O&-hf`y~%*}FUVP%9<$Udzq*i4$xE8CE{=qPvF+X8zK|-rd26S}B1$ zGW_o50mKQmf($F4kT)#$J`rzN_)9fpq>K3_`u*I^F9C7v?veAcc7m<;)ce^IH%U)T z66m||!J0U+Z|)$hwXgSIK&#=9Qv|NG{uDZv^h@O}m~R##aE>ce1t z1T_v%GYzgf3QTL z`M_RC$T@aQ!j319u}4a4SzQMitLuCp_=*G#zH{O05@dXjDxbh=Cs<)k6qg&;G2wTt zeDb0S*oxCl5Q4eu6#Kv$BczM|=r41vJvqXZH8U=0)8daRE!3B51TrwRCT zPZN%D`QZBCoMWdc5ZIf{_ra|}-zQ<+6}KKs1lm8?O5bu}omYYo%tgf-CiFcV)|Gia z4EI{@j|U&(+S}4CxA}y3&u~Dv+~)S!B|{6#B@AnrDEVZCrE|It_I=p)`$F$D_u@YWe{BdUQ|*4fR)w__RmWYkq_1| zk!V+eMui<6P(Ij;5qmU3#;%T>k{=aY;R`qjd^1lFWk$sWk1^IqL~x7KH_ceRs(Er> z0briZN5EG4#vH3<6GY^LHB6L${ooeYdcU`A&JWr$AlY`+N5ED`v~;8Bq`Uf-Pk61J ziH*0p@py65-i4C9{d4V#HKog3eO&od&+>f)Y;{a!SLbIe`#%UTDkkzTaIL5Gg*?p2x1zLk__3TPF<9Am{|0E%fT+Vr9U}EVGcZFN|@t{~#+H3Xs z3~d+BZRK7$105OksSyO8CKE&y6>IcrpwG3S-IFlPHRl`^(oxf+#a8CbZn#J2LWN`0fRJVWC8^ zS6pKB3AA+C3cE6azz$XkB8rMNOz>DK#`e#(M7~493VHazI(kmYnQK&Rh1~=|VE=&x zA(->Q8YZyc0ch-s;QL@JtZ|1GR>dcX$Omhfz>0j(*hj(oP$J-!A-n@3uO#uKVk_Q1 z(I>L|OxRr`@HBy^2F&c$Ckx0t``4|HTE1ffRxhV@Mk8kb>3Xc=U)>QSUfYPT2PUX= zc*k_y3Tu%ukI?tQyI-&dvJxDvx56#0vnDaaP}g?7)c@~7j@Fp$%CJCIhNa_HSQ&;q z`9ya0nUU3JUd~~q1n19~wCs3N)L~ryQL^KS&Y;8mL|q3rZPBhmj@GVT#`i(2UQWlY zu!;_O@`>z9JtHgiyqv>I3C^GFteX3{T{-!%?De8E3^7wu*9_h>_P;`o)|i@Z#bL0y z(@p8P6;>P~Pd<_Vn_bmtx{mGZxkKyy1ugC+W1_~7Y?$k zGlLVh3`xhWusSn2`?Gsby8_hQdCM(c&QS_VaQ@ti+`WZ%^>SM0Sz?y2u0?%%`kq3L z)|j?0*_Et8>irSvxD{5iB2PXs=MlSF);zN4b}#3!QiAhm`?>k1zFobX*14RBO4sfF z`FVUEa@VnT)o*b3Pot!@yVc8Tg;l@Fnad4cuq%sA)!b2D&QS_VaQ@s%Z!IgGBZ}PW z8fTOhM{A7uz`M=>KH^sD!zUykScPqn57t0dg7fEA#cR9L*{Wz6x*iZ1Tr zp+4ePS|2_kbq>Oy&RGLl3C^GGgLVf2+Xs10b9M&-^g)i+7-_pUDzsf)VXs!&c6~zn zhm8vTgEf$q;QZO~szAppo^`GJ3}C$CdDimw{5Q8t0s48|O8dD_$XH^d!dSu@$Vzbj zyqzi1Jd<4aWWdWwq1waPT^ zn8tj3y)Nng6}S#^w8qfx7wNc_UO#*yyTgW&9X7n2!%7LxpW9mmEfH5|PD@t|;9ASk z`up{<6|es(lfdd_FXv1^)_VzK*321a_Z%qsYgAgFYo7l7%2GTbRsG|-aP<*aSsC{} zkd3J@au?Q(1ltZ7nP#hZMy|^I+5d*HsO!}+;>!>JTXxicV_MER&=Hyz6Z#By;&oSx zkK!MSOLi?Av!vbSL(^>4yl`da*k5~uInP~N>Jz0R>cFDqJh&KsI$4HNn_x!-Ho zijO-Qzfkh%{i~OBfAXd@Tj7bdX3t*X$fK_>^@)3CzEC1LH_wc}Da{%t^m!Mw>_fKx zQxHMLn~KwHmG|?COoM5C!m(!+m-8+}pR*C;mIJa{BB$2eq12<%%?Gn9%o3Q-0|yKGr{T zMEbHb$Gz5j*m-HT!W%FB2IUitUpm6RZ|`>Nd1=-#q3_Gw7p%E=u5NwZ-YZ%!-aJZX z8@_Abv3K~!Z;5E3cw3Ja8nxX3ZI@djw_~4J(R~T7YUYz2BW-Kfw3xtiJAOa?3$*qu z5h#KBfOmbmhm}vXAK$@_qT3I=J>@T3nP>94L4A6X($0zQzgz7K$vPYgeQy7e)% zXmFY}OyGS0eDv9No%qNSffDe6boH$PtbF3oDXXoIOJDAtW(^Z~&j25-s`n5dSt3vZ zK9H`yjewO;TrqaH^)aybHEGr`fp;A6aq=;J#7CA0lz%WaszC zE=se83B1LHk86&-Nql69KneIjy85mcRz5LscO&Z~_rbH%tYPAnzRm}}8_BK_b(*9a zw)rD}=70iAzz5RRcXDm_+*axnx3@jj`lvhZ%rtA5fQ-4rpe4)p&ptZ`SOEs?}~YNKDRWcVsne~_7W4&+xv7O<6;0={M?2pHSEu!f05R1u*q5%0yY-W_?u zS8HuO(HpNoqhc$pMFoLx&eXBEdIkj#iSF%Fy~?E3Co=fpy{u z0+4MbtYL!NG+f}%Z;9w@+KwS>8*}{}=1Q;?=Qcq^K3KzqM(f5a_Q6(Mx(Pyk$ZQdJ z9YhP)7K7auw7p?>2QjvPF6T<%%E{M0{T(U!1Y7a-s$3$9YU7-q@^yM&gqjlGr29VD zieoRAFsxxhBaGg+2U>${#eF_ONNBEIv4#olH+Wlb^AY)AEA1;7i4uhR2vmb>E!V&L zc6S*t*L<**ws7nRks#EEVGR@7SE4oU>cj3|f_?{$_D8Iv=aeLQii)kY&tt8Af&gT% z2CQL%+k)?dt#pLL-T(Bkang>jK;kP5age zSR&Azvz5wNJ@EhT16E9Mw9ZF%*P)c|G=x!2cOZhSZ=-wF|EJU^*h=3t!^$UQW{n+l zF&77`_|Zx*n?_gkL&p9b_}$I-$r5(@%WQ24k`W z=7n2A)8*Yrd_rccTOTr8-R`8KX)%F42k^U_f0ZRLzdhP}1=l6snZPGx*1h#1v+he+ z!vyv?fDdK$V@=%19Jyi!vuDwfDbo6F-rtWzz4kR 
z{uQwDiGyAlWqmxqyTdZpFoFG~;KN-9vqYc-d>~!kb;>6s(`nnU27a6K|Lz!cUYC6{SmVE6}LlEoInoM3NoyGLT2gN8kBi`Db_$%Li>ta z{}CtH3NoyGf_hZ6Z;)Ckp?$@zV~G=N1sPU8K|QK~HIS9izT(#7#0j>73@e|I*`Kzo zu!4s*kd@HB;?@<#3ATa^E1%FFg_TIGfvkk~6}LVrPOueZSowtZD6FVr4P+&>uefzy zae}QN!^$V9N3~%MWF@$F$zHFn*P;(vf~_Er7u!aeJ$ANn-IpugrgS>WP{iIO(?FRd;=(ii}>&#_n z%=9)#wmfLRoYbdPa=d+=^?MZddhv-byR2EfL3a7CCHwZXh6(+G{)3pht13%f7oyr{{eo z5mk}M*h;@Bq1XDvF?IGV_*>FPKW7aS`t=O`-s?VU2zz!+u+wlI*-% zGH_}8+BvDmceb$m(BoU0+Yfo)UiUt6XF>J6p%PJlDJ$0K*Ej5r&uvW3bFcgFlIhiQ zQWxxLA!FjcboGlOcK-5-Z~r2XeGKcjWH))b6EcFe_Nz}Iw;5PT#ql6 zNLRm9qSk!k*rWc<(YDJPCUgy!+pXv0`VTIByIxM}h(B6*`|0cVTC^*lc)j=3h4my4 zve&P@M(J)nOmKTM=IhFR^Y?CRk=k5WT08A;uit@_t$mN#ykwT-y6C?aUfX4Y$4Xh< z|7o?wT_mbW5*4>Gw$j-&sB@n{|Hz(#;YGy+UssG-xw7fv znkK8$u)uZUfMxJ?_g zZqcmVSHwD3D7L~Z#%|d?eSG4bN9Qb%^F?qrhu1%t;E~Um>pw2dZ6)cyECgFY)_r(< zqSDtb7OfX+jMl7Sf=6)q3O;3Z>vjiR*?Pg|Q7Qd)hdI0Yt_SZH`5mA!b+TU+ufO%Y zlzutHni&0}2wA^0Vm`ewZ^~E8Ct8hr?bczA(6xQ~1@p~rLt_Me8BK9oOz0O=H|w6h zzNXkpzr@z6Ik?a#=nHR(HB9I?Va%=ez9OgMR`^m4E1#e*)hX6MRzkmEV>Y(;HQW+v zrC-LE-`uy*C+LfNiZx8=mv}d4o4)F&*h;_fuWi)3&?ji-K#Da?p!MLn9j(Wkosf!K zVLk(_e1c{`*p{eiajQg2#P9CSc9y_*XRqyYd-VzWikxB%6L?<=AMVX|oWPS1yz5(4 zSos8fMNYAX3A~Ml5BFv}PQVA!)pypg@(KEioMH_VcrOkgcrWgKceVt4AYFcY?i0UE zw%?tDt6uJ%Vht1cdIBHrYhRpz4|vyaEU@y4QSQ5Q(4)7UjZA4Vfv+;~;l7N<3HU&| z`t1f*K5@4D?i}Pic9H$IrD-vNZ%FtZUrFUTDn`Hu($z0bu=0rw@)g<03#Nw#6r@t%8l`qvR*QZ;3KILpnzz5RR=Nd^Ywqm5G`|cc^IPOgQRHSLe3Ei_w);M1| zB=yM^L-1uk(9s!lJal}-j1>Ldy@zWp#B)~;DVT8mKP}iwM?1`u@d@%_RKr&Tq^l`m zoR`uSA4iLiRqr-gpaiVghfg4?mxPdf7}hYMGRAqCVJd|DXLGI^o#Uf(iu89!oGHYD zETPvvM5QvWgFF%$Qx<`*eM*2!_EYYYqUC8cj zHd2ZaK;@4$&Ic1;uRcP^Y(4@AAUNI9ZuLALju>Cdp0*j>baO%afIn{*i~b>207Aw} zZ^&2n8*G1fYewS)TS11EPaOG1oirl4Yt}_+)<9Nb={DyB>xbq2Rg7RO$guK>>#o{e z0w3dVk(o}E7GxzJ8GVb*M-KCs3}?f_y0TS11EPdvS%z4cLg=az3ycRCZ5_5o`q+Rz5NI_{nxg z{Nx>@(yW23#A}ZavH5U&E@z2A3ATa^E1y_hu-DG#s9a%Wnl+G>__AoI^?{Y_#$<^= z3ATa^E1wuOqECU8l^NP-Xqq*UmH75+=L0L`jmZ*$5^Mz-RzA_Pi<^7?aqpYbtbwe= zw&BBUKCrILm@E+}!B&uAtE)G(K6ajbUYa$KmDvBN;Wi&` zy;_zClwd2!u=0t);px`L+?FlVtbwdV*Qw5jTX&Zw0wvfAGOT>!?g3|8AOAb)xHM}Z zEAjRo=fmyKkR<{o*a|YNeB$~~J6j($&a9DU4P+&by<&tdE4M#GmI#z!E6A|&iQuE| z*2j+*ZYg06WF@*j>wI8u2pM)95h%e{kYVK$jq|a8i!txr`g#d#AS-e2H_iw4gD@sb z1WK?KWLWvc>`k{?ABWz0cL{4CD^b11NLyC!{MsxLD8W{cVdWE@7Yw&P9(=n|32PuL zarueP2X-lu*|?4flwd2!u=0ujX1aL*gD<&q8EYUbal+}&2X;S^^&gH1l<=+Gj_*Fv zY{tBji__PnY8Eai(e<30CREDkdd?1iG{~$Q@xQQ9jc(a}gj)&GS%{sf1tn~C${St6 zOV{Cau@*5RCB$(;d?r3PEhcWS-aQ<4!R3~geQVuLo73@e|IZ^CJ5h<08EYalD3^RL`&kvPFtkYVK$@@?Dt!2A-{KvqKMd%0OJae}QN z!^$UQo{aT@c`~eltc1=;bF+8i1Y1Fdl~2fA1M36xeOLoo37uKzW+BB1wt@^RpEy6e z3)D|CZ;3UKmC%`hn9pQPoM0=+u<{Aomov>8$V%wULpLidPOueZSouVaxppVdpt;OE zV+~{_bZ(?O?=DWT6=Xd>4DUD`k!|g!@9!PjaG&75qV3qtt=0aa1k&PO1S_A&wss@) zmTb>KBx(g3vbJN)<=wZ_trBbn8CE`#ZS6+-hwVA=p;k(0J9g)*QU8eg8VGIqu=0uQ zo@WNLURc9~wqwi$HAZ(yQv#9b*-nsQvJKeGdvJ%>kF+bIqED`?ALcaK>6SH2pno8LZf1X$2$X;iq^rFMRzATerq^uU+ zzmKbRWTCf4F%`E00V|)t8SA){!6~k+fvm(GZJm$NC)E=lSt3w^tsuk7C*In-*FLfQ zDCe252C@?E7oTqPaekvi#7CA0lwd2!u=0tia{f8;agv;W&Kk%{^f=D>Fb5qhKC(oh z1Y1Fdl~2@`ok-zhwCqgE8pukFoY2hXqt3Q!;v-80O0X4VSouW%A39qfpUBzdtbwe= znOje@K2o1m6(3n5P=c)>!^$Um*WPHKQuoR^AgqC`M3)Plk1O9hKzwA0Knb>j3@e|I zVhZFx`3q94fvm*Dmru3%a6Vw_{&NIdv5bBbMTNHp22pWZkd=7xAm_uCmE^;|XHbHz zAj8Thq;yN*1EtFv$VwCsY)W;WC2W1ftw5m8eL`A^^?_Ex8pukF{OAA_XaZ#T0;l{))VaKbu6$n`Q1nsJlVhv;^Hl$Cs`M?31%yee?;82#8Al0)2|Z)0WBy|Ne5R zlcgQ&ob3d`Ryc2>e1gw*QMqRicVdj59^=B%$Z!=#t^K>`p`xnYzskzvi>gih!^(p` zUs2=}tYPBeA=@mGH}@?enl)>@06s>vJ8q%Y!Qs=}7u{K-*w)~iReXQOymr&3`5@Xg z{k{cTee{hxWo+F6tGo!ksDe#v)>-d$UYu66Y5aOC|NhLwMYn(RzLj@>@Px}ppgI31 
z)ze1O=9H;LC%ovQI_<@&MW1f!Wq;rD4c>*zUqrAKM_4|=8YW)caE0}O67hTlY{lhP zE@4>1MEh6Hw?2|Pn6cI4=hcd=$|VeInBe;^*++Q(FAGt+uN>1bm#r`w=*Unu@%n`7 zOIX7Mk1@tvHn(IkeC)mRl*QaKR=xbMt>-CcmC;JH1_QS0I(U~Q`dm{6foiw?kXtwt ztMmV{bgj!*xf+ZJ)(oofx3!wwybK>vRBSc*y}vAR+18ciw|2u(G0}b24j0w9oVBuwPJ=lXdyGbL75T#EbOwcVD_hAL&$ zN0f868glEOme|wt?f;K8$b>(NO8oNYS|ZoSyxlJ6AMUurs~cyoIcKZpsXn$g+aGmT zl8Ah8eK0X|pre28e79fcrtc)DX*=bfqIY{a`HM&HD=MF0t2c5SvEu#vlSC91r^N(E zYjdtdz)`iF>3o!K9Op%7i9pMWtv-4p8&!h|NkV-X&Ic16?{oB=YwnGe6}Pwvm%7$h zxM51N?P^qptuB1eU9Y-Md@xBwQE_W$g0FUBZ2w%l;&GSnefeJ7C;U4AcmFVG?sL-G z_uWf42U@zSft8Lv!%Lq{5`yWtyDu#!RK~U4_n{i3t9Juk-*_hS7ZH&UC7@xVUQ>53 z@zpc4Vj}}ichM(T)AtE1{Y>fP@s@ysQU z`DJDJYM}DK$&VK$2=x)L56#ns-_P*;iLw22<1Slix!qqny?nxpiV3Y7+}n9R4Bs2@ z^A5LZj|kX`pTf!|3~QL+zT){X{M^s)88{_ZP})D(ir;=D2*Jwewe-G(-_Q6_v4-Ci zC42-L6pH9LoV5|5J{0RalVOl~pOh6uTdA|@l z{>ynq`%c0vV5|5U0SN*pVOl~pOhBHpq;H6G2)vUp3)m`tazTQ?Ntl*U4HJ;RI;3yt zPQjp)Fx#+I{M?KLVdU&gOQ?nk$a%N)v1g&1?4KLq)Q6rFi}x^i`@wHpd_r40&V6Mo z%((^ac6$>h&=HQUbQUvq=t&TO?DHUNn9wyFZr3^&m%9#XZt;Ex>x2;rWc}Tpc*{|- zmA+5HiNFaWii-2WR!Y0~SL~xKEBHtdkq_3Wtfw3!PwuRE@?o%+3>rPV9WvIBVLc9H zcX~DxtkK=yLFn%0?ksE}0NMJ`dHUF0it9t?*1P@TV6Jt}RyuDUJ0_J&81|uS53s8a zr-X2y?vSN*18E|?sr{qV)Ryxk(EadVD)-b`NtLGy+bwN*JM-4WY zv)j2pS|Z4nRiOPGR#>A70_&XDyG~jAK+B4)ct5Ig38SafVLur?6%V_^=!}A0 zXV)rqF=-9*jxf4c6lgrXqTC`vqcUv8`{I;Ou!af!64!RmVJ@o32U}^~;2EcUf;CL| zt9~W#>LXw)twHP!kRSlr{-GNA|Akr#x)&D8%FR_1b0q>iK|)ilRwo&BO;25eemv5oRW=7eHgY<8hh`QPq2mwE(PBQTWM~whfsou z^1&J=v=rQ4NY+cXU1{r+vMbb9iIIrgo3q!_Wh?C~m^)Z5VOYZiw_`u&+OE(>wRhqB zIk#z_U@P`lKEWC$@cs&|Hv{w)TLnbRHqDIOP(CHB9IU#L<`-lob`9+|0ghOiBc7 zrFp{jDnSV5dM#_1&}gG;l=`r5&(U7d545kajK1PW#a7xMF}qEFPY{MROsE{Sc8$uu zbw*T3S4ULLZc7kR&RN3*k4iQl5yAaV?A|I?_LhH%Rhl6>x z1_Q1sZA%zyl|Y{tdm`A1&!$cgfb3l#Ynb3ZZ+$2cXwGr(tX8-S*F2%GSR&ATu$B72 zj*AIGeHhj-q4|q?R77Y#P>MP};_Abt?nlK|ItHU%B?$Equ!ae~miRu{N@=v?1QGdQ zO<6?p8l|Hp?(TIQgsg2k@%$06m5!o#{zwo}RIFiw+k&5Sw$gF_0GWrLAk>Fp4HJ4@ z!E>ASp+tanP^2{tO8s^1&J=cn1kTDlSFs+qh5UQum``E8JzG4zSuSK}1oph6zo{jV0%w zR6P%^l zXsIWkCfG`Q7w(qICs@OTU%ECYuAc{NrF|P$%W?_(RD^m)O);UP1g;X!gZ6V?JI`zN zd5;6XblHk`5J(VOx`s7O@E!-A55qe}@Xi#x3g0K#iucPXpI{9WJWBXJ*oyc4NDxuZ zS;ItP^a;wU4=y*3){lyjky`Cot zwqjWcIcZGxnOra5Cirft?(|7=uf_iOo?#ymw`W|oo|cAprZIo9U9){Jw~=S}Vvc6_in z{I1`WBFyEz?1ADCGk$qquQ5xyzg2*;>Sy|ST$YYoojAHU#0+eo_^9ztn~#Rg|CeSBWF@#ZjrpKQh189$%$XIx zy~+}5)%2d8;qF_C!--!%SmYB2Hvh%?`2PCqZJndnqBX>ceXXR~&Qnr5@7nLor(SEF zXo;ZF5-}(JgQ>-#TLsbe*Ar~cKWkOwl`hX*_lX~u=Gyw$(6M=%HB2Ei95#@(AjZs})xi=g zcORK%4HLZv^bXffC=TaNo+QLuZ=Ihy?u0R|9-LX<5{L?E@p==VXuZ6X^|Ae=L({Bb z0`jb}5|ylJF{a}s-7N9h<^$5KVPeaw-eH}g#o_CBJRro{FRo0Tb=c@u-yL1i5;iJH zi&sAQ#NMK7t&e~1*;T?CCLlNITO49FkL*=E`9@2uzWhA^x~6l1krOu z&s2w9BU?Sud7~u|71H9h89p(nppW%YXZxxW)-VCNZ^z;gYe9_psc@hrK6-av32T^G zb8(+=PF`_%MjnXA#tcf0+B2-x`TNbX1foJ(yiUU>)=e91ee`bda0zRefV}*e;t*?E zWIupHcV1DIs>4cH!^Er+eZrEe#bNFqtpB)V-O$uyvu|nDW6rgfKvYPJSBdz<(>b@> zs5Uk^r-U_3K%V_ukI=0#`QXx#AcAn|Ps`XUzA7a_7$sE01myV(dxR@w6^!fzy?d1P z@mOB9H`pq^ZYDt(B~-%%KK`lsb{n>eujWY*MhVq00r{O%dW2Y^ zWX$r{huM5Q^7MrTY{hGg$|Q^unidm~um9@C(5;WE{MF6Y$Jo~%DPXJkimC)*lu!*5 zkdMFb#tVu1xlUta**~aec|20=9~; zL`x7x3DqzG`R6Zg2(hkEcA&ZXBJ1PiBkQKviq{O5Nf;$GEhZp;HRguUtx>$XYk~E# zvRRW9Tk$E; zaNy5aIf+%|ElM73fq&WR-PNyU)~qfFv2NQZo~Yz}bP$3yaRRjS@#>v}>^;Mv#Y61< zmEK2TiKnjVJIHFr(BFwIu+m6PQYs$l~1kno%kD>aSztf+$ZvHI_;(rgu9 z+gU!L8YUq3tJf~X`cY$E-~K@f^6|(4z0zzIUvXMKp&BM2kL%MetYugIPCxZ&>!a(l zgVJmjUng8Xp&BM2|N2_H5bKhSS$f@d*2i~ChNjsnzLL3oLN!c44lA_}v0B=gopra{ z9Z$YIWn`MI;_Iu+Cse}(>Z&T(g}_)72c3DqzGdCvIuAy$^l ziG`Ut*nuY4RdZCDt>SCZ6NFJhHB3Oh?#=cg*1Jo;Xq!Sm50;F)JOw#TPf!vy5p 
zCg+51&FG-|9W62JqXsF~F!A?^>oZ?Y%L&Id#u~+K4K7NT%seFjy&-iifv9+uDONK2 z#L>Ghu|AgUt(jsCuYH6(b7oHH)oSXG=Y+YSnPTtBw2jcV`KHw#$91mqc?<%C$H zDrY-ge3K;>eE(trYnXU)@Y>AG-*UpAMm!?Kupu|6OM4xfpOSsB*oxP|`o!OZ-M6y3 zeJ2&Lh6%`TRmu&qQdjmD-QwQ$Ej@5R0c)7(am?DxKMixkF$+L!DIS)-?T>18gcF3_Ig8U`0`h5t zazm^#melEF(zq4!gxy-oB!Jp=!kU)SO0=1uYiW7!%pHK}G zkf-g+4c&eje@T5HqE4GTm#~%AAXa++--M>c1mvOZ^Fq+Z6iZ8lj|H-G7F%hn#QO99 zo6xkFfPCQeyb!c8XGkA}kG`^V7F%hr#Y*`9o6xkFfPDSdywI(q4`lR#kB5v&vz3ll zSgZek6PgwikRNW>G6XI6)H3eEhm5;v&nmkkknRmoKA{>N+btpVoF`j`kmYnPx#GYF zcFtlezN+E(@(I;2!PiIozTNu3`*xo33mN?bZ|HfJu6=u+9>3vjthBI%TJb(SK7l>9 z8a_Y5zSCz76E&;v$c*Tf6aHAgx8xjm0B_#ZU;*BqvlZ_rjNtjPh-&3e(nKoFxr%=y{8@9Qo5UoCKucwB3QE?=^vzg3cxnNs!e3y_fCeaRN zh!<vV8prM##Qx;u$HV>-SfiS z9*4z<84H%@y)36-wyK)~Oz3%#CbrC@S2OzYu^`wt@NZ=v@4(3F#4k0&%{SHiHbE$_^ss9My(eY zi;q#_gRS%|TeM@Jc<8Oya~FvZIV;{9shQvw>c&Hi4j2;=6~3$4IlFWhrfc7EQkNs{ zX!m(DCG=j6@8Ns`BTse&virTl2hxfYr~ci{{tjCcWas;&bXE{{Z`IjCnB}Xplkj_- z*f(ce=PD%$Iqk;Y@8YzZHl}%!!=>%n8Ahiv>V8X)}4=Y!ho|u1ncFr8@6h_ zOZMB@R%p+zDU&ctXj)7_#_#TYoWnlrY<*N{RU^$-_neiF##FB zyYq22ZR~D+bi8;=30pOIsWh{&dRu#TO__vILepXbGJbdGF##FByYq3r+;pq;@#5gSOW11uXX(tjXP;%yt|^l+N@!Y4K*sOx ze4M%qhFc$_*EA|&t9s9+Gt<96%bs0RCSjD&w3vX5-`)8*HJ=(~eVozi%4KYYy=7L7 zKikf^E0ZuvXj)9@&NP^RCtpmeH%wptPpkYna^lo&%a&X3IG5_@mdi6M*QCNhHv~o8 z3uMkjk7I3A&wqDLI&QVSLZLlz$|uU}QLs{i`;9Rr<0_}?P0r8XfA!_*f(9|$h$5QsJEuZ*us{7{kV#f=;oWn{9&Yv-x zc79%R%pEQBd&((e-|c?OMuoG;UMhYo)8d#^_{g%+MI5a$4;21{?>NCx-(8=MTg`5l zvZsvs#D*6P@?mbi_-Zfbuu_8aXUu8WO}0M9b-E?Je~Xp&+a^w7n|!#F@tqUDqb;~I z+?tGhvZTe$d-E#Szd6lTyT-qr3BD@`_sfeDC6~QevUy}~{#H4|jniU+OGMsuwO(SQ zI^xs*>A2Oxy#@9RH=merm)lkTiuZbXr3))1xD1V%G{l|4cbA;PcU;BQncasrvuE{< zn)+_$*}bRPZ?us2eXo_%>ugjwy^hO@^K{VQRhDpP>ct5yMOZUmPr^8tceq7 zSLs%$!S5b?qTsH+w{%n(y0sMjzy8jj29;oQy ziGB5P;p0yfG=6-*($CtsoNJkIS#kc1S-1C@f}%(Fw)~~ZAX^_=2RAo)Kht7*v#`!J zV~Tv@#!V9nTHUi?$@gvjvSNZu0VgT%w)yx{&SZUXyE~C1N?czl`=;zA=Lq zq;1ZZ|F56d+T(wqw;|l&BBwP4ZQj{rt(oWt&cCKTx#na;~-i_oY>b& z5Rv-}>1u`=1yB0y#YgAc;TRWl8IBoh)DQe%KuU!SvsQH|t zUJV}`dZ#9Sdi?8gk_JQQV)*@S}j0p9~%iUR)&7Djw!(@bJG3sK<-sazicTk+Y#qw@~-hQaoWrRT}G3u|5hb zq;-udWZmyT%nBXx*E@&Vo$=U8S7hTWfKSMmrSu^q|822V{^c^k%5t3binm`pqRI?9 zv8*_5XbXI|Wh-9qo+OYDd?jN|oWQqK_f~DASborVcrM;8FIsl~DD9(xtj2gvAy zclqayX}kd6pV^9Y?h~*ay}fV&N|!ZEaQ@s|LF5D9pW{}zilVH10#Sj$zpR10kJz`z zfjkxMJ-I|8N}c}wY4Wabs#x6`Ry0o*?Ol%6ertHcL5pJ}L-j0ih7fGUSI&4#3=oz5 z(^9~Ln~RDsuDJB)K==DMFuiz5VF4O$Ic|KCH~yt zDmw$4t#ob!c1Qf&M5Tnb zUDTk<2Z*bM_(=%1f?V3Io~=Qxb4Rol;(Q?vk#nk9!^EF^r(_PU($Lbjtg`|A*x1NK=jYn1S#Vk_R6okt9x;HbDQG3^s<#pPBeA!v|h z4HM-@#a3LR3BraJFI~Sx%jh2g2)6S3T^WSU2j`p#zi(ESy9(jeU1B|OUc!Z%v^qXNDHj&$9y%mf{gPY z@SW2qT7GhcooOg{uU-u%39XMKdT!6TT5>)v>qFDkb1ZO@1m{oY8WgQOqpX}W!KIFz z@4F*IBs!9*jC?@m(I+B;xRple6E!6d@br=71N~gbD17I}S8MDj#{J(ICfG{9j3)`C zFL|+kuWIM3cxr_`@07-Fc95ezWg~*q#onlzZuvgg zO6R@eZpkO~%t@Re$r>hfj|+E4ro^kqG+e}1e0Sy(C@C!~)-a*FNkk`YLHl{nFxF;5Qnn-nPd=BPpTLPmG2yQVxfzj>vjaM:WfBSN(sYU3!tO?rYNF z51gR@A3cUWZQo3ATI|Co;6wAl8Ya+o^?okOxqaG>Tj_}E6WQk|%=d}js$j1Yjnc)ed)6?aZzpiJs}+T(b&u@EuDPxw(*&tHaL0Izhk54C`$xv_17EDYHyPX z+aV&k|>f{1*u zhKc-QN1rfbOsuTfs#fZLn~zz4-IGZWkq_1|F>H?W@xs>8F&}JI`xWP-?RR%)5=7*K zHB7wxyYq2%tGYKN{ z!5St;FL6FrA2KZFgRK@{;e32_aF9t5kq_1|aqdUX$2%|F7W2VYJ100FU$h;bNf40_ z)-Z9;7UyH-iMPaju+_x5&PQRV3i=$N5C2;)c2C&elZ_xrT$L%q)(KOh=_b}T1;pe=6}>X z=7X)Y+`^LLs6HYh^1*2_p=}|rOV5}Ow$l1PZrSxwyNZa&2dBk^w&@nX-Wc=2R@#m) z-E?i#KO!RX!D%s}ee~F=p-1>ta6GO2?J&!*0=d z6%mmSPKya0E8iY@ZOjK->G(M7;me|NHzFb*oE8&0=C@yXb<77_={W!F>z72=kBEqT za9T|0wQKINSH*m=m0qt#~Yw&`1uZa0zE4}W%R{O%}dL9vx4^E2- zy_dLV!R0X@Y^C=P?_7F*bYBt?kq=Ib3B4!!^Q&$#A8e)fLBH-jFS@^qh{y-0#f0A5 z{n07zgRS&_u0ezIqWi>%h&dVhX 
zYnaIY*3nT*R3C<|UTEp8(00owSi?m6VMj+jD)PZrKc>goe4r1OPq2oGk)526Xk>_d zu+{e$I3E~G$|qREM8_J=M>N7kKG^(G@51!B(lJ&IhhhiYW^Fh*m+ z-i;MCw%90(vX7u*?8ev;|Hc9cx}qXB$|!4HD~UZ8Y)R}9R2F0(cWlw9QLNF}jmF*^ zioEA{&YYRw-I)jS@=3X$J?DGRxp!_kx6C|eXsLc6n{UT5b<(gAx&ZFM}ma zcpaA4IIQLMhBZ!!$ZfEM32zJeTFr@swfy;Gt=5nTEMdaibiU@yZLpTNW2`wF5`iU5 zc;C#|&bbZN^8Sdmb3-Dqgb5#G@;!ju25b4af<1tSL|_RMK33*?8MzJC^6_y~*~@51 z1eP%2V}8CTliOe|ALp?r(~t-(VZ!IGd~YeY!CF3FVQ;A+5m>^6&%yZ~S#E>1eBQ+# zSwkYQgb9x&^1Z&?25WizfW5wkL|_RM9uwtzmbneq@;C^4mJNwKzCvD1cx;#Nz2-Jp z%i}rhy~f1LqhH0FNa61jC)7-v!YJVcYN z>6Tk|C6NDG4^8U--8?7SG+!n>ayA>ksK2_6+bC<9S1}>w(07+m4whKj2wwr9eDVc> z62V$l3o&u^ng{6{BQNwgrN$CV8{sP%JWcuLa*1HA))ZPX(d*P3)W!){99LtBrH$|v z8CqosWrDR@Q)tD+oF86L8&B>#qQ(+S8{sQU^sW%f1Z%aX(29vGf9Rnn0M8#dq{b3U z8{sQtj1nP~3D#;&p%oM3KUzg?eDK6BHI`V~2w&-Av7L}VdC4#kDQ)tD+=uHk)8xKAAX){YKZG`WHAsUdA%o4#`ttqr( z;`h%Stv0?qeO5C|ENz7E=pm|+H^WK{{HM{mRQ;d-o7DAa| zt=1G;G4b|wKsk4L$A7ik( zkH(UWXibr?k3P_fiEM-mGA7np!i0~N*rkZZ#EfW7k*|-U(29v{qz*E+*IB}ZkNMbb ziN^MfkU2{7_1OSgF_FzULFTABOPKJv3%f?q9F-BRDf0DM4O%hLVsmZy?7$&)mN4OS zFm}J>U88JvE{N6?`TFb(t(eFnfFLnLoh3|oEP-9TC}zlr))e`AlmV@n$Re2_u}_^P zOn6L$-N-2R$%xie_Yn0SKx+!En3%Md#&F^B9;eh<;_(#-kL|Fl8bU@S1!4Jm)CaAY z_^Cx>(eTv733Zk*;V~+9hvkb|MkEE%nj&A1UZE8eh^g&gQfCPh9=DqfSC`N|BU)2P zJccjQZTffhO4kPPit}Q^R~A-RQVtMWjP5*?HQ32gym~mHU`Ip%pYn4^9M_q@SPW%qcS2X2+P;BY%Yn3 zxo`bWZCrUv&pJz(@Es(ZYcnD#2+P;BY)*`czMJYdox=0aeA>YhCVVH%Vup-J3c~U= zEt}h8LgErF2jUWzFyT9T7W-sGQV^D}X<3XC6A~@iGne>^B~19vq{VOQly2xOmRQ;d-YE_tBa(t( zEsK3);=|0w6ZZ`rz!FOv;kIpUkP%5iu$IN}F+p|J%n~mL^7T5jH4fEPMp(XHyU>aW zSrzFs$2y87On6(cwOU3b1z~F(fBw*l32M8|EMdaiw5>U*?Pi3n)x5PsD<-H9HnW5Y z@0+%EraqVvw&wI+3$2)-u|&RtVJ8@cmaU3?jIlic8cQ;QwQQ{(6Er5)SmNUq^7XOO z_A+Qp%m~ZZM^R|S1dZ)AmN4OCzU|4-*q#x#m*KMkv|@ths2WR{@VU$OmS~R32(cmg z`m6@6n2_BlZM)dNVhIyI2iqQ5MkEDcdrLk$Ln|gIW~i}*36CXgua9DejIcd2k20VY z6BPT@Si*$IM7C#1u}?iV{hf4=xZyN7DwuaNA^zdPS*1x+p{y(WfYuK>PjF?Pm{C#FY zBv3b^(1N|;!)9f47eW9d#?8ApbsOX7okr-&R+ki}Yz?gX$`Q0k^?gg;x~>AhqFnu~(>d|GOw^{%)fvtvwqnM{wRdp% z;ybL*d;r7+VxzF$FVXLPf8VU@cDDyWz17 zZzWa><`vTX%*#Z%b}IP~20k*Y*e($*tl`($V)^EMMNZC@y*EO1KvO?kzvAE6xk) z7goJqc7*?;HcA9gwc(N~0<_xD)-EZz>+JpfI?315TV-h*kIMwwm3&)7wY-hlIEIpE zbrsoQEpL4d2`LB0M9T|lCh%mth3Q2(5-&%rg^|-dED

K_&zoKU6m*yZ$}3Y8Lh&v8lCW~wR$z`oDz7(F-zlrMa#e30Xn=3zLB@q zRN$5Szd-OF;CG^o^@d-47Kj>cs1~my{D!kYn3Z6|zQ!D_E1|6oZ6d)~D1Pb7(H?%c z+f`Gz2okErh;aB7Zh`2wRZ3`&q^;tthTm#4${BunU9eGOt5l0~N%$Rgf#|kXN@$Oy zt>Q|CU)wXD7=Alnun}9OT3kQEFX0PBx2;k_dn9cYeGz^y0V6Sd%b;K*wu&AAwCDkZ z?>-cWZd;{<_DI?)`iAHqdS~HV22E_#REr+jcW;#v+9PSJ=(l4m(Z3E~4QSRX)uOlj z-S40h+9PSJxbwnu#~o7meSWi6sTTJ;-+iSgdelu7gx^0yb+9PSJIDg=GqMV7s??ekWVyje(&vy9LXMyOpRZ3`& zc&lh5<6MhYan=sMsV&$bQBy6>gWVF{wn_=@k+fA@mv9HU!gYJ)u~n+YHKW_!?Y31) zXpf|=;u?<8PH&*wQ4(9FT3lbd9f{qxN(t?ev{m$XFf-6|>h_6>tx_%eBHcc<-L^^z z?UA%q^bIj*(97(0_KB@hE&6TU&T!qfN(t=|%W7qLu!bKrq5AMN&l+m;_>Vh3{OI!z z%W2}}Q`i2y19SWJlZSU&)%Lf_8r8}?Bi4Apvxd6X1yf$_uJ#qER9hURQ%Lg2SZ)B*A{a(AUUOjk1^|I-nHPp5Gz3_U6<$3b*1&h5= zAli@g+?mxmLp*D!Yn^uJTOF2t%FC};-!2e&c9rVs*Lv1a*BWxfSN3~O{5tQ)`PFKx ztgjjy+m$~u&9jEO*6FA9oNXDvyj*{`r~OxnE!L>kUoc{=2Rv)2YmMBj-)zeU<>je6 z^(zo>TTZ=JyYgR$tf8*;`31|)wp>D9ZaHz;0&&lgot145oKSsqh-VFTt=4f%&$g^R zUhezKQU&5k%Vc)IUK6TYP4TRuuJxzQde7!PFJA{TvRT%*>dMb}*3dW_EO(95=mpD4 zXMU=SaP-!sg2Jp-<@Ucsdi5Ctf8?k%k$%9 z^jk_?e8LM(96ZFchPu|h@AjE(nTWg$dnxg6%a-=)L>na`Yp84Cu35gKNT)=eEv;&K zmyk8owdS3;%xufSiU zpYk$BWlGp+uW+=hgfjz1yJfg?8c#u%t#8@V?$~`oHG9CbhPq_;NBYfHLTj?GcvwD2 zjKph0*3ej&wlF1DvuvA#Z1%Y+WDSj@gtN~>(z@cl6RLee z)=-yJ_UJ!biPT2tSMysjO5R!ESwmx8mSxP#xTci2>yVaNu(4Fg8X8+=8QGjbt1NSx zmti~0>^f%gIxHibOVC~=QXBTXWoXrlA!}%?%krzma!S~B%;I%erZ?B}`yuvH!tP+5 zcTi_FrBlLYxH@Mzm2hsysI-iHZaa*zJinTcUCZw05{z~ww5BXO*K*6^I?fJRL!H3b zwfuTs4x_JYCbIkk15QTh9=e(;Dt~hvtEqkHU;d!*@yw2y` zG9kJIGlLRZQ_QdC&$-s6y^s=nTYj}A_McFl5VD5Gab@`+-5%yd%iHMXFcaA28dc{SrR%HGDS^z^ zT%#<*rrW?;WSKX;j5R7H=mB7j(p6vSlz<07Z%HL=A-pBpgBO)$&*j&#+!W6m>MeQw zv`;&f(3-OB*8DoQe8#heI)S>_D)=ZVk!R1YTJ~%^Yv~%*yn~O={;-2RJ#O?zMptc= zSZ>+s)Q~mQZG5}Z2OUaiO_m=nWY4ZPCVAFSCr}sf2d*h47C-R?Hj&&TVw4xSdWhB^VijpGV*N}Ol;2Q8~^^|+8VG_)#P(7|T~9$-qeTdu~t zcb!oEdB_^-1iVQ;QR9qGi3K(@ST@?~kACb~L!H>M_iG)Tqu`gOgn7tyddMo_{0I-3 zGZ8%JEc>C&qIcOkxai-WHPmg4-RG4KCA22{N}A1ZXw}shdDhTy9ab0TE3_~r%nP4I zFI**DKj4LPU4eI=WxHGU>;bk$^$S@;-Nvcamphcun(TLXU(cU~R=vEyvxYi>y0{wP zno=UOe;}R2lR3Qw)prM-|6H=Ezy$L6d1Le|i(RW0}QF~iZn4)co5iO7ZRb0YTun6K#RgGg;O z%x}jiS!al64fSz_y7Zb-!sbNxMKLFGKY)2q?WKgR2EOycYQQ}X)(oXn!d3&{d0{o+ zehDi8cST{9(RiCfTS{9=O5{x*E5^teBE9r<)LM1xJw5BZE z@7~R-);G0B-`l`#;5uMK$8Jjez`pVO!HK_V?`gmDQi&26F=$~}uYU6UR&L{#BYw~* zYQY|~QlcgQz4O=``ZZLdBN3w=Ho|Dni5dyktItGAEIZV{osbP z%y2qiVIITZ!z`K;H4>-`gwA#;@#p+Yp3m4Xq*bDXjxk#Y&+K2_cj}~>=l*<0Ctw5C zb*4^+Q8^K4}>yz8@ zUC!G0-X}JST4+18QUcoH|7ytN9_vzX34TzQjUvH%)muu5uk){-&b8}Mi4rC=jh)y_lFb|-y_R?3h-)tU$N|Z?VxaMW} zmBY&@YQYA4hLkuaf9Gz#{We@BD3eHc-Z@btp;{6f zXy$ISRH6iJ2Opj`NWccHvsKVaiAzIY)o#0SNL?jLun&V5UL;@x^|E)Nl@j{BSCuG% zs1f=q>?W@N#W9T)TeqCK&lUUCRZC-JDe)V7GjsieZyx=y?O0TzgpQaj``(-*8|zGZ zb!Lyj=hjtA<5wwRdmMiTvByzKkwA|G?=>efBPt{iNkti2D6_TU+(>LVp;{jxn8OYuS}A_NPhhV+Nm_#9rA8IPFV`o9%wsILh|FV!z~xDiUa6 zu(4>FFz>!+zXs%dg_t+zT}10SAERtvBo#s>piwVqt;zRlem`v9bsN;;jLLXB##M;7 z?`E^;H&cGoK5OQXq*Y3AencET%l>RLwOx4a=STdYTGWC)Xr;uq`MkT<4gIPrL74<+ zz7TKEi5dyjLK#{qF+YFTd1d=Gph{3Cq2n#fwy~M|<4fLYU%TI;Btp*_6{i3xA@goB zc8dh;g|pOg`4?#Jwy!$Ud%?L1y&9D1ap3GF%W|Sd0(DUfWoQ+ME>EVa5|l~MGYDrz z;>kFnS|~#+C2}t#GcTj65|l~kIL@*Ur}S@J_TZ$Mqqf=632M=+!3kDMylcJ>o~XCh z-ny!iB7vI1^Ksk3FU^{G&7tjc>{rD7i)%lD`Hj-^&R`1op33-v-P zC5~Jp?1~oH_g+Ti9Sd28 z7{LDXd$r?!aDR?I;m#Yq%h*7;=ZFEIRa3V*yjs-ay-5ihCBD1EC{YRH45(LoJ! 
z$5l}aHgIoJ!bZE>z-U(q$|UqY+1ze%p93~HzYWhhV={Qw7?Z(M%Xmzltlb z*mGXgf(>Y;gv76^Dq(C11S1w{fT4*7(Qo?)&+>Z=i zhDuN-p?B8348MaUR10O?|CF$q+HGK_Rtd@^bSz}qK#QbKpLKlurgJ~2F#hT1=kgX3 zBdoy#m}QYvjH^qRT~uK#6e}`guh2?~W%7vdspe&sgi)IGd;xwfqj67oHLlr%vu5v`FZEviFwkI<`A|QhPt!P0$ARGQ1|7 z>99w_KFWM*zjE`blan7k$4Lp>dHMaoUQH!R;6w=iZrBgzgzxT(TDT5qrG)Ju-3E4$ zD$$cBoFOIqTE7kwtXEH(x+OB~gjJ#hZHN7N-YVZa7qwsyS}9>WdcO|r=vBh&0Kq;C zJC>aA7(h`AHlURf`5p9KkxJOp#gU(7lb$@Zj`?cq&ZDh$TUsyv!S?6NZh`W)$G7I? ziT?Mk`YvsM&xsld)j~OFDS`TJDrw+<=B^uZ3{2W`h1rMvEaU>Po$u^JpZ%eYV|;V@ zd-mV(FZ`xK3Dvr6Fnr$uoxj*_`2wL@mED8Z8#}EqyF}F3D%HB_ZoIuUptILv-4NEW z&?>Dr4!LG>u)o4Stju{!WvPR$8B?6G! zP>B*}{uXaN4d`6v1njoMzd-EspYWx9+}qh#g);8o&}YI|{xNzIqNZBsuG8Z6Vk|W$ zRHDTBcZF;PaU|Aw9V;%6_h|=o_8tFP$AJq&`KUMMckD6+C)fizzpTI6;jJQ3Q>}&T zjB%}B-TOv!BGbB*_{p3=|KdozBX0j{ydSDH@tE+X?8~ostD`BAsYHq8UI_Fjliu%u zi7p$e_0J2&`gPnm^SzFyM5YoY&buwpD_?@MRJEa6R~#C?@?PotQAblEQ;8B=Z4>CR zZ!ZisV8%z{-Tw?(h9EiiU1Uce(0RaF3p)emhJ?&O z<=$1TZ4V4BLGPA`XhS7R%pCrGzozmiQLW2Q3oXGYDG||zN|cy>ba-mxXpe+G+sn6w zmK^x$GP6t1d5!l&wXVG;wCdB}E?p*~4XsOw)nQeOt8*>Cj+$z{KQ7RC&PznJp%NuJHw>$2*~?I^DGvlKco`)k z+E9rScbwO1Bd|MfNwxZ{ihWe@mP$mlp%NvAJh-XTWv@@QjvBL>Yr*R)5z&T9lu)0@ zlw~BU*7z+)yA61+B_i5Ti4y8Xxs6EZ`g+<5Te%IaoTat4#!;eLhi@7PjNKAJ8<|R! z*!{+^o|m6N)jDT#uz_c=DUqo}iOxLdOKC2tsn(f)2{tg7G$&M|M4#Kjb(H4Bnrf|h zT(E&Tu|%-1YAR9Utet|5(%fECtv`N0*udOgBBBkID6vyL_;BSlO0_;ZDYT?wYgCDd zHdLa-2JeQMsJzyy)`ou%Ex}q_BBBkID4{c-Da#Y3TK7*4ErHKaB4{I1i4ryKsZWRUyYrsvND1}AmU=e$JJ?N>JhB>}LDjmjPuQzr$5J9< zt5l-IJMRS=dmnFAw4quz?Gm(h89BeBL_`}ZQDUR>e&F*;qk=c^q0Z}2t*@2|TBCZs z+)*L``C6+IB`(@!Ew{1x=`VGdve;0qcmBJkYn`(3yp9q9$Ze=ZiH>n0=5TY%iybi0 zbsehp-hYRVls;kd!|f88(NnVeGd=x#s7V_!<6O9RqMmeg4S8PJ=0Ml0Qr@xM2U`TLw3i{ z|MyG>Omy|GYAtbU(7I-~r#ngnpwPQgmlFCsrZ!Y-&SK$7{c_P$$rJ2Ew4rq=QF_j6 zY}H3+hX@3Ear3uA8TX^VpSQ?8Cs9+ag`bBQ3GR7QB2$SHo418%OL^?7*7sHpUIxZ) zQzBD|5|gF}Z>jtYs@94t2X6__U{fMfi4u<=9K62rT%uaNJ`P%#OPUgyN|exekl*uo zKU8b{^x(Z>PAn0Ig^{QdB{YJR*vNGJ9JgLrOR#3>x{?qz)f(|lATV~D5}8Vr*nC3B z8d`n^RqKmC1T8#+O^Hk;O6XINUWaP+zc)M|m`h4T>|K>8q0f!IGtuKZKT+KE+e51w z9n^?mD6f>qsgV=ulLfc8Sr#SFc#E%?U$MgqvzZ_1uBE=0N4ZH{F?iSdb-#I}<$*kZ zEeL2ax6|KGD-xe?H>v*2V$<5!S^Jo3h?s}Elz3-d%yuhv9)2jI=l34)(yZ6dx?|MS z_U3?UG5gdv_kC}6N?e}xt&bYN^2`TMJHDzCC7xX#v)xLaZ`}zS`@GRQYp3^)8FhO5 ziB**-fnN!Mc;w+creE7-ms{Jv{q{u9c#A7XOPHMt<&-cRt+b&MCDwfpQTFLl_;^&GZ@G0-OC5>TDkm`-?!rqsur`Q;clkH z7u!v$4Bz9DmUZn8sziy>xT-Ad`A;8Ls>Q5t7-K1M@T-5TOdd6@WzF7)RaK&dj_E8L zZ)fyxv|ZWuah~y(W0%?MrtGnNA$q>;rY}_9d;ZTY?HBHwJReHv&eGm?vERx}y5-Mp zPwx^k-oggzWfs11BbO`eQtOBJuAX<}1ub(X?^4yel+Yb=mR)D>OpM!jYTHA3##`7x zz04-K>kIt~yX_A*9a!DyowHj;JpA*j)}@5*^zGi;@4OZtaZcM)dB$7VK)uY0_|mq0 zXQ#xP!-iFFe)+VPWA1CIszeEmSa|;P>VJ-#(#DLpuz`A+-SOaymnlR)-d%Ca>cF3! z)Uw-!YgM%_B{cGqWyoK<g z@RI-hzU>@4KUXbg4IO#y;)VU-$3H))`mgnW+cIL{-4(4%3Ehi^T&Dfoe%r>3x3~_} z%j~B&AKk04ububxSXpAG|$o*=c(QTGh*E8P2hAqCc*{Z3vMTL0DaF*7k zgvM$@UtRsPAEB>e{#w+_e!H&kXN9=Wb;lf6-F>COE!AJGTi3dj(3n%W=O-*V823Ep zuSLBaCFjrhs1U#U@L;>=zx`gz?N^^!*SeI@bE+&m(8lg>8{ccY!`|6eEso<|PJh2} z=5^X>2UedxYKfL!U)1YbmlAs37M{V|&R7!9pl7_r{Xo5ZYR^6A?ZVmJrO)nJUF)S! 
zM;^QK|LR(o5_)bJ=91I@^4ScVOFZK(Y@lAwK5IVtX5kES%4s`R-}%i;BmeT_{tc~5 z2|fP|bK<85y)?t-M9+8&8>pAF=(vfm70yy$pS4AGn-lIG`PzDGH?%G#)YlIAYY$xa zz8TDT3md4HtHHRfUn%(QkKMXqb@{Vz7};~<(G9Ij3EeYT&SG1mc6|K$8E@nnZ{3E~ z%T?{r^It0LKYCuYYIV(}&l|Z-zik>?mlC@7$+AOjt!=yYyczG??^0EZtMkW|7YqBS z({JioU4NN_M(%Xxgof6ogzn+8>^Acm2Hiht#((UWVX8$h zkKC`Jbt$2H(Jb5C-u}Gl#FiO{*zeU;i+<3BZx{EqLss}}<;y2}j{M|?!x~zb61oG( zGV{VK$6VBFhWT(Ye=X{zKlhK1io1y`&Yw^jIOVEMZeRM?hSsHo?qu8xZ=Lkb6?Y*o zfNIe%?LGOK!mj9|7w5HpcJk9B9zE%VhSsHo?#O&sluiA|lXqelsao{HVWT;r5+yWZ z5q1+hJowxQT!(6LXMw&-i4iZ(n|1ONSKjrwU584PNMkbC6^-8LnoV#&REzs4+?xV% z{)GD0pY@)B`=JsgG=k*gs=mRiy+>kPsTQ-*V(g}bjjQ^gy*8bJaitO^G!o{|M}2JP zW+U-@s1~!);u%Z{dp_#N%{XKRo)48Mp)o*z&Kv(5d&EdQ=c>hQw3tg$!k+U+^|%XX z;5k=`5*o|QvUdAb(_?$TqR2ATO{t6G#=QmUkm&a)nYbU_&zCNeo15T!&YyDU!oEv z^lcdTuNt*^tG8(WTGY#IwD7f3!u+eoKD(^n2LDPWO6Zxl`-zP;x7wfuexhnI8!dds zlrTTBF=Dq-ZSWISqJ*B^yT99b{_mq(;P0xI((?{ZUu8jkS1*N|exd zRkLiKtx>-_n!zPSZ)IMt&6fLLEjSRAgt;Fdeu5QkHV68hG3 zmaX^d-|8baoz}9UMF&+&_FcRaoB{YsTRK94FZ`Rni82xl%S6q;>p2^x2mRE_y#&`AiqUZB2$SH^tGdnT7KpJ zZFSa*FQv0wBCKJduT-K0_YASGq7Bu;7ueZXB_i5Ti4xo);XawNHHp`uTKI}RY~T)- z2ta<%RiXsq79WO!N?TW-PDF^A>RaSAa6>Eh&EKB1S4$l zA5t5tg*+CpfjlN9BHB=i5{v=D4@zyQ7IJ~W2C}P^h-gD4N-*LHe=fD5TF4jz8_4ET zBBBkID8a}r{L<8hY9SK|Y#_TziHJ5-qJ&1EQyZ!!xondtw-dBc)4G(PZR`_L8>&Tn z$c|7Vq79WO!9K)(C$*tk>^EdxC=tOL8nNLbYw4o9uxOO4Vl-f`&u2;yFQX--al_`i zcAFC_QG$EX(leN;7P1<_2A;v@gi4g)p1L%bWU7TMO0a>sq&cAyB^Vnh&54<6AY`_<7PN+l)jV&j0d!|~F<#u*+LM2Mjc6o20TC|6~ zL5bk{k*P!p_F;LCqgw1Y>~Ts&w4o9uI2PhwjYLhgc>l3iYf5A)QG#PS?m43k)#5nD zp0g>DsYD4rn{n?PZKxKXN9>)O5}8Vr;2aZU0MUkOab7_Tped25Lrz5n z8`?x0Hrl-fJH67l~yZ3r@sVsg~_OoH%FM_r7DRl+YeYTV=ac zuNVDeyGbvj-%71AEIc2oWqU&>zI&^b&>l%!WpM_t7h}od0ba%!Yu+l=vfaHC-~A3M zp*@ne$|6NxFP=M#=y(~=WAj$2mc?hBnERbayAs+XX{#*O=k;QavY4EgF?Tg@m1HT8pZB*);6yQiWO6{usuUYLWi#v#1iFXJUsxEV%`ruE8HIQMM6VrCJ=XEB~?^qT5y}p*@neiu1?l zWBP2tnW*>Xeg2PDsTQB@W#3t{Omy2SCA3GpRWVsHqm$jBb0k+g2%|J(9MHYdA(by@75=NoCA3GVbotp=(lw{ z!*$y#CA3GpRUrH~)pk}i=HZ7A>3sd>uYAQp87q#RQ~BSozZbqQ)Jb3#r55Hyl(Alw z2taN_B}&+No6|#H`KDk)wJ_Jh2G)-f5pAeM2|G7*+7xY;fI3^HT3D}O!{l3(2ta-v zDp7*A-#fQpqo!K4hx=b5q79WO!9HC7`-`R5QBy7U8^%?Mh&EKB1jj<(>x(v2i}xQR zu|z~0DpA7D4ZWv79g1}q>UrnTz#Lp6q79WO zVdsWUU$U@hL$$D;yAAUIN<_4w5+&^1(CN9`!mEKg`%1Ooali&vgAxJA`${EB*tub# z|6H`8T3GdA1FL9>h&EKBgq<5Yece+<8>$5_12(WamxyRXB}&-2q0_hYfX@hZ_LXYE z4}uM>=OqG=_mxVNuyaGFZx~#(p<3`oU;}r%qvjQ;n*rDssJ;J3jBp24OBVwSNk zB`jX*e=pA^ss%q0HZYeoB@la!btz$SU;le~PE;*;vakVvw<&=ba;!@Ui(mWS%X7PG z!JBj&Hn%q=5DSlWDPeJV|9g3jQZ0C|uz@wIDS?=NtV;B&*ILzre+?U0Ynu`{ zn}~HOVdo%PEl%Va>-k_#hw1>SxiQxKCQ;8Dn!_pq7rdsSb>~Ts&w4o9uI2N$KGG+hgt253Z zy?;}~4%?Xt#!7;t^H8|N&8>+=|jObuB+p^XD#Kw4qv@=MfigPN+l) zu3fuM#3_*Wm1=RlLcF~>p%Nvy2G4p7XH#lJwYcsgLf@QFi4ycB{;_k>hHBA&z{y5) zLM2MjCz}4{r-kcKE&4$?F=B-sQBS_2XhXH=m*Q-y zIiV6I=xawCHSRxDi~coiln5IrVgI2LCAepZeHCq}7WWD0s}d1ys6+|wed0Z*jZC$; z-@!dE5z&T9l;9pNj@@WOwYYD?*ewy!hDwy+UNk;~(S~Yq|A}X?L_`}ZQG$EwIG02l zs>OXY=8_T-ZKy;E#s=b?7;UH);{lixOGLDx5+xYph;w_ip<0Y+=P6V}=i5pAeM3C7N%&k${>7UN~`8A?R7p%Nt+ z1B|{;w4qvz`@#1q5wKCU|H&|yWZ52_h$mp~V)>eUrrP7xy?W$a7` zejg{}dvyNx{yd5k39KKGV2^BHK=ty~W)E{d{+{g5(W;ti-P{tidJgY~ppBZ=r3B`D z*hu0GZbP;9KPhMp8{d125)o~vL<#LTQ?~Z!y<1Z)`|7E;#ySNlrWa``cfMzQ9`dTAu`Mis24kT%tR<7QefW~_3|(O znn!C53-?1u^;&Ce=32N8eG1%0jf85A@73zWkGA{22(3#A%*SY95-;(wt6C2&3R+jJ z-pgai-4dDBrG)lEave3*I_j3tKR4f%_L~#-KcTO*E+tC68~X|#0NRTgz`jE2Wy}m% zwej~Ps^(X&S~I>0S{S=ciA*I*=-icFxoTN10B;GNs1m^|uc<@{9X*MSOrPi9t{>)L z%(Xfnr-W+R_hr3Rm`j=yDp3Ny9eTGsC#u#ZCj>3biA{-2B}(X>O|GM+T08zb+;hzB zC4#-1sYD4K3vMG4h#KGu5rM$&9%ZbSFHQ-6-}sz7YEa|Y<-803oc)F}dwtAE*&Fys z#AzSuMbruFE6Rv+t#x$x`@Ze@X|@`H_cCT2?050^BxdP0RO^gi1}!{; 
zC4x3;Dp3OaUDznkC91Vaub_pwq(np;Dp5k`d{efr&3m_|S~o2UHZUia2tc@Uk0GPI zSU;4|RRVhW7YOz)JV^EeJXZGMZ`&nm;5PK$@_EF4*0B%_@H$kBtMfONZiv__txE~| z63Jf8@40Hxf0*{GPkmpD9*IP>p>-)ipD6CtBEdNkJ1jm$^~*o?y&7zk2z*=J#}Bml z3@*Cl{~+R1i@KEHj8VFdnrd;Dz?C;8;_QREl+Y*G?;x){&OWG@vmNeWQzClEm<^QB z(UaK7xEf$@$RPuo(e6&{>(Clc=?apOI*~STF$kY z&p8KUp5I{r-htht^KUojZ`Q zO7NPJa~!v!THiZ4T;B8cQ1$M2wOO}R9zQAcdq!{7brmdLa& zCD`l9IgZ; z!8=$}i4t5t%6kKP7U&at9PkEiS+J_N@JH(wJpk5)wTL#JnEZvu+i`tK^uv2swRlZ< zCfM3Y@H+fNk##A-K1`w?ZbP-+JwEjApAY)dPa05DB%%$iO9`BEfJmYr;X0(=WA+Ii z*~&*P@)MJUh&Hq?CD`jp^uuka)}t>5Pp@^AuM!W?s`r1vXVAKoDEW!Tq<#rzQTip* z_x#LH8gLy+9L`&%TJ(edeP1_3v_bz0bt$1pR{5;{Ut zg1c?36WncMHJ}fdL^=J+Rf~QbR-6(MTcr{uv=7r(aTdih!C4zm6zAj8^HEbRt_*k{ zn-Vc(2q|PxtdNa(E3H5@Z1*Y@O8ALA-hEz5o9J>Bb@tV~23T9*=d zD-=&{a;on(RBOXyf{mjt%KQz}?g_0+3B273!Wz?cs_!;b>(ajj8_;S_s6+|cPEPgx z8B{IWTjA+%{Ec6npUl?v`7nTIbvsR%EoQM4(}QKUAUw-g8L$iq}z7ttUW{h);LO3i+d_~$R#4$P>B-iub8s`3$u@oYR-0uA<{=J z@4Qrt-Ya%qCBm$@jhaf7ps!utL8=!0YwRFPM6{t2CAeoO?Q3hQ#oYvU!X<(>{EbG; z45&*9?qskx%(9+CHg3G~@$kmU=RG;=p=~!Gv++NF+xdq>vn{w@VJ3R}`{D2Q6)z)t z81bwT`(5zlEY;!~h50HaezVW8#*>SUXsos2Q18m%E#7agSckJZ4YVmo*NKA=SZd|*u(ig)Uo1An`T_sBF zHoxN6(bfhVV~zOg^WlwmPClouT6`Wc5>w)pWd=3=X;*&ro!8e@qQpVV1{;G`$0%8D z*+Gq`jF@o8^>x+aQ;U(95=WlAYGdzSBN~fcb5C6*O8n`6t$rP6tpgi>HDVtlcE9?b zx@vJYz<5oGoyV@&*!%6_jq`>*Qdfx*zw8@q4EqCY{Md-`MhyM&BX!l{eh1Nyl=yte z(v7zFhBq#FenDL&N?iB-&HOqB-U}O-t-W;PT_g5;c0paWxZgpHBqc^p>(%(O=ZMCP zNek;LQKI*MH+372KLHz4F6-5Jz=%zcSXfsr?ssthkrFGc^F@8G-NCg^?b%R?5?^02 z+HHKc22OZi_~94z2aVXq2-V_#2j?FtapnH=>zgkc-Wa^?vJI6evFiL$ZsRXo!Nxz0 z7-PiBMyM8dML7RRi9`Q2x4zle!y9XkT%n;7B~}_9Y}6;hM(YD}>%TE#!X_&;RExVU zoK&Yok2y2zui2IF(6U-XB}$xs%SgYDou|RZn1^Q8A2Q+wBUFp=UA*s+5@%1IUSH5- zMB`_@1~*iq#NOKl8|U2!8?PQey}qj5^B?sX+)ypXjdA*#5@*eyS|9ko;f*b~AJR~X z5??R3iC;(G$6(_hFHfz%_~G!zhQA!rP%TE!af+N0-%Q!Re$ku58`m7YenTZne0QT@bz~zNbI%^$P>B-Tyta|s=r;g1-u!yUS$7$+zzEgi*$qzIQvxo=J9g!tu05im z5+&{!xS`lkLbZ5S2pcIeqxbk$wCW{ml}ePDcHB_ut9f7V*xE4SKSrn)&kE63De?C~ zZLPDdcYkQTs}d#t_3IzGjd0Ju`hHvM1395u+*9G6r^E)Gt5tfwHoP&%Mu|$4cQt;=ERh^?WpOMHEb5G_Z-|%i4uqW zGS~=ndyf;QR~Fh#y{*l=s>MAZ=Ju4RKQyxfU-URz4OF7U+jG|T>j-PqWFww4;tV5H zi}78oQ7Q4Of6cAzXsg!4~eZj7}yB`({4eq{?= zod?^ZGhb++B}-UlqKt3-+4yt<~_2tM4Fm-ni^WyILS7S>gZ zk$?DbDe?4>rK@k6*Y|7l6IG(bt^fOh+X%ks1#2%|on=J-=N8mei>F!eMN{I^u`5=$ zH}7>>^LJIE#NmB|jo?%N!U%ZC*BhZ)x(7^&!%kkc3NQQ{^TJi4#MF-l`*no9!SN@p zT0O*wMc3X_S1sLhro?H>4643ucd*Z0*Vk2|#9vnrHo_ify=4bgXBaWr_8+RHyW*6X zcH`RB&uv%q3)@GjM2UfS4)W^=d$s+I=xsZ%rEMRjTDrSWiT5{MuiD#ocgNd)P$f$2 zab&O&_MDT9Si^`#$4#!QmPRR3;)=6}R$sC!f7^DcDp8_y(HedoVei~-1fGw+MyQs? 
zPEumaeTG&4YrE~mZGWy3C02Sh*a$Izi;Q^2h|O((u38#_Oo?TNY+QZDq6X(!oIxc@ zY;t?Bf%t=EA6jeU>dQv#VQ~i4(nxAbtn%9MD&~^g77U!F5+yGEOR!-ciP>1rh)0aL z_^pAnR7+#MDY3i}j~cQ4n*&=_q6BUCS^Zm!SFdWseMVgQ*1%TPqCLdIQ^F!F)u-&r z2TpymRV7NW567K=@0VB2(5+~Xne9i9QTYL1cs6+{lg}XO{ z4U5TC^O#IUwRrz=(vT7sld0x0nTkr3;FxaT6K|wkZ^XmaSKGWerlMLL$B3k+#9Q`6 zEp1QKkw;Iis6+`qo453Vjc1Km+=xSunOspVK97jKro>R28G6{v@S{`Csi;H=&M_DK z32#mAV#JC@d~?z{71iRrg4kJuVJpr)w&HxU z(IXX=D8aSs%093$!H6A<->^O3$iFl%P+vd>fGZI-R7L<#zK zj~oFTD;QBR;!j4X7X3M#ZKs6AWGZ<~rm7Mp=%b#x32fYIzUcPmi~ePk6{@O5zZ7pk zq{KxJ&a5B;`9#ZVRh1|~Uwfz7c#HK?Bk)8$W`t_dzs7qRDKYNk>6I_d)8A;Z!Bv$g z!9Byve}|0$wl{dmh`oCZuBsOI2{>I!iGHt6tvqjgocY@isj5T??tNb06E=p~uIM8p zPBTKaxZlAkSxStzz1khN^E&V7^{XmTf_u0R2f{|n8T(frHDVhhREzsIoc5)}XP=L) z%(a=}w6R00Dp7)a(O<3t8$BK$Te;ncDL)xnRW0s6aR!+Zzq)@lAMG!`y-`&qN^nno z#DDQy2&+jrqwYZPQ8?7nPX)%E7Y)3!lvW=@MQG&67*-yj9T5GhmUS-5D zj8HAc1MsG7N<3+?j4LfN@#)#at13}~F^*RsgN-Qz#<%{{h=+_&EygwQ26jrAKa=G# znW{>ZV5}zCkeEzWwHTj)jg;uI#Q0fvhL}uMB}y>n6#D9l0pn*~ZNxmg4%K3u34N6k z4_S96p08msvcKhoYB64hd!7~zNd^)@5+G(xo)hs9iy5}&;?wZ711p9gIAQHc_aEr&UA ztVNyY8F8r*s>OIR=ERg(=A`NM7i<>2*5+N6D8U$cnA`iDFunf15#w#%RV~KFF}J70 zXAjQwJJ_(*KqX2r)*sfW?Tx_s;7dlR7UTO^qf%lUJDb?eR<%AOSE#B)37%PmwRRUf zo7milgKZsDEuK?gtxbtZljhf3ZFN4tR%ewc!Lyg(Gc0SwSR>9fLbZ6_0-qrzzOb{R zd+o~Sm|vn2C3uDue4m@`tOz^$;ivYjsus_U;QOS+2s^=g#k{5I=3l8q37&-oAMR)) z<{7bu5vs-WFZggN@uHpGy<}eB-p?=c(NISc`$B5sUpQu_q$Ad4L5);R+ z*ns!?SMy#~q6E(-gHQcKBla|6(2pOfs20y7;ZvuC#bo?s)M7Ffl_mTc15e( zK1wA@@XR~x)okb0Siy)FPCloiT0G~*UM(eFwiEXiZFje??FUt&1kdino^zSa)@v+d z#LJT>S5%AV_1JT!1l}CLYVh?7V=5|9g6|-Nz4LS<9y8*nwy#w!zITAVb4nbSzX9>4 z?ax)B1mC3yF@V#Jz>a=|5vs-aClCWjiBH$sxG~?N2FqBSK_yD?osbaASk~VCKosW> z7H3c`z6XL>MoN6&&OR`gOt5%~N|gBB@4~wuAtv*UX=C2q{QZHgs-`q<7Fc* zv$&5+l%VYpTk3BF&TF@Sf8Z?DqCLFzk`f2~;+O_JrmL>r&)+Y}vPvi7ZM-)q@B1#^ zXs&c_xCpVO4Q`y=*yWieN6r4?^yG~cju=D)Q)1&eN5V$-@}P6;Dp3Mu*a-2&LuVY~ zM0JZ>k~dN~@)6lgiL0JCu(9l8qucK|X_gbLO9?!i_v?NB!FsvE;RGWkUbug=#)2cxnX%?O8#Gj+1lC|&M>t~{d(Nibs;^szHi}xX2d$J? 
zWO>~2e7yIBTwkx77QIJ;Z2 z*NWb%ht}CXc_W1$Ahc3q{8v4l*k_js{(cGTQUX3I{vOUCiv;=#_0p4tR!S`M+bgep)7M+25+x9MfsN3++2Ygd4<9zX?c$veOZtlYHrUX9%d$V*cBbFK z7p+xAE!=-xM@sCv`mRp&>UC_=yWD^JmD@WkXjOQhp-8Y^?uwy>au>1YwEk{m)q{`s zcUUMPc?bzNY zHdLYn;t#M9-eEcOt&3)1v~RTiiOE|pj8Z@=C7>PtujcQtur4JKN8#U1w?N=JP%on` zEO!wbzj=Eru;c7Hv@Rt+?2Rwpu&=KE??tUAzV>|)Z)EK2^%FKGK?#gFkXgw@pQok##ok(Ja}5=lAHc9`c!+) zRZE|hl$hRoy4(1hJs-uV2(7|Xls-}GEc=9C$Ior{VU!bEjH+V3N{OSse5Uf~(Mz?j zZF8bZltA1U*AeF3pMLw6+ZbhYVo?jNf>uhb^i|I)`sys3+f~ANG6=-4VI!;`IZ-3Q zdKu-0R!Z0!-n+$2ccE9r);e)YT=rol@jIw_;tW%PzlN;a4rHH!7s@P_cDrFAfS~J=KHt} z_&zG(c?*am+VSnBN@wt|%=fAOcG|)j@O@N^r$F$pQo?+=>e-L}tpz@uN|eAk6>J1Q z(R?^>6@0j&7On$YDeC_u-_HqJR|2Pi z_4M>-1aXZ*s6Mo-VrIer&K2JpJGOXs_xYE~$=s%RGJ6 z;+ZFQ6Sah}-Gtk~ZbBtWpbXn#$8yk&L#n@DV#ArOwkzU!Ep|dYAI9G4FDu|nG2E*i z@QY)rtwX;z`byi~sYD5!V1o#6SsXQeQuU%f56Jt}C}arxVz77TbW8Je>tQWP=Drlv|f@vrEb(MXB6avPtv@8)lEur4Losu2As60ia5Y!$Rp zV*C8c7oNRhMYY%qh}@(EdiTexdG9)bR-s-cct1jHX{~wT4z9QUHp!bD&_WBLl@b=q z@ZPmphQF5q8`L7f5qfnWy!&C1iR!t}4;%HLLFXoKa&ROfl9m!O5=#VZfKWQiESBM; z#9|rAn;d+)5D`p?9qswJ{ngR!EvL+K0@s0hmEg<}VwRs>JIQU_x55L-n;d+)pp_C= zUKnPdt)|TP_cCCETDT6BIV&Bx5#Ig4oOsVC{}}bdev6VfIXH77LYorvZBDdtl&$>a z2PFbFKq#GM7R&JKu$j8bSrl5FwGnAf30V!Cz;zS}t{)*5E-Ow|>*eYLt(3qzi0iO* zu&NRzxITVyF5a2II+({YoWRwhUapCV{HMevotsr3{nMXj-2VOsRh1~gbv~R;96x1K z@2idNB(A6hd(cXW^Og;M$sR9^^7k^(SJWaweSp_cCxD)WUV3Og}iB-Q`4$gleG-tpf4YGrl*t zd*}W9y$sl(776ldUA2A`D*>>3Ea-t@JvswVl_2SIum( zomcX<2zOys^(gOoDsS@E$-;)JUin%Fs%Q_ow#vaaB9`cz^c;_ncZJxTjui9^T8q-uccOFGDQD z32Jd4jkg|BVy{nbZ~bAPbKA!EKC!A2B^VnB?`0qcfM?KR0Lj}Tj0Zp~C7^BpXa8#c z?gy@eT4*84jD7?gP!0bV2-VUuYqGa@KD`|gmTU*R4y}vv64c9BP3SAcWY*i|i#ChN zByWo_ii5sN3A-QlCExEc3im@LN-!D~?m1#hxDMQpq89H>O4uj?kztgmgz+oX%cxoy zyB5o+kDuGVr9LRd@zVPpw}Eo?6vn^c3?|O1!w}nfgbM zFV+5(&50^ef)U{`@2&|(?Yq86?L zS}7r?WL2JXL5nA5@Lp3wPU4)vNGuXOyMtHj9*QG>f8l_^TVFxLv;mJD_ zJU_?&GbQp9_YAvKl_-HSY{#9jUk7%=s-^EXq=fCZ8{BQHL=;`r}4>4d$l9(qgw1Y#FkRRqET)G(I}M^3A8rYuoj_F zRs64Iyq%q8ll?j@Gp(0Hmh_F{-=T+p;ae}3|G_25|6qA$y^Ne#mU}%VY9v&PHuCK9 zDM1_d->$lp(AJug{|l|MOdDPoGI&@<3@;Q`0lMzLVF}_mE{ccdNGzPx0jbO#+tWEwJcMY z6W{#~Dxp1+w#u@;dA)e8UcH_zMW zWz1b#Q$o}%3t!Z-Yq`jaQH)xozxynz zg!V{cBV*fPgbc(b8?jZY#b+D&giA!Xtx`gJ#9I|@aIQtGIBO$^bjb#Znrd+#?3U=ZRZ3`& zq^;t*ggeL;uG=e*tx_$n8Qu17x2;k_dn9cY*Kmw>dIQ~#lGrNM;`-X{NbI&%N@$Oy zt)jn!nSq{Dw@*}Tm1@x!>GrAZwpB`KkEE@lZ-_aAUS_wmPi&QH(QoT^hU>OfN@$OG zt3bdLM?T@%$fS$!R$_ic87q!`k<`n{H)WkrEzF4^uwIpjXhS7R*!NSNj^E#{u~n*t zxfV9^JiI9pZD?Ie*mq!^FhzTUp-vmBh4l(HOpb5L#xel>3w@;$C1^YOrmXLCREzd- z|4T%)p%Nw7hsif(-G*wh-}3ypR{682?G zCz7vhx((IBN{0K9=i5$+XhZ8#!oHd5MDmqQx1m~CIbkEu8=ex;hSsHosymL~NJ%csXg2w?HSPe=`T6Z{&UfWYGKuf4XmOiBHB=i z681%6r<06q-dCyxF9SBPI+uuOLnTVsmzAAP@F~(RHB6Cd~zB&HBDLWxoW}pfem;|B_i5Ti4vBtsEG~Lf_Fw6O$lT; zighVrd6)d}<-Sracu4H4rUWuI#k!QR+;0B&@;z5Acx14Fd)|~l#;I7B5|($#|6U%u zss*1BHZXRZ63Dw0>r%pUKKb8~g)_b9ss+CdHt-BKC6Gla)}@5yR`9=<=MvR|p9mY6 zOPUgyN|dnd5l)xqMAd>P3mfovn-a)35$jUIa$We}%X7PG!JBj&Hn%q=kT)aNrG#bk z@V}SWDAj`Z3L99Xni9zW5$jUIzVYpUFR!(#1^*g0u+}yuGL@%nqd|cRo z&(M^>ch6&8O4v8o{qNC}spVc@O|{@t!v=hxrUbs<9_v!VGCug?C#=7dU=pzYG$pr%^1 zhrL0GFvIS**Hoee`>?dfsi_wG4SSpt5pAeM366y%FMzMLs>S=C=c)F7D-&9m5**Vp zFF-+2xiZOD?_scJfG*IcN=9w>r#SiSIjsSZKxL4 zE5zHI6Dm=HYjDhp7;UH)*Ih*Dn-eNgg1$t|7!qx$7X1gDY&0iSq6B@SB%`O_bJe0B zgcFnIgi4g4ZdCzgn4LnTTu#t~=gXhXFa*TCFfBBBkID8X1wTydfe)na@GYgCDd zHdLYnV@`49j5bt@aVD&_B_i5Ti4u&RMGqj_P%XyG;4_qnXhS7RFa{VsnP@|`825wk zlM-*_zu>vGzFVt$eJJaDAn-E7>0(aQNT?Rd&{E$n%dWHE_f-(v9)Uqrm zY9v$(W!&?Wcq;!L%eMAA7L}k(LOr!C%ZVBZ)j}C#Hzf|VUp3->e0qJqs!IMp%H9O- zuj$(VKQfdcM1vqvNr_pCBqYf9Boef@s3E0onwp1PQ>r|shC{XH;Tq~1DwODt-lAGE 
z@O;0`qX=nA%wrK_P!vfJ;s5#Ub=LR!tn7ZWe{a2X9reF`H&+Rdols9L%SxhfLbZ6zu{{!-l)wMzkKcc& zgvU;(rH9?D-lJbi z-yC@QjMH{aCsd2aoTDOvSuNS%tfmqkJE5Lhng^2& zCsd2aoNFV2**V$Z?5q+VJE5Lhn$MFBCsd2a^cf-nFC*EYm!T3KJE0z6@=KBpCsd2a z^nD@$Zzysi>ILlWMHwgaY0c)Q+j)QUuT&*>r8RicDH3$|ybO3((|^{0edktm;C zddPK^DB;gER~^E$n}~EBXxE>1YDMCGdk2e!kAF~Ci4y*9+8(AVK^tt>-&$%#VmBL0 zimmXSJC!KmV~p*3suHxpc761rRwTyRm{`0xYj8s)O88jGoeEi2C1``~`Y1}RNR(qc zJ>-T;l<+a%cA-@X+F-jr8&E3}N7)=zeEPTVHdLa7&t0}}tx6;tXxC>oYDMA$xIL<#q`%|?}=4NBa<_G6CiMwFg@#_Q0wl<=6r z`l=+-l~=X=nj(STt>Mfv-Vc>{{K0lT_OW|j5``1qu17`GibNTCF;;7+LF#04L9IsU3@i__44cM4i5^031YS9L@B2i{0GLE|{@jL?w&vDq;UJ_}f zu4*M47=t5$*&tm9=MR;5zJ%?1uEyr5l1MX7Rf{&L6$#90$p+^^m3ZEVgy)=Wt}Tf) zb5^xzgIbZm?3`?HK39q7uSj_A%zTEDNFG2{i#Dhg3CZ=?8S^P(g?Cu_cFNzkWGgtzP2 zT53fC-fOZ!e^(_+Xl^-q$Rr9UXoKx));SV<$L$|~pD_;c#SHk;y)hT9B*){%>@j6j zhy5MeMJExoW_M~;2m{h@KvXg=5c76FE!u#cbXKZ0yHji2n9gGufHwab(Rm$eV_GHM+=aqD zA^f}TFY&L7IpOh=?i`84?d7vO_twERl_K6i(0vt?SN_NSs%W?Q@YYQHc`V6G9udc2^R(A4bpyt?SN_NGu&- zbJRA-`=~?-?g^m{TZ1f#!U@`-b=^4%FM%FM%FMjpe4l_@U$lzeA-o_dri0}gf{FQEQ$1fRJCY>T9Lq8+rVAS_|~dK_nL4|2yNI{QWEJs zuWHc-wIZ?MKW*LMXpC1X(Y+?z6G9s{CYD6ugtx0ZMle^u3@4QfRKUSD}X2G>;LzriEH-~7>r z`JyF}{KTpjZBQ!`<+olkdao++-x8AGZx?9;uj+d7A zBJpUsZg42RFs>3Me2lT*m{bYcV7or9P%9GG+n88Py?1a!B}({MX}^`J612f~eSD-= zBxIy+s6+`L^X)f3Rf0CyuFnS4iiFHK4V5V2bC>;gsY=iW+x1zET9J^Mv!N0td=9qX zU{wj)V7or=QY#YT0W?&ig!>ZqTe2!a8*JD82Wmy)X4^AS9Dw|;N|bP)$bJ)7C1``~ zx*tTXNQg()P>B-m+u3jXsswGYUH9jx6$$Yy8!A!4eN_7$WR;)|w(EW=wIab$+WspV zDpA6HZT+n!DI;iu68FOWn6rTqIIFa$2tIW~+fu?~2J5Smpii9$x8czQwIYGuP5E~C z)D4v=;jxe1^OB%Xoe1wMkBX=j2|Ojq2A?aHDB&@jJ-a25p2Vsa?NKWdc-oT<_|y%R zcsxkLV^MntOM*UiBD`IXQmGXQytO&*IeqGeN|f-J+QyQSpii9$+F-jLZBr`}k^^X{ zLbzLQ%PbA^FO!MJN zfO{~6+x2WMwITuUHQAuQs}dzNx19Fk zkSLs>4YsSlaEt`saRAAV&gVMsqUwC|+kf&v`J3t{p|gRG+Ni^whyxBSe|g^|bi6`! z=%jTcW(FNAc}8jONY|oTs9Bwe?!%TtxQ#;FQbNc4XhXG7dpp_KW_)%2NR0JWp=~Ll zy%24v7HWtm8)qJ9b7JQ_C^pjf09>L(*S)JFHELET8>c)~oj+isDDQcpTBxL*i0yk| z)w0Z4+8iZkRj5P>y-%j^Hgs%91?psD+mov^j-Hijp)PhJ`1I-ZMH?znLa#3oMR``4 zdSriD+cIf<64e_FM4?*WS;6vR?5i$`OeIR_+ZAo7R_y@Gi}4P&5Wvz~Tc|{dmbbQf z<@)^m_DRcy@=evZIh<9dT60%0g3oT(gi4f9A2rx0RBO_imObS=*d>vvLDwZ_ zLguDX1wLH|tIN9|Y1*F0Mu zzjTqJ+*qc1ljDmiJqaFSAOFlbEuvv@Ip{ zEinDkM*0E``-E#A{^Sq6=!>-1#P4i8E7kILZ~4QrRsxvaK|d=#B}(`i@`(vHnm)C! z@bXM}>>kdT?H3u*w4oz_k0tZlPzNjQ!~=R|oCcx>`D~@;bCFCH%dy zRWh}qTK-1;@3Eh!-6^!uBnYLaUE5N^-+x;rQyZ$~?|i?LmZn`cw9zD*Hnc4zd>pe? zGPR*vK6Z^-_lvY!h&Gx;(}uRCgg!k`PXFx6eNN@7uFss)4qB3Sr_hG2*8(W~4yu;V z)DQi2IRv59yY2z7EhXIhu=QHCp<3>7+;Z8cX%%WuaPr{~616Nn|Qf!Xy0_zobwt&mho~>5|A)qJ(FVT0Jt=^1KE;vKFG* zyDCva$73kVQ=(d){i2`PLNsltL)50;bsFG>09?4uXr2@ZA%Ha-I@bX zEw{%UKnnpw_718<3GYLWsNt*%)$)F0oY^IjsYD6Cv#lewO8kjo?Cc{(OH@>-mX9Tj zqq-zg)dluD+fu?uzGie+lzmmGmXCIf2V00{@2W%zAHkbZYSV^l`KZmfwuNZgP>B*c zR^k`AgNHU=`FL34tV^G0-|nsrX0y5*>(BY!Ww~F0coW3X1N+of%kMu&hDh9Y;gH5-3x+j5 zeSS<`B}$-zeY(C4zekVk4iGPb=ng`){E4C87KtZz+`RG4uZA`DJGrf{5+w%QZ8kPL zjW!ApD}h*dN?TpE{E6W_7>WP+{l<+IKO5HA`OJ&zDpBI5^#`Tvm@$Jkeg@+6Plq-7 zo^?@Owfu?Ud>)CJs}5}3i7U_VxwWnmB~ZaW*%-J!YcTcbJ+Sc_h&}#%YhAVcxuRbZ zi6hTiukq6!!y6yncwb#5N}#fRvT?ztv~l*C>opDs(R12;b=C6cmHt&E&KtW{-k5XL;<`$dSlVx+WaF{)?MrN5_*SEo#? 
zuZMfSRkuM6)$&-DtKpHjV8P|}0sk7-7`NBphDww`ZTfT_t365^e|-7!`U@WpYkX_( z!41{&h@PwZkvRRFN%bq<9M-t~04V5T?+VsiB4?d=i_ir0pzY@gg?+j_EmS=RhwjGJ0qksLkxPzO& zyLCe)N}z&$vhm<5-0w4YPXGE65U0Mgbwjm0qr)}&NZc@RM*G8yhc!01c4$K-N}x7< zva!Khw9##Y8SNb)wgRDAp7-HigGl_o=lJ%&WWyUzUO23w5+zWY_8rv#@edHH<@rnYRV4o3z!{@vqjxt!@2W%z)TU21?4Ez|jTxinl!R(|Mu+!2 z5?glkuXTS7J&UJAB}$-zeX?QC?#d7MuicK`?Es-#o&(~u8;Jup99KIRPx~{Yhcr~8 z#K%838}<%fb=%n58z4UZ_K=2Zc@Bv0U?eU%XHxB5ytRAdJy(em%huQ~U5AY&pIkJl zHWkF$AXLk9KpaaV@z8?HYmZ~}ITz!VN|ZR{d)p@)HYWb!<;!b*F~Thcp<13P;+PnT z{oAJ124WOlyT_o0N|gAY+s%fJ?Y&N)R(l^K^>~cCs^u9dj_r{cICoajs>oBC-2}kJlz*R_ld%P$f#Nx4>-JT>GOrkJmnc&=ci-44 z*)SjOh--V~pMc0tSX@^vUs0kD7m2xpSIIwy*S8z|M3pG9_iCe(4f93o8?TZ#K->sF zQMG)Pl)h*r4jsEzPVe<5c&{o^;^Kdq4fClt0dWwBf!ofjtCp{l(x;BZ@n@}2VtE7x^A~E&%8|R-QD%ulqluDF%{lD^b9Tuw%0nrzc*Yk*@RLj>=8LLI& zfoXI)5scJggk>vlt7*OWP|yKEL*zK(EMc(2O`g)TDnIg66?M;EazBq=UW5X zRieb?y zaR%<-Yq*085A0J@i4y)S+`AKPASaWTIhmSj`Tb|ME)vMe=q!hO4WC(uS;5H%1NgHSE^=eY6{3FKsIWlknni4yLkp1mDyjDj!vL-?W-w(Fa# zmiwjL2M~$Z9-dYE2m0#v5&d(ODB-^L_h)lY>VY8UfmjGawcNkv&VfiQDW=u_1yBFd z6$a%hQNm+}m;WDatb^F#B@l;n8PJ(US4|+G0uX$2IneK!egJ;52B52 z5EXp{;#VM4%i|rc(MRGb#Al%)N_Z@~_qw#v?Xj`7yFi??>yTWvJpSYkh)C@JK>zf#zwpl1xk{Aq znEHgbm%zqN5B9G;k9&R(2-Wg9n!9%*(SaPm?-0?Cxpru-5+yu0F#9Rm*l2?pqiz7P zCkWN@d;oXvL}CDP8P_8-@yUh5a+N6IIgVE!rH%8}89(YS5OYDOmghCND=ZT5XR$;srZ<@p)fh{SZ{mhQ2fOs*0oJm+M6^_z9Zx8DfjMO=q!d7g=V6^TcX zBb$oe{Wf}6B}#bi%2!v{R9+qQCB$m8#d3`ZPpE($PRHB6EmTgQNi>&jD zAg%$STAoklm>7w}&YD(#4x{KE7|z0e9gk<+WWCKk%Rah z=0Vl+bqdb4kvQt81@%#wosY)stP&-B?ZteC-XO+;I3I*+`Faa|hDdydwW9lR<#XYe zs6+{0Lo(mz4_GUD2gIqhi_A~F2h9*tMvElq>Bq!J~3EzEql?_*+KsFtrs(x;9Dax#rFCsR|2628W2vB4CqrB23Jvh?Qr zYO3Yys*DXHu>fnrZ{QC8714xBl<>7;i*Z)NnlK}lAAnFTUms?S6N&y5|3GN)=Y_RP4F*TJa;d>A)cAf^}Q4rroT&r5X?|`v$Bz|7*fOr$}xk{Aqy%d%M zI2QyX`r#l{%lA((2M~!x8x3tNK-OSYarunOoWtUoiX8 z(<_afjc+HYmOnAf2u5P)+!JXddwJj`b(JW=W7@EM;!mefPDH-*?O{iXKl#jTM&bs1 z|El++JIws^neB=2wv^zz$-i6f^}K0Er7K@^jX7aQioe0sio_k4AC`#T7c5BoOS~;5 zic`(TgKu#U%i;K9)@fJeBNyTe8>;0a5i`w^*kix}>B=`T|Gk=tb z2M_AmP%R%(nK6&VE%?UPmtRetaU;Ghq7o%I=F^7d^!NXCpLFH>bX%iQ)#A=7YDMCc zXLm`&*H8RrLnS_!kT_xHhRH_v8QlBv%pKzz?V~On@!c0UOoX@VGaOeiBJunKJ2l=q z;iBmqzPn{ZB}#A(=5^Q_%h-!{OlP%h)R0D1i}t7$iKVNi+D^qk@f~`V_}or{z65R9 zT2YnocHQHkRwUkgpnp26g&Ti2?Jx1Rl<0K_zYS39`1=9e2XIQCfsLI$ykO=N5APOs zq`2qAm9a={e|n$BZm0Jd`PM%BHdLYneLLE)wY!yitd-8{kxlmsJ5t;Oq*f%>{vv&s zv$)_#6VmGSk`(PHEMk4*bZye%c@&-3rLUc2j8 z3+pfU={sw?KOGWwq_{`U71v0t-+O+&?b%yr)$W`W_I@zlVY^(dM#c zaLz72OS>#wYlm;~n*+6u!=9=Vb1#`$-*=A{cR1vKj}3cRl!#}w@A^NkP5kk%XMKim zQ+Y(s)qBq%@T?wujNeN!ds%kso#)q9==0Ug=Z*t==;F@_P9=T*@t^XIRrL$6r z63o2NhV|~vJ*U+detX!ATlPCX^p(eLw4wcmy=r%#&o9IlTjH#$TDqdN_^OpU&5$$e7pwt zmmI%oxBAM1uN!gPdq&U(+x6(4J6$3%^v%24$KHSOjGy;BqoEQdn17%R+rx6>+gGOg zgWK+PM%eY@Sqf@Jf<0^h6=@HPx1|L0DE@coRtR1P+x2XVA9oUuzj;^scH61A4sA<` zKA-c8H{Mt0FT8Tp7O#9dV{2T8YI)X(eH97ZkJ`)ob|1<6p%Nwd7SM*>bKDPpC9UB7 zsA~DWi3FaK^j$SRS1PH#U9`bxEX%e(ZffnS|NG+(XX8CrEqzz`K6Vn>*QO;KzsCDf zeT#Tje2d~6wQ2A9={o)kqmO4fspVN!j#rUb<@2X&UmUyg%p7B)N|a#UmuF?;?!n6z zCL8x)Oss10tf&=<@n6{Y;SR&tt`g5DlVJXuHf;VViNXnQ*R$NziUj7UoR1^tD3vI& zWG}N}^C0G^yxS-DZO=JMwLDwTc`y=~Yx6Oe44lciRwYVsJ%QI@^ZBWL2J)bRF~=RO0ItB)Bd@8|IgkMDjAKTC_o}NWk|=Ht74P#MfI$T)68_$%gq? 
z@O|>1OkF&kzK?48DiHmvNWh27FZ}y0Bk04aL;gb$a_iauObi`PM|NUZwl zKB;%>@GO1Ji(0!3dpl+ddSHJJ>;qu&x%@+XdiGu zx{hmlFHJi+ye(f{On&<>&4zjUH{e@(-7m;T_J*ggTE6DVXrc%NqKRaK(S%Br;4y7m z#B%KP$@zb;_^nx^5Ec1)Eh8acA7<=y#C!Zkj>l?8?|Di-YRH;9T#x8ZB}#Awn}qGM zIBD8Z`IWsMp49_Uk=jsN&oIjd&pk1}d*|DBcm~m(YS9MUfJ=} ztk|v+em^X?w9$)p2RGkh_pp#wlUXP0vpq%N%Z!t2yFU9+D-xUs@jhgj2XmDu;q&8Xmwb-C;yhU9G7`b7WxGBnGV>pa zt2%bd|9#=!iEbuXOx*qq(MP7e3A zxS|w^PtNI+pZ%THN7i=VH&=-g?mya^7yU$>Q~^IR?BsBtky?@XWm!jY!H*`SeHpwC z*Wz{X*!^HzyDN#p3Dx2;wIcCCSr76_)Jjo_`_v@dpSN|+%~pM$n z`|w<;jKqPzm|2_m%N2K+cg?YB&jhc7Z7Jc=%l6;reuiHT`fREx*dip2Yu_en8laom)& z_k;J`wVd#ny8nyZm%-Ti?%!X_Tt*^X%j0P7dWb}~MR$#Q`D>TV*r(?ixk{Aq+<@)N zU=D!qAaVd(%uhVhimZ+dF~WE zxDIX0^CfK8b2Zji%*jld_}L8PWWsI{&*HGJB7yr+U+MjBBY8hmqJ(Fo?4C2X#OvVw zsA~DWi3FY!5*eQol~mtgUb#KH$Ys>WPfNLs@Sf}Y=y^CJrXkly8_4yAeHnaHc~*P} zTi)7q9eit5%d@8(uOhJ<>PP+c`ITqhi!o6pN_a-t#@#hmdnegA8e?Kri)TfxNbF{{ zu(K6WKT0K@Lnh&wZJR$zqHx07^~^T4B7r%o!M@@gr4l7P4{!4z^X(fSelK$wVV8wx z=s6EYg6js4G*T`jSBVn7(qQv>eT{)Ut71Op+Nu_>iCU3>2av9V9)L=C?1Zn2m|s#7 z$!Dl)k)T#2;QJ&S^nFy~Yb+#ueaHMO_&$xRF8*{neIM2GH68j_k>Gk%najvkqJ*zg znV-n@DBeNjGV-bxuY+2VkX5qW*SVB zAGIPOD~f3c1Z}t$uY%VuNy>(+ETpsjnDKXj@9~ zn6^o086uW+9gJ91%hzie6-5G3QNyDml_=qB!i>UF&4=qQAyienkmkyrEypL*mzcIHI31p*^4Q8WM zQYCn{W}~cRmfcZj7H7XPM;!hodXm|p{l;uN^xbrfdeZ6dm%YcQ-D{sYp;|nqRwPi_ zI^983wyvp!$4;OwcCvBM%N%`5g0-*{p;|nqRwPiNJJ~=@@0v<@>;x)zCma9s55|Kf z!J6KQP%R!)D-x(Qo@{)4R(nk)Jaz(g%9D-FKBT`}5``10#batk;>oi9;2szgRl;K@ zP)R-6_;}sLuu&3)6RO2yYDMChvX0^+)KOFkkDWk8_+;bwgFb-|Egn-V5^Jn) z^&tD99;8Zm>;x*?CmWB?{RlQnqHsdBcucKG^eTOZSKu?KgvU;-@#445#>9WYMoAP- zs8%|LRwSmsXZ1PjsL!bq9y{^J3wKF2>is{2jglywP%R!)D-xTOeqw+4i7Mf-6E}Y` zCfS(S>jT&*iNXoh;xV-%@%d7#mpTvru1a|9#I@Vil8rYudmlDRqHsdBcucKGEVJ5Y z*2Wx#6bbXJv@ISxas9)aNnhbQ0-;(wW?w}Dy<6jTuy<9$V<-B}8k}s{ zJ+Bg~#batk0#8Y@!KXweJa*!+hX*Aa_Uu*()#5R=B7vtp+2GTz5*|D8pTdNWtJF(}_)=M^QEU6Nz#batk0;5l|!O=%0Ja*!P4f`b;YPBu6@tAxi+Y`JmoWW#)hDxq3D zrdA~2Wh5K)GE~B2C$_$Bm1M(wpDLkRJf>D8;4LK^^p;e@V<(P1W94MSe7Gv1T0Ev! zB;fTW8}#~A!eb|Pc%oObVZLaUP%R!)D-vbxvx45MN_gzVHCLMrysL;Ns)TCsm|Br| zy<8bRqrqsRvn?Jw;kKvu;{M%|D4b9&9#bn4<&|gXtGY`3I!JgQey)>11HyOnhmx591_l_=q3%x;ge zdR<8rPS6J1_0fk~kr-!VV)o*!!3~ut;bUdLYrX~>B~ds*8*JA{QEEk^9H}#m?G2SE z;bVTky;p<{8QT*<8*JBS18PO$D4V0QPyhDahDwz1xoglzv{4d;6UheJ^;wNtkvOAV zS9=5d$W)?)&%yhgwi0ZVMBxN&uw9>>sTGM~<+|EZR6kXT67Ea%ewQ{%qHuyX*sgmS z)QUvu$5XjHZQnj(ST>r^t|j3ywT&euk>2yF7Hv=~5*U4w4URr4@%WsC=LT#{EQvH;Rkdh? zT9Lpgnrv{~Rf*>rNO+FJ#`cn6E+Y}%u4g}}6$#7+$p&Wwl_=r48k?g^BF!IFE!v}2 zBrvNb8=Tct;&~quo^!Ifwj|O#Sk z;AJEm^h;FYc{mcD12o^KB$(?!lt68#WaG9$RU4|M9$*X6 zw4o9uP{AqL7>%#2LRp?&)lv_zg=pGPi4v&u)rAe!QV+08g0%sgZ7G2|UtRQ-YN-d< zC6TE_3Dhx4Hd^j^rdsL&c1@^63Dhx4Hd>zDOtsVl?3z%C5~yR8Y_z%lnPF@D`$JLnTW1v(Q?#N45O^ zGpFAr!K%8c-`SQD{!BNk_V`&9s^!ly*Cx6oGLlwP{1Oe7@ouWS0c1%cs6#TT1vG%$k~LKK--kPPKgALNsltL}V zN_dW=Ikq=#sFvq7IJUPCO&cmv!gDpvIjU(xwLCw=IjV(d+E9rSo^xu>wM`qU<#{H~ zwJk)`hDwz1+*#9SXxdOM&zI3>Xd#+5RHB6E0Gqx~(}rq!-jBXd3(>Tp5+zVyF^w^; zKAdX#$~b+v7NTiGB}$-9WU|rfi>j8d@Y5GKQra2th6 zlyKWEu|c6)ZjU+S7NTiGB}#Z7w!}Ei>!|K}Xd#+5RN_6&EVn-kEwNgmT7LhTdyho9 zFFf1hzKNq4f8ZO}9qf0FMHn;i?{?39C%F@z^VY~B61SH*{kgXeuBk)`#tgJ!&u&Q+ zPS6IeYh)3L&C0cj7qH()B}y=6pbdKmOQLXsHfUWVi%9HNuCW}C-=C{Q3C0YxVPi>2 z6i(0vt!rcvi8sr&qOW29l}eOg%s?A9CYD6u1Z~i|Mi!AcuUzw*i+yA&QGzi8ZP?ge z61X2m&<3q*WD$v_18nVX8|;Tui4u$%Xv5~Hk|>;@4O-X8A`*L-YmnDrU!O{pV9Y=p zHrJL!;RJ2ax<(d}_-DD6dL{NJszeFK476cBLrD})&<3q*WD$uapWB-72<)>|i4u$% zXv2J;k|>;@4O-X8A`%_={UF}!?0W1MRf!Ue8EC_NxRNNGpbc8r$RZMJooZ|JH(}qa zN|a#CKpW5+xWjblOPzLeK`SYh)1#oK@N#&9hR8 z5{wx-`>IN~4UH@!f!?k0I@r4^QGzi8ZP-1p65dxDSwsR)NwUGGL?ucvW}ppwcB=$! 
z(7Hwzk-*cQZ18DUi4u$%Xv5yYDnT2xu8~C~@Yd$+E55ZVQGzi8ZP-{+C1``zHL{4r zhX1tv)JJ2yQi&3b8EC`C#414>w62jwB-SnWQ_sh^s}dy`Gth>O?Nx#{Xk88B}y=6pbeX&sswG&x<(d}z^s;Ra8^@^5{wyW!{*v5K^wHLkwqjhJ0}~QomHX) zV+PtVpP@?72CZvk5eax1$p*a)l_){4k2cKrsS>n7>+Ty;D-!UQk_~!GDp7(yDs7k# zS0!kJ?YbvRtw_M@EAPkPno5+QuT2}~i&hERV7u;-Q!5hiUXu-auPRZ3F#~PjT}Av+ zC1``~YGe_KatC%cuYF>>N|bQh7JvAjZ6|1h*8Qxg6^Zi7GkC~#l_=qTXz_>iRU&AE z?RxK0D-z`$%y7@^DpA6p1&crAo+pAf*seb%)QZH}A}~l<;TT*1SrhaDq12 zu0QS6io`F={nTyv2AxWj@ORVJ?nO=AEXq718V~njqmPFwM zZLnP*eW(?Q>upTTrrtZap%Nv0thBY%k|>;@4Yuo}D77L{j?@{(_J&H7@G;-kgk@|` z1Z}WgpAD!LiE_rtFh@01qJ+;~wze&ER3d1D?fR@ntw@wJXNI}9p%Nv04z@LVnQIe4 z8*JBSXKF>F^Z@8nH&mj8`x3U-KzxQo&<5LeFN0cS7mK}gytSV8$eIna4Q4)m{ zw83`WTcTDZN{=jq57$tM67JjCJ{j@h53B0up_7&fAl_=pcwT&euK`%TJ zw83^g+NM?{BnQw?i4vY0urX0`0EwUtw(D5~YDGeF84Z;v;W-W)+a;Hg2-;w~p8cR! zBrqGKzT#}45+yuWV{=qVr1_(&MSIkW1ZK5lgR`1SJnuuob51tbmPDEdt6H={tw>;Y zPBu6@tHkqHBs_O!K0`^Q`Mjz{8`O#fyo_XnUWQ6|?1bk4&G#vZmlL z7PG4EGanDjk3>CT)w<}fMzCgaBz}mBw@<7vJnx6P$SP3+Unxm8>}w=tjof@F>LRNa z>hYwrV(sNflr?hmPf@K|B}(AiJIRLCd_nag)-2AugHSEh6-qXaKn>|gpu$o9&98># z`=L&?N|eBNf07NWg`bfzJYhg#?e^Bvu#m|Q2JEPvVN|eCYh>{Ka zO7qV^eE#XMyf5l)s}|}!B^#{i9f_H!8-6FQJiq5wR*7l0r3Ajil>TmCwC;f#xvzoP z19ihy3-zj!4b~=)#F40Y`_mr7^N&!?TqR22ds4}U)mT6K%=PlaLG+wSjQ29N-50&3l3-!a24c3Z}L|G#@zYO*0 zRiXsG)|G5n&HIy3Blnem4$EIdJ$ltb9kgVFHSZ%adTNjS{qDo_IY%w7t3(NWbu8Ji z@3&99u1Ee5h~tl6TvsjBb4xbJ8>)pmfyu_U zC&RxA#NCg~sy&CkI&nmQ)|F|tr3Aj+m;P?`4So&+wc_(jK&Te#9VQz;d7uaD$_Vko zNz-Z{b{n3*-F*~P%YG-Og6Uf(UY}ngg6~F za<6!ESbqD-Td*EWQ=$aEvYP(R+ALXi^+l6vZ-RK@q%9h%g*uwa#*K&*A~AQfakV8F z89vO1u!2joEhX^f$nB1F;AzxG?)!Oh>@ znl)dV5+(3$%k+1v|1oz?|Jo7|r@pgwL$y%XG}+i@d{5SV5#k2a$bES6uzZ7ShqC5N zQ=$aEzM1~c3MHuZiyFBdAhrUbTBy&OY@B&uPu71C;`cqrkNQhCJb&`SVXWZNlqi93 zji$d_&6hWp?K^5Zh&Mo}7V69<8wVWPlQmz2U|fJ2x%r}vhqLBOQ=$aEtlIoL^|QhW z)k3}8rj1B+TXB5*-8ie4a8@c&0^fd3Hmt8+T()m}1H?Z-s21w}Hv1|P{|_~CXQOvF zLGP+W34G}`*|2;5#W!ZO&nXGjLjB@ogZDfVTXyuXcYkeIJ{M1kN|eC2c#{o#c2|D5 zfBkm!ZU+d}LLKI0gU@ax4%l#9{aifl&x{_zx=YQrl)%@5)8FkKyy~{G^*2C#{OutP z)j~b#WP|TuBrZXX+;{QT?v3|cB}(Ai$jOF{C7)b0sXi6N+8|U5b+MBTjwO+J2o-N1 z$LMn|#w(R5f$vBs8#X5X;^oWheKEo<1)*A~@11OLOpL_-ZPV%lF^aC;V-PFRG}}@F z-@{ISx3Rq!YUI9;k$ODFUDZOJ@??W!dn5+VomGDjckn381}aekU;j=vY>wLEky-VZ zL0knwwNP(8+29-%iQQ2ncOquBUYG|}q6EHAo^04$`=dFJ*GGdm4)dUDp>BM#!MQdP z*G^hc-x;&>K+Mi6Q379MPd3bFxElmLfOSBq)+0;J27QJ|3_^|Ehj8Uv!pl&J68K(x zvSGf@qo|R~xpr3&s&&%sw(EhuPb3bS+M_WW_q^xPi|Z;;0$;RGHq3`R0#$WC0g;`s zxUO2qEH)eT;UX~?HF7_O*S8z|M3pFk@AoGg=8M)*BewzKM)--Ug%5wF`$1nc5{IJV zExp&9;JvCu2~-$JHq58q1jIog25vjAu38f(+HM;9)R8D_tyj=^? 
[... base85-encoded binary payload elided ...]

literal 0
HcmV?d00001

diff --git a/act/assets/vx300s_8_gripper_prop.stl b/act/assets/vx300s_8_gripper_prop.stl
new file mode 100644
index 0000000000000000000000000000000000000000..36099b4221155b491a2ad61fb799c7fd7d443b77
GIT binary patch
literal 31684
[... base85-encoded binary payload elided ...]

literal 0
HcmV?d00001

diff --git a/act/assets/vx300s_9_gripper_bar.stl b/act/assets/vx300s_9_gripper_bar.stl
new file mode 100644
index 0000000000000000000000000000000000000000..eba3caa21990c28559553fe0003ae379514e7215
GIT binary patch
literal 379484
[... base85-encoded binary payload elided ...]