From b965cc5251197266478c529d8a94af909a58f78c Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Wed, 17 Jan 2024 22:17:16 +0000 Subject: [PATCH 01/54] add probability field plots to analyze_ensemble.py --- singularity/prep/files/analyze_ensemble.py | 27 ++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/singularity/prep/files/analyze_ensemble.py b/singularity/prep/files/analyze_ensemble.py index 3a3b696..c66f8da 100644 --- a/singularity/prep/files/analyze_ensemble.py +++ b/singularity/prep/files/analyze_ensemble.py @@ -22,6 +22,7 @@ plot_selected_validations, plot_sensitivities, plot_validations, + plot_selected_probability_fields, ) from ensembleperturbation.uncertainty_quantification.karhunen_loeve_expansion import ( karhunen_loeve_expansion, @@ -33,6 +34,7 @@ surrogate_from_karhunen_loeve, surrogate_from_training_set, validations_from_surrogate, + probability_field_from_surrogate, ) from ensembleperturbation.utilities import get_logger @@ -102,6 +104,7 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): make_sensitivities_plot = True make_validation_plot = True make_percentile_plot = True + make_probability_plot = True save_plots = True show_plots = False @@ -128,6 +131,7 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): sensitivities_filename = output_directory / 'sensitivities.nc' validation_filename = output_directory / 'validation.nc' percentile_filename = output_directory / 'percentiles.nc' + probability_filename = output_directory / 'probabilities.nc' filenames = ['perturbations.nc', 'maxele.63.nc'] if storm_name is None: @@ -377,6 +381,29 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): output_directory=output_directory if save_plots else None, ) + if make_probability_plot: + level_list = [0.3048, 0.6096, 0.9144, 1.2192, 1.524, 1.8288, 2.1336, 2.4384, 2.7432, 3.048, 3.3528, 3.6576, 3.9624, 4.2672, 4.572, 4.8768, 5.1816, 5.4864, 5.7912, 6.096] + + node_prob_field = probability_field_from_surrogate( + levels=level_list, + 
surrogate_model=surrogate_model, + distribution=distribution, + training_set=validation_set, + minimum_allowable_value=min_depth if use_depth else None, + convert_from_log_scale=log_space, + convert_from_depths=training_depth_adjust.values if log_space else use_depth, + element_table=elements if point_spacing is None else None, + filename=probability_filename, + ) + + plot_selected_probability_fields( + node_prob_field=node_prob_field, + level_list=level_list, + output_directory=output_directory if save_plots else None, + label_unit_convert_factor=1/0.3048, + label_unit_name='ft', + ) + if show_plots: LOGGER.info('showing plots') pyplot.show() From 2c141bbcfb9a5e546a832cbb233c22d70b885439 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Wed, 17 Jan 2024 22:23:12 +0000 Subject: [PATCH 02/54] run oitnb to reformat 6 *.py files in singularity/prep/files/ --- singularity/prep/files/analyze_ensemble.py | 52 ++-- singularity/prep/files/combine_ensemble.py | 6 +- singularity/prep/files/download_data.py | 28 +-- singularity/prep/files/setup_ensemble.py | 159 +++++------- singularity/prep/files/setup_model.py | 279 ++++++++++----------- singularity/prep/files/wwm.py | 144 ++++++----- 6 files changed, 307 insertions(+), 361 deletions(-) diff --git a/singularity/prep/files/analyze_ensemble.py b/singularity/prep/files/analyze_ensemble.py index c66f8da..f811e9c 100644 --- a/singularity/prep/files/analyze_ensemble.py +++ b/singularity/prep/files/analyze_ensemble.py @@ -41,14 +41,12 @@ LOGGER = get_logger('klpc_wetonly') - def main(args): tracks_dir = args.tracks_dir ensemble_dir = args.ensemble_dir - analyze(tracks_dir, ensemble_dir/'analyze') - + analyze(tracks_dir, ensemble_dir / 'analyze') def analyze(tracks_dir, analyze_dir): @@ -112,17 +110,12 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): storm_name = None if log_space: - output_directory = ( - analyze_dir / f'log_k{k_neighbors}_p{idw_order}_n{mann_coef}' - ) + output_directory = analyze_dir / 
f'log_k{k_neighbors}_p{idw_order}_n{mann_coef}' else: - output_directory = ( - analyze_dir / f'linear_k{k_neighbors}_p{idw_order}_n{mann_coef}' - ) + output_directory = analyze_dir / f'linear_k{k_neighbors}_p{idw_order}_n{mann_coef}' if not output_directory.exists(): output_directory.mkdir(parents=True, exist_ok=True) - subset_filename = output_directory / 'subset.nc' kl_filename = output_directory / 'karhunen_loeve.pkl' kl_surrogate_filename = output_directory / 'kl_surrogate.npy' @@ -246,9 +239,7 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): training_set_adjusted += training_set_adjusted['depth'] if log_space: - training_depth_adjust = numpy.fmax( - 0, min_depth - training_set_adjusted.min(axis=0) - ) + training_depth_adjust = numpy.fmax(0, min_depth - training_set_adjusted.min(axis=0)) training_set_adjusted += training_depth_adjust training_set_adjusted = numpy.log(training_set_adjusted) @@ -305,9 +296,7 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): plot_kl_surrogate_fit( kl_fit=kl_fit, - output_filename=output_directory / 'kl_surrogate_fit.png' - if save_plots - else None, + output_filename=output_directory / 'kl_surrogate_fit.png' if save_plots else None, ) # convert the KL surrogate model to the overall surrogate at each node @@ -382,8 +371,29 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): ) if make_probability_plot: - level_list = [0.3048, 0.6096, 0.9144, 1.2192, 1.524, 1.8288, 2.1336, 2.4384, 2.7432, 3.048, 3.3528, 3.6576, 3.9624, 4.2672, 4.572, 4.8768, 5.1816, 5.4864, 5.7912, 6.096] - + level_list = [ + 0.3048, + 0.6096, + 0.9144, + 1.2192, + 1.524, + 1.8288, + 2.1336, + 2.4384, + 2.7432, + 3.048, + 3.3528, + 3.6576, + 3.9624, + 4.2672, + 4.572, + 4.8768, + 5.1816, + 5.4864, + 5.7912, + 6.096, + ] + node_prob_field = probability_field_from_surrogate( levels=level_list, surrogate_model=surrogate_model, @@ -395,15 +405,15 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): element_table=elements if point_spacing is None else None, 
filename=probability_filename, ) - + plot_selected_probability_fields( node_prob_field=node_prob_field, level_list=level_list, output_directory=output_directory if save_plots else None, - label_unit_convert_factor=1/0.3048, + label_unit_convert_factor=1 / 0.3048, label_unit_name='ft', ) - + if show_plots: LOGGER.info('showing plots') pyplot.show() diff --git a/singularity/prep/files/combine_ensemble.py b/singularity/prep/files/combine_ensemble.py index 28a6d9a..ad82b74 100644 --- a/singularity/prep/files/combine_ensemble.py +++ b/singularity/prep/files/combine_ensemble.py @@ -7,7 +7,6 @@ LOGGER = get_logger('klpc_wetonly') - def main(args): tracks_dir = args.tracks_dir @@ -16,11 +15,12 @@ def main(args): output = combine_results( model='schism', adcirc_like=True, - output=ensemble_dir/'analyze', + output=ensemble_dir / 'analyze', directory=ensemble_dir, - parallel=not args.sequential + parallel=not args.sequential, ) + if __name__ == '__main__': parser = ArgumentParser() diff --git a/singularity/prep/files/download_data.py b/singularity/prep/files/download_data.py index ef01ae5..204f8b6 100644 --- a/singularity/prep/files/download_data.py +++ b/singularity/prep/files/download_data.py @@ -27,10 +27,9 @@ def main(args): workdir.mkdir(exist_ok=True) dt_data = pd.read_csv(dt_rng_path, delimiter=',') - date_1, date_2, _ = pd.to_datetime(dt_data.date_time).dt.strftime( - "%Y%m%d%H").values - model_start_time = datetime.strptime(date_1, "%Y%m%d%H") - model_end_time = datetime.strptime(date_2, "%Y%m%d%H") + date_1, date_2, _ = pd.to_datetime(dt_data.date_time).dt.strftime('%Y%m%d%H').values + model_start_time = datetime.strptime(date_1, '%Y%m%d%H') + model_end_time = datetime.strptime(date_2, '%Y%m%d%H') spinup_time = timedelta(days=2) # Right now the only download is for NWM, in the future there @@ -48,10 +47,9 @@ def main(args): start_date=model_start_time - spinup_time, end_date=model_end_time - model_start_time + spinup_time, overwrite=True, - ) + ) 
nwm.pairings.save_json( - sources=workdir / 'source.json', - sinks=workdir / 'sink.json' + sources=workdir / 'source.json', sinks=workdir / 'sink.json' ) @@ -63,18 +61,16 @@ def parse_arguments(): required=True, type=Path, default=None, - help='path to store generated configuration files' + help='path to store generated configuration files', ) argument_parser.add_argument( - "--date-range-file", + '--date-range-file', required=True, type=Path, - help="path to the file containing simulation date range" + help='path to the file containing simulation date range', ) argument_parser.add_argument( - "--nwm-file", - type=Path, - help="path to the NWM hydrofabric dataset", + '--nwm-file', type=Path, help='path to the NWM hydrofabric dataset', ) argument_parser.add_argument( '--mesh-directory', @@ -82,14 +78,12 @@ def parse_arguments(): required=True, help='path to input mesh (`hgrid.gr3`, `manning.gr3` or `drag.gr3`)', ) - argument_parser.add_argument( - "--with-hydrology", action="store_true" - ) + argument_parser.add_argument('--with-hydrology', action='store_true') args = argument_parser.parse_args() return args -if __name__ == "__main__": +if __name__ == '__main__': main(parse_arguments()) diff --git a/singularity/prep/files/setup_ensemble.py b/singularity/prep/files/setup_ensemble.py index d16b4a0..d055e8c 100644 --- a/singularity/prep/files/setup_ensemble.py +++ b/singularity/prep/files/setup_ensemble.py @@ -51,7 +51,6 @@ logger.setLevel(logging.INFO) - def _relocate_source_sink(schism_dir, region_shape): # Feeder info is generated during mesh generation @@ -66,19 +65,15 @@ def _relocate_source_sink(schism_dir, region_shape): original_ss = source_sink.from_files(source_dir=old_ss_dir) region = gpd.read_file(region_shape) - region_coords = [ - get_coordinates(p) for p in region.explode(index_parts=True).exterior - ] + region_coords = [get_coordinates(p) for p in region.explode(index_parts=True).exterior] # split source/sink into inside and outside region - _, 
outside_ss = original_ss.clip_by_polygons( - hgrid=hgrid, polygons_xy=region_coords, - ) + _, outside_ss = original_ss.clip_by_polygons(hgrid=hgrid, polygons_xy=region_coords,) # relocate sources relocated_ss = relocate_sources( old_ss_dir=old_ss_dir, # based on the without feeder hgrid - feeder_info_file=feeder_info_file, + feeder_info_file=feeder_info_file, hgrid_fname=hgrid_fname, # HGrid with feeder outdir=str(schism_dir / 'relocated_source_sink'), max_search_radius=2000, # search radius (in meters) @@ -96,7 +91,6 @@ def _relocate_source_sink(schism_dir, region_shape): combined_ss.writer(str(schism_dir)) - def _fix_nwm_issues(ensemble_dir, hires_shapefile): # Workaround for hydrology param bug #34 @@ -130,17 +124,16 @@ def main(args): workdir.mkdir(exist_ok=True) dt_data = pd.read_csv(dt_rng_path, delimiter=',') - date_1, date_2, date_3 = pd.to_datetime(dt_data.date_time).dt.strftime( - "%Y%m%d%H").values - model_start_time = datetime.strptime(date_1, "%Y%m%d%H") - model_end_time = datetime.strptime(date_2, "%Y%m%d%H") - perturb_start = datetime.strptime(date_3, "%Y%m%d%H") + date_1, date_2, date_3 = pd.to_datetime(dt_data.date_time).dt.strftime('%Y%m%d%H').values + model_start_time = datetime.strptime(date_1, '%Y%m%d%H') + model_end_time = datetime.strptime(date_2, '%Y%m%d%H') + perturb_start = datetime.strptime(date_3, '%Y%m%d%H') spinup_time = timedelta(days=2) forcing_configurations = [] - forcing_configurations.append(TidalForcingJSON( - resource=tpxo_dir / 'h_tpxo9.v1.nc', - tidal_source=TidalSource.TPXO)) + forcing_configurations.append( + TidalForcingJSON(resource=tpxo_dir / 'h_tpxo9.v1.nc', tidal_source=TidalSource.TPXO) + ) if with_hydrology: forcing_configurations.append( NationalWaterModelFocringJSON( @@ -148,11 +141,10 @@ def main(args): cache=True, source_json=workdir / 'source.json', sink_json=workdir / 'sink.json', - pairing_hgrid=mesh_file + pairing_hgrid=mesh_file, ) ) - platform = Platform.LOCAL unperturbed = None @@ -161,38 +153,35 @@ def 
main(args): orig_track = VortexTrack.from_file(track_path) adv_uniq = orig_track.data.advisory.unique() if len(adv_uniq) != 1: - raise ValueError("Track file has multiple advisory types!") + raise ValueError('Track file has multiple advisory types!') advisory = adv_uniq.item() file_deck = 'a' if advisory != 'BEST' else 'b' - # NOTE: Perturbers use min("forecast_time") to filter multiple # tracks. But for OFCL forecast simulation, the track file we # get has unique forecast time for only the segment we want to # perturb, the preceeding entries are 0-hour forecasts from # previous forecast_times track_to_perturb = VortexTrack.from_file( - track_path, - start_date=perturb_start, - forecast_time=perturb_start if advisory != 'BEST' else None, - end_date=model_end_time, - file_deck=file_deck, - advisories=[advisory], - ) - track_to_perturb.to_file( - workdir/'track_to_perturb.dat', overwrite=True + track_path, + start_date=perturb_start, + forecast_time=perturb_start if advisory != 'BEST' else None, + end_date=model_end_time, + file_deck=file_deck, + advisories=[advisory], ) + track_to_perturb.to_file(workdir / 'track_to_perturb.dat', overwrite=True) perturbations = perturb_tracks( perturbations=args.num_perturbations, - directory=workdir/'track_files', - storm=workdir/'track_to_perturb.dat', + directory=workdir / 'track_files', + storm=workdir / 'track_to_perturb.dat', variables=[ 'cross_track', 'along_track', 'radius_of_maximum_winds', 'max_sustained_wind_speed', - ], + ], sample_from_distribution=args.sample_from_distribution, sample_rule=args.sample_rule, quadrature=args.quadrature, @@ -204,9 +193,7 @@ def main(args): ) if perturb_start != model_start_time: - perturb_idx = orig_track.data[ - orig_track.data.datetime == perturb_start - ].index.min() + perturb_idx = orig_track.data[orig_track.data.datetime == perturb_start].index.min() if perturb_idx > 0: # If only part of the track needs to be updated @@ -216,26 +203,21 @@ def main(args): unperturbed = VortexTrack( 
unperturbed_data, file_deck='b', - advisories = ['BEST'], - end_date=orig_track.data.iloc[perturb_idx - 1].datetime + advisories=['BEST'], + end_date=orig_track.data.iloc[perturb_idx - 1].datetime, ) # Read generated tracks and append to unpertubed section - perturbed_tracks = glob.glob(str(workdir/'track_files'/'*.22')) + perturbed_tracks = glob.glob(str(workdir / 'track_files' / '*.22')) for pt in perturbed_tracks: # Fake BEST track here (in case it's not a real best)! perturbed_data = VortexTrack.from_file(pt).data perturbed_data.advisory = 'BEST' perturbed_data.forecast_hours = 0 - perturbed = VortexTrack( - perturbed_data, - file_deck='b', - advisories = ['BEST'], - ) + perturbed = VortexTrack(perturbed_data, file_deck='b', advisories=['BEST'],) full_track = pd.concat( - (unperturbed.fort_22(), perturbed.fort_22()), - ignore_index=True + (unperturbed.fort_22(), perturbed.fort_22()), ignore_index=True ) # Overwrites the perturbed-segment-only file full_track.to_csv(pt, index=False, header=False) @@ -247,8 +229,8 @@ def main(args): nhc_code=f'{args.name}{args.year}', interval_seconds=3600, nws=20, - fort22_filename=workdir/'track_files'/'original.22', - attributes={'model': pahm_model} + fort22_filename=workdir / 'track_files' / 'original.22', + attributes={'model': pahm_model}, ) ) @@ -261,12 +243,10 @@ def main(args): 'forcings': forcing_configurations, 'perturbations': perturbations, 'platform': platform, -# 'schism_executable': 'pschism_PAHM_TVD-VL' + # 'schism_executable': 'pschism_PAHM_TVD-VL' } - run_configuration = SCHISMRunConfiguration( - **run_config_kwargs, - ) + run_configuration = SCHISMRunConfiguration(**run_config_kwargs,) run_configuration['schism']['hgrid_path'] = mesh_file run_configuration['schism']['attributes']['ncor'] = 1 @@ -275,13 +255,15 @@ def main(args): ) # Now generate the setup - generate_schism_configuration(**{ - 'configuration_directory': workdir, - 'output_directory': workdir, - 'relative_paths': True, - 'overwrite': True, - 
'parallel': True - }) + generate_schism_configuration( + **{ + 'configuration_directory': workdir, + 'output_directory': workdir, + 'relative_paths': True, + 'overwrite': True, + 'parallel': True, + } + ) if with_hydrology: _fix_nwm_issues(workdir, hires_reg) @@ -293,10 +275,10 @@ def parse_arguments(): argument_parser = ArgumentParser() argument_parser.add_argument( - "--track-file", - help="path to the storm track file for parametric wind setup", + '--track-file', + help='path to the storm track file for parametric wind setup', type=Path, - required=True + required=True, ) argument_parser.add_argument( @@ -304,30 +286,26 @@ def parse_arguments(): required=True, type=Path, default=None, - help='path to store generated configuration files' + help='path to store generated configuration files', ) argument_parser.add_argument( - "--date-range-file", + '--date-range-file', required=True, type=Path, - help="path to the file containing simulation date range" + help='path to the file containing simulation date range', ) argument_parser.add_argument( - '-n', '--num-perturbations', + '-n', + '--num-perturbations', type=int, required=True, help='path to input mesh (`hgrid.gr3`, `manning.gr3` or `drag.gr3`)', ) argument_parser.add_argument( - "--tpxo-dir", - required=True, - type=Path, - help="path to the TPXO dataset directory", + '--tpxo-dir', required=True, type=Path, help='path to the TPXO dataset directory', ) argument_parser.add_argument( - "--nwm-file", - type=Path, - help="path to the NWM hydrofabric dataset", + '--nwm-file', type=Path, help='path to the NWM hydrofabric dataset', ) argument_parser.add_argument( '--mesh-directory', @@ -339,38 +317,23 @@ def parse_arguments(): '--hires-region', type=Path, required=True, - help='path to high resolution polygon shapefile' + help='path to high resolution polygon shapefile', ) - argument_parser.add_argument( - "--sample-from-distribution", action="store_true" - ) - argument_parser.add_argument( - "--sample-rule", type=str, 
default='random' - ) - argument_parser.add_argument( - "--quadrature", action="store_true" - ) - argument_parser.add_argument( - "--use-wwm", action="store_true" - ) - argument_parser.add_argument( - "--with-hydrology", action="store_true" - ) - argument_parser.add_argument( - "--pahm-model", choices=['gahm', 'symmetric'], default='gahm' - ) - - argument_parser.add_argument( - "name", help="name of the storm", type=str) + argument_parser.add_argument('--sample-from-distribution', action='store_true') + argument_parser.add_argument('--sample-rule', type=str, default='random') + argument_parser.add_argument('--quadrature', action='store_true') + argument_parser.add_argument('--use-wwm', action='store_true') + argument_parser.add_argument('--with-hydrology', action='store_true') + argument_parser.add_argument('--pahm-model', choices=['gahm', 'symmetric'], default='gahm') - argument_parser.add_argument( - "year", help="year of the storm", type=int) + argument_parser.add_argument('name', help='name of the storm', type=str) + argument_parser.add_argument('year', help='year of the storm', type=int) args = argument_parser.parse_args() return args -if __name__ == "__main__": +if __name__ == '__main__': main(parse_arguments()) diff --git a/singularity/prep/files/setup_model.py b/singularity/prep/files/setup_model.py index 6e2061f..5867326 100755 --- a/singularity/prep/files/setup_model.py +++ b/singularity/prep/files/setup_model.py @@ -33,14 +33,14 @@ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) -CDSAPI_URL = "https://cds.climate.copernicus.eu/api/v2" +CDSAPI_URL = 'https://cds.climate.copernicus.eu/api/v2' TPXO_LINK_PATH = pathlib.Path('~').expanduser() / '.local/share/tpxo' NWM_LINK_PATH = pathlib.Path('~').expanduser() / '.local/share/pyschism/nwm' @contextmanager def pushd(directory): - '''Temporarily modify current directory + """Temporarily modify current directory Parameters ---------- @@ -50,7 +50,7 @@ def pushd(directory): Returns ------- None 
- ''' + """ origin = os.getcwd() try: @@ -65,14 +65,15 @@ def get_main_cache_path(cache_dir, storm, year): return cache_dir / f'{storm.lower()}_{year}' + def get_meteo_cache_path(source, main_cache_path, bbox, start_date, end_date): m = hashlib.md5() m.update(np.round(bbox.corners(), decimals=2).tobytes()) - m.update(start_date.strftime("%Y-%m-%d:%H:%M:%S").encode('utf8')) - m.update(end_date.strftime("%Y-%m-%d:%H:%M:%S").encode('utf8')) + m.update(start_date.strftime('%Y-%m-%d:%H:%M:%S').encode('utf8')) + m.update(end_date.strftime('%Y-%m-%d:%H:%M:%S').encode('utf8')) - meteo_cache_path = main_cache_path / f"{source}_{m.hexdigest()}" + meteo_cache_path = main_cache_path / f'{source}_{m.hexdigest()}' return meteo_cache_path @@ -82,7 +83,7 @@ def cache_lock(cache_path): if not cache_path.exists(): cache_path.mkdir(parents=True, exist_ok=True) - with open(cache_path / ".cache.lock", "w") as fp: + with open(cache_path / '.cache.lock', 'w') as fp: try: fcntl.flock(fp.fileno(), fcntl.LOCK_EX) yield @@ -90,6 +91,7 @@ def cache_lock(cache_path): finally: fcntl.flock(fp.fileno(), fcntl.LOCK_UN) + def from_meteo_cache(meteo_cache_path, sflux_dir): # TODO: Generalize @@ -98,10 +100,10 @@ def from_meteo_cache(meteo_cache_path, sflux_dir): return False contents = list(meteo_cache_path.iterdir()) - if not any(p.match("sflux_inputs.txt") for p in contents): + if not any(p.match('sflux_inputs.txt') for p in contents): return False - logger.info("Creating sflux from cache...") + logger.info('Creating sflux from cache...') # Copy files from cache dir to sflux dir for p in contents: @@ -111,7 +113,7 @@ def from_meteo_cache(meteo_cache_path, sflux_dir): else: shutil.copy(p, dest) - logger.info("Done copying cached sflux.") + logger.info('Done copying cached sflux.') return True @@ -119,12 +121,12 @@ def from_meteo_cache(meteo_cache_path, sflux_dir): def copy_meteo_cache(sflux_dir, meteo_cache_path): # TODO: Generalize - logger.info("Copying cache files to main cache location...") + 
logger.info('Copying cache files to main cache location...') # Copy files from sflux dir to cache dir # Clean meteo_cache_path if already populated? contents_dst = list(meteo_cache_path.iterdir()) - contents_dst = [p for p in contents_dst if p.suffix != ".lock"] + contents_dst = [p for p in contents_dst if p.suffix != '.lock'] for p in contents_dst: if p.is_dir(): shutil.rmtree(p) @@ -140,55 +142,52 @@ def copy_meteo_cache(sflux_dir, meteo_cache_path): else: shutil.copy(p, dest) - logger.info("Done copying cache files to main cache location.") + logger.info('Done copying cache files to main cache location.') -def setup_schism_model( - mesh_path, - domain_bbox_path, - date_range_path, - station_info_path, - out_dir, - main_cache_path, - parametric_wind=False, - nhc_track_file=None, - storm_id=None, - use_wwm=False, - ): +def setup_schism_model( + mesh_path, + domain_bbox_path, + date_range_path, + station_info_path, + out_dir, + main_cache_path, + parametric_wind=False, + nhc_track_file=None, + storm_id=None, + use_wwm=False, +): domain_box = gpd.read_file(domain_bbox_path) - atm_bbox = Bbox(domain_box.to_crs('EPSG:4326').total_bounds.reshape(2,2)) + atm_bbox = Bbox(domain_box.to_crs('EPSG:4326').total_bounds.reshape(2, 2)) schism_dir = out_dir schism_dir.mkdir(exist_ok=True, parents=True) - logger.info("Calculating times and dates") - dt = timedelta(seconds=150.) + logger.info('Calculating times and dates') + dt = timedelta(seconds=150.0) # Use an integer for number of steps or a timedelta to approximate # number of steps internally based on timestep - nspool = timedelta(minutes=20.) 
- + nspool = timedelta(minutes=20.0) # measurement days +7 days of simulation: 3 ramp, 2 prior # & 2 after the measurement dates dt_data = pd.read_csv(date_range_path, delimiter=',') - date_1, date_2 = pd.to_datetime(dt_data.date_time).dt.strftime( - "%Y%m%d%H").values - date_1 = datetime.strptime(date_1, "%Y%m%d%H") - date_2 = datetime.strptime(date_2, "%Y%m%d%H") - + date_1, date_2 = pd.to_datetime(dt_data.date_time).dt.strftime('%Y%m%d%H').values + date_1 = datetime.strptime(date_1, '%Y%m%d%H') + date_2 = datetime.strptime(date_2, '%Y%m%d%H') # If there are no observation data, it's hindcast mode hindcast_mode = (station_info_path).is_file() if hindcast_mode: # If in hindcast mode run for 4 days: 2 days prior to now to # 2 days after. - logger.info("Setup hindcast mode") + logger.info('Setup hindcast mode') start_date = date_1 - timedelta(days=2) end_date = date_2 + timedelta(days=2) else: - logger.info("Setup forecast mode") + logger.info('Setup forecast mode') # If in forecast mode then date_1 == date_2, and simulation # will run for about 3 days: abou 1 day prior to now to 2 days @@ -208,22 +207,20 @@ def setup_schism_model( rnday = end_date - start_date - dramp = timedelta(days=1.) 
+ dramp = timedelta(days=1.0) - hgrid = Hgrid.open(mesh_path, crs="epsg:4326") + hgrid = Hgrid.open(mesh_path, crs='epsg:4326') fgrid = ManningsN.linear_with_depth( - hgrid, - min_value=0.02, max_value=0.05, - min_depth=-1.0, max_depth=-3.0) + hgrid, min_value=0.02, max_value=0.05, min_depth=-1.0, max_depth=-3.0 + ) coops_stations = None stations_file = station_info_path if stations_file.is_file(): st_data = np.genfromtxt(stations_file, delimiter=',') coops_stations = Stations( - nspool_sta=nspool, - crs="EPSG:4326", - elev=True, u=True, v=True) + nspool_sta=nspool, crs='EPSG:4326', elev=True, u=True, v=True + ) for coord in st_data: coops_stations.add_station(coord[0], coord[1]) @@ -235,20 +232,19 @@ def setup_schism_model( elif storm_id is not None: atmospheric = BestTrackForcing(storm=storm_id) else: - ValueError("Storm track information is not provided!") + ValueError('Storm track information is not provided!') else: # For hindcast ERA5 is used and for forecast # GFS and hrrr3.HRRR. 
Neither ERA5 nor the GFS and # hrrr3.HRRR combination are supported by nws2 mechanism pass - - logger.info("Creating model configuration ...") + logger.info('Creating model configuration ...') config = ModelConfig( hgrid=hgrid, fgrid=fgrid, - iettype=iettype.Iettype3(database="tpxo"), - ifltype=ifltype.Ifltype3(database="tpxo"), + iettype=iettype.Iettype3(database='tpxo'), + ifltype=ifltype.Ifltype3(database='tpxo'), nws=atmospheric, source_sink=NWM(), ) @@ -256,7 +252,7 @@ def setup_schism_model( if config.forcings.nws and getattr(config.forcings.nws, 'sflux_2', None): config.forcings.nws.sflux_2.inventory.file_interval = timedelta(hours=6) - logger.info("Creating cold start ...") + logger.info('Creating cold start ...') # create reference dates coldstart = config.coldstart( stations=coops_stations, @@ -271,19 +267,19 @@ def setup_schism_model( dahv=True, ) - logger.info("Writing to disk ...") + logger.info('Writing to disk ...') if not parametric_wind: # In hindcast mode ERA5 is used manually: temporary solution - sflux_dir = (schism_dir / "sflux") + sflux_dir = schism_dir / 'sflux' sflux_dir.mkdir(exist_ok=True, parents=True) # Workaround for ERA5 not being compatible with NWS2 object meteo_cache_kwargs = { - 'bbox': atm_bbox, - 'start_date': start_date, - 'end_date': start_date + rnday + 'bbox': atm_bbox, + 'start_date': start_date, + 'end_date': start_date + rnday, } if hindcast_mode: @@ -300,16 +296,18 @@ def setup_schism_model( if hindcast_mode: era5 = ERA5() era5.write( - outdir=schism_dir / "sflux", - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True) + outdir=schism_dir / 'sflux', + start_date=start_date, + rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), + air=True, + rad=True, + prc=True, + bbox=atm_bbox, + overwrite=True, + ) else: - with ExitStack() as stack: # Just to make sure there are not permission @@ -320,14 +318,16 @@ def 
setup_schism_model( gfs = GFS() gfs.write( - outdir=schism_dir / "sflux", - level=1, - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True - ) + outdir=schism_dir / 'sflux', + level=1, + start_date=start_date, + rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), + air=True, + rad=True, + prc=True, + bbox=atm_bbox, + overwrite=True, + ) # If we should limit forecast to 2 days, then # why not use old HRRR implementation? Because @@ -336,39 +336,40 @@ def setup_schism_model( # 2day forecast! hrrr = HRRR() hrrr.write( - outdir=schism_dir / "sflux", - level=2, - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True + outdir=schism_dir / 'sflux', + level=2, + start_date=start_date, + rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), + air=True, + rad=True, + prc=True, + bbox=atm_bbox, + overwrite=True, ) -# hrrr3.HRRR( -# start_date=start_date, -# rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), -# record=2, -# bbox=atm_bbox -# ) -# for i, nc_file in enumerate(sorted(pathlib.Path().glob('*/*.nc'))): -# dst_air = schism_dir / "sflux" / f"sflux_air_2.{i:04d}.nc" -# shutil.move(nc_file, dst_air) -# pathlib.Path(schism_dir / "sflux" / f"sflux_prc_2.{i:04d}.nc").symlink_to( -# dst_air -# ) -# pathlib.Path(schism_dir / "sflux" / f"sflux_rad_2.{i:04d}.nc").symlink_to( -# dst_air -# ) - - - with open(schism_dir / "sflux" / "sflux_inputs.txt", "w") as f: - f.write("&sflux_inputs\n/\n") + # hrrr3.HRRR( + # start_date=start_date, + # rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), + # record=2, + # bbox=atm_bbox + # ) + # for i, nc_file in enumerate(sorted(pathlib.Path().glob('*/*.nc'))): + # dst_air = schism_dir / "sflux" / f"sflux_air_2.{i:04d}.nc" + # shutil.move(nc_file, dst_air) + # pathlib.Path(schism_dir / 
"sflux" / f"sflux_prc_2.{i:04d}.nc").symlink_to( + # dst_air + # ) + # pathlib.Path(schism_dir / "sflux" / f"sflux_rad_2.{i:04d}.nc").symlink_to( + # dst_air + # ) + + with open(schism_dir / 'sflux' / 'sflux_inputs.txt', 'w') as f: + f.write('&sflux_inputs\n/\n') copy_meteo_cache(sflux_dir, meteo_cache_path) windrot = gridgr3.Windrot.default(hgrid) - windrot.write(schism_dir / "windrot_geo2proj.gr3", overwrite=True) + windrot.write(schism_dir / 'windrot_geo2proj.gr3', overwrite=True) ## end of workaround # Workaround for bug #30 @@ -376,12 +377,11 @@ def setup_schism_model( coldstart.param.opt.nws = NWSType.CLIMATE_AND_FORECAST.value ## end of workaround - - # Workaround for station bug #32 if coops_stations is not None: coldstart.param.schout.nspool_sta = int( - round(nspool.total_seconds() / coldstart.param.core.dt)) + round(nspool.total_seconds() / coldstart.param.core.dt) + ) ## end of workaround with ExitStack() as stack: @@ -402,7 +402,7 @@ def setup_schism_model( ## Workaround to make sure outputs directory is copied from/to S3 try: - os.mknod(schism_dir / "outputs" / "_") + os.mknod(schism_dir / 'outputs' / '_') except FileExistsError: pass ## end of workaround @@ -410,7 +410,8 @@ def setup_schism_model( if use_wwm: wwm.setup_wwm(mesh_path, schism_dir, ensemble=False) - logger.info("Setup done") + logger.info('Setup done') + def main(args): @@ -424,9 +425,7 @@ def main(args): st_loc_path = args.station_location_file out_dir = args.out nhc_track = None if args.track_file is None else args.track_file - cache_path = get_main_cache_path( - args.cache_dir, storm_name, storm_year - ) + cache_path = get_main_cache_path(args.cache_dir, storm_name, storm_year) tpxo_dir = args.tpxo_dir nwm_dir = args.nwm_dir use_wwm = args.use_wwm @@ -438,7 +437,6 @@ def main(args): os.symlink(tpxo_dir, TPXO_LINK_PATH, target_is_directory=True) os.symlink(nwm_dir, NWM_LINK_PATH, target_is_directory=True) - setup_schism_model( mesh_path, bbox_path, @@ -449,83 +447,68 @@ def 
main(args): parametric_wind=param_wind, nhc_track_file=nhc_track, storm_id=f'{storm_name}{storm_year}', - use_wwm=use_wwm - ) + use_wwm=use_wwm, + ) if __name__ == '__main__': parser = argparse.ArgumentParser() - - parser.add_argument( - "--parametric-wind", "-w", - help="flag to switch to parametric wind setup", action="store_true") - parser.add_argument( - "--mesh-file", - help="path to the file containing computational grid", - type=pathlib.Path + '--parametric-wind', + '-w', + help='flag to switch to parametric wind setup', + action='store_true', ) parser.add_argument( - "--domain-bbox-file", - help="path to the file containing domain bounding box", - type=pathlib.Path + '--mesh-file', help='path to the file containing computational grid', type=pathlib.Path ) parser.add_argument( - "--date-range-file", - help="path to the file containing simulation date range", - type=pathlib.Path + '--domain-bbox-file', + help='path to the file containing domain bounding box', + type=pathlib.Path, ) parser.add_argument( - "--station-location-file", - help="path to the file containing station locations", - type=pathlib.Path + '--date-range-file', + help='path to the file containing simulation date range', + type=pathlib.Path, ) parser.add_argument( - "--cache-dir", - help="path to the cache directory", - type=pathlib.Path + '--station-location-file', + help='path to the file containing station locations', + type=pathlib.Path, ) - parser.add_argument( - "--track-file", - help="path to the storm track file for parametric wind setup", - type=pathlib.Path - ) + parser.add_argument('--cache-dir', help='path to the cache directory', type=pathlib.Path) parser.add_argument( - "--tpxo-dir", - help="path to the TPXO database directory", - type=pathlib.Path + '--track-file', + help='path to the storm track file for parametric wind setup', + type=pathlib.Path, ) parser.add_argument( - "--nwm-dir", - help="path to the NWM stream vector database directory", - type=pathlib.Path + 
'--tpxo-dir', help='path to the TPXO database directory', type=pathlib.Path ) parser.add_argument( - "--out", - help="path to the setup output (solver input) directory", - type=pathlib.Path + '--nwm-dir', help='path to the NWM stream vector database directory', type=pathlib.Path ) parser.add_argument( - "--use-wwm", action="store_true" + '--out', help='path to the setup output (solver input) directory', type=pathlib.Path ) - parser.add_argument( - "name", help="name of the storm", type=str) + parser.add_argument('--use-wwm', action='store_true') - parser.add_argument( - "year", help="year of the storm", type=int) + parser.add_argument('name', help='name of the storm', type=str) + parser.add_argument('year', help='year of the storm', type=int) args = parser.parse_args() diff --git a/singularity/prep/files/wwm.py b/singularity/prep/files/wwm.py index 56a060a..0448687 100644 --- a/singularity/prep/files/wwm.py +++ b/singularity/prep/files/wwm.py @@ -13,18 +13,18 @@ REFS = Path('/refs') + def setup_wwm(mesh_file: Path, setup_dir: Path, ensemble: bool): - '''Output is + """Output is - hgrid_WWM.gr3 - param.nml - wwmbnd.gr3 - wwminput.nml - ''' + """ - runs_dir = [setup_dir] if ensemble: - spinup_dir = setup_dir/'spinup' + spinup_dir = setup_dir / 'spinup' runs_dir = setup_dir.glob('runs/*') schism_grid = Gr3.open(mesh_file, crs=4326) @@ -44,7 +44,6 @@ def setup_wwm(mesh_file: Path, setup_dir: Path, ensemble: bool): wwm_nml = get_wwm_params(run_name=run.name, schism_nml=schism_nml) wwm_nml.write(run / 'wwminput.nml') - def break_quads(pyschism_mesh: Gr3) -> Gr3 | Gr3Field: @@ -54,27 +53,25 @@ def break_quads(pyschism_mesh: Gr3) -> Gr3 | Gr3Field: new_mesh = deepcopy(pyschism_mesh) else: - tmp = quads[:,2:] + tmp = quads[:, 2:] tmp = np.insert(tmp, 0, quads[:, 0], axis=1) broken = np.vstack((quads[:, :3], tmp)) trias = pyschism_mesh.triangles final_trias = np.vstack((trias, broken)) # NOTE: Node IDs and indexs are the same as before elements = { - idx+1: 
list(map(pyschism_mesh.nodes.get_id_by_index, tri)) + idx + 1: list(map(pyschism_mesh.nodes.get_id_by_index, tri)) for idx, tri in enumerate(final_trias) } new_mesh = deepcopy(pyschism_mesh) new_mesh.elements = Elements(pyschism_mesh.nodes, elements) - return new_mesh - def get_wwm_params(run_name, schism_nml) -> f90nml.Namelist: - + # Get relevant values from SCHISM setup begin_time = datetime( year=schism_nml['opt']['start_year'], @@ -93,7 +90,7 @@ def get_wwm_params(run_name, schism_nml) -> f90nml.Namelist: wwm_delta_t = nstep_wwm * delta_t # For now just read the example file update relevant names and write - wwm_params = f90nml.read(REFS/'wwminput.nml') + wwm_params = f90nml.read(REFS / 'wwminput.nml') wwm_params.uppercase = True proc_nml = wwm_params['PROC'] @@ -120,21 +117,21 @@ def get_wwm_params(run_name, schism_nml) -> f90nml.Namelist: grid_nml['IGRIDTYPE'] = 3 bouc_nml = wwm_params['BOUC'] - # Begin time of the wave boundary file (FILEWAVE) + # Begin time of the wave boundary file (FILEWAVE) bouc_nml['BEGTC'] = begin_time.strftime(time_fmt) - # Time step in FILEWAVE + # Time step in FILEWAVE bouc_nml['DELTC'] = 1 - # Unit can be HR, MIN, SEC + # Unit can be HR, MIN, SEC bouc_nml['UNITC'] = 'HR' # End time bouc_nml['ENDTC'] = end_time.strftime(time_fmt) # Boundary file defining boundary conditions and Neumann nodes. bouc_nml['FILEBOUND'] = 'wwmbnd.gr3' - bouc_nml['BEGTC_OUT'] = 20030908.000000 + bouc_nml['BEGTC_OUT'] = 20030908.000000 bouc_nml['DELTC_OUT'] = 600.000000000000 bouc_nml['UNITC_OUT'] = 'SEC' - bouc_nml['ENDTC_OUT'] = 20031008.000000 - + bouc_nml['ENDTC_OUT'] = 20031008.000000 + hist_nml = wwm_params['HISTORY'] # Start output time, yyyymmdd. hhmmss; # must fit the simulation time otherwise no output. 
@@ -169,42 +166,42 @@ def get_wwm_params(run_name, schism_nml) -> f90nml.Namelist: hot_nml = wwm_params['HOTFILE'] # Write hotfile hot_nml['LHOTF'] = False - #'.nc' suffix will be added -# hot_nml['FILEHOT_OUT'] = 'wwm_hot_out' -# #Starting time of hotfile writing. With ihot!=0 in SCHISM, -# # this will be whatever the new hotstarted time is (even with ihot=2) -# hot_nml['BEGTC'] = '20030908.000000' -# # time between hotfile writes -# hot_nml['DELTC'] = 86400. -# # unit used above -# hot_nml['UNITC'] = 'SEC' -# # Ending time of hotfile writing (adjust with BEGTC) -# hot_nml['ENDTC'] = '20031008.000000' -# # Applies only to netcdf -# # If T then hotfile contains 2 last records. -# # If F then hotfile contains N record if N outputs -# # have been done. -# # For binary only one record. -# hot_nml['LCYCLEHOT'] = True -# # 1: binary hotfile of data as output -# # 2: netcdf hotfile of data as output (default) -# hot_nml['HOTSTYLE_OUT'] = 2 -# # 0: hotfile in a single file (binary or netcdf) -# # MPI_REDUCE is then used and thus youd avoid too freq. output -# # 1: hotfiles in separate files, each associated -# # with one process -# hot_nml['MULTIPLEOUT'] = 0 -# # (Full) hot file name for input -# hot_nml['FILEHOT_IN'] = 'wwm_hot_in.nc' -# # 1: binary hotfile of data as input -# # 2: netcdf hotfile of data as input (default) -# hot_nml['HOTSTYLE_IN'] = 2 -# # Position in hotfile (only for netcdf) -# # for reading -# hot_nml['IHOTPOS_IN'] = 1 -# # 0: read hotfile from one single file -# # 1: read hotfile from multiple files (must use same # of CPU?) -# hot_nml['MULTIPLEIN'] = 0 + #'.nc' suffix will be added + # hot_nml['FILEHOT_OUT'] = 'wwm_hot_out' + # #Starting time of hotfile writing. With ihot!=0 in SCHISM, + # # this will be whatever the new hotstarted time is (even with ihot=2) + # hot_nml['BEGTC'] = '20030908.000000' + # # time between hotfile writes + # hot_nml['DELTC'] = 86400. 
+ # # unit used above + # hot_nml['UNITC'] = 'SEC' + # # Ending time of hotfile writing (adjust with BEGTC) + # hot_nml['ENDTC'] = '20031008.000000' + # # Applies only to netcdf + # # If T then hotfile contains 2 last records. + # # If F then hotfile contains N record if N outputs + # # have been done. + # # For binary only one record. + # hot_nml['LCYCLEHOT'] = True + # # 1: binary hotfile of data as output + # # 2: netcdf hotfile of data as output (default) + # hot_nml['HOTSTYLE_OUT'] = 2 + # # 0: hotfile in a single file (binary or netcdf) + # # MPI_REDUCE is then used and thus youd avoid too freq. output + # # 1: hotfiles in separate files, each associated + # # with one process + # hot_nml['MULTIPLEOUT'] = 0 + # # (Full) hot file name for input + # hot_nml['FILEHOT_IN'] = 'wwm_hot_in.nc' + # # 1: binary hotfile of data as input + # # 2: netcdf hotfile of data as input (default) + # hot_nml['HOTSTYLE_IN'] = 2 + # # Position in hotfile (only for netcdf) + # # for reading + # hot_nml['IHOTPOS_IN'] = 1 + # # 0: read hotfile from one single file + # # 1: read hotfile from multiple files (must use same # of CPU?) + # hot_nml['MULTIPLEIN'] = 0 return wwm_params @@ -221,16 +218,15 @@ def update_schism_params(path: Path) -> f90nml.Namelist: opt_nml['icou_elfe_wwm'] = 1 opt_nml['nstep_wwm'] = 4 opt_nml['iwbl'] = 0 - opt_nml['hmin_radstress'] = 1. + opt_nml['hmin_radstress'] = 1.0 # TODO: Revisit for spinup support # NOTE: Issue 7#issuecomment-1482848205 oceanmodeling fork -# opt_nml['nrampwafo'] = 0 - opt_nml['drampwafo'] = 0. + # opt_nml['nrampwafo'] = 0 + opt_nml['drampwafo'] = 0.0 opt_nml['turbinj'] = 0.15 opt_nml['turbinjds'] = 1.0 opt_nml['alphaw'] = 0.5 - # NOTE: Python index is different from the NML index schout_nml = schism_nml['schout'] @@ -239,39 +235,39 @@ def update_schism_params(path: Path) -> f90nml.Namelist: schout_nml.start_index.update(iof_hydro=[14], iof_wwm=[1]) - #sig. height (m) {sigWaveHeight} 2D + # sig. 
height (m) {sigWaveHeight} 2D schout_nml['iof_wwm'][0] = 1 - #Mean average period (sec) - TM01 {meanWavePeriod} 2D + # Mean average period (sec) - TM01 {meanWavePeriod} 2D schout_nml['iof_wwm'][1] = 0 - #Zero down crossing period for comparison with buoy (s) - TM02 {zeroDowncrossPeriod} 2D + # Zero down crossing period for comparison with buoy (s) - TM02 {zeroDowncrossPeriod} 2D schout_nml['iof_wwm'][2] = 0 - #Average period of wave runup/overtopping - TM10 {TM10} 2D + # Average period of wave runup/overtopping - TM10 {TM10} 2D schout_nml['iof_wwm'][3] = 0 - #Mean wave number (1/m) {meanWaveNumber} 2D + # Mean wave number (1/m) {meanWaveNumber} 2D schout_nml['iof_wwm'][4] = 0 - #Mean wave length (m) {meanWaveLength} 2D + # Mean wave length (m) {meanWaveLength} 2D schout_nml['iof_wwm'][5] = 0 - #Mean average energy transport direction (degr) - MWD in NDBC? {meanWaveDirection} 2D + # Mean average energy transport direction (degr) - MWD in NDBC? {meanWaveDirection} 2D schout_nml['iof_wwm'][6] = 0 - #Mean directional spreading (degr) {meanDirSpreading} 2D + # Mean directional spreading (degr) {meanDirSpreading} 2D schout_nml['iof_wwm'][7] = 0 - #Discrete peak period (sec) - Tp {peakPeriod} 2D + # Discrete peak period (sec) - Tp {peakPeriod} 2D schout_nml['iof_wwm'][8] = 1 - #Continuous peak period based on higher order moments (sec) {continuousPeakPeriod} 2D + # Continuous peak period based on higher order moments (sec) {continuousPeakPeriod} 2D schout_nml['iof_wwm'][9] = 0 - #Peak phase vel. (m/s) {peakPhaseVel} 2D + # Peak phase vel. (m/s) {peakPhaseVel} 2D schout_nml['iof_wwm'][10] = 0 - #Peak n-factor {peakNFactor} 2D + # Peak n-factor {peakNFactor} 2D schout_nml['iof_wwm'][11] = 0 - #Peak group vel. (m/s) {peakGroupVel} 2D + # Peak group vel. 
(m/s) {peakGroupVel} 2D schout_nml['iof_wwm'][12] = 0 - #Peak wave number {peakWaveNumber} 2D + # Peak wave number {peakWaveNumber} 2D schout_nml['iof_wwm'][13] = 0 - #Peak wave length {peakWaveLength} 2D + # Peak wave length {peakWaveLength} 2D schout_nml['iof_wwm'][14] = 0 - #Peak (dominant) direction (degr) {dominantDirection} 2D + # Peak (dominant) direction (degr) {dominantDirection} 2D schout_nml['iof_wwm'][15] = 1 - #Peak directional spreading {peakSpreading} 2D + # Peak directional spreading {peakSpreading} 2D schout_nml['iof_wwm'][16] = 0 return schism_nml From baee769b41e0c600d571572f42a50d53458090d6 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Thu, 18 Jan 2024 17:28:06 +0000 Subject: [PATCH 03/54] format level_list in probability plot --- singularity/prep/files/analyze_ensemble.py | 28 ++++------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/singularity/prep/files/analyze_ensemble.py b/singularity/prep/files/analyze_ensemble.py index f811e9c..ed9aba3 100644 --- a/singularity/prep/files/analyze_ensemble.py +++ b/singularity/prep/files/analyze_ensemble.py @@ -371,31 +371,11 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): ) if make_probability_plot: - level_list = [ - 0.3048, - 0.6096, - 0.9144, - 1.2192, - 1.524, - 1.8288, - 2.1336, - 2.4384, - 2.7432, - 3.048, - 3.3528, - 3.6576, - 3.9624, - 4.2672, - 4.572, - 4.8768, - 5.1816, - 5.4864, - 5.7912, - 6.096, - ] + level_ft = numpy.arange(1, 21) + level_m = (level_ft * 0.3048).round(decimals=4) node_prob_field = probability_field_from_surrogate( - levels=level_list, + levels=level_m, surrogate_model=surrogate_model, distribution=distribution, training_set=validation_set, @@ -408,7 +388,7 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): plot_selected_probability_fields( node_prob_field=node_prob_field, - level_list=level_list, + level_list=level_m, output_directory=output_directory if save_plots else None, label_unit_convert_factor=1 / 0.3048, 
label_unit_name='ft', From 5ffef49b88941f73346aca9ea8ac75bfa64845b6 Mon Sep 17 00:00:00 2001 From: "Soroosh.Mani" Date: Thu, 25 Jan 2024 10:47:03 -0500 Subject: [PATCH 04/54] Update deterministic setup script and prep env --- singularity/info/files/hurricane_data.py | 2 +- singularity/prep/files/setup_model.py | 54 +++++++++++++++--------- singularity/prep/prep.def | 3 ++ 3 files changed, 39 insertions(+), 20 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index 827a521..a9bbc90 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -137,7 +137,7 @@ def main(args): gdf_track )) df_dt['date_time'] = ( - track.start_date, track.end_date, forecast_start + forecast_start - timedelta(days=2), track.end_date, forecast_start ) diff --git a/singularity/prep/files/setup_model.py b/singularity/prep/files/setup_model.py index 5867326..680e310 100755 --- a/singularity/prep/files/setup_model.py +++ b/singularity/prep/files/setup_model.py @@ -20,7 +20,9 @@ from pyschism import dates from pyschism.enums import NWSType from pyschism.driver import ModelConfig -from pyschism.forcing.bctides import iettype, ifltype +from pyschism.forcing.bctides.tides import Tides, TidalDatabase +from pyschism.forcing.bctides.tpxo import TPXO_ELEVATION +from pyschism.forcing.bctides.tpxo import TPXO_VELOCITY from pyschism.forcing.nws import GFS, HRRR, ERA5, BestTrackForcing from pyschism.forcing.nws.nws2 import hrrr3 from pyschism.forcing.source_sink import NWM @@ -146,17 +148,19 @@ def copy_meteo_cache(sflux_dir, meteo_cache_path): def setup_schism_model( - mesh_path, - domain_bbox_path, - date_range_path, - station_info_path, - out_dir, - main_cache_path, - parametric_wind=False, - nhc_track_file=None, - storm_id=None, - use_wwm=False, -): + mesh_path, + domain_bbox_path, + date_range_path, + station_info_path, + out_dir, + main_cache_path, + parametric_wind=False, + nhc_track_file=None, + 
storm_id=None, + use_wwm=False, + tpxo_dir=None, + ): + domain_box = gpd.read_file(domain_bbox_path) atm_bbox = Bbox(domain_box.to_crs('EPSG:4326').total_bounds.reshape(2, 2)) @@ -173,9 +177,12 @@ def setup_schism_model( # measurement days +7 days of simulation: 3 ramp, 2 prior # & 2 after the measurement dates dt_data = pd.read_csv(date_range_path, delimiter=',') - date_1, date_2 = pd.to_datetime(dt_data.date_time).dt.strftime('%Y%m%d%H').values - date_1 = datetime.strptime(date_1, '%Y%m%d%H') - date_2 = datetime.strptime(date_2, '%Y%m%d%H') + date_1, date_2, date_3 = pd.to_datetime(dt_data.date_time).dt.strftime( + "%Y%m%d%H").values + date_1 = datetime.strptime(date_1, "%Y%m%d%H") + date_2 = datetime.strptime(date_2, "%Y%m%d%H") +# date_3 = datetime.strptime(date_3, "%Y%m%d%H") + # If there are no observation data, it's hindcast mode hindcast_mode = (station_info_path).is_file() @@ -239,15 +246,23 @@ def setup_schism_model( # hrrr3.HRRR combination are supported by nws2 mechanism pass - logger.info('Creating model configuration ...') + + tidal_flags = [3, 3, 0, 0] + logger.info("Creating model configuration ...") config = ModelConfig( hgrid=hgrid, fgrid=fgrid, - iettype=iettype.Iettype3(database='tpxo'), - ifltype=ifltype.Ifltype3(database='tpxo'), + flags=[tidal_flags for _ in hgrid.boundaries.open.itertuples()], + constituents=[], # we're overwriting Tides obj + database='tpxo', # we're overwriting Tides obj nws=atmospheric, source_sink=NWM(), ) + tide_db = TidalDatabase.TPXO.value( + h_file=tpxo_dir / TPXO_ELEVATION, u_file=tpxo_dir / TPXO_VELOCITY, + ) + tides = Tides(tidal_database=tide_db, constituents='all') + config.bctides.tides = tides if config.forcings.nws and getattr(config.forcings.nws, 'sflux_2', None): config.forcings.nws.sflux_2.inventory.file_interval = timedelta(hours=6) @@ -448,7 +463,8 @@ def main(args): nhc_track_file=nhc_track, storm_id=f'{storm_name}{storm_year}', use_wwm=use_wwm, - ) + tpxo_dir=tpxo_dir, + ) if __name__ == '__main__': 
diff --git a/singularity/prep/prep.def b/singularity/prep/prep.def index 8757545..483e470 100644 --- a/singularity/prep/prep.def +++ b/singularity/prep/prep.def @@ -33,6 +33,9 @@ From: continuumio/miniconda3:23.5.2-0-alpine mamba install -y -n $ENV_NAME -cconda-forge \ --force-reinstall geopandas geopandas-base + pip uninstall pygeos # We use shapely 2 + mamba install -y -cconda-forge --force-reinstall geopandas + git clone https://github.com/schism-dev/schism cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/Relocate/relocate_source_feeder.py /scripts cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/feeder_heads_bases_v2.1.xy /refs From a80263a4e1eb843d00875120affa3bb384293b18 Mon Sep 17 00:00:00 2001 From: "Soroosh.Mani" Date: Tue, 6 Feb 2024 16:22:19 -0500 Subject: [PATCH 05/54] Hydrology option for deterministic --- singularity/prep/files/setup_model.py | 39 +++++++++++++++++++-------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/singularity/prep/files/setup_model.py b/singularity/prep/files/setup_model.py index 680e310..48dbb78 100755 --- a/singularity/prep/files/setup_model.py +++ b/singularity/prep/files/setup_model.py @@ -159,6 +159,7 @@ def setup_schism_model( storm_id=None, use_wwm=False, tpxo_dir=None, + with_hydrology=False, ): @@ -249,6 +250,9 @@ def setup_schism_model( tidal_flags = [3, 3, 0, 0] logger.info("Creating model configuration ...") + src_sink = None + if with_hydrology: + src_sink = NWM() config = ModelConfig( hgrid=hgrid, fgrid=fgrid, @@ -256,7 +260,7 @@ def setup_schism_model( constituents=[], # we're overwriting Tides obj database='tpxo', # we're overwriting Tides obj nws=atmospheric, - source_sink=NWM(), + source_sink=src_sink, ) tide_db = TidalDatabase.TPXO.value( h_file=tpxo_dir / TPXO_ELEVATION, u_file=tpxo_dir / TPXO_VELOCITY, @@ -411,7 +415,8 @@ def setup_schism_model( # Workardoun for hydrology param bug #34 nm_list = 
f90nml.read(schism_dir / 'param.nml') - nm_list['opt']['if_source'] = 1 + if with_hydrology: + nm_list['opt']['if_source'] = 1 nm_list.write(schism_dir / 'param.nml', force=True) ## end of workaround @@ -445,12 +450,14 @@ def main(args): nwm_dir = args.nwm_dir use_wwm = args.use_wwm - if TPXO_LINK_PATH.is_dir(): - shutil.rmtree(TPXO_LINK_PATH) - if NWM_LINK_PATH.is_dir(): - shutil.rmtree(NWM_LINK_PATH) - os.symlink(tpxo_dir, TPXO_LINK_PATH, target_is_directory=True) - os.symlink(nwm_dir, NWM_LINK_PATH, target_is_directory=True) + with_hydrology = args.with_hydrology + +# if TPXO_LINK_PATH.is_dir(): +# shutil.rmtree(TPXO_LINK_PATH) +# if NWM_LINK_PATH.is_dir(): +# shutil.rmtree(NWM_LINK_PATH) +# os.symlink(tpxo_dir, TPXO_LINK_PATH, target_is_directory=True) +# os.symlink(nwm_dir, NWM_LINK_PATH, target_is_directory=True) setup_schism_model( mesh_path, @@ -464,6 +471,7 @@ def main(args): storm_id=f'{storm_name}{storm_year}', use_wwm=use_wwm, tpxo_dir=tpxo_dir, + with_hydrology=with_hydrology, ) @@ -520,11 +528,20 @@ def main(args): '--out', help='path to the setup output (solver input) directory', type=pathlib.Path ) - parser.add_argument('--use-wwm', action='store_true') + parser.add_argument( + "--use-wwm", action="store_true" + ) - parser.add_argument('name', help='name of the storm', type=str) + parser.add_argument( + "--with-hydrology", action="store_true" + ) + + parser.add_argument( + "name", help="name of the storm", type=str) + + parser.add_argument( + "year", help="year of the storm", type=int) - parser.add_argument('year', help='year of the storm', type=int) args = parser.parse_args() From 23055fc2d1e4ee6311998b20a7a979b7ab9ac2db Mon Sep 17 00:00:00 2001 From: "Soroosh.Mani" Date: Tue, 6 Feb 2024 16:22:42 -0500 Subject: [PATCH 06/54] nhc_code fix and internet requirement for ensemble --- singularity/prep/files/setup_ensemble.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/singularity/prep/files/setup_ensemble.py 
b/singularity/prep/files/setup_ensemble.py index d055e8c..bfd1472 100644 --- a/singularity/prep/files/setup_ensemble.py +++ b/singularity/prep/files/setup_ensemble.py @@ -226,7 +226,7 @@ def main(args): # spinup too instead of spinup trying to download! forcing_configurations.append( BestTrackForcingJSON( - nhc_code=f'{args.name}{args.year}', + nhc_code=orig_track.nhc_code, interval_seconds=3600, nws=20, fort22_filename=workdir / 'track_files' / 'original.22', From 93389834fff833d0185529dbcaead74ad3eaffa2 Mon Sep 17 00:00:00 2001 From: "Soroosh.Mani" Date: Wed, 14 Feb 2024 16:56:48 -0500 Subject: [PATCH 07/54] Add storm name for non best tracks --- singularity/info/files/hurricane_data.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index a9bbc90..ea61755 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -58,6 +58,7 @@ def main(args): else: event = StormEvent(name_or_code, year) nhc_code = event.nhc_code + storm_name = event.name logger.info("Fetching a-deck track info...") prescribed = None @@ -245,6 +246,7 @@ def main(args): df_dt.to_csv(date_out) # Remove duplicate entries for similar isotach and time # (e.g. 
Dorian19 and Ian22 best tracks) + track.data.name = storm_name track.to_file(track_out) gs = gpd.GeoSeries(windswath) gdf_windswath = gpd.GeoDataFrame( From 51dab5e76ddc06d2a55aee5f332481121ed68443 Mon Sep 17 00:00:00 2001 From: "Soroosh.Mani" Date: Thu, 15 Feb 2024 10:44:31 -0500 Subject: [PATCH 08/54] Fix attempt 2 for storm name --- singularity/info/files/hurricane_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index ea61755..307248f 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -175,6 +175,7 @@ def main(args): # NOTE: Fake best track AFTER perturbation # gdf_track.advisory = 'BEST' # gdf_track.forecast_hours = 0 + gdf_track['name'] = storm_name track = VortexTrack(storm=gdf_track, file_deck='a', advisories=[advisory]) windswath_dict = track.wind_swaths(wind_speed=34) @@ -246,7 +247,6 @@ def main(args): df_dt.to_csv(date_out) # Remove duplicate entries for similar isotach and time # (e.g. 
Dorian19 and Ian22 best tracks) - track.data.name = storm_name track.to_file(track_out) gs = gpd.GeoSeries(windswath) gdf_windswath = gpd.GeoDataFrame( From 69a1a008e18f8c81bf95167dcfb611c2fc6da9ac Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Wed, 6 Mar 2024 16:49:21 -0500 Subject: [PATCH 09/54] Fix no coops station case & storm name lookup --- singularity/info/files/hurricane_data.py | 2 +- singularity/info/info.def | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index 307248f..68ecac1 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -253,7 +253,7 @@ def main(args): geometry=gs, data={'RADII': len(gs) * [34]}, crs="EPSG:4326" ) gdf_windswath.to_file(swath_out) - if coops_ssh is not None: + if coops_ssh is not None and len(coops_ssh) > 0: coops_ssh.to_netcdf(sta_dat_out, 'w') coops_ssh[['x', 'y']].to_dataframe().drop(columns=['nws_id']).to_csv( sta_loc_out, header=False, index=False) diff --git a/singularity/info/info.def b/singularity/info/info.def index edaeed3..35481cc 100644 --- a/singularity/info/info.def +++ b/singularity/info/info.def @@ -20,7 +20,7 @@ From: continuumio/miniconda3:23.3.1-0-alpine mamba clean --all --yes conda run -n info --no-capture-output \ - pip install stormevents==2.2.0 + pip install stormevents==2.2.1 conda clean --all From 2ae0f582071ac8f2c2768530a2482e9da6c8e0b2 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Wed, 6 Mar 2024 17:11:11 -0500 Subject: [PATCH 10/54] Update storm events to correct version --- singularity/info/info.def | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/singularity/info/info.def b/singularity/info/info.def index 35481cc..1e1d1d7 100644 --- a/singularity/info/info.def +++ b/singularity/info/info.def @@ -20,7 +20,7 @@ From: continuumio/miniconda3:23.3.1-0-alpine mamba clean --all --yes conda run -n info --no-capture-output 
\ - pip install stormevents==2.2.1 + pip install stormevents==2.2.3 conda clean --all From 67a88c41447bdfda60663f941fe50f7587b9055c Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Tue, 21 May 2024 21:58:22 +0000 Subject: [PATCH 11/54] add ROC_single_run.py to plot ROC curve --- singularity/post/files/ROC_single_run.py | 276 +++++++++++++++++++++++ 1 file changed, 276 insertions(+) create mode 100644 singularity/post/files/ROC_single_run.py diff --git a/singularity/post/files/ROC_single_run.py b/singularity/post/files/ROC_single_run.py new file mode 100644 index 0000000..b1fe624 --- /dev/null +++ b/singularity/post/files/ROC_single_run.py @@ -0,0 +1,276 @@ +import argparse +import logging +import os +import warnings +import numpy as np +import pandas as pd +import xarray as xr +import scipy as sp +import matplotlib.pyplot as plt +from pathlib import Path +from cartopy.feature import NaturalEarthFeature + +os.environ['USE_PYGEOS'] = '0' +import geopandas as gpd + +pd.options.mode.copy_on_write = True + + +def stack_station_coordinates(x,y): + """ + Create numpy.column_stack based on + coordinates of observation points + """ + coord_combined = np.column_stack([x, y]) + return coord_combined + + +def create_search_tree(longitude, latitude): + """ + Create scipy.spatial.CKDTree based on Lat. and Long. 
+ """ + long_lat = np.column_stack((longitude.T.ravel(), + latitude.T.ravel())) + tree = sp.spatial.cKDTree(long_lat) + return tree + + +def find_nearby_prediction(ds,variable,indices): + """ + Reads netcdf file, target variable, and indices + Returns max value among corresponding indices for each point + """ + obs_count = indices.shape[0] # total number of search/observation points + max_prediction_index = len(ds.node.values) # total number of nodes + + prediction_prob = np.zeros(obs_count) # assuming all are dry (probability of zero) + + for obs_point in range(obs_count): + idx_arr = np.delete(indices[obs_point], + np.where(indices[obs_point]==max_prediction_index)[0]) #len is length of surrogate model array + val_arr = ds[variable].values[idx_arr] + val_arr = np.nan_to_num(val_arr) # replace nan with zero (dry node) + + # # Pick the nearest non-zero probability (option #1) + # for val in val_arr: + # if val > 0.0: + # prediction_prob[obs_point] = round(val,4) #round to 0.1 mm + # break + + # pick the largest value (option #2) + if val_arr.size>0: + prediction_prob[obs_point] = val_arr.max() + return prediction_prob + + +def plot_probabilities(df,prob_column,gdf_countries,title,save_name): + """ + plot probabilities of exceeding given threshold at obs. points + """ + figure, axis = plt.subplots(1, 1) + figure.set_size_inches(10, 10/1.6) + + plt.scatter(x=df.Longitude, y=df.Latitude, + vmin=0, vmax=1.0, + c=df[prob_column]) + xlim = axis.get_xlim() + ylim = axis.get_ylim() + + gdf_countries.plot(color='lightgrey', ax=axis, zorder=-5) + + axis.set_xlim(xlim) + axis.set_ylim(ylim) + plt.colorbar(shrink=0.75) + plt.title(title) + plt.savefig(save_name) + plt.close() + + +def calculate_hit_miss(df,obs_column,prob_column,threshold,probability): + """ + Reads dataframe with two columns for obs_elev, and probabilities + returns hit/miss/... 
based on user-defined threshold & probability + """ + hit = len(df[(df[obs_column]>=threshold) & (df[prob_column]>=probability)]) + miss = len(df[(df[obs_column]>=threshold) & (df[prob_column]=probability)]) + correct_neg = len(df[(df[obs_column] Date: Tue, 21 May 2024 22:04:34 +0000 Subject: [PATCH 12/54] update ROC_single_run.py to save stats dataset --- singularity/post/files/ROC_single_run.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/singularity/post/files/ROC_single_run.py b/singularity/post/files/ROC_single_run.py index b1fe624..06209f4 100644 --- a/singularity/post/files/ROC_single_run.py +++ b/singularity/post/files/ROC_single_run.py @@ -218,6 +218,8 @@ def main(args): ), ) + ds_ROC.to_netcdf(os.path.join(save_dir, f"{storm_name}_{storm_year}_{leadtime}hr_leadtime_POD_FAR.nc")) + # plot ROC curves marker_list=['s','x'] linestyle_list=['dashed','dotted'] From 625ce97e78009da560b27cd5ea472e9372770b09 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Wed, 22 May 2024 16:02:24 +0000 Subject: [PATCH 13/54] remove extra lines and format with oitnb --- singularity/post/files/ROC_single_run.py | 276 ++++++++++++----------- 1 file changed, 149 insertions(+), 127 deletions(-) diff --git a/singularity/post/files/ROC_single_run.py b/singularity/post/files/ROC_single_run.py index 06209f4..fbb768d 100644 --- a/singularity/post/files/ROC_single_run.py +++ b/singularity/post/files/ROC_single_run.py @@ -7,7 +7,7 @@ import xarray as xr import scipy as sp import matplotlib.pyplot as plt -from pathlib import Path +from pathlib import Path from cartopy.feature import NaturalEarthFeature os.environ['USE_PYGEOS'] = '0' @@ -16,7 +16,7 @@ pd.options.mode.copy_on_write = True -def stack_station_coordinates(x,y): +def stack_station_coordinates(x, y): """ Create numpy.column_stack based on coordinates of observation points @@ -29,28 +29,28 @@ def create_search_tree(longitude, latitude): """ Create scipy.spatial.CKDTree based on Lat. and Long. 
""" - long_lat = np.column_stack((longitude.T.ravel(), - latitude.T.ravel())) + long_lat = np.column_stack((longitude.T.ravel(), latitude.T.ravel())) tree = sp.spatial.cKDTree(long_lat) return tree -def find_nearby_prediction(ds,variable,indices): +def find_nearby_prediction(ds, variable, indices): """ Reads netcdf file, target variable, and indices Returns max value among corresponding indices for each point """ - obs_count = indices.shape[0] # total number of search/observation points + obs_count = indices.shape[0] # total number of search/observation points max_prediction_index = len(ds.node.values) # total number of nodes - + prediction_prob = np.zeros(obs_count) # assuming all are dry (probability of zero) for obs_point in range(obs_count): - idx_arr = np.delete(indices[obs_point], - np.where(indices[obs_point]==max_prediction_index)[0]) #len is length of surrogate model array + idx_arr = np.delete( + indices[obs_point], np.where(indices[obs_point] == max_prediction_index)[0] + ) # len is length of surrogate model array val_arr = ds[variable].values[idx_arr] - val_arr = np.nan_to_num(val_arr) # replace nan with zero (dry node) - + val_arr = np.nan_to_num(val_arr) # replace nan with zero (dry node) + # # Pick the nearest non-zero probability (option #1) # for val in val_arr: # if val > 0.0: @@ -58,21 +58,19 @@ def find_nearby_prediction(ds,variable,indices): # break # pick the largest value (option #2) - if val_arr.size>0: + if val_arr.size > 0: prediction_prob[obs_point] = val_arr.max() return prediction_prob -def plot_probabilities(df,prob_column,gdf_countries,title,save_name): +def plot_probabilities(df, prob_column, gdf_countries, title, save_name): """ plot probabilities of exceeding given threshold at obs. 
points """ figure, axis = plt.subplots(1, 1) - figure.set_size_inches(10, 10/1.6) + figure.set_size_inches(10, 10 / 1.6) - plt.scatter(x=df.Longitude, y=df.Latitude, - vmin=0, vmax=1.0, - c=df[prob_column]) + plt.scatter(x=df.Longitude, y=df.Latitude, vmin=0, vmax=1.0, c=df[prob_column]) xlim = axis.get_xlim() ylim = axis.get_ylim() @@ -84,22 +82,22 @@ def plot_probabilities(df,prob_column,gdf_countries,title,save_name): plt.title(title) plt.savefig(save_name) plt.close() - -def calculate_hit_miss(df,obs_column,prob_column,threshold,probability): + +def calculate_hit_miss(df, obs_column, prob_column, threshold, probability): """ Reads dataframe with two columns for obs_elev, and probabilities returns hit/miss/... based on user-defined threshold & probability """ - hit = len(df[(df[obs_column]>=threshold) & (df[prob_column]>=probability)]) - miss = len(df[(df[obs_column]>=threshold) & (df[prob_column]=probability)]) - correct_neg = len(df[(df[obs_column]= threshold) & (df[prob_column] >= probability)]) + miss = len(df[(df[obs_column] >= threshold) & (df[prob_column] < probability)]) + false_alarm = len(df[(df[obs_column] < threshold) & (df[prob_column] >= probability)]) + correct_neg = len(df[(df[obs_column] < threshold) & (df[prob_column] < probability)]) + + return hit, miss, false_alarm, correct_neg -def calculate_POD_FAR(hit,miss,false_alarm,correct_neg): +def calculate_POD_FAR(hit, miss, false_alarm, correct_neg): """ Reads hit, miss, false_alarm, and correct_neg returns POD and FAR @@ -108,14 +106,14 @@ def calculate_POD_FAR(hit,miss,false_alarm,correct_neg): POD = np.nan FAR = np.nan try: - POD = round(hit/(hit+miss), 4) # Probability of Detection + POD = round(hit / (hit + miss), 4) # Probability of Detection except ZeroDivisionError: pass - try: - FAR = round(false_alarm/(false_alarm+correct_neg), 4) # False Alarm Rate + try: + FAR = round(false_alarm / (false_alarm + correct_neg), 4) # False Alarm Rate except ZeroDivisionError: pass - return POD,FAR + 
return POD, FAR def main(args): @@ -124,155 +122,179 @@ def main(args): leadtime = args.leadtime prob_nc_path = Path(args.prob_nc_path) obs_df_path = Path(args.obs_df_path) - save_dir = args.save_dir + save_dir = args.save_dir - - # *.nc file coordinates - thresholds_ft = [3,6,9] # in ft - thresholds_m = [round(i*0.3048,4) for i in thresholds_ft] # convert to meter + # *.nc file coordinates + thresholds_ft = [3, 6, 9] # in ft + thresholds_m = [round(i * 0.3048, 4) for i in thresholds_ft] # convert to meter sources = ['model', 'surrogate'] - probabilities = [0.0,0.05, 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] - + probabilities = [0.0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] + # attributes of input files - prediction_variable="probabilities" - obs_attribute='Elev_m_xGEOID20b' - - # search criteria + prediction_variable = 'probabilities' + obs_attribute = 'Elev_m_xGEOID20b' + + # search criteria max_distance = 1000 # [in meters] to set distance_upper_bound - max_neighbors = 10 # to set k + max_neighbors = 10 # to set k - blank_arr = np.empty((len(thresholds_ft),1,1,len(sources),len(probabilities))) + blank_arr = np.empty((len(thresholds_ft), 1, 1, len(sources), len(probabilities))) blank_arr[:] = np.nan - - hit_arr=blank_arr.copy() - miss_arr=blank_arr.copy() - false_alarm_arr=blank_arr.copy() - correct_neg_arr=blank_arr.copy() - POD_arr=blank_arr.copy() - FAR_arr=blank_arr.copy() + + hit_arr = blank_arr.copy() + miss_arr = blank_arr.copy() + false_alarm_arr = blank_arr.copy() + correct_neg_arr = blank_arr.copy() + POD_arr = blank_arr.copy() + FAR_arr = blank_arr.copy() # Load obs file, extract storm obs points and coordinates df_obs = pd.read_csv(obs_df_path) Event_name = f'{storm_name}_{storm_year}' - df_obs_storm = df_obs[df_obs.Event==Event_name] - obs_coordinates = stack_station_coordinates(df_obs_storm.Longitude.values,df_obs_storm.Latitude.values) - + df_obs_storm = df_obs[df_obs.Event == Event_name] + obs_coordinates = stack_station_coordinates( 
+ df_obs_storm.Longitude.values, df_obs_storm.Latitude.values + ) + # Load probabilities.nc file ds_prob = xr.open_dataset(prob_nc_path) - gdf_countries=gpd.GeoSeries(NaturalEarthFeature(category='physical', - scale='10m', - name='land', - ).geometries(), - crs=4326) + gdf_countries = gpd.GeoSeries( + NaturalEarthFeature(category='physical', scale='10m', name='land',).geometries(), + crs=4326, + ) # Loop through thresholds and sources and find corresponding values from probabilities.nc threshold_count = -1 for threshold in thresholds_m: - threshold_count+=1 + threshold_count += 1 source_count = -1 for source in sources: - source_count+=1 - ds_temp = ds_prob.sel(level=threshold,source=source) + source_count += 1 + ds_temp = ds_prob.sel(level=threshold, source=source) tree = create_search_tree(ds_temp.x.values, ds_temp.y.values) - dist, indices = tree.query(obs_coordinates, k=max_neighbors, distance_upper_bound=max_distance*1e-5) # 0.01 is equivalent to 1000 m - prediction_prob = find_nearby_prediction(ds=ds_temp,variable=prediction_variable,indices=indices) + dist, indices = tree.query( + obs_coordinates, k=max_neighbors, distance_upper_bound=max_distance * 1e-5 + ) # 0.01 is equivalent to 1000 m + prediction_prob = find_nearby_prediction( + ds=ds_temp, variable=prediction_variable, indices=indices + ) df_obs_storm[f'{source}_prob'] = prediction_prob - + # Plot probabilities at obs. 
points - plot_probabilities(df_obs_storm,f'{source}_prob',gdf_countries, - f"Probability of {source} exceeding {thresholds_ft[threshold_count]} ft \n {storm_name}, {storm_year}, {leadtime}-hr leadtime", - os.path.join(save_dir, f"prob_{source}_above_{thresholds_ft[threshold_count]}ft_{storm_name}_{storm_year}_{leadtime}-hr.png")) + plot_probabilities( + df_obs_storm, + f'{source}_prob', + gdf_countries, + f'Probability of {source} exceeding {thresholds_ft[threshold_count]} ft \n {storm_name}, {storm_year}, {leadtime}-hr leadtime', + os.path.join( + save_dir, + f'prob_{source}_above_{thresholds_ft[threshold_count]}ft_{storm_name}_{storm_year}_{leadtime}-hr.png', + ), + ) # Loop through probabilities: calculate hit/miss/... & POD/FAR prob_count = -1 for prob in probabilities: - prob_count+=1 - hit,miss,false_alarm,correct_neg = calculate_hit_miss(df_obs_storm,obs_attribute,f'{source}_prob',threshold,prob) - hit_arr[threshold_count,0,0,source_count,prob_count] = hit - miss_arr[threshold_count,0,0,source_count,prob_count] = miss - false_alarm_arr[threshold_count,0,0,source_count,prob_count]= false_alarm - correct_neg_arr[threshold_count,0,0,source_count,prob_count] = correct_neg - - pod,far = calculate_POD_FAR(hit,miss,false_alarm,correct_neg) - POD_arr[threshold_count,0,0,source_count,prob_count] = pod - FAR_arr[threshold_count,0,0,source_count,prob_count] = far - - plot_ROC_curvesource_count = -1 + prob_count += 1 + hit, miss, false_alarm, correct_neg = calculate_hit_miss( + df_obs_storm, obs_attribute, f'{source}_prob', threshold, prob + ) + hit_arr[threshold_count, 0, 0, source_count, prob_count] = hit + miss_arr[threshold_count, 0, 0, source_count, prob_count] = miss + false_alarm_arr[threshold_count, 0, 0, source_count, prob_count] = false_alarm + correct_neg_arr[threshold_count, 0, 0, source_count, prob_count] = correct_neg + + pod, far = calculate_POD_FAR(hit, miss, false_alarm, correct_neg) + POD_arr[threshold_count, 0, 0, source_count, prob_count] = pod + 
FAR_arr[threshold_count, 0, 0, source_count, prob_count] = far ds_ROC = xr.Dataset( - coords = dict( - threshold = thresholds_ft, - storm = [storm_name], - leadtime = [leadtime], - source = sources, - prob = probabilities, + coords=dict( + threshold=thresholds_ft, + storm=[storm_name], + leadtime=[leadtime], + source=sources, + prob=probabilities, ), data_vars=dict( - hit=(["threshold","storm","leadtime","source","prob"], hit_arr), - miss=(["threshold","storm","leadtime","source","prob"], miss_arr), - false_alarm=(["threshold","storm","leadtime","source","prob"],false_alarm_arr), - correct_neg=(["threshold","storm","leadtime","source","prob"],correct_neg_arr), - POD=(["threshold","storm","leadtime","source","prob"],POD_arr), - FAR=(["threshold","storm","leadtime","source","prob"],FAR_arr), + hit=(['threshold', 'storm', 'leadtime', 'source', 'prob'], hit_arr), + miss=(['threshold', 'storm', 'leadtime', 'source', 'prob'], miss_arr), + false_alarm=( + ['threshold', 'storm', 'leadtime', 'source', 'prob'], + false_alarm_arr, + ), + correct_neg=( + ['threshold', 'storm', 'leadtime', 'source', 'prob'], + correct_neg_arr, + ), + POD=(['threshold', 'storm', 'leadtime', 'source', 'prob'], POD_arr), + FAR=(['threshold', 'storm', 'leadtime', 'source', 'prob'], FAR_arr), ), - ) - ds_ROC.to_netcdf(os.path.join(save_dir, f"{storm_name}_{storm_year}_{leadtime}hr_leadtime_POD_FAR.nc")) - + ds_ROC.to_netcdf( + os.path.join(save_dir, f'{storm_name}_{storm_year}_{leadtime}hr_leadtime_POD_FAR.nc') + ) + # plot ROC curves - marker_list=['s','x'] - linestyle_list=['dashed','dotted'] + marker_list = ['s', 'x'] + linestyle_list = ['dashed', 'dotted'] threshold_count = -1 for threshold in thresholds_ft: - threshold_count+=1 + threshold_count += 1 fig = plt.figure() ax = fig.add_subplot(111) - plt.axline((0.0,0.0), (1.0,1.0), linestyle='--', color='grey', label='random prediction') - source_count=-1 + plt.axline( + (0.0, 0.0), (1.0, 1.0), linestyle='--', color='grey', label='random 
prediction' + ) + source_count = -1 for source in sources: - source_count+=1 - plt.plot(FAR_arr[threshold_count,0,0,source_count,:], - POD_arr[threshold_count,0,0,source_count,:], - label=f'{source}', - marker=marker_list[source_count], - linestyle=linestyle_list[source_count], - markersize=5) + source_count += 1 + plt.plot( + FAR_arr[threshold_count, 0, 0, source_count, :], + POD_arr[threshold_count, 0, 0, source_count, :], + label=f'{source}', + marker=marker_list[source_count], + linestyle=linestyle_list[source_count], + markersize=5, + ) plt.legend() plt.xlabel('False Alarm Rate') plt.ylabel('Probability of Detection') - plt.title(f'{storm_name}_{storm_year}, {leadtime}-hr leadtime, {threshold} ft threshold') - plt.savefig(os.path.join(save_dir, f'ROC_{storm_name}_{leadtime}hr_leadtime_{threshold}_ft.png')) + plt.title( + f'{storm_name}_{storm_year}, {leadtime}-hr leadtime, {threshold} ft threshold' + ) + plt.savefig( + os.path.join( + save_dir, f'ROC_{storm_name}_{leadtime}hr_leadtime_{threshold}_ft.png' + ) + ) plt.close() def entry(): parser = argparse.ArgumentParser() - parser.add_argument( - "--storm_name", help="name of the storm", type=str) + parser.add_argument('--storm_name', help='name of the storm', type=str) + parser.add_argument('--storm_year', help='year of the storm', type=int) + + parser.add_argument('--leadtime', help='OFCL track leadtime hr', type=int) + + parser.add_argument('--prob_nc_path', help='path to probabilities.nc', type=str) + + parser.add_argument('--obs_df_path', help='Path to observations dataframe', type=str) + + # optional parser.add_argument( - "--storm_year", help="year of the storm", type=int) - - parser.add_argument( - "--leadtime", help="OFCL track leadtime hr", type=int) - - parser.add_argument( - "--prob_nc_path", help="path to probabilities.nc", type=str) - - parser.add_argument( - "--obs_df_path", help="Path to observations dataframe", type=str) - - # optional - parser.add_argument( - "--save_dir", help="directory for 
saving analysis", default=os.getcwd(), type=str) + '--save_dir', help='directory for saving analysis', default=os.getcwd(), type=str + ) main(parser.parse_args()) -if __name__ == "__main__": - warnings.filterwarnings("ignore") +if __name__ == '__main__': + warnings.filterwarnings('ignore') # warnings.filterwarnings("ignore", category=DeprecationWarning) - entry() \ No newline at end of file + entry() From fcc0bc98ab2b6ef13eaea897858a5edcc8f2dc06 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Wed, 22 May 2024 20:00:33 +0000 Subject: [PATCH 14/54] move ROC_single_run.py from /post/files/ to /prep/files/ --- singularity/prep/files/ROC_single_run.py | 300 +++++++++++++++++++++++ 1 file changed, 300 insertions(+) create mode 100644 singularity/prep/files/ROC_single_run.py diff --git a/singularity/prep/files/ROC_single_run.py b/singularity/prep/files/ROC_single_run.py new file mode 100644 index 0000000..fbb768d --- /dev/null +++ b/singularity/prep/files/ROC_single_run.py @@ -0,0 +1,300 @@ +import argparse +import logging +import os +import warnings +import numpy as np +import pandas as pd +import xarray as xr +import scipy as sp +import matplotlib.pyplot as plt +from pathlib import Path +from cartopy.feature import NaturalEarthFeature + +os.environ['USE_PYGEOS'] = '0' +import geopandas as gpd + +pd.options.mode.copy_on_write = True + + +def stack_station_coordinates(x, y): + """ + Create numpy.column_stack based on + coordinates of observation points + """ + coord_combined = np.column_stack([x, y]) + return coord_combined + + +def create_search_tree(longitude, latitude): + """ + Create scipy.spatial.CKDTree based on Lat. and Long. 
+ """ + long_lat = np.column_stack((longitude.T.ravel(), latitude.T.ravel())) + tree = sp.spatial.cKDTree(long_lat) + return tree + + +def find_nearby_prediction(ds, variable, indices): + """ + Reads netcdf file, target variable, and indices + Returns max value among corresponding indices for each point + """ + obs_count = indices.shape[0] # total number of search/observation points + max_prediction_index = len(ds.node.values) # total number of nodes + + prediction_prob = np.zeros(obs_count) # assuming all are dry (probability of zero) + + for obs_point in range(obs_count): + idx_arr = np.delete( + indices[obs_point], np.where(indices[obs_point] == max_prediction_index)[0] + ) # len is length of surrogate model array + val_arr = ds[variable].values[idx_arr] + val_arr = np.nan_to_num(val_arr) # replace nan with zero (dry node) + + # # Pick the nearest non-zero probability (option #1) + # for val in val_arr: + # if val > 0.0: + # prediction_prob[obs_point] = round(val,4) #round to 0.1 mm + # break + + # pick the largest value (option #2) + if val_arr.size > 0: + prediction_prob[obs_point] = val_arr.max() + return prediction_prob + + +def plot_probabilities(df, prob_column, gdf_countries, title, save_name): + """ + plot probabilities of exceeding given threshold at obs. points + """ + figure, axis = plt.subplots(1, 1) + figure.set_size_inches(10, 10 / 1.6) + + plt.scatter(x=df.Longitude, y=df.Latitude, vmin=0, vmax=1.0, c=df[prob_column]) + xlim = axis.get_xlim() + ylim = axis.get_ylim() + + gdf_countries.plot(color='lightgrey', ax=axis, zorder=-5) + + axis.set_xlim(xlim) + axis.set_ylim(ylim) + plt.colorbar(shrink=0.75) + plt.title(title) + plt.savefig(save_name) + plt.close() + + +def calculate_hit_miss(df, obs_column, prob_column, threshold, probability): + """ + Reads dataframe with two columns for obs_elev, and probabilities + returns hit/miss/... 
based on user-defined threshold & probability + """ + hit = len(df[(df[obs_column] >= threshold) & (df[prob_column] >= probability)]) + miss = len(df[(df[obs_column] >= threshold) & (df[prob_column] < probability)]) + false_alarm = len(df[(df[obs_column] < threshold) & (df[prob_column] >= probability)]) + correct_neg = len(df[(df[obs_column] < threshold) & (df[prob_column] < probability)]) + + return hit, miss, false_alarm, correct_neg + + +def calculate_POD_FAR(hit, miss, false_alarm, correct_neg): + """ + Reads hit, miss, false_alarm, and correct_neg + returns POD and FAR + default POD and FAR are np.nan + """ + POD = np.nan + FAR = np.nan + try: + POD = round(hit / (hit + miss), 4) # Probability of Detection + except ZeroDivisionError: + pass + try: + FAR = round(false_alarm / (false_alarm + correct_neg), 4) # False Alarm Rate + except ZeroDivisionError: + pass + return POD, FAR + + +def main(args): + storm_name = args.storm_name.capitalize() + storm_year = args.storm_year + leadtime = args.leadtime + prob_nc_path = Path(args.prob_nc_path) + obs_df_path = Path(args.obs_df_path) + save_dir = args.save_dir + + # *.nc file coordinates + thresholds_ft = [3, 6, 9] # in ft + thresholds_m = [round(i * 0.3048, 4) for i in thresholds_ft] # convert to meter + sources = ['model', 'surrogate'] + probabilities = [0.0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] + + # attributes of input files + prediction_variable = 'probabilities' + obs_attribute = 'Elev_m_xGEOID20b' + + # search criteria + max_distance = 1000 # [in meters] to set distance_upper_bound + max_neighbors = 10 # to set k + + blank_arr = np.empty((len(thresholds_ft), 1, 1, len(sources), len(probabilities))) + blank_arr[:] = np.nan + + hit_arr = blank_arr.copy() + miss_arr = blank_arr.copy() + false_alarm_arr = blank_arr.copy() + correct_neg_arr = blank_arr.copy() + POD_arr = blank_arr.copy() + FAR_arr = blank_arr.copy() + + # Load obs file, extract storm obs points and coordinates + df_obs = 
pd.read_csv(obs_df_path) + Event_name = f'{storm_name}_{storm_year}' + df_obs_storm = df_obs[df_obs.Event == Event_name] + obs_coordinates = stack_station_coordinates( + df_obs_storm.Longitude.values, df_obs_storm.Latitude.values + ) + + # Load probabilities.nc file + ds_prob = xr.open_dataset(prob_nc_path) + + gdf_countries = gpd.GeoSeries( + NaturalEarthFeature(category='physical', scale='10m', name='land',).geometries(), + crs=4326, + ) + + # Loop through thresholds and sources and find corresponding values from probabilities.nc + threshold_count = -1 + for threshold in thresholds_m: + threshold_count += 1 + source_count = -1 + for source in sources: + source_count += 1 + ds_temp = ds_prob.sel(level=threshold, source=source) + tree = create_search_tree(ds_temp.x.values, ds_temp.y.values) + dist, indices = tree.query( + obs_coordinates, k=max_neighbors, distance_upper_bound=max_distance * 1e-5 + ) # 0.01 is equivalent to 1000 m + prediction_prob = find_nearby_prediction( + ds=ds_temp, variable=prediction_variable, indices=indices + ) + df_obs_storm[f'{source}_prob'] = prediction_prob + + # Plot probabilities at obs. points + plot_probabilities( + df_obs_storm, + f'{source}_prob', + gdf_countries, + f'Probability of {source} exceeding {thresholds_ft[threshold_count]} ft \n {storm_name}, {storm_year}, {leadtime}-hr leadtime', + os.path.join( + save_dir, + f'prob_{source}_above_{thresholds_ft[threshold_count]}ft_{storm_name}_{storm_year}_{leadtime}-hr.png', + ), + ) + + # Loop through probabilities: calculate hit/miss/... 
& POD/FAR + prob_count = -1 + for prob in probabilities: + prob_count += 1 + hit, miss, false_alarm, correct_neg = calculate_hit_miss( + df_obs_storm, obs_attribute, f'{source}_prob', threshold, prob + ) + hit_arr[threshold_count, 0, 0, source_count, prob_count] = hit + miss_arr[threshold_count, 0, 0, source_count, prob_count] = miss + false_alarm_arr[threshold_count, 0, 0, source_count, prob_count] = false_alarm + correct_neg_arr[threshold_count, 0, 0, source_count, prob_count] = correct_neg + + pod, far = calculate_POD_FAR(hit, miss, false_alarm, correct_neg) + POD_arr[threshold_count, 0, 0, source_count, prob_count] = pod + FAR_arr[threshold_count, 0, 0, source_count, prob_count] = far + + ds_ROC = xr.Dataset( + coords=dict( + threshold=thresholds_ft, + storm=[storm_name], + leadtime=[leadtime], + source=sources, + prob=probabilities, + ), + data_vars=dict( + hit=(['threshold', 'storm', 'leadtime', 'source', 'prob'], hit_arr), + miss=(['threshold', 'storm', 'leadtime', 'source', 'prob'], miss_arr), + false_alarm=( + ['threshold', 'storm', 'leadtime', 'source', 'prob'], + false_alarm_arr, + ), + correct_neg=( + ['threshold', 'storm', 'leadtime', 'source', 'prob'], + correct_neg_arr, + ), + POD=(['threshold', 'storm', 'leadtime', 'source', 'prob'], POD_arr), + FAR=(['threshold', 'storm', 'leadtime', 'source', 'prob'], FAR_arr), + ), + ) + ds_ROC.to_netcdf( + os.path.join(save_dir, f'{storm_name}_{storm_year}_{leadtime}hr_leadtime_POD_FAR.nc') + ) + + # plot ROC curves + marker_list = ['s', 'x'] + linestyle_list = ['dashed', 'dotted'] + threshold_count = -1 + for threshold in thresholds_ft: + threshold_count += 1 + fig = plt.figure() + ax = fig.add_subplot(111) + plt.axline( + (0.0, 0.0), (1.0, 1.0), linestyle='--', color='grey', label='random prediction' + ) + source_count = -1 + for source in sources: + source_count += 1 + plt.plot( + FAR_arr[threshold_count, 0, 0, source_count, :], + POD_arr[threshold_count, 0, 0, source_count, :], + label=f'{source}', + 
marker=marker_list[source_count], + linestyle=linestyle_list[source_count], + markersize=5, + ) + plt.legend() + plt.xlabel('False Alarm Rate') + plt.ylabel('Probability of Detection') + + plt.title( + f'{storm_name}_{storm_year}, {leadtime}-hr leadtime, {threshold} ft threshold' + ) + plt.savefig( + os.path.join( + save_dir, f'ROC_{storm_name}_{leadtime}hr_leadtime_{threshold}_ft.png' + ) + ) + plt.close() + + +def entry(): + parser = argparse.ArgumentParser() + + parser.add_argument('--storm_name', help='name of the storm', type=str) + + parser.add_argument('--storm_year', help='year of the storm', type=int) + + parser.add_argument('--leadtime', help='OFCL track leadtime hr', type=int) + + parser.add_argument('--prob_nc_path', help='path to probabilities.nc', type=str) + + parser.add_argument('--obs_df_path', help='Path to observations dataframe', type=str) + + # optional + parser.add_argument( + '--save_dir', help='directory for saving analysis', default=os.getcwd(), type=str + ) + + main(parser.parse_args()) + + +if __name__ == '__main__': + warnings.filterwarnings('ignore') + # warnings.filterwarnings("ignore", category=DeprecationWarning) + entry() From e767c9f6fd363932310f6a9d0a09247be1c7a37d Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 22 Mar 2024 17:57:26 +0000 Subject: [PATCH 15/54] From Hercules --- singularity/info/info.def | 2 +- singularity/ocsmesh/files/hurricane_mesh.py | 0 singularity/prep/files/refs/param.nml | 0 singularity/prep/files/refs/wwminput.nml | 0 singularity/prep/files/setup_model.py | 0 singularity/scripts/build.sh | 7 ++-- singularity/scripts/combine_gr3.exp | 0 singularity/scripts/input.conf | 33 ++++++++-------- singularity/scripts/mesh.sbatch | 9 ++++- singularity/scripts/post.sbatch | 8 +++- singularity/scripts/prep.sbatch | 9 ++++- singularity/scripts/schism.sbatch | 44 ++++++++++++--------- singularity/scripts/workflow.sh | 8 +++- singularity/solve/files/combine_gr3.exp | 0 singularity/solve/files/entrypoint.sh | 0 
15 files changed, 74 insertions(+), 46 deletions(-) mode change 100755 => 100644 singularity/ocsmesh/files/hurricane_mesh.py mode change 100755 => 100644 singularity/prep/files/refs/param.nml mode change 100755 => 100644 singularity/prep/files/refs/wwminput.nml mode change 100755 => 100644 singularity/prep/files/setup_model.py mode change 100755 => 100644 singularity/scripts/build.sh mode change 100755 => 100644 singularity/scripts/combine_gr3.exp mode change 100755 => 100644 singularity/scripts/mesh.sbatch mode change 100755 => 100644 singularity/scripts/schism.sbatch mode change 100755 => 100644 singularity/scripts/workflow.sh mode change 100755 => 100644 singularity/solve/files/combine_gr3.exp mode change 100755 => 100644 singularity/solve/files/entrypoint.sh diff --git a/singularity/info/info.def b/singularity/info/info.def index 1e1d1d7..35481cc 100644 --- a/singularity/info/info.def +++ b/singularity/info/info.def @@ -20,7 +20,7 @@ From: continuumio/miniconda3:23.3.1-0-alpine mamba clean --all --yes conda run -n info --no-capture-output \ - pip install stormevents==2.2.3 + pip install stormevents==2.2.1 conda clean --all diff --git a/singularity/ocsmesh/files/hurricane_mesh.py b/singularity/ocsmesh/files/hurricane_mesh.py old mode 100755 new mode 100644 diff --git a/singularity/prep/files/refs/param.nml b/singularity/prep/files/refs/param.nml old mode 100755 new mode 100644 diff --git a/singularity/prep/files/refs/wwminput.nml b/singularity/prep/files/refs/wwminput.nml old mode 100755 new mode 100644 diff --git a/singularity/prep/files/setup_model.py b/singularity/prep/files/setup_model.py old mode 100755 new mode 100644 diff --git a/singularity/scripts/build.sh b/singularity/scripts/build.sh old mode 100755 new mode 100644 index 43bebd7..c65a70c --- a/singularity/scripts/build.sh +++ b/singularity/scripts/build.sh @@ -1,9 +1,10 @@ -L_DEF_DIR=~/sandbox/ondemand-storm-workflow/singularity/ -L_IMG_DIR=/lustre/imgs 
+L_DEF_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/ +L_IMG_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/imgs mkdir -p $L_IMG_DIR for i in prep; do pushd $L_DEF_DIR/$i/ - sudo singularity build $L_IMG_DIR/$i.sif $i.def +# sudo singularity build $L_IMG_DIR/$i.sif $i.def + singularity build --fakeroot $L_IMG_DIR/$i.sif $i.def popd done diff --git a/singularity/scripts/combine_gr3.exp b/singularity/scripts/combine_gr3.exp old mode 100755 new mode 100644 diff --git a/singularity/scripts/input.conf b/singularity/scripts/input.conf index d4d7d33..40ec577 100644 --- a/singularity/scripts/input.conf +++ b/singularity/scripts/input.conf @@ -5,29 +5,30 @@ subset_mesh=1 # Other params hr_prelandfall=-1 past_forecast=1 -hydrology=1 +hydrology=0 use_wwm=0 -pahm_model='symmetric' -num_perturb=2 +pahm_model='gahm' +num_perturb=3 sample_rule='korobov' spinup_exec='pschism_PAHM_TVD-VL' hotstart_exec='pschism_PAHM_TVD-VL' +DATA=/work2/noaa/nos-surge/smani/data # Paths as local variables -L_NWM_DATASET=/lustre/static_data/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb -L_TPXO_DATASET=/lustre/static_data/tpxo -L_LEADTIMES_DATASET=/lustre/static_data/leadtimes.json -L_DEM_HI=/lustre/static_data/dem/ncei19/*.tif -L_DEM_LO=/lustre/static_data/dem/gebco/*.tif -L_MESH_HI=/lustre/static_data/grid/stofs3d_atl_v2.1_eval.gr3 -L_MESH_LO=/lustre/static_data/grid/WNAT_1km.14 -L_SHP_DIR=/lustre/static_data/shape -L_IMG_DIR=/lustre/imgs -L_SCRIPT_DIR=~/sandbox/ondemand-storm-workflow/singularity/scripts +L_NWM_DATASET=$DATA/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb +L_TPXO_DATASET=$DATA/tpxo +L_LEADTIMES_DATASET=$DATA/lead.json +L_DEM_HI=$DATA/dem/NCEI_1_9th/*.tif +L_DEM_LO=$DATA/dem/GEBCO/*.tif +L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 +L_MESH_LO=$DATA/grid/WNAT_1km.14 +L_SHP_DIR=$DATA/shape +L_IMG_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/imgs 
+L_SCRIPT_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/scripts # Environment -export SINGULARITY_BINDFLAGS="--bind /lustre" -export TMPDIR=/lustre/.tmp # redirect OCSMESH temp files +export SINGULARITY_BINDFLAGS="--bind /work2 --bind /work" +export TMPDIR=/work2/noaa/nos-surge/smani/.tmp # redirect OCSMESH temp files # Modules -L_SOLVE_MODULES="openmpi/4.1.2" +L_SOLVE_MODULES="openmpi/4.1.4" diff --git a/singularity/scripts/mesh.sbatch b/singularity/scripts/mesh.sbatch old mode 100755 new mode 100644 index 0ee6ee8..d243826 --- a/singularity/scripts/mesh.sbatch +++ b/singularity/scripts/mesh.sbatch @@ -1,7 +1,12 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 +#SBATCH --nodes=1 +#SBATCH --ntasks=80 +#SBATCH --cpus-per-task=1 +#SBATCH --time=02:00:00 +#SBATCH --account=nos-surge +#SBATCH --qos=batch +#SBATCH --partition=hercules set -ex diff --git a/singularity/scripts/post.sbatch b/singularity/scripts/post.sbatch index 8376b20..fd8837d 100644 --- a/singularity/scripts/post.sbatch +++ b/singularity/scripts/post.sbatch @@ -1,8 +1,12 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 #SBATCH --nodes=1 +#SBATCH --ntasks=40 +#SBATCH --cpus-per-task=1 +#SBATCH --time=04:00:00 +#SBATCH --account=nos-surge +#SBATCH --qos=batch +#SBATCH --partition=hercules set -ex diff --git a/singularity/scripts/prep.sbatch b/singularity/scripts/prep.sbatch index f892c92..bb505ee 100644 --- a/singularity/scripts/prep.sbatch +++ b/singularity/scripts/prep.sbatch @@ -1,7 +1,12 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 +#SBATCH --nodes=1 +#SBATCH --ntasks=10 +#SBATCH --cpus-per-task=1 +#SBATCH --time=02:00:00 +#SBATCH --account=nos-surge +#SBATCH --qos=batch +#SBATCH --partition=hercules set -ex diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch old mode 100755 new mode 100644 index c8f09fb..79cc437 --- a/singularity/scripts/schism.sbatch +++ 
b/singularity/scripts/schism.sbatch @@ -1,21 +1,30 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 -#SBATCH --nodes=3 -#SBATCH --ntasks-per-node=36 - -module load $MODULES - -export MV2_ENABLE_AFFINITY=0 -ulimit -s unlimited +#SBATCH --nodes=1 +#SBATCH --ntasks=80 +#SBATCH --cpus-per-task=1 +#SBATCH --time=03:00:00 +#SBATCH --account=nos-surge +#SBATCH --qos=batch +#SBATCH --partition=hercules set -ex pushd ${SCHISM_DIR} mkdir -p outputs -mpirun -np 36 singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ - ${SCHISM_EXEC} 4 + +export BIN_DIR=/work2/noaa/nos-surge/smani/bin/ + +module purge +module load python +module load intel-oneapi-compilers/2022.2.1 intel-oneapi-mpi/2021.7.1 +module load netcdf-c/4.9.0 netcdf-fortran/4.6.0 +module list + +export MV2_ENABLE_AFFINITY=0 +ulimit -s unlimited + +srun $BIN_DIR/pschism_HERCULES_PAHM_TVD-VL 4 if [ $? -eq 0 ]; then echo "Combining outputs..." @@ -24,17 +33,16 @@ if [ $? -eq 0 ]; then if ls hotstart* >/dev/null 2>&1; then times=$(ls hotstart_* | grep -o "hotstart[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) for i in $times; do - singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ - combine_hotstart7 --iteration $i + $BIN_DIR/combine_hotstart7 --iteration $i done fi popd - singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ - expect -f /scripts/combine_gr3.exp maxelev 1 - singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ - expect -f /scripts/combine_gr3.exp maxdahv 3 - mv maxdahv.gr3 maxelev.gr3 -t outputs +# singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ +# expect -f /scripts/combine_gr3.exp maxelev 1 +# singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ +# expect -f /scripts/combine_gr3.exp maxdahv 3 +# mv maxdahv.gr3 maxelev.gr3 -t outputs fi diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh old mode 100755 new mode 100644 index da699a2..d543669 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -14,7 +14,7 @@ export 
PATH=$L_SCRIPT_DIR:$PATH mkdir -p $TMPDIR function init { - local run_dir=/lustre/hurricanes/$1 + local run_dir=/work2/noaa/nos-surge/smani/runs/$1 mkdir $run_dir # mkdir $run_dir/downloads mkdir $run_dir/mesh @@ -64,7 +64,7 @@ else fi MESH_KWDS+=" --out ${run_dir}/mesh" export MESH_KWDS -sbatch --wait --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif $L_SCRIPT_DIR/mesh.sbatch +sbatch --wait --job-name=mesh_$tag --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif $L_SCRIPT_DIR/mesh.sbatch echo "Download necessary data..." @@ -98,6 +98,7 @@ export PREP_KWDS # NOTE: We need to wait because run jobs depend on perturbation dirs! setup_id=$(sbatch \ --wait \ + --job-name=prep_$tag \ --parsable \ --export=ALL,PREP_KWDS,STORM=$storm,YEAR=$year,IMG="$L_IMG_DIR/prep.sif" \ $L_SCRIPT_DIR/prep.sbatch \ @@ -111,6 +112,7 @@ SCHISM_SHARED_ENV+=",IMG=$L_IMG_DIR/solve.sif" SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" spinup_id=$(sbatch \ --parsable \ + --job-name=spinup_$tag \ -d afterok:$setup_id \ --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$run_dir/setup/ensemble.dir/spinup",SCHISM_EXEC="$spinup_exec" \ $L_SCRIPT_DIR/schism.sbatch @@ -120,6 +122,7 @@ joblist="" for i in $run_dir/setup/ensemble.dir/runs/*; do jobid=$( sbatch --parsable -d afterok:$spinup_id \ + --job-name="run_$(basename $i)_$tag" \ --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$i",SCHISM_EXEC="$hotstart_exec" \ $L_SCRIPT_DIR/schism.sbatch ) @@ -131,6 +134,7 @@ done # Post processing sbatch \ --parsable \ + --job-name=post_$tag \ -d afterok${joblist} \ --export=ALL,IMG="$L_IMG_DIR/prep.sif",ENSEMBLE_DIR="$run_dir/setup/ensemble.dir/" \ $L_SCRIPT_DIR/post.sbatch diff --git a/singularity/solve/files/combine_gr3.exp b/singularity/solve/files/combine_gr3.exp old mode 100755 new mode 100644 diff --git a/singularity/solve/files/entrypoint.sh b/singularity/solve/files/entrypoint.sh old mode 100755 new mode 100644 From 4ed283c05578651d053981b866547f6b74f2e5f2 Mon Sep 17 00:00:00 
2001 From: SorooshMani-NOAA Date: Fri, 22 Mar 2024 18:09:12 +0000 Subject: [PATCH 16/54] Update for PW paths --- singularity/info/info.def | 2 +- singularity/scripts/build.sh | 4 ++-- singularity/scripts/input.conf | 10 +++++----- singularity/scripts/mesh.sbatch | 6 +----- singularity/scripts/post.sbatch | 5 +---- singularity/scripts/prep.sbatch | 6 +----- singularity/scripts/schism.sbatch | 17 +++++++---------- singularity/scripts/workflow.sh | 2 +- 8 files changed, 19 insertions(+), 33 deletions(-) diff --git a/singularity/info/info.def b/singularity/info/info.def index 35481cc..1e1d1d7 100644 --- a/singularity/info/info.def +++ b/singularity/info/info.def @@ -20,7 +20,7 @@ From: continuumio/miniconda3:23.3.1-0-alpine mamba clean --all --yes conda run -n info --no-capture-output \ - pip install stormevents==2.2.1 + pip install stormevents==2.2.3 conda clean --all diff --git a/singularity/scripts/build.sh b/singularity/scripts/build.sh index c65a70c..2f295c5 100644 --- a/singularity/scripts/build.sh +++ b/singularity/scripts/build.sh @@ -1,5 +1,5 @@ -L_DEF_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/ -L_IMG_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/imgs +L_DEF_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/ +L_IMG_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/imgs mkdir -p $L_IMG_DIR for i in prep; do diff --git a/singularity/scripts/input.conf b/singularity/scripts/input.conf index 40ec577..7eb2cc6 100644 --- a/singularity/scripts/input.conf +++ b/singularity/scripts/input.conf @@ -13,7 +13,7 @@ sample_rule='korobov' spinup_exec='pschism_PAHM_TVD-VL' hotstart_exec='pschism_PAHM_TVD-VL' -DATA=/work2/noaa/nos-surge/smani/data +DATA=/nhc/static_data # Paths as local variables L_NWM_DATASET=$DATA/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb L_TPXO_DATASET=$DATA/tpxo @@ -23,12 +23,12 @@ L_DEM_LO=$DATA/dem/GEBCO/*.tif 
L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 L_MESH_LO=$DATA/grid/WNAT_1km.14 L_SHP_DIR=$DATA/shape -L_IMG_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/imgs -L_SCRIPT_DIR=/work2/noaa/nos-surge/smani/sandbox/ondemand-storm-workflow/singularity/scripts +L_IMG_DIR=/nhc/singularity_images +L_SCRIPT_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/scripts # Environment -export SINGULARITY_BINDFLAGS="--bind /work2 --bind /work" -export TMPDIR=/work2/noaa/nos-surge/smani/.tmp # redirect OCSMESH temp files +export SINGULARITY_BINDFLAGS="--bind /nhc" +export TMPDIR=/nhc/.tmp # redirect OCSMESH temp files # Modules L_SOLVE_MODULES="openmpi/4.1.4" diff --git a/singularity/scripts/mesh.sbatch b/singularity/scripts/mesh.sbatch index d243826..bc6d70a 100644 --- a/singularity/scripts/mesh.sbatch +++ b/singularity/scripts/mesh.sbatch @@ -1,12 +1,8 @@ #!/bin/bash #SBATCH --parsable #SBATCH --nodes=1 -#SBATCH --ntasks=80 -#SBATCH --cpus-per-task=1 +#SBATCH --exclusive #SBATCH --time=02:00:00 -#SBATCH --account=nos-surge -#SBATCH --qos=batch -#SBATCH --partition=hercules set -ex diff --git a/singularity/scripts/post.sbatch b/singularity/scripts/post.sbatch index fd8837d..64df18b 100644 --- a/singularity/scripts/post.sbatch +++ b/singularity/scripts/post.sbatch @@ -1,12 +1,9 @@ #!/bin/bash #SBATCH --parsable #SBATCH --nodes=1 -#SBATCH --ntasks=40 +#SBATCH --ntasks=36 #SBATCH --cpus-per-task=1 #SBATCH --time=04:00:00 -#SBATCH --account=nos-surge -#SBATCH --qos=batch -#SBATCH --partition=hercules set -ex diff --git a/singularity/scripts/prep.sbatch b/singularity/scripts/prep.sbatch index bb505ee..6487952 100644 --- a/singularity/scripts/prep.sbatch +++ b/singularity/scripts/prep.sbatch @@ -1,12 +1,8 @@ #!/bin/bash #SBATCH --parsable #SBATCH --nodes=1 -#SBATCH --ntasks=10 -#SBATCH --cpus-per-task=1 +#SBATCH --exclusive #SBATCH --time=02:00:00 -#SBATCH --account=nos-surge -#SBATCH --qos=batch -#SBATCH --partition=hercules set -ex diff 
--git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index 79cc437..336d718 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -1,30 +1,27 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --nodes=1 -#SBATCH --ntasks=80 +#SBATCH --nodes=10 +#SBATCH --ntasks=36 #SBATCH --cpus-per-task=1 +#SBATCH --exclusive #SBATCH --time=03:00:00 -#SBATCH --account=nos-surge -#SBATCH --qos=batch -#SBATCH --partition=hercules set -ex pushd ${SCHISM_DIR} mkdir -p outputs -export BIN_DIR=/work2/noaa/nos-surge/smani/bin/ +export BIN_DIR=/nhc/bin/ module purge -module load python -module load intel-oneapi-compilers/2022.2.1 intel-oneapi-mpi/2021.7.1 -module load netcdf-c/4.9.0 netcdf-fortran/4.6.0 +module load intel/2022.1.2 impi/2022.1.2 +module load netcdf module list export MV2_ENABLE_AFFINITY=0 ulimit -s unlimited -srun $BIN_DIR/pschism_HERCULES_PAHM_TVD-VL 4 +srun $BIN_DIR/pschism_PAHM_TVD-VL 4 if [ $? -eq 0 ]; then echo "Combining outputs..." diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index d543669..c796f62 100644 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -14,7 +14,7 @@ export PATH=$L_SCRIPT_DIR:$PATH mkdir -p $TMPDIR function init { - local run_dir=/work2/noaa/nos-surge/smani/runs/$1 + local run_dir=/nhc/Soroosh.Mani/runs/$1 mkdir $run_dir # mkdir $run_dir/downloads mkdir $run_dir/mesh From dfe53100e903dadfee14f849cd0213aba7069657 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 22 Mar 2024 20:51:59 +0000 Subject: [PATCH 17/54] Fix schism slurm for PW --- singularity/scripts/schism.sbatch | 4 ++-- singularity/scripts/workflow.sh | 0 2 files changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 singularity/scripts/workflow.sh diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index 336d718..908089e 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -1,7 +1,7 
@@ #!/bin/bash #SBATCH --parsable #SBATCH --nodes=10 -#SBATCH --ntasks=36 +#SBATCH --ntasks=360 #SBATCH --cpus-per-task=1 #SBATCH --exclusive #SBATCH --time=03:00:00 @@ -21,7 +21,7 @@ module list export MV2_ENABLE_AFFINITY=0 ulimit -s unlimited -srun $BIN_DIR/pschism_PAHM_TVD-VL 4 +mpirun -np $SLURM_NTASKS $BIN_DIR/pschism_PAHM_TVD-VL 4 if [ $? -eq 0 ]; then echo "Combining outputs..." diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh old mode 100644 new mode 100755 From 8eda7390c6ee29ec48d16a9e8b7b07dd5acae15d Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 19 Apr 2024 16:19:32 +0000 Subject: [PATCH 18/54] Update # of nodes --- singularity/scripts/schism.sbatch | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index 908089e..467d86a 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -1,7 +1,7 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --nodes=10 -#SBATCH --ntasks=360 +#SBATCH --nodes=3 +#SBATCH --ntasks=108 #SBATCH --cpus-per-task=1 #SBATCH --exclusive #SBATCH --time=03:00:00 @@ -21,6 +21,7 @@ module list export MV2_ENABLE_AFFINITY=0 ulimit -s unlimited +date mpirun -np $SLURM_NTASKS $BIN_DIR/pschism_PAHM_TVD-VL 4 if [ $? 
-eq 0 ]; then From 67b5c6ec72500da3daea53d0d6a4442cded1a54a Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 26 Apr 2024 14:57:25 +0000 Subject: [PATCH 19/54] Reorg hurricane data processing script --- singularity/info/files/hurricane_data.py | 236 +++++++++++++++-------- 1 file changed, 158 insertions(+), 78 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index 68ecac1..15400dc 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -13,13 +13,14 @@ import tempfile import numpy as np from datetime import datetime, timedelta +from typing import Optional, List import pandas as pd import geopandas as gpd from searvey.coops import COOPS_TidalDatum from searvey.coops import COOPS_TimeZone from searvey.coops import COOPS_Units -from shapely.geometry import box +from shapely.geometry import box, base from stormevents import StormEvent from stormevents.nhc import VortexTrack @@ -32,6 +33,97 @@ datefmt='%Y-%m-%d:%H:%M:%S') + +def trackstart_from_file( + leadtime_file: Optional[pathlib.Path], + nhc_code: str, + leadtime: float, +) -> Optional[datetime]: + if leadtime_file is None or not leadtime_file.is_file(): + return None + + leadtime_dict = pd.read_json(leadtime_file, orient='index') + leadtime_table = leadtime_dict.drop(columns='leadtime').merge( + leadtime_dict.leadtime.apply( + lambda x: pd.Series({v: k for k, v in x.items()}) + ).apply(pd.to_datetime, format='%Y%m%d%H'), + left_index=True, + right_index=True + ).set_index('ALnumber') + + if nhc_code.lower() not in leadtime_table.index: + return None + + storm_all_times = leadtime_table.loc[nhc_code.lower()].dropna() + if len(storm_all_times) > 1: + storm_all_times = storm_all_times.iloc[0] + if leadtime not in storm_all_times: + return None + + return storm_all_times[leadtime] + + +def get_perturb_timestamp_in_track( + track: VortexTrack, + time_col: 'str', + prescribed: Optional[datetime], + 
land_shapes: List[base.BaseGeometry], +) -> Optional[datetime]: + ''' + For best track pick the best track time that is at least + leadtime before the time besttrack is on land. But for forecast + pick the track that has a fcst000 date which is + at least leadtime before the time that the track is on land. + + Note that for a single advisory forecast, there are still MULTIPLE + tracks each with a different START DATE; while for best track + there's a SINGLE track with a start date equal to the beginning. + ''' + + track_data = track.data + + assert len(track.advisory.unique()) == 1 + + perturb_start = track_data.track_start_time.iloc[0] + if prescribed is not None: + times = track_data[time_col].unique() + leastdiff_idx = np.argmin(abs(times - prescribed)) + perturb_start = times[leastdiff_idx] + return perturb_start + + for shp in land_shapes: + tracks_onland = track_data[track_data.intersects(shp)] + if not tracks_onland.empty: + break + else: + # If track is never on input land polygons + return perturb_start + + + # Find tracks that started closest and prior to specified leadtime + # For each track start date, pick the FIRST time it's on land + candidates = tracks_onland.groupby('track_start_time').nth(0).reset_index() + dt = timedelta(hours=hr_before_landfall) + + # Pick LAST track that starts BEFORE the given leadtime among + # the candidates (start time and landfall time) + candidates['timediff'] = candidates.datetime - candidates.track_start_time + times_start_landfall = candidates[ + candidates['timediff'] >= dt + ][ + 'track_start_time', 'datetime' + ].iloc[-1] + picked_track = track_data[ + track_data.track_start_time == times_start_landfall.track_start_time] + + # Get the chosen track's timestamp closest to specified leadtime + perturb_start = picked_track.loc[ + times_start_landfall.datetime - picked_track.datetime >= dt + ].iloc[-1] + + return perturb_start + + def main(args): name_or_code = args.name_or_code @@ -41,9 +133,10 @@ def main(args):
swath_out = args.swath_outpath sta_dat_out = args.station_data_outpath sta_loc_out = args.station_location_outpath - is_past_forecast = args.past_forecast + use_past_forecast = args.past_forecast hr_before_landfall = args.hours_before_landfall lead_times = args.lead_times + track_dir = args.track_dir if hr_before_landfall < 0: hr_before_landfall = 48 @@ -59,31 +152,29 @@ def main(args): event = StormEvent(name_or_code, year) nhc_code = event.nhc_code storm_name = event.name - logger.info("Fetching a-deck track info...") - - prescribed = None - if lead_times is not None and lead_times.is_file(): - leadtime_dict = pd.read_json(lead_times, orient='index') - leadtime_table = leadtime_dict.drop(columns='leadtime').merge( - leadtime_dict.leadtime.apply( - lambda x: pd.Series({v: k for k, v in x.items()}) - ).apply(pd.to_datetime, format='%Y%m%d%H'), - left_index=True, - right_index=True - ).set_index('ALnumber') - - if nhc_code.lower() in leadtime_table.index: - storm_all_times = leadtime_table.loc[nhc_code.lower()].dropna() - if len(storm_all_times.shape) > 1: - storm_all_times = storm_all_times.iloc[0] - if hr_before_landfall in storm_all_times: - prescribed = storm_all_times[hr_before_landfall] - - # TODO: Get user input for whether its forecast or now! + + prescribed = trackstart_from_file( + lead_times, nhc_code, hr_before_landfall + ) + + # TODO: Get user input for whether it's forecast or now! 
now = datetime.now() + is_current_storm = (now - event.start_date < timedelta(days=30)) + df_dt = pd.DataFrame(columns=['date_time']) - if (is_past_forecast or (now - event.start_date < timedelta(days=30))): + + local_track_file = track_dir / f'a{nhc_code.lower()}.dat' + if use_past_forecast or is_current_storm: + logger.info("Fetching a-deck track info...") + # Find and pick a single advisory based on priority temp_track = event.track(file_deck='a') + # If a file exists, use the local file +# if local_track_file.exists(): +# track_raw = pd.read_csv(local_track_file, header=None) +# # Handle special case +# track_raw[track_raw[4] == 'RMWP', 4] = 'OFCL' +# track_raw.to_csv(tempfile, index=None) +# temp_track = VortexTrack(local_track_file, file_deck='a') adv_avail = temp_track.unfiltered_data.advisory.unique() adv_order = ['OFCL', 'HWRF', 'HMON', 'CARQ'] advisory = adv_avail[0] @@ -92,6 +183,7 @@ def main(args): advisory = adv break + # TODO: THIS IS NO LONGER RELEVANT IF WE FAKE RMWP AS OFCL! if advisory == "OFCL" and "CARQ" not in adv_avail: raise ValueError( "OFCL advisory needs CARQ for fixing missing variables!" @@ -100,33 +192,42 @@ def main(args): # NOTE: Track taken from `StormEvent` object is up to now only. 
# See GitHub issue #57 for StormEvents track = VortexTrack(nhc_code, file_deck='a', advisories=[advisory]) + if local_track_file.exists(): + track = VortexTrack( + local_track_file, file_deck='a', advisories=[advisory] + ) + if is_current_storm: + # Get the latest track forecast + forecast_start = track.data.track_start_time.max() + gdf_track = track.data[track.data.track_start_time == forecast_start] + gdf_track = pd.concat(( + track.data[ + (track.data.track_start_time < forecast_start) + & (track.data.forecast_hours == 0) + ], + gdf_track + )) - if is_past_forecast: + # Put both dates as now(), for pyschism to setup forecast + df_dt['date_time'] = ( + track.start_date, track.end_date, forecast_start + ) + + coops_ssh = None + + else: #if use_past_forecast: logger.info( f"Creating {advisory} track for {hr_before_landfall}" +" hours before landfall forecast..." ) - if prescribed is not None: - start_times = track.data.track_start_time.unique() - leastdiff_idx = np.argmin(abs(start_times - prescribed)) - forecast_start = start_times[leastdiff_idx] - - - else: - onland_adv_tracks = track.data[track.data.intersects(shp_US)] - if onland_adv_tracks.empty: - # If it doesn't landfall on US, check with other countries - onland_adv_tracks = track.data[ - track.data.intersects(ne_low.unary_union) - ] - - candidates = onland_adv_tracks.groupby('track_start_time').nth(0).reset_index() - candidates['timediff'] = candidates.datetime - candidates.track_start_time - forecast_start = candidates[ - candidates['timediff'] >= timedelta(hours=hr_before_landfall) - ].track_start_time.iloc[-1] + forecast_start = get_perturb_timestamp_in_track( + track, + 'track_start_time', + prescribed, + [shp_US, ne_low.unary_union], + ) gdf_track = track.data[track.data.track_start_time == forecast_start] # Append before track from previous forecasts: @@ -150,24 +251,6 @@ def main(args): time_zone=COOPS_TimeZone.GMT, ) - else: - # Get the latest track forecast - forecast_start = 
track.data.track_start_time.max() - gdf_track = track.data[track.data.track_start_time == forecast_start] - gdf_track = pd.concat(( - track.data[ - (track.data.track_start_time < forecast_start) - & (track.data.forecast_hours == 0) - ], - gdf_track - )) - - # Put both dates as now(), for pyschism to setup forecast - df_dt['date_time'] = ( - track.start_date, track.end_date, forecast_start - ) - - coops_ssh = None # NOTE: Fake besttrack: Since PySCHISM supports "BEST" track # files for its parametric forcing, write track as "BEST" after @@ -175,6 +258,7 @@ def main(args): # NOTE: Fake best track AFTER perturbation # gdf_track.advisory = 'BEST' # gdf_track.forecast_hours = 0 + # Fill missing name column if any gdf_track['name'] = storm_name track = VortexTrack(storm=gdf_track, file_deck='a', advisories=[advisory]) @@ -204,22 +288,12 @@ def main(args): perturb_start = track.start_date if hr_before_landfall: - if prescribed is not None: - # NOTE: track_start_time is the genesis for best track - times = track.data.datetime.unique() - leastdiff_idx = np.argmin(abs(times - prescribed)) - perturb_start = times[leastdiff_idx] - else: - onland_adv_tracks = track.data[track.data.intersects(shp_US)] - if onland_adv_tracks.empty: - # If it doesn't landfall on US, check with other countries - onland_adv_tracks = track.data[ - track.data.intersects(ne_low.unary_union) - ] - onland_date = onland_adv_tracks.datetime.iloc[0] - perturb_start = track.data[ - onland_date - track.data.datetime >= timedelta(hours=hr_before_landfall) - ].datetime.iloc[-1] + perturb_start = get_perturb_timestamp_in_track( + track, + 'datetime', + prescribed, + [shp_US, ne_low.unary_union], + ) df_dt['date_time'] = ( track.start_date, track.end_date, perturb_start @@ -322,6 +396,12 @@ def main(args): help="Helper file for prescribed lead times", ) + parser.add_argument( + "--track-dir", + type=pathlib.Path, + help="Existing adjusted track directory", + ) + args = parser.parse_args() main(args) From 
bfe5ea0afc10ccd338a9a21561217d5aa20fa1fb Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 26 Apr 2024 16:19:51 +0000 Subject: [PATCH 20/54] Preprocessed track - pending test --- singularity/info/files/hurricane_data.py | 158 ++++++++++------------- singularity/scripts/workflow.sh | 6 +- 2 files changed, 74 insertions(+), 90 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index 15400dc..e1459e4 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -136,7 +136,7 @@ def main(args): use_past_forecast = args.past_forecast hr_before_landfall = args.hours_before_landfall lead_times = args.lead_times - track_dir = args.track_dir + track_dir = args.preprocessed_tracks_dir if hr_before_landfall < 0: hr_before_landfall = 48 @@ -163,61 +163,47 @@ def main(args): df_dt = pd.DataFrame(columns=['date_time']) + # All preprocessed tracks are treated as OFCL local_track_file = track_dir / f'a{nhc_code.lower()}.dat' + if use_past_forecast or is_current_storm: logger.info("Fetching a-deck track info...") - # Find and pick a single advisory based on priority - temp_track = event.track(file_deck='a') - # If a file exists, use the local file -# if local_track_file.exists(): -# track_raw = pd.read_csv(local_track_file, header=None) -# # Handle special case -# track_raw[track_raw[4] == 'RMWP', 4] = 'OFCL' -# track_raw.to_csv(tempfile, index=None) -# temp_track = VortexTrack(local_track_file, file_deck='a') - adv_avail = temp_track.unfiltered_data.advisory.unique() - adv_order = ['OFCL', 'HWRF', 'HMON', 'CARQ'] - advisory = adv_avail[0] - for adv in adv_order: - if adv in adv_avail: - advisory = adv - break - - # TODO: THIS IS NO LONGER RELEVANT IF WE FAKE RMWP AS OFCL! - if advisory == "OFCL" and "CARQ" not in adv_avail: - raise ValueError( - "OFCL advisory needs CARQ for fixing missing variables!" - ) - # NOTE: Track taken from `StormEvent` object is up to now only. 
- # See GitHub issue #57 for StormEvents - track = VortexTrack(nhc_code, file_deck='a', advisories=[advisory]) - if local_track_file.exists(): - track = VortexTrack( - local_track_file, file_deck='a', advisories=[advisory] - ) + if not local_track_file.exists(): + # Find and pick a single advisory based on priority + temp_track = event.track(file_deck='a') + adv_avail = temp_track.unfiltered_data.advisory.unique() + adv_order = ['OFCL', 'HWRF', 'HMON', 'CARQ'] + advisory = adv_avail[0] + for adv in adv_order: + if adv in adv_avail: + advisory = adv + break + + # TODO: THIS IS NO LONGER RELEVANT IF WE FAKE RMWP AS OFCL! + if advisory == "OFCL" and "CARQ" not in adv_avail: + raise ValueError( + "OFCL advisory needs CARQ for fixing missing variables!" + ) + + track = VortexTrack(nhc_code, file_deck='a', advisories=[advisory]) + + else: # read from preprocessed file + # If a file exists, use the local file + track_raw = pd.read_csv(local_track_file, header=None) + assert len(track_raw[4].unique()) == 1 + track_raw[4] = 'OFCL' + track = VortexTrack(track_raw, file_deck='a', advisories=['OFCL']) + + + forecast_start = None # TODO? if is_current_storm: # Get the latest track forecast forecast_start = track.data.track_start_time.max() - gdf_track = track.data[track.data.track_start_time == forecast_start] - gdf_track = pd.concat(( - track.data[ - (track.data.track_start_time < forecast_start) - & (track.data.forecast_hours == 0) - ], - gdf_track - )) - - # Put both dates as now(), for pyschism to setup forecast - df_dt['date_time'] = ( - track.start_date, track.end_date, forecast_start - ) - coops_ssh = None else: #if use_past_forecast: - logger.info( f"Creating {advisory} track for {hr_before_landfall}" +" hours before landfall forecast..." 
@@ -229,21 +215,7 @@ def main(args): [shp_US, ne_low.unary_union], ) - gdf_track = track.data[track.data.track_start_time == forecast_start] - # Append before track from previous forecasts: - gdf_track = pd.concat(( - track.data[ - (track.data.track_start_time < forecast_start) - & (track.data.forecast_hours == 0) - ], - gdf_track - )) - df_dt['date_time'] = ( - forecast_start - timedelta(days=2), track.end_date, forecast_start - ) - - - logger.info("Fetching water level measurements from COOPS stations...") + logger.info("Fetching water levels for COOPS stations...") coops_ssh = event.coops_product_within_isotach( product='water_level', wind_speed=34, datum=COOPS_TidalDatum.NAVD, @@ -251,20 +223,28 @@ def main(args): time_zone=COOPS_TimeZone.GMT, ) + df_dt['date_time'] = ( + forecast_start - timedelta(days=2), track.end_date, forecast_start + ) - # NOTE: Fake besttrack: Since PySCHISM supports "BEST" track - # files for its parametric forcing, write track as "BEST" after - # fixing the OFCL by CARQ through StormEvents - # NOTE: Fake best track AFTER perturbation -# gdf_track.advisory = 'BEST' -# gdf_track.forecast_hours = 0 + gdf_track = track.data[track.data.track_start_time == forecast_start] + # Prepend track from previous 0hr forecasts: + gdf_track = pd.concat(( + track.data[ + (track.data.track_start_time < forecast_start) + & (track.data.forecast_hours == 0) + ], + gdf_track + )) + + # NOTE: Fake best track for PySCHISM AFTER perturbation # Fill missing name column if any gdf_track['name'] = storm_name - track = VortexTrack(storm=gdf_track, file_deck='a', advisories=[advisory]) + track = VortexTrack( + storm=gdf_track, file_deck='a', advisories=[advisory] + ) windswath_dict = track.wind_swaths(wind_speed=34) - # NOTE: Fake best track AFTER perturbation -# windswaths = windswath_dict['BEST'] # Faked BEST windswaths = windswath_dict[advisory] logger.info(f"Fetching {advisory} windswath...") windswath_time = min(pd.to_datetime(list(windswaths.keys()))) @@ 
-272,19 +252,13 @@ def main(args): windswath_time.strftime("%Y%m%dT%H%M%S") ] - else: + else: # Best track logger.info("Fetching b-deck track info...") logger.info("Fetching BEST windswath...") track = event.track(file_deck='b') - # Drop duplicate rows based on isotach and time without minutes - # (PaHM doesn't take minutes into account) - gdf_track = track.data - gdf_track.datetime = gdf_track.datetime.dt.floor('h') - gdf_track = gdf_track.drop_duplicates(subset=['datetime', 'isotach_radius'], keep='last') - track = VortexTrack(storm=gdf_track, file_deck='b', advisories=['BEST']) perturb_start = track.start_date if hr_before_landfall: @@ -295,28 +269,36 @@ def main(args): [shp_US, ne_low.unary_union], ) + logger.info("Fetching water level measurements from COOPS stations...") + coops_ssh = event.coops_product_within_isotach( + product='water_level', wind_speed=34, + datum=COOPS_TidalDatum.NAVD, + units=COOPS_Units.METRIC, + time_zone=COOPS_TimeZone.GMT, + ) + df_dt['date_time'] = ( track.start_date, track.end_date, perturb_start ) + # Drop duplicate rows based on isotach and time without minutes + # (PaHM doesn't take minutes into account) + gdf_track = track.data + gdf_track.datetime = gdf_track.datetime.dt.floor('h') + gdf_track = gdf_track.drop_duplicates( + subset=['datetime', 'isotach_radius'], keep='last' + ) + track = VortexTrack( + storm=gdf_track, file_deck='b', advisories=['BEST'] + ) + windswath_dict = track.wind_swaths(wind_speed=34) - # NOTE: event.start_date (first advisory date) doesn't - # necessarily match the windswath key which comes from track - # start date for the first advisory (at least in 2021!) 
windswaths = windswath_dict['BEST'] latest_advistory_stamp = max(pd.to_datetime(list(windswaths.keys()))) windswath = windswaths[ latest_advistory_stamp.strftime("%Y%m%dT%H%M%S") ] - logger.info("Fetching water level measurements from COOPS stations...") - coops_ssh = event.coops_product_within_isotach( - product='water_level', wind_speed=34, - datum=COOPS_TidalDatum.NAVD, - units=COOPS_Units.METRIC, - time_zone=COOPS_TimeZone.GMT, - ) - logger.info("Writing relevant data to files...") df_dt.to_csv(date_out) # Remove duplicate entries for similar isotach and time @@ -397,7 +379,7 @@ def main(args): ) parser.add_argument( - "--track-dir", + "--preprocessed-tracks-dir", type=pathlib.Path, help="Existing adjusted track directory", ) diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index c796f62..e5b3fe5 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -36,10 +36,12 @@ singularity run $SINGULARITY_BINDFLAGS $L_IMG_DIR/info.sif \ --station-data-outpath $run_dir/coops_ssh/stations.nc \ --station-location-outpath $run_dir/setup/stations.csv \ $(if [ $past_forecast == 1 ]; then echo "--past-forecast"; fi) \ - --hours-before-landfall $hr_prelandfall \ - --lead-times $L_LEADTIMES_DATASET \ + --hours-before-landfall "$hr_prelandfall" \ + --lead-times "$L_LEADTIMES_DATASET" \ + --preprocessed-tracks-dir "$L_TRACK_DIR" \ $storm $year +exit 0 MESH_KWDS="" if [ $subset_mesh == 1 ]; then From add2bd64ce1659c65a5d73de6338b17122ca26b7 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 26 Apr 2024 17:51:49 +0000 Subject: [PATCH 21/54] Fix issue with selecting leadtime from file --- singularity/info/files/hurricane_data.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index e1459e4..fbc6b82 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ 
-54,13 +54,13 @@ def trackstart_from_file( if nhc_code.lower() not in leadtime_table.index: return None - storm_all_times = leadtime_table.loc[nhc_code.lower()].dropna() + storm_all_times = leadtime_table.loc[[nhc_code.lower()]].dropna() if len(storm_all_times) > 1: storm_all_times = storm_all_times.iloc[0] if leadtime not in storm_all_times: return None - return storm_all_times[leadtime] + return storm_all_times[leadtime].item() def get_perturb_timestamp_in_track( @@ -82,7 +82,7 @@ def get_perturb_timestamp_in_track( track_data = track.data - assert len(track.advisory.unique()) == 1 + assert len(set(track.advisories)) == 1 perturb_start = track_data.track_start_time.iloc[0] if prescribed is not None: @@ -169,6 +169,7 @@ def main(args): if use_past_forecast or is_current_storm: logger.info("Fetching a-deck track info...") + advisory = 'OFCL' if not local_track_file.exists(): # Find and pick a single advisory based on priority temp_track = event.track(file_deck='a') @@ -189,12 +190,18 @@ def main(args): track = VortexTrack(nhc_code, file_deck='a', advisories=[advisory]) else: # read from preprocessed file + advisory = 'OFCL' + # If a file exists, use the local file track_raw = pd.read_csv(local_track_file, header=None) assert len(track_raw[4].unique()) == 1 track_raw[4] = 'OFCL' - track = VortexTrack(track_raw, file_deck='a', advisories=['OFCL']) + with tempfile.NamedTemporaryFile() as tmp: + track_raw.to_csv(tmp.name, index=None) + track = VortexTrack( + tmp.name, file_deck='a', advisories=[advisory] + ) forecast_start = None # TODO? 
From 501f80edbec3474a2afda7bbea611a63653bc2f1 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 26 Apr 2024 20:06:33 +0000 Subject: [PATCH 22/54] More hurricane info fixes + test script --- singularity/info/files/hurricane_data.py | 11 ++- singularity/scripts/test.sh | 102 +++++++++++++++++++++++ 2 files changed, 110 insertions(+), 3 deletions(-) create mode 100755 singularity/scripts/test.sh diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index fbc6b82..d3712b1 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -66,6 +66,7 @@ def trackstart_from_file( def get_perturb_timestamp_in_track( track: VortexTrack, time_col: 'str', + hr_before_landfall: datetime, prescribed: Optional[datetime], land_shapes: List[base.BaseGeometry], ) -> Optional[datetime]: @@ -111,7 +112,7 @@ def get_perturb_timestamp_in_track( times_start_landfall = candidates[ candidates['timediff'] >= dt ][ - 'track_start_time', 'datetime' + ['track_start_time', 'datetime'] ].iloc[-1] picked_track = track_data[ track_data.track_start_time == times_start_landfall.track_start_time] @@ -164,13 +165,15 @@ def main(args): df_dt = pd.DataFrame(columns=['date_time']) # All preprocessed tracks are treated as OFCL - local_track_file = track_dir / f'a{nhc_code.lower()}.dat' + local_track_file = pathlib.Path() + if track_dir is not None: + local_track_file = track_dir / f'a{nhc_code.lower()}.dat' if use_past_forecast or is_current_storm: logger.info("Fetching a-deck track info...") advisory = 'OFCL' - if not local_track_file.exists(): + if not local_track_file.is_file(): # Find and pick a single advisory based on priority temp_track = event.track(file_deck='a') adv_avail = temp_track.unfiltered_data.advisory.unique() @@ -218,6 +221,7 @@ def main(args): forecast_start = get_perturb_timestamp_in_track( track, 'track_start_time', + hr_before_landfall, prescribed, [shp_US, ne_low.unary_union], ) @@ -272,6 
+276,7 @@ def main(args): perturb_start = get_perturb_timestamp_in_track( track, 'datetime', + hr_before_landfall, prescribed, [shp_US, ne_low.unary_union], ) diff --git a/singularity/scripts/test.sh b/singularity/scripts/test.sh new file mode 100755 index 0000000..4d67ed6 --- /dev/null +++ b/singularity/scripts/test.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +THIS_SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +source $THIS_SCRIPT_DIR/input.conf + +TEST_OUT=/nhc/Soroosh.Mani/runs/ +SINGULARITY_ROOT=$L_SCRIPT_DIR/../ + +init () { + uuid=$(uuidgen) + tag=test_${2}_${3}_${uuid} + + local run_dir=$1/$tag + mkdir $run_dir + echo $run_dir +} + +test_hurricane_info () { + storm=florence + year=2018 + hr_prelandfall=48 + + run_dir=$(init $TEST_OUT $storm $year) + + this_test_out=$run_dir/info_w_leadjson_preptrack_$hr_prelandfall + mkdir $this_test_out + python $SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --past-forecast \ + --hours-before-landfall "$hr_prelandfall" \ + --lead-times "$L_LEADTIMES_DATASET" \ + --preprocessed-tracks-dir "$L_TRACK_DIR" \ + $storm $year + + this_test_out=$run_dir/info_w_leadjson_$hr_prelandfall + mkdir $this_test_out + python $SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --past-forecast \ + --hours-before-landfall "$hr_prelandfall" \ + --lead-times "$L_LEADTIMES_DATASET" \ + $storm $year + + this_test_out=$run_dir/info_w_leadjson_24 + mkdir $this_test_out + python 
$SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --past-forecast \ + --hours-before-landfall 24 \ + --lead-times "$L_LEADTIMES_DATASET" \ + $storm $year + + this_test_out=$run_dir/info_w_preptrack_$hr_prelandfall + mkdir $this_test_out + python $SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --past-forecast \ + --hours-before-landfall "$hr_prelandfall" \ + --preprocessed-tracks-dir "$L_TRACK_DIR" \ + $storm $year + + this_test_out=$run_dir/info_w_leadjson_besttrack_$hr_prelandfall + mkdir $this_test_out + python $SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --hours-before-landfall "$hr_prelandfall" \ + --lead-times "$L_LEADTIMES_DATASET" \ + $storm $year + + this_test_out=$run_dir/info_w_besttrack_$hr_prelandfall + mkdir $this_test_out + python $SINGULARITY_ROOT/info/files/hurricane_data.py \ + --date-range-outpath $this_test_out/dates.csv \ + --track-outpath $this_test_out/hurricane-track.dat \ + --swath-outpath $this_test_out/windswath \ + --station-data-outpath $this_test_out/stations.nc \ + --station-location-outpath $this_test_out/stations.csv \ + --hours-before-landfall "$hr_prelandfall" \ + $storm $year +} + +$1 From 6171de77f23349aea9d6cab3a1b767a1969bcdfc Mon Sep 17 
00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 29 Apr 2024 13:55:46 +0000 Subject: [PATCH 23/54] Fix processing rmwp track files --- singularity/info/files/hurricane_data.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index d3712b1..f8e9018 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -122,7 +122,7 @@ def get_perturb_timestamp_in_track( times_start_landfall.datetime - picked_track.datetime >= dt ].iloc[-1] - return perturb_start + return perturb_start[time_col] def main(args): @@ -198,10 +198,11 @@ def main(args): # If a file exists, use the local file track_raw = pd.read_csv(local_track_file, header=None) assert len(track_raw[4].unique()) == 1 - track_raw[4] = 'OFCL' + track_raw[4] = advisory with tempfile.NamedTemporaryFile() as tmp: - track_raw.to_csv(tmp.name, index=None) + # TODO: Spaces get messed up! + track_raw.to_csv(tmp.name, header=False, index=False) track = VortexTrack( tmp.name, file_deck='a', advisories=[advisory] ) From 2d64537c814e16c81f802146d94a28fd104c15b4 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 29 Apr 2024 18:25:00 +0000 Subject: [PATCH 24/54] More fixes for reading RMWP --- singularity/info/files/hurricane_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index f8e9018..0bb8171 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -196,7 +196,7 @@ def main(args): advisory = 'OFCL' # If a file exists, use the local file - track_raw = pd.read_csv(local_track_file, header=None) + track_raw = pd.read_csv(local_track_file, header=None, dtype=str) assert len(track_raw[4].unique()) == 1 track_raw[4] = advisory @@ -244,7 +244,7 @@ def main(args): gdf_track = pd.concat(( track.data[ 
(track.data.track_start_time < forecast_start) - & (track.data.forecast_hours == 0) + & (track.data.forecast_hours.astype(int) == 0) ], gdf_track )) From 12bbd49ce618f959abf76eb620e90ba1e30e9794 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Thu, 2 May 2024 19:25:39 +0000 Subject: [PATCH 25/54] Fix RMWP setup issues --- singularity/info/files/hurricane_data.py | 25 +++++++++++++++++++++--- singularity/scripts/workflow.sh | 1 - 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/singularity/info/files/hurricane_data.py b/singularity/info/files/hurricane_data.py index 0bb8171..cdd8249 100644 --- a/singularity/info/files/hurricane_data.py +++ b/singularity/info/files/hurricane_data.py @@ -23,6 +23,11 @@ from shapely.geometry import box, base from stormevents import StormEvent from stormevents.nhc import VortexTrack +from stormevents.nhc.track import ( + combine_tracks, + correct_ofcl_based_on_carq_n_hollandb, + separate_tracks, +) logger = logging.getLogger(__name__) @@ -194,18 +199,32 @@ def main(args): else: # read from preprocessed file advisory = 'OFCL' - + # If a file exists, use the local file track_raw = pd.read_csv(local_track_file, header=None, dtype=str) assert len(track_raw[4].unique()) == 1 track_raw[4] = advisory with tempfile.NamedTemporaryFile() as tmp: - # TODO: Spaces get messed up! track_raw.to_csv(tmp.name, header=False, index=False) - track = VortexTrack( + + unfixed_track = VortexTrack( tmp.name, file_deck='a', advisories=[advisory] ) + carq_track = event.track(file_deck='a', advisories=['CARQ']) + unfix_dict = { + **separate_tracks(unfixed_track.data), + **separate_tracks(carq_track.data), + } + + fix_dict = correct_ofcl_based_on_carq_n_hollandb(unfix_dict) + fix_track = combine_tracks(fix_dict) + + track = VortexTrack( + fix_track[fix_track.advisory == advisory], + file_deck='a', + advisories=[advisory] + ) forecast_start = None # TODO? 
diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index e5b3fe5..4679fa8 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -41,7 +41,6 @@ singularity run $SINGULARITY_BINDFLAGS $L_IMG_DIR/info.sif \ --preprocessed-tracks-dir "$L_TRACK_DIR" \ $storm $year -exit 0 MESH_KWDS="" if [ $subset_mesh == 1 ]; then From 5851f12e505c387648fbfb81adae10c4a35b2067 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 3 May 2024 13:07:36 +0000 Subject: [PATCH 26/54] Add prepared track input to input.conf --- singularity/scripts/input.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/singularity/scripts/input.conf b/singularity/scripts/input.conf index 7eb2cc6..42065e8 100644 --- a/singularity/scripts/input.conf +++ b/singularity/scripts/input.conf @@ -18,6 +18,7 @@ DATA=/nhc/static_data L_NWM_DATASET=$DATA/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb L_TPXO_DATASET=$DATA/tpxo L_LEADTIMES_DATASET=$DATA/lead.json +L_TRACK_DIR=$DATA/tracks_adj_rmw L_DEM_HI=$DATA/dem/NCEI_1_9th/*.tif L_DEM_LO=$DATA/dem/GEBCO/*.tif L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 From 1f7d551251f8eb3269f16dd22b28b811d99b7a92 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 31 May 2024 15:52:06 +0000 Subject: [PATCH 27/54] Fix stormevents and ensembleperturbation versions to pre gaussian and add version log --- singularity/info/environment.yml | 2 +- singularity/info/info.def | 10 ++++------ singularity/prep/files/setup_ensemble.py | 4 ++-- singularity/prep/prep.def | 21 ++++++++++++++------- singularity/scripts/workflow.sh | 13 +++++++++++++ 5 files changed, 34 insertions(+), 16 deletions(-) diff --git a/singularity/info/environment.yml b/singularity/info/environment.yml index 22107ab..53230db 100644 --- a/singularity/info/environment.yml +++ b/singularity/info/environment.yml @@ -5,7 +5,7 @@ dependencies: - cartopy - cfunits - gdal - - geopandas + - geopandas>=0.13 - geos - proj - pygeos diff --git 
a/singularity/info/info.def b/singularity/info/info.def index 1e1d1d7..a133ce7 100644 --- a/singularity/info/info.def +++ b/singularity/info/info.def @@ -1,6 +1,5 @@ BootStrap: docker -#From: centos:centos7.8.2003 -From: continuumio/miniconda3:23.3.1-0-alpine +From: continuumio/miniconda3:24.3.0-0 %files environment.yml @@ -10,8 +9,7 @@ From: continuumio/miniconda3:23.3.1-0-alpine export PYTHONPATH=/scripts %post -# yum update -y && yum upgrade -y - apk update && apk upgrade && apk add git + apt update && apt upgrade -y && apt install -y git conda install mamba -n base -c conda-forge conda install libarchive -n base -c conda-forge @@ -24,11 +22,11 @@ From: continuumio/miniconda3:23.3.1-0-alpine conda clean --all - apk del git + apt remove -y git %runscript - conda run -n info --no-capture-output python -m hurricane_data $* + conda run -n info --no-capture-output python -m $* %labels diff --git a/singularity/prep/files/setup_ensemble.py b/singularity/prep/files/setup_ensemble.py index bfd1472..24eedab 100644 --- a/singularity/prep/files/setup_ensemble.py +++ b/singularity/prep/files/setup_ensemble.py @@ -128,7 +128,7 @@ def main(args): model_start_time = datetime.strptime(date_1, '%Y%m%d%H') model_end_time = datetime.strptime(date_2, '%Y%m%d%H') perturb_start = datetime.strptime(date_3, '%Y%m%d%H') - spinup_time = timedelta(days=2) + spinup_time = timedelta(days=8) forcing_configurations = [] forcing_configurations.append( @@ -179,7 +179,7 @@ def main(args): variables=[ 'cross_track', 'along_track', - 'radius_of_maximum_winds', + 'radius_of_maximum_winds', # TODO: add option for persistent 'max_sustained_wind_speed', ], sample_from_distribution=args.sample_from_distribution, diff --git a/singularity/prep/prep.def b/singularity/prep/prep.def index 483e470..2677bef 100644 --- a/singularity/prep/prep.def +++ b/singularity/prep/prep.def @@ -1,6 +1,5 @@ BootStrap: docker -#From: centos:centos7.8.2003 -From: continuumio/miniconda3:23.5.2-0-alpine +From: 
continuumio/miniconda3:24.3.0-0 %files environment.yml @@ -13,7 +12,7 @@ From: continuumio/miniconda3:23.5.2-0-alpine %post ENV_NAME=prep - apk update && apk upgrade && apk add \ + apt update && apt upgrade && apt install -y \ git \ libarchive @@ -21,10 +20,21 @@ From: continuumio/miniconda3:23.5.2-0-alpine mamba update --name base --channel defaults conda mamba env create -n $ENV_NAME --file /environment.yml +# git clone https://github.com/noaa-ocs-modeling/ensembleperturbation +# cd ensembleperturbation +# git fetch origin pull/139/head:rmax +# git checkout rmax +# conda run -n $ENV_NAME --no-capture-output \ +# pip install ./ +# cd .. +# rm -rf ensembleperturbation + conda run -n $ENV_NAME --no-capture-output \ pip install "pyschism>=0.1.15" conda run -n $ENV_NAME --no-capture-output \ pip install "coupledmodeldriver>=1.6.6" + conda run -n $ENV_NAME --no-capture-output \ + pip install stormevents==2.2.3 conda run -n $ENV_NAME --no-capture-output \ pip install "ensembleperturbation>=1.1.2" conda run -n $ENV_NAME --no-capture-output \ @@ -33,16 +43,13 @@ From: continuumio/miniconda3:23.5.2-0-alpine mamba install -y -n $ENV_NAME -cconda-forge \ --force-reinstall geopandas geopandas-base - pip uninstall pygeos # We use shapely 2 - mamba install -y -cconda-forge --force-reinstall geopandas - git clone https://github.com/schism-dev/schism cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/Relocate/relocate_source_feeder.py /scripts cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/feeder_heads_bases_v2.1.xy /refs # cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/relocate_florence.reg /refs rm -rfv schism - mamba clean --all --yes && apk del git + mamba clean --all --yes && apt remove -y git %runscript diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index 4679fa8..a9ac78e 100755 --- 
a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -13,6 +13,12 @@ export PATH=$L_SCRIPT_DIR:$PATH # Processing... mkdir -p $TMPDIR +function version { + logfile=$1 + echo -n "`basename $2`: " >> $logfile + singularity run $2 pip list | grep $3 >> $logfile +} + function init { local run_dir=/nhc/Soroosh.Mani/runs/$1 mkdir $run_dir @@ -21,6 +27,12 @@ function init { mkdir $run_dir/setup mkdir $run_dir/nhc_track mkdir $run_dir/coops_ssh + + logfile=$run_dir/versions.info + version $logfile $L_IMG_DIR/info.sif stormevents + version $logfile $L_IMG_DIR/prep.sif stormevents + version $logfile $L_IMG_DIR/prep.sif ensembleperturbation +# version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh echo $run_dir } @@ -30,6 +42,7 @@ run_dir=$(init $tag) echo $run_dir singularity run $SINGULARITY_BINDFLAGS $L_IMG_DIR/info.sif \ + hurricane_data \ --date-range-outpath $run_dir/setup/dates.csv \ --track-outpath $run_dir/nhc_track/hurricane-track.dat \ --swath-outpath $run_dir/windswath \ From 88aa964e862fb8d9c2c981f3c1483abdd88b3efe Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 10 Jun 2024 15:52:35 +0000 Subject: [PATCH 28/54] Add suffix to rundir --- singularity/scripts/workflow.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index a9ac78e..2d2fafd 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -38,6 +38,8 @@ function init { uuid=$(uuidgen) tag=${storm}_${year}_${uuid} +suffix=$3 +if [ ! 
-z $suffix ]; then tag=${tag}_${suffix}; fi run_dir=$(init $tag) echo $run_dir From da43e4b16cb1deb401b50b0eb6e6920721cdabf3 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 10 Jun 2024 19:12:14 +0000 Subject: [PATCH 29/54] Versioning for SCHISM --- singularity/scripts/schism.sbatch | 1 + singularity/scripts/workflow.sh | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index 467d86a..9d1e3d3 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -22,6 +22,7 @@ export MV2_ENABLE_AFFINITY=0 ulimit -s unlimited date +$BIN_DIR/pschism_PAHM_TVD-VL -v > outputs/solver.version mpirun -np $SLURM_NTASKS $BIN_DIR/pschism_PAHM_TVD-VL 4 if [ $? -eq 0 ]; then diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index 2d2fafd..e1d248e 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -23,6 +23,7 @@ function init { local run_dir=/nhc/Soroosh.Mani/runs/$1 mkdir $run_dir # mkdir $run_dir/downloads + mkdir $run_dir/slurm mkdir $run_dir/mesh mkdir $run_dir/setup mkdir $run_dir/nhc_track @@ -33,6 +34,8 @@ function init { version $logfile $L_IMG_DIR/prep.sif stormevents version $logfile $L_IMG_DIR/prep.sif ensembleperturbation # version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh + echo "SCHISM: see solver.version each outputs dir" >> $logfile + echo $run_dir } @@ -80,7 +83,12 @@ else fi MESH_KWDS+=" --out ${run_dir}/mesh" export MESH_KWDS -sbatch --wait --job-name=mesh_$tag --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif $L_SCRIPT_DIR/mesh.sbatch +sbatch \ + --output "${run_dir}/slurm/slurm-%j.mesh.out" \ + --wait \ + --job-name=mesh_$tag \ + --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif \ + $L_SCRIPT_DIR/mesh.sbatch echo "Download necessary data..." 
@@ -113,6 +121,7 @@ PREP_KWDS+=" --pahm-model $pahm_model" export PREP_KWDS # NOTE: We need to wait because run jobs depend on perturbation dirs! setup_id=$(sbatch \ + --output "${run_dir}/slurm/slurm-%j.setup.out" \ --wait \ --job-name=prep_$tag \ --parsable \ @@ -128,6 +137,7 @@ SCHISM_SHARED_ENV+=",IMG=$L_IMG_DIR/solve.sif" SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" spinup_id=$(sbatch \ --parsable \ + --output "${run_dir}/slurm/slurm-%j.spinup.out" \ --job-name=spinup_$tag \ -d afterok:$setup_id \ --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$run_dir/setup/ensemble.dir/spinup",SCHISM_EXEC="$spinup_exec" \ @@ -138,6 +148,7 @@ joblist="" for i in $run_dir/setup/ensemble.dir/runs/*; do jobid=$( sbatch --parsable -d afterok:$spinup_id \ + --output "${run_dir}/slurm/slurm-%j.run-$(basename $i).out" \ --job-name="run_$(basename $i)_$tag" \ --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$i",SCHISM_EXEC="$hotstart_exec" \ $L_SCRIPT_DIR/schism.sbatch @@ -150,6 +161,7 @@ done # Post processing sbatch \ --parsable \ + --output "${run_dir}/slurm/slurm-%j.post.out" \ --job-name=post_$tag \ -d afterok${joblist} \ --export=ALL,IMG="$L_IMG_DIR/prep.sif",ENSEMBLE_DIR="$run_dir/setup/ensemble.dir/" \ From c1b50d8de75999b1605d50da3efc75b52acbbc1c Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 11 Jun 2024 14:26:49 +0000 Subject: [PATCH 30/54] Reorg toward native env --- singularity/scripts/build.sh | 6 ++-- singularity/scripts/input.conf | 15 ++++++++-- singularity/scripts/mesh.sbatch | 4 +-- singularity/scripts/post.sbatch | 4 +-- singularity/scripts/prep.sbatch | 4 +-- singularity/scripts/schism.sbatch | 28 +++++++----------- singularity/scripts/workflow.sh | 49 +++++++++++++++++++++---------- 7 files changed, 64 insertions(+), 46 deletions(-) mode change 100644 => 100755 singularity/scripts/build.sh diff --git a/singularity/scripts/build.sh b/singularity/scripts/build.sh old mode 100644 new mode 100755 index 2f295c5..a51aa35 --- a/singularity/scripts/build.sh +++ 
b/singularity/scripts/build.sh @@ -2,9 +2,9 @@ L_DEF_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/ L_IMG_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/imgs mkdir -p $L_IMG_DIR -for i in prep; do +for i in info; do pushd $L_DEF_DIR/$i/ -# sudo singularity build $L_IMG_DIR/$i.sif $i.def - singularity build --fakeroot $L_IMG_DIR/$i.sif $i.def + sudo singularity build $L_IMG_DIR/$i.sif $i.def +# singularity build --fakeroot $L_IMG_DIR/$i.sif $i.def popd done diff --git a/singularity/scripts/input.conf b/singularity/scripts/input.conf index 42065e8..f99d311 100644 --- a/singularity/scripts/input.conf +++ b/singularity/scripts/input.conf @@ -13,23 +13,32 @@ sample_rule='korobov' spinup_exec='pschism_PAHM_TVD-VL' hotstart_exec='pschism_PAHM_TVD-VL' +hpc_solver_nnodes=3 +hpc_solver_ntasks=108 +hpc_account='' +hpc_partition='compute' + +RUN_OUT=/nhc/Soroosh.Mani/runs/ DATA=/nhc/static_data # Paths as local variables L_NWM_DATASET=$DATA/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb L_TPXO_DATASET=$DATA/tpxo L_LEADTIMES_DATASET=$DATA/lead.json -L_TRACK_DIR=$DATA/tracks_adj_rmw +L_TRACK_DIR=INVALD #$DATA/tracks_adj_rmw L_DEM_HI=$DATA/dem/NCEI_1_9th/*.tif L_DEM_LO=$DATA/dem/GEBCO/*.tif L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 L_MESH_LO=$DATA/grid/WNAT_1km.14 L_SHP_DIR=$DATA/shape -L_IMG_DIR=/nhc/singularity_images +L_IMG_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/imgs L_SCRIPT_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/scripts # Environment export SINGULARITY_BINDFLAGS="--bind /nhc" export TMPDIR=/nhc/.tmp # redirect OCSMESH temp files +# Update PATH +export PATH=$PATH:/nhc/bin/ + # Modules -L_SOLVE_MODULES="openmpi/4.1.4" +L_SOLVE_MODULES="intel/2022.1.2 impi/2022.1.2 netcdf" diff --git a/singularity/scripts/mesh.sbatch b/singularity/scripts/mesh.sbatch index bc6d70a..5543bf3 100644 --- a/singularity/scripts/mesh.sbatch +++ b/singularity/scripts/mesh.sbatch @@ -1,8 +1,8 @@ 
#!/bin/bash #SBATCH --parsable -#SBATCH --nodes=1 #SBATCH --exclusive -#SBATCH --time=02:00:00 +#SBATCH --time=00:30:00 +#SBATCH --nodes=1 set -ex diff --git a/singularity/scripts/post.sbatch b/singularity/scripts/post.sbatch index 64df18b..5d0436e 100644 --- a/singularity/scripts/post.sbatch +++ b/singularity/scripts/post.sbatch @@ -1,9 +1,7 @@ #!/bin/bash #SBATCH --parsable +#SBATCH --time=05:00:00 #SBATCH --nodes=1 -#SBATCH --ntasks=36 -#SBATCH --cpus-per-task=1 -#SBATCH --time=04:00:00 set -ex diff --git a/singularity/scripts/prep.sbatch b/singularity/scripts/prep.sbatch index 6487952..cd1fe8d 100644 --- a/singularity/scripts/prep.sbatch +++ b/singularity/scripts/prep.sbatch @@ -1,8 +1,8 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --nodes=1 #SBATCH --exclusive -#SBATCH --time=02:00:00 +#SBATCH --time=00:30:00 +#SBATCH --nodes=1 set -ex diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index 9d1e3d3..c94b2f5 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -1,29 +1,27 @@ #!/bin/bash #SBATCH --parsable -#SBATCH --nodes=3 -#SBATCH --ntasks=108 -#SBATCH --cpus-per-task=1 #SBATCH --exclusive #SBATCH --time=03:00:00 set -ex -pushd ${SCHISM_DIR} +SCHISM_DIR=$1 +pushd $SCHISM_DIR mkdir -p outputs -export BIN_DIR=/nhc/bin/ -module purge -module load intel/2022.1.2 impi/2022.1.2 -module load netcdf -module list +if [ ! -z $MODULES ]; then + module purge + module load $MODULES + module list +fi export MV2_ENABLE_AFFINITY=0 ulimit -s unlimited date -$BIN_DIR/pschism_PAHM_TVD-VL -v > outputs/solver.version -mpirun -np $SLURM_NTASKS $BIN_DIR/pschism_PAHM_TVD-VL 4 +${SCHISM_EXEC} -v > outputs/solver.version +mpirun -np $SLURM_NTASKS ${SCHISM_EXEC} 4 if [ $? -eq 0 ]; then echo "Combining outputs..." @@ -32,16 +30,10 @@ if [ $? 
-eq 0 ]; then if ls hotstart* >/dev/null 2>&1; then times=$(ls hotstart_* | grep -o "hotstart[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) for i in $times; do - $BIN_DIR/combine_hotstart7 --iteration $i + combine_hotstart7 --iteration $i done fi popd - -# singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ -# expect -f /scripts/combine_gr3.exp maxelev 1 -# singularity exec ${SINGULARITY_BINDFLAGS} ${IMG} \ -# expect -f /scripts/combine_gr3.exp maxdahv 3 -# mv maxdahv.gr3 maxelev.gr3 -t outputs fi diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index e1d248e..182b0d4 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -7,33 +7,53 @@ source $THIS_SCRIPT_DIR/input.conf if [ $use_wwm == 1 ]; then hotstart_exec='pschism_WWM_PAHM_TVD-VL'; fi -# PATH -export PATH=$L_SCRIPT_DIR:$PATH - # Processing... mkdir -p $TMPDIR +# CHECK VER +### pip install --quiet --report - --dry-run --no-deps -r requirements.txt | jq -r '.install' + +# CHECK BIN +# combine_hotstart7 +# pschism ... + function version { logfile=$1 echo -n "`basename $2`: " >> $logfile singularity run $2 pip list | grep $3 >> $logfile } +function add_sbatch_header { + fnm=${2##*\/} + awk '!found && /^#SBATCH/ { print "#SBATCH '$1'"; found=1 } 1' $2 > /tmp/$fnm + mv /tmp/$fnm $2 +} + function init { - local run_dir=/nhc/Soroosh.Mani/runs/$1 + local run_dir=$RUN_OUT/$1 mkdir $run_dir -# mkdir $run_dir/downloads mkdir $run_dir/slurm mkdir $run_dir/mesh mkdir $run_dir/setup mkdir $run_dir/nhc_track mkdir $run_dir/coops_ssh + for i in $L_SCRIPT_DIR/*.sbatch; do + d=$run_dir/slurm/${i##*\/} + cp $i $d + if [ ! -z $hpc_partition ]; then + add_sbatch_header "--parition=$hpc_partition" $d + fi + if [ ! 
-z $hpc_account ]; then + add_sbatch_header "--account=$hpc_account" $d + fi + done + logfile=$run_dir/versions.info version $logfile $L_IMG_DIR/info.sif stormevents version $logfile $L_IMG_DIR/prep.sif stormevents version $logfile $L_IMG_DIR/prep.sif ensembleperturbation -# version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh + version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh echo "SCHISM: see solver.version each outputs dir" >> $logfile echo $run_dir @@ -88,7 +108,7 @@ sbatch \ --wait \ --job-name=mesh_$tag \ --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif \ - $L_SCRIPT_DIR/mesh.sbatch + $run_dir/slurm/mesh.sbatch echo "Download necessary data..." @@ -126,7 +146,7 @@ setup_id=$(sbatch \ --job-name=prep_$tag \ --parsable \ --export=ALL,PREP_KWDS,STORM=$storm,YEAR=$year,IMG="$L_IMG_DIR/prep.sif" \ - $L_SCRIPT_DIR/prep.sbatch \ + $run_dir/slurm/prep.sbatch \ ) @@ -136,12 +156,13 @@ SCHISM_SHARED_ENV+="ALL" SCHISM_SHARED_ENV+=",IMG=$L_IMG_DIR/solve.sif" SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" spinup_id=$(sbatch \ + --nodes $solver_nnodes --ntasks $solver_ntasks \ --parsable \ --output "${run_dir}/slurm/slurm-%j.spinup.out" \ --job-name=spinup_$tag \ -d afterok:$setup_id \ - --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$run_dir/setup/ensemble.dir/spinup",SCHISM_EXEC="$spinup_exec" \ - $L_SCRIPT_DIR/schism.sbatch + --export=$SCHISM_SHARED_ENV,SCHISM_EXEC="$spinup_exec" \ + $run_dir/slurm/schism.sbatch "$run_dir/setup/ensemble.dir/spinup" ) joblist="" @@ -150,13 +171,11 @@ for i in $run_dir/setup/ensemble.dir/runs/*; do sbatch --parsable -d afterok:$spinup_id \ --output "${run_dir}/slurm/slurm-%j.run-$(basename $i).out" \ --job-name="run_$(basename $i)_$tag" \ - --export=$SCHISM_SHARED_ENV,SCHISM_DIR="$i",SCHISM_EXEC="$hotstart_exec" \ - $L_SCRIPT_DIR/schism.sbatch + --export=$SCHISM_SHARED_ENV,SCHISM_EXEC="$hotstart_exec" \ + $run_dir/slurm/schism.sbatch "$i" ) joblist+=":$jobid" done -#echo "Wait for ${joblist}" -#srun -d afterok${joblist} --pty 
sleep 1 # Post processing sbatch \ @@ -165,4 +184,4 @@ sbatch \ --job-name=post_$tag \ -d afterok${joblist} \ --export=ALL,IMG="$L_IMG_DIR/prep.sif",ENSEMBLE_DIR="$run_dir/setup/ensemble.dir/" \ - $L_SCRIPT_DIR/post.sbatch + $run_dir/slurm/post.sbatch From 195f5ac6f5d6cc7941b1634a8e9fe431abe7f17c Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 11 Jun 2024 19:02:21 +0000 Subject: [PATCH 31/54] Fix typo, remove ocsmesh version --- singularity/scripts/workflow.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index 182b0d4..4fd865b 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -42,7 +42,7 @@ function init { d=$run_dir/slurm/${i##*\/} cp $i $d if [ ! -z $hpc_partition ]; then - add_sbatch_header "--parition=$hpc_partition" $d + add_sbatch_header "--partition=$hpc_partition" $d fi if [ ! -z $hpc_account ]; then add_sbatch_header "--account=$hpc_account" $d @@ -53,7 +53,7 @@ function init { version $logfile $L_IMG_DIR/info.sif stormevents version $logfile $L_IMG_DIR/prep.sif stormevents version $logfile $L_IMG_DIR/prep.sif ensembleperturbation - version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh +# version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh echo "SCHISM: see solver.version each outputs dir" >> $logfile echo $run_dir From 66d34fd93629289b7289e50414b36a8b12fb3102 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 11 Jun 2024 19:48:05 +0000 Subject: [PATCH 32/54] More bugfix --- singularity/scripts/input.conf | 2 +- singularity/scripts/workflow.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/singularity/scripts/input.conf b/singularity/scripts/input.conf index f99d311..b888d99 100644 --- a/singularity/scripts/input.conf +++ b/singularity/scripts/input.conf @@ -16,7 +16,7 @@ hotstart_exec='pschism_PAHM_TVD-VL' hpc_solver_nnodes=3 hpc_solver_ntasks=108 hpc_account='' -hpc_partition='compute' 
+hpc_partition='' RUN_OUT=/nhc/Soroosh.Mani/runs/ DATA=/nhc/static_data diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index 4fd865b..a7755e9 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -154,9 +154,9 @@ echo "Launching runs" SCHISM_SHARED_ENV="" SCHISM_SHARED_ENV+="ALL" SCHISM_SHARED_ENV+=",IMG=$L_IMG_DIR/solve.sif" -SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" +SCHISM_SHARED_ENV+=",MODULES=\"$L_SOLVE_MODULES\"" spinup_id=$(sbatch \ - --nodes $solver_nnodes --ntasks $solver_ntasks \ + --nodes $hpc_solver_nnodes --ntasks $hpc_solver_ntasks \ --parsable \ --output "${run_dir}/slurm/slurm-%j.spinup.out" \ --job-name=spinup_$tag \ From 12ac0590c1351f8a2eab282959ef88ed7c411ef2 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Wed, 12 Jun 2024 15:00:30 +0000 Subject: [PATCH 33/54] Remove temp file --- singularity/ocsmesh/.ocsmesh.def.swp | Bin 12288 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 singularity/ocsmesh/.ocsmesh.def.swp diff --git a/singularity/ocsmesh/.ocsmesh.def.swp b/singularity/ocsmesh/.ocsmesh.def.swp deleted file mode 100644 index 345fa1dadf6508a5f26f9eaa20f4558c31d32c95..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2zi%Ej7{^@}R6+rP5M?t-)1j5`-ZyVcLPVqnl&S?1B2ua%kb=+W_dPdv_F?;) zWPt^Vg+G7+v48|K42X@P3@jkT%7Pe?7}%KLdq3XfeFKG5Ody~1iRC*#{_*iWC%uZ3 z-u0{NbTPU>a6LrGuOH0Fk1u^Wb90Z7En|&wS+vfzsvqC!Dn^B8u5DbPEz_A0Cg;fl z>-Bp5{y6T<$qr-3b()yH*yvOuDrL?aTd-YYcZRvyjh)h?D(6=D{n!XsNSBSvD3!x; zr=bi6f`MZiI7J2v{a)R8ZuSZK$ipumGcGg>27-ZLAQ%V+f`MQl7zhS}f&YU6U!EXu zVIlW)i&^T*6GzIf8j4^b7zhS}fnXpQ2nK?IU?3O>27-ZLAQ<=$8sI4*ntG9900Y3lY48~M@c}}<2Va5D!6)E7@HTh@l;9FL z3BEl=$Y+>L7!9Xw&3JNmw39NX4HLZdm6gm?zN|bg_t6s%#5-YC z>6<RMf)uN_oJ=g&{pgqW-;-__blid=F`fbl%1XU|ez?$A``U6X?z zOI1>(ov&L(<3p;=)2XGNS=+e2y|KE!_LqxoCpf|c*s}*#L?*=!Y*6}Snxq#8Jv!&A z#7R$wYL6zCYmqer)o~&>TGGt>(k;g^?qpRG2~)({aLyo_6$Z1%x!S2Y+JZ7Wq&L;b z@mQg(gZL3B{em 
zr8IBT;K=ZBS(Q<_Pm@aJ>GpUw?SPXn>Rs&KlqPzxhr^}Ci;MlIXw&!KZdfYyXjj>k zcT&1ti4fGLP{E6YW5W;MPR917YkF>6bld5=_&m&a{B?ar-rLWPU Date: Thu, 13 Jun 2024 21:16:28 +0000 Subject: [PATCH 34/54] Fix solver script call --- singularity/scripts/schism.sbatch | 2 +- singularity/scripts/workflow.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/singularity/scripts/schism.sbatch b/singularity/scripts/schism.sbatch index c94b2f5..fef7e43 100644 --- a/singularity/scripts/schism.sbatch +++ b/singularity/scripts/schism.sbatch @@ -10,7 +10,7 @@ pushd $SCHISM_DIR mkdir -p outputs -if [ ! -z $MODULES ]; then +if [ ! -z "$MODULES" ]; then module purge module load $MODULES module list diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index a7755e9..ccd6370 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -154,14 +154,14 @@ echo "Launching runs" SCHISM_SHARED_ENV="" SCHISM_SHARED_ENV+="ALL" SCHISM_SHARED_ENV+=",IMG=$L_IMG_DIR/solve.sif" -SCHISM_SHARED_ENV+=",MODULES=\"$L_SOLVE_MODULES\"" +SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" spinup_id=$(sbatch \ --nodes $hpc_solver_nnodes --ntasks $hpc_solver_ntasks \ --parsable \ --output "${run_dir}/slurm/slurm-%j.spinup.out" \ --job-name=spinup_$tag \ -d afterok:$setup_id \ - --export=$SCHISM_SHARED_ENV,SCHISM_EXEC="$spinup_exec" \ + --export="$SCHISM_SHARED_ENV",SCHISM_EXEC="$spinup_exec" \ $run_dir/slurm/schism.sbatch "$run_dir/setup/ensemble.dir/spinup" ) @@ -171,7 +171,7 @@ for i in $run_dir/setup/ensemble.dir/runs/*; do sbatch --parsable -d afterok:$spinup_id \ --output "${run_dir}/slurm/slurm-%j.run-$(basename $i).out" \ --job-name="run_$(basename $i)_$tag" \ - --export=$SCHISM_SHARED_ENV,SCHISM_EXEC="$hotstart_exec" \ + --export="$SCHISM_SHARED_ENV",SCHISM_EXEC="$hotstart_exec" \ $run_dir/slurm/schism.sbatch "$i" ) joblist+=":$jobid" From 97444607a60966f8e099c46784219357f861e2c0 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 14 Jun 2024 
02:06:06 +0000 Subject: [PATCH 35/54] Add missed parameters for hotstart sbatch command --- singularity/scripts/workflow.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/singularity/scripts/workflow.sh b/singularity/scripts/workflow.sh index ccd6370..c626530 100755 --- a/singularity/scripts/workflow.sh +++ b/singularity/scripts/workflow.sh @@ -169,6 +169,7 @@ joblist="" for i in $run_dir/setup/ensemble.dir/runs/*; do jobid=$( sbatch --parsable -d afterok:$spinup_id \ + --nodes $hpc_solver_nnodes --ntasks $hpc_solver_ntasks \ --output "${run_dir}/slurm/slurm-%j.run-$(basename $i).out" \ --job-name="run_$(basename $i)_$tag" \ --export="$SCHISM_SHARED_ENV",SCHISM_EXEC="$hotstart_exec" \ From 1f996549009368d0f36722943fbd176336a4ab3c Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 14 Jun 2024 19:49:40 +0000 Subject: [PATCH 36/54] Remove stale old files --- ansible/ansible.cfg | 12 - .../inventory/group_vars/prefect-agent/vars | 6 - ansible/inventory/group_vars/thalassa.yml | 10 - ansible/inventory/inventory | 10 - ansible/playbooks/provision-prefect-agent.yml | 222 ---- ansible/playbooks/thalassa.yml | 5 - ansible/requirements.yml | 17 - ansible/roles/prefect-agent/tasks/main.yml | 18 - ansible/roles/thalassa/tasks/main.yml | 76 -- docker/info/docker/.env | 1 - docker/info/docker/Dockerfile | 57 - docker/info/docker/docker-compose.yml | 15 - docker/info/docker/hurricane_data.py | 242 ---- docker/info/environment.yml | 14 - docker/main/.env | 10 - docker/main/docker-compose.yml | 101 -- docker/ocsmesh/docker/.env | 1 - docker/ocsmesh/docker/Dockerfile | 76 -- docker/ocsmesh/docker/docker-compose.yml | 30 - docker/ocsmesh/docker/hurricane_mesh.py | 548 -------- docker/ocsmesh/environment.yml | 29 - docker/post/docker/Dockerfile | 66 - docker/post/docker/__init__.py | 0 docker/post/docker/defn.py | 79 -- docker/post/docker/generate_viz.py | 1004 --------------- docker/post/docker/hurricane_funcs.py | 273 ---- docker/post/environment.yml | 96 -- 
docker/prefect-aws/Dockerfile | 72 -- docker/prefect-aws/entrypoint.sh | 4 - docker/prefect-aws/environment.yml | 12 - docker/prefect-aws/pw_client.py | 112 -- docker/pyschism/docker/.env | 1 - docker/pyschism/docker/Dockerfile | 68 - docker/pyschism/docker/analyze_ensemble.py | 354 ------ docker/pyschism/docker/combine_ensemble.py | 32 - docker/pyschism/docker/docker-compose.yml | 28 - docker/pyschism/docker/refs/param.nml | 69 -- docker/pyschism/docker/refs/wwminput.nml | 667 ---------- docker/pyschism/docker/setup_ensemble.py | 276 ----- docker/pyschism/docker/setup_model.py | 533 -------- docker/pyschism/docker/wwm.py | 276 ----- docker/pyschism/environment.yml | 39 - docker/schism/docker/.env | 2 - docker/schism/docker/Dockerfile | 106 -- docker/schism/docker/combine_gr3.exp | 52 - docker/schism/docker/docker-compose.yml | 22 - docker/schism/docker/entrypoint.sh | 45 - docs/workflow.pdf | Bin 217520 -> 0 bytes prefect/workflow/__init__.py | 0 prefect/workflow/conf.py | 68 - prefect/workflow/flows/__init__.py | 2 - prefect/workflow/flows/infra.py | 82 -- prefect/workflow/flows/jobs/__init__.py | 2 - prefect/workflow/flows/jobs/ecs.py | 510 -------- prefect/workflow/flows/jobs/pw.py | 367 ------ prefect/workflow/flows/utils.py | 164 --- prefect/workflow/main.py | 294 ----- prefect/workflow/pw_client.py | 112 -- prefect/workflow/tasks/__init__.py | 1 - prefect/workflow/tasks/data.py | 210 ---- prefect/workflow/tasks/infra.py | 309 ----- prefect/workflow/tasks/jobs.py | 276 ----- prefect/workflow/tasks/params.py | 25 - prefect/workflow/tasks/utils.py | 105 -- rdhpcs/clusters/mesh_cluster.json | 24 - rdhpcs/clusters/mesh_init.sh | 44 - rdhpcs/clusters/mesh_lustre.json | 5 - rdhpcs/clusters/schism_cluster.json | 23 - rdhpcs/clusters/schism_init.sh | 39 - rdhpcs/clusters/schism_lustre.json | 5 - rdhpcs/scripts/combine_gr3.exp | 1 - rdhpcs/scripts/compile_schism.sh | 110 -- rdhpcs/scripts/hurricane_mesh.py | 555 --------- rdhpcs/scripts/mesh.sbatch | 19 - 
rdhpcs/scripts/schism.sbatch | 65 - terraform/backend/backend.tf | 87 -- terraform/main.tf | 1100 ----------------- terraform/outputs.tf | 30 - terraform/ud/userdata-ocsmesh.txt | 3 - terraform/ud/userdata-schism.txt | 3 - terraform/ud/userdata-viz.txt | 3 - terraform/ud/userdata-wf.txt | 3 - terraform/variables.tf | 9 - 83 files changed, 10443 deletions(-) delete mode 100644 ansible/ansible.cfg delete mode 100644 ansible/inventory/group_vars/prefect-agent/vars delete mode 100644 ansible/inventory/group_vars/thalassa.yml delete mode 100644 ansible/inventory/inventory delete mode 100644 ansible/playbooks/provision-prefect-agent.yml delete mode 100644 ansible/playbooks/thalassa.yml delete mode 100644 ansible/requirements.yml delete mode 100644 ansible/roles/prefect-agent/tasks/main.yml delete mode 100644 ansible/roles/thalassa/tasks/main.yml delete mode 100644 docker/info/docker/.env delete mode 100644 docker/info/docker/Dockerfile delete mode 100644 docker/info/docker/docker-compose.yml delete mode 100644 docker/info/docker/hurricane_data.py delete mode 100644 docker/info/environment.yml delete mode 100644 docker/main/.env delete mode 100644 docker/main/docker-compose.yml delete mode 100644 docker/ocsmesh/docker/.env delete mode 100644 docker/ocsmesh/docker/Dockerfile delete mode 100644 docker/ocsmesh/docker/docker-compose.yml delete mode 100755 docker/ocsmesh/docker/hurricane_mesh.py delete mode 100644 docker/ocsmesh/environment.yml delete mode 100644 docker/post/docker/Dockerfile delete mode 100644 docker/post/docker/__init__.py delete mode 100644 docker/post/docker/defn.py delete mode 100644 docker/post/docker/generate_viz.py delete mode 100644 docker/post/docker/hurricane_funcs.py delete mode 100644 docker/post/environment.yml delete mode 100755 docker/prefect-aws/Dockerfile delete mode 100644 docker/prefect-aws/entrypoint.sh delete mode 100644 docker/prefect-aws/environment.yml delete mode 100644 docker/prefect-aws/pw_client.py delete mode 100644 
docker/pyschism/docker/.env delete mode 100644 docker/pyschism/docker/Dockerfile delete mode 100644 docker/pyschism/docker/analyze_ensemble.py delete mode 100644 docker/pyschism/docker/combine_ensemble.py delete mode 100644 docker/pyschism/docker/docker-compose.yml delete mode 100755 docker/pyschism/docker/refs/param.nml delete mode 100755 docker/pyschism/docker/refs/wwminput.nml delete mode 100644 docker/pyschism/docker/setup_ensemble.py delete mode 100755 docker/pyschism/docker/setup_model.py delete mode 100644 docker/pyschism/docker/wwm.py delete mode 100644 docker/pyschism/environment.yml delete mode 100644 docker/schism/docker/.env delete mode 100644 docker/schism/docker/Dockerfile delete mode 100755 docker/schism/docker/combine_gr3.exp delete mode 100644 docker/schism/docker/docker-compose.yml delete mode 100644 docker/schism/docker/entrypoint.sh delete mode 100755 docs/workflow.pdf delete mode 100644 prefect/workflow/__init__.py delete mode 100644 prefect/workflow/conf.py delete mode 100644 prefect/workflow/flows/__init__.py delete mode 100644 prefect/workflow/flows/infra.py delete mode 100644 prefect/workflow/flows/jobs/__init__.py delete mode 100644 prefect/workflow/flows/jobs/ecs.py delete mode 100644 prefect/workflow/flows/jobs/pw.py delete mode 100644 prefect/workflow/flows/utils.py delete mode 100644 prefect/workflow/main.py delete mode 100755 prefect/workflow/pw_client.py delete mode 100644 prefect/workflow/tasks/__init__.py delete mode 100644 prefect/workflow/tasks/data.py delete mode 100644 prefect/workflow/tasks/infra.py delete mode 100644 prefect/workflow/tasks/jobs.py delete mode 100644 prefect/workflow/tasks/params.py delete mode 100644 prefect/workflow/tasks/utils.py delete mode 100644 rdhpcs/clusters/mesh_cluster.json delete mode 100644 rdhpcs/clusters/mesh_init.sh delete mode 100644 rdhpcs/clusters/mesh_lustre.json delete mode 100644 rdhpcs/clusters/schism_cluster.json delete mode 100644 rdhpcs/clusters/schism_init.sh delete mode 100644 
rdhpcs/clusters/schism_lustre.json delete mode 120000 rdhpcs/scripts/combine_gr3.exp delete mode 100755 rdhpcs/scripts/compile_schism.sh delete mode 100755 rdhpcs/scripts/hurricane_mesh.py delete mode 100644 rdhpcs/scripts/mesh.sbatch delete mode 100644 rdhpcs/scripts/schism.sbatch delete mode 100644 terraform/backend/backend.tf delete mode 100644 terraform/main.tf delete mode 100644 terraform/outputs.tf delete mode 100644 terraform/ud/userdata-ocsmesh.txt delete mode 100644 terraform/ud/userdata-schism.txt delete mode 100644 terraform/ud/userdata-viz.txt delete mode 100644 terraform/ud/userdata-wf.txt delete mode 100644 terraform/variables.tf diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg deleted file mode 100644 index b4b360f..0000000 --- a/ansible/ansible.cfg +++ /dev/null @@ -1,12 +0,0 @@ -[defaults] -become_user=root -ask_pass=False -ask_become_pass=False -roles_path=./roles -ask_vault_pass=False -action_plugins=./action_plugins -filter_plugins=./filter_plugins -callback_plugins=./callback_plugins -host_key_checking=False -collections_paths=./ -interpreter_python=auto_silent diff --git a/ansible/inventory/group_vars/prefect-agent/vars b/ansible/inventory/group_vars/prefect-agent/vars deleted file mode 100644 index b789105..0000000 --- a/ansible/inventory/group_vars/prefect-agent/vars +++ /dev/null @@ -1,6 +0,0 @@ ---- - -prefect_agent_ec2_key: odss-ec2-prefect-agent -prefect_agent_image_type: ami-03fe4d5b1d229063a -prefect_agent_region: us-west-2 -prefect_agent_vpc_subnet_id: subnet-98f04dc5 diff --git a/ansible/inventory/group_vars/thalassa.yml b/ansible/inventory/group_vars/thalassa.yml deleted file mode 100644 index 29049f3..0000000 --- a/ansible/inventory/group_vars/thalassa.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -thalassa_image: yosoyjay/thalassa -thalassa_image_version: runtime-20210818 - -thalassa_internal_port: 8000 -thalassa_exposed_port: 10001 - -thalassa_data_mnt: /mnt/data -thalassa_corral_mnt: /mnt/corral \ No newline at end of file 
diff --git a/ansible/inventory/inventory b/ansible/inventory/inventory deleted file mode 100644 index 23b121a..0000000 --- a/ansible/inventory/inventory +++ /dev/null @@ -1,10 +0,0 @@ -[local] -localhost ansible_connection=local ansible_python_interpreter=python - -[thalassa] -noaa-stofs.tacc.utexas.edu - -[prefect_agent] - -[vars_from_terraform] -localhost ansible_connection=local ansible_python_interpreter=python diff --git a/ansible/playbooks/provision-prefect-agent.yml b/ansible/playbooks/provision-prefect-agent.yml deleted file mode 100644 index bc48500..0000000 --- a/ansible/playbooks/provision-prefect-agent.yml +++ /dev/null @@ -1,222 +0,0 @@ ---- - -# NOTE: Some of the variables are only defined in inventory vars for -# all groups. This file is updated by terraform execution -# -# Run from ansible directory (after terraform vars gen) using -# ansible-playbook -i inventory/inventory ./playbooks/provision-prefect-agent.yml -# -# Docker image authentication take from StackOverflow 63723674 - -- name: Setup EC2 play - hosts: local - gather_facts: false - - vars: - ec2_prefix: odssm-ec2-prefect-agent - ec2_inventory_name: local_ec2_agent - user_name: ec2-user - # ansible var cannot use '-' - ansible_group: prefect_agent - ansible_vars_group: vars_from_terraform - - - tasks: - - name: Setup EC2 task - block: - - name: Verify connectivity to EC2 - ansible.builtin.wait_for: - host: "{{ ec2_public_ip }}" - port: 22 - state: started - - - name: Add instance to group - ansible.builtin.add_host: - name: "{{ ec2_inventory_name }}" - ansible_host: "{{ ec2_public_ip }}" - ansible_user: "{{ user_name }}" - instance_name: "{{ ec2_prefix }}" - groups: - - "{{ ansible_group }}" - - "{{ ansible_vars_group }}" - - - name: Print instance group - debug: - var: ansible_group - - tags: - - setup - - -- name: Configure Prefect agent host - hosts: prefect_agent - gather_facts: True - become: True - - # TODO: Use --key instead of --token for Prefect - vars: - key: "{{ lookup('env', 
'PREFECT_AGENT_TOKEN') }}" - rdhpcs_s3_access_key_id: "{{ lookup('env', 'RDHPCS_S3_ACCESS_KEY_ID') }}" - rdhpcs_s3_secret_access_key: "{{ lookup('env', 'RDHPCS_S3_SECRET_ACCESS_KEY') }}" - pw_api_key: "{{ lookup('env', 'PW_API_KEY') }}" - efs_mount_dir: /efs - docker_image: "{{ prefect_image }}:v0.4" - cdsapi_url: "{{ lookup('env', 'CDSAPI_URL') }}" - cdsapi_key: "{{ lookup('env', 'CDSAPI_KEY') }}" - - tasks: - - name: Install packages - yum: - name: - - docker - - python-pip - - python-devel - - "@Development tools" - - nfs-utils - - amazon-efs-utils - state: present - - - name: Start Docker - ansible.builtin.systemd: - name: docker - state: started - - - name: Start NFS (used to mount EFS) - ansible.builtin.systemd: - name: nfs - state: started - - - name: Update pip - pip: - name: pip - extra_args: --upgrade - - - name: Install wheel - pip: - name: wheel - - - name: Install Ansible - pip: - name: - - ansible - - - name: Install Docker python package - pip: - name: - - docker - # Needed to deal with older requests which is not installed via pip - extra_args: --ignore-installed - - - name: Create mount directory - file: - path: "{{ efs_mount_dir }}" - state: directory - mode: 0755 - - - name: Mount EFS volume - mount: - name: "{{ efs_mount_dir }}" - src: "{{ efs_id }}.efs.{{ aws_default_region }}.amazonaws.com:/" - fstype: nfs4 - opts: nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport - state: mounted - - - name: Make sure volume has rw permissions - file: - path: "{{ efs_mount_dir }}" - state: directory - mode: 0755 - - - name: ECR Docker authentication - shell: "aws ecr get-authorization-token" - environment: - AWS_DEFAULT_REGION: "{{ aws_default_region }}" - # fixes aws-cli command output issue - AWS_PAGER: "" - register: ecr_command - - - set_fact: - ecr_authorization_data: "{{ (ecr_command.stdout | from_json).authorizationData[0] }}" - - - set_fact: - ecr_credentials: "{{ (ecr_authorization_data.authorizationToken | 
b64decode).split(':') }}" - - - name: Login to ECR on Docker - docker_login: - registry: "{{ ecr_authorization_data.proxyEndpoint.rpartition('//')[2] }}" - username: "{{ ecr_credentials[0] }}" - password: "{{ ecr_credentials[1] }}" - reauthorize: yes - - - name: Register Prefect Local agent - docker_container: - name: "prefect-agent-local" - image: "{{ docker_image }}" - container_default_behavior: "compatibility" - env: - PREFECT_AGENT_TOKEN: "{{ key }} " - AWS_DEFAULT_REGION: "{{ aws_default_region }}" - # fixes aws-cli command output issue - AWS_PAGER: "" - CDSAPI_URL: "{{ cdsapi_url }}" - CDSAPI_KEY: "{{ cdsapi_key }}" - - volumes: - - /efs:/efs - command: > - prefect agent local start - --key "{{ key }}" - --label tacc-odssm-local - --name tacc-odssm-agent-local - --log-level INFO - state: started - - - name: Register Prefect Local agent 2 - docker_container: - name: "prefect-agent-local-for-rdhpcs" - image: "{{ docker_image }}" - container_default_behavior: "compatibility" - env: - PREFECT_AGENT_TOKEN: "{{ key }} " # TODO: Remove? 
- AWS_ACCESS_KEY_ID: "{{ rdhpcs_s3_access_key_id }}" - AWS_SECRET_ACCESS_KEY: "{{ rdhpcs_s3_secret_access_key }}" - PW_API_KEY: "{{ pw_api_key }}" - # fixes aws-cli command output issue - AWS_PAGER: "" - CDSAPI_URL: "{{ cdsapi_url }}" - CDSAPI_KEY: "{{ cdsapi_key }}" - volumes: - - /efs:/efs - command: > - prefect agent local start - --key "{{ key }}" - --label tacc-odssm-local-for-rdhpcs - --name tacc-odssm-agent-local-for-rdhpcs - --log-level INFO - state: started - - name: Register Prefect ECS agents - docker_container: - name: "prefect-agent-ecs" - image: "{{ docker_image }}" - container_default_behavior: "compatibility" - env: - PREFECT_AGENT_TOKEN: "{{ key }} " - AWS_DEFAULT_REGION: "{{ aws_default_region }}" - # fixes aws-cli command output issue - AWS_PAGER: "" - volumes: - - /efs:/efs - command: > - prefect agent ecs start - --launch-type EC2 - --env AWS_DEFAULT_REGION="{{ aws_default_region }}" - --key "{{ key }}" - --label tacc-odssm-ecs - --name tacc-odssm-agent-ecs - --log-level INFO - --cluster workflow - state: started - - - tags: - - config diff --git a/ansible/playbooks/thalassa.yml b/ansible/playbooks/thalassa.yml deleted file mode 100644 index b2b3642..0000000 --- a/ansible/playbooks/thalassa.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- hosts: thalassa - roles: - - thalassa diff --git a/ansible/requirements.yml b/ansible/requirements.yml deleted file mode 100644 index 17d1501..0000000 --- a/ansible/requirements.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -collections: -- name: ansible.posix - version: 1.2.0 - -- name: community.docker - version: 1.5.0 - -- name: community.general - version: 3.0.0 - -- name: amazon.aws - version: 1.5.0 - -- name: community.aws - version: 1.5.0 diff --git a/ansible/roles/prefect-agent/tasks/main.yml b/ansible/roles/prefect-agent/tasks/main.yml deleted file mode 100644 index ceb9fab..0000000 --- a/ansible/roles/prefect-agent/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- - -- block: - - name: Provision micro 
instance on EC2 - ec2: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - key_name: "{{ prefect_agent_ec2_key }}" - instance_type: t2.micro - image: "{{ prefect_agent_image_type }}" - wait: yes - count: 1 - region: "{{ prefect_agent_region }}" - assign_public_ip: yes - id: odssm-ec2-prefect-agent - - tags: - - provision \ No newline at end of file diff --git a/ansible/roles/thalassa/tasks/main.yml b/ansible/roles/thalassa/tasks/main.yml deleted file mode 100644 index e584739..0000000 --- a/ansible/roles/thalassa/tasks/main.yml +++ /dev/null @@ -1,76 +0,0 @@ ---- - -- block: - - name: Add EPEL repo (Ansible prerequisite) - yum_repository: - name: epel - description: EPEL YUM repo - baseurl: https://download.fedoraproject.org/pub/epel/7/x86_64/ - - - name: Install default packages - yum: - name: - - yum-utils - - tmux - - htop - - "@Development tools" - - python-devel - state: present - - - name: Install Ansible - yum: - name: ansible - state: present - - - name: Install pip - yum: - name: - - python-pip - state: present - - - name: Install pip packages for Docker + Ansible - pip: - name: - - docker==4.4.4 - - docker-compose==1.26.2 - - pyrsistent==0.16.1 - - requests==2.25.1 - - websocket-client==0.32.0 - - - name: Add Docker repo - yum_repository: - name: Docker - description: Docker Repo - skip_if_unavailable: yes - baseurl: https://download.docker.com/linux/centos/docker-ce.repo - - - name: Install Docker - yum: - name: - - docker-ce - - docker-ce-cli - - containerd.io - state: present - - - name: Start Docker - ansible.builtin.systemd: - name: docker - state: started - - - name: Run Thalassa - docker_container: - name: "thalassa-{{ deploy_env }}" - image: "{{ thalassa_image }}:{{ thalassa_image_version }}" - state: started - restart: yes - restart_policy: unless-stopped - pull: true - ports: "{{ thalassa_exposed_port }}:{{ thalassa_internal_port }}" - volumes: - - "{{ thalassa_data_mnt }}:/data" - - "{{ thalassa_corral_mnt 
}}:/data/corral" - command: "thalassa serve --websocket-origin '*' --port {{ thalassa_internal_port }} --no-show" - - become: true - tags: - - deploy diff --git a/docker/info/docker/.env b/docker/info/docker/.env deleted file mode 100644 index 065abaf..0000000 --- a/docker/info/docker/.env +++ /dev/null @@ -1 +0,0 @@ -HURRINFO_USER=hurricaner diff --git a/docker/info/docker/Dockerfile b/docker/info/docker/Dockerfile deleted file mode 100644 index 1f1a17f..0000000 --- a/docker/info/docker/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -FROM continuumio/miniconda3:4.10.3-alpine - -# Create a non-root user -ARG username=hurricaner -ARG uid=1000 -ARG gid=100 - -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apk update && apk upgrade && apk add \ - git - -# New user -RUN adduser --disabled-password --gecos "Non-root user" --uid $UID --home $HOME $USER - -# Create a project directory inside user home -ENV PROJECT_DIR $HOME/app -RUN mkdir $PROJECT_DIR -RUN chown $UID:$GID $PROJECT_DIR -WORKDIR $PROJECT_DIR - - -# Build the conda environment -ENV ENV_PREFIX $HOME/icogsc - -COPY environment.yml /tmp/ -RUN chown $UID:$GID /tmp/environment.yml - -RUN conda install mamba -n base -c conda-forge && \ - mamba update --name base --channel defaults conda && \ - mamba env create --prefix $ENV_PREFIX --file /tmp/environment.yml --force && \ - mamba clean --all --yes - -RUN conda run -p $ENV_PREFIX --no-capture-output \ - pip install stormevents==2.1.2 - -ENV CONDA_DIR /opt/conda - -RUN conda clean --all -RUN apk del git - -RUN mkdir -p $PROJECT_DIR/scripts -COPY docker/hurricane_data.py ${PROJECT_DIR}/scripts/ -ENV PYTHONPATH ${PROJECT_DIR}/scripts/ - - -RUN mkdir -p $PROJECT_DIR/io - -USER $USER - - -# Ref: https://pythonspeed.com/articles/activate-conda-dockerfile/ -ENTRYPOINT [ "conda", "run", "-p", "$ENV_PREFIX", "--no-capture-output", "python", "-m", "hurricane_data" ] diff --git a/docker/info/docker/docker-compose.yml 
b/docker/info/docker/docker-compose.yml deleted file mode 100644 index 3c97ff3..0000000 --- a/docker/info/docker/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: "3.9" -services: - hurricane-info-noaa: - build: - context: .. - dockerfile: docker/Dockerfile - args: - - username=${HURRINFO_USER} - - uid=1000 - - gid=100 -# command: '/bin/bash' - volumes: - - type: bind - source: /home/ec2-user/data/test/hurricanes - target: /home/${HURRINFO_USER}/app/io/output diff --git a/docker/info/docker/hurricane_data.py b/docker/info/docker/hurricane_data.py deleted file mode 100644 index 84e93c9..0000000 --- a/docker/info/docker/hurricane_data.py +++ /dev/null @@ -1,242 +0,0 @@ -"""User script to get hurricane info relevant to the workflow -This script gether information about: - - Hurricane track - - Hurricane windswath - - Hurricane event dates - - Stations info for historical hurricane -""" - -import sys -import logging -import pathlib -import argparse -import tempfile -from datetime import datetime, timedelta - -import pandas as pd -import geopandas as gpd -from searvey.coops import COOPS_TidalDatum -from searvey.coops import COOPS_TimeZone -from searvey.coops import COOPS_Units -from shapely.geometry import box -from stormevents import StormEvent -from stormevents.nhc import VortexTrack - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -logging.basicConfig( - stream=sys.stdout, - format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', - datefmt='%Y-%m-%d:%H:%M:%S') - -EFS_MOUNT_POINT = pathlib.Path('~').expanduser() / f'app/io/output' - -def main(args): - - name_or_code = args.name_or_code - year = args.year - date_out = EFS_MOUNT_POINT / args.date_range_outpath - track_out = EFS_MOUNT_POINT / args.track_outpath - swath_out = EFS_MOUNT_POINT / args.swath_outpath - sta_dat_out = EFS_MOUNT_POINT / args.station_data_outpath - sta_loc_out = EFS_MOUNT_POINT / args.station_location_outpath - is_past_forecast = 
args.past_forecast - hr_before_landfall = args.hours_before_landfall - - if is_past_forecast and hr_before_landfall < 0: - hr_before_landfall = 48 - - ne_low = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) - shp_US = ne_low[ne_low.name.isin(['United States of America', 'Puerto Rico'])].unary_union - - logger.info("Fetching hurricane info...") - event = None - if year == 0: - event = StormEvent.from_nhc_code(name_or_code) - else: - event = StormEvent(name_or_code, year) - nhc_code = event.nhc_code - logger.info("Fetching a-deck track info...") - - # TODO: Get user input for whether its forecast or now! - now = datetime.now() - if (is_past_forecast or (now - event.start_date < timedelta(days=30))): - temp_track = event.track(file_deck='a') - adv_avail = temp_track.unfiltered_data.advisory.unique() - adv_order = ['OFCL', 'HWRF', 'HMON', 'CARQ'] - advisory = adv_avail[0] - for adv in adv_order: - if adv in adv_avail: - advisory = adv - break - - if advisory == "OFCL" and "CARQ" not in adv_avail: - raise ValueError( - "OFCL advisory needs CARQ for fixing missing variables!" - ) - - # NOTE: Track taken from `StormEvent` object is up to now only. - # See GitHub issue #57 for StormEvents - track = VortexTrack(nhc_code, file_deck='a', advisories=[advisory]) - - df_dt = pd.DataFrame(columns=['date_time']) - - if is_past_forecast: - - logger.info( - f"Creating {advisory} track for {hr_before_landfall}" - +" hours before landfall forecast..." 
- ) - onland_adv_tracks = track.data[track.data.intersects(shp_US)] - candidates = onland_adv_tracks.groupby('track_start_time').nth(0).reset_index() - candidates['timediff'] = candidates.datetime - candidates.track_start_time - track_start = candidates[ - candidates['timediff'] >= timedelta(hours=hr_before_landfall) - ].track_start_time.iloc[-1] - - gdf_track = track.data[track.data.track_start_time == track_start] - # Append before track from previous forecasts: - gdf_track = pd.concat(( - track.data[ - (track.data.track_start_time < track_start) - & (track.data.forecast_hours == 0) - ], - gdf_track - )) - df_dt['date_time'] = (track.start_date, track.end_date) - - - logger.info("Fetching water level measurements from COOPS stations...") - coops_ssh = event.coops_product_within_isotach( - product='water_level', wind_speed=34, - datum=COOPS_TidalDatum.NAVD, - units=COOPS_Units.METRIC, - time_zone=COOPS_TimeZone.GMT, - ) - - else: - # Get the latest track forecast - track_start = track.data.track_start_time.max() - gdf_track = track.data[track.data.track_start_time == track_start] - - # Put both dates as now(), for pyschism to setup forecast - df_dt['date_time'] = (now, now) - - coops_ssh = None - - # NOTE: Fake besttrack: Since PySCHISM supports "BEST" track - # files for its parametric forcing, write track as "BEST" after - # fixing the OFCL by CARQ through StormEvents - gdf_track.advisory = 'BEST' - gdf_track.forecast_hours = 0 - track = VortexTrack(storm=gdf_track, file_deck='b', advisories=['BEST']) - - windswath_dict = track.wind_swaths(wind_speed=34) - windswaths = windswath_dict['BEST'] # Faked BEST - logger.info(f"Fetching {advisory} windswath...") - windswath_time = min(pd.to_datetime(list(windswaths.keys()))) - windswath = windswaths[ - windswath_time.strftime("%Y%m%dT%H%M%S") - ] - - else: - - logger.info("Fetching b-deck track info...") - - df_dt = pd.DataFrame(columns=['date_time']) - df_dt['date_time'] = (event.start_date, event.end_date) - - 
logger.info("Fetching BEST windswath...") - track = event.track(file_deck='b') - windswath_dict = track.wind_swaths(wind_speed=34) - # NOTE: event.start_date (first advisory date) doesn't - # necessarily match the windswath key which comes from track - # start date for the first advisory (at least in 2021!) - windswaths = windswath_dict['BEST'] - latest_advistory_stamp = max(pd.to_datetime(list(windswaths.keys()))) - windswath = windswaths[ - latest_advistory_stamp.strftime("%Y%m%dT%H%M%S") - ] - - logger.info("Fetching water level measurements from COOPS stations...") - coops_ssh = event.coops_product_within_isotach( - product='water_level', wind_speed=34, - datum=COOPS_TidalDatum.NAVD, - units=COOPS_Units.METRIC, - time_zone=COOPS_TimeZone.GMT, - ) - - logger.info("Writing relevant data to files...") - df_dt.to_csv(date_out) - track.to_file(track_out) - gs = gpd.GeoSeries(windswath) - gdf_windswath = gpd.GeoDataFrame( - geometry=gs, data={'RADII': len(gs) * [34]}, crs="EPSG:4326" - ) - gdf_windswath.to_file(swath_out) - if coops_ssh is not None: - coops_ssh.to_netcdf(sta_dat_out, 'w') - coops_ssh[['x', 'y']].to_dataframe().drop(columns=['nws_id']).to_csv( - sta_loc_out, header=False, index=False) - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - - parser.add_argument( - "name_or_code", help="name or NHC code of the storm", type=str) - parser.add_argument( - "year", help="year of the storm", type=int) - - parser.add_argument( - "--date-range-outpath", - help="output date range", - type=pathlib.Path, - required=True - ) - - parser.add_argument( - "--track-outpath", - help="output hurricane track", - type=pathlib.Path, - required=True - ) - - parser.add_argument( - "--swath-outpath", - help="output hurricane windswath", - type=pathlib.Path, - required=True - ) - - parser.add_argument( - "--station-data-outpath", - help="output station data", - type=pathlib.Path, - required=True - ) - - parser.add_argument( - "--station-location-outpath", - 
help="output station location", - type=pathlib.Path, - required=True - ) - - parser.add_argument( - "--past-forecast", - help="Get forecast data for a past storm", - action='store_true', - ) - - parser.add_argument( - "--hours-before-landfall", - help="Get forecast data for a past storm at this many hour before landfall", - type=int, - ) - - args = parser.parse_args() - - main(args) diff --git a/docker/info/environment.yml b/docker/info/environment.yml deleted file mode 100644 index 22107ab..0000000 --- a/docker/info/environment.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - cartopy - - cfunits - - gdal - - geopandas - - geos - - proj - - pygeos - - pyproj - - python=3.9 - - shapely>=1.8 diff --git a/docker/main/.env b/docker/main/.env deleted file mode 100644 index 84422dc..0000000 --- a/docker/main/.env +++ /dev/null @@ -1,10 +0,0 @@ -ONDEMAND_USER=ondemand-user -HURRICANE_NAME=florence -HURRICANE_YEAR=2018 -SCHISM_NPROCS=16 - -OUT_DIR=/home/ec2-user/data/test/hurricanes -SHAPE_DIR=/home/ec2-user/data/test/static/shape -DEM_DIR=/home/ec2-user/data/dem/ -TPXO_DIR=/home/ec2-user/data/test/static/tpxo -NWM_DIR=/home/ec2-user/data/test/static/nwm diff --git a/docker/main/docker-compose.yml b/docker/main/docker-compose.yml deleted file mode 100644 index 071a213..0000000 --- a/docker/main/docker-compose.yml +++ /dev/null @@ -1,101 +0,0 @@ -version: "3.9" -services: - hurricane-info-noaa: - build: - context: ../info - dockerfile: docker/Dockerfile - args: - - username=${ONDEMAND_USER} - - uid=1000 - - gid=100 -# command: '/bin/bash' - command: ${HURRICANE_NAME} ${HURRICANE_YEAR} - volumes: - - type: bind - source: ${OUT_DIR} - target: /home/${ONDEMAND_USER}/app/io/output - - ocsmesh-noaa: -# depends_on: -# - hurricane-info-noaa - build: - context: ../ocsmesh - dockerfile: docker/Dockerfile - args: - - username=${ONDEMAND_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: ${OUT_DIR} - 
target: /home/${ONDEMAND_USER}/app/io/hurricanes - - type: bind - source: ${SHAPE_DIR} - target: /home/${ONDEMAND_USER}/app/io/shape - - type: bind - source: ${DEM_DIR} - target: /home/${ONDEMAND_USER}/app/io/dem - - pyschism-noaa: -# depends_on: -# - ocsmesh-noaa -# - hurricane-info-noaa - build: - context: ../pyschism - dockerfile: docker/Dockerfile - args: - - username=${ONDEMAND_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: ${OUT_DIR} - target: /home/${ONDEMAND_USER}/app/io/hurricanes - - type: bind - source: ${TPXO_DIR} - target: /home/${ONDEMAND_USER}/.local/share/tpxo - - type: bind - source: ${NWM_DIR} - target: /home/${ONDEMAND_USER}/.local/share/nwm - - schism-noaa: -# depends_on: -# - pyschism-noaa -# - ocsmesh-noaa -# - hurricane-info-noaa - environment: - - SCHISM_NPROCS=${SCHISM_NPROCS} - cap_add: - - SYS_PTRACE - build: - context: ../schism - dockerfile: docker/Dockerfile - args: - - username=${ONDEMAND_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: ${OUT_DIR} - target: /home/${ONDEMAND_USER}/app/io/hurricanes - odssm-post-noaa: -# depends_on: -# - pyschism-noaa -# - ocsmesh-noaa -# - hurricane-info-noaa -# - schism-noaa - build: - context: ../post - dockerfile: docker/Dockerfile - args: - - username=${ONDEMAND_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: ${OUT_DIR} - target: /home/${ONDEMAND_USER}/app/io/hurricanes diff --git a/docker/ocsmesh/docker/.env b/docker/ocsmesh/docker/.env deleted file mode 100644 index edd28fd..0000000 --- a/docker/ocsmesh/docker/.env +++ /dev/null @@ -1 +0,0 @@ -GEOMESH_USER=ocsmesher diff --git a/docker/ocsmesh/docker/Dockerfile b/docker/ocsmesh/docker/Dockerfile deleted file mode 100644 index a0dfed8..0000000 --- a/docker/ocsmesh/docker/Dockerfile +++ /dev/null @@ -1,76 +0,0 @@ -FROM continuumio/miniconda3:4.10.3p0-alpine - -# Create a non-root user -ARG username=ocsmesher -ARG uid=1000 -ARG 
gid=100 - -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apk update && apk upgrade && apk --no-cache add \ - git \ - gcc \ - g++ \ - make \ - cmake \ - libstdc++ - -# New user -RUN adduser -D -g "Non-root user" -u $UID -h $HOME $USER - -# Create a project directory inside user home -ENV PROJECT_DIR $HOME/app -RUN mkdir $PROJECT_DIR -RUN chown $UID:$GID $PROJECT_DIR -WORKDIR $PROJECT_DIR - - -# Build the conda environment -ENV ENV_PREFIX $HOME/icogsc - -COPY environment.yml /tmp/ -RUN chown $UID:$GID /tmp/environment.yml - -RUN conda install mamba -n base -c conda-forge && \ - mamba update --name base --channel defaults conda && \ - mamba env create --prefix $ENV_PREFIX --file /tmp/environment.yml --force && \ - mamba clean --all --yes - -ENV CONDA_DIR /opt/conda - -RUN git clone https://github.com/dengwirda/jigsaw-python.git && \ - git -C jigsaw-python checkout f875719 && \ - conda run -p $ENV_PREFIX --no-capture-output \ - python3 jigsaw-python/setup.py build_external && \ - cp jigsaw-python/external/jigsaw/bin/* $ENV_PREFIX/bin && \ - cp jigsaw-python/external/jigsaw/lib/* $ENV_PREFIX/lib && \ - conda run -p $ENV_PREFIX --no-capture-output \ - pip install ./jigsaw-python && \ - rm -rf jigsaw-python -RUN conda run -p $ENV_PREFIX --no-capture-output \ - pip install ocsmesh>=1.0.5 - -RUN conda clean --all && apk del \ - git \ - gcc \ - g++ \ - make \ - cmake - - -RUN mkdir -p $PROJECT_DIR/scripts -COPY docker/hurricane_mesh.py ${PROJECT_DIR}/scripts/ -ENV PYTHONPATH ${PROJECT_DIR}/scripts/ - - -RUN mkdir -p $PROJECT_DIR/io - -USER $USER - - -# Ref: https://pythonspeed.com/articles/activate-conda-dockerfile/ -ENTRYPOINT [ "conda", "run", "-p", "$ENV_PREFIX", "--no-capture-output", "python", "-m", "hurricane_mesh" ] diff --git a/docker/ocsmesh/docker/docker-compose.yml b/docker/ocsmesh/docker/docker-compose.yml deleted file mode 100644 index c371a74..0000000 --- a/docker/ocsmesh/docker/docker-compose.yml +++ 
/dev/null @@ -1,30 +0,0 @@ -version: "3.9" -services: - ocsmesh-noaa: - build: - context: .. - dockerfile: docker/Dockerfile - args: - - username=${GEOMESH_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/windswath - target: /home/${GEOMESH_USER}/app/io/input/hurricane - - type: bind - source: /home/ec2-user/data/test/static/shape - target: /home/${GEOMESH_USER}/app/io/input/shape - - type: bind - source: /home/ec2-user/data/dem/gebco - target: /home/${GEOMESH_USER}/app/io/input/dem/GEBCO - - type: bind - source: /home/ec2-user/data/dem/ncei19 - target: /home/${GEOMESH_USER}/app/io/input/dem/NCEI19 - - type: bind - source: /home/ec2-user/data/dem/ncei19/tileindex_NCEI_ninth_Topobathy_2014.zip - target: /home/${GEOMESH_USER}/app/io/input/dem/tileindex_NCEI_ninth_Topobathy_2014.zip - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/mesh - target: /home/${GEOMESH_USER}/app/io/output diff --git a/docker/ocsmesh/docker/hurricane_mesh.py b/docker/ocsmesh/docker/hurricane_mesh.py deleted file mode 100755 index 49b1608..0000000 --- a/docker/ocsmesh/docker/hurricane_mesh.py +++ /dev/null @@ -1,548 +0,0 @@ -#!/usr/bin/env python - -# Import modules -import logging -import os -import pathlib -import argparse -import sys -import warnings - -import numpy as np - -from fiona.drvsupport import supported_drivers -from shapely.geometry import box, MultiLineString -from shapely.ops import polygonize, unary_union, linemerge -from pyproj import CRS, Transformer -import geopandas as gpd - -from ocsmesh import Raster, Geom, Hfun, JigsawDriver, Mesh, utils -from ocsmesh.cli.subset_n_combine import SubsetAndCombine - -EFS_MOUNT_POINT = pathlib.Path('~').expanduser() / f'app/io' - -# Setup modules -# Enable KML driver -#from https://stackoverflow.com/questions/72960340/attributeerror-nonetype-object-has-no-attribute-drvsupport-when-using-fiona -supported_drivers['KML'] = 'rw' 
-supported_drivers['LIBKML'] = 'rw' - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -logging.basicConfig( - stream=sys.stdout, - format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', - datefmt='%Y-%m-%d:%H:%M:%S') - - -# Helper functions -def get_raster(path, crs=None): - rast = Raster(path) - if crs and rast.crs != crs: - rast.warp(crs) - return rast - - -def get_rasters(paths, crs=None): - rast_list = list() - for p in paths: - rast_list.append(get_raster(p, crs)) - return rast_list - - -def _generate_mesh_boundary_and_write( - out_dir, mesh_path, mesh_crs='EPSG:4326', threshold=-1000 - ): - - mesh = Mesh.open(str(mesh_path), crs=mesh_crs) - - logger.info('Calculating boundary types...') - mesh.boundaries.auto_generate(threshold=threshold) - - logger.info('Write interpolated mesh to disk...') - mesh.write( - str(out_dir/f'mesh_w_bdry.grd'), format='grd', overwrite=True - ) - - -def _write_mesh_box(out_dir, mesh_path, mesh_crs='EPSG:4326'): - mesh = Mesh.open(str(mesh_path), crs=mesh_crs) - domain_box = box(*mesh.get_multipolygon().bounds) - gdf_domain_box = gpd.GeoDataFrame( - geometry=[domain_box], crs=mesh.crs) - gdf_domain_box.to_file(out_dir/'domain_box') - - -# Main script -def main(args, clients): - - cmd = args.cmd - logger.info(f"The mesh command is {cmd}.") - - clients_dict = {c.script_name: c for c in clients} - - storm_name = str(args.name).lower() - storm_year = str(args.year).lower() - out_dir = EFS_MOUNT_POINT / args.out - - final_mesh_name = 'hgrid.gr3' - write_mesh_box = False - - if cmd == 'subset_n_combine': - final_mesh_name = 'final_mesh.2dm' - write_mesh_box = True - - args.rasters = [ - i for i in (EFS_MOUNT_POINT / args.rasters_dir / 'gebco').iterdir() if i.suffix == '.tif' - ] - - - args.out = out_dir - args.fine_mesh = EFS_MOUNT_POINT / args.fine_mesh - args.coarse_mesh = EFS_MOUNT_POINT / args.coarse_mesh - args.region_of_interset = EFS_MOUNT_POINT / args.region_of_interset - elif 
cmd == 'hurricane_mesh': - final_mesh_name = 'mesh_no_bdry.2dm' - - if cmd in clients_dict: - clients_dict[cmd].run(args) - else: - raise ValueError(f'Invalid meshing command specified: <{cmd}>') - - #TODO interpolate DEM? - if write_mesh_box: - _write_mesh_box(out_dir, out_dir / final_mesh_name) - _generate_mesh_boundary_and_write(out_dir, out_dir / final_mesh_name) - - -class HurricaneMesher: - - @property - def script_name(self): - return 'hurricane_mesh' - - def __init__(self, sub_parser): - - this_parser = sub_parser.add_parser(self.script_name) - - this_parser.add_argument( - "--nprocs", type=int, help="Number of parallel threads to use when " - "computing geom and hfun.") - - this_parser.add_argument( - "--geom-nprocs", type=int, help="Number of processors used when " - "computing the geom, overrides --nprocs argument.") - - this_parser.add_argument( - "--hfun-nprocs", type=int, help="Number of processors used when " - "computing the hfun, overrides --nprocs argument.") - - this_parser.add_argument( - "--hmax", type=float, help="Maximum mesh size.", - default=20000) - - this_parser.add_argument( - "--hmin-low", type=float, default=1500, - help="Minimum mesh size for low resolution region.") - - this_parser.add_argument( - "--rate-low", type=float, default=2e-3, - help="Expansion rate for low resolution region.") - - this_parser.add_argument( - "--contours", type=float, nargs=2, - help="Contour specification applied to whole domain; " - "contour mesh size needs to be greater that hmin-low", - metavar="SPEC") - - this_parser.add_argument( - "--transition-elev", "-e", type=float, default=-200, - help="Cut off elev for high resolution region") - - this_parser.add_argument( - "--hmin-high", type=float, default=300, - help="Minimum mesh size for high resolution region.") - - this_parser.add_argument( - "--rate-high", type=float, default=1e-3, - help="Expansion rate for high resolution region") - - this_parser.add_argument( - "--shapes-dir", - help="top-level 
directory that contains shapefiles") - - this_parser.add_argument( - "--windswath", - help="path to NHC windswath shapefile") - - # Similar to the argument for SubsetAndCombine - this_parser.add_argument( - "--out", help="mesh operation output directory") - - def run(self, args): - - nprocs = args.nprocs - - geom_nprocs = nprocs - if args.geom_nprocs: - nprocs = args.geom_nprocs - geom_nprocs = -1 if nprocs == None else nprocs - - hfun_nprocs = nprocs - if args.hfun_nprocs: - nprocs = args.hfun_nprocs - hfun_nprocs = -1 if nprocs == None else nprocs - - storm_name = str(args.name).lower() - storm_year = str(args.year).lower() - - dem_dir = EFS_MOUNT_POINT / args.rasters_dir - shp_dir = EFS_MOUNT_POINT / args.shapes_dir - hurr_info = EFS_MOUNT_POINT / args.windswath - out_dir = EFS_MOUNT_POINT / args.out - - coarse_geom = shp_dir / 'base_geom' - fine_geom = shp_dir / 'high_geom' - - gebco_paths = [i for i in (dem_dir / 'gebco').iterdir() if str(i).endswith('.tif')] - cudem_paths = [i for i in (dem_dir / 'ncei19').iterdir() if str(i).endswith('.tif')] - all_dem_paths = [*gebco_paths, *cudem_paths] - tile_idx_path = f'zip://{str(dem_dir)}/tileindex_NCEI_ninth_Topobathy_2014.zip' - - # Specs - wind_kt = 34 - filter_factor = 3 - max_n_hires_dem = 150 - - - # Geom (hardcoded based on prepared hurricane meshing spec) - z_max_lo = 0 - z_max_hi = 10 - z_max = max(z_max_lo, z_max_hi) - - # Hfun - hmax = args.hmax - - hmin_lo = args.hmin_low - rate_lo = args.rate_low - - contour_specs_lo = [] - if args.contours is not None: - for c_elev, m_size in args.contours: - if hmin_lo > m_size: - warnings.warn( - "Specified contour must have a mesh size" - f" larger than minimum low res size: {hmin_low}") - contour_specs_lo.append((c_elev, rate_lo, m_size)) - - else: - contour_specs_lo = [ - (-4000, rate_lo, 10000), - (-1000, rate_lo, 6000), - (-10, rate_lo, hmin_lo) - ] - - const_specs_lo = [ - (hmin_lo, 0, z_max) - ] - - cutoff_hi = args.transition_elev - hmin_hi = args.hmin_high - 
rate_hi = args.rate_high - - contour_specs_hi = [ - (0, rate_hi, hmin_hi) - ] - const_specs_hi = [ - (hmin_hi, 0, z_max) - ] - - - # Read inputs - logger.info("Reading input shapes...") - gdf_fine = gpd.read_file(fine_geom) - gdf_coarse = gpd.read_file(coarse_geom) - tile_idx = gpd.read_file(tile_idx_path) - - logger.info("Reading hurricane info...") - gdf = gpd.read_file(hurr_info) - gdf_wind_kt = gdf[gdf.RADII.astype(int) == wind_kt] - - # Simplify high resolution geometry - logger.info("Simplify high-resolution shape...") - gdf_fine = gpd.GeoDataFrame( - geometry=gdf_fine.to_crs("EPSG:3857").simplify(tolerance=hmin_hi / 2).buffer(0).to_crs(gdf_fine.crs), - crs=gdf_fine.crs) - - - # Calculate refinement region - logger.info(f"Create polygon from {wind_kt}kt windswath polygon...") - ext_poly = [i for i in polygonize([ext for ext in gdf_wind_kt.exterior])] - gdf_refine_super_0 = gpd.GeoDataFrame( - geometry=ext_poly, crs=gdf_wind_kt.crs) - - logger.info("Find upstream...") - domain_extent = gdf_fine.to_crs(gdf_refine_super_0.crs).total_bounds - domain_box = box(*domain_extent) - box_tol = 1/1000 * max(domain_extent[2]- domain_extent[0], domain_extent[3] - domain_extent[1]) - gdf_refine_super_0 = gdf_refine_super_0.intersection(domain_box.buffer(-box_tol)) - gdf_refine_super_0.plot() - ext_poly = [i for i in gdf_refine_super_0.explode().geometry] - - dmn_ext = [pl.exterior for mp in gdf_fine.geometry for pl in mp] - wnd_ext = [pl.exterior for pl in ext_poly] - - gdf_dmn_ext = gpd.GeoDataFrame(geometry=dmn_ext, crs=gdf_fine.crs) - gdf_wnd_ext = gpd.GeoDataFrame(geometry=wnd_ext, crs=gdf_wind_kt.crs) - - gdf_ext_over = gpd.overlay(gdf_dmn_ext, gdf_wnd_ext.to_crs(gdf_dmn_ext.crs), how="union") - - gdf_ext_x = gdf_ext_over[gdf_ext_over.intersects(gdf_wnd_ext.to_crs(gdf_ext_over.crs).unary_union)] - - filter_lines_threshold = np.max(gdf_dmn_ext.length) / filter_factor - lnstrs = linemerge([lnstr for lnstr in gdf_ext_x.explode().geometry]) - if not isinstance(lnstrs, 
MultiLineString): - lnstrs = [lnstrs] - lnstrs = [lnstr for lnstr in lnstrs if lnstr.length < filter_lines_threshold] - gdf_hurr_w_upstream = gdf_wnd_ext.to_crs(gdf_ext_x.crs) - gdf_hurr_w_upstream = gdf_hurr_w_upstream.append( - gpd.GeoDataFrame( - geometry=gpd.GeoSeries(lnstrs), - crs=gdf_ext_x.crs - )) - - - gdf_hurr_w_upstream_poly = gpd.GeoDataFrame( - geometry=gpd.GeoSeries(polygonize(gdf_hurr_w_upstream.unary_union)), - crs=gdf_hurr_w_upstream.crs) - - logger.info("Find intersection of domain polygon with impacted area upstream...") - gdf_refine_super_2 = gpd.overlay( - gdf_fine, gdf_hurr_w_upstream_poly.to_crs(gdf_fine.crs), - how='intersection' - ) - - gdf_refine_super_2.to_file(out_dir / 'dmn_hurr_upstream') - - logger.info("Selecting high resolution DEMs...") - gdf_dem_box = gpd.GeoDataFrame( - columns=['geometry', 'path'], - crs=gdf_refine_super_2.crs) - for path in all_dem_paths: - bbox = Raster(path).get_bbox(crs=gdf_dem_box.crs) - gdf_dem_box = gdf_dem_box.append( - gpd.GeoDataFrame( - {'geometry': [bbox], - 'path': str(path)}, - crs=gdf_dem_box.crs) - ) - gdf_dem_box = gdf_dem_box.reset_index() - - lo_res_paths = gebco_paths - - # TODO: use sjoin instead?! - gdf_hi_res_box = gdf_dem_box[gdf_dem_box.geometry.intersects(gdf_refine_super_2.unary_union)].reset_index() - hi_res_paths = gdf_hi_res_box.path.values.tolist() - - - # For refine cut off either use static geom at e.g. 200m depth or instead just use low-res for cut off polygon - - - # Or intersect with full geom? 
(timewise an issue for hfun creation) - logger.info("Calculate refinement area cutoff...") - cutoff_dem_paths = [i for i in gdf_hi_res_box.path.values.tolist() if pathlib.Path(i) in lo_res_paths] - cutoff_geom = Geom( - get_rasters(cutoff_dem_paths), - base_shape=gdf_coarse.unary_union, - base_shape_crs=gdf_coarse.crs, - zmax=cutoff_hi, - nprocs=geom_nprocs) - cutoff_poly = cutoff_geom.get_multipolygon() - - gdf_cutoff = gpd.GeoDataFrame( - geometry=gpd.GeoSeries(cutoff_poly), - crs=cutoff_geom.crs) - - gdf_draft_refine = gpd.overlay(gdf_refine_super_2, gdf_cutoff.to_crs(gdf_refine_super_2.crs), how='difference') - - refine_polys = [pl for pl in gdf_draft_refine.unary_union] - - gdf_final_refine = gpd.GeoDataFrame( - geometry=refine_polys, - crs=gdf_draft_refine.crs) - - - logger.info("Write landfall area to disk...") - gdf_final_refine.to_file(out_dir/'landfall_refine_area') - - gdf_geom = gpd.overlay( - gdf_coarse, - gdf_final_refine.to_crs(gdf_coarse.crs), - how='union') - - domain_box = box(*gdf_fine.total_bounds) - gdf_domain_box = gpd.GeoDataFrame( - geometry=[domain_box], crs=gdf_fine.crs) - gdf_domain_box.to_file(out_dir/'domain_box') - - geom = Geom(gdf_geom.unary_union, crs=gdf_geom.crs) - - - logger.info("Create low-res size function...") - hfun_lo = Hfun( - get_rasters(lo_res_paths), - base_shape=gdf_coarse.unary_union, - base_shape_crs=gdf_coarse.crs, - hmin=hmin_lo, - hmax=hmax, - nprocs=hfun_nprocs, - method='fast') - - logger.info("Add refinement spec to low-res size function...") - for ctr in contour_specs_lo: - hfun_lo.add_contour(*ctr) - hfun_lo.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for const in const_specs_lo: - hfun_lo.add_constant_value(*const) - - # hfun_lo.add_subtidal_flow_limiter(upper_bound=z_max) - # hfun_lo.add_subtidal_flow_limiter(hmin=hmin_lo, upper_bound=z_max) - - - logger.info("Compute low-res size function...") - jig_hfun_lo = hfun_lo.msh_t() - - - logger.info("Write low-res size function to disk...") - 
Mesh(jig_hfun_lo).write( - str(out_dir/f'hfun_lo_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - # For interpolation after meshing and use GEBCO for mesh size calculation in refinement area. - hfun_hi_rast_paths = hi_res_paths - if len(hi_res_paths) > max_n_hires_dem: - hfun_hi_rast_paths = gebco_paths - - logger.info("Create high-res size function...") - hfun_hi = Hfun( - get_rasters(hfun_hi_rast_paths), - base_shape=gdf_final_refine.unary_union, - base_shape_crs=gdf_final_refine.crs, - hmin=hmin_hi, - hmax=hmax, - nprocs=hfun_nprocs, - method='fast') - - # Apply low resolution criteria on hires as ewll - logger.info("Add refinement spec to high-res size function...") - for ctr in contour_specs_lo: - hfun_hi.add_contour(*ctr) - hfun_hi.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for ctr in contour_specs_hi: - hfun_hi.add_contour(*ctr) - hfun_hi.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for const in const_specs_hi: - hfun_hi.add_constant_value(*const) - - # hfun_hi.add_subtidal_flow_limiter(upper_bound=z_max) - - logger.info("Compute high-res size function...") - jig_hfun_hi = hfun_hi.msh_t() - - logger.info("Write high-res size function to disk...") - Mesh(jig_hfun_hi).write( - str(out_dir/f'hfun_hi_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - jig_hfun_lo = Mesh.open(str(out_dir/f'hfun_lo_{hmin_hi}.2dm'), crs="EPSG:4326").msh_t - jig_hfun_hi = Mesh.open(str(out_dir/f'hfun_hi_{hmin_hi}.2dm'), crs="EPSG:4326").msh_t - - - logger.info("Combine size functions...") - gdf_final_refine = gpd.read_file(out_dir/'landfall_refine_area') - - utils.clip_mesh_by_shape( - jig_hfun_hi, - shape=gdf_final_refine.to_crs(jig_hfun_hi.crs).unary_union, - fit_inside=True, - in_place=True) - - jig_hfun_final = utils.merge_msh_t( - jig_hfun_lo, jig_hfun_hi, - drop_by_bbox=False, - can_overlap=False, - check_cross_edges=True) - - - logger.info("Write final size function to disk...") - hfun_mesh = Mesh(jig_hfun_final) - hfun_mesh.write( - 
str(out_dir/f'hfun_comp_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - hfun = Hfun(hfun_mesh) - - logger.info("Generate mesh...") - driver = JigsawDriver(geom=geom, hfun=hfun, initial_mesh=True) - mesh = driver.run() - - - utils.reproject(mesh.msh_t, "EPSG:4326") - mesh.write( - str(out_dir/f'mesh_raw_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - mesh = Mesh.open(str(out_dir/f'mesh_raw_{hmin_hi}.2dm'), crs="EPSG:4326") - - dst_crs = "EPSG:4326" - interp_rast_list = [ - *get_rasters(gebco_paths, dst_crs), - *get_rasters(gdf_hi_res_box.path.values, dst_crs)] - - # TODO: Fix the deadlock issue with multiple cores when interpolating - logger.info("Interpolate DEMs on the generated mesh...") - mesh.interpolate(interp_rast_list, nprocs=1, method='nearest') - - logger.info("Write raw mesh to disk...") - mesh.write( - str(out_dir/f'mesh_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - # Write the same mesh with a generic name - mesh.write( - str(out_dir/f'mesh_no_bdry.2dm'), - format='2dm', - overwrite=True) - - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - parser.add_argument( - "name", help="name of the storm", type=str) - parser.add_argument( - "year", help="year of the storm", type=int) - parser.add_argument( - "--rasters-dir", help="top-level directory that contains rasters") - - subparsers = parser.add_subparsers(dest='cmd') - subset_client = SubsetAndCombine(subparsers) - hurrmesh_client = HurricaneMesher(subparsers) - - args = parser.parse_args() - - logger.info(f"Mesh arguments are {args}.") - - main(args, [hurrmesh_client, subset_client]) diff --git a/docker/ocsmesh/environment.yml b/docker/ocsmesh/environment.yml deleted file mode 100644 index 8b2d996..0000000 --- a/docker/ocsmesh/environment.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - python=3.9 - - gdal - - geos - - proj - - netcdf4 - - udunits2 - - pyproj - - shapely>=1.8,<2 - - rasterio - - fiona - - pygeos - 
- geopandas - - utm - - scipy<1.8 - - numba - - numpy>=1.21 - - matplotlib - - requests - - tqdm - - mpi4py - - pyarrow - - pytz - - geoalchemy2 - - colored-traceback - - typing-extensions diff --git a/docker/post/docker/Dockerfile b/docker/post/docker/Dockerfile deleted file mode 100644 index 89171db..0000000 --- a/docker/post/docker/Dockerfile +++ /dev/null @@ -1,66 +0,0 @@ -FROM continuumio/miniconda3:4.10.3p0-alpine - -# Create a non-root user -ARG username=pyschismer -ARG uid=1000 -ARG gid=100 -ARG post_repo=odssm_post - -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apk update && apk upgrade && apk --no-cache add \ - git \ - gcc \ - g++ \ - make \ - cmake \ - patch \ - libstdc++ - -# New user -RUN adduser -D -g "Non-root user" -u $UID -h $HOME $USER - -# Create a project directory inside user home -ENV PROJECT_DIR $HOME/app -RUN mkdir $PROJECT_DIR -WORKDIR $PROJECT_DIR - - -# Build the conda environment -ENV ENV_PREFIX $HOME/icogsc - -COPY environment.yml /tmp/ -RUN chown $UID:$GID /tmp/environment.yml - -RUN conda install mamba -n base -c conda-forge && \ - mamba update --name base --channel defaults conda && \ - mamba env create --prefix $ENV_PREFIX --file /tmp/environment.yml --force && \ - mamba clean --all --yes - -RUN conda clean --all -RUN apk del git -RUN apk del gcc -RUN apk del g++ -RUN apk del make -RUN apk del cmake - - -RUN mkdir -p $PROJECT_DIR/scripts -COPY docker/*.py ${PROJECT_DIR}/scripts/ -ENV PYTHONPATH ${PROJECT_DIR}/scripts/ - -ENV CONDA_DIR /opt/conda - - -RUN mkdir -p $PROJECT_DIR/io - -USER $USER - -RUN echo "source $CONDA_DIR/etc/profile.d/conda.sh" >> ~/.profile - -# Ref: https://pythonspeed.com/articles/activate-conda-dockerfile/ -ENTRYPOINT ["conda", "run", "-p", "$ENV_PREFIX", "--no-capture-output", "python", "-m", "generate_viz"] diff --git a/docker/post/docker/__init__.py b/docker/post/docker/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git 
a/docker/post/docker/defn.py b/docker/post/docker/defn.py deleted file mode 100644 index acb7e8b..0000000 --- a/docker/post/docker/defn.py +++ /dev/null @@ -1,79 +0,0 @@ -from matplotlib.colors import LinearSegmentedColormap -import matplotlib.pyplot as plt - -cdict = { - 'red': ( - (0.0, 1, 1), - (0.05, 1, 1), - (0.11, 0, 0), - (0.66, 1, 1), - (0.89, 1, 1), - (1, 0.5, 0.5), - ), - 'green': ( - (0.0, 1, 1), - (0.05, 1, 1), - (0.11, 0, 0), - (0.375, 1, 1), - (0.64, 1, 1), - (0.91, 0, 0), - (1, 0, 0), - ), - 'blue': ((0.0, 1, 1), (0.05, 1, 1), (0.11, 1, 1), (0.34, 1, 1), (0.65, 0, 0), (1, 0, 0)), -} - -jetMinWi = LinearSegmentedColormap('my_colormap', cdict, 256) -my_cmap = plt.cm.jet - -# Color code for the point track. -colors_hurricane_condition = { - 'subtropical depression': '#ffff99', - 'tropical depression': '#ffff66', - 'tropical storm': '#ffcc99', - 'subtropical storm': '#ffcc66', - 'hurricane': 'red', - 'major hurricane': 'crimson', -} - -width = 750 -height = 250 - -# Constants -noaa_logo = 'https://www.nauticalcharts.noaa.gov/images/noaa-logo-no-ring-70.png' - -template_track_popup = """ -
-
{} condition

- 'Date: {}
- Condition: {}
- """ - -template_storm_info = """ -
  Storm: {}
-   Year: {}  
-
- """ - -template_fct_info = """ -
  Date: {}UTC
-   FCT : t{}z  
-
- """ - -disclaimer = """ -
  Hurricane Explorer; - NOAA/NOS/OCS
-   Contact: Saeed.Moghimi@noaa.gov  
-   Disclaimer: Experimental product. All configurations and results are pre-decisional.
-
- - """ diff --git a/docker/post/docker/generate_viz.py b/docker/post/docker/generate_viz.py deleted file mode 100644 index 1553e31..0000000 --- a/docker/post/docker/generate_viz.py +++ /dev/null @@ -1,1004 +0,0 @@ -""" -Dynamic map hindcast implementation - -############################################################### -# Original development from https://github.com/ocefpaf/python_hurricane_gis_map -# # Exploring the NHC GIS Data -# -# This notebook aims to demonstrate how to create a simple interactive GIS map with the National Hurricane Center predictions [1] and CO-OPS [2] observations along the Hurricane's path. -# -# -# 1. http://www.nhc.noaa.gov/gis/ -# 2. https://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/ -# -# -# NHC codes storms are coded with 8 letter names: -# - 2 char for region `al` → Atlantic -# - 2 char for number `11` is Irma -# - and 4 char for year, `2017` -# -# Browse http://www.nhc.noaa.gov/gis/archive_wsurge.php?year=2017 to find other hurricanes code. -############################################################### -""" - -__author__ = 'Saeed Moghimi' -__copyright__ = 'Copyright 2020, UCAR/NOAA' -__license__ = 'GPL' -__version__ = '1.0' -__email__ = 'moghimis@gmail.com' - -import argparse -import logging -import os -import sys -import pathlib -import warnings -from glob import glob -from datetime import datetime, timedelta, timezone -from importlib import resources - -import numpy as np -import pandas as pd -import arrow -import f90nml -from bokeh.resources import CDN -from bokeh.plotting import figure -from bokeh.models import Title -from bokeh.embed import file_html -from bokeh.models import Range1d, HoverTool -from branca.element import IFrame -import folium -from folium.plugins import Fullscreen, MarkerCluster, MousePosition -import netCDF4 -import matplotlib as mpl -import matplotlib.tri as Tri -import matplotlib.pyplot as plt -from shapely.geometry import Polygon, LineString, box -from geopandas import GeoDataFrame -import geopandas as 
gpd -from pyschism.mesh import Hgrid -import cfunits -from retrying import retry -from searvey import coops - -import defn as defn -import hurricane_funcs as hurr_f - -_logger = logging.getLogger() -mpl.use('Agg') - -warnings.filterwarnings("ignore", category=DeprecationWarning) - -EFS_MOUNT_POINT = pathlib.Path('~').expanduser() / 'app/io' - -def ceil_dt(date=datetime.now(), delta=timedelta(minutes=30)): - """ - Rounds up the input date based on the `delta` time period tolerance - - Examples - -------- - now = datetime.now() - print(now) - print(ceil_dt(now,timedelta(minutes=30) )) - - """ - - date_min = datetime.min - if date.tzinfo: - date_min = date_min.replace(tzinfo=date.tzinfo) - return date + (date_min - date) % delta - -@retry(stop_max_attempt_number=5, wait_fixed=3000) -def get_coops(start, end, sos_name, units, bbox, datum='NAVD', verbose=True): - """ - function to read COOPS data - We need to retry in case of failure b/c the server cannot handle - the high traffic during hurricane season. - """ - - - coops_stas = coops.coops_stations_within_region(region=box(*bbox)) - # TODO: NAVD 88? 
- coops_data = coops.coops_product_within_region( - 'water_level', region=box(*bbox), start_date=start, end_date=end) - station_names = [ - coops_stas[coops_stas.index == i].name.values[0] - for i in coops_data.nos_id.astype(int).values - ] - staobs_df = coops_data.assign( - {'station_name': ('nos_id', station_names)} - ).reset_coords().drop( - ['f', 's', 'q', 'nws_id'] - ).to_dataframe().reset_index( - level='nos_id' - ).rename( - columns={ - 'x': 'lon', - 'y': 'lat', - 'v': 'ssh', - 't': 'time', - 'nos_id': 'station_code', - } - ).astype( - {'station_code': 'int64'} - ) - staobs_df.index = staobs_df.index.tz_localize(tz=timezone.utc) - - return staobs_df - - - -def make_plot_1line(obs, label=None): - # TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select," - TOOLS = 'crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,reset,save,' - - p = figure( - toolbar_location='above', - x_axis_type='datetime', - width=defn.width, - height=defn.height, - tools=TOOLS, - ) - - if obs.station_code.isna().sum() == 0: - station_code = obs.station_code.array[0] - p.add_layout( - Title(text=f"Station: {station_code}", - text_font_style='italic'), - 'above') - - if obs.station_name.isna().sum() == 0: - station_name = obs.station_name.array[0] - p.add_layout( - Title(text=station_name, text_font_size='10pt'), - 'above') - - p.yaxis.axis_label = label - - obs_val = obs.ssh.to_numpy().squeeze() - - l1 = p.line( - x=obs.index, - y=obs_val, - line_width=5, - line_cap='round', - line_join='round', - legend_label='model', - color='#0000ff', - alpha=0.7, - ) - - minx = obs.index.min() - maxx = obs.index.max() - - p.x_range = Range1d(start=minx, end=maxx) - - p.legend.location = 'top_left' - - p.add_tools(HoverTool(tooltips=[('model', '@y'), ], renderers=[l1], ), ) - return p - - -def make_plot_2line(obs, model=None, label=None, remove_mean_diff=False, bbox_bias=0.0): - # 
TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select," - TOOLS = 'crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,reset,save,' - - p = figure( - toolbar_location='above', - x_axis_type='datetime', - width=defn.width, - height=defn.height, - tools=TOOLS, - ) - - if obs.station_code.isna().sum() == 0: - station_code = obs.station_code.array[0] - p.add_layout( - Title(text=f"Station: {station_code}", - text_font_style='italic'), - 'above') - - if obs.station_name.isna().sum() == 0: - station_name = obs.station_name.array[0] - p.add_layout( - Title(text=station_name, text_font_size='10pt'), - 'above') - - p.yaxis.axis_label = label - - obs_val = obs.ssh.to_numpy().squeeze() - - l1 = p.line( - x=obs.index, - y=obs_val, - line_width=5, - line_cap='round', - line_join='round', - legend_label='obs.', - color='#0000ff', - alpha=0.7, - ) - - if model is not None: - mod_val = model.ssh.to_numpy().squeeze() - - if ('SSH' in label) and remove_mean_diff: - mod_val = mod_val + obs_val.mean() - mod_val.mean() - - if ('SSH' in label) and bbox_bias is not None: - mod_val = mod_val + bbox_bias - - l0 = p.line( - x=model.index, - y=mod_val, - line_width=5, - line_cap='round', - line_join='round', - legend_label='model', - color='#9900cc', - alpha=0.7, - ) - - - minx = max(model.index.min(), obs.index.min()) - maxx = min(model.index.max(), obs.index.max()) - - minx = model.index.min() - maxx = model.index.max() - else: - minx = obs.index.min() - maxx = obs.index.max() - - p.x_range = Range1d(start=minx, end=maxx) - - p.legend.location = 'top_left' - - p.add_tools( - HoverTool(tooltips=[('model', '@y'), ], renderers=[l0], ), - HoverTool(tooltips=[('obs', '@y'), ], renderers=[l1], ), - ) - - return p - - -################# -def make_marker(p, location, fname, color='green', icon='stats'): - html = file_html(p, CDN, fname) - # iframe = IFrame(html , width=defn.width+45+defn.height, height=defn.height+80) - iframe = 
IFrame(html, width=defn.width * 1.1, height=defn.height * 1.2) - # popup = folium.Popup(iframe, max_width=2650+defn.height) - popup = folium.Popup(iframe) - iconm = folium.Icon(color=color, icon=icon) - marker = folium.Marker(location=location, popup=popup, icon=iconm) - return marker - - -############################### -def read_max_water_level_file(fgrd='hgrid.gr3', felev='maxelev.gr3', cutoff=True): - - hgrid = Hgrid.open(fgrd, crs='EPSG:4326') - h = -hgrid.values - bbox = hgrid.get_bbox('EPSG:4326', output_type='bbox') - - elev = Hgrid.open(felev, crs='EPSG:4326') - mzeta = -elev.values - D = mzeta - - #Mask dry nodes - NP = len(mzeta) - idxs = np.where(h < 0) - D[idxs] = np.maximum(0, mzeta[idxs]+h[idxs]) - - idry = np.zeros(NP) - idxs = np.where(mzeta+h <= 1e-6) - idry[idxs] = 1 - - MinVal = np.min(mzeta) - MaxVal = np.max(mzeta) - NumLevels = 21 - - if cutoff: - MinVal = max(MinVal, 0.0) - MaxVal = min(MaxVal, 2.4) - NumLevels = 12 - _logger.info(f'MinVal is {MinVal}') - _logger.info(f'MaxVal is {MaxVal}') - - step = 0.2 # m - levels = np.arange(MinVal, MaxVal + step, step=step) - _logger.info(f'levels is {levels}') - - fig, ax = plt.subplots() - tri = elev.triangulation - mask = np.any(np.where(idry[tri.triangles], True, False), axis=1) - tri.set_mask(mask) - - contour = ax.tricontourf( - tri, - mzeta, - vmin=MinVal, - vmax=MaxVal, - levels=levels, - cmap=defn.my_cmap, - extend='max') - - return contour, MinVal, MaxVal, levels - - -############################################################# -def contourf_to_geodataframe(contour_obj): - - """Transform a `matplotlib.contour.ContourSet` to a GeoDataFrame""" - - polygons, colors = [], [] - for i, polygon in enumerate(contour_obj.collections): - mpoly = [] - for path in polygon.get_paths(): - try: - path.should_simplify = False - poly = path.to_polygons() - # Each polygon should contain an exterior ring + maybe hole(s): - exterior, holes = [], [] - if len(poly) > 0 and len(poly[0]) > 3: - # The first of the 
list is the exterior ring : - exterior = poly[0] - # Other(s) are hole(s): - if len(poly) > 1: - holes = [h for h in poly[1:] if len(h) > 3] - mpoly.append(Polygon(exterior, holes)) - except: - _logger.warning('Warning: Geometry error when making polygon #{}'.format(i)) - if len(mpoly) > 1: - mpoly = MultiPolygon(mpoly) - polygons.append(mpoly) - colors.append(polygon.get_facecolor().tolist()[0]) - elif len(mpoly) == 1: - polygons.append(mpoly[0]) - colors.append(polygon.get_facecolor().tolist()[0]) - return GeoDataFrame(geometry=polygons, data={'RGBA': colors}, crs={'init': 'epsg:4326'}) - - -################# -def convert_to_hex(rgba_color): - red = str(hex(int(rgba_color[0] * 255)))[2:].capitalize() - green = str(hex(int(rgba_color[1] * 255)))[2:].capitalize() - blue = str(hex(int(rgba_color[2] * 255)))[2:].capitalize() - - if blue == '0': - blue = '00' - if red == '0': - red = '00' - if green == '0': - green = '00' - - return '#' + red + green + blue - - -################# -def get_model_station_ssh(sim_date, sta_in_file, sta_out_file, stations_info): - """Read model ssh""" - - station_dist_tolerance = 0.0001 # degrees - - # Get rid of time zone and convert to string "--
T" - sim_date_str = sim_date.astimezone(timezone.utc).strftime('%Y-%m-%dT%H') - - - #Read model output - sta_data = np.loadtxt(sta_out_file) - time_deltas = sta_data[:, 0].ravel().astype('timedelta64[s]') - sta_date = pd.DatetimeIndex( - data=np.datetime64(sim_date_str) + time_deltas, - tz=timezone.utc, - name="date_time") - - sta_zeta = sta_data[:, 1:] - - _logger.debug(len(sta_zeta[:,1])) - _logger.debug(type(sta_date)) - _logger.debug(type(sta_zeta)) - - df_staout = pd.DataFrame(data=sta_zeta, index=sta_date) - df_staout_melt = df_staout.melt( - ignore_index=False, - value_name="ssh", - var_name="staout_index") - - df_stain = pd.read_csv( - sta_in_file, - sep=' ', - header=None, - skiprows=2, - usecols=[1, 2], - names=["lon", "lat"]) - - df_stasim = df_staout_melt.merge( - df_stain, left_on='staout_index', right_index=True) - - gdf_stasim = gpd.GeoDataFrame( - df_stasim, - geometry=gpd.points_from_xy(df_stasim.lon, df_stasim.lat)) - - gdf_sta_info = gpd.GeoDataFrame( - stations_info, - geometry=gpd.points_from_xy( - stations_info.lon, stations_info.lat)) - - gdf_staout_w_info = gpd.sjoin_nearest( - gdf_stasim, - gdf_sta_info.drop(columns=['lon','lat']), - lsuffix='staout', rsuffix='real_station', - max_distance=station_dist_tolerance) - - # Now go back to DF or keep GDF and remove lon lat columns? - df_staout_w_info = pd.DataFrame(gdf_staout_w_info.drop( - columns=['geometry'])) - df_staout_w_info = df_staout_w_info.rename( - columns={'nos_id': 'station_code'} - ).astype( - {'station_code': 'int64'} - ) - - # TODO: Reset index or keep date as index? -# df_staout_w_info['date_time'] = df_staout_w_info.index -# df_staout_w_info = df_staout_w_info.reset_index(drop=True) - - - return df_staout_w_info - - - -################ -def get_storm_bbox(cone_gdf_list, pos_gdf_list, bbox_from_track=True): - # Find the bounding box to search the data. 
- last_cone = cone_gdf_list[-1]['geometry'].iloc[0] - track = LineString([point['geometry'] for point in pos_gdf_list]) - if bbox_from_track: - track_lons = track.coords.xy[0] - track_lats = track.coords.xy[1] - bbox = ( - min(track_lons) - 2, min(track_lats) - 2, - max(track_lons) + 2, max(track_lats) + 2, - ) - else: - bounds = np.array([last_cone.buffer(2).bounds, track.buffer(2).bounds]).reshape(4, 2) - lons, lats = bounds[:, 0], bounds[:, 1] - bbox = lons.min(), lats.min(), lons.max(), lats.max() - - return bbox - - -def get_storm_dates(pos_gdf_list): - # Ignoring the timezone, like AST (Atlantic Time Standard) b/c - # those are not a unique identifiers and we cannot disambiguate. - - if 'FLDATELBL' in pos_gdf_list[0].keys(): - start = pos_gdf_list[0]['FLDATELBL'] - end = pos_gdf_list[-1]['FLDATELBL'] - date_format = 'YYYY-MM-DD h:mm A ddd' - - elif 'ADVDATE' in pos_gdf_list[0].keys(): - # older versions (e.g. IKE) - start = pos_gdf_list[0]['ADVDATE'] - end = pos_gdf_list[-1]['ADVDATE'] - date_format = 'YYMMDD/hhmm' - - else: - msg = 'Check for correct time stamp and adapt the code !' - _logger.error(msg) - raise ValueError(msg) - - beg_date = arrow.get(start, date_format).datetime - end_date = arrow.get(end, date_format).datetime - - return beg_date, end_date - - -def get_stations_info(bbox): - - # Not using static local file anymore! 
- # We should get the same stations we use for observation -# df = coops.coops_stations() - df = coops.coops_stations_within_region(region=box(*bbox)) - stations_info = df.assign( - lon=df.geometry.apply('x'), - lat=df.geometry.apply('y'), - ).reset_index().rename( - {'name': 'station_name', 'nos_id': 'station_code'} - ) - - # Some stations are duplicate with different NOS ID but the same NWS ID - stations_info = stations_info.drop_duplicates(subset=['nws_id']) - stations_info = stations_info[stations_info.nws_id != ''] - - - return stations_info - - -def get_adjusted_times_for_station_outputs(staout_df_w_info, freq): - - # Round up datetime smaller than 30 minutes - start_date = ceil_dt(staout_df_w_info.index.min().to_pydatetime()) - end_date = ceil_dt(staout_df_w_info.index.max().to_pydatetime()) - - new_index_dates = pd.date_range( - start=start_date.replace(tzinfo=None), - end=end_date.replace(tzinfo=None), - freq=freq, - tz=start_date.tzinfo - ) - - return new_index_dates - - -def adjust_stations_time_and_data(time_indexed_df, freq, groupby): - - new_index_dates = get_adjusted_times_for_station_outputs( - time_indexed_df, freq) - - - adj_staout_df = pd.concat( - df.reindex( - index=new_index_dates, limit=1, method='nearest').drop_duplicates() - for idx, df in time_indexed_df.groupby(by=groupby)) - adj_staout_df.loc[np.abs(adj_staout_df['ssh']) > 10, 'ssh'] = np.nan - adj_staout_df = adj_staout_df[adj_staout_df.ssh.notna()] - - return adj_staout_df - - -def get_esri_url(layer_name): - - pos = 'MapServer/tile/{z}/{y}/{x}' - base = 'http://services.arcgisonline.com/arcgis/rest/services' - layer_info = dict( - Imagery='World_Imagery/MapServer', - Ocean_Base='Ocean/World_Ocean_Base', - Topo_Map='World_Topo_Map/MapServer', - Physical_Map='World_Physical_Map/MapServer', - Terrain_Base='World_Terrain_Base/MapServer', - NatGeo_World_Map='NatGeo_World_Map/MapServer', - Shaded_Relief='World_Shaded_Relief/MapServer', - Ocean_Reference='Ocean/World_Ocean_Reference', - 
Navigation_Charts='Specialty/World_Navigation_Charts', - Street_Map='World_Street_Map/MapServer' - ) - - layer = layer_info.get(layer_name) - if layer is None: - layer = layer_info['Imagery'] - - url = f'{base}/{layer}/{pos}' - return url - - -def folium_create_base_map(bbox_str, layer_name_list=None): - - # Here is the final result. Explore the map by clicking on - # the map features plotted! - bbox_ary = np.fromstring(bbox_str, sep=',') - lon = 0.5 * (bbox_ary[0] + bbox_ary[2]) - lat = 0.5 * (bbox_ary[1] + bbox_ary[3]) - - m = folium.Map( - location=[lat, lon], - tiles='OpenStreetMap', - zoom_start=4, control_scale=True) - Fullscreen(position='topright', force_separate_button=True).add_to(m) - - if layer_name_list is None: - return m - - for lyr_nm in layer_name_list: - url = get_esri_url(lyr_nm) - - lyr = folium.TileLayer(tiles=url, name=lyr_nm, attr='ESRI', overlay=False) - lyr.add_to(m) - - return m - - -def folium_add_max_water_level_contour(map_obj, max_water_level_contours_gdf, MinVal, MaxVal): - ## Get colors in Hex - colors_elev = [] - for i in range(len(max_water_level_contours_gdf)): - color = defn.my_cmap(i / len(max_water_level_contours_gdf)) - colors_elev.append(mpl.colors.to_hex(color)) - - # assign to geopandas obj - max_water_level_contours_gdf['RGBA'] = colors_elev - - # plot geopandas obj - maxele = folium.GeoJson( - max_water_level_contours_gdf, - name='Maximum water level [m above MSL]', - style_function=lambda feature: { - 'fillColor': feature['properties']['RGBA'], - 'color': feature['properties']['RGBA'], - 'weight': 1.0, - 'fillOpacity': 0.6, - 'line_opacity': 0.6, - }, - ) - - maxele.add_to(map_obj) - - # Add colorbar - color_scale = folium.StepColormap( - colors_elev, - # index=color_domain, - vmin=MinVal, - vmax=MaxVal, - caption='Maximum water level [m above MSL]', - ) - map_obj.add_child(color_scale) - - -def folium_add_ssh_time_series(map_obj, staout_df_w_info, obs_df=None): -# marker_cluster_estofs_ssh = MarkerCluster(name='CO-OPS 
SSH observations') - marker_cluster_estofs_ssh = MarkerCluster(name='Simulation SSH [m above MSL]') - - _logger.info(' > plot model only') - - - by = ["staout_index", "station_code"] - for (staout_idx, st_code), df in staout_df_w_info.groupby(by=by): - fname = df.station_code.array[0] - location = df.lat.array[0], df.lon.array[0] - if st_code is None or obs_df is None: - p = make_plot_1line(df, label='SSH [m above MSL]') - else: - p = make_plot_2line( - obs=obs_df[obs_df.station_code == st_code], - remove_mean_diff=True, - model=df, label='SSH [m]') - marker = make_marker(p, location=location, fname=fname) - marker.add_to(marker_cluster_estofs_ssh) - - marker_cluster_estofs_ssh.add_to(map_obj) - - -def folium_add_bbox(map_obj, bbox_str): - ## Plotting bounding box - bbox_ary = np.fromstring(bbox_str, sep=',') - p = folium.PolyLine(get_coordinates(bbox_ary), color='#009933', weight=2, opacity=0.6) - - p.add_to(map_obj) - - -def folium_add_storm_latest_cone(map_obj, cone_gdf_list, pos_gdf_list): - latest_cone_style = { - 'fillOpacity': 0.1, - 'color': 'red', - 'stroke': 1, - 'weight': 1.5, - 'opacity': 0.8, - } - # Latest cone prediction. 
- latest = cone_gdf_list[-1] - ### - if 'FLDATELBL' in pos_gdf_list[0].keys(): # Newer storms have this information - names3 = 'Cone prediction as of {}'.format(latest['ADVDATE'].values[0]) - else: - names3 = 'Cone prediction' - ### - folium.GeoJson( - data=latest.__geo_interface__, - name=names3, - style_function=lambda feat: latest_cone_style, - ).add_to(map_obj) - - -def folium_add_storm_all_cones( - map_obj, - cone_gdf_list, - pos_gdf_list, - track_radius, - storm_name, - storm_year - ): - cone_style = { - 'fillOpacity': 0, - 'color': 'lightblue', - 'stroke': 1, - 'weight': 0.3, - 'opacity': 0.3, - } - marker_cluster1 = MarkerCluster(name='NHC cone predictions') - marker_cluster1.add_to(map_obj) - if 'FLDATELBL' not in pos_gdf_list[0].keys(): # Newer storms have this information - names3 = 'Cone prediction' - - # Past cone predictions. - for cone in cone_gdf_list[:-1]: - folium.GeoJson( - data=cone.__geo_interface__, style_function=lambda feat: cone_style, - ).add_to(marker_cluster1) - - # Latest points prediction. - for k, row in last_pts.iterrows(): - - if 'FLDATELBL' in pos_gdf_list[0].keys(): # Newer storms have this information - date = row['FLDATELBL'] - hclass = row['TCDVLP'] - popup = '{}
{}'.format(date, hclass) - if 'tropical' in hclass.lower(): - hclass = 'tropical depression' - - color = defn.colors_hurricane_condition[hclass.lower()] - else: - popup = '{}
{}'.format(storm_name, storm_year) - color = defn.colors_hurricane_condition['hurricane'] - - location = row['LAT'], row['LON'] - folium.CircleMarker( - location=location, - radius=track_radius, - fill=True, - color=color, - popup=popup, - ).add_to(map_obj) - - -def folium_add_storm_track( - map_obj, - pos_gdf_list, - track_radius, - storm_name, - storm_year - ): - # marker_cluster3 = MarkerCluster(name='Track') - # marker_cluster3.add_to(map_obj) - for point in pos_gdf_list: - if 'FLDATELBL' in pos_gdf_list[0].keys(): # Newer storms have this information - date = point['FLDATELBL'] - hclass = point['TCDVLP'] - popup = defn.template_track_popup.format( - storm_name, date, hclass) - - if 'tropical' in hclass.lower(): - hclass = 'tropical depression' - - color = defn.colors_hurricane_condition[hclass.lower()] - else: - popup = '{}
{}'.format(storm_name, storm_year) - color = defn.colors_hurricane_condition['hurricane'] - - location = point['LAT'], point['LON'] - folium.CircleMarker( - location=location, radius=track_radius, fill=True, color=color, popup=popup, - ).add_to(map_obj) - - -def get_schism_date(param_file): - - # Use f90nml to read parameters from the input mirror - - # &OPT - # start_year = 2000 !int - # start_month = 1 !int - # start_day = 1 !int - # start_hour = 0 !double - # utc_start = 8 !double - # / - params = f90nml.read(str(param_file)) - - opt = params.get('opt', {}) - - year = opt.get('start_year', 2000) - month = opt.get('start_month', 1) - day = opt.get('start_day', 1) - hour = int(opt.get('start_hour', 0)) - tz_rel_utc = opt.get('utc_start', 8) - - sim_tz = timezone(timedelta(hours=tz_rel_utc)) - sim_date = datetime(year, month, day, hour, tzinfo=sim_tz) - - return sim_date - - -def folium_finalize_map(map_obj, storm_name, storm_year, Date, FCT): - html = map_obj.get_root().html - if storm_name and storm_year: - html.add_child(folium.Element( - defn.template_storm_info.format(storm_name,storm_year))) - - html.add_child(folium.Element(defn.template_fct_info.format(Date, FCT))) - html.add_child(folium.Element(defn.disclaimer)) - - folium.LayerControl().add_to(map_obj) - MousePosition().add_to(map_obj) - - -def main(args): - - schism_dir = EFS_MOUNT_POINT / args.schismdir - storm_name = args.name - storm_year = args.year - - storm_tag = f"{storm_name.upper()}_{storm_year}" - grid_file = schism_dir / "hgrid.gr3" - - draw_bbox = False - plot_cones = True - plot_latest_cone_only = True - track_radius = 5 - freq = '30min' - - sta_in_file = schism_dir / "station.in" - if not sta_in_file.exists(): - _logger.warning('Stations input file is not found!') - sta_in_file = None - - results_dir = schism_dir / "outputs" - if not results_dir.exists(): - raise ValueError("Simulation results directory not found!") - - _logger.info(f'results_dir: {str(results_dir)}') - - sta_out_file = 
results_dir / 'staout_1' - if not sta_out_file.exists(): - _logger.warning('Points time-series file is not found!') - sta_out_file = None - - felev = results_dir / 'maxelev.gr3' - if not felev.exists(): - raise FileNotFoundError('Maximum elevation file is not found!') - - param_file = results_dir / 'param.out.nml' - if not param_file.exists(): - raise FileNotFoundError('Parameter file not found!') - - - if not grid_file.exists(): - raise FileNotFoundError('Grid file not found!') - - - post_dir = schism_dir / 'viz' - if not post_dir.exists(): - post_dir.mkdir(exist_ok=True, parents=True) - - - no_sta = False - if sta_out_file is None or sta_in_file is None: - # Station in file is needed for lat-lon - no_sta = True - - -##################################################### - sim_date = get_schism_date(param_file) - Date = sim_date.strftime('%Y%m%d') - FCT = ceil_dt(sim_date, timedelta(hours=6)).hour - - -##################################################### - bbox_str = args.bbox_str - if storm_tag is not None: - - _logger.info(f' > Read NHC information for {storm_name} {storm_year} ... 
') - ts_code, hurr_prod_tag = hurr_f.get_nhc_storm_info(str(storm_year), storm_name) - - # download gis zip files - hurr_gis_path = hurr_f.download_nhc_gis_files(hurr_prod_tag, post_dir) - - # get advisory cones and track points - cone_gdf_list, pos_gdf_list, last_pts = hurr_f.read_advisory_cones_info( - hurr_prod_tag, hurr_gis_path, str(storm_year), ts_code) - - bbox = get_storm_bbox(cone_gdf_list, pos_gdf_list) - start_date, end_date = get_storm_dates(pos_gdf_list) - - bbox_str = ', '.join(format(v, '.2f') for v in bbox) - _logger.info(' > bbox: {}\nstart: {}\n end: {}'.format( - bbox_str, start_date, end_date)) - - -##################################################### - obs_df = None - if not no_sta: - - stations_info = get_stations_info(bbox) - staout_df_w_info = get_model_station_ssh( - sim_date, sta_in_file, sta_out_file, stations_info) - adj_station_df = adjust_stations_time_and_data( - staout_df_w_info, freq, "staout_index") - - - start_dt = adj_station_df.index.min().to_pydatetime() - end_dt = adj_station_df.index.max().to_pydatetime() - - all_obs_df = get_coops( - start=start_dt, - end=end_dt, - sos_name='water_surface_height_above_reference_datum', - units=cfunits.Units('meters'), - datum = 'MSL', - bbox=bbox, - ) - - # Get observation from stations that have a corresponding - # model time history output - obs_df = all_obs_df[all_obs_df.station_code.isin( - np.unique(adj_station_df.station_code.to_numpy()))] - - # To get smaller html file -# obs_df = adjust_stations_time_and_data( -# obs_df, freq, "station_code") - - -##################################################### - _logger.info(' > Put together the final map') - m = folium_create_base_map(bbox_str, layer_name_list=["Imagery"]) - - -##################################################### - _logger.info(' > Plot max water elev ..') - contour, MinVal, MaxVal, levels = read_max_water_level_file(fgrd=grid_file, felev=felev) - max_water_level_contours_gdf = contourf_to_geodataframe(contour) - - 
folium_add_max_water_level_contour(m, max_water_level_contours_gdf, MinVal, MaxVal) - -##################################################### - if not no_sta: - _logger.info(' > Plot SSH stations ..') - - folium_add_ssh_time_series(m, adj_station_df, obs_df) - -##################################################### - if draw_bbox: - folium_add_bbox(m, bbox_str) - -##################################################### - if storm_tag is not None: - if plot_cones: - _logger.info(' > Plot NHC cone predictions') - - if plot_latest_cone_only: - folium_add_storm_latest_cone(m, cone_gdf_list, pos_gdf_list) - else: - folium_add_storm_all_cones( - m, cone_gdf_list, pos_gdf_list, - track_radius, - storm_name, storm_year) - - _logger.info(' > Plot points along the final track ..') - folium_add_storm_track( - m, pos_gdf_list, track_radius, - storm_name, storm_year) - - -##################################################### - _logger.info(' > Add disclaimer and storm name ..') - folium_finalize_map(m, storm_name, storm_year, Date, FCT) - - _logger.info(' > Save file ...') - - - fname = os.path.join(post_dir, '{}_{}_{}.html'.format(storm_tag, Date, FCT)) - _logger.info(fname) - m.save(fname) - - -def entry(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "name", help="name of the storm", type=str) - - parser.add_argument( - "year", help="year of the storm", type=int) - - parser.add_argument( - "schismdir", type=pathlib.Path) - - parser.add_argument('--vdatum', default='MSL') - parser.add_argument( - '--bbox-str', - default='-99.0,5.0,-52.8,46.3', - help='format: lon_min,lat_min,lon_max,lat_max') - - main(parser.parse_args()) - -if __name__ == "__main__": - warnings.filterwarnings("ignore", category=DeprecationWarning) - entry() diff --git a/docker/post/docker/hurricane_funcs.py b/docker/post/docker/hurricane_funcs.py deleted file mode 100644 index b60d403..0000000 --- a/docker/post/docker/hurricane_funcs.py +++ /dev/null @@ -1,273 +0,0 @@ -from __future__ import 
division, print_function - -# !/usr/bin/env python -# -*- coding: utf-8 -*- -""" - -Functions for handling nhc data - - -""" - -__author__ = 'Saeed Moghimi' -__copyright__ = 'Copyright 2020, UCAR/NOAA' -__license__ = 'GPL' -__version__ = '1.0' -__email__ = 'moghimis@gmail.com' - -import pandas as pd -import geopandas as gpd -import numpy as np -import sys -from glob import glob -import requests -from bs4 import BeautifulSoup - -try: - from urllib.request import urlopen, urlretrieve -except: - from urllib import urlopen, urlretrieve -import lxml.html - -import wget - -# from highwatermarks import HighWaterMarks -# from collections import OrderedDict -# import json -import os - - -################## -def url_lister(url): - urls = [] - connection = urlopen(url) - dom = lxml.html.fromstring(connection.read()) - for link in dom.xpath('//a/@href'): - urls.append(link) - return urls - - -################# -def download(url, path, fname): - sys.stdout.write(fname + '\n') - if not os.path.isfile(path): - urlretrieve(url, filename=path, reporthook=progress_hook(sys.stdout)) - sys.stdout.write('\n') - sys.stdout.flush() - - -################# -def progress_hook(out): - """ - Return a progress hook function, suitable for passing to - urllib.retrieve, that writes to the file object *out*. - """ - - def it(n, bs, ts): - got = n * bs - if ts < 0: - outof = '' - else: - # On the last block n*bs can exceed ts, so we clamp it - # to avoid awkward questions. 
- got = min(got, ts) - outof = '/%d [%d%%]' % (ts, 100 * got // ts) - out.write('\r %d%s' % (got, outof)) - out.flush() - - return it - - -################# -def get_nhc_storm_info(year, name): - """ - - """ - - print('Read list of hurricanes from NHC based on year') - - if int(year) < 2008: - print(' ERROR: GIS Data is not available for storms before 2008 ') - sys.exit('Exiting .....') - - url = 'http://www.nhc.noaa.gov/gis/archive_wsurge.php?year=' + year - - # r = requests.get(url,headers=headers,verify=False) - r = requests.get(url, verify=False) - - soup = BeautifulSoup(r.content, 'lxml') - - table = soup.find('table') - # table = [row.get_text().strip().split(maxsplit=1) for row in table.find_all('tr')] - - tab = [] - for row in table.find_all('tr'): - tmp = row.get_text().strip().split() - tab.append([tmp[0], tmp[-1]]) - - print(tab) - - df = pd.DataFrame(data=tab[:], columns=['identifier', 'name'], ).set_index('name') - - ############################### - - print(' > based on specific storm go fetch gis files') - hid = df.to_dict()['identifier'][name.upper()] - al_code = ('{}' + year).format(hid) - hurricane_gis_files = '{}_5day'.format(al_code) - - return al_code, hurricane_gis_files - - -################# -# @retry(stop_max_attempt_number=5, wait_fixed=3000) -def download_nhc_gis_files(hurricane_gis_files, rundir): - """ - """ - - base = os.path.abspath(os.path.join(rundir, 'nhcdata', hurricane_gis_files)) - - if len(glob(base + '/*')) < 1: - nhc = 'http://www.nhc.noaa.gov/gis/forecast/archive/' - - # We don't need the latest file b/c that is redundant to the latest number. 
- fnames = [ - fname - for fname in url_lister(nhc) - if fname.startswith(hurricane_gis_files) and 'latest' not in fname - ] - - if not os.path.exists(base): - os.makedirs(base) - - for fname in fnames: - path1 = os.path.join(base, fname) - if not os.path.exists(path1): - url = '{}/{}'.format(nhc, fname) - download(url, path1, fname) - - return base - ################################# - - -# Only needed to run on binder! -# See https://gitter.im/binder-project/binder?at=59bc2498c101bc4e3acfc9f1 -os.environ['CPL_ZIP_ENCODING'] = 'UTF-8' - - -def read_advisory_cones_info(hurricane_gis_files, base, year, code): - print(' > Read cones shape file ...') - - cones, points = [], [] - for fname in sorted(glob(os.path.join(base, '{}_*.zip'.format(hurricane_gis_files)))): - number = os.path.splitext(os.path.split(fname)[-1])[0].split('_')[-1] - - # read cone shapefiles - - if int(year) < 2014: - # al092008.001_5day_pgn.shp - divd = '.' - else: - divd = '-' - - pgn = gpd.read_file( - ('/{}' + divd + '{}_5day_pgn.shp').format(code, number), - vfs='zip://{}'.format(fname), - ) - cones.append(pgn) - - # read points shapefiles - pts = gpd.read_file( - ('/{}' + divd + '{}_5day_pts.shp').format(code, number), - vfs='zip://{}'.format(fname), - ) - # Only the first "obsevartion." 
- points.append(pts.iloc[0]) - - return cones, points, pts - - -################# -def download_nhc_best_track(year, code): - """ - - """ - - url = 'http://ftp.nhc.noaa.gov/atcf/archive/{}/'.format(year) - fname = 'b{}.dat.gz'.format(code) - base = os.path.abspath(os.path.join(os.path.curdir, 'data', code + '_best_track')) - - if not os.path.exists(base): - os.makedirs(base) - - path1 = os.path.join(base, fname) - # download(url, path,fname) - if not os.path.exists(url + fname): - wget.download(url + fname, out=base) - - return base - - -################# -def download_nhc_gis_best_track(year, code): - """ - - """ - - url = 'http://www.nhc.noaa.gov/gis/best_track/' - fname = '{}_best_track.zip'.format(code) - base = os.path.abspath(os.path.join(os.path.curdir, 'data', code + '_best_track')) - - if not os.path.exists(base): - os.makedirs(base) - - path = os.path.join(base, fname) - # download(url, path,fname) - if not os.path.exists(url + fname): - wget.download(url + fname, out=base) - return base - - -################# -def read_gis_best_track(base, code): - """ - - """ - print(' > Read GIS Best_track file ...') - - fname = base + '/{}_best_track.zip'.format(code) - - points = gpd.read_file(('/{}_pts.shp').format(code), vfs='zip://{}'.format(fname)) - - radii = gpd.read_file(('/{}_radii.shp').format(code), vfs='zip://{}'.format(fname)) - - line = gpd.read_file(('/{}_lin.shp').format(code), vfs='zip://{}'.format(fname)) - - return line, points, radii - - -def get_coordinates(bbox): - """ - Create bounding box coordinates for the map. It takes flat or - nested list/numpy.array and returns 5 points that closes square - around the borders. 
- - Examples - -------- - >>> bbox = [-87.40, 24.25, -74.70, 36.70] - >>> len(get_coordinates(bbox)) - 5 - - """ - bbox = np.asanyarray(bbox).ravel() - if bbox.size == 4: - bbox = bbox.reshape(2, 2) - coordinates = [] - coordinates.append([bbox[0][1], bbox[0][0]]) - coordinates.append([bbox[0][1], bbox[1][0]]) - coordinates.append([bbox[1][1], bbox[1][0]]) - coordinates.append([bbox[1][1], bbox[0][0]]) - coordinates.append([bbox[0][1], bbox[0][0]]) - else: - raise ValueError('Wrong number corners.' ' Expected 4 got {}'.format(bbox.size)) - return coordinates diff --git a/docker/post/environment.yml b/docker/post/environment.yml deleted file mode 100644 index 605d54c..0000000 --- a/docker/post/environment.yml +++ /dev/null @@ -1,96 +0,0 @@ -name: odssm-post-env -channels: - - conda-forge - - defaults -dependencies: - - python>=3.9 # because of searvey - - pygeos - - geos - - gdal - - proj - - pyproj - - cartopy - - udunits2 - - shapely>=1.8.0 - - arrow - - attrs - - backcall - - beautifulsoup4 - - bokeh - - branca - - brotlipy - - bs4 - - certifi - - cffi - - cftime - - cfunits - - cfgrib - - chardet - - click - - click-plugins - - cligj - - cryptography - - cycler - - decorator - - f90nml - - fiona - - folium - - gdal - - geopandas - - geos - - geotiff - - glib - - icu - - idna - - ipython - - ipython_genutils - - jedi - - jinja2 - - kiwisolver - - krb5 - - lxml - - markupsafe - - matplotlib - - munch - - netcdf4 - - hdf5 - - numpy - - olefile - - packaging - - pandas - - parso - - pexpect - - pickleshare - - pillow - - prompt-toolkit - - ptyprocess - - pycparser - - pygeos - - pygments - - pyopenssl - - pyparsing - - pyproj - - pysocks - - python-wget - - pytz - - pyyaml - - readline - - requests - - retrying - - rtree - - setuptools - - shapely - - six - - searvey - - soupsieve - - tbb - - tiledb - - tk - - tornado - - traitlets - - typing_extensions - - wcwidth - - wheel - - zstd - - pip: - - pyschism diff --git a/docker/prefect-aws/Dockerfile 
b/docker/prefect-aws/Dockerfile deleted file mode 100755 index c4c0e6f..0000000 --- a/docker/prefect-aws/Dockerfile +++ /dev/null @@ -1,72 +0,0 @@ -FROM continuumio/miniconda3:22.11.1-alpine - -# Create a non-root user -ARG username=ocsmesher -ARG uid=1000 -ARG gid=100 - -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apk update && apk upgrade && apk --no-cache add \ - tzdata \ - libstdc++ \ - groff \ - less \ - curl \ - zip - -# New user -RUN adduser -D -g "Non-root user" -u $UID -h $HOME $USER - -# Build the conda environment -COPY environment.yml /tmp/ -RUN chown $UID:$GID /tmp/environment.yml - -RUN conda install mamba -n base -c conda-forge && \ - mamba update --name base --channel defaults conda && \ - mamba env create --name odssm --file /tmp/environment.yml --force && \ - mamba clean --all --yes - - -ENV CONDA_DIR /opt/conda - -# run the postBuild script to install any JupyterLab extensions - - - -# AWS has its own python distro -RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ - unzip awscliv2.zip && \ - ./aws/install && \ - rm -rf awscliv2.zip aws - -RUN mkdir -p /scripts -COPY pw_client.py /scripts/pw_client.py -ENV PYTHONPATH=/scripts - -RUN source $CONDA_DIR/etc/profile.d/conda.sh && \ - conda activate odssm && \ - pip install dunamai && \ - conda deactivate - -RUN apk del curl zip - - -# Set default entry -COPY entrypoint.sh /usr/local/bin/ -RUN chown $UID:$GID /usr/local/bin/entrypoint.sh && \ - chmod u+x /usr/local/bin/entrypoint.sh - -# https://github.com/PrefectHQ/prefect/issues/3061 -ENV TZ UTC -RUN cp /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -USER $USER - -RUN echo "source $CONDA_DIR/etc/profile.d/conda.sh" >> ~/.profile - -ENTRYPOINT [ "/usr/local/bin/entrypoint.sh" ] diff --git a/docker/prefect-aws/entrypoint.sh b/docker/prefect-aws/entrypoint.sh deleted file mode 100644 index 2b95ddf..0000000 --- 
a/docker/prefect-aws/entrypoint.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh --login -set -e -conda activate odssm -exec "$@" diff --git a/docker/prefect-aws/environment.yml b/docker/prefect-aws/environment.yml deleted file mode 100644 index 13fe544..0000000 --- a/docker/prefect-aws/environment.yml +++ /dev/null @@ -1,12 +0,0 @@ -name: odssm -channels: - - conda-forge - - defaults -dependencies: - - python=3.10 - - prefect=1.4, <2 - - cloudpickle - - requests - - dnspython - - boto3 - - dunamai diff --git a/docker/prefect-aws/pw_client.py b/docker/prefect-aws/pw_client.py deleted file mode 100644 index 8cf5aea..0000000 --- a/docker/prefect-aws/pw_client.py +++ /dev/null @@ -1,112 +0,0 @@ -import requests -import json -import pprint as pp - -class Client(): - - def __init__(self, url, key): - self.url = url - self.api = url+'/api' - self.key = key - self.session = requests.Session() - self.headers = { - 'Content-Type': 'application/json' - } - - def upload_dataset(self, filename, path): - req = self.session.post(self.api + "/datasets/upload?key="+self.key, - data={'dir': path}, - files={'file':open(filename, 'rb')}) - req.raise_for_status() - data = json.loads(req.text) - return data - - def download_dataset(self, file): - url=self.api + "/datasets/download?key=" + self.key + '&file=' + file - #print url - req = self.session.get(url) - req.raise_for_status() - return req.content - - def find_datasets(self, path, ext=''): - url = self.api + "/datasets/find?key=" + self.key + "&path=" + path + "&ext=" + ext - #print url - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data - - def get_job_tail(self, jid, file, lastline): - url = self.api + "/jobs/"+jid+"/tail?key=" + self.key + "&file=" + file + "&line="+str(lastline) - try: - req = self.session.get(url) - req.raise_for_status() - data = req.text - except: - data = "" - return data - - def start_job(self,workflow,inputs,user): - inputs = json.dumps(inputs) - req = 
self.session.post(self.api + "/tools",data={'user':user,'tool_xml': "/workspaces/"+user+"/workflows/"+workflow+"/workflow.xml",'key':self.key,'tool_id':workflow,'inputs':inputs}) - req.raise_for_status() - data = json.loads(req.text) - jid=data['jobs'][0]['id'] - djid=str(data['decoded_job_id']) - return jid,djid - - def get_job_state(self, jid): - url = self.api + "/jobs/"+ jid + "?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data['state'] - - def get_job_credit_info(self, jid): - url = self.api + "/jobs/"+ jid + "/monitor?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - # return data['info'] - return data - - def get_resources(self): - req = self.session.get(self.api + "/resources?key=" + self.key) - req.raise_for_status() - data = json.loads(req.text) - return data - - def get_resource(self, name): - req = self.session.get(self.api + "/resources/list?key=" + self.key + "&name=" + name) - req.raise_for_status() - try: - data = json.loads(req.text) - return data - except: - return None - - def start_resource(self, name): - req = self.session.get(self.api + "/resources/start?key=" + self.key + "&name=" + name) - req.raise_for_status() - return req.text - - def stop_resource(self, name): - req = self.session.get(self.api + "/resources/stop?key=" + self.key + "&name=" + name) - req.raise_for_status() - return req.text - - def update_resource(self, name, params): - update = "&name={}".format(name) - for key, value in params.items(): - update = "{}&{}={}".format(update, key, value) - req = self.session.post(self.api + "/resources/set?key=" + self.key + update) - req.raise_for_status() - return req.text - - def get_account(self): - url = self.api + "/account?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data - \ No newline at end of file diff --git a/docker/pyschism/docker/.env 
b/docker/pyschism/docker/.env deleted file mode 100644 index a09408b..0000000 --- a/docker/pyschism/docker/.env +++ /dev/null @@ -1 +0,0 @@ -PYSCHISM_USER=pyschismer diff --git a/docker/pyschism/docker/Dockerfile b/docker/pyschism/docker/Dockerfile deleted file mode 100644 index 8873c0e..0000000 --- a/docker/pyschism/docker/Dockerfile +++ /dev/null @@ -1,68 +0,0 @@ -FROM continuumio/miniconda3:22.11.1-alpine - -# Create a non-root user -ARG username=pyschismer -ARG uid=1000 -ARG gid=100 - -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apk update && apk upgrade && apk add \ - git - -# New user -RUN adduser -D -g "Non-root user" -u $UID -h $HOME $USER - -# Create a project directory inside user home -ENV PROJECT_DIR $HOME/app -RUN mkdir $PROJECT_DIR -WORKDIR $PROJECT_DIR - - -# Build the conda environment -ENV ENV_PREFIX $HOME/icogsc - -COPY environment.yml /tmp/ -RUN chown $UID:$GID /tmp/environment.yml - -RUN conda install mamba -n base -c conda-forge && \ - mamba update --name base --channel defaults conda && \ - mamba env create --prefix $ENV_PREFIX --file /tmp/environment.yml --force && \ - mamba clean --all --yes - -# TODO: After perturbation schism branch is merged update this -# conda run -p $ENV_PREFIX --no-capture-output \ -# pip install "ensembleperturbation>=1.0.0" -RUN git clone https://github.com/schism-dev/pyschism.git && \ - git -C pyschism checkout 96e52fd && \ - conda run -p $ENV_PREFIX --no-capture-output \ - pip install ./pyschism && \ - rm -rf pyschism && \ - conda run -p $ENV_PREFIX --no-capture-output \ - pip install "coupledmodeldriver>=1.6.3" && \ - conda run -p $ENV_PREFIX --no-capture-output \ - pip install "ensembleperturbation>=1.1.2" - -ENV CONDA_DIR /opt/conda - -RUN conda clean --all -RUN apk del git - -RUN mkdir -p $PROJECT_DIR/io -RUN chown -R $UID:$GID $HOME - -USER $USER - -RUN mkdir -p $PROJECT_DIR/scripts -COPY docker/*.py ${PROJECT_DIR}/scripts/ -COPY docker/refs 
${PROJECT_DIR}/refs/ -ENV PYTHONPATH ${PROJECT_DIR}/scripts/ - -RUN mkdir -p $HOME/.local/share/pyschism - -# Ref: https://pythonspeed.com/articles/activate-conda-dockerfile/ -ENTRYPOINT ["conda", "run", "-p", "$ENV_PREFIX", "--no-capture-output", "python", "-m"] diff --git a/docker/pyschism/docker/analyze_ensemble.py b/docker/pyschism/docker/analyze_ensemble.py deleted file mode 100644 index 77c4d3e..0000000 --- a/docker/pyschism/docker/analyze_ensemble.py +++ /dev/null @@ -1,354 +0,0 @@ -from argparse import ArgumentParser -from pathlib import Path -import pickle - -import chaospy -import dask -from matplotlib import pyplot -import numpy -from sklearn.linear_model import LassoCV, ElasticNetCV, LinearRegression -from sklearn.model_selection import ShuffleSplit, LeaveOneOut -import xarray - -from ensembleperturbation.parsing.adcirc import subset_dataset -from ensembleperturbation.perturbation.atcf import VortexPerturbedVariable -from ensembleperturbation.plotting.perturbation import plot_perturbations -from ensembleperturbation.plotting.surrogate import ( - plot_kl_surrogate_fit, - plot_selected_percentiles, - plot_selected_validations, - plot_sensitivities, - plot_validations, -) -from ensembleperturbation.uncertainty_quantification.karhunen_loeve_expansion import ( - karhunen_loeve_expansion, - karhunen_loeve_prediction, -) -from ensembleperturbation.uncertainty_quantification.surrogate import ( - percentiles_from_surrogate, - sensitivities_from_surrogate, - surrogate_from_karhunen_loeve, - surrogate_from_training_set, - validations_from_surrogate, -) -from ensembleperturbation.utilities import get_logger - -EFS_MOUNT_POINT = Path('~').expanduser() / 'app/io' -LOGGER = get_logger('klpc_wetonly') - - - -def main(args): - - tracks_dir = EFS_MOUNT_POINT / args.tracks_dir - ensemble_dir = EFS_MOUNT_POINT / args.ensemble_dir - - analyze(tracks_dir, ensemble_dir/'analyze') - - - -def analyze(tracks_dir, analyze_dir): - # KL parameters - variance_explained = 0.9999 - # 
subsetting parameters - isotach = 34 # -kt wind swath of the cyclone - depth_bounds = 25.0 - point_spacing = None - node_status_mask = 'always_wet' - # analysis type - variable_name = 'zeta_max' - use_depth = True # for depths - # use_depth = False # for elevations - log_space = False # normal linear space - # log_space = True # use log-scale to force surrogate to positive values only - training_runs = 'korobov' - validation_runs = 'random' - # PC parameters - polynomial_order = 3 - # cross_validator = ShuffleSplit(n_splits=10, test_size=12, random_state=666) - # cross_validator = ShuffleSplit(random_state=666) - cross_validator = LeaveOneOut() - # regression_model = LassoCV( - # fit_intercept=False, cv=cross_validator, selection='random', random_state=666 - # ) - regression_model = ElasticNetCV( - fit_intercept=False, - cv=cross_validator, - l1_ratio=0.5, - selection='random', - random_state=666, - ) - # regression_model = LinearRegression(fit_intercept=False) - regression_name = 'ElasticNet_LOO' - if training_runs == 'quadrature': - use_quadrature = True - else: - use_quadrature = False - - make_perturbations_plot = True - make_klprediction_plot = True - make_klsurrogate_plot = True - make_sensitivities_plot = True - make_validation_plot = True - make_percentile_plot = True - - save_plots = True - - storm_name = None - - if log_space: - output_directory = analyze_dir / f'outputs_log_{regression_name}' - else: - output_directory = analyze_dir / f'outputs_linear_{regression_name}' - if not output_directory.exists(): - output_directory.mkdir(parents=True, exist_ok=True) - - subset_filename = output_directory / 'subset.nc' - kl_filename = output_directory / 'karhunen_loeve.pkl' - kl_surrogate_filename = output_directory / 'kl_surrogate.npy' - surrogate_filename = output_directory / 'surrogate.npy' - kl_validation_filename = output_directory / 'kl_surrogate_fit.nc' - sensitivities_filename = output_directory / 'sensitivities.nc' - validation_filename = 
output_directory / 'validation.nc' - percentile_filename = output_directory / 'percentiles.nc' - - filenames = ['perturbations.nc', 'maxele.63.nc'] - if storm_name is None: - storm_name = tracks_dir / 'original.22' - - datasets = {} - existing_filenames = [] - for filename in filenames: - filename = analyze_dir / filename - if filename.exists(): - datasets[filename.name] = xarray.open_dataset(filename, chunks='auto') - else: - raise FileNotFoundError(filename.name) - - perturbations = datasets[filenames[0]] - max_elevations = datasets[filenames[1]] - min_depth = 0.8 * max_elevations.h0 # the minimum allowable depth - - perturbations = perturbations.assign_coords( - type=( - 'run', - ( - numpy.where( - perturbations['run'].str.contains(training_runs), - 'training', - numpy.where( - perturbations['run'].str.contains(validation_runs), - 'validation', - 'none', - ), - ) - ), - ) - ) - - if len(numpy.unique(perturbations['type'][:])) == 1: - perturbations['type'][:] = numpy.random.choice( - ['training', 'validation'], size=len(perturbations.run), p=[0.7, 0.3] - ) - LOGGER.info('dividing 70/30% for training/testing the model') - - training_perturbations = perturbations.sel(run=perturbations['type'] == 'training') - validation_perturbations = perturbations.sel(run=perturbations['type'] == 'validation') - - if make_perturbations_plot: - plot_perturbations( - training_perturbations=training_perturbations, - validation_perturbations=validation_perturbations, - runs=perturbations['run'].values, - perturbation_types=perturbations['type'].values, - track_directory=tracks_dir, - output_directory=output_directory if save_plots else None, - ) - - variables = { - variable_class.name: variable_class() - for variable_class in VortexPerturbedVariable.__subclasses__() - } - - distribution = chaospy.J( - *( - variables[variable_name].chaospy_distribution() - for variable_name in perturbations['variable'].values - ) - ) - - # sample based on subset and excluding points that are never wet 
during training run - if not subset_filename.exists(): - LOGGER.info('subsetting nodes') - subset = subset_dataset( - ds=max_elevations, - variable=variable_name, - maximum_depth=depth_bounds, - wind_swath=[storm_name, isotach], - node_status_selection={ - 'mask': node_status_mask, - 'runs': training_perturbations['run'], - }, - point_spacing=point_spacing, - output_filename=subset_filename, - ) - - # subset chunking can be disturbed by point_spacing so load from saved filename always - LOGGER.info(f'loading subset from "{subset_filename}"') - subset = xarray.open_dataset(subset_filename) - if 'element' in subset: - elements = subset['element'] - subset = subset[variable_name] - - # divide subset into training/validation runs - with dask.config.set(**{'array.slicing.split_large_chunks': True}): - training_set = subset.sel(run=training_perturbations['run']) - validation_set = subset.sel(run=validation_perturbations['run']) - - LOGGER.info(f'total {training_set.shape} training samples') - LOGGER.info(f'total {validation_set.shape} validation samples') - - training_set_adjusted = training_set.copy(deep=True) - - if use_depth: - training_set_adjusted += training_set_adjusted['depth'] # + adjusted_min_depth - - if log_space: - training_set_adjusted = numpy.log(training_set_adjusted) - - # Evaluating the Karhunen-Loeve expansion - nens, ngrid = training_set.shape - if not kl_filename.exists(): - LOGGER.info( - f'Evaluating Karhunen-Loeve expansion from {ngrid} grid nodes and {nens} ensemble members' - ) - kl_expansion = karhunen_loeve_expansion( - training_set_adjusted.values, - neig=variance_explained, - method='PCA', - output_directory=output_directory, - ) - else: - LOGGER.info(f'loading Karhunen-Loeve expansion from "{kl_filename}"') - with open(kl_filename, 'rb') as kl_handle: - kl_expansion = pickle.load(kl_handle) - - LOGGER.info(f'found {kl_expansion["neig"]} Karhunen-Loeve modes') - LOGGER.info(f'Karhunen-Loeve expansion: {list(kl_expansion)}') - - # plot 
prediction versus actual simulated - if make_klprediction_plot: - kl_predicted = karhunen_loeve_prediction( - kl_dict=kl_expansion, - actual_values=training_set_adjusted, - ensembles_to_plot=[0, int(nens / 2), nens - 1], - element_table=elements if point_spacing is None else None, - plot_directory=output_directory, - ) - - # evaluate the surrogate for each KL sample - kl_training_set = xarray.DataArray(data=kl_expansion['samples'], dims=['run', 'mode']) - kl_surrogate_model = surrogate_from_training_set( - training_set=kl_training_set, - training_perturbations=training_perturbations, - distribution=distribution, - filename=kl_surrogate_filename, - use_quadrature=use_quadrature, - polynomial_order=polynomial_order, - regression_model=regression_model, - ) - - # plot kl surrogate model versus training set - if make_klsurrogate_plot: - kl_fit = validations_from_surrogate( - surrogate_model=kl_surrogate_model, - training_set=kl_training_set, - training_perturbations=training_perturbations, - filename=kl_validation_filename, - ) - - plot_kl_surrogate_fit( - kl_fit=kl_fit, - output_filename=output_directory / 'kl_surrogate_fit.png' if save_plots else None, - ) - - # convert the KL surrogate model to the overall surrogate at each node - surrogate_model = surrogate_from_karhunen_loeve( - mean_vector=kl_expansion['mean_vector'], - eigenvalues=kl_expansion['eigenvalues'], - modes=kl_expansion['modes'], - kl_surrogate_model=kl_surrogate_model, - filename=surrogate_filename, - ) - - if make_sensitivities_plot: - sensitivities = sensitivities_from_surrogate( - surrogate_model=surrogate_model, - distribution=distribution, - variables=perturbations['variable'], - nodes=subset, - element_table=elements if point_spacing is None else None, - filename=sensitivities_filename, - ) - plot_sensitivities( - sensitivities=sensitivities, - storm=storm_name, - output_filename=output_directory / 'sensitivities.png' if save_plots else None, - ) - - if make_validation_plot: - node_validation = 
validations_from_surrogate( - surrogate_model=surrogate_model, - training_set=training_set, - training_perturbations=training_perturbations, - validation_set=validation_set, - validation_perturbations=validation_perturbations, - convert_from_log_scale=log_space, - convert_from_depths=use_depth, - minimum_allowable_value=min_depth if use_depth else None, - element_table=elements if point_spacing is None else None, - filename=validation_filename, - ) - - plot_validations( - validation=node_validation, - output_directory=output_directory if save_plots else None, - ) - - plot_selected_validations( - validation=node_validation, - run_list=validation_set['run'][ - numpy.linspace(0, validation_set.shape[0], 6, endpoint=False).astype(int) - ].values, - output_directory=output_directory if save_plots else None, - ) - - if make_percentile_plot: - percentiles = [10, 50, 90] - node_percentiles = percentiles_from_surrogate( - surrogate_model=surrogate_model, - distribution=distribution, - training_set=validation_set, - percentiles=percentiles, - convert_from_log_scale=log_space, - convert_from_depths=use_depth, - minimum_allowable_value=min_depth if use_depth else None, - element_table=elements if point_spacing is None else None, - filename=percentile_filename, - ) - - plot_selected_percentiles( - node_percentiles=node_percentiles, - perc_list=percentiles, - output_directory=output_directory if save_plots else None, - ) - - -if __name__ == '__main__': - - parser = ArgumentParser() - parser.add_argument('-d', '--ensemble-dir') - parser.add_argument('-t', '--tracks-dir') - parser.add_argument('-s', '--sequential', action='store_true') - - main(parser.parse_args()) diff --git a/docker/pyschism/docker/combine_ensemble.py b/docker/pyschism/docker/combine_ensemble.py deleted file mode 100644 index 909976f..0000000 --- a/docker/pyschism/docker/combine_ensemble.py +++ /dev/null @@ -1,32 +0,0 @@ -from argparse import ArgumentParser -from pathlib import Path - -from 
ensembleperturbation.client.combine_results import combine_results -from ensembleperturbation.utilities import get_logger - -EFS_MOUNT_POINT = Path('~').expanduser() / 'app/io' -LOGGER = get_logger('klpc_wetonly') - - - -def main(args): - - tracks_dir = EFS_MOUNT_POINT / args.tracks_dir - ensemble_dir = EFS_MOUNT_POINT / args.ensemble_dir - - output = combine_results( - model='schism', - adcirc_like=True, - output=ensemble_dir/'analyze', - directory=ensemble_dir, - parallel=not args.sequential - ) - -if __name__ == '__main__': - - parser = ArgumentParser() - parser.add_argument('-d', '--ensemble-dir') - parser.add_argument('-t', '--tracks-dir') - parser.add_argument('-s', '--sequential', action='store_true') - - main(parser.parse_args()) diff --git a/docker/pyschism/docker/docker-compose.yml b/docker/pyschism/docker/docker-compose.yml deleted file mode 100644 index ff41fd4..0000000 --- a/docker/pyschism/docker/docker-compose.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "3.9" -services: - pyschism-noaa: - build: - context: .. 
- dockerfile: docker/Dockerfile - args: - - username=${PYSCHISM_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/mesh - target: /home/${PYSCHISM_USER}/app/io/input/mesh - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/coops_ssh - target: /home/${PYSCHISM_USER}/app/io/input/coops_ssh - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/setup - target: /home/${PYSCHISM_USER}/app/io/output - - type: bind - source: /home/ec2-user/data/test/static/tpxo - target: /home/${PYSCHISM_USER}/.local/share/tpxo - - type: bind - source: /home/ec2-user/data/test/static/nwm - target: /home/${PYSCHISM_USER}/.local/share/pyschism/nwm - diff --git a/docker/pyschism/docker/refs/param.nml b/docker/pyschism/docker/refs/param.nml deleted file mode 100755 index 70c9afc..0000000 --- a/docker/pyschism/docker/refs/param.nml +++ /dev/null @@ -1,69 +0,0 @@ -&CORE - ipre=0 - ibc=1 - ibtp=0 - nspool=24 - ihfskip=11088 - dt=150.0 - rnday=19.25 - - msc2 = 24 !same as msc in .nml ... for consitency check between SCHISM and WWM - mdc2 = 30 !same as mdc in .nml -/ - -&OPT - start_year=2018 - start_month=8 - start_day=30 - start_hour=6.0 - utc_start=-0.0 - ics=2 - ihot=1 - nchi=-1 - hmin_man=1.0 - ic_elev=1 - nws=-1 - wtiminc=150.0 - - icou_elfe_wwm = 1 - nstep_wwm = 4 !call WWM every this many time steps - iwbl = 0 !wave boundary layer formulation (used only if USE_WMM and - !icou_elfe_wwm/=0 and nchi=1. If icou_elfe_wwm=0, set iwbl=0): - !1-modified Grant-Madsen formulation; 2-Soulsby (1997) - hmin_radstress = 1. !min. total water depth used only in radiation stress calculation [m] -! nrampwafo = 0 !ramp-up option for the wave forces (1: on; 0: off) - drampwafo = 0. 
!ramp-up period in days for the wave forces (no ramp-up if <=0) - turbinj = 0.15 !% of depth-induced wave breaking energy injected in turbulence - !(default: 0.15 (15%), as proposed by Feddersen, 2012) - turbinjds = 1.0 !% of wave energy dissipated through whitecapping injected in turbulence - !(default: 1 (100%), as proposed by Paskyabi et al. 2012) - alphaw = 0.5 !for itur=4 : scaling parameter for the surface roughness z0s = alphaw*Hm0. - !If negative z0s = abs(alphaw) e.g. z0s=0.2 m (Feddersen and Trowbridge, 2005) - ! Vortex Force terms (off/on:0/1) -/ - -&SCHOUT - nhot=1 - nhot_write=11088 - - iof_hydro(14) = 1 - - iof_wwm(1) = 1 !sig. height (m) {sigWaveHeight} 2D - iof_wwm(2) = 0 !Mean average period (sec) - TM01 {meanWavePeriod} 2D - iof_wwm(3) = 0 !Zero down crossing period for comparison with buoy (s) - TM02 {zeroDowncrossPeriod} 2D - iof_wwm(4) = 0 !Average period of wave runup/overtopping - TM10 {TM10} 2D - iof_wwm(5) = 0 !Mean wave number (1/m) {meanWaveNumber} 2D - iof_wwm(6) = 0 !Mean wave length (m) {meanWaveLength} 2D - iof_wwm(7) = 0 !Mean average energy transport direction (degr) - MWD in NDBC? {meanWaveDirection} 2D - iof_wwm(8) = 0 !Mean directional spreading (degr) {meanDirSpreading} 2D - iof_wwm(9) = 1 !Discrete peak period (sec) - Tp {peakPeriod} 2D - iof_wwm(10) = 0 !Continuous peak period based on higher order moments (sec) {continuousPeakPeriod} 2D - iof_wwm(11) = 0 !Peak phase vel. (m/s) {peakPhaseVel} 2D - iof_wwm(12) = 0 !Peak n-factor {peakNFactor} 2D - iof_wwm(13) = 0 !Peak group vel. 
(m/s) {peakGroupVel} 2D - iof_wwm(14) = 0 !Peak wave number {peakWaveNumber} 2D - iof_wwm(15) = 0 !Peak wave length {peakWaveLength} 2D - iof_wwm(16) = 1 !Peak (dominant) direction (degr) {dominantDirection} 2D - iof_wwm(17) = 0 !Peak directional spreading {peakSpreading} 2D - -/ diff --git a/docker/pyschism/docker/refs/wwminput.nml b/docker/pyschism/docker/refs/wwminput.nml deleted file mode 100755 index f0996e5..0000000 --- a/docker/pyschism/docker/refs/wwminput.nml +++ /dev/null @@ -1,667 +0,0 @@ -! This is the main input for WWM -! Other mandatory inputs: wwmbnd.gr3 (boundary flag files; see below) -! Depending on the choices of parameters below you may need additional inputs - -&PROC - PROCNAME = 'schism_wwm_2003_test' ! Project Name - DIMMODE = 2 ! Mode of run (ex: 1 = 1D, 2 = 2D) always 2D when coupled to SCHISM - LSTEA = F ! steady mode; under development - LQSTEA = F ! Quasi-Steady Mode; In this case WWM-II is doing subiterations defined as DELTC/NQSITER unless QSCONVI is not reached - LSPHE = T ! Spherical coordinates (lon/lat) - LNAUTIN = T ! Nautical convention for all inputs given in degrees - LNAUTOUT = T ! Output in Nautical convention - ! If T, 0 is _from_ north, 90 is from east etc; - ! If F, maths. convention - 0: to east; 90: going to north - LMONO_IN = F ! For prescribing monochromatic wave height Hmono as a boundary conditions; incident wave is defined as monochromatic wave height, which is Hmono = sqrt(2) * Hs - LMONO_OUT = F ! Output wave heights in terms of Lmono - BEGTC = '20180830.000000' ! Time for start the simulation, ex:yyyymmdd. hhmmss - DELTC = 600 ! Time step (MUST match dt*nstep_wwm in SCHISM!) - UNITC = 'SEC' ! Unity of time step - ENDTC = '20181030.000000' ! Time for stop the simulation, ex:yyyymmdd. hhmmss - DMIN = 0.01 ! Minimum water depth. THis must be same as h0 in selfe -/ - -&COUPL - LCPL = T ! Couple with current model ... main switch - keep it on for SCHISM-WWM - LROMS = F ! ROMS (set as F) - LTIMOR = F ! 
TIMOR (set as F) - LSHYFEM = F ! SHYFEM (set as F) - RADFLAG = 'LON' ! LON: Longuet-Higgin; VOR: vortex formulation - LETOT = F ! Option to compute the wave induced radiation stress. If .T. the radiation stress is based on the integrated wave spectrum - ! e.g. Etot = Int,0,inf;Int,0,2*pi[N(sigma,theta)]dsigma,dtheta. If .F. the radiation stress is estimated as given in Roland et al. (2008) based - ! on the directional spectra itself. It is always desirable to use .F., since otherwise the spectral informations are truncated and therefore - ! LETOT = .T., is only for testing and developers! - NLVT = 10 ! Number of vertical Layers; not used with SCHISM - DTCOUP = 600. ! Couple time step - not used when coupled to SCHISM - IMET_DRY = 0 ! -/ - -&GRID - LCIRD = T ! Full circle in directional space - LSTAG = F ! Stagger directional bins with a half Dtheta; may use T only for regular grid to avoid char. line aligning with grid line - MINDIR = 0. ! Minimum direction for simulation (unit: degrees; nautical convention; 0: from N; 90: from E); not used if LCIRD = .T. - MAXDIR = 360. ! Maximum direction for simulation (unit: degrees); may be < MINDIR; not used if LCIRD = .T. - MDC = 30 ! Number of directional bins - FRLOW = 0.04 ! Low frequency limit of the discrete wave period (Hz; 1/period) - FRHIGH = 1. ! High frequency limit of the discrete wave period. - MSC = 24 ! Number of frequency bins - FILEGRID = 'hgrid_WWM.gr3' ! Name of the grid file. hgrid.gr3 if IGRIDTYPE = 3 (SCHISM) - IGRIDTYPE = 3 ! Gridtype used. - ! 1 ~ XFN system.dat - ! 2 ~ WWM-PERIODIC - ! 3 ~ SCHISM - ! 4 ~ old WWM type - LSLOP = F ! Bottom Slope limiter (default=F) - SLMAX = 0.2 ! Max Slope; - LVAR1D = F ! For 1d-mode if variable dx is used; not used with SCHISM - LOPTSIG = F ! Use optimal distributions of freq. in spectral space ... fi+1 = fi * 1.1. Take care what you high freq. limit is! - CART2LATLON = F, - LATLON2CART = F, - APPLY_DXP_CORR = F, - USE_EXACT_FORMULA_SPHERICAL_AREA = T, ! 
Use spherical formular for triangle area computation. - !LEXPORT_GRID_MOD_OUT = F -/ - -&INIT - LHOTR = F ! Use hotstart file (see &HOTFILE section) - LINID = F ! Initial condition; F for default; use T if using WW3 as i.c. etc - INITSTYLE = 1 ! 1 - Parametric Jonswap, 2 - Read from Global NETCDF files, work only if IBOUNDFORMAT=2 -/ - -&BOUC - LBCSE = F ! The wave boundary data is time dependent - LBCWA = T ! Parametric Wave Spectra - LBCSP = F ! Specify (non-parametric) wave spectra, specified in 'FILEWAVE' below - LINHOM = F ! Non-uniform wave b.c. in space - LBSP1D = F ! 1D (freq. space only) format for FILEWAVE if LBCSP=T and LINHOM=F - LBSP2D = F ! Not used now - LBINTER = F ! Do interpolation in time if LBCSE=T (not available for quasi-steady mode within the subtime steps) - BEGTC = '20180830.000000' ! Begin time of the wave boundary file (FILEWAVE) - DELTC = 1 ! Time step in FILEWAVE - UNITC = 'HR' ! Unit can be HR, MIN, SEC - ENDTC = '20181030.000000' ! End time - FILEBOUND = 'wwmbnd.gr3' ! Boundary file defining boundary conditions and Neumann nodes. - ! In this file there is following definition Flag 0: not on boundary; 3: Neumann (0 gradient only for advection part); - ! 2: active bnd (Dirichlet). Bnd flags imported from SCHISM: ! 1: exterior bnd; -1: interior (islands) - ! exterio and interior boundaries need not to be defined. - IBOUNDFORMAT = 1 ! - FILEWAVE = 'bndfiles.dat' ! Boundary file defining boundary input - LINDSPRDEG = F ! If 1-d wave spectra are read this flag defines whether the input for the directional spreading is in degrees (true) or exponent (false) - LPARMDIR = F ! If LPARMDIR is true then directional spreading is read from WBDS and must be in exponential format at this time, only valid for 1d Spectra - ! For WW3 boundary input also set LINHOM=T, LBCSE=T and this works only for spherical coordinates - - WBHS = 2. ! Hs at the boundary for parametric spectra - WBSS = 2 ! 
1 or -1: Pierson-Moskowitz, 2 or -2: JONSWAP, 3 or -3: all in one BIN, - ! 4: Gauss. The sign decides whether WBTP below is - ! peak (+) or mean period (-) - WBTP = 8. ! Tp at the boundary (sec); mean or peak depending on the sign of WBSS - WBDM = 90.0 ! Avg. Wave Direction at the boundary - WBDSMS = 1 ! Directional spreading value in degrees (1) or as exponent (2) - WBDS = 10. ! Directioanl spreading at the boundary (degrees/exponent) - WBGAUSS = 0.1 ! factor for gaussian distribution if WBSS=1 - ! End section for LBCWA=T and LINHOM=F - WBPKEN = 3.3 ! Peak enhancement factor for Jonswap Spectra if WBSS=2 - MULTIPLE_IN = T, - NETCDF_OUT_PARAM = F, - NETCDF_OUT_SPECTRA = F, - NETCDF_OUT_FILE = 'boundary_out_spec.nc' - USE_SINGLE_OUT = T, - BEGTC_OUT = 20030908.000000 , - DELTC_OUT = 600.000000000000 , - UNITC_OUT = SEC , - ENDTC_OUT = 20031008.000000 , - EXTRAPOLATION_ALLOWED = F, - HACK_HARD_SET_IOBP = F, - !PARAMWRITE = T, - NETCDF_IN_FILE = 'bndfiles.dat' - !LEXPORT_BOUC_MOD_OUT = F, - EXPORT_BOUC_DELTC = 0.00 -/ -&WIND ! THIS IS NOW USED IN SCHISM - LSEWD = F ! Time dependend wind input - BEGTC = '20030101.000000' ! Begin time - DELTC = 60.0 ! Time step - UNITC = 'MIN' ! Unit - ENDTC = '20030102.000000' ! End time - LINTERWD = T ! Interpolate linear within the wind input time step - LSTWD = T ! Steady wind - LCWIN = T ! Constant wind - LWDIR = T ! Define wind using wind direction rather than vel. vectors - WDIR = 140.0 ! Wind direction if LWDIR=T - WVEL = 10.0 ! Wind velocity ... - CWINDX = 30.0 ! wind x-vec if LWDIR=F - CWINDY = 0.0 ! wind y-vec - FILEWIND = 'wind.dat' ! wind input data file; input file format: write(*,*) curtx; write(*,*) curty - WINDFAC = 1. ! Factor for wind scaling - IWINDFORMAT = 1 ! kind of wind input - ! 1 - ASCII, - ! 2 - DWD_NETCDF - ! 3 - NOAA CFRS - ! 4 - NOAA NARR - ! 5 - netCDF WRF/ROMS forcing (Uwind,Vwind,LON,LAT,wind_time are used), fast bilinear interp - LWINDFROMWWM = F, ! Wind is coming from WWM (true) or from SCHISM(false). 
This is under developement. If F, the following parameters in this section are ignored. For SELFE users, use F. - GRIB_FILE_TYPE = 1, - EXTRAPOLATION_ALLOWED = F, - USE_STEPRANGE = T, - MULTIPLE_IN = T, - !LEXPORT_WIND_MOD_OUT = F, - EXPORT_WIND_DELTC = 0.00, - !LSAVE_INTERP_ARRAY = F -/ -&CURR !NOT USED WITH SCHISM - LSECU = F ! Time dependend currents - BEGTC = '20030908.000000' ! Beginn time - DELTC = 600 ! Time step - UNITC = 'SEC' ! Unit - ENDTC = '20031008.000000' ! End time - LINTERCU = F ! Interpolate linear within the wind input time step - LSTCU = F ! Steady current - LCCUR = F ! Constant current - CCURTX = 0.0 ! current x-vec - CCURTY = 0.0 ! current y-vec - FILECUR = 'current.dat' ! Current file name; input file format: write(*,*) curtx; write(*,*) curty - LERGINP = F ! read timor file for input ... ergzus.bin - CURFAC = 1.000000 - ICURRFORMAT = 1 - MULTIPLE_IN = T, - !LEXPORT_CURR_MOD_OUT = F, - EXPORT_CURR_DELTC = 0.000000000000000E+000 -/ - -&WALV !NOT USED WITH SCHISM - LSEWL = F ! Time dependend elev. - BEGTC = '20030908.000000' ! Begin time - DELTC = 1 ! Time step - UNITC = 'HR' ! Unit - ENDTC = '20031008.000000' ! End time - LINTERWL = F ! Interpolate linear within the wind input time step - LSTWL = T ! Steady water level - LCWLV = T ! Constant water level - CWATLV = 0.0 ! elevation of the water level [m] - FILEWATL = ' ' ! water level file name; input file format: write(*,*) eta - LERGINP = F, - WALVFAC = 1.00000000000000 , - IWATLVFORMAT = 1, - MULTIPLE_IN = T, - !LEXPORT_WALV_MOD_OUT = F, - EXPORT_WALV_DELTC = 0.000000000000000E+000 -/ - -&ENGS !SOURCE TERMS - !ISOURCE = 1 ! Source Term Formulation for deep water: 1 ~ Ardhuin et al. (WW3), 2 ~ Janssen et al., (ECMWF), ~ 3 ~ Komen et al. 1984, (SWAN), (DEFAULT: 1) - MESNL = 1 ! Nonlinear Interaction NL4 , 1 ~ on, 0 ~ off (DIA), (DEFAULT: 1) - MESIN = 1 ! Wind input 1 ~ on, 0 ~ off, (DEFAULT: 1) - IFRIC = 1 ! Now only JONSWAP friction will add Roland & Ardhuin soon. - MESBF = 1 ! 
Bottomg friction: 1 ~ on, 0 ~ off (JONSWAP Formulation); (DEFAULT: 1) - FRICC = 0.067 ! Cjon - Bottom friction coefficient (always positive); (DEFAULT: 0.067) - MESBR = 1 ! Shallow water wave breaking; 0: off; 1: on: BJ78 same as in SWAN, (DEFAULT: 1) - ICRIT = 1 ! Wave breaking criterion: set as 1 - SWAN, 2 - Dingemans; (DEFAULT: 2) - IBREAK = 1 ! Now only Battjes & Janssen - B_ALP = 0.5 ! Dissipation proportionality coefficient, (DEFAULT: 0.5) - BRCR = 0.78 ! Wave breaking coefficient for Const. type wave breaking criterion; range: 0.6-1.1 (suggested 0.78) - MEVEG = 0 - LMAXETOT = T ! Limit shallow water wave height by wave breaking limiter (default=T) - MESDS = 1 ! Whitecapping 1 ~ on, 0 ~ off; (DEFAULT: 1) - MESTR = 1 ! Nonlinear Interaction in shallow water SNL3: 1 ~ on, 0 ~ off (DEFAULT: 0) - TRICO = 0.1 ! proportionality const. (\alpha_EB); default is 0.1; (DEFAULT: 0.1) - TRIRA = 5. ! ratio of max. freq. considered in triads over mean freq.; 2.5 is suggested; (DEFAULT: 2.5) - TRIURS = 0.1 ! critical Ursell number; if Ursell # < TRIURS; triads are not computed; (DEFAULT: 0.1) -/ - - -&SIN4 ! Input parameter for ST4 source terms do not touch or reach our paper about this ... - ZWND = 10.0000000000000, - ALPHA0 = 9.499999694526196E-003, - Z0MAX = 0.000000000000000E+000, - BETAMAX = 1.54000000000000, - SINTHP = 2.00000000000000, - ZALP = 6.000000052154064E-003, - TAUWSHELTER = 0.300000011920929, - SWELLFPAR = 1.00000000000000, - SWELLF = 0.660000026226044, - SWELLF2 = -1.799999922513962E-002, - SWELLF3 = 2.199999988079071E-002, - SWELLF4 = 150000.000000000, - SWELLF5 = 1.20000004768372, - SWELLF6 = 0.000000000000000E+000, - SWELLF7 = 360000.000000000, - Z0RAT = 3.999999910593033E-002, - SINBR = 0.000000000000000E+000, -/ - -&SDS4 ! Input parameter for ST4 dissipation terms do not touch or reach our paper about this ... 
- SDSC1 = 0.000000000000000E+000, - FXPM3 = 4.00000000000000, - FXFM3 = 2.50000000000000, - FXFMAGE = 0.000000000000000E+000, - SDSC2 = -2.200000017182902E-005, - SDSCUM = -0.403439998626709, - SDSSTRAIN = 0.000000000000000E+000, - SDSC4 = 1.00000000000000, - SDSC5 = 0.000000000000000E+000, - SDSC6 = 0.300000011920929, - SDSBR = 8.999999845400453E-004, - SDSBR2 = 0.800000011920929, - SDSP = 2.00000000000000, - SDSISO = 2.00000000000000, - SDSBCK = 0.000000000000000E+000, - SDSABK = 1.50000000000000, - SDSPBK = 4.00000000000000, - SDSBINT = 0.300000011920929, - SDSHCK = 1.50000000000000, - SDSDTH = 80.0000000000000, - SDSCOS = 2.00000000000000, - SDSBRF1 = 0.500000000000000, - SDSBRFDF = 0.000000000000000E+000, - SDSBM0 = 1.00000000000000, - SDSBM1 = 0.000000000000000E+000, - SDSBM2 = 0.000000000000000E+000, - SDSBM3 = 0.000000000000000E+000, - SDSBM4 = 0.000000000000000E+000, - SDSHFGEN = 0.000000000000000E+000, - SDSLFGEN = 0.000000000000000E+000, - WHITECAPWIDTH = 0.300000011920929, - FXINCUT = 0.000000000000000E+000, - FXDSCUT = 0.000000000000000E+000, -/ - -&NUMS - ICOMP = 3 - ! This parameter controls the way how the splitting is done and whether implicit or explicit schemes are used for spectral advection - ! ICOMP = 0 - ! This means that all dimensions are integrated using explicit methods. Similar - ! to WW3, actually the same schemes are available in WW3 4.1. - ! ICOMP = 1 - ! This mean that advection in geographical space is done using implicit - ! Methods, source terms and spectral space are still integrated as done in - ! WW3. - ! ICOMP = 2 - ! This means that the advection is done using implicit methods and that the - ! source terms are integrated semi-implicit using Patankar rules and linearized - ! source terms as done in SWAN. Spectral part is still a fractional step - ! ICOMP = 3: fully implicit and no splitting - - AMETHOD = 7 - ! AMETHOD controls the different Methods in geographical space - ! AMETHOD = 0 - ! No Advection in geo. Space - ! 
AMETHOD = 1 - ! Explicit N-Scheme for ICOMP = 0 and Implicit N-Scheme for ICOMP > 0 - ! AMETHOD = 2 - ! PSI-Scheme for ICOMP = 0 and Implicit - ! Crank-Nicholson N-Scheme for ICOMP > 0 - ! AMETHOD = 3 - ! LFPSI Scheme for ICOMP = 0 and Implicit two time level N2 scheme for ICOMP > 0 - - ! AMETHOD = 4 - ! Like AMETHOD = 1 but using PETSc based on small matrices MNP**2. this can be efficient on small to medium scale cluster up to say 128 Nodes. - - ! AMETHOD = 5 - ! Like AMETHOD = 1 but using PETSc and assembling the full matrix and the source terms at once (MNP * MDC * MSC)**2. number of equations - ! this is for large scale applications - - ! Remark for AMETHOD = 4 and 5. This methods are new and only tested on a few cases where the results look reasonable and do not depend on the number of CPU's which - ! valdiates the correct implementation. The scaling performance is anticipated to be "quite poor" at this time. Many different consituents influence the parallel speedup. - ! Please let me know all the information you have in order to improve and accelarate the developement of implicit parallel WWM-III. - ! Have fun ... Aron and Thomas. - ! AMETHOD = 6 - BCGS Solver - ! AMETHOD = 7 - GAUSS and JACOBI SOLVER - SMETHOD = 1 - ! This switch controls the way the source terms are integrated. 0: no source terms; - ! 1: splitting using RK-3 and SI for fast and slow modes 2: semi-implicit; - ! 3: R-K3 (if ICOMP=0 or 1) - slow; 4: Dynamic Splitting (experimental) - - DMETHOD = 2 - ! This switch controls the numerical method in directional space. - ! DMETHOD = 0 - ! No advection in directional space - ! DMETHOD = 1 - ! Crank-Nicholson (RTHETA = 0.5) or Euler Implicit scheme (RTHETA = 1.0) - ! DMEHOD = 2 - ! Ultimate Quickest as in WW3 (usually best) - ! DMETHOD = 3 - ! RK5-WENO - ! DMETHOD = 4 - ! Explicit FVM Upwind scheme - MELIM = 1 ! Source Term Limiter on/off (1/0) default values = 1 - LITERSPLIT = F ! T: double Strang split; F: simple split (more efficienct). 
Default: F - - LFILTERTH = F - ! LFILTERTH: use a CFL filter to limit the advection vel. In directional space. This is similar to WW3. - ! Mostly not used. WWMII is always stable. - MAXCFLTH = 1.0 ! Max Cfl in Theta space; used only if LFILTERTH=T - FMETHOD = 1 - ! This switch controls the numerical method used in freq. space - ! = 0 - ! No Advection in spectral space - ! = 1 - ! Ultimate Quickest as in WW3 (best) - LFILTERSIG = F ! Limit the advection velocitiy in freq. space (usually F) - MAXCFLSIG = 1.0 ! Max Cfl in freq. space; used only if LFILTERSIG=T - LDIFR = F ! Use phase decoupled diffraction approximation according to Holthuijsen et al. (2003) (usually T; if crash, use F) - IDIFFR = 1 ! Extended WAE accounting for higher order effects WAE becomes nonlinear; 1: Holthuijsen et al. ; 2: Liau et al. ; 3: Toledo et al. (in preparation) - LCONV = F ! Estimate convergence criterian and write disk (quasi-steady - qstea.out) - LCFL = F ! Write out CFL numbers; use F to save time - NQSITER = 1 ! # of quasi-steady (Q-S) sub-divisions within each WWM time step (trial and errors) - QSCONV1 = 0.98 ! Number of grid points [%/100] that have to fulfill abs. wave height criteria EPSH1 - QSCONV2 = 0.98 ! Number of grid points [%/100] that have to fulfill rel. wave height criteria EPSH2 - QSCONV3 = 0.98 ! Number of grid points [%/100] that have to fulfill sum. rel. wave action criteria EPSH3 - QSCONV4 = 0.98 ! Number of grid points [%/100] that have to fulfill rel. avg. wave steepness criteria EPSH4 - QSCONV5 = 0.98 ! Number of grid points [%/100] that have to fulfill avg. rel. wave period criteria EPSH5 - - LEXPIMP = F ! Use implicit schemes for freq. lower than given below by FREQEXP; used only if ICOMP=0 - FREQEXP = 0.1 ! Minimum frequency for explicit schemes; only used if LEXPIMP=T and ICOMP=0 - EPSH1 = 0.01 ! Convergence criteria for rel. wave height ! EPSH1 < CONVK1 = REAL(ABS(HSOLD(IP)-HS2)/HS2) - EPSH2 = 0.01 ! Convergence criteria for abs. wave height ! 
EPSH2 < CONVK2 = REAL(ABS(HS2-HSOLD(IP))) - EPSH3 = 0.01 ! Convergence criteria for the rel. sum of wave action ! EPSH3 < CONVK3 = REAL(ABS(SUMACOLD(IP)-SUMAC)/SUMAC) - EPSH4 = 0.01 ! Convergence criteria for the rel. avg. wave steepness criteria ! EPSH4 < CONVK4 = REAL(ABS(KHS2-KHSOLD(IP))/KHSOLD(IP)) - EPSH5 = 0.01 ! Convergence criteria for the rel. avg. waveperiod ! EPSH5 < REAL(ABS(TM02-TM02OLD(IP))/TM02OLD(IP)) - LVECTOR = F ! Use optmized propagation routines for large high performance computers e.g. at least more than 128 CPU. Try LVECTOR=F first. - IVECTOR = 2 ! USed if LVECTOR=T; Different flavours of communications - ! LVECTOR = 1; same propagation style as if LVECTOR = F, this is for testing and development - ! LVECTOR = 2; all spectral bins are propagated with the same time step and communications is done only once per sub-iteration - ! LVECTOR = 3; all directions with the same freq. are propgated using the same time step the communications is done for each freq. - ! LVECTOR = 4; 2 but for mixed open-mpi, code has to be compiled with -openmp - ! LVECTOR = 5; 3 but for mixed open-mpi, code has to be compiled with -openmp - ! LVECTOR = 6; same as 2 but highly optmizied with respect to memory usage, of course it is must less efficient than 2 - ! remarks: if you are using this routines be aware that the memory amount that is used is approx. for LVECTOR 1-5 arround - ! 24 * MSC * MDC * MNP, so if you are trying this on 1 CPU you get a segmentation fault if your system has not enough memory or - ! if your system is not properly configured it may results into the fact that your computer starts blocking since it try's to swap to disk - ! The total amount of memoery used per CPU = 24 * MSC * MDC * MNP / No.CPU - LADVTEST = F ! for testing the advection schemes, testcase will be added soon - LCHKCONV = F ! needs to set to .true. for quasi-steady mode. in order to compute the QSCONVi criteria and check them - DTMIN_DYN = 1. ! min. time step (sec?) 
for dynamic integration, this controls in SMETHOD the smallest time step for the triads, DT = 1.s is found to work well. - NDYNITER = 100, ! max. iteration for dyn. scheme afterwards the limiter is applied in the last step, for SMETHOD .eq. this controls the integration of the triad interaction terms, which is done dynamically. - DTMIN_SIN = 1. ! min. time steps for the full fractional step method, where each source term is integrated with its own fractional step - DTMIN_SNL4 = 1. ! - DTMIN_SDS = 1. ! - DTMIN_SNL3 = 1. ! - DTMIN_SBR = 0.10 ! - DTMIN_SBF = 1.0 ! - NDYNITER_SIN = 10, ! max. iterations for each source term in the fractional step approach. - NDYNITER_SNL4 = 10, ! - NDYNITER_SDS = 10, ! - NDYNITER_SBR = 10, ! - NDYNITER_SNL3 = 10, ! - NDYNITER_SBF = 10, ! - ! 1: use PETSC - WAE_SOLVERTHR = 1.e-9, ! Threshold for the Block-Jacobi or Block-Gauss-Seider solver - MAXITER = 500, ! Max. number of iterations - PMIN = 1., ! Max. percentage of non-converged grid points - LNANINFCHK = F, ! Check for NaN and INF; usually turned off for efficiency - LZETA_SETUP = F, ! Compute wave setup (simple momentum eq.) - ZETA_METH = 0, ! Method for wave setup, Mathieu please explain! - LSOUBOUND = F - BLOCK_GAUSS_SEIDEL = T, ! Use the Gauss Seidel on each computer block. The result seems to be faster and use less memory But the # of iterations depends on the number of processors - LNONL = F ! Solve the nonlinear system using simpler algorithm (Patankar) - ASPAR_LOCAL_LEVEL = 0 ! Aspar locality level (0-10; check with your system) - L_SOLVER_NORM = F ! Compute solver norm ||A*x-b|| as termination - ! check of jacobi-Gauss-Seidel solver. Will increas cost if T - LACCEL = F -/ - - -! output of statistical variables over the whole domain at specified times. -&HISTORY - BEGTC = '20180830.000000' ! Start output time, yyyymmdd. hhmmss; - ! must fit the simulation time otherwise no output. - ! Default is same as PROC%BEGTC - DELTC = 1 ! 
Time step for output; if smaller than simulation time step, the latter is used (output every step for better 1D 2D spectra analysis) - UNITC = 'SEC' ! Unit - ENDTC = '20181030.000000' ! Stop time output, yyyymmdd. hhmmss - ! Default is same as PROC%ENDC - DEFINETC = 86400 ! Time scoop (sec) for history files - ! If unset or set to a negative value - ! then only one file is generated - ! otherwise, for example for 86400 - ! daily output files are created. - OUTSTYLE = 'NO' ! output option - use 'NO' for no output - ! 'NC' for netcdf output - ! 'XFN' for XFN output (default) - ! 'SHP' for DARKO SHP output - MULTIPLEOUT = 0 ! 0: output in a single netcdf file - ! MPI_reduce is used (default) - ! 1: output in separate netcdf files - ! each associated with one process - USE_SINGLE_OUT = T ! T: Use single precision in the - ! output of model variables (default) - PARAMWRITE = T ! T: Write the physical parametrization - ! and chosen numerical method - ! in the netcdf file (default T) - GRIDWRITE = T ! T/F: Write the grid in the netcdf history file (default T) - PRINTMMA = F ! T/F: Print minimum, maximum and average - ! value of statistics during runtime - ! (Default F) - ! (Requires a MPI_REDUCE) - FILEOUT = 'wwm_hist.dat' - ! Below is selection for all variables. Default is F for all variables. - HS = F ! significant wave height - TM01 = F ! mean period - TM02 = F ! zero-crossing mean period - KLM = F ! mean wave number - WLM = F ! mean wave length - ETOTC = F ! Variable ETOTC - ETOTS = F ! Variable ETOTS - DM = F ! mean wave direction - DSPR = F ! directional spreading - TPPD = F ! direaction of the peak ... check source code - TPP = F ! peak period - CPP = F ! peak phase vel. - WNPP = F ! peak wave number - CGPP = F ! peak group speed - KPP = F ! peak wave number - LPP = F ! peak wave length - PEAKD = F ! peak direction - PEAKDSPR = F ! peak directional spreading - DPEAK = F ! peak direction - UBOT = F ! bottom exc. vel. - ORBITAL = F ! bottom orbital vel. 
- BOTEXPER = F ! bottom exc. - TMBOT = F ! bottom period - URSELL = F ! Ursell number - UFRIC = F ! air friction velocity - Z0 = F ! air roughness length - ALPHA_CH = F ! Charnoch coefficient for air - WINDX = F ! Wind in X direction - WINDY = F ! Wind in Y direction - CD = F ! Drag coefficient - CURRTX = F ! current in X direction - CURRTY = F ! current in Y direction - WATLEV = F ! water level - WATLEVOLD = F ! water level at previous time step - DEPDT = F ! change of water level in time - DEP = F ! depth - TAUW = F ! surface stress from the wave - TAUHF = F ! high frequency surface stress - TAUTOT = F ! total surface stress - STOKESSURFX = F ! Surface Stokes drift in X direction - STOKESSURFY = F ! Surface Stokes drift in X direction - STOKESBAROX = F ! Barotropic Stokes drift in X direction - STOKESBAROY = F ! Barotropic Stokes drift in Y direction - RSXX = F ! RSXX potential of LH - RSXY = F ! RSXY potential of LH - RSYY = F ! RSYY potential of LH - CFL1 = F ! CFL number 1 - CFL2 = F ! CFL number 2 - CFL3 = F ! CFL number 3 -/ - -&STATION - BEGTC = '20180830.000000' ! Start simulation time, yyyymmdd. hhmmss; must fit the simulation time otherwise no output - ! Default is same as PROC%BEGTC - DELTC = 600 ! Time step for output; if smaller than simulation time step, the latter is used (output every step for better 1D 2D spectra analysis) - UNITC = 'SEC' ! Unit - ENDTC = '20181030.000000' ! Stop time simulation, yyyymmdd. hhmmss - ! Default is same as PROC%ENDC - DEFINETC = 86400 ! Time for definition of station files - ! If unset or set to a negative value - ! then only one file is generated - ! otherwise, for example for 86400 - ! daily output files are created. - OUTSTYLE = 'NO' ! output option - ! 'NO' no output - ! 'STE' classic station output (default) - ! 'NC' for netcdf output - MULTIPLEOUT = 0 ! 0: output in a single netcdf file - ! MPI_reduce is used (default) - ! 1: output in separate netcdf files - ! 
each associated with one process - USE_SINGLE_OUT = T ! T: Use single precision in the - ! output of model variables (default) - PARAMWRITE = T ! T: Write the physical parametrization - ! and chosen numerical method - ! in the netcdf file (default T) - FILEOUT = 'wwm_sta.dat' !not used - LOUTITER = F - IOUTS = 15, - NOUTS = P-1, P-2, P-3, P-4, P-5, P-6, P-7, P-8, P-9, P-10, P-11, P-12, P-13, P-14, P-15 - XOUTS = -76.0460000000000 , -76.7780000000000 , -75.8100000000000 , -75.7200000000000 , -74.8420000000000 , - -74.7030000000000 , -75.3300000000000 , -72.6310000000000 , -74.8350000000000 , -69.2480000000000 , - -72.6000000000000 - YOUTS = 39.152, 38.556, 38.033, 37.551, - 36.9740000000000 , 37.2040000000000 , 37.0230000000000 , 36.9150000000000 , 36.6110000000000 , - 38.4610000000000 , 35.7500000000000 , 34.5610000000000 , 31.8620000000000 , 40.5030000000000 , - 39.5840000000000 - CUTOFF = 15*0.44 ! cutoff freq (Hz) for each station - consistent with buoys - LSP1D = T ! 1D spectral station output - LSP2D = F ! 2D spectral station output - LSIGMAX = T ! Adjust the cut-freq. for the output (e.g. consistent with buoy cut-off freq.) - AC = F ! spectrum - WK = F ! variable WK - ACOUT_1D = F ! variable ACOUT_1D - ACOUT_2D = F ! variable ACOUT_2D - HS = F ! significant wave height - TM01 = F ! mean period - TM02 = F ! zero-crossing mean period - KLM = F ! mean wave number - WLM = F ! mean wave length - ETOTC = F ! Variable ETOTC - ETOTS = F ! Variable ETOTS - DM = F ! mean wave direction - DSPR = F ! directional spreading - TPPD = F ! Discrete Peak Period - TPP = F ! Peak Period - CPP = F - WNPP = F ! peak wave number - CGPP = F ! peak group speed - KPP = F ! peak wave number - LPP = F ! peak - PEAKD = F ! peak direction - PEAKDSPR = F ! peak directional spreading - DPEAK = F - UBOT = F - ORBITAL = F - BOTEXPER = F - TMBOT = F - URSELL = F ! Ursell number - UFRIC = F ! air friction velocity - Z0 = F ! air roughness length - ALPHA_CH = F ! 
Charnoch coefficient for air - WINDX = F ! Wind in X direction - WINDY = F ! Wind in Y direction - CD = F ! Drag coefficient - CURRTX = F ! current in X direction - CURRTY = F ! current in Y direction - WATLEV = F ! water level - WATLEVOLD = F ! water level at previous time step - DEPDT = F ! change of water level in time - DEP = F ! depth - TAUW = F ! surface stress from the wave - TAUHF = F ! high frequency surface stress - TAUTOT = F ! total surface stress - STOKESSURFX = F ! Surface Stokes drift in X direction - STOKESSURFY = F ! Surface Stokes drift in X direction - STOKESBAROX = F ! Barotropic Stokes drift in X direction - STOKESBAROY = F ! Barotropic Stokes drift in Y direction - RSXX = F ! RSXX potential of LH - RSXY = F ! RSXY potential of LH - RSYY = F ! RSYY potential of LH - CFL1 = F ! CFL number 1 - CFL2 = F ! CFL number 2 - CFL3 = F ! CFL number 3 -/ - -&HOTFILE - LHOTF = F ! Write hotfile - FILEHOT_OUT = 'wwm_hot_out' !'.nc' suffix will be added - BEGTC = '20030908.000000' !Starting time of hotfile writing. With ihot!=0 in SCHISM, - !this will be whatever the new hotstarted time is (even with ihot=2) - DELTC = 86400. ! time between hotfile writes - UNITC = 'SEC' ! unit used above - ENDTC = '20031008.000000' ! Ending time of hotfile writing (adjust with BEGTC) - LCYCLEHOT = T ! Applies only to netcdf - ! If T then hotfile contains 2 last records. - ! If F then hotfile contains N record if N outputs - ! have been done - ! For binary only one record. - HOTSTYLE_OUT = 2 ! 1: binary hotfile of data as output - ! 2: netcdf hotfile of data as output (default) - MULTIPLEOUT = 0 ! 0: hotfile in a single file (binary or netcdf) - ! MPI_REDUCE is then used and thus you'd avoid too freq. output - ! 1: hotfiles in separate files, each associated - ! with one process - FILEHOT_IN = 'wwm_hot_in.nc' ! (Full) hot file name for input - HOTSTYLE_IN = 2 ! 1: binary hotfile of data as input - ! 2: netcdf hotfile of data as input (default) - IHOTPOS_IN = 1 ! 
Position in hotfile (only for netcdf) - ! for reading - MULTIPLEIN = 0 ! 0: read hotfile from one single file - ! 1: read hotfile from multiple files (must use same # of CPU?) -/ - -&NESTING - L_NESTING = F, ! whether to produce nesting data or not - L_HOTFILE = F ! whether to produce an hotfile as output - L_BOUC_PARAM = F ! whether to produce a parametric boundary condition to be used by the nested grids - L_BOUC_SPEC = F ! whether to produce a spectral boundary condition to be used by the nested grids - NB_GRID_NEST = 0 ! number of nested grids. All lines below must contain NB_GRID_NEST entries. -! ListIGRIDTYPE = ! list of integers giving the type of nested grid -! ListFILEGRID = ! list of strings for the grid file names. -! ListFILEBOUND = ! list of boundary file names to be used -! ListBEGTC = ! list of beginning time of the runs (used for hotfile and boundary) -! ListDELTC = ! list of DELTC of the boundary output -! ListUNITC = ! list of UNITS of the boundary output -! ListENDTC = ! list of ENDTC of the boundary output -! ListPrefix = ! list of prefix used for the output variable -/ - -! only used with AMETHOD 4 or 5 -&PETScOptions - ! Summary of Sparse Linear Solvers Available from PETSc: http://www.mcs.anl.gov/petsc/documentation/linearsolvertable.html - KSPTYPE = 'LGMRES' - ! This parameter controls which solver is used. This is the same as petsc command line parameter -ksp_type. - ! KSPTYPE = 'GMRES' - ! Implements the Generalized Minimal Residual method. (Saad and Schultz, 1986) with restart - ! KSPTYPE = 'LGMRES' - ! Augments the standard GMRES approximation space with approximations to the error from previous restart cycles. - ! KSPTYPE = 'DGMRES' - ! In this implementation, the adaptive strategy allows to switch to the deflated GMRES when the stagnation occurs. - ! KSPTYPE = 'PGMRES' - ! Implements the Pipelined Generalized Minimal Residual method. Only PETSc 3.3 - ! KSPTYPE = 'KSPBCGSL' - ! 
Implements a slight variant of the Enhanced BiCGStab(L) algorithm - - RTOL = 1.E-20 ! the relative convergence tolerance (relative decrease in the residual norm) - ABSTOL = 1.E-20 ! the absolute convergence tolerance (absolute size of the residual norm) - DTOL = 10000. ! the divergence tolerance - MAXITS = 1000 ! maximum number of iterations to use - - INITIALGUESSNONZERO = F ! Tells the iterative solver that the initial guess is nonzero; otherwise KSP assumes the initial guess is to be zero - GMRESPREALLOCATE = T ! Causes GMRES and FGMRES to preallocate all its needed work vectors at initial setup rather than the default, which is to allocate them in chunks when needed. - - - PCTYPE = 'SOR' - ! This parameter controls which preconditioner is used. This is the same as petsc command line parameter -pc_type - ! PCTYPE = 'SOR' - ! (S)SOR (successive over relaxation, Gauss-Seidel) preconditioning - ! PCTYPE = 'ASM' - ! Use the (restricted) additive Schwarz method, each block is (approximately) solved with its own KSP object. - ! PCTYPE = 'HYPRE' - ! Allows you to use the matrix element based preconditioners in the LLNL package hypre - ! PCTYPE = 'SPAI' - ! Use the Sparse Approximate Inverse method of Grote and Barnard as a preconditioner - ! PCTYPE = 'NONE' - ! This is used when you wish to employ a nonpreconditioned Krylov method. 
-/ - - diff --git a/docker/pyschism/docker/setup_ensemble.py b/docker/pyschism/docker/setup_ensemble.py deleted file mode 100644 index d2c7d13..0000000 --- a/docker/pyschism/docker/setup_ensemble.py +++ /dev/null @@ -1,276 +0,0 @@ -import os -import glob -import logging -import tempfile -from argparse import ArgumentParser -from copy import deepcopy -from datetime import datetime, timedelta -from pathlib import Path - - -import geopandas as gpd -import pandas as pd -from coupledmodeldriver import Platform -from coupledmodeldriver.configure.forcings.base import TidalSource -from coupledmodeldriver.configure import ( - BestTrackForcingJSON, - TidalForcingJSON, - NationalWaterModelFocringJSON, -) -from coupledmodeldriver.generate import SCHISMRunConfiguration -from coupledmodeldriver.generate.schism.script import SchismEnsembleGenerationJob -from coupledmodeldriver.generate import generate_schism_configuration -from stormevents import StormEvent -from stormevents.nhc.track import VortexTrack -from pyschism.mesh import Hgrid -from pyschism.forcing import NWM -from ensembleperturbation.perturbation.atcf import perturb_tracks - -import wwm - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -EFS_MOUNT_POINT = Path('~').expanduser() / 'app/io' - -def main(args): - - track_path = EFS_MOUNT_POINT / args.track_file - out_dir = EFS_MOUNT_POINT / args.output_directory - dt_rng_path = EFS_MOUNT_POINT / args.date_range_file - tpxo_dir = EFS_MOUNT_POINT / args.tpxo_dir - nwm_file = EFS_MOUNT_POINT / args.nwm_file - mesh_dir = EFS_MOUNT_POINT / args.mesh_directory - hr_prelandfall = args.hours_before_landfall - use_wwm = args.use_wwm - - workdir = out_dir - mesh_file = mesh_dir / 'mesh_w_bdry.grd' - - workdir.mkdir(exist_ok=True) - - dt_data = pd.read_csv(dt_rng_path, delimiter=',') - date_1, date_2 = pd.to_datetime(dt_data.date_time).dt.strftime( - "%Y%m%d%H").values - model_start_time = datetime.strptime(date_1, "%Y%m%d%H") - model_end_time = 
datetime.strptime(date_2, "%Y%m%d%H") - spinup_time = timedelta(days=2) - - # More processing for caching - with tempfile.TemporaryDirectory() as tmpdir: - # NOTE: The output of write is not important. Calling - # `write` results in the relevant files being cached! - nwm = NWM(nwm_file=nwm_file, cache=True) - nwm.write( - output_directory=tmpdir, - gr3=Hgrid.open(mesh_file, crs=4326), - start_date=model_start_time - spinup_time, - end_date=model_end_time - model_start_time + spinup_time, - overwrite=True, - ) - nwm.pairings.save_json( - sources=workdir / 'source.json', - sinks=workdir / 'sink.json' - ) - - forcing_configurations = [] - forcing_configurations.append(TidalForcingJSON( - resource=tpxo_dir / 'h_tpxo9.v1.nc', - tidal_source=TidalSource.TPXO)) - forcing_configurations.append( - NationalWaterModelFocringJSON( - resource=nwm_file, - cache=True, - source_json=workdir / 'source.json', - sink_json=workdir / 'sink.json', - pairing_hgrid=mesh_file - ) - ) - forcing_configurations.append( - BestTrackForcingJSON( - nhc_code=f'{args.name}{args.year}', - interval_seconds=3600, - nws=20 - ) - ) - - - platform = Platform.LOCAL - - perturb_begin = model_start_time - unpertubed = None - if hr_prelandfall is not None and hr_prelandfall >= 0: - # Calculate time to landfall based on track and coastline - # and then perturb ONLY from the requested hours before landfall - countries = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) - usa = countries[countries.name.isin( - ["United States of America", "Puerto Rico"] - )] - - orig_track = VortexTrack.from_file(track_path) - track_dat = orig_track.data - onland = track_dat.geometry.set_crs(4326).intersects(usa.unary_union) - if onland.any(): - landfall_idx = onland[onland].index.min() - else: - logger.warn("The track doesn't cross US territories!") - landfall_idx = 0 - landfall_time = pd.Timestamp( - track_dat.iloc[landfall_idx].datetime - ) - toi = landfall_time - timedelta(hours=hr_prelandfall) - perturb_idx = 
(track_dat.datetime - toi).abs().argsort().iloc[0] - - if perturb_idx > 0: - # If only part of the track needs to be updated - unpertubed = deepcopy(orig_track) - unpertubed.end_date = track_dat.iloc[perturb_idx - 1].datetime - - # NOTE: Perturbation dataframe is truncated based on the - # passed `perturb_begin` to `perturb_tracks(...)` - perturb_begin = track_dat.iloc[perturb_idx].datetime - - perturbations = perturb_tracks( - perturbations=args.num_perturbations, - directory=workdir/'track_files', - storm=track_path, - variables=[ - 'cross_track', - 'along_track', - 'radius_of_maximum_winds', - 'max_sustained_wind_speed', - ], - sample_from_distribution=args.sample_from_distribution, - sample_rule=args.sample_rule, - quadrature=args.quadrature, - start_date=perturb_begin, - end_date=model_end_time, - overwrite=True - ) - - if perturb_begin != model_start_time: - # Read generated tracks and append to unpertubed section - perturbed_tracks = glob.glob(str(workdir/'track_files'/'*.22')) - for pt in perturbed_tracks: - if 'original' in pt: - continue -# perturbed_segment = pd.read_csv(pt, header=None) - perturbed_segment = VortexTrack.from_file(pt) - full_track = pd.concat( - (unpertubed.fort_22(), perturbed_segment.fort_22()), - ignore_index=True - ) - # Overwrites the perturbed-segment-only file - full_track.to_csv(pt, index=False, header=False) - - - run_config_kwargs = { - 'mesh_directory': mesh_dir, - 'modeled_start_time': model_start_time, - 'modeled_end_time': model_end_time, - 'modeled_timestep': timedelta(seconds=150), - 'tidal_spinup_duration': spinup_time, - 'forcings': forcing_configurations, - 'perturbations': perturbations, - 'platform': platform, -# 'schism_executable': 'pschism_PAHM_TVD-VL' - } - - run_configuration = SCHISMRunConfiguration( - **run_config_kwargs, - ) - run_configuration['schism']['hgrid_path'] = mesh_file - - run_configuration.write_directory( - directory=workdir, absolute=False, overwrite=False, - ) - - # Now generate the setup - 
generate_schism_configuration(**{ - 'configuration_directory': workdir, - 'output_directory': workdir, - 'relative_paths': True, - 'overwrite': True, - 'parallel': True - }) - - if use_wwm: - wwm.setup_wwm(mesh_file, workdir, ensemble=True) - - -def parse_arguments(): - argument_parser = ArgumentParser() - - argument_parser.add_argument( - "--track-file", - help="path to the storm track file for parametric wind setup", - type=Path, - required=True - ) - - argument_parser.add_argument( - '--output-directory', - default=None, - required=True, - help='path to store generated configuration files' - ) - argument_parser.add_argument( - "--date-range-file", - required=True, - type=Path, - help="path to the file containing simulation date range" - ) - argument_parser.add_argument( - '-n', '--num-perturbations', - type=int, - required=True, - help='path to input mesh (`hgrid.gr3`, `manning.gr3` or `drag.gr3`)', - ) - argument_parser.add_argument( - "--tpxo-dir", - required=True, - type=Path, - help="path to the TPXO dataset directory", - ) - argument_parser.add_argument( - "--nwm-file", - required=True, - type=Path, - help="path to the NWM hydrofabric dataset", - ) - argument_parser.add_argument( - '--mesh-directory', - required=True, - help='path to input mesh (`hgrid.gr3`, `manning.gr3` or `drag.gr3`)', - ) - argument_parser.add_argument( - "--sample-from-distribution", action="store_true" - ) - argument_parser.add_argument( - "--sample-rule", type=str, default='random' - ) - argument_parser.add_argument( - "--quadrature", action="store_true" - ) - argument_parser.add_argument( - "-b", "--hours-before-landfall", type=int - ) - argument_parser.add_argument( - "--use-wwm", action="store_true" - ) - - argument_parser.add_argument( - "name", help="name of the storm", type=str) - - argument_parser.add_argument( - "year", help="year of the storm", type=int) - - - args = argument_parser.parse_args() - - return args - - -if __name__ == "__main__": - main(parse_arguments()) diff 
--git a/docker/pyschism/docker/setup_model.py b/docker/pyschism/docker/setup_model.py deleted file mode 100755 index 1c77f46..0000000 --- a/docker/pyschism/docker/setup_model.py +++ /dev/null @@ -1,533 +0,0 @@ -#!/usr/bin/env python -import os -import pathlib -from datetime import datetime, timedelta, timezone -import logging -import argparse -import shutil -import hashlib -import fcntl -from time import time -import tempfile -from contextlib import contextmanager, ExitStack - -import numpy as np -import pandas as pd -import geopandas as gpd -import f90nml -from matplotlib.transforms import Bbox - -from pyschism import dates -from pyschism.enums import NWSType -from pyschism.driver import ModelConfig -from pyschism.forcing.bctides import iettype, ifltype -from pyschism.forcing.nws import GFS, HRRR, ERA5, BestTrackForcing -from pyschism.forcing.nws.nws2 import hrrr3 -from pyschism.forcing.source_sink import NWM -from pyschism.mesh import Hgrid, gridgr3 -from pyschism.mesh.fgrid import ManningsN -from pyschism.stations import Stations - -import wwm - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -CDSAPI_URL = "https://cds.climate.copernicus.eu/api/v2" -EFS_MOUNT_POINT = pathlib.Path('~').expanduser() / 'app/io' -TPXO_LINK_PATH = pathlib.Path('~').expanduser() / '.local/share/tpxo' -NWM_LINK_PATH = pathlib.Path('~').expanduser() / '.local/share/pyschism/nwm' - - -@contextmanager -def pushd(directory): - '''Temporarily modify current directory - - Parameters - ---------- - directory: str, pathlike - the directory to use as cwd during this context - - Returns - ------- - None - ''' - - origin = os.getcwd() - try: - os.chdir(directory) - yield - - finally: - os.chdir(origin) - - -def get_main_cache_path(cache_dir, storm, year): - - return cache_dir / f'{storm.lower()}_{year}' - -def get_meteo_cache_path(source, main_cache_path, bbox, start_date, end_date): - - m = hashlib.md5() - m.update(np.round(bbox.corners(), decimals=2).tobytes()) - 
m.update(start_date.strftime("%Y-%m-%d:%H:%M:%S").encode('utf8')) - m.update(end_date.strftime("%Y-%m-%d:%H:%M:%S").encode('utf8')) - - meteo_cache_path = main_cache_path / f"{source}_{m.hexdigest()}" - return meteo_cache_path - - -@contextmanager -def cache_lock(cache_path): - - if not cache_path.exists(): - cache_path.mkdir(parents=True, exist_ok=True) - - with open(cache_path / ".cache.lock", "w") as fp: - try: - fcntl.flock(fp.fileno(), fcntl.LOCK_EX) - yield - - finally: - fcntl.flock(fp.fileno(), fcntl.LOCK_UN) - -def from_meteo_cache(meteo_cache_path, sflux_dir): - - # TODO: Generalize - # Redundant check - if not meteo_cache_path.exists(): - return False - - contents = list(meteo_cache_path.iterdir()) - if not any(p.match("sflux_inputs.txt") for p in contents): - return False - - logger.info("Creating sflux from cache...") - - # Copy files from cache dir to sflux dir - for p in contents: - dest = sflux_dir / p.relative_to(meteo_cache_path) - if p.is_dir(): - shutil.copytree(p, dest) - else: - shutil.copy(p, dest) - - logger.info("Done copying cached sflux.") - - return True - - -def copy_meteo_cache(sflux_dir, meteo_cache_path): - - # TODO: Generalize - logger.info("Copying cache files to main cache location...") - # Copy files from sflux dir to cache dir - - # Clean meteo_cache_path if already populated? 
- contents_dst = list(meteo_cache_path.iterdir()) - contents_dst = [p for p in contents_dst if p.suffix != ".lock"] - for p in contents_dst: - if p.is_dir(): - shutil.rmtree(p) - else: - os.remove(p) - - # Copy files from cache dir to sflux dir - contents_src = list(sflux_dir.iterdir()) - for p in contents_src: - dest = meteo_cache_path / p.relative_to(sflux_dir) - if p.is_dir(): - shutil.copytree(p, dest) - else: - shutil.copy(p, dest) - - logger.info("Done copying cache files to main cache location.") - -def setup_schism_model( - mesh_path, - domain_bbox_path, - date_range_path, - station_info_path, - out_dir, - main_cache_path, - parametric_wind=False, - nhc_track_file=None, - storm_id=None, - use_wwm=False, - ): - - - domain_box = gpd.read_file(domain_bbox_path) - atm_bbox = Bbox(domain_box.to_crs('EPSG:4326').total_bounds.reshape(2,2)) - - schism_dir = out_dir - schism_dir.mkdir(exist_ok=True, parents=True) - logger.info("Calculating times and dates") - dt = timedelta(seconds=150.) - - # Use an integer for number of steps or a timedelta to approximate - # number of steps internally based on timestep - nspool = timedelta(minutes=20.) - - - # measurement days +7 days of simulation: 3 ramp, 2 prior - # & 2 after the measurement dates - dt_data = pd.read_csv(date_range_path, delimiter=',') - date_1, date_2 = pd.to_datetime(dt_data.date_time).dt.strftime( - "%Y%m%d%H").values - date_1 = datetime.strptime(date_1, "%Y%m%d%H") - date_2 = datetime.strptime(date_2, "%Y%m%d%H") - - - # If there are no observation data, it's hindcast mode - hindcast_mode = (station_info_path).is_file() - if hindcast_mode: - # If in hindcast mode run for 4 days: 2 days prior to now to - # 2 days after. 
- logger.info("Setup hindcast mode") - start_date = date_1 - timedelta(days=2) - end_date = date_2 + timedelta(days=2) - else: - - logger.info("Setup forecast mode") - - # If in forecast mode then date_1 == date_2, and simulation - # will run for about 3 days: abou 1 day prior to now to 2 days - # last meteo (HRRR) cycle after. - # - # Since HRRR forecasts are 48 hours on 6-hour cycles, find an end - # date which is 48 hours after the latest cycle before now! Note - # that the last cycle upload to either AWS or NOMADS server might - # take MORE than 1 hour in realtime cases. Also the oldest - # cycle on NOMADS is t00z from previous day - last_meteo_cycle = np.datetime64( - pd.DatetimeIndex([date_2 - timedelta(hours=2)]).floor('6H').values[0], 'h' - ).tolist() - oneday_before_last_cycle = last_meteo_cycle - timedelta(days=1) - start_date = oneday_before_last_cycle.replace(hour=0) - end_date = last_meteo_cycle + timedelta(days=2) - - rnday = end_date - start_date - - dramp = timedelta(days=1.) - - hgrid = Hgrid.open(mesh_path, crs="epsg:4326") - fgrid = ManningsN.linear_with_depth( - hgrid, - min_value=0.02, max_value=0.05, - min_depth=-1.0, max_depth=-3.0) - - coops_stations = None - stations_file = station_info_path - if stations_file.is_file(): - st_data = np.genfromtxt(stations_file, delimiter=',') - coops_stations = Stations( - nspool_sta=nspool, - crs="EPSG:4326", - elev=True, u=True, v=True) - for coord in st_data: - coops_stations.add_station(coord[0], coord[1]) - - atmospheric = None - if parametric_wind: - # NOTE: SCHISM supports parametric ofcl forecast as well - if nhc_track_file is not None and nhc_track_file.is_file(): - atmospheric = BestTrackForcing.from_nhc_bdeck(nhc_bdeck=nhc_track_file) - elif storm_id is not None: - atmospheric = BestTrackForcing(storm=storm_id) - else: - ValueError("Storm track information is not provided!") - else: - # For hindcast ERA5 is used and for forecast - # GFS and hrrr3.HRRR. 
Neither ERA5 nor the GFS and - # hrrr3.HRRR combination are supported by nws2 mechanism - pass - - - logger.info("Creating model configuration ...") - config = ModelConfig( - hgrid=hgrid, - fgrid=fgrid, - iettype=iettype.Iettype3(database="tpxo"), - ifltype=ifltype.Ifltype3(database="tpxo"), - nws=atmospheric, - source_sink=NWM(), - ) - - if config.forcings.nws and getattr(config.forcings.nws, 'sflux_2', None): - config.forcings.nws.sflux_2.inventory.file_interval = timedelta(hours=6) - - logger.info("Creating cold start ...") - # create reference dates - coldstart = config.coldstart( - stations=coops_stations, - start_date=start_date, - end_date=start_date + rnday, - timestep=dt, - dramp=dramp, - dramp_ss=dramp, - drampwind=dramp, - nspool=timedelta(hours=1), - elev=True, - dahv=True, - ) - - logger.info("Writing to disk ...") - if not parametric_wind: - - # In hindcast mode ERA5 is used manually: temporary solution - - sflux_dir = (schism_dir / "sflux") - sflux_dir.mkdir(exist_ok=True, parents=True) - - # Workaround for ERA5 not being compatible with NWS2 object - meteo_cache_kwargs = { - 'bbox': atm_bbox, - 'start_date': start_date, - 'end_date': start_date + rnday - } - - if hindcast_mode: - meteo_cache_path = get_meteo_cache_path( - 'era5', main_cache_path, **meteo_cache_kwargs - ) - else: - meteo_cache_path = get_meteo_cache_path( - 'gfs_hrrr', main_cache_path, **meteo_cache_kwargs - ) - - with cache_lock(meteo_cache_path): - if not from_meteo_cache(meteo_cache_path, sflux_dir): - if hindcast_mode: - era5 = ERA5() - era5.write( - outdir=schism_dir / "sflux", - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True) - - else: - - - with ExitStack() as stack: - - # Just to make sure there are not permission - # issues for temporary data (e.g. 
HRRR tmpdir - # in current dir) - tempdir = stack.enter_context(tempfile.TemporaryDirectory()) - stack.enter_context(pushd(tempdir)) - - gfs = GFS() - gfs.write( - outdir=schism_dir / "sflux", - level=1, - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True - ) - - # If we should limit forecast to 2 days, then - # why not use old HRRR implementation? Because - # We have prior day, today and 1 day forecast (?) - # BUT the new implementation has issues getting - # 2day forecast! - hrrr = HRRR() - hrrr.write( - outdir=schism_dir / "sflux", - level=2, - start_date=start_date, - rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), - air=True, rad=True, prc=True, - bbox=atm_bbox, - overwrite=True - ) - -# hrrr3.HRRR( -# start_date=start_date, -# rnday=rnday.total_seconds() / timedelta(days=1).total_seconds(), -# record=2, -# bbox=atm_bbox -# ) -# for i, nc_file in enumerate(sorted(pathlib.Path().glob('*/*.nc'))): -# dst_air = schism_dir / "sflux" / f"sflux_air_2.{i:04d}.nc" -# shutil.move(nc_file, dst_air) -# pathlib.Path(schism_dir / "sflux" / f"sflux_prc_2.{i:04d}.nc").symlink_to( -# dst_air -# ) -# pathlib.Path(schism_dir / "sflux" / f"sflux_rad_2.{i:04d}.nc").symlink_to( -# dst_air -# ) - - - with open(schism_dir / "sflux" / "sflux_inputs.txt", "w") as f: - f.write("&sflux_inputs\n/\n") - - copy_meteo_cache(sflux_dir, meteo_cache_path) - - windrot = gridgr3.Windrot.default(hgrid) - windrot.write(schism_dir / "windrot_geo2proj.gr3", overwrite=True) - ## end of workaround - - # Workaround for bug #30 - coldstart.param.opt.wtiminc = coldstart.param.core.dt - coldstart.param.opt.nws = NWSType.CLIMATE_AND_FORECAST.value - ## end of workaround - - - - # Workaround for station bug #32 - if coops_stations is not None: - coldstart.param.schout.nspool_sta = int( - round(nspool.total_seconds() / coldstart.param.core.dt)) - ## end of workaround - - with ExitStack() 
as stack: - - # Just to make sure there are not permission - # issues for temporary data (e.g. HRRR tmpdir - # in current dir) - tempdir = stack.enter_context(tempfile.TemporaryDirectory()) - stack.enter_context(pushd(tempdir)) - - coldstart.write(schism_dir, overwrite=True) - - # Workardoun for hydrology param bug #34 - nm_list = f90nml.read(schism_dir / 'param.nml') - nm_list['opt']['if_source'] = 1 - nm_list.write(schism_dir / 'param.nml', force=True) - ## end of workaround - - ## Workaround to make sure outputs directory is copied from/to S3 - try: - os.mknod(schism_dir / "outputs" / "_") - except FileExistsError: - pass - ## end of workaround - - if use_wwm: - wwm.setup_wwm(mesh_path, schism_dir, ensemble=False) - - logger.info("Setup done") - -def main(args): - - storm_name = str(args.name).lower() - storm_year = str(args.year).lower() - param_wind = args.parametric_wind - - mesh_path = EFS_MOUNT_POINT / args.mesh_file - bbox_path = EFS_MOUNT_POINT / args.domain_bbox_file - dt_rng_path = EFS_MOUNT_POINT / args.date_range_file - st_loc_path = EFS_MOUNT_POINT / args.station_location_file - out_dir = EFS_MOUNT_POINT / args.out - nhc_track = None if args.track_file is None else EFS_MOUNT_POINT / args.track_file - cache_path = get_main_cache_path( - EFS_MOUNT_POINT / args.cache_dir, storm_name, storm_year - ) - tpxo_dir = EFS_MOUNT_POINT / args.tpxo_dir - nwm_dir = EFS_MOUNT_POINT / args.nwm_dir - use_wwm = args.use_wwm - - if TPXO_LINK_PATH.is_dir(): - shutil.rmtree(TPXO_LINK_PATH) - if NWM_LINK_PATH.is_dir(): - shutil.rmtree(NWM_LINK_PATH) - os.symlink(tpxo_dir, TPXO_LINK_PATH, target_is_directory=True) - os.symlink(nwm_dir, NWM_LINK_PATH, target_is_directory=True) - - - setup_schism_model( - mesh_path, - bbox_path, - dt_rng_path, - st_loc_path, - out_dir, - cache_path, - parametric_wind=param_wind, - nhc_track_file=nhc_track, - storm_id=f'{storm_name}{storm_year}', - use_wwm=use_wwm - ) - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - - 
- parser.add_argument( - "--parametric-wind", "-w", - help="flag to switch to parametric wind setup", action="store_true") - - parser.add_argument( - "--mesh-file", - help="path to the file containing computational grid", - type=pathlib.Path - ) - - parser.add_argument( - "--domain-bbox-file", - help="path to the file containing domain bounding box", - type=pathlib.Path - ) - - parser.add_argument( - "--date-range-file", - help="path to the file containing simulation date range", - type=pathlib.Path - ) - - parser.add_argument( - "--station-location-file", - help="path to the file containing station locations", - type=pathlib.Path - ) - - parser.add_argument( - "--cache-dir", - help="path to the cache directory", - type=pathlib.Path - ) - - parser.add_argument( - "--track-file", - help="path to the storm track file for parametric wind setup", - type=pathlib.Path - ) - - parser.add_argument( - "--tpxo-dir", - help="path to the TPXO database directory", - type=pathlib.Path - ) - - parser.add_argument( - "--nwm-dir", - help="path to the NWM stream vector database directory", - type=pathlib.Path - ) - - parser.add_argument( - "--out", - help="path to the setup output (solver input) directory", - type=pathlib.Path - ) - - parser.add_argument( - "--use-wwm", action="store_true" - ) - - parser.add_argument( - "name", help="name of the storm", type=str) - - parser.add_argument( - "year", help="year of the storm", type=int) - - - args = parser.parse_args() - - main(args) diff --git a/docker/pyschism/docker/wwm.py b/docker/pyschism/docker/wwm.py deleted file mode 100644 index 0ab04ef..0000000 --- a/docker/pyschism/docker/wwm.py +++ /dev/null @@ -1,276 +0,0 @@ -from __future__ import annotations -from copy import deepcopy -from datetime import datetime, timedelta -from pathlib import Path - -import f90nml -import numpy as np -from pyschism.mesh.base import Elements -from pyschism.mesh.base import Gr3 -from pyschism.mesh.gridgr3 import Gr3Field -from pyschism.param.param 
import Param - - -REFS = Path('~').expanduser() / 'app/refs' - -def setup_wwm(mesh_file: Path, setup_dir: Path, ensemble: bool): - '''Output is - - hgrid_WWM.gr3 - - param.nml - - wwmbnd.gr3 - - wwminput.nml - ''' - - - runs_dir = [setup_dir] - if ensemble: - spinup_dir = setup_dir/'spinup' - runs_dir = setup_dir.glob('runs/*') - - schism_grid = Gr3.open(mesh_file, crs=4326) - wwm_grid = break_quads(schism_grid) - wwm_bdry = Gr3Field.constant(wwm_grid, 0.0) - - # TODO: Update spinup - # NOTE: Requires setup of WWM hotfile - - # Update runs - for run in runs_dir: - wwm_grid.write(run / 'hgrid_WWM.gr3', format='gr3') - wwm_bdry.write(run / 'wwmbnd.gr3', format='gr3') - - schism_nml = update_schism_params(run / 'param.nml') - schism_nml.write(run / 'param.nml', force=True) - - wwm_nml = get_wwm_params(run_name=run.name, schism_nml=schism_nml) - wwm_nml.write(run / 'wwminput.nml') - - - -def break_quads(pyschism_mesh: Gr3) -> Gr3 | Gr3Field: - # Create new Elements and set it for the Gr3.elements - quads = pyschism_mesh.quads - if len(quads) == 0: - new_mesh = deepcopy(pyschism_mesh) - - else: - tmp = np.hstack((quads, quads[0, 0][None, None])) - broken = np.vstack((tmp[:, :3], tmp[:, 2:])) - trias = pyschism_mesh.triangles - final_trias = np.vstack((trias, broken)) - # NOTE: Node IDs and indexs are the same as before - elements = { - idx+1: list(map(pyschism_mesh.nodes.get_id_by_index, tri)) - for idx, tri in enumerate(final_trias) - } - - new_mesh = deepcopy(pyschism_mesh) - new_mesh.elements = Elements(pyschism_mesh.nodes, elements) - - - return new_mesh - - - -def get_wwm_params(run_name, schism_nml) -> f90nml.Namelist: - - # Get relevant values from SCHISM setup - begin_time = datetime( - year=schism_nml['opt']['start_year'], - month=schism_nml['opt']['start_month'], - day=schism_nml['opt']['start_day'], - # TODO: Handle decimal hour - hour=int(schism_nml['opt']['start_hour']), - ) - end_time = begin_time + timedelta(days=schism_nml['core']['rnday']) - delta_t = 
schism_nml['core']['dt'] - mdc = schism_nml['core']['mdc2'] - msc = schism_nml['core']['msc2'] - nstep_wwm = schism_nml['opt']['nstep_wwm'] - - time_fmt = '%Y%m%d.%H%M%S' - wwm_delta_t = nstep_wwm * delta_t - - # For now just read the example file update relevant names and write - wwm_params = f90nml.read(REFS/'wwminput.nml') - wwm_params.uppercase = True - - proc_nml = wwm_params['PROC'] - proc_nml['PROCNAME'] = run_name - # Time for start the simulation, ex:yyyymmdd. hhmmss - proc_nml['BEGTC'] = begin_time.strftime(time_fmt) - # Time step (MUST match dt*nstep_wwm in SCHISM!) - proc_nml['DELTC'] = wwm_delta_t - # Unity of time step - proc_nml['UNITC'] = 'SEC' - # Time for stop the simulation, ex:yyyymmdd. hhmmss - proc_nml['ENDTC'] = end_time.strftime(time_fmt) - # Minimum water depth. THis must be same as h0 in selfe - proc_nml['DMIN'] = 0.01 - - grid_nml = wwm_params['GRID'] - # Number of directional bins - grid_nml['MDC'] = mdc - # Number of frequency bins - grid_nml['MSC'] = msc - # Name of the grid file. hgrid.gr3 if IGRIDTYPE = 3 (SCHISM) - grid_nml['FILEGRID'] = 'hgrid_WWM.gr3' - # Gridtype used. - grid_nml['IGRIDTYPE'] = 3 - - bouc_nml = wwm_params['BOUC'] - # Begin time of the wave boundary file (FILEWAVE) - bouc_nml['BEGTC'] = begin_time.strftime(time_fmt) - # Time step in FILEWAVE - bouc_nml['DELTC'] = 1 - # Unit can be HR, MIN, SEC - bouc_nml['UNITC'] = 'HR' - # End time - bouc_nml['ENDTC'] = end_time.strftime(time_fmt) - # Boundary file defining boundary conditions and Neumann nodes. - bouc_nml['FILEBOUND'] = 'wwmbnd.gr3' - bouc_nml['BEGTC_OUT'] = 20030908.000000 - bouc_nml['DELTC_OUT'] = 600.000000000000 - bouc_nml['UNITC_OUT'] = 'SEC' - bouc_nml['ENDTC_OUT'] = 20031008.000000 - - hist_nml = wwm_params['HISTORY'] - # Start output time, yyyymmdd. hhmmss; - # must fit the simulation time otherwise no output. 
- # Default is same as PROC%BEGTC - hist_nml['BEGTC'] = begin_time.strftime(time_fmt) - # Time step for output; if smaller than simulation time step, the latter is used (output every step for better 1D 2D spectra analysis) - hist_nml['DELTC'] = 1 - # Unit - hist_nml['UNITC'] = 'SEC' - # Stop time output, yyyymmdd. hhmmss - # Default is same as PROC%ENDC - hist_nml['ENDTC'] = end_time.strftime(time_fmt) - # Time scoop (sec) for history files - hist_nml['DEFINETC'] = 86400 - hist_nml['FILEOUT'] = 'wwm_hist.dat' - - sta_nml = wwm_params['STATION'] - # Start simulation time, yyyymmdd. hhmmss; must fit the simulation time otherwise no output - # Default is same as PROC%BEGTC - sta_nml['BEGTC'] = begin_time.strftime(time_fmt) - # Time step for output; if smaller than simulation time step, the latter is used (output every step for better 1D 2D spectra analysis) - sta_nml['DELTC'] = wwm_delta_t - # Unit - sta_nml['UNITC'] = 'SEC' - # Stop time simulation, yyyymmdd. hhmmss - # Default is same as PROC%ENDC - sta_nml['ENDTC'] = end_time.strftime(time_fmt) - # Time for definition of station files - sta_nml['DEFINETC'] = 86400 - - # TODO: Add hot file? - hot_nml = wwm_params['HOTFILE'] - # Write hotfile - hot_nml['LHOTF'] = False - #'.nc' suffix will be added -# hot_nml['FILEHOT_OUT'] = 'wwm_hot_out' -# #Starting time of hotfile writing. With ihot!=0 in SCHISM, -# # this will be whatever the new hotstarted time is (even with ihot=2) -# hot_nml['BEGTC'] = '20030908.000000' -# # time between hotfile writes -# hot_nml['DELTC'] = 86400. -# # unit used above -# hot_nml['UNITC'] = 'SEC' -# # Ending time of hotfile writing (adjust with BEGTC) -# hot_nml['ENDTC'] = '20031008.000000' -# # Applies only to netcdf -# # If T then hotfile contains 2 last records. -# # If F then hotfile contains N record if N outputs -# # have been done. -# # For binary only one record. 
-# hot_nml['LCYCLEHOT'] = True -# # 1: binary hotfile of data as output -# # 2: netcdf hotfile of data as output (default) -# hot_nml['HOTSTYLE_OUT'] = 2 -# # 0: hotfile in a single file (binary or netcdf) -# # MPI_REDUCE is then used and thus youd avoid too freq. output -# # 1: hotfiles in separate files, each associated -# # with one process -# hot_nml['MULTIPLEOUT'] = 0 -# # (Full) hot file name for input -# hot_nml['FILEHOT_IN'] = 'wwm_hot_in.nc' -# # 1: binary hotfile of data as input -# # 2: netcdf hotfile of data as input (default) -# hot_nml['HOTSTYLE_IN'] = 2 -# # Position in hotfile (only for netcdf) -# # for reading -# hot_nml['IHOTPOS_IN'] = 1 -# # 0: read hotfile from one single file -# # 1: read hotfile from multiple files (must use same # of CPU?) -# hot_nml['MULTIPLEIN'] = 0 - - return wwm_params - - -def update_schism_params(path: Path) -> f90nml.Namelist: - - schism_nml = f90nml.read(path) - - core_nml = schism_nml['core'] - core_nml['msc2'] = 24 - core_nml['mdc2'] = 30 - - opt_nml = schism_nml['opt'] - opt_nml['icou_elfe_wwm'] = 1 - opt_nml['nstep_wwm'] = 4 - opt_nml['iwbl'] = 0 - opt_nml['hmin_radstress'] = 1. - # TODO: Revisit for spinup support - # NOTE: Issue 7#issuecomment-1482848205 oceanmodeling fork -# opt_nml['nrampwafo'] = 0 - opt_nml['drampwafo'] = 0. - opt_nml['turbinj'] = 0.15 - opt_nml['turbinjds'] = 1.0 - opt_nml['alphaw'] = 0.5 - - - # NOTE: Python index is different from the NML index - schout_nml = schism_nml['schout'] - - schout_nml['iof_hydro'] = [1] - schout_nml['iof_wwm'] = [0 for i in range(17)] - - schout_nml.start_index.update(iof_hydro=[14], iof_wwm=[1]) - - #sig. 
height (m) {sigWaveHeight} 2D - schout_nml['iof_wwm'][0] = 1 - #Mean average period (sec) - TM01 {meanWavePeriod} 2D - schout_nml['iof_wwm'][1] = 0 - #Zero down crossing period for comparison with buoy (s) - TM02 {zeroDowncrossPeriod} 2D - schout_nml['iof_wwm'][2] = 0 - #Average period of wave runup/overtopping - TM10 {TM10} 2D - schout_nml['iof_wwm'][3] = 0 - #Mean wave number (1/m) {meanWaveNumber} 2D - schout_nml['iof_wwm'][4] = 0 - #Mean wave length (m) {meanWaveLength} 2D - schout_nml['iof_wwm'][5] = 0 - #Mean average energy transport direction (degr) - MWD in NDBC? {meanWaveDirection} 2D - schout_nml['iof_wwm'][6] = 0 - #Mean directional spreading (degr) {meanDirSpreading} 2D - schout_nml['iof_wwm'][7] = 0 - #Discrete peak period (sec) - Tp {peakPeriod} 2D - schout_nml['iof_wwm'][8] = 1 - #Continuous peak period based on higher order moments (sec) {continuousPeakPeriod} 2D - schout_nml['iof_wwm'][9] = 0 - #Peak phase vel. (m/s) {peakPhaseVel} 2D - schout_nml['iof_wwm'][10] = 0 - #Peak n-factor {peakNFactor} 2D - schout_nml['iof_wwm'][11] = 0 - #Peak group vel. 
(m/s) {peakGroupVel} 2D - schout_nml['iof_wwm'][12] = 0 - #Peak wave number {peakWaveNumber} 2D - schout_nml['iof_wwm'][13] = 0 - #Peak wave length {peakWaveLength} 2D - schout_nml['iof_wwm'][14] = 0 - #Peak (dominant) direction (degr) {dominantDirection} 2D - schout_nml['iof_wwm'][15] = 1 - #Peak directional spreading {peakSpreading} 2D - schout_nml['iof_wwm'][16] = 0 - - return schism_nml diff --git a/docker/pyschism/environment.yml b/docker/pyschism/environment.yml deleted file mode 100644 index ce24f19..0000000 --- a/docker/pyschism/environment.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - python<3.10 - - pip - - gdal - - geos - - proj - - netcdf4 - - hdf5 - - cartopy - - cfunits - - cf-python - - cfgrib - - esmf - - esmpy - - cfdm - - udunits2 - - pyproj - - shapely>=1.8, <2 - - rasterio - - fiona - - pygeos - - geopandas>=0.10.0 - - pandas<1.5.0 # moved SettingWithCopyWarning - - utm - - scipy - - numpy - - matplotlib - - requests - - tqdm - - mpi4py - - pyarrow - - pytz - - geoalchemy2 - - seawater - - pip: - - chaospy>=4.2.7 diff --git a/docker/schism/docker/.env b/docker/schism/docker/.env deleted file mode 100644 index 2669c7b..0000000 --- a/docker/schism/docker/.env +++ /dev/null @@ -1,2 +0,0 @@ -SCHISM_USER=schismer -SCHISM_NPROCS=4 diff --git a/docker/schism/docker/Dockerfile b/docker/schism/docker/Dockerfile deleted file mode 100644 index d549d63..0000000 --- a/docker/schism/docker/Dockerfile +++ /dev/null @@ -1,106 +0,0 @@ -FROM ubuntu:22.10 - -# Create a non-root user -ARG username=schismer -ARG uid=1000 -ARG gid=100 -ARG ioprefix=/app/io -ENV USER $username -ENV UID $uid -ENV GID $gid -ENV HOME /home/$USER - -# Get necessary packages -RUN apt-get update && apt-get upgrade -y && apt-get install -y \ - git \ - gcc \ - g++ \ - gfortran \ - make \ - cmake \ - openmpi-bin libopenmpi-dev \ - libhdf5-dev \ - libnetcdf-dev libnetcdf-mpi-dev libnetcdff-dev \ - python3 \ - python-is-python3 - -# New user -RUN 
adduser --disabled-password --gecos "Non-root user" --uid $UID --home $HOME $USER - -# Create a project directory inside user home -ENV PROJECT_DIR $HOME/app -RUN mkdir -p $PROJECT_DIR -WORKDIR $PROJECT_DIR - -# Install SCHISM -RUN \ - git clone https://github.com/schism-dev/schism.git && \ - git -C schism checkout 0741120 && \ - mkdir -p schism/build && \ - PREV_PWD=$PWD && \ - cd schism/build && \ - cmake ../src/ \ - -DCMAKE_Fortran_COMPILER=mpifort \ - -DCMAKE_C_COMPILER=mpicc \ - -DNetCDF_Fortran_LIBRARY=$(nc-config --libdir)/libnetcdff.so \ - -DNetCDF_C_LIBRARY=$(nc-config --libdir)/libnetcdf.so \ - -DNetCDF_INCLUDE_DIR=$(nc-config --includedir) \ - -DUSE_PAHM=TRUE \ - -DCMAKE_Fortran_FLAGS_RELEASE="-O2 -ffree-line-length-none -fallow-argument-mismatch" && \ - make -j8 && \ - mv bin/* -t /usr/bin/ && \ - rm -rf * && \ - cmake ../src/ \ - -DCMAKE_Fortran_COMPILER=mpifort \ - -DCMAKE_C_COMPILER=mpicc \ - -DNetCDF_Fortran_LIBRARY=$(nc-config --libdir)/libnetcdff.so \ - -DNetCDF_C_LIBRARY=$(nc-config --libdir)/libnetcdf.so \ - -DNetCDF_INCLUDE_DIR=$(nc-config --includedir) \ - -DUSE_PAHM=TRUE \ - -DUSE_WWM=TRUE \ - -DCMAKE_Fortran_FLAGS_RELEASE="-O2 -ffree-line-length-none -fallow-argument-mismatch" && \ - make -j8 && \ - mv bin/* -t /usr/bin/ && \ - cd ${PREV_PWD} && \ - rm -rf schism - - -RUN apt-get remove -y git -RUN apt-get remove -y gcc -RUN apt-get remove -y g++ -RUN apt-get remove -y gfortran -RUN apt-get remove -y make -RUN apt-get remove -y cmake -RUN apt-get remove -y python3 -RUN apt-get remove -y python-is-python3 -RUN apt-get remove -y libopenmpi-dev -RUN apt-get remove -y libhdf5-dev -RUN apt-get remove -y libnetcdf-dev libnetcdf-mpi-dev libnetcdff-dev - -RUN apt-get install -y libnetcdf-c++4-1 libnetcdf-c++4 libnetcdf-mpi-19 libnetcdf19 libnetcdff7 -RUN apt-get install -y libhdf5-103-1 libhdf5-cpp-103-1 libhdf5-openmpi-103-1 -RUN apt-get install -y libopenmpi3 -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata -RUN apt-get 
install -y expect - -RUN apt-get clean autoclean -RUN apt-get autoremove --yes -RUN rm -rf /var/lib/{apt,dpkg,cache,log}/ - -# Set default entry -COPY docker/entrypoint.sh /usr/local/bin/ -RUN chown $UID:$GID /usr/local/bin/entrypoint.sh && \ - chmod u+x /usr/local/bin/entrypoint.sh - -# Helper scripts -COPY docker/combine_gr3.exp $PROJECT_DIR -RUN chown -R $UID:$GID $PROJECT_DIR - - -# Volume mount points -RUN mkdir -p $ioprefix/output -RUN mkdir -p $ioprefix/input - -USER $USER - -ENTRYPOINT [ "/usr/local/bin/entrypoint.sh" ] diff --git a/docker/schism/docker/combine_gr3.exp b/docker/schism/docker/combine_gr3.exp deleted file mode 100755 index ac4b0b3..0000000 --- a/docker/schism/docker/combine_gr3.exp +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/expect -f -# -# This Expect script was generated by autoexpect on Tue Dec 21 16:42:59 2021 -# Expect and autoexpect were both written by Don Libes, NIST. -# -# Note that autoexpect does not guarantee a working script. It -# necessarily has to guess about certain things. Two reasons a script -# might fail are: -# -# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet, -# etc.) and devices discard or ignore keystrokes that arrive "too -# quickly" after prompts. If you find your new script hanging up at -# one spot, try adding a short sleep just before the previous send. -# Setting "force_conservative" to 1 (see below) makes Expect do this -# automatically - pausing briefly before sending each character. This -# pacifies every program I know of. The -c flag makes the script do -# this in the first place. The -C flag allows you to define a -# character to toggle this mode off and on. - -set force_conservative 0 ;# set to 1 to force conservative mode even if - ;# script wasn't run conservatively originally -if {$force_conservative} { - set send_slow {1 .1} - proc send {ignore arg} { - sleep .1 - exp_send -s -- $arg - } -} - -# -# 2) differing output - Some programs produce different output each time -# they run. 
The "date" command is an obvious example. Another is -# ftp, if it produces throughput statistics at the end of a file -# transfer. If this causes a problem, delete these patterns or replace -# them with wildcards. An alternative is to use the -p flag (for -# "prompt") which makes Expect only look for the last line of output -# (i.e., the prompt). The -P flag allows you to define a character to -# toggle this mode off and on. -# -# Read the man page for more info. -# -# -Don - - -set timeout -1 -spawn combine_gr3 -match_max 100000 -expect -exact " Input file name (e.g.: maxelev):\r" -send -- "[lindex $argv 0]\r" -expect -exact " Input # of scalar fields:\r" -send -- "[lindex $argv 1]\r" -expect eof diff --git a/docker/schism/docker/docker-compose.yml b/docker/schism/docker/docker-compose.yml deleted file mode 100644 index 9af92fc..0000000 --- a/docker/schism/docker/docker-compose.yml +++ /dev/null @@ -1,22 +0,0 @@ -version: "3.9" -services: - schism-noaa: - environment: - - SCHISM_NPROCS=${SCHISM_NPROCS} - cap_add: - - SYS_PTRACE - build: - context: .. - dockerfile: docker/Dockerfile - args: - - username=${SCHISM_USER} - - uid=1000 - - gid=100 -# command: '/bin/sh' - volumes: - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/setup/schism.dir - target: /home/${SCHISM_USER}/app/io/input/ - - type: bind - source: /home/ec2-user/data/test/hurricanes/florence_2018/sim - target: /home/${SCHISM_USER}/app/io/output/ diff --git a/docker/schism/docker/entrypoint.sh b/docker/schism/docker/entrypoint.sh deleted file mode 100644 index ab6e4aa..0000000 --- a/docker/schism/docker/entrypoint.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -#exec "$@" -cd io/$1 - -# MCA issue https://github.com/open-mpi/ompi/issues/4948 -#mpirun --mca btl_vader_single_copy_mechanism none -np $SCHISM_NPROCS pschism_TVD-VL - -# If SYS_PTRACE capability added for container we can use MCA - -echo "Starting solver..." 
-date - -set -ex - -mkdir -p outputs -mpirun -np $SCHISM_NPROCS $2 4 - - -echo "Combining outputs..." -date -# NOTE: Due to the scribed IO, there's no need to combine main output -#pushd outputs -#times=$(ls schout_* | grep -o "schout[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) -#for i in $times; do -# combine_output11 -b $i -e $i -#done -#popd - -# Combine hotstart -pushd outputs -if ls hotstart* >/dev/null 2>&1; then - times=$(ls hotstart_* | grep -o "hotstart[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) - for i in $times; do - combine_hotstart7 --iteration $i - done -fi -popd - -expect -f $HOME/app/combine_gr3.exp maxelev 1 -expect -f $HOME/app/combine_gr3.exp maxdahv 3 -mv maxdahv.gr3 maxelev.gr3 -t outputs - - -echo "Done" -date diff --git a/docs/workflow.pdf b/docs/workflow.pdf deleted file mode 100755 index c8fd7b5a976d647ca4b161cc297b2555edb5c35e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 217520 zcmeFZW0YmVk~Um+mu=g&ZQHhOSC?&c*;QS(ZFQGzyUY5j`(o~_JL~=KJ9plHv(8y( zot-;Qd-WiJNQ(h5@mlujo-oVU6+Q87n`Y$rFE;de3TAycs zYf^%u6aM^khGO_Fs`E!)&c)f4UdqciGj17 zBNX}j2Y?uW-RH{;z#71W;uEQbv$e^eb~ylB02%;c022Tk0E5r(#sK&LYM+vh09F8| zpVD>!ZWO<}|Eqe|e^CFI;r<@R@4+~l*#3t58x5U|iLr%&pq=}l@z6uDG15aZ)3bbz zgHFiK*7;M=35xA6l8Poyb}o)apQB>`OM`%|t=;Eo2Kv7oiaFZ3*#B)xlpPIho$P=2 zWaROw>h~m>xLO#QD2fUGhWH;Se=GmnB#2sAJDWH{(TQ3cIGYHY7}*(r!j(3$HFGwH z;`r0a=kJ`HKc~tD$~~(}UB`}y1-bDjTR-sIm*iZL^3zK=5V%}ix0G6ru5o%hf&is(QNjT>&8ot;YC-xQvIx7CiXX@=1Kh{2ak53;awc9q`zKe-; zmwfLkRCDJs?iGRAC=Ra?v^+KXs^4Nf2`@T|&t#sB-&^Enb5?BxcGo`$n0z@RT@^Zd zA>^%=z){wiJH=3i?50)MzUSuig3{N^|y zoaHv*h*5sa&YL&R#&7|fz$;pl^B|*=6HM`iFDZfK$WNR!GrlQO=omgV$UC3ISiUty zfFo&6!!*lL+3G+AqW1tMVHHWd(f=zr{Ox_3J##f6HoVk{6@>!_2!R(<7PD2akAAG)Q4 zvO5fN1PjQM%4H=s3mZV+QJx?@uT5R1W}gd;>_*3q`1RSLtHJgb7lqatU#Tq-m&^7W$G#)*(Ob)|L}D-1UIoa4XF< zLt4N12iRn=YacAp#V@;|ueRrA&?HEjv}(;0QboAXEq7dvYyL6=^|BLh%Wo;n2m`fV7dN+ 
zpK@a#Lj$|u@agJeOZn)1wd>a#=asBz+M3%Trd>AoA*4AIt(Lac)v37PqD*VoLrBDJa-v`b;gAIRar++B}Do zD!-hxg&tP47}R!U_$ko~?n-(Tu*i5;g-$^rai<}xkY1`)=v{I~vX|{u2y?bO+fCoYE0e_WKbg{>77ouY}6 z^QRaSBL^)rD+}vyHMAU`cA5o>nVFuJjfI|x8H$04iI(Y4+o$msv~x86G^M|t)bH14 zi@-m2V&rULXZy!@sVGW7(f$4)w=wuMzjmBXc8+#-PUf^W2DTRbwsr;vv}Sg$fBed) z@%<0i`Tv8Hk(QmFiRIHbFw?U91t$v|Ee9(-8{{jnm9R`@Ef>W*x49=x^X8X zi%(x;LThAa^M4DLiTx8S3o{E82R$vr-@&r8(K9jq!vOv*uucXhCdRZjc4p=lHvj!u zWoD*jrDtVif?{K#W%~{E^YVY*5sdU~{{j8qg6U!DX>MR^#_wuj<3wv>?DBs*lb=_c zjh%&q5sH=JFSjBS6cZ~4Ee8_=^FLtzw;);@xmf%YKApcmQvb~H{+pzS;Ws<`J(iRSLTXRS2m}sUO4^N?0$E1E5gM9ND z|NayUf0M)avU_7^_d)pd;5Xgo+|B31XK~;B+D`V0{q13XFM`w#7wHFONdp-L1BqNd ze*Zw4VKfbNanVArab$yO9-kPek_|?fQpj88~%BuR1ky%e_XvcfcPf zmV@|LB@&t(-#AC7B9WyT5tQ1Em4j`R?fb^#1Y>Eaq^S_9VmSe5++qv%ebK+%=acMn zf(E;>N_76XO)Xojs_{qB-F^TnKs!Tn1@p?PPd#IM^a{8ps6mR-b!%E)e;tH6-GN)8 zTV}e)HjH6-2P#;6_LL> zHcbll`fS<&!EqRL;sXelBXf~r5W-0Ix)Taq8de!U4C9{be%5)mCi&}ZRCqNh4$GxJ z37JV$ZO{zW5&omGK*bi8$SKCid?s_*+aeVsy(OPa;|68CNoRCGh2>g*)?oz&kP|0f zCoF7#nqOXn%nCf67n#pkRJHRBtLjW0zoexVR%Lr_pr+vW9|_9NlYDt|kZP~svWc(Z z-A|%7c6k8{L&wG_p`;b2Jj)9pUkYv_chei&U$3NtIP!FuVXZ_X;B6pQ6{EMGIEL$B z>T-(q)T)lzAjk)qU!r{O(=z=d$$L0`xr9$V(vfBf)ZwHbzI1agW2RVPh=y`YnNK}GR0yFOAL z*a`B@wEz#?UB9WW*B_B3r^$l}ok-5=w4 z{keynpfad0Cq;r0vShmh#zScb1-wBiel%uw{Wk@9-w6sw?-po;Q5Ft6O z(O(=>WyJY?NB%z1vkui-dVNWIBN3pcKbZ;Cfs{kL(zjUN!uBOmKTb?ecL|iDJOj0(K zf&!MIP(%wUWX`AZBkTEGlMN#rDl<)iqv}rB5}PE}G(f*-4gXh+mY}w@F~y$JwvbZ) z#IQ2dV0+-xoL^=+vn2fVyN*hlWn+_Z_v8rl4w)gCVH9{VC7g{dBM$fF(=TGO-1_hX zF|%YJPEuPk+@%`u1+i!$vSra!fi-q}5x`U-dN38q*qOC;gL7JS8Gx}28W4Ph8(Gqw zf@1F<%*?H&Os(wC9-Nr=l8CblTl?}9kVTHTYXTK7v;@1=k+#foN6-+3tHQSXGR_To zE%_dPMKZL69ZY^cPsW-kl?u+QXVkz{pg)Xh#9D0LpIgQ~+QaaUC5CW$BH^MGt2XQilQ(TnsruIoxc-pG80-ZxtaWnVrO( zb-^U5YQd1YntQcS$2q&zK%FYRi47)Sz`g^qBaIO&PZ7$yS(_B-exTzxtMS=`(^~aA z>oe%loq>zkfRAiHsxN7ACGnttaZ`;LjOT`Q;=gKP?W?D5K(Rr zA6c8w7!tXlPt!)QdTJWUTS;MDi$8?OsR}kmK^?=;c;5yYI1-U`6IX9EQ6^r{D_$L# z0tyBAz-m$2Ud!*b1dy)MjuSW)XzSgF?G8d_Ev&}t@nYm-m(=vk`Y@w@g2u5`-99;6 
zwu)z@RUku*b3o#g^5Qv?Lo6sUlit94?&s$0(tbp)^n*k;bK`w_*`4m@h`w7gvjv!F zs96y2Zk?;P&%!SyGh%l_uB5fNyE9vLreN!~wf5twJ$DzyM|#0hp_9s84S$~n{}3A# z_L0z5AUf&1BJ_#sxz+inE{13NlRe5?o4N8Yg;9`ZpoUpb$|Z0~Q6$3Tdl1Y^V9NLB zvGV-Qxq`tC{UEDVytFTkD3s-}TB_)d0?Z*e6mTf!a&ODRZ&_@=L}yf&fIS>#7#m#8 zyO2EwiLwWanVb-ENFU}k-4;2xwbjqZhdR}pF3S3?njfQ+8SZWL+K2V{ElOGCKopr- zwL4-6?m*kg4lfkWOOHxlgPy!~7;{9*;Y%T7Gh(LTU^08;8omL1D=8#t+?scJc;PPJ zs=Mw1YPI)JGr)8R!le0umPcSMI;))SbN+pzq(~O3_Qv0vn=r$9`htiT7J(*5hk=A8 z{R>`DM$uw=?07QMSq(DbnbvGyP!ZSpgiiA==<4o$kM#5T&(-`AP+)s$+I$HH8Jao* zZVw)6mr3*8C1)qO>+?e-E0)5OErb&z!NZN01yS_fBR*$-4viK5HM)0}W`41E?Bx0P z!>!ZTcjuNt-i8C|40*4^<1eRaj4$;}MU7!~x22W63*!pD6u#+tv4<$!?aATqH(G}Z zCZ^^*&-d8U7>1)nWuG?^;n=d)a$P1RK^~*u(Ev-*+Fq4@T(h9FY;waqy41M2fd~lmtc2R9<3WWk@~T={Q;UQAII0xkCn#dq%*iFNJ%x=t2#u=Q zrYU!&E!x)2Ib&t>JixOi-WYx>m~);H9EShZG9lq)%0?-`Bb0&c@#_h08++aDpF^uZ z4CyzOVq~Ic`a2Q)&CdR>Vkjs9On+(n zx5(@NL~Owjk3GO^gr2sZk1rXc8Ddk5M;!cBYf z#@DMHgX&U&Bt$P1B3~WpRck;C7Q?N9-(P$OaaotSIy17Db3)+e^j+EIh%4I4)$hco zxblh+U!PxQYR0y2cN9C2Bq@s$V`Z1uc2C@#cVeXsI-qs;w|(9O$3VxUb+w=R{5B&} zcgP?-$Z+54KR#A3lODHIE2TU>Ui)S1yHPgz;D_BitwUKDl1Zp$ed%KNBH}%LU&wYIULU`x@rvrEYm;janjTz^-8rqN z0t?`iAw2h{Ovp~9NHZoZb{#!}SUuV*R3ivMIK_%;h*}VX;y*p|Xov}$)u)4{P(zN} zSS~cLIQ*{kU27Y((mv(^QggaZu8nAEx5aDY4aHAxh8N`ii{|$Tnu%UqoJPT;v;2VHBhztxI!SZAcK4 z*+kVQ7OBmS?kJ(ypl(vHO!ZWE$tEuN6%Zm7#Qb2sUieyh%?hroVkC6H-K9yGM2tp+ zVW#1y%I6ttx&sE9iqx^7>UAs((YutjQYLKFnU%s|wjgqa^8s@c4z& zm#E=U!9Mv>_4Tk@vcmv04~L{;mRW;h9EuYOLKKxu4t4SJ*Fj?#qJ)+6)fMbO9N9zH zj*+nMTx(m9Cl)hV`6!flAQTuoh$p@LI6moSRw6pO{~SVm$|bn1%y&s)UttkvOJ zbO!Z?r^(*$qv_Pl#|<}8j>ayLcV7+^>ym06%tKOQvx@Fk2_3T%2@E?m4)hWCMlR9W z!8GxuG}$1tUe>K*a)6Bw`X_W?+hVVL<5#iyVsD$x9*01G4a(LELqSG%+}=uBqev~< zkI&wu>{?yL%`c>(eqMF7o2?j>SnGb!?1R3+wjLpv&>bTbZm8`Y-Vcmfr)6}1~2vYD&%mjd^Pb8fuar-;5lqex#9;- z9&Rz`M?*3$)ZQ1Hp@Ku9uGNQz2;?b?(@^t?0@7)K)9&f2mFg|O76nAXL}^_T<9yrN z;!6o?E)|O-Yy_@l0n9W-?D6gTuvU;joxfGu6jNrUz#tWE0l6;l3>yIop8~z`aDF8lV%=Zf$=q=e zk^`c}-d`F73#;@yd_{3+25XFU4G;WD$lYovDups>0Y5Qgo@-nadEPz5_f&}QN+2kg 
z>EEs>`fPaY%g-rtCi#@bFMM_WW>X)6fW=youPFCh(?V*Uoe*ZaY9p=&FdQJ?UIkP;pIaB^(ZY_`b% z{8+vxS93>bGpz?rvlNd&7lPr7I>xf07ydN#X*0@0B7ynk7(q%*jAFwG(7vE z`}KCe0q+E6ESYOXk9e;%@Pmq9WL0a@aW9c3MTELXZQvU+vn}Y&?2e^#Z;ISnvyuX6 zCgR})SVsY62WFek#5lmySL0yE zX+i-t#@g)u8dQq&NO_t(Jhi%Z>Rzirx{zD%;5|@Btf#!vn9dGI2l6gjj()G5;zdz7 zL(CGgvWq&yJ;i0FM92e8>FaxkRg~6MjBUneL(b?g5818GqRg)Tdo6wYMnc1>q^_Tj z((ITqBSh#@C~R{>^>2caa^L&6V&Xkd_iFY48I!?A>TFAFiBM|Nb6$ zSGz1b?w>%*xxL_fRf7w2vGRNY)|IsK{XHlR+yX5jt}8u*z?+%|Zpu;%#6pL;$O)VL zzZk(sGSO$ic>C-cY2SKo7zaL~JuaWv2QA^)ZuWu3$gg|( z+O8omTD5p3jck&~lv@NM-qBE;H}rkqTO~LB6#}$1zYaHSrF#3xLzwya2Wm6JY1X`B zR;HEe7vNNkKt-3I?~(kBf^sh*2bv3TD(-k$InV9*q69pp=O zf8+evyXx5Bp>e_}E{LxwlY?I4v3cGGIWgfK>YOm4T79=S^jeoy z6Me!NSqOFcXG;8s#{8kgjGv_NuS*brDAfN_;{Wcl$iGO5|HoF{8UJb7o$)`fyEFY; z>+XNl|4WqkpI6?`HKgpY*x*TXu`-38f#;+Gh+Q-@E{AJFtSe%)hU=_3qP@s5iAC{C z8kKVWy74H^Y1K3}QW`3hh{UC$rKespF%Dv&hAh0_=yl9(uU>CcHWa4V(Rw}mU7jPK zE*}dXTZ+ao%{*3k%eusEvtlA4vtJPT?2giW!4kRJK6dvmGx^xx9!W+OJ1+Tbou9wF z)rd`BwT;7~U0;=Du&bgI*zD~H)x@K?lV1i+TO>x1wkFAvXx6e2CBqO&?L#}MRv3@( zNO_m{U>{C#l{<7@i%y>DKs^3{n4aZX=X*W9D zn#tfO2$`tVoOOIj&BO|?dMV^?9VPKO;`JBF=L)6&uwvhnX_qaFn%P6vikKnlAQf~@ zBiu|R%d8KrN_!{|cX2f@@UV)<4px+0RQ#U5kPxf(lllp>>2 zYJ-GmJ%BM+QL-1+3e3)};GoFsu^xnnH0YbKQTAg<*Gu+~2L<5~vLICTa^jR2Ld<>Q z3F$B~9C7#e$7iDJ#5(QXbjccc=E{41Kt<-`6=knNBM6}>A)0aa8!AIvxMdc8L$Q|A z0>tcrwg$%b40lMAbosdUbR!2-vfALaT7|_ig|8-5GNKvEg%dJ^L$bT2D#r_73Ho^= z!i7)gWMDLeC)2d^)bAC`*dqp=>X*X5(6m*t8k$M)Q7EV=acw6J~ohLJO$Kc|neCDI*wVTu~N6 zP0{jTNRiDORphdQeD1S=MLsm#IfTr`sW=GAIXgZPq1(n_%;1l{99VaTSZ$ug+GD!x z(sV26_2dHwVa4SYnJx+L6v19_m{Ao69$y}{tD)(Uk@D2{Ix&up{KChWhlmhuD#?ijqL)Ya3F1xA4q=51+%CEF@o@y?#UZpV9pT$ zNjyiUmr6zTd1TmnsqFmlP9m|Dw6RQ5f~LlVqrZOun8I0D{TCeI?(#Wl>$~YC2&=N6z%>54b$M%Y@2Yku{oS;QF5O1}o6FVHs$w0CFJMB64G;C}6u`3nq#3&4CN zj;XH~kgt!}n}>W2&6n@zFMcZv<|vPP*3;b>kyK2B8XB89ZguP*ZJo2;Gu+lw86}h1 z2eiE3n7gfIZJQ1^8~r#p4I%c9>sXK-yBR@~3&v>ySxn3j-Hiy#a*bkut(AH!6-L91 ziE{UkEzCV|rGdNEi%QA(L2oox3-0ygFCDibTdvT%;TWD9o+NGoUfHd9t$d@W!_YSY 
z&v-?ry*99?-}^$=_vJG1*qSqysGZ<;;TSw)Hm7U1h`jl1H>`YJTGXu}D3|pq%k&p1 z6WE$|ECc3W@o;lMXlWAxZ1OmOdyzZuB+skV&N>NnnVxm_3DopeIrh#686HG#0#BjsZ2XAkV}*-1 ztjzU}buXVcV? zJ1Zhyn0%EFT59}vdKz*r5lDm-g=^>hb`TpVa>A|0gri^t^JqbDiIW(LJ{&C7I%^4j zA$~@oVVF5e`5kO$U4bmx*dT(`fD-a^*(Y%WRdg7IWq*`nG<8A*FgW7qBMb^ed;(Id z-Gic*oBo^ZK3rzEABvgzvZ_V);xQ{k)Xtar25TFAy@Zf51SV<1sOJGI2wam%i1$YM z0Kn72XL7+ca56ZDS!(oB!##J7SaqX?UsD@W3`_Ylfq*Q}Ipsg}0dY2KYnDh~tw7YG zp+ajO*zo*|s&_vP#%eCV1DKX&^xLv7O(!CSAPOa`GVza6Gz&Bly2E6K9(}HRu~QK> zubEf28-&(QrYMnZXxWcNUd~3t{ai+LiUjC;SW%+ZR(8Z)|Ip8FgMz z)$tNPR~=Bzj6Ne}h!G*##RYt%+V)P&fiQvTjOp=ShgJ%wUlFv@Rgdbh-`t~1CKEmG zq7-@8s?t#mKgH31h7lSFtnp;N zv7z{`&zrv2GHZ`=l^wUwi@DjvOqL?Lk<#I`dQ)kN8bD|kYrqq}QtH;K;(r+|{4o32 zM8a9ioUhE>j#y=i8m>9;x=TV?D$4dm`*F%o9gJ{5eMm91%Ecm$#nZVyOp5e9YE&_yL|jsqt26o*J3O{TV^crKu^PQ3Vz zvv6NBpipU}^b85O8HAIW7~$mFF{c&J-a-4LNuX53-~l4Wd?$Z=UT^lA%fipeH7e;T zOlP|0b6TK1tk}B;yM+BcL)yTlEe(K}R?7oON1#}Jw=K9{G9i92T#XqlDQOeaGOWTo z%CR>rt2&XsOLS0ZrNf|!fa&00)f~<_z79?!1V$`}Cd0&BDIkQo*l^m&7)NkW<*j03 zN=r$4;hyM7_oTLTF$Y}+gg-y>9iy@NM|AP+Z#asvOd>nhF~Qno-E znYP`=)SXnYqXn8_CxCIyC@vCN=+|B3=TcDYD_84B5fq!= ztb2GQaJPz$nNoAdDh62f&@{^UbJA;ZV6r<8jU7&sRL4Q)614jg{h$#%Tu*Q70S&b? 
zP0ecPDn`WM{fbi5!1r!wsOR`(G6u!P$l}?wljo+p9Q&t)J^OG6)#A!!!|w-!~k`GhH~j<<}ZNY&A$2-*VNVhQwRh1NpI(?08cMn)~Y zPra@8M{|2rg=Ol@)0(3V1ZX^*#Qb6?n7oTksoxdnV(3Vm;85s}>!6j;WjEHMDr}eG z$a(&T9IbDEoIE#1TAtN55=W6-{ml*}b5FLqstyjIA;VMXGG(`;iZipnUB)Yj2euQm; zmo$2Y&?N@w{m>#9E(@hS%|O`GS(!E3{iZ3)4xllAb*w$gvSCuwCI{D9dfVObmMFPd z)pd49dar3t2$#eux8adP_wB2It{UE(%gd$O2lM>Hhk|C?<+rYH^Dh-A?QrEz-kr3ZWXB!p zq-`W}tRv(UhxkGUFtQXSh^w$^EDNHlm0peh&wwW4vg?kpa&0N=csH3&IMLx0s<3Y_ zd#yT`F+H(#S1&syCj~Db&)3~egi7E~bgF2Z(aXspJK9=mI?Np{k4>I_PG+~xPF)^d zt<9~m-=L>Y<(= z4KXH~**>>$cE^hhy+e$@!2xu+qQ&z1`5IRJ{d*r%Wgkh`x1HnqNjp}#8r;LoX*R!s z=@R!Y@H2#*u8W0($9+82?dL1AOg%f^7489gxT{y(oUWkoMl55@Jj8E_k9U_hn!zLaFegFUyjUg(*Zv>3K47G<5zBoNsM;q z#Led4uOr{6!f=eNBDY8l(PZ3uFAArvJSS0mk^O@H3{AOBJ!)phkri(sm3AV?Tg)v9 zHKfECLOEN2)>FtIQI&#Vagg;Zj-;GS#G`sY_)iKV%JC>`QT-~qp2EtyPy_{vCS$Hf zB(snNdBh?It8;kOqo?TLnnhA<&g)7d3?78;k43eD1C0BWOh=wqOj&1RShIRnk?y>j zo&e+yn{BJgHWI(<&@aot&APOPV)w2b#+?cif%PuuZVewb2}A25^HfqwoGN$*Q!{j= zBg^PYqyP~gJsnUjO*1v(!VTn1C6FWHp~0fl;48jpPv~R8^(veiJa1*u<(>m@^e1V3 z)OpzP$GFjDzr({4L*1J$CbYsPQuC87yue^FQBSbx169b~v7GHQdGo2no?xr~lJ2Cc zE%$4MCd1_E`2N0#mMJ%X{Ny#e!^0i!2K6xPW%ESKJ^xZpnNx=2E2rTD=fkI}^(FH) z_GD*P=Ci0(A`lWP?vS42n~F==e(t#Z#c$uIKjp5?mWpz1IO34+3P5?Sp$=SAJ zWdZd&h=@b@_h|w(LN_?@*31&d%8-vh9S=_`kR+cCgCr63>P)l3Wv?1zm5=APxdy0Y zG4Z4@nR-~=Cr+S!0{KHs{)6pOatm# z^7#d0Sc(a7L}ML*GDI@rEye58v=t)^x)CH=5G_fs9sPmAt7OvNn*h?;=g(Bg1K6Dr zv4vZUsxk-tJE={%b5{**z#6oAe3NjGEIeBK?q5Qh?cnTUUS90c> zeZS1j#*F6>Qh(IWcAGUagMirViAU41tv@O%b3j8SI@EFcseAM|)g}Kt@i50oS7Ok`RxxkI*@?K$9MSP4SO}t4(;F z*sQP$posz8yyoP>*k%hl(lra7SXK^=F$9h-ffyKKF>O`^ifF|u?z*As%J*48=I0}X zP-A_)7z@T+iM3RtuWeP33-`|>YAP_il3PL)m|`yI2qQOp>dO3eM1kRl5W4T#t~ zgO!R51(vcAcRv?T**UMQR`@pGp%{?2AqE$n%#|f4s|@AbRc~0Du|G>k-S^8LwZ2rQ zoERK>y+G`;x!QfvgfF?wx6myRSe~r$lE7oBO7t~4UK2uqqp?vbo-u622ozWgiR;#P z91+o@Pg^u>ZIUqKfZO|8g?L1)i4n=LILp#jbbu+TRo!+9Y-7B`BY6{2oWS3Z#_i(aH&;sm?bAW%rR#1Eznh8p9Y?6as$23)T=ET0S6ULsejIM zEPv_H;Z~DvrFnaZn*(T7u-zZyhoY$>V|GlWbyZp8io{O?`!E|w^bmI%5SJt$Bvsn3 
z;9T_Asv>?lV_C%{MnYsjWHVJ1JI(a8+OLV(z$)WpTx*)*Sqn~r(lC{DBJZ;c7-^R|%Vz^u&AbzBqx1!(DG}7=$Bbf9 z(&OsDI_Mmv1gIxu!?odJDEFd91Kg4KWld+73B?8>Hm<2RjVm0ji+K) z{5euZDPuLuQIc|r=CML2w~is%TU4!HzNMhYr*f6g=cAz#lTvKAa6FR<0gv5<{n~a@ z-@HW?9#ofq<%}Y?k9#RuFh;YHDwX_tJYKYbl8a)Vr8IXJiI_niI6^z_(y?7R`*kAX zgq7F)aX_8>5Xzj3AEALGM`xq4wuj;nt?0rAcE+gBWB3fAg)j(KnO< zO0E(?H`eaXrxr80=9I#6X6etJ3Knu27IOg`b6PUXL(+C5Z6ry?C`^4_hv1p*T^d+YOK?i8WMBu6 zAHLULsAM>#UMzfrz4OsZ!CLS&b6Yb#_Qd6ooC&EWz}E~9kh16UClDCGY(3eetw9lN zK{4kh^^UAjBfyei;!mK)zSb~>>nMOvry7zaZcX84^r_ru{kD1OvPGE&B@cAtL;1R>1yEMWDc(OEzoR&@>B$?O8yyjG$cne z?URqUPoh47fg(a=%Sw?7x#4#_cf`P~jl)71^ICN2D4oD>8J;^7)S#7qN({9fs|_$A zs29Y2)j#I5wXrmCw($=ER2Y}p9+V{!v%Qmoab0)5KwmCLV{YI|Vzxl>KVHU`WzC*yL5y9F)wa=U8WZHgAKXPW&bbuSxHKZr*K+OM*#^4QEzGi)eGBdr$2f1cEiydXs zU1$)uW^ITSFX{A?iP;(FO)Ia1@Sruqh4~ap{CaRA;ET4dwB=@lXMmig+=+ayJ`B<- zcA6QbT=w&A+F4D-a^sPhFW@25u%5=IcRMy;yC?l}jzU$x`v(9x$_iu!y6hD`Az z4)6z0Kw?IpK02r_Pze$4ifigw&2%}R&I?Va3y%R0mI>9wkYGffKx!cKH|LQLp0opt zf|a@VN2gNhopBC3Z)qDG3&@DqTh%T1P0NVn9~)I+V8I6NWtQ1k&iC4ls64DT*8PvC z2EUpg9pym?sQ;XZWfPdvEGkMGWnIn{pFw00XspT0@X}%^$nkOG&1i~?lq+vh|Bh$A z?82JRAjY*w!DIo^7_tu1XtHL2_RSWSWS_`v+x}D{wG=4T91~kZZh4mP`#58){g@*t zf3{mKWxObUxzo%zD0A7dxq6C2HZzTXs?96}P z8S^JX|KAdT|FQF00L=eX0`Pe$|FHz%^M4EejS_%A>i?CD3-fsMpdFj`K_nTD z)T*@}5Bx~ft>DFK!uKqCuJiT1O1s9@z59K}@2qidg%~gUzn6s&+}C0iWBbJ zPq;!I{noT!(0F^7FYk39kb=DTav!`;4=;8u9~XQYsmrISZdjA=T_4*Leo>pTlY641 zbmIdJLl`%NB*l9o$i_?PO4hX!gW^b_nYGI^V3;5g;#g(}m1@f=^>8<~iBOth=$=Eb zw47Xx57NDKQ&+zCQXjvTr}^GeJqwakzs-Dmqra>s#v~J2`-Y;I<+FI{7TcD1(b9ef z+h+FYHT#yg>b3UmZRumzy7zM*x{Ps_9MkAj1+sPB?|tYp!8X$NW2bR_{xX@>H5RN045LDcBlKmT0FAa?gc<$>&#r}qx9 zhDf_Jec9CKR*h;6xqFf4D91Gn{uX=DuXi4+qbsmSspFd)TsvR&1?r8i$W8GzlxkI= zkx;Y>)xwG&IycnC8d?8@5iLj03Y-Cg!!YE;2O_LTr&ftFQ544@9Oapdr!1&L+Bxqj zt}QKQlOn?k!|Y^X!9jN9o^fR(ck>-3XY-S@NaiG4L=)533cDYLcbmB*Sqfnm8SYlX z3+`CJX!9*ZPPCJ|!RAw|^d}aWZYhW?GlhbcL9!&_a>FXdLD;gC@b$;byFfdyH`umS zHwDL;(S(PHSsDfVP3%Xq&)Bo?Ky+oG314*$z_9^&_wf?gUi!?L_0L_(9y$cvEFka4LpV 
z0@I^pDo+RxB&!S=lg{{BmiE{dw|gg0jN9AQhDApu>Cdg5t8e^3!|U>S=sbn<#f5R; zA|m+CM3NVmHC0PZVBI=M`o+{|Y|VHss%oj`CPj-QA^sz1pkU({-@W50-%Ev^^M}qg zEq2vQ;^9N?ES*e`35yvOA_8(wx>6HnVC`U~CT1 zILQsRcx6o8oWrPkzCsQG2ceZlyf-JAgiyllX?WXdN}m*sw3ND2f+C`zYM3Gnq9S_m zM;#UOPy7eVT3CiUt?+5O9O_s`km{KF{gY+KPSYuTP>Hz*!y_%>)guj61ZpE9u-jwm zXjbs|P-AUlp$JY`Sa}#BV&l%Fy1Xi}Mv~f?y$Ea=H!Mtq3L9A6Dkd^`xrDr=Ic`pi znVy-(rEb|7|5G#yR(tBNf&IaN2Pok`rY>ZWXcBOje;D0COAJ19B%mlX1aDsiNaF;Z z`_jma^!T|FV;nH;*-CB-922C`7$f$k7)>0*{Q{L8Kc3DNn6R)}ml5=c7M11?u?fIC z4#v(PwjLfdeE?^V0|@L6Q)wE(Nl4j@qsUD}Mi;S6kTi>cl!mVZPO6Novj|>fFakFD zDLLV(X%YsHt`8y4X;_iItl5b|&XM-0OU+G+Of|s2NCx|e*D7bEC%8s+ye_|o%_^wT z9dk6q%^wY4AecFtmla4uCaNeOpjKD?U@mxo8sSFO>grF5!niO&%^PK1b9Wmfwk?Nn z1}{*tRF5?W%Rx_k%B}@g1P-byz3WJ7zq*eTTj>;T?G!O_%hiD!0ETx^%xA;`d@c*w zwDVmRQo@6du^sim7j|~KPO-1=;R?^-=pjF%5VDXwc%?(8u=Ncyg?%JC7#n6i(j7$T z2AW3tm`9f2xdVkysjqdd{`JY>Tl1UmgU4Z}ud&!zULaNEDjLe#<`9$=++HEK`p;h+ zV%+{%u(x@`9uN04QCEz^2Ut&6SGvmKu-VAu3r>-)POoKQLfs6>QX0J{P+5ajUrJv6b01lj-CM)30% zOQSH<`UNv5IQsI$#VbOx!oQFLY6cu`D?s8A^Nq0}rfB%{3nP+3A&BUs1qBMbW0a8V)R;#gWQPbHu-O6gr&Rm2QrPPD zj<6D%38b?J6Yk~(zUU;m>RT(h=A347K0<7MpEFSi^{*tbH!{OTNUO{_uXf^UIZEnW zrykiSoK@%HA?_%6sotY}1u1~im0?Pekh4+{7{nUBpEZG7aA5@s|FS{|>nhXt69USz zp9bWwCm3VU;7)BNW0EBUIxgv^DTK;iMu$2L$?5qSz}>>~N^QK|P}1ubzIahAi$T_1 z2Q~*r({1b4wIGVFiBmpB=Zi6t(^`FP-KMn!w5%L*w~0r=UUWzkL=1D(771OmBE`dvn4=C*(?&q1eN;A?BQoXfHP7cr5*)z9cpqvM^n{Oi@yLn6bF znzyM_7y47UETu?7B4g{)N*TaVGx#@(X$1kGg2~Dx&QcIqR`*dgD-CD^I^I>cZ=Z+5 ztGbPlGAa)*zSSG&#w*@9l|{w`NKNZcN`9~<-Y;-!jnMhrX2Gt8pB^nyB?gt&8~tJL z%@^thqt6AOAw3jKp$+2Y19@|OlD_*764NYv*%r+)a*^YzAu#hIoe8W}uq+Ju;!Xz{ zkTU%~24{M&QVSl~v{=W1OLdJwsF?>AqwRmLf{hm}Po^j7MM_UMWm_ns)^bE|h>z;C zwI0fyKMmvCplbEvZ?x8OBx@bz}iuFk8<0oLC6s zu-ebZ0aOGEhO13Xz*yFRp|o!+7d#b`tOp?HIT}V-Ylz8J?+k_zzsyHt0iJIpyj&*1NJ)kRr#=tZ^JTxM>R4)4lF*-FVNE zeTbt(r_qseuWWbwBL8&pjDh4ayZ$_K8Jpdq{1p3ws+-)o{;o~wJO35`$}#@e8`bIN z-Lcw~{U1^CrNAip#8>b?qU70Be?-X#(8}VJAQ>l+(H)kn=Z);Zcc!s`6$-(%i@wlt 
zF&}vuoK73Hy?L2i@3=Ukf63GR*nJ_tAM3los%{uFEzrO}*1!SOIZ7U?yry`+Wx0v3 z;yv;oyjx$rZQ_4!emTBP64IAXl;F-rA09*e5hvgBBTjyGe*>aY2nIYIJL(dtA{Nze zZtlq32C-03kEdXzr)wiqHiC}S_vF4DT`ngWs9Xemgk1gJB0e$j^_#(s_@i&Y5wZS_ z!_{nF`pCyl%T1a)oK>&YlW%l`-Z4k~n}^hoKF|U3XZPh^oe!V;MupZ8$ZSUoN<8Gff~Ke20N;;Kbv9kKaWQ+rTQN+LNZ}ZLld;?5d$3nEFla z=6P09SY0^EG}?Lkpv==v%yFt8vz7UZs)hMZMKpb=F`|LDs#I@4(yFs(Fh)t5ebn7b zV(zQ~BFoe$c8_O17DcR9=5vWD`!_gJ%bZdnHSqT}DA~)Z+c10?Ap&!UYo6ME8T{*^ zILx=IH#S!SW2Bh1G0AoeD(!G17^j%x_FIU3h3j1apOEec)P-8~7tAB{`}-2urYR}9 zSccV(6dlA$aKy>k@Tl__XWs`6p2>N4FCu3?f$ZkJmvgY&2L2A;HB^oFWK=nmWUW-n zsUUXx_LT`AEJ8?~8r;s;5SB2kEgA46N!h_%ZJnmkEs26FxZk}~HpGeI4UG>$e=8X> z_yCQ zW@-qHVX7 zC9zE0ZlOV77B|hn&tO=BKNG#Pr*e>wj7h%vYD?p-aHM9*%WV=+d|~#5M-Sn!BcNsc z1}-hH(ci@=A-`Nj)?b)$@-TqN&S`6~KAf&bQ;R}l_^<@?E~f_8iXoJN@ciRoe zZb??f+zk`tGU~}ta#BV#V3|JYPkT-abrI;U?~KMX7JJk#9Zk1Y7)C_XKB5OFfoNPd z5MZ3KK$_SU9e-?12}EXN?z!s--raIKodlb#m518HHl_-@t zh>n0xHNw?=MX2+pg@6u$K-6PAL~82>0A3Bwdre{$gHxBX6{HV z77;Ij!p$3D1~|$Bc=4zJ-e$+nlax(|TMd}gLT}f;XJQQ3@^iCrYkdtH1anN!f7x~g28cK1Mx)#>PpqgKRCG}}#8o$B=* z$by1zCmLvq4Jbwy&V_V!Qr+jKcf#ww-Nc(su@3v1d5#O>J44vr<`rQGu@C#B3g@Ftf@j3S`QpH=4;R8al zwYn;pj}2-F<_a3oN^7*Ym%CX?0zz2?!(jeNqR~m(2#Al%li#sbE0LDr&@I7rj3%er zV8Oje>j}mc-y+~G0BjcVtk_miL3> zm*3O2S5qCLUb+>ERK>73cyP=;NuNi=OyFZu7Gy4;NI`IhY6ve3jxNm?Yj7Icn=B5% z=yHL$q2eGKOqz@kSTGTcPVTA{P!48rql`T$)Ditum2%{-Y!?L<*vBI>Wxxq3gehvA zf!0T08>?}ai`798#BNuG?w!b9A(9nqOnqp31ERKrflEvD$;W3_Pp@DV59{t+HS21GW7zX ze_1id>0&1Am!$Tf>s6uwYdDf%6i53w7LlOWd0^%4f#Dkih5;P%j^z-PBx+!OFkdWN z;#%1mCANlo(%vpe2p+EFq^!k6s1JDtQyMWaLxZRnxD^{N%phV15kK-0@o$oC$Qj#D z1js*9QZJB#A2EX=&~G0v4h*%GFX!oZAEKvGZC5{5|35;CJu|&GWq<-Rb^w_1Jm(d-0PbAg1}JRmEp{V^(>2u&`@?T{1lE8Z)-v@#=IhT8X@AAkztsU zTuF!siz5qzj)?PWQiyn9wHH#?8N3)bihPX7(kPLqM|Vw45FrS6x(hE}m4~X`q(>vA zrp3>bCd)ep&gw((SD^$*24s^r)d9m@=GCHU)uQo&LfEgda9A8vAW?b16QQyCT$w94 zU|AXh=r?AlN?1O5ma|}6Z<>xkdsbasHC$x9#YTGGokCc`M8Dp_=BOuF2|=kRn1+{9 z@_@x4v;?sM24bQI>W3?BELjDf$M~JaBVDnNCbBj|MfuU1Pz?=dfeZ6->wH6UXdS`p 
z0a3O+dert5!A4$$W}6@yTJXbMhzrU)RT2nKfuF8tOSGr-Oq4|Bgkfu7!f>{%S6;>- zMW;f0o3iNa`sUb+_N3KJElogejNmPwUT=NI2dj-4Ke2vs{eirkBh-g3d;}^J`f!dF z6(}J>A^~(5McA-!9FC3|?`QiZ{0YP5y$N0)BoT+)f`KM%`Q5%bi_`c*S~0yrRGToR z&nheyVIy%HZ|P82EhHLN9iVuYT#~?~2hM6oYWa4a6?( zc~-n_oM=F`p(61!X5Pm;2!^SI3I-wX%dpX)83N`8D21GAe{ZUI(zBS%e5${E zWVX!fK*1#cMSIO}$Mb`NPjiGEw|SK=4+Lew$oYPpHTOt_oB~f$eV~DZxHU+>kGX=R zPkT&4!~Atn#yo;x3^>OaX;^h!uVBxj6kT^SOEo928NNR|>1H{gImc{lXprs_flZpZ zq2w!VYa1I&$vn(~$m=ETO4Bi3J`7m4DO77~Qu_Uw!;*%|mU3q*HI5|~4&H_PrDad~ zUQ4_OEWX5;sMOdL>8RQOV8X(@%XOV`<4Jpa-_yq&qb$>6e=2ND=>;1$J%FLQ+ScD> z(OF*el2(J+SR!9@+v>^K+w&m#HrZ`S&o=W$R|Ro}H1ADhzc&@(sRjHdQw_ed0i@P8 zmR6EdS+%wxJieVtlGUc>KIRw?$$aG&FlnN|q*0YNR1TLLQ>kGtv1IUKKHTGY`Ih5& zB#`fHE(f5FsdtqIAV%Yw+W_S{xhMdN zugMcZ9EXvwWtg1rJOU*9vdS%~GnHd@t;GeG;|t9g=t2 zxDI|9DcZnpbdH(oJL|eS>#7laiZgqPH+(@iPDoi`s+~22>(^rBX%`sGD~Z#V-p<>} z-k^y1SRN~QE_!o776~ft?P+)Wv-bdJd*;J?!HlFu=2)&a@^xwlqIIaVrdCA4H@l(u z_E)JHes`K3T)~~8TX&A-g-Bh2}X0q>Pj=ij!Ra-u&!;k$;z^8CHI;iroDLziS` z{-URep65k?lrl#;v^sU2=(?KhVP$@Grh?s5ZeqQXvr<9cg57@TGq zk^1eMT)NfnF_HUUbv~6lBo8c)r=tu-PtS0SW6144+3=05e;yQl-Wa&xUwi&?!R{-j zS2iEYu8#d=^L)Sjh&mu@WiJ?r5gTq0j69Wqh95wxT$S5~*sh?n2w|Na_{`?=*}Me zoP(kC!T~w*tnzNvnjOcac#-DF4AMQ@RL`Wn-TP6igI^Z_cBUc-V{sDYjVGi+pz zl62Z7#_^fPt$WsIuY!*9yQ)90HVD3WW9G-r09S$pu_z7DO_HL4M4j0gwnFdQOO$Jy8`L|>P#(#$ z<5tD7MY()(iCaz}EQ1S#OI)Rv9wA|X4e&68h0q z64W;Pq&z^Rs)zKHc(&CBES@gPYpW@!{Y^=pCN2W;Y+{#tF!Qk8y{Xyqvpf9Q>{!ux zu@(%nsRdI9S7=GB<6sS^@iwf$gLD3!QS{rdxrNHEW~3gYvPB#(>=5s;ot`@-6bBeD z+c_dLY&Z6}+>_K#m2ov`&3IBTT~= z^QP;=$Eog5Q*ll3NRp-MZ~SWc9;W5gdP(v)qnm0Hvh+2bvv&I!BgUzl4H1D!d`-_R zw3M>1dNsNxGn)5(b2nk4Mqhr(Q%D@P;`hnNd1xXHYsRr=J!Hu3HQ{YtUOU&%I1u&N zRnJKI~Ff91yAVL8EjNm z$S9-hu|>4g%>@wf+dTKg}=o@-NQ{%5YG-gk679Q?GtQyIyvF|)3GeVw=LeqR%eLw7`;W{+^KXl71NyW6 zvwA$gyHoz+U}5_`nCdP6xfpy&OY6tXQe;Or?{Jw2$)FuzP=wS*!MSKD36^s{}_VUA!htSuQz{y|LIom2ZGpFFvFMKMcptu zk%W2-QZK`on(y25RHiHW>c?+QUQstA-#&laM{#%#ygh#m5#0FcT8S{;Gl3yX4J*}Q z;_6}J;y6&1FzCI9Ag0Z~ihni1<@u)Ks{zYGr0_5rc!3)m<6DVOMx 
z=*_#E*Vh~e9f!|BH_COf{=DKxM~3OEqk0upbk8LZJ~t&^FpTmO&rlVWXZMg`S(_Jm zUX>4DPgaH2MkI9Eb-R^EfPR(!GBm3beuOM=jGG~jiwxItm7{4>FN~GLF`1hGL=F%A6>@mE_Xlz~wGFL^#q8ll zaLFmR4>4e*d%chSMQKY5GkTCiN-f{Q`x3R>EI$nxA;aNQ)AY`)k)A0@LNh-w6o(*$ zr`b&vX!cDUL&HR!%i$!JO-|?`+UoTTBE3ZY_`zw3ad=c++q;1-7l@j7FNGHc&Mo$( z*{X{GO2;)un0XgNyqV0bW};wvn0YvUmUfOkMS4i7zdsjElg}}E&MtJ&K`tb$O+VFI zvWlS7Yy;EW-`RYX#Gda^*Xa_M5FFnu!ExpZp>kwp_tqn$v%QP`Q_=^ozSoq-Wane! z)YZx~HU#ncJG2Ay8-yP#7=ic4q~GqwqwZM^aKS$C`Ia|4}qhs$I+F*sf!cnS^C%3Wy!okx%5 zSk4ri1pmREnD?2!?Nj22BW-1sAkMmJu=Yg){#?{mGVQ?97e60Z%>tV$H;_yNyE$-A z%Qarb9HiHXn7PzohFw%2hSV6oGgf8C535Y%ltGL_fPi&NAVra4EHKO^2-eT>5H?ua zC+)?s8Di<@zKS$Z826|k<(UxJVf=(W(FcY_`QZZja#$E#nr7iufWADHv8O72zJU4( zC`Ogtu~L;~o1#FlnCK^jcsXCE)5?)EyaM5UeT_wT}J{l~8kiI}r|4G{;@4~bRE65Dcb^~)8Y&=u$sQ^gXm%@@XkS%b?7 zxW$8BNx(oHc+_8$3oWDM?oMn+pg1DgZvs-G*w_^G2-RfaS#59^^&tsnfFxa5TBQ;h zF@FhTp%VfdMWGX9-AW`~GbpMxT>j0k0Z(Bp_?x9wQ1NR5|6dT|Yv=truziF-AjDq( zAqcTp9Bevo9QlVSWLEZEza_7W-6-?96{^B!L&MTsPc3fNg6r~>ksPx_J96GePnE>c z8QstlHOS>LxYn3gS7D&#Rk`WsdlwChis z{khi;pBjC2Gue2ApL~M9kDn8Mm#(X-c!bz6Srf@}9tZJ>7&p7q5152Co*&>G$V;*t zDX%@`ESKL(?0k!d_%?-7AC?=H(>(bWjDyTR0{u`t3`1H5R86d50R-y0=l-M@(FAI# z)p4TmYo5!U;HMGs)ks`+j61(Aj?fMFplZGMVriyj?j_hd)}uReh;JsP6nhV&(2k39 z9H`@B@zwKT8@wGm(<7)Bk~k{aHh5JJfmOVjJV{VXiuED6UMBT@OX*?ZkwS$|o2p|h zYX0!sK~Hsw(H~G^FsUvgK?|n4pHO1F2FxJI$jtuXAqwgc8f%wf}??hyQ>Q=Zg+tQ2z-f zM*kX;-&eg?l;7v3OY5F(m@u9H5Ku3EgAC3zI5g=KjNo2h75C0dhgJiHl+@r)I0rSYo{w!BffJbe>X&WIwJRnmqxvdv2|(@8qkqYMYj(11?SRm`@DE* zcy)dVx>1xF>CecPbWj8FknJf&Zw#bG2;c0>P5W=bn|Xe1UeAJLAfRRl;>B#n zk#FjvGCu|Ju&N%vB%YBc!1)L}!(b0iI}Y|OCzH+oOrTtCffp^Bb!dWKAq+EB$r9ASFojRC*X~C>e?oHC-(` z_v%~ZW+7~O^iqyR)6eo?G0~|OyG>#OL;qi-?NhLPxzp_;zM&mmAB4MZ zuerM-eO_`i*7T&r!b~wZU0%ae#H%Z&>;qjhwX8-;VxaI{Ukk!Jn$z(Wt!}k7qt1WC zKf&_<`1hUUpBW4}6I$1A!>Px;qs1(QT`9)L0q$(kEL%7uG)Xy} zx*o$mC&D*dFIFyX5{RGwm>Bi3OUsRWKG{?AX_dEPzCy9({WyMW@O;qs4;Uv$YSb07 zTM|oq)SsTCBe#$8Tx9%RI((F$tI`5K^cA(Ahd#cW&1&S=PnWQ)wU*sb9VjGZ;Plo{cc@~ 
z<5v1%N4@0St8E)Q#1oOD0^JiI?dvy_{0HUVF&{;e7(S+=-qj<2Pu7n;!Y&of{9z zEJ*!0PGWng@(}Z~^V6d3?#%$G)n1jNZHvXE2PzAqf1`sRq5BA@xyZ+k|)irmPNWsvcn!P8YI zLHKAkQ;=B;1NuM`Ho4nmuOOKcdXgp2vk8G3aexSj89c#zn|r0{#oRzujEvEA1u4^ z1X3q31$D#;VO=%EC1Azm_S6`!33%rRr_!d~z4c5#_0l4NXs;+Ht`GpbvE3Dh`b8T9 z{5)`SEn0gf6h(4~QZ4rUko;DVj;5luIB-vCs&>#O@?a;6C3Qb(I%}H6#3@trnLn6= z`crgGtgy^n2W*#wOc1(?!&_)%Uqyl6IfDd1&fw%JRGpYkD~U_!Or0DYwPrO*4k1Bm zt`Wo?3WWm#S0#bmhyLyf!4aV2I8%c9of_tL4vkUh0eteHdv{=l*k#n0*EupWQ~jeO zCKuw{+`*akEX9U6fiY^LYvfob$mlJNlTfyHoBHuKVlmKU@io^1ClVQ^QOM&)LJ&3D z;S2cC2KZ@Qa&(mj$q41abaG_zM2l0JFxviX;9xULE;-NruejPk^v_aGD?zkbBNXKa z@F2yjk5uH{UeV-}$wOP0vR-3w*I#NDK&euUDtOA|>lQq`_U7Azg$q1cz%L(fSqfwW zPl`L#kt*n{_S?PG!<`}NS)dh3O^C6BlfJlp8F5Q}nXtkeIHcc!ik|fapy}Y-wuRrL zz;Q85rLtbVsDFoexq~I7{EZLAr+x&n zT=IhIGLco`3{p;05{UzEPDK)tH8hJin?`d_)cC;SjpF2;<}W9|*&b}iOj2{c1c$8h z?8Z{9=l0K1?Pj;|137~sbOKsp_T>iYFd+ipB%(dz!0QY{oeA$vuO!;VCPSe38es&* zdO&8U-W$cLDUH2e6b?B3~Z z8%KXx$+1UFa`9Ui`SUMJquSZ5YW=C(-uwNZ?6^Q($Ewa;q#PByiQ+G&PM7NhgWlo6 z1=Q$p)jncP3ZZ%?i|yzZ-VyDIEOYB^C(u#SgUSzKDW0VXaqNNBVqn4`81LjCxh9`` zT?~nkM~55%DS|X^K#E`?XEp12!+vHf)%f8sk;3+||9oNzyl1)=_^q6TGe)$XRvwBL zu@rbM?IiHLVq}VMlL&L!r2dL1cw!eOVp3wUcio|P@@Z3jlMy?fEZ@LyoWXbx;7m!7 zCJc+-W9_AtK?d8PXR^DsH$8iY!=7oU0WGQJIeQ!MXEt2 zznCX`?*c{n-ByMG!>~hg-k^Z5g2E^-S6WY2RTNbiXIvdA{di2? 
z%xP+9&;3|_7i|8_gdxeARW*5<4KfPidwkUq+ZkZRHusiG4wxA={6QKF7*Rk!i~K9n z;A4X|(H%gY#1o2JhHtt9bSn&$8bKXdrbYFBcek>{32tSmz_JfW3r~rdaOqB7Upk2xxFa zg!)i%%hY_~F(I4-{qntXf4xL8Pz3^+;KrgYk#%og-mysXN*d4c)_W&2{EbQ{Q?3EN zx^5~(&Xw+gMo5bXy>ru@lhRRo%g*$)t5j|1qAfn-H~_`J-!jyQFB z58Z*(ATB~X)}4L%K$!0j)%QL3OkVu<%`*GH(+Nra(#?a#GoqQ~f6xgn_Q;^!nY8Oq z!A{4JSiWdr_`~Hdh!V~Ax*|)a9_uRWp##Akm$xsTs4bu5w#Jk{Ad{l%W#yNNCqXE% z=FP0TCnA$F6~y7?#pybT96Y%hiI{$O5VMfIT5~zG5EIHZ)i3%KO#c3yaBR4nd;;Kh zuGofFzrZ?7Ax&-PEPZDas7=``Qff^hnoMZlFI-d7u>4V&Ze4YfKDiB?QsczCr><~GH4DOu zergBhu!)QTHQqOdrX%J!(Hm@85}FW<3uPDL2&UgfA#-h;N22tJDn3&j_Ma}h*a`Bz zfZR6nhW;zs=^ri};6#sq^74ObGrzRc|61*o@vnpBg#}?4=@?+>jSPOSfnJc`r!N9d zWb_wD5Adqru3`8wGUWFw{@N2HW@qaR^V8D;++c!X{4Hi4crCqxfwQxTqwSBAKXx_b z4J}QKeqQ@ySJd5ET-n*c+2rS!1+DGP4H#g4Ch*71?>}}KVSc9M=a3Y71xGt0WfNx@ zE#Q4c#9-)EOx&Ge=p}6o%uIxSUkm@fmW0vytF-={O#V+r_g_sc{f|5_!vHJePlfu& z9t#Z9pV2q}BpLZRPv`#=8TtROK7ac}{&_0X&q*QvJfF|joR z-j?l0G5+-^xtE2P4FHmqn4}m02 zFsKNKa7Y-an3x!-=;+vZr1;o4#JK3_1T+N1WaN}olvwz*Kqx%}DFr3PkDGu%LPEko z!Jxpvpip3=V^jRQKQCPXWGGNbC;%u35#SXv2q-ehOD_N)m`-qzpC7>QACOm|!1O{w zLBqfTFQ`WXyaE9QeFX*z4h{weyxIqN9sq_6{+f_c5CTQf0FuZ7mB~Lo7m8S@x*JVt z@|1+x&=H8FM909y!X_mnr=X-_VP#|I;N%h(5fu}ckd#tZQB_mdc&lk-Y+`C=Zei)< z?BeR??%^326dV#779Nq1nDjpRLrQ8|UVcGgQE^FWSxs$SeM4hYb4$6zKN`Gv)$&8@H7JG*=P2Zv|p7nfJpH@A29Kk@|v%;z7U-!k?u@`Vh{*DEkEP%y|J z`2u<623$aq!N3U_AzlkALK--r5Ha~fp$f(4R(C@aGb^2<89GkFpp&p{lAis@+RvQ* z+cFmLAIjOE8T(7VmH=>|Ai#$QiVWZf+!sYqJ;S{~_-13CMR*h!N-)t?(F zLcwq}$dsoWeqdwiPngQ3XtqEAYLtaoDt>79GZn+2v$t1N%;KBduCwIq z&F`-t1-ZOElk;xOoXVEv#m~>?>@)*D7!TR z|Ms}ty3S%FggcycvGYr;+ZT=4<5Xg8sq7GMm4(mfdtU(Ikh;^k9p}Xj61|Ir?1Dp})f-N8e!NK%A^Z$nDQB&& z?gO7JJF-g_4i#9ITz2YLsQP5$q-rRLO9pwZc1!K-_%{XJxn^Tii^t2;r3(|BU|5LW zvh|r*gv3%f)k8MO%Q8%X_(X4xeIa_ zf7Bwwk#1^J+i*0^jRI!3S&w6{wTz)`w2V{HN~3F;BE_HZ{z+nUmN$WrZXn7g#k zJuM_JfLho5Pb0-bOTBi?g`49k!L~5b#Z?0a1Db~}-w_UU@tbe8%J#1= zFn+r^lTbpWEVe!db|br`eZ1X6J)(k^M}8|7N7|^s-9;>;L-gvol_XKZ0qX^zF&g0jas0~lG(Yay+3penJ*{gC4<_uxDgtE5mZ>604!5$yN 
zX)CNdSRdIMrZ+=iB`WOZIC2$AKJGXJ?7^Mh!A^KHZ`lwJaK*+g~WD79svCEnPoduFCuSuMuCz&Zh`W*%;tPeFwxcH)COGk(-FW$S`DC4)p zK1_L{EcnJ>P7Rv(k1TPw*F85Jy2p=Na(Bqa=1~-VelF!-MT01srz~2nj|gqm$mpq< z=s=k8#Z@24+BA(g&qiDs@lGz9?qi>H&wVS~7g9(lkrH^36Kf}9hGV%})b3Sq8`}R` z>%dJj%0YX8B$z}+BOiG?JLTMK198}omo8$LzD?(KcM&|>nLlyETYVvTInp>bKuzGcM+_jB6tBoA^Qh=Gfwj1fmt9HH5_Er{K zu(wy~DZyV9XPz3kYFXL5{;D|h2AO6}8fsIn*CV={a}1+$ z%8v`1I;QDt27ghL_bMja#g!E+4LsPML|nmGf9@kU2$Ni;Zk#JnF7A1GbAbJzbqq_%@uLWSe9Aw=t*y>c$CH$tw=i z2h((yV(p#>k_dzMc`l}-Sv)Uw%#k{nAHZ`%j5Oyao6tm+EKpHy3pelZY&BQqNoZGh!JgsIqo%Jk-c21 z?WY1l+yS5bu3!#@5xZvAuujJ!HzQHZax5CI*-l2F=0EV24`T2Bh#cGRA}0D98a8d^F&=DuCE1n7?tW$`1h!p$d64 zU*uGS;r$E1F{V|+$Q6gU)p%CNX>%{Ae$|9kM@y)-xl;-Hy+6+2h8@7~5pfHmrHlfk zI!_dP0K1IHudIv#fCoT5{MC#8AE!l>TA5FoHy&UoyEewF3Jj5yZhe|m#d55BfRQ>J z7}rFMuyytluTNORX^L+famjSL_V8`%tbEh?iM02IEBZ*9d5CvrG(n!`i`5|v^}bQI z&}*k?9QM5z0N`xry_BM&3yZGasaP-LYtZ>ud7UvH<;5{mzO^e{$hgbG*4Fm+?i{>j zp45AY#uu4(;%Dq(6&22-@^aIg36SDkJ@U;RO3;i&6T<^5iK5@e}4&daaZq4)K>3;!qYZms0=aUcH?WT6tOPFBAyah|} z!bJm(xb$k(_*QOOm-KE|V~Gs(p}?>hD6?R-=rJ|SM>dZnXxf^-o*T|~OsE_CfSjLY zZg}FFW^TzNS|%RO(b*8qJ73moIf1WEhASy}q=rCQ#~NC-1AlV)a|gz3x$_GEPSC+J zYz{l?XTH0&B9y33}eJ6XEp~e_;3_%?-j`LGb z1mMDl|1(#6V}SMLLnga?hxsS6gstJt(C@0L4Jq4uR!;ZV6~HHPfN?KdxU^Q<(3mVR zH%GSliEr3X|I@`hc?JTw%Wo3&hZOzGA~H^42*&xQHJCh={Shc;2KoT9qQbkfF906? zvorBu2Y(gFugdW2MffEdeo4h&EyAzX@KqOhrc? 
zsIZ<`jtEt=YX%jfa4V{q6xa^uH z_Mz+w0D&a9$@9ah_MuqlNIInW{cdvqn-5V|H<-k8(@{wU(BIytpFXRA@cy?6oL|Bo zoY=YWN9Bjlmh@fcA5^&l|9#^Uq~ATPzZ02z-r3V1Z4A2n#4NLjR0yQe0Bg|ET^mV!F4xCFX;?CA&VWX_ zqAl)JgBf`m&t2cLa4y{euP{N-1V^i1d(;SZSv7p$(V-Rh)W;G>hw2-*RZ3rmJ!D6S za|}DU5Pa%hOuD>XA;6OFBhwv2BsBbG+4!|D<2w~doYY$X<++kW?9|k{A)^qNczSF~ z0?gGWu@?ZnHuA(C#{jG6Gi-{B#P<_I4A3ElA@tLVsBI+qH*K$}IBmXvBbhd7j$xQw z#lkbPC6kX-S^WYvLK$<1INdPRDitgUU~6~?D9P%UrmKs?>9q5BJftZdwjE+Ru6VCYX+O%07&y=%!Hv+vFh*tOJRQTx8m?_tIxl}T_x@fxxIDq29HP_= zcGhQNxumo6rE&k7aSf}+W;el`hS-+vgcrfl{U}Q!2i~hf*?Qu$neC0TG)4w0g8+(u zMs}rCv4{7Y?_y03q;)1cOf;Q@+QepVJ2EZRnJ48U_ttKbxG;sFLCNG@L+}|!`(AD( zMU&+1fl`6W$_}_n64R#ICrSttS^*0irhaUf%b$HfGK40325v;}TV4QOc>X79haqtv z>_x8UQyrj}QMUtki>+@4_bs0QsEVTh@PamcJKdLXt2dmWL#36@$$>IJ0iyL#U=(|# zqblrW0e)4`Fz-MzA=RpbX^bp@sSAIGyGB~?Rb3=$Va_2*5{J)l(={M?q=UxtMUDPCrt}3M zu;2L|=;+uof1aG>-^asvdI9i1$^vW@H9D8Lil0F9UI3jyUrTuYgSg5IAjetn=J5q^ z#sX3=YWM;$Kg_;Fh?Bv>0ajVFW5va-A=)KHfE!pF$V#3^Z!vpeg5Cmt{KI^f!Ax_gR|j9TfschA2Rc{-b6QCJQChbz&PmO&^81 z&aWfSU=*+9e=iCCIvd};;(e>kJU=e_f5Kx4uuW!;)qK049DPo#xp}?=n=b(S7XYIG zPTfDMy$lsf7||2<#1v(#T*cm=CLZ!<0_d-!D<3bP;PIE(IxnU#S;r;?@G$-fk4eIB zqwo!`)6QeEewjVNdFB^?DDq?9|J54*9DGg9uMJcefjWq{TJWDTpeL`EN7RchyD0VK z`a2xt7bec~k@Qtdc>a@jHbH*ExF^^UYhh2&gP@hUtWih(2w%SCewe8Q=cqS5DV0+r1nk>%}ILG{NxJdRE zRqodkkFr*UmMxrBTFu@m#t+uY68Kf_6A7tTLq&osliI#PJbG?hvQp=$lcbF`^=Vpe zZR{|^I#2-cE|NG&-ca0(d4s=qUHu_V)O7yX`cIH6ze^LUr$59BC`82g{HN{esiLMp zyOeZ?CS%7gU(G?U6BFk%XVNM|eGD=r9QRm`)xiNh&wiJDNK@C%Q#3}Ty+}#jr5i3! 
z>eukPqNfG-^y%5`j`16X@_)L9dcJ`TPm=J9&w%nO=U$jSB1RzZnQ;45Mi zOpv)8wuQ5J2`Akhx zlB(SAmRRDy{zeN?%W!%;t#QC!G`G7+7YASmheM4@kUW@PONA^@)HY508qzdhJndpb z=0lHe`iw=IYP=4`K^pKheUdo!alaD@U!i^a-E5wY#z9(s%CQ|uD$W;uXplI^)B)U# z2}rkp*~`(yCVzZ~e}i~|mi^}@DoE?UyzJk*|EC1gF7q6CbeFusAM@ipqzdUI?)yLN zeRo__Yqs`A6jYitX-ZU35JXX$A`prcMF<_GDu^@zkzNBLptJ~x1%%Lhk=}_&6{SNc zp(9;NfItXI__m&zGiPSbjC1D9z2A55cmBxlmt>dhyk)&>J?mM|n`Cq8+UKa#C1JT4 z`;TPo*MCkF>2oG&*>NrViVe&R9N(@>=xsGKOO4MEe0(^<*9O=c7W^c|H#Jm(I=E9V z=#p(C;aP%y`JCfX2d086ZY2}9Y*0*;_cCU?m9U}sl1}@&Ro#GDZ_scQW3WCD#C~OA z*zfA1U_hTQ&(P2aK7yt6Qdu<1BfeF~t*FL*)W_}=MUZUajfBlu>L)8MZ*m)>>Y1uB z&m2FVV5~k~Ja9ad(lunN1bGbRnA3DOd){fGz{O_3)qW%Qv5RID{dkol2Kga#0m)=A z{b7A+V3@TjErL}Dd#T!}%_zT-otly|dbNdxV%}MwiY$}R^XV2D-8EC#FS>NsZA+O73yrK1xUK~8`tsn z<8l>oUuo>i*_4(IKk0=reOBSPqkfss_0W{x)oj6_QtX$M#;q!)tduxNX{%Wv8N<@S zPCKOnng&AbiWQJEEJ5@|Y^!Ph`3>Ok2UhkIR;cG&xd(8ElJR+mI|rcCAb7+=(TFcP zWG@;Nad1ree1=U}>>Tb&X&mONmtt{M$S^<8v*%||wrxxK#(*Xl_mJE@wk0if=H6PD z#uxPt!fSLo4fODwZlhlJ`!V?I%R2QP@WUJ9fhLSWOZ8O?@iW3UGs}r@>h^%47pzLa z=8EH2LG*myRHc?OQp-MdWL)3rvo~aIWmUQFu2)6L$>T+rl-=xTlV|01GR;)&v^($E zL`1Jm8x>Y|#mshdkE^0~mE(`uzhXOR$Xk79rj{+E)^moX^$G%*XEWh0>`SVRqV|fH zw=KDN@dVE_@P*zzKOux4NdB8wo*E6w8MPAf>CbO~p}#%lw|>s%93XnY-v^rh`3>M$ zcP8lirs~s4r6`5+!zsMP50yDXr8(g;!);fuX4*|XkzZs%itK-zATr{(Y~0S;sTW-0 zR*O(Q^twj$$`xibVS8}S&Uq;mv46yR*E{Fg^6b+?j{upZ8V zEVj${cQ)8fqi5zK$5}@;4dtZ@HNQRtGbVj8%Khcr3Cau&K98u10xi2O9{1{~Vjp!^ zjjB@~JvmOD;C^hb&#a`t`%qTrqkdAyWo%Vd8)wv#*lv5DY-W6pi6%>> zLuA8Fi~b77f=Sxn1MIB7aAI%ce(~Y=d$g>uTcF!>NG4^czbzvd-tq*gNt~yQ?$1lu zxKUg7G&9P|L`OV2%+Xe{p13LME^ z9{u?Bk&j>%$Ouo9VK**|n{1t{_RqJoH!!{)Abwsm-pJ4O%d{E2qw$4ihMhBnagviM ziO#k#F$|Z;aJ5THEN^^~(>^C}?3R?cD-{=eUgQhpML_6`Q$U39C7BmlYh*)Y;8p?$ zs?z_05a#pslmpZB$6r5`<-Pk9YOpEEPub#PdT_)_<{(yV6qg?CAUO2;g>_-;C5e5i zA_@3o=H&*XYXw0#FOsrcKW;bYkdWw-&`V82rMC82C5lV-tsR%&PweU^CwPKIDH%^| zAePWT7dwfDElF3Tqt6kDzyQboT7W z%gy5CD_i+(AfMgv!akda?v@`oojUJX_J~oqU8T0m^&fhNJb%4`K2dKgagahCJuZi4bpNOGPwh5yiZ8sOwT~V-*>M{~ 
zEi^CbHINGiK&)P1IuOxKUdR^v^SeYk0$9EDtyK5_``4drJ!{gUa|$hI(t9~)6pqbFan59{ zy$ZF1_p9@9_uM0wPz-gOI6jiPZ3c^X5N?6V)wn z+Mg}_)If_QoLIU>%)(m|baNb@dWu!@T5r6E8E46I2P-;idf(_WUFC|A!R`UL#Xk5E zDVaTBee)A}U4vR*(WwwW(6A2weB(5jN))q!GS3(JwBH!n1Kwv+nuR2B7pH&RzL3{R zq}5sSvKe=srQCe3aUeuMEaA*bDQWq&#@bE8;<2!o6!E7lZbC)l4l{^y|6IGt=kC>A^ z&dS=3@+-x>WHz#CjVe&oa3p9)1Wzqd?iO3RCxoRy+x#R21mnrW=&zUiyhWJ(`%{c8 zclw`7Ii~D8u!RsST-~ji-qE1yXTd<3@LKbJMVMxoH8X3my|ZoPYq~gu89(Kzkoh3w zJtf5wUN8PMaz`DfBVcC(>($%>Uvy5nq>}J6oaTokaRa|H%VQ_+VOB z!fep<3oj67I5Z|Cq?c+qT@t7XqNdyJfK!RS@?Sa{Zh3nT=q&ku;Kgq~5c6mAm}mOu z|B}&Q?LfH_#G=%EDTU<^?$M7xM*W25o(V?jl1rAmt#r4N{jMyHQ(5VIY@FlKUVM^g zZsaZA>tH_cqNkJZ4kJWp=gb3_K|-5!(_pbJam~_2h1_znUiv|J@(}p`f$koI{fo8hRPB z_}K3X#z&!kVB*0p73_W(-)7FI$yK`1Js_u~I2tlfn$S6JiKo6Wn zbjt&AbT?K;gVmj6xh~u^;z2PRhD7$@^8#t@G4h+d~%p%gpjThd}z{V*_c^Bl%HlZ4L=JpOqoH@_|Nz_BHvW6&-qJ zU@<69aT?BWy;oD#r$2Zz2QeAXr1`QpZxFI&lbb-Zwz~&>7H8c9+(aqNpsZF=KBGzu zjZ?WT`PPqoM&$>vFAg_FzvqU)cU&y?0GGuQBwY#HZqB3NVHmj-aIww24IQm6cSsF^ z=iMm&um`x_!py%PI5JKrI=*A!;)+~5*LPtJ4c)QG8O>NCSy6U@E9xE)=;gyk2=|@F zQWLH7CiZ|M@*-?cRVa5GLx4?B!8ZFz?9sQE$buyRoJO^CCZweT>so1rQ7WeW0(ZGi zenGL^rypYAKG($bRyGIPv(MH-N@4n0%8uxaQA9cu^i3jzR{HgGj1EIzgvIVx9oCVIh|dDx^xteP$}s|W*R=eN;OK%p^4uEJ$?9_lx-6a`2=^H_L@5%%hd-aMJ}RP6^&%nc z{q&ADyoBhG32&)G(q(&=i4Yh2AdDqq?6)jK$wU6c>=F2pO{(4AR^+XAzykh3VhTfc zz$G1oHb5Lcv7R%fn|L0Ox#;G`jS?5xnJr7fb_Rc0@J(3^gQnd^G{qi6x@iVrFDD+> z^QG((Qi0yvxaAi4EU(&_2Uf~nUQ`ZqWN%mGMx}n-786w11MC!Usi`-SXJ)E>cv&dy zWHkc;202BgXt19i={TflZ!>7h(kOCpmuOJv&J~qb?1$OWzFDfI$YBBNxjiP94}XPR z`XI(vWdV;d8zGAK(%2VQd^0YlAuE|+v>-z>@Bl$nb=gt*$_p}0Ov1;c-Yp8P>iWLj z0|Yj2faa$qytG;*947nf_^>dXfV0O;slRBk8<7z2B=)q@wb1OD#S?)&A-7#^8$X4;joeoRDzWr`f@>rtp zW*TD>M&=-GepiPDJ5ZBHv1-ARu9dSC#7oTW7cezW8Q`oc4qBGh=O8;%%)ii2C_oe} z`xZ~W7@vOwQ=AaABBiMbr3|yW{Mu^2{ zEgppKmBd4T&5llrS{dUz%ZufqPkK!$%dDweHx5ovsPG;zZiXlyhIf93ZVx8Xh-wK^ zYqZH}xAN2q{U*Yf$6RB<4!bA=VTokh)gou&ffXbx(NU5?SIJ=WV%IOqZ2!np{^mU8 zFCbpZ+l+PVS%i?z>aLRN_loU-)pM0{2B5A1kK5X=Jo1JAq{FR|kk8SRy|S2+c9U7= 
zc{NWuuv8pASSR{!s#jMv?KmY8n@S$T#(-NOSkpXA$qIu9dJ2K3bbBiud1tJ8y~>Z^ z-()j{-vi_-Ci4xBZblx=Prh|3#`PoT*uqcQxfi( z$6~le(JSwb1!cXRwI<$bP$&EQ`}aJSU6dsZ#KW81G97nAMWM3R%8zlFF&AlHBe=_< zMEcn|t)kM-;qyrd$k;={qZ`cxx-uv4BF151!yP^2@&-F0Vf&2A3j19o2QkN{_X8(u zKCP$sIb}C4R6KBJ#QSHrpz>c?xN=EA<}n}Z)YG01R9Jp8sd+hq9yw@ zFsxSQRYPg!%ixQmcnX|gw4tcRe#d&28~YuT{d;LbqChw=(a+P*D%p?IT%n(Z%+g#Q z7{tlm*{`Uo2xv#&co)4o^nP52)c{66zwSd4@2@Ki=}-0(=3R63BRPL^Bri_oT%Uk4 zpoo=r;ClKrAIuj{r{bLwB}PM>WqM-sK~Kp!nw;;Ix(E2rR>$&Fc8*WRWDtw{R+(|> z@DJ4$2$E9eBbm~9XASBE_y^|v7II4CB}A4D46jskmw|vf;=uj2fBuCAb^IQ1-+lH% zWo82(=&*!Cun$kp3t2$c4u3bzdqAm_3mc(bl8rz?;s^b%S4~)WZkGg!f*kYhYVh86 z;Jr}|(HFYJ`0>GykV~6a(+Gz*&oSJP5An<-W>Wf@K6~jmWp@e3(51706}%4h*$Jxt-h5!$IFLWW((*Q!5K%ei)?k~HsF-Zg!K^fS7cOSwaKj9*)9xS*H z;!tgO%ICBLHj&=X#=v>>d>)d#pH9D`SJZaGd&%~?(}=$j5p-AX?;;){m9tmZ+8T28 zw=;cS!vg^;V?0B{z?HKMQDcR5f|p>ys?7GiE?%zwHm;TW9q!9Ir0uT-;J?=DwuG&P1YJk z={I~1MxSxOW)}{+N42~@(U-)Ney9oX!Z~q^ua)?oC>f{m(8nT1D=yQKKNd)oWBEdJ zRYvUD>3fu`N`+*2ZI8S)vHo-dcldo5HKosi(eRO%1M(`fVUsVSjaNZa@X5P-(Vz@# zLvQ}<8ELL^E9*4&%(E#Wb9HiWkA-TaK;nb7q00avEEeaYt9Ur^03|@*2W0QdJSb4VuCmpfh1r2<@m!tn!tv;6&iQuRk6(;vtGahqwNpBXCF^UNKGa-wb%=7^pb zcBgkZfyS}%F^7)uWzsBhp+tkSynH)uBgG5ufs(Xi0hYggJ^%fTI7Q&<_pc?&w-Iw{ z$$mWUW>Ipyktr&YBK*9*V&Yq5?7x18xQ+B~S^DocE^$^ee>4%LG&VO@ET{_lv4~e- zd_w4=8vVV!sonqaDD1H^PX;<)SIC?!W%!M7a%a_HRbozQi@f3MJj_pl!R)s0bl0M4 z#^c390N3wdJ2r0*J5dMAQkJ()AhlpET*`LX)^Zl}3t3+z>+#laf z;#+$i|NbMU-M7Ps4m{g|d3NN{F_UFFztKp3MC#n=d}g{XYz4s$5C)PW9@lJ4=@Y{^ zsYx#6(s}d5|9_=7|0`ryi*cR!P1Eg`b+M#h%#trP3Yqada(!ZqM_vrS-|(;~TNyso z?B$$pMToXBIab`)Xw5ugDAe5iAUmqE{_?_VNZxd2DzZNIi-ywrjV-P0g%;DM4R#uDR}C5NTAP8~del@A#dPRoutG?o8+|7Ez}oUmM@ZL4%RFTlQE^va!vxqCCVxUpG=A&_&=qV}jhgPxg&p)m;3= z^hMiQTfwpGdjJTw7yy{xn&tf4^Ro>=l0|tRT(VlH6oJP&WP;J%$0hv?weHn|#~wk$ zht3SWwI5EMmafuupjm7&T3ZgvynBQz^GV$;JB$B#rMUdX zpc!C167Ao*CB_g^cf#7>e1L7X!6&LD*3Ze8pE%tS?)?0SVz=^(K4tPkGzRoK5N?*a z2BQY1uv11DLyv+7s>j%#JiZO5J60v6`fK_e4s1%8xi=;_)6rf2qmQF3XV zfdPc*joL+_{lw>gCn-_jWg*Dj5CR!Z`TajHn@*cQV1Is3vk!svS+tLp6%xAz9~J!} 
z%ZfU|`^Fv+834M-`6zTPtuyQ2IE23mJo!&l*Zpt42kU|5cU8f6?JBNxh-i1qazF`@um-F^c?*QF66VNYz1R}El!YRi6b79G89a4x!Y16DVP^%tiX_+CYW{l{9ix7F zKHqtqX(85Gh*`ne*Y)1jVCl;Ij9qqkrxTb71{0x6QP=_^qKf*PgVRL?`^DpQ|K(9| zd5XZjJ>ci(f&AqU81!lUt5@AFe0Q`+WG8U_VlS)f@3&I|TonfzsV-rh##zPw(+&5} zy6C-o(c+wP|7NJF>$9G9wFqN++16usb_cY7(Ow_DRlw)ky-x|0M}`3H8sC`N#DCziV!c5D^vwUr$I>o!d>)+-(inr$3@Wr5_Ie;?pGqnq&GpHxyPT!zato zWTY*9YJBB*qFHt4{>3pqEM3RW`D=Roz8+GAo$sdnq%Va~15UAj&wu+{H}3a+o~f}P zUQWxZ>6$xm#CM&^@2zJD(1ZumojlMW=n{(jxpdxTqb_I44yIjGT2b>t6rEQk@{aac z072XxdHug# z$^Iyr{?C?->-Oby4+B{LsKHr3A#i?8J(ms;5QLUi&b8a90U~>`2W3UdwfaQQ;+SZ zmW2G&&=jDTaD;f(lKR zDE_i%#HcM&j_H-hu}NKJEm#_$s;Fq|NY`JYf3%19-HoK4YghENE*+GK`Sm<3Yg!Vh zQ2c2Rs3zHpmy|&9JMe9I9$@!-Y5YIKjmAF%7r@+nro(x&qT+J-OKyGLC|A<; zb&P!MF6F8FaW-P?(3yDjSY?QknF>f|?JO-gVuRLjU&vd{XZzG=w>%yB_FZW>-Yjtq6`W^^ z<&Js@PFuw&cQAjcA(z30EK+lvRpS8Sn{UpUI}3(!uBNPQy$H`j7Aj3^-PyK1dA{hm z@TEg1d|By3Cyf~tiz-WcojSGPPpwCJjTO_0bQ*10Fv9Z|cnZzC_t7{W{q}!NKAzV{UNpUO(6J<6SWardJC-}P z+#sf9h%v2CK5uN@*y6CbpH9@pfQSQnT{VJ9^JgE%s>dCusOXHVH=EcxUF5p!34>7zs>GK3z>cbY1Fk8*p-sLPUAzKnPpj>Kb=)rrbNRUb5*u8ai zNqR}Wm|ul${s4&n??!unUm5ea&+`lPG4F6%P7^OWQ9f```0{>+IoV?*R_I^>bPf&M zT>#VFg(mRMT;w)LE(r(Z6k%I}ojbT_GQkv^@>L(XvBXE(L`};c(bOh|Y><6G%%<)Y z(Q21pSmT@sf97W(HQVPe&u&9TI7n$%tI;TlPuvSYwsuj~ z{$Q2-r5pOI@3qj->83ica%eawTWNi$sxb}212A@iEj{z!+S2g_Hr?}Dm@_H+7v`c^ z@_VQ*4$N-nlwI0kfBCF-=@iZdqAg3GoMmKFc65Ac^#p=6x34(g+cZg5#_k5y9^hCG z7XW7K{wjB|?r;6>^231ZkE)-4wd(1t>4F}o zKh;(bM=Se*E+)ZK)AgMDcSi4oB3GDDZB-zH^eJ+(EMyN@jGcmPn?gG>Q4M0@p|@?y zFD776g#P&w4eB_F0B^Mi@NOwN9{%~X*Kh+mBqFzEB|qtn&sH=cWD{giwWG?V9PW$A zM3n<*u2lA=cZ|fP9{v;DAR(I!c~*994@h0u)*z#LN`%O#teBqD z_B6vWh<#Na56*#%979{mJ)p)9?b8ytd3$5l8chT_T&^H=07NjbS!qw4$*@24)sU)W zqVGWlN5~=@cZXnnB_5u5^HEtI;~sFg{r-rzaL?c@Ovj$oBJAs;=|yXq7)0F`f-P(e5|*l1amzp_5gGW znF)2&;H}C0T|e^T+iy0IJ6h4B&0D$R>C}coB~0tk9&osvp>g!YbH1QO9`Xbf?RT|G z@Krf>E_k-t;Tn83i#5YxO6b%C1D{4YNZc<4OKvzzbQs4BRmu5NY~aJyXVve&cdYC8 zkSh-e!BmU-Pce>TlrKfZ1@p{%=|kXaEBl|fCym3*CGJ+lK+x60AYWK!Xd21@#$TUl 
zvW5$={2wU?B5YMv@z2smgd<3&3*#!DJMIkF-j2-=NVf@5#J$yGCf;iBHID@eYVIEIm3}dLxb1Q$x;%ZLBvVK9)z$=Km%41L4!;6%$tBK=s%My1T@AdsYD%)Mx;fFzeH?g598&(5Q!TjWo0$AQ#4Tqb3Y zRe|`f1H$2os>-q)4je=Us-)2A&`$hL|EXuNZA0o!uwMRaz0xgzoEYt^m)-mBRXmf$ zIhy#^QKHX7r?(rKBS+=TNjA60?6-=ks_Lqq?;hc&%tkg6&h3BZB51)=M)+KT-2;Lr zpxDhG_<(4;0(C;4ZN4vGK!8@jPZ~&Mkl^OS22M1U1r;JzA3*!M zs2gdD{kq;~^zpYWm_#cA9_7SSvryF6 zuXWy#m0AkTLU_M}w3X!5P)_2ZGoLGQZfC$Pz$nl3Hh4Y31hhpRWi?FR~@Az6ep@13s}*tevm4aWXGZ@sW55=^IGxmgTl1#Z2cw zaRx7(GRemfi)Y~_H6GTVtGq?{SN-6T1uCFn9&sNxEe9tUilJ?AQTG;HK`WzikO%yt z{n9M+IPtB1(({A$9IVX47}WUVGlWD@iWUq}rE7-LBjLh!FHuk((;R!CS=@#(VE#Q~ zRsh!$ToM}PO_CWU=iGiJvwsaV;_Fl3{qikYC<>@b(_!Rna(``2cWm{s!Zs1E9K@Eo z?K9+Ldc+9kqthYWcx)<2(lrsO>NTj;`wCj@r7r3eO%ssLdk_(_to{JkB|A@Z1Rlc; zLoCBS^6CR_=NL9l`AMG%*)X4ia11*-r5-9IHG(^J^#(T{zcB*Om={rwNVOzNge3CppuxJa!kZv#5S! zFkik>Axhytg<@5m9Mb8`s3}|Bwosau3GNz@-UDc~Fl7IA__)hFXLx*mn&s)z;dAd= zh2_X*HTsPv)TmzE>*d@O+o$Kbj#NOBHWTT4Z{kBq8D7Wp5?+Z$t2_i8`Uc<}W+6tv z;1|lboU=rZNiPTb+qBD3TXMTY@WidWo&4oVaD9}rCT~!HWe~R|xOuvl!3Anj{5|0i zg;<=^bSFbSdfQ|3+8IB|5Wzs?)cxmvI~t=myzDff@!C+PGf1+Ov-Qko$2S(c80+l# zS`5BueriD8s<_t^vdDkQo&0s~Q_unFdQAkl=kJ-wHyRdCDf?nKoF-8mCJdYL);y*w ztI#g(SCjqAEfT|DzU%=-hrklw(e~_H@(gia4_=cu{q&s4=erU+-()s`RaBqg0&^31 zi*y8@P+f(@VhqH`8HiA7Vzm?!*S#UjIO2V+%dF#L>MK$s6kVK#N8T~CON;rO(d8$Y zS-w{H%w8^7tz3Jlti-Ia^r_iZEB*P$ewV4STY1oOtmIFcz zp0h}E3ST7XhHPZjFl1ZX-(Q=}6M*-grH3BAhBbX!qBK;npmt6nx0`+(+|raYWVQss zIbZdX)&2<+aw2Yq2fJW=O0p_QiIzd04iNgi(a;~)|EBcguSS&rMjJAYbd+H^AeXsg zV7mqkE2tUR_oy_vk^qv5&;h#zJKBcYr4F@RknE7gWIr)J93;>Q$w+_&x}BGH--0 z9W=o=%oltG!n;on(6o$(g&iac=7Xm|@k%&b(793@NPp6_i#^B zvVuj89K!Ar4Jy=etbKWlMu;T?*I`J|9d_f35ONj2!yGKx?*fL78w0U?7a;|A6&4s? 
z6c61t1TqS5dzYC=AAu$az7aoh$1y?CAXkW)3}x62Cg?5y zDtz@l@(j!VR>Ss03#=lUEu3g))A@AWrtk)QCe`$8=cJJuDR4UgH!990r0!N)8BHZF zWzLgHv_;!?kWcRct@WVE1Pi+q0ngL_Mpyl(y8Ms({QFx^u>8b5&G)UM`%Whp48o?A z_#Q!ziQjW6dYyB&p8D<=PEg{fCSdYucfP)2WHOf0rk#5$RvS9>DUkPVbn$UVOd7Pl z?qp>*@lfL6f@|ccO;!BpXT2ps)^OL>rZ-RL09O`@r5E~djoept%DP?dSG_9@)$O*w z6G0HW?;d-WVLa>$V}0k9CWUP8>lHz!ObzDNw8ghRvFLF%m`%Ld?~vfpFqh6x*=v;x zJ8G-t?)72YGpkHO3)9Tr7z{+oc6hw!__)Q|QvS@L)Pc%9vQ*WWn=nix;V6T2zxUYm zm5d?(DMjc@E6Sev0x?7rXP-VnCU}x`RULj+Aq2O7T{71)T_oy;AJAgr*~e+wXlyn+ zAltX%9Cx`STxsC}d+toy#&w>D?C*^N>OX?a5T%f&=*BAT5#ym@o1;;S;tMBj9Z#G; z9PnJ(L@>0A=#~G;A<;&%?v*)rWX?^YqbjG(Pu;jq5rUk&EcM81t`zWeVV&bNjd1bE zmy&aF%P=_qEM>%~>D>Gmm9bkpEN7)*PQ8)6ES-mmiWPmhI6-N7A^tNdT%R7C%Wv$F z)pdLD+LhOBI{o4Rz#XILp&pE7Hjca5?Gmo7`p%e}JonX(mr3GvzEP%6hF^_22VcZc zlV|Do>Hm$xh=-4WMZmT+AWZOG6(XkkpBrh9@kZ1RyA<>rZ#J{wTESI3SjoUc9Sr)6 z1voI`d+`Gb^?@=bQB~$_em0s$yN~9%u!Cq2;|KNmhHnuotRWj1{#}*NKpzZ7;T8k=Zs%h!+I6BR*^MFk{PS zQf1(?FzTcSpLKyK-e+Q$q|lJS9;e?o!|4+iv?wi>C(jyryKKe#i=RjR@g+2WBE7cC z+JmE>4o`FKSJc|;ie1DS{J9(0pPO5K`GPR*DJF_cTo)C5Y7s^5S>&>Mb4s`ORETz@ zpMmB}UWS{NwQgWdWQ6zSH9K<7q!6$;9M-i^_DaoJ6kU1{gH~!bdOLxuKTdwVCHWKG zoenIw`O^Y__vguN^>>nOzoe-B^$cd6kC}rr?EzozfJrg1_9od+n&}5r`h9;K|9`SN z>9Azy(Q~j&vP zX3jG9rq_!{ml|S$dCH{uHaMs85@~V=Eke9f4T6h`1NO`qBsRSu!27xbXl91TzQVmh zVHW^1h8ly!bA5ZFT&12G7mA$=c{7?A8W4=>2}K+?q<u~eBVyL zi(ta%GvEnb<__D;AO>PRzLqeq(9{w%M_maZNH6`w%fJ0?rEga(2|%A6RRyC|1LM!8 zik*1f%sQTVremURhgP}~*hIQo+5q>5ayty0d5bfN*nwbn%jF!7Z3WT+(|{&PUi^9~ zJH3xOSaD~ksne_5`=$BQk~Z?XKGEj0lOi9Xk`Xm}$BJU}A1sNi;7r305M^L6QBZr@ zo@LL?Yr?uqUnED0&WBK6uvHjw~91@tCgF zYMyK2=6*T0a>y)1);O}BW`iDN98)GH7Y#H@UU|gLe(|u=v3p;qPNNL%_5RYEpav z#GDWa`kP~^_K=>pNG47wez$wY9M=NvsHF|VrL(63m`y`u00H@W;#x&-5)1Qi!Df(gA2kyngatKO|ctp zcZ~jf&h^`X*pJ1bMy>c-vLI7up=C8r_HcMw!wJZU>6+LA3wRuCN871`)CA?=GY&4a z)aw};jcy3C%%<(@TF}-Gu+IB<_R?x56ebb;!66D~E=m42BDi-n$zq<6r=FD4K8=xk zKrIJ3rl&n)=rh#Ij>bnr_iT+wa{KoIk$o7*q7D~5R5zQ%4lcc{WEam1N35Ah?KgVo zE%p`PUy}%)=_DM^*bQdA;SfZiS$`&F*$3k1IUxkJDNbJ)wVO|;&d&WrpOUXZkeQrL 
zUW8_dQQ)1NDB2UsL4B&)NRS5Jomn7M+wD?&_5&*PuK&~4+ouK8bjjV={p#=^$* zZf^lj^jl;OKI#Xs*T=uQSRXS7Z%k z2r0SZyOQw$I`&ZDt3es^0wD+EMo{73x6INM%g1s12*O+)#9jF*epERAUsbgD|LuJf zpQrHpNiQTkfRCGY_(2p!+7Y`;JKeJ_Bph*`VpCfRbU5lz@!Y?9oRX`__gESBfPcsx zCKrwfjVsz@)(JvKv|H*TgQXh>Q$WkELZ5z*%=e3Hwx-zb<TV)@ywu1?*$h?w2D9ToFoqZl+>>cO^LuM1Iq{O0wW+>#aOo znC@BP$t1Pyl6f{#eff?a7+!w-w6@#OV|g?pqyHYLS|Z{t+G9%6+17+t4Ut_&K|wLLWbxV`KNYmvGs{aXJdv|*g#V|shJhjsPl zv}4}0F(NljkF>LI^AR`|CLI_tAVZ@3>F3uAwNJ_Sx~NmTO>D|!u9ROvXqQGR=L8^o zB1;*LVO$=Md3&0JnIzfcpqd$%F&t*=6+_W-u~jmwfNEgVu$EcVR<-0xXKO{rO&jnG?I~>@ zvpJ`j9*orbXVCua8h1+Qg zOn06(qE?UTeyov5xhYJrDTABI&#E0UK$x}_GVgK0I(g=uuy4hBgrp1ca(eIz=` znuWUKqs{Pcv8AWvL~vh774MT%wvOcRDeCCs5>e2sLf7UCGslH|K z(>5oYdBeY3o*^q6G&xU@!5jImRYLk?DPAHp-l^nA+!TtR1%Cu_)MO+gT?>YUd)?LBYfvfdO|B)T)5S$h@H@yapp=NE`B>WAA@t)P7U@ z{5g|#wb8A|IswvGkf>vwmzVm6^07{gmXc${<@}aE4Z|rZsp4+#XyC=!-bWy-6|NG_6Ku%)6*Le!tuDXvdg#P=x$L)>j-R zK8n|Urk0r+ZPI6Vw0F-npI-~}E&`>b66dZg$4;>|cqSAi#{y2h zekE{=p5i`bNE3`|DOx7b(b1||&1||I-Iw)&;&P1B!a5NZ#7{Yh6H@QI*?YS0!t4=) zYjI;r)c%`GNc+;bG<>;Y-^(@3*aLCLo#I@qln|%+)u#{Xr5C&%&4v+yt9jb$oRN{r z1PSDqSM}60-p3fj5RN8Z4qz(yh^>M0(_Hz*i-Vn3k;5{DGo;M6T=p!BFGzcl@6LzkN`tP1YoMp5A-$S^q<)|`@P)nZ%;8TNcp+(JM2RDXRC z`&^j*;s8gsq;!5ko;$MPmA)h|L9y1LY#cLn40{LsBZzuk$u%VM^%`)U zogQ$`nf5wR&DYU*<59(?K;t)mM8v&UHmG@J-EggJYe4BE=9BlEU>(h{Lc;xmdCuAR zf??^77{@uK2=j-!k&(d~ZSRlxCf9UouG!}~OpMd0S{`pK-(rr#R3us4e)DDLb^e4- zRl*h4(n(XA+U1~PvBo;yp4mRj@L_9v^N7hZdUg-n^*P=no)a3>u)Ia4VLU>%B!bN4 zzA~ntX#=9CyO>escYKHN1eJ$Uyv`j)O-!S)oMKedEjH!QTSjd#KUi;1x%Kv za0}Pe;@_MPb$||)U|2pU<&MU>0x!7=pJ+yKCADz0=GnrY)*Y=XuB-~y(WWcPX=~== zayv%xxRN)aAvVn--6W$C!D3RHo7^+?fq&{nLEBfBdz4GJhE3w9q$MWMNb5Ai5ks5q zN|8oC-m%*;p>~~0m@wZL6k}F0r+UR^D~-inJ%pa#PKn}s*2}{q@SfY&qy{sIj`wkx zmVUH84ePhFJD-=hZm9X<6OY}*%7X--;`asD6aS5r#xK(w|KR<;^c)ZeVe*A^RC;|h z;JQ8|OLIzucC(cuKSdW@nhrr3=8W?xZ%>@gjI(C2gZ2P-B(c6oa)6z|V z8`d2o`|5gSUrOB>G+Hsl`@`ha{S$PpN|RBZ(~y{3HCVlH%kw#`?O-0MqQ|hX5b-ki zQbxv=>2%KlHnw0K*6{^|u@78b?7%vdi`ulU+ zJ#t50mj+tQUiSWSkFVJ6me1HioA-ck-0R%tf<6+%Zjso 
zvb1+KwBwXWpu36mykyJ>)#`^teBwT$)POAezCJ&wz_hcVw7yBdl%T8gv#)ldJ{S8l zVqSy>@SfB;!@z@@0~_`0<~YRNQnR$Cvb3)|vE-@R)e`$8mbdgN`O0#gH=;hZa6@Vf z>#ED^&IqzM76%1($1o@j>o7^b7i&_O6w=$BaLErpUM!b=@=~0~!@$-zan_wcWSHe} zF^=xitTA)ra7U7Lfx@FIRfC<_7tbeyCYp3LS^RTqKl|+P~i5;S3*oaaM0Mu$9ZH3$!_oHIAu^ zp6a>7tSh@1TaNRQ_X4|~vpk=;%oXI=lhWgDZD~D0qS3&zPN-Jn6%It4xgS{={X|wB zN~>okla;$r-u-D3_D+=ZX4xmH#q|5nlemp;%~_&6=hD{3DlM!hkLz*yz{EY_L+9@l ztv5CD&dusRlDYvTqtO`ovR@huG&L1Qq1bB_!9@ z(FOf|?h3ClGXkv**+tI}$~5Z7#`78V)IiheBnONmn!Oh6#kkxv5X-6J;>-7j0p5H5 zk3|M*jn-H^TY^f2eD%8u<|^a#tj$V2Rl}LB2#<2wGs2J*>ulF9?O{mHg_&^Je zbzW?4{3v-gd=D^%^l4E0{OnQP3?SWvuOMb%|4-)V{-@?n5C0@prw=uz0z+0%M+>9{ z{3v3f`d)|~Ollwry1pxc;fc@{$b#VyUG5*c}6+z0&tr`JX~__=bZPxvNUtM ztU;KC&yiTP#Srhk$O=!#_UpC|tZBL8Tk}lp8cHGbu9@tV>seNb`*Li=^$0OxTW*Di z>@u{XPV(AQm$Tjv0YZAl1XXX@<#q8MNp9f`h|i`1h=Hnq&E@|iHuxVDpD-G|FF*2n zLQut zH=fVh?p~}saBXXOatP(hvXgZ}p<*r~xf%pz+MXnw2W#6A@xtwh6-E+8qI(2^s%b0G zi~aZwFySCppqnbdpuvFllWK!lb*VqHzebI9U_qZtQjC68g=q{E$e!0i4^LiRI1l-%=!p|+?*f2?2E+@L% zmR}^?izH@2C%LiVDfh>Xm?P>3k>K7&?P*V3i`bN0dCg~ehytS?)2h=;3LB?OKQPEv zCgO9NTq}0u?LwiT-t#=*S5NnSmcPwDRNW@Yda2Nc}?z*8`` z4BPDQxa|-FT}l)rE~$^3^yEXqjZHeiJ)x*#SUF)dVX2u7LxCy>rRjg3^FP8AetCS~ z)u(#y1`@nfE0X?^D61&_C zJ4+I?&<}`j7v%mQ_TB?7swL|eZ6r#t1wqM;1VIpxj0oK#5(LRPi-07dB}t}HL<9+9 z03`v?Oj!?s#dM= zU;oAroF7_PH&vVEYH4QNdkmUYezat%JJ1GhX79(pcYXKW6qR4~0I{1$2`N{+IfAXd zS_+fQB=+NSA?UwGG?Y6r-7|`W+!Hs1975>OuSqskkPAnVzkDWpt2}|#P#u*&P+~!s6dVFCRe?P@yB?XcdJ^#x;>*P0DZ%=@k#6;w zp7x?C!zU=IQz z*GcRNX8d*Fcc_Nq&p~3=-cyHrCs8?t_u-HEJ0s7e^X*eYHrKGKRk|LCE`Ru~ zj;niw^uldH;YI>yZ118$r!qAixfTU^b_dFFTZWIQ;8+2N+?8WVf4K$xy}jyZ{=cZd zLKq%VK7hZ}a2C)^zfOVcpr7ChNaX2rR-Jmna_Ihm zl_KP>sJ}$9LPdvmRY>g^!zph8^WX#oZF{q#Gmxl<0v*fTj$ITQ)$4fi2KnxK$Lx7T z5zfjy*X4NBB{sfeyr^#DE}b`VdmTaZF~p1?XyGg~KH43Gkw}7HAKmyK*tg z$@?}ik)?M-Ri$>414REb@-HScWgStCkB zz!j4i8+d2-v)=xDS`b?3HjPe`>%sdnmia04kz1`RaI2MQRLArvy1yEmiAuhW%Ci^o-swc;6<0LBMSRjCPwTSq zmNd{y?Lm*qosM06C3yT7-=8yWf&Du8gCwh#W7U^ zfn`nTz81N@qK*8NIkUfyNo0A)d+WVE?3%MJaa;NQP5dBHDiysJfFXMzm;EcxE%1E3 
zKRSDKn5CL|!#mHKwIT0{>&n^{Hd}_xH_06R88Oe)4l+HZIm$_x7phJnr=TZCLj$g^ zUZzLLCUn5T*Tja~h&)bL>3v3TeZfr~{;R|_d#3CWV*6QB%GyPGA|8e$*E|Q{xV(g1 zi6aaV(tjigMxZK8* zhmf}@LYk^9X^w`w-xNz0Tc0~HlN$Zv=XCO%+G9PaAwv=(tjN@Vui_<$Z}aJWLiFU@ zR}B*FqciQR(>MVoXea^pgau6BxLh$oY&@i%y$!c!rOidAUBSngiLBiudRB2=yYKj1q=bDCeQ^jjjkip zx%qv?&I9t?l5Yld`wf=@Gxtm$8r8I{O!=XV<4itZeyMz&SaI2ll>5)8NUPC+xEQWeNDX=8e3eaLDWmjyClQ+>{i#`@M7%Ef7Gi3RIT5S;ObI zX(Rdp`^0)z;bem=XS8jj3WSaHX9Sy30Q2+89w4d@Vzgv^CBM?0G)I61v!!vbQ4K9N;-M0Mlf8@_u!grEV1 z-)sNjR=^eJ4VVR@nZXenCd2(|leEEdjTdY1Wb`Yr9C%ri4c&TIN~`E5 zB73rIHxev-mymQYilx%dv2*EyiKS~nku|P+o_P<(o5q;BSLlWszc(^|q6NO+x@DNL zXjjgvSP?SV2#@j<5!3sUA#Q$R72)f~DNAVi_Jd&efQ2xW&|TnY3$@Vm1^ zehA5RO8dQWf9@B<&++&U9LoEFd~&5IsO`yQ)eTgSgy|Q&PxH1!<^M~+q#!YKS0DfA zp3)_Zye7QNLQbq4J4Wg7VU_KhCJ4s!YmXPdwa}F4ybg1lSigVe^klaOR*CatspY|K z{vkzHYufbugADpbzCW_jKhu8|75%@EJOA7F(>q@fgy8xSF1o%P@c-i82k05B+YB^6 z>P$>D-G%(itQ}a0c_B8_8+mGq*bu8+P5W3AgR)!Ycf;=XE4MO9f4MUa6j*~@#&D(q zw7;Si&*%$xWmb1tk{giAFD8 zQ#35BP^+YHnKIm%jkMM0n*Gc~yrgYEFsIvdt5VYIp3)7%)T-Pwq2sK18e$Q68i@H( zx4$2f!Uaa}s%!t2fFS=_ti%7C@B}~K17kp*l@Fp>6N%BBzJ4_NV`%TFp5w7(eXXy%_!(KMoF=es*rzxD}~?X=&qrI0Pf7rrYaDQdO-r3uUCWb$NA= ztQV8Y+sPnmn1wTVa2XjgRCF@iDE)!BB3qCr#89WgAo1x?UCHSu4DXSp@{}PV)mqj+ zWRpRtY9}PvL3TeVx*|F18~Mo%_JcpCLV5bchhZmRL^pV1IAXD8W&mN-R8(A5^s8?` z*Gi+clh?_y`4rl7#YsGm)HMd~6iCYN_1e%{jC*AnTNpo4YrE{Qq9j6HAi&YA`69wq z3uUzczt(vOvHiX$#^9j4yijtr*5gC7^rt3YE%M`U9J1=i=_CYbAT>hbsNy|C-O7v+ zp9BSwO9_T2;tFHx9$oSIvhZmxel4)sd1t&N zAQ@J?7igIB0XdksGCIICM}Lsu?UL&4ax2Q-%qk;BK>&IcY68K$={=D$k-}hVU+dH} z)~{y!QZT<}07>mKE_MYwA}Ur!=ejbiX|`n+;N*Av+pTb(YnPTjF=nTAiDzG!EegF+5Y>&`{*YNg@*Mwz zn0!4ID$==yEfQw&zUk0y#@8pF<@cGpoj1OXkt3;LntfE@#2DWyfz=z{?=-9zIaA?P zP?@aeoe#M;(OPw$?FzR|{4R8PNf>^-rrCMUG%borHgl9Y=D|DHPt8`^@F$ry$tBRY zgA(D1=z{yyH|J!9M0(>P>p2#~+L)A(iJnBLjyHwz{puFNSbd#e&xZVD^&f{XL5A{#M_ z=S^>Xu)b%~+MDp{^vtWc-3e+4&VR(U9b$mV-5B+@R!Q~}7B7jpT(J06i_R(iS4$Wf z#*7E_;S}A{8CuzoA^OBY-~eDh|BEI^oT&B>ktzSJc;27i^$*#^{+DB>w;heeJO@$7 
z%--!k6m)f5Z;otc?9eD8YS%Wb{g_v~5QpDE0ZHlm{C!)%bEjZZDaG~I{(PBnz`sOz zjT*PZ@g0j~@cZ)>p%t71-dgSg#9m5BhN8tEuMxjj9K_e9C#xJF6zXY9{o%sfg^~v! zt&t0+8LAb{IA9o4(}C3@W#-@A(I3Ez{&J~}e+kYaH)_`kfj(9#y!sE9C|h$>kMIHT zkO+Zjr|19S5;kaPxz$EEKulhtciaJ5Mpy>ENBPf{>Z4`q{QP3LYb-h`_DsP^Uc&yL z3{dkw*82QEu-`!FI)Jxyx)Zfj`df&{L&zSLJz{(Kmsoax+d}ZCQ{4Z5`)3wso#)8R zP*z^t)4ljTU}sETvz|@h&D%C+H5O&gM@o#qfh$N>prJ-A&a9%~^>X(q$Ah2~8f)DZ zc=G1`*g_OrW^xRR$U*yTBHy;Gq15ymrE=zyL0#j6&)(nl5TD#}?|Z8RCYLOs3rjQ= zk6u2qc@bsQN!B`NKN|Ucj=iYpe;)_rCx*&@P~RzdNtfP#BT&gW_4fAze2@U@4s>q{ zwe}8CLXKNU?DkoAU15O@6BE2s4i=+V?jJ&AHSyaWac~pW0bJw) z41hyQ0saGG4y$mq5%%{@){x0R;-jfTo-`2mqOZ&I?yk`%GeENMftL!ed-|Smq zAt02G;)u@w2W}8QH}k(AgY3@-`1db0kLkL7yrT(`;_!!oi zUsg&`@B9&07uSTdZOXo7$?H~la=3Kv1I!Bz@x>?oKyN9T3cPaRqSmSsVkKi!Qg&U? z9f4F)pE@H?}n=Zlk|UlP^n zZIs1@AGp22e0C=&bFil=`oOc+&us`q(+cLg`iL|=$!sy?G$!U$fyz$4v1|o8NLq_U z8N(2lyk&Fu_LZG6|B_TTHDrqEPZ|_ z8yE2Pbc-x2>PFuvs)=rd=Gbsu`zg)b4sO>G^Q(&mxz=-+PxSM42VG`q8#d*M%on;@ zN>>&}{gGsBE#JHIkwx`bt`}lL1+4z?JN9jsmg3nWE?H-``vP$z(;B8ybYBH z=5|JEERmuV;^o)W99&t7N*}^fsHH6(A=`@rU#kqK&94q-rl{8TWQ1R)ua4>+Qg;{o z;dM(V_>NlC!<0O+=77}kBwdA`?PJNt6{S(PUVj^HJ%KQR)EX8cv3S^Oe8z7H$oPM% zVD{&C|80fC@B7{#1=rR1k7Gt7CY21v8TMb;uG~~YnPf}`FbhNNC4Kd)@-G;)>YJ~Z z%{>1GFMh8GaSfit2X!hX3axglteFs~vRjO;UL>wJi42pW7vMC7A3yeUl~n+!;=UJ| zqkZ+qRx?$DERpyoC+B#S zK3zhL$@cm}gI0>FRqHmUb$caX%kx75jH~ANhI1IDKQT#9^Okm67XV~IZj8iZOjO$B z9&fywU%7?#SzzQ5XLAIP z7#x8)l?I_<8YLwOTGjFP+YG&L%3cP`eiZVSy(ugQ$q~k#Vy!^0tS03;#v9dej5a!O zhQ!oXi5@qQYssq!hfEiD9Fxykl!@#$d7E}yg7Gv}ORDYgMePt?j+kaM0DNy~jVV+_ zW+{*rNZ#sgl&y?uxw98> zx%SS!q2aFv#eK}bnHG1F;s1k-6o0bd{|y#gukS^1#C>^QuB?7EKnckQMqwk!)@We; zP&WS3Q%{Rr;0I!tWh0{%wJP<)TpvJ!?P=nh0Jc%eLFAsHz9Ph}Mi*Gv)bh(_5vBw6 z`@nqu7uJ!tKwDNtl0!^h?9%nOjoN?G=gCmC;UJ~0i;cU7o2`}eQ3`p>ky28c z@*?F?LPA1ZMEbY8;$pwvm6fFwRzkXXQ2v$>r4%{J)1idJDTTGHJUndOT#ho2E{!yA zIoewPsqA%UD?3{WN^!~G3oCkiDC>GydD#9^m;9Z(_Er}uMMQqd7yji^gi=KG_u^V^ zNNZhN4@y(;nrljw!g{ve9+bkrRD1Q;UxiIz5mx1NgUO2)S{zL*7dyQ;d9sa 
z*Mi{31$^*b@Nn}48HNrv9`=-?7saJ1g;i`F?Cd=#r6eUOg%yx@kZ!tnt*k+r zYqs|stZj9a!Hcgtc(`lXx+x%??;>4nLC1i{lpOAWT0t=dy=%7CNE`6P4O&=lRbw!2~Y~@?GTNJI{Ai2@yFtIVA<< zDN4#ye9Whr`TpB~ho2y{6vqe@5Mm-u$T3j71?V|&}q-oeq` z!}Fe(w~ue|!;nXzkHf;BJ%8~sHtyBy`1Fi7nQybQbKZR{EGjN3Ei136t8Zv*YHs<` z+TGLJ*FW%maAE1Vt+LpYSVtVO;o^Z<*!fluU~N9tIMOY2l76_wc7 zWTrlUP4`K4itkj9U7Tl(*jLTJbA7Ygm`)b^CfDf4=3^J_*;u$vx)K`gFU6iVvB0VP zvFgE>JZd-VIb)y88txBLU8DUWF5VKJYtZK0tgvmPZMI{B27YWhc9!Cer9wQ-X7^lh z2l@f9shayaiw5_`Z}%;)ti7Q%wzM!7HyCj7r5M?j`$*>W4JR?#u3(5sX;6cLU@($! zrkXHB!Akz_SqI2D_4`p*MIi2rax@+_u=U8dIa%Iz^)Xnb{X>Xek>vWQ>Db__jGhQb zqF0qUOlG#OUv^}*NCi(#o!Nmt@%KCZE`BFBcK`7PdU?_>#+gF*`IlklKrU%nR>QPW z@6Gz1v6IE?7vkP5#F;9VUlZicYbIiliEZIx`w$@T0*j=|3a{afi+ll3mwZ+>B3szg zUp1iK>8VzGe~h@gsI=GEBT3aXAv^a)KqlAb8~lvpryp_IjB@tGBeLhGN5vYv3=Pzr zauzCHR$2IcUo>bs9^uo3InI$60PQcYL`BxHBxn@%sz0h`PY_#vw!I9EaTzWMW2KXW zudWuqoM3SlFj@~@x+fDFAz}K&={U#1?VMxE#~#TxCWSZNpCjfl6tiCe1k!uPOivTl&M$tg-%V$;$W zm@|Kb8#~E7H)6F^&l(i(GG0#^lY-pzY;p}YTWjF6aYjvEQW<%?7UxPCs9hklZzwU9 zq`HGViLGgF==LslX;Z0?d-Q@kYRp^mWbO@bn#JKL89ansb6t1JY+o+)kx-QwS%$lF zj@j^6mz9;%DsS(HedQ>7o8-9k17 z-mOm=ZEV;>zju5&jEKsmIop_CcCh;15=|tf2O``ifLFDvAzCRcB|F}h+Vl^wOf5<5;iZhHl z$~6%p>J$R(6mo!#b4On0AtbQp5b`9iv>|0fXt8#C;mGo>PSF263guw!`j&BxZe|%J zj3`4XcW7Md(CmSa=?B*vKFoT2udrli-sjb_qN9#>dbE1*R8;7NuBMy4r`$Tna$UMwB_aeXcW$}hE z@#7rhFM19`!~S)JXwOm3)wSi4ip+?COI)o{lT6s9PnWWm`csfyNVM!Bq|gUbW#P3e zbxdr3Ac>pS!9Ygi^%VNJ^Uac2p3=TtHe>1_T&*)DHRKF1bR`9m*Sc(yXU+fr2R zy8r7(-I8#zC}mmpKDVk&7L(GrrrmvwOqxUkuV)%v#UEb26=3AK&P#cbTt5CgoTIP! 
zU=((vTPVGUx1#5aPV&U3bNq}!40?&4yq5qRPx$6*f3@7<@0vT6j~@;xy=N$1E@(P7 zVp>jo@5H^7SEquT_*rV17PV@_63b39zFMFu&@hI5aT%BTrZ+JM6c4s`D3=9<%fXqQ zr08L(I~%gw+UaLyTW0`rY*wGxyuq*=d-8V_=^l8{oF+eoBTOM(A`iA zCmHA|r=f4JC(nE+FDdiB!80uNJA=p{w`mjo0aU z>)U20xUZ58?64J>Ge8)~QcIy$Zm~KSywq7Jk}3qIL2z%o@4Z`El?AFXN%1r-4hOYck2^;;&w$X%znSA^sTa8#>ikj%Z!;|>x}d?QOqkl;~iY`Wx=|bxu<4meFF;{-(J-z>i{OI5G%>7da_?KhIAmL|M)t&F0 z!6Dlz>!^7}^{Lq2K*S(ctVvSatkHp3k*&_h?O}NYxuwD;Dlx@mmsy~e{8ewAV$*T) zgS_T*T;$XWI?o;Qnja3~WY?L>A78g)b*Qo(@;IyHi*dJDQKT;@4>5Ckk{z2=)%>VV zfhJIGX(T^;&~#A}L@YM&|fUjF51^ORrKQRamxy(2aj&b z9bYjIV5swC-N)WyC|ak6**|BlzpQc>;Z-iVzN9~cdFbT)CV}nh?6*URM(OE-(S~P2_ENUv0JTHpHM5Q6 zRor+k>-WMI_jLyL)+q-(II3HJY{@u4wN0n0Co5TmrloW^?1jg-iM#c0C)@QuV$2Mf zg_3Noa?FP{v#O#=zxugvK2!bpR6kS>FOZuaB{5ms!&WE3?#?Phl=l<|>Q~bFp{@fIQZi@c~th7cXo2j6H^cDw7 z{tDec>i7Ax+&}Ik`;Yh4xyiLFREkpGTZoq|&1tx7qF!CEXoWrPDqj%pgY9R=|FtgB zIsvc7a~*Em}4sR3{n<{@zdbsr08CAe!xZi;ECMj>m{aW6HLl^ z)=4hyzL9+Z(I>+%@Tj78-79p`32)@EADbL1mig}E7dkK77RdcIpxeyr?w=Bo@;*ZFOMyP|q4&JbSAk z0`@9MbAv1sbPL)Kw4%k?~nBjVvXGK$ibm+KRR75+J4NBzOhbF zy!YZH1xG~iP>?k0w0Dnk0NLueM!DKSWwwR%ROoDz-h=je$jn@K1>$%;L+zuYmYYdJ zTv4MZ)mU5%K7HoEJNA{lwsN$U>^J!E8rARP%D=Wh;ciBFGP%@FY>8UV(LErW5&PnN z_R<5*eY@p=qsSqB$)Uw=kw|<}rd(`}^{g@VYE|jk$6vnfSfoa4$1lECIBx*SVdU7& zz1=x7%p%b%M3$)3L)OQO+*O#CURheZ!hqa1MilWBJz6yNp120TWl}Veg%l1yaP zT}Lv@T{6L%tF=T_y-bDV!p5_zz*X5(9TxJR8qHJGtlaEARQO$IVqoPbGa$|)FWXL^ zQ}vdDmPDM2b82p9&D&$o>IuvD^FL zKJ-JkO^E)Y?YFz~yQ$RZYnDa(qBlA-zgKwDk+y!=7@r+ZK5^L&ZSNpv_GEbJa(T$- zU~BHmW6v1bDOg>@-5mJyZJK=8R(?3VPI=BH`qX`4pfuCyL8l_h(}VT7OsYUs!VzNH zOD_`i)Pa&|j(*^`<&*p;AvUcWV!?a+x1O3UO5GbU+!+vQq!;FFCW<&Yd7RvJ>3MJu z<1X5+kmjADUsW)x`suHpBYJZP40EPXgi!~pRH1^y<F4mXz zof|8f$RWfr8okT0gi&izS&gWV%AD5x^=Y3cI>`c`1c=8+>^y51th=Db|3$DoWRIKn z0hu!ENq#|#_yGBRD2V zMlU|(enojvS=8-EPWLXWXnlH5eUSq%(@CaOkTYspJriTWCpjoUBDuoKi-voGG4;aI?F>4$!@P&29)<9kY3LzN&D6y|j{w3*Fb=WzAZBp;mHT zG2rRBJxU)t&@Ip3#CeNew2W;hIdm*YM8a8f`J;W@fYPP!7n&spZQV;FP{{&m-GQC` zPXtF6E<7V(C_a4;+R~S6E??45?Yeb;ncD4=spiGWAfC4EbGZw#T$)iT=}!~feq=;M 
zZ{OV33J~k*4ifsudC$Me&k{A@6xh!fhOQ7;YTPNK$A*C4q2*2-|MgwijDt5PjlaVo z#C~03J$_1ya(GYW!P!-Pc|qL!H|f847ITCW;4Z+Kj50a(_td}4<@jQ8Efxn`h0Z#` zizI$aCdrH?6Q8rE!qvei-H~5RmOs16sIHb$ zjasUw|LMq8?(en{aMz*_-&~VZR5L_P__Jt;ha#hX`sxBmqGV7K-@I`;wW?S!RR7MrNzA z4r-Ue9`k)xB1cZH!e7qskK+QNyX2b^DMo#nD}v~#5q7b~(p{tazLm*bVpBVGY`;gZ0_evKRqVa1b$4;{05fv%s*BZlXZRH^+pHQVWXOuYB3 zIp*?j$r*(A2?(%0kCRQTOg2V@cz%)9l$-oms&~7Z%$`~#j85g1@>BXtmjsf&`pmet zd$Ch?pSgyqNwVFsM`x*=>M;5=Yo02??RkmK$tBz-qG+9(drx=6x&H&sBHnSr7OT(*`-? z*DViz$;Re8kE7PBrazv`ll|Cj|0gToMjv08}(DD^>?RRyA8v^Y5GM=tp4#Y8x#H-I~D^Qm1M zg?Mfma>LL5a@~-!+bHb~K%(ae76}Aw>WRGYAHYxE&tD z(a@AVlxAz9tn(zu7h%jM&PXqxoJN4#VW0PULL^y-X+W8?KpIik4?v+Ys4~Rs+=ali-hqfc@0i-r8{Nui6Q}n(mB(gj z4+^XAB}Spp5LdKhhO@1o1JcUP#No#|&GV1Hw-tfT8OZT;KA~=?foV8p{03uc&Rr^U zgX|bZq8oZ3)@WVCBeKqjfSTC2Nk8dy6NGbm^J*JsP;2Ojj3YwaUOQ%aq;A zc*N_z4+$!hluun-7KJq@65h8RR3@xQjydqu7QD33iD=@EJZ+V@HF=*6ztqcz{zm+z z7)rc|CUK&9Cy~e;IrHRZBn8=dm5sq`kJlrpF;_TFMKEurK#fXXrx_VvzARC7oJpAX zl{aJ4GBy4#{AHRVgkJm4|2eU7<}t{cAQy)_nwMww58)MoL((K zfCc%f6wHWxZ41>D`qeLUnh){E-ydI>nFqZp0_~b7?#<2@wG4}6+seSosld-&A6I^X zhaCkYWq+>d(~48y?sAFl0`bK;u&-h3_5;CSIFpK50Y^d zNU~%;BDb%qwA)$ve6z5gQRMaKSrN#U(zZ z{19@#r>Xp{l-iQ}A%vKoC__<)xg>d1m;)>ljk1oCcA34f-S4SXdV0q;zHgO*SxsN- zSc=#f14vq0B)xhl`%r?PG9Xe)OBt?1h~O#0K0OfSq=RU%C>irb)MFsoDbk{lqxqXh zfPYJV?q77g`Dfckq`L9hCG*5Ly=lf{sf1?Dy~XjDjv!iYsfT{hjwIABA}y${sS zQG@^t4eKo2HgJqx_5hGC=skl>dZIXdRLW*wEkB~5hu~ZK!{mh)ndOcQs>1-6{SVfb zxPSUJXS7VykW%x7TTHyYM@ZnIybrf7!FQ6#Cq(pZF~Xs509}&mF0zGg=fwlx?>AV_ z)Y{Q-y?r_T=aW%UBh3;+u-mXn^HC59d2TXSfRLeAso9i)3>{b^WHd;rZI2Qp=3jtF z>@ZR+fPFXFL>vshm3riF3EM|&0F1aX;zLMSzG`}qY|BER#FMpvM@Z~3U?F`mD}wW2 zNG@W0=ZjM>Un*2Uiop4V0GAcG{e9~pDpGuDz!mI=&A}506>_ZMdjNuN>@Y=jx$@P@ zTTHrXQSPa)BGf{D_@QAL=ddMPu-FF1;RHU^DoQ@5OMRM^XvmfAc-q&wHU;3@56M5R zvDvub^t{t1@pcfGj6t&RpebrROqwD>)>PEWM9q0 zyNpX#8DehAxbKN>>b`gnQJZxjfAO{~%uY7Y~-A(5{Lu|d^IWdO_X zhX^zbAe$aSHYcG=grgj55V>9Amn_SCY!`?#-5-4jfzt-s;hb)TLz)5lwjG#@DHOG0 z1c~UW0H|8g$H>W^x3@*DPdBX2`(nKF>WVxr()7$7r1WZ$hofv?M+t5vsi3M*2h6@$ 
zQ^PpC#!F9ZjOA6(QADu&xT|vn`Hx^JaEQY>&RCu(UXW>|Lsf0BdT5b9QN}cz@Xp0{ zDId%nLR3p(b#2pxk2|FM$D9VTQ=zKyn^B#x)Fe$Bp=3xmYuTO39F;Uk_)D6=lFi1)dG?8|hUA(bXc z+}A@`m#mRu4FnHL#EHNVsoX(pn%X1QzNbQyNlRj7Mo07UmC*$LOwHx3gNovf05E{dOY_88gUKba zfTdYM)x~?X5%;$a6Y#qhOi9hPxfCwg!shA5A3F*T6E6gt`lCaMozY~RV)DRWVc<( zUpSq;!nyXx6?VG)%tgQq4M|Nvx3X)C*Dkk&E-{1N^81X5ec-$BG_ZDByENDzKI#FK z82|$wf~j3`%IK{N{E&s!uVyEhMw+xT#gau`?_%^h-*SOy#iDhL0<(Syd38r&zMUY! zf)%@3Og2TubNv>6>SXhSOJqR+4zi$VZEqu(fTa&r?9s%PgPG>vyB~s2PI7UPpnC78 zLPTkkEYCc%#l%a$B+%xxWK&j9_GosLu>7IS6LD2Kl`>SJ2Z}3K0MGSk+|Oh$xkxx# zWNlJTEbyWuEYW8%{ZXhNb;1KUCyPH#KBqrTKFjqC0xcRw@aM#%no9UmsZm&^A~3t3 zY=gETCYiuc&eaJ6jQYqs+E*9>dT|6`LpCSR%%Ym2u}U6CB{vb%ru16mTvL{m7y2nG zPNXEHaa`b%UH;smyA(cKJ9%=QA8hwBGXadKNWXKT;GBgdOcHLD@)3VRO)?&3wna|t zOM~}WVwG{KwHpim@Moy)rE&XI=rKa`y3AH-FL}2np&B+=bKW9mQGqZ7Th9*wbWOFK ze8D1R$eJ)Dh3yVmciaWhG}cN$MZ2wh#GgsV1TCj}Yb7(!)ChqDYVEnjwj#t^y3gON zwmBO$4q!Jk%-7uj2fT~lLrhw%flRG+;U@zG6N|u$sO_Bk#z0|aP_0L!j7YVH2@9Rb}#h)^eLyxjo4I@iD??`NOv(5U;bM!{g+ZxeypGi)4@C zQ@>6xU5%Gn$t9BGy9*%xfb4gF{kWRMEW!}ziKTU;-5#++h%;)xZ$4DHN7IxAd9ZUn zOQE~NlnSKyi0@m5FW^i}15bg{(^I)@TZ80y@F7cvZXPsz+dW6$a9j}!J% zPh9lZD_XPS11w#*H#W;^tko48z7+ka$a+8ngz>0x$g0~vyYzM#2oT$%!)5IJOz-dM zHaF66RaYM|sL|_r5m;RXIgjrME`oT^Bpb{eHTUf1x*7bAXxDAxJHP}@A~B5^veIUe~CzUaX#&Y$$UO+3ewd*b3tT? 
zQ+#}=q`@a9PJWZ!Sf0t!LeaPSPlfVs2g!CH)bA%hmq@(psEHIRco*z3WCK^dmaITr zeEp71Hczy_K5PX+7frkPUSgy(SI|v1)R=OQiiL61_4MZ!-O9l0EzOSG@ljKZtIj_j z*pAY_(;kp+;Udu@k6Eag%@cPDTXES=z_Q#ZdFEqIStNC#mBpGdy7gqNw84apo4fA( zK>TX~C6U|IiK3Zv=`tbtasYqb=O8S(%Rk690?>_DKf%b~>?()7%m4q2=58YGB2$m^SDwMxB&#*sXXW>kGy(JiW{2>$cMN>Mu}BF^Vk|R!Eb*Tm7duj@C327UJQkFFT@1I z8T9|?GQsjRZVuDRy6#bOw;GnLJ=s ztREkU)>MTzRAY8B-xM9AxxO;jw{1k|93UYVgf=Ov>~Xf-iK&S>F1GiVuMTWK&$#aH?3KI7=kYLh=A*NH9u*(E zDPsqV+Nq!s>J6vwGm5d2kxkn7b>!r;J(dIlBvY@qYpC4}l$1>l%vH9X5Dpl8~x{Bk%7O75ctCl4aNz9}bW;3IhGoWo+ zmGhdFJK|(a^s9*zBmH8FQfgMPLC=7D=1h5AF8Maaoc7P#Ze1-IZ9q~UAKsWF9tH=y z6Uy~6Td`wo_qTT|#c0K2Du>E_IIdsJ`)v4#UvC|8Y14etUWoau{y_H6#g=ZP4`alYz+3)BCBI5m+xUB44E zlwQ=VT|8V(m5pEA_~dD6q0F#2fLdRWItT>{y({c?@8(V3MsPhF*V+&&4lo%NI`JAg z^G18wmPapkW}ADrQJ!=+>`YV7u$qtf-TJ#-xjJE#U+!>CHhnLdN%o3kUB7RUrLxGz zWMr%InVx8Rh^+d362q$Vngse9+u7UYqwLg%q`ktiO=38nFP~hXZuf^qmHr9%u6vyt<8i#?J?WUHs{SFml2V?fqyZ?&N0ahrrXI+bBVesjnF!L%^xV=S zHD#F>e%Lh9;{Tdjvqn<$6+}adD5hn`)zk!r3p^U6@Q$DR?0+e^`)!}iyItY5hTTlS z*PFHfg>d#aK`ldO8mhVH5K@j;yxkuQWZN)J!rV4JQR%-jI)nB|j-QWq;O2P{>lv&U zr~}ctgZD>rNkpL#s-kwcGw(q-WDfYG|LH2@_x$<4D6Q1vqJN#gj2Pit!E7_WXafZ9 zo#e0#Gv!>S#xUU-$>tHs34%lx{?&Fk!ghCJmm8_a9=n^<+x23G+_%xS{*`en;k+tQEO@Dr1V4`f4)@SvF+G3B{)t$gfxQsJ|$lBsp_T zHIE$OTPW`IYWq={DV^DH$rmY_GDLKDB@?O-L>ElKod?Tmq!*zEx+zZ7_51VxQ2l@Y z97FLo&?8i>sY2b-65y!)8v*_N{Zn7a2Y@I@?x-@SZ?f+2j1v*T%1NA+&%AqBy0~N9 zV~w8RYOEmB@a_5C@%1^WgGXhfrZ+zJEw|@oxIF));*5T(aN;3t3son#B}|v5~S4;&RR<+_ga-f=8csYEfC`BcnODLJAd9 zFUGpfpX<8sd3sHH=WugM=u0tn7!B_j@4;-#qms~L!bwJPuITWl)1yMsOhX9#9*}k z10Mw>3d#k!>)xFXn6fzE-40Ku>imaSC-$PG9)HtKnvKdcJn}-zs{GaS4`qBiZe+Xch9thTSQ6v%rpI^^1;%>howv{~Ee3 z?q2H}r;|cDdaqSg7SlSdOzkhbL5Q^VE{C`lGTp;*@qJ9CsMvGBhc+YF*k|2ipjFi>LRca~cYgqiT~N@bD|Gz87%|_J0)t z@1I)6|GF-sk09zbDlH#@)dl+~^BUeCHtW>M1F)zLAqo~KOcntyF}{yQeC-6v$^Y4M zfIl+lf1Uovc^8v|E71)i=%P}(Ph9Yu_Uq4lt+(Gx{F$SH0AVKvt&i$k+_l~STCflw zkJ`oUwwUc-^jS$jVMG2df4}ym2K-GojMO^>)rXDM#+G6S(t#QT9N#Ij!h2S+VF`=v 
z1aVZI+%)$=#dl|-&q8cDmK`d0Gwt#RLPKpIO=)F#{zNrjD{M6JZ+7*6p5&5H#O%qY@>CVe4tx9C83 zI}BBqj)dB;45%03IO&O=)xa?asJ2O%z63vZx*iBYffBsfwMsa>)Le2LBd;y>Q2g9d zmI7Amm~ey#8PE|VmF*X+Ci8cutS**>5{4*q$BttTJbQXD)&vPEELA-k2!N&cRmP{U zFu$K-J6p4waR)VVE7ROg9a$OR0+JtCCRc-eO+gvQb6EGZk zO`~e*MXBo<2+UbCYk!HZ=<7m;^V>{V+e{Qq-xQt^6$0^M_fRI7WZ}2YkJ;E|UbrUCNlJc?)y*Z~UUW8GEnSnF*dvPRVjPrW#Z-$Q8G6L;F@!wH|)!voqKpuIXM zTki$rD{D)<;*2Cfg+Zd-?FKes^bXu`5^)q}ND2yDEELQ@+*)wE-OWoSZC#PV8{ zoY)n%teuznxGkVVRfi23)NTSUo=xIg*A=!ifA>C@qh!PzCTZD#9y7q}vl5svMa%6x zrC*!5AJj`|Xp&-$qg&2tW=Plr9Z0t~%N2y%#tQs3P?_Zth%vdU1KmahN~r_0_0 z7SRN_zZ}Q*5Hpy&RLn-e&}Lep5~g4g5`F_Je}$FW^3;Xs{ZW}!M1O8pLlQlhFQzYm z-p*#=*3O6PyOWVM@AxYhWc*VCn+9tNz>#AL8v^u)KW4#j`-<|91WI(m2)9Ip51JWV zG->9$16CvlFcFk!-e3c;S-a9U;ENd4oBq<08f@Df0Dx!kqRDewuNlFeX zNRT8sQwRcrWCRox5F{swC0QhcAYs&wSnY_T2lu@7sU)^VB(; zQ&p$-UTf`VJ?mLAwrcSZ-9Ok7i~jmy#v$D{Fs!i6&3uqauEUyEpEBB2+2IGfIU+a; zWpLuOF&$uDe@~zL>-g_NrgKK{<~PVSx@|baa`6J4L1|-VDUyFob2R@|t~o~<#dN6uJ<&ql%>1*H5n)+xZyqOQ4%3+>ivZKNqDkgGL%!sI7 zk#2Lw_;IEj!!5EcUX(A-esQ9^mbjo#GNNYSdhW;>x+$|*q4?hGd`g!-uynFGj}?AG zF@2o&5wH?_^xo(eU+k*u?I&r?u~xIBn}wf?u8n=<2=TGVLm5y#Kt!}URk%%jeR!eh zAVmgJ^m6p$xzl37b$u9oJBN3d9*?|C*)2Kp;8UZpM%=wJOM@FkFy+_B67F984o&4) zIm-3aGlYBHv}y}gMt+(12bL=kAyuBpW<)i_YS=YZv37vOzhsMcUp}*gIM=?2K}e3p z@|^o*;>%)mf`Ttvr^AtAW}Ns&$b0XNon9 z_Xkal3zZl4E$N$KU)i>N9G;CGIp^OINt`ZD-OSFKCMP~ z(Z?udz1W+ty*n9xz^cXBtY81qG(NogW^Y-|(vTs3Th~BErGKt3Ul+$xSbhN0;%9^7 zu&+(wZR5U^Z87VACgJ6?y(ahgWykC2L-;-{Gnu5~4_X(Ze>XmO8!OIn(lsDPNzD-6 z41&8g!L4L6!Ob#+gkPGJgB7=Dt4#`G+_BMEay9geoTSL0Yz(R$erf&4xb-@IuP*C| z^UP2QqijFYd&cB+gh8WyU9{_y(6{f+YHwp-j;lxqtdnS~X;5Vh^$^}mJ`JDZ?uC_vh9_Nr!qN_+J-`X^zUT&8tp+9K8!I)Q8?vXXhN6O)LO)QK1k_BET zsT~i!@60@7xTl>?#dV)+85=7+mbxLEzy+AHg{Y;lbIaa)MM28i&dFG#76!7*A?P7l z@*kR%lDFKyJYd-1ht*WYI?TC|TY(jnS50+Y1?o~!nC$_J&WtVTwl9aPE z9M3KuTZ$T>?MO55OL!iiq73NdlA+MMn&w#?W5VkD8hh0V6)8z=)RaSzW%-D$@S`Bz z%R%~8u)K!L+dZogx1TogI`~)`>u(CYPPr4YMA62|^ZI!(VVueR$;ws2$Mfa6r5g@% zPvXukv0Y;sc;7!z6(P&$Tw-F@os_VXQ-KUjuO2}4J09P$7MZEr(|LVKN3gNdnc8LC 
zYM!La@Ffn5Yk@pn(ITjSiulr-%XHuWi=i<|o!A%aF)0SKYMUW;ED1@@>!#Ejees+8 zgvDF^u^&|-*2mJAwP|~NCmKMWxCdHrvO^I3IBMbi^Ep-A%s<%7&8l`O zX~D7!3u3C~rb_Szga`3EXMO`c<((iV()_*3U;O^hjsM)%|+Ea-A&cFBt%@_Wfng3m9^e;0rtiO~?ncsS3 z&xlinSFd?1cASNQbJ8x!FlHF}bC?Qa8lIRKw#duS6(1|B2%)}CA;Lu|+gkUK08R%% zdVEyhe|EkcahYQu}Ct=KVAE_qRhFg6qOR)4i~8&JplG`@%B2{>cZym*K3; zr#+J$D5C84<;O!J;~~4*{}?LtcGseSX&%F-6k96&N=|@`{(od<{t?pqx34n`V5naB zKK;SnQmT23y9yQszXF-&f2-RiTcY_%0&Xbw28+fPR}mj56bTnyT!}51%y&JFl)3D(g0P*E#f|BbG54C*nq`ST~`EjGMkUyxBiKwYN_!~Me{!ECgxW?b_=-N#bBYL3oQWAn~rDWEWkqPIQ*x52=9zqAsA}Mi}E);Wjl-z7Ot)MF6H-u-J zmVc|^P-b>MV8_y=#Ightd!edG=bjjP;PmAv>7-YFzEAA*2MeaW6g^xoa1)vNXe*GU zA@+QXA3QN(Gr9UjlQ^PG`9ESf;LIa0e1r5a!ge=cF)n~Zm!+~V z^91_O=O7|~kZeQHj?Kab08-7A3dYm2c6S5*xYws?aDM4448sh~!;rSDc|Cv~b4EW`k zK=k(!*_M`mJJICY>!w*2KAg55UF0wTW5olH0ah~vic)IlKY#=|{BQ>+%8iCE_}$$r zKG{N$vpBlnApdG$w2UR(7=hV??t>h)Vf>-gUDNGAT$r;D??U#(g-W`EDxyTWuQOj; z$19PgpjPhJu|B-!dA2LgfFoRwJpia-&v6)Xdl-&{9lJwXQ%Y+KN3kAcM*uJ?klPc5 zcY%z!=Qqgi!YOVttOW9eMQL-7m>AcR$Kj$?K2ONKL&)zDf!SVv0bg=VIz&U;D$(%% zjH8r~WdC>uC~JY9Cx74{F2H#2{zEQjC|T*B)=@H<^q=Wx{+r$F6!G5}DUL%wEuO0I z=@RGOg~Qt}(>KjW@`}E&3K=AXIjjalRx=COK9p-aacT1n3DU6|UJTj#K zEELPMdfM-lj&x1X>i2|Zjir~H#$CTZBs(%dq+)vS6W7P{O}uA>gcm1H>S=S*grjel zI@lv*Qq;}#AKsDJXl5A5{=2!v^3)g9=?W+^Bw*|X2IRaSYHOF-m=@W ztA_IU*bG}k;!g61xfX8wjkUWXD#xB=vj!O0lE!h5KH)!}ktF?t)9SD6=D%cH{iebH z7a87PvYYC2BgC1Q%FKuHV)ItdY0MTZb7}xlN4R#3|4TQX^av#KVgPp}LsKQexx;Wp znNoxXkM<=3+udguff1e9#XNq@iBs4VDO=bO{5+K?`vzp^)Z2eZD*xZ`Ip6!Q@6z_I z*u`X0!YLKxAc!J{k^u{e^XRba*pqID=Dr6By<{mLI)@W1C?zhTQ!P4WJN zF^?X2S>Fhr5M9t*p~B<4HcDpr>S_i{ua5{FSG%Vuisa$pVeiD6L5Pi6rPIBT3q@N$ zL|&7JI9v)H^O{i6t(cJTx^?HHrM7a~7ixj~FC*Etf-R(vSB!ZJ;>tiksMWrJ!(-jDH1USWTYJy$ z%BKifF{YX`K!jPhBNHLw?7=gRMzo=}+Qp@Pd|ypU5_?6P%PsDC##p1yRx6D8JW)~i zaES59UfO38?=NYwEj7=bJQ51g(UrQyzAG9MvLw#{7dgqh1@^9cN@aIRWY}hn)SHne z@73OqKbP6VnsA2K{_`zgIjc$IYR#U~JWDLatGwM8eYW#mSJ-l6Q&+Z{ZX#M=+bd0; zx&>8x8VuQ?;|j0Ydn~CA(0^zJR$RM?jZ?XQ(zQq zOE2KXUFJ(gS6b&TpRIhEbKfl7wkr?f==3XO^q-@3ejhpAPB^}uv=;C6lL;`#^+l`g 
z?}G4)e}U>G6)5pD5A)O?mVXjEnnoVJ<-pbuZslX4yO*&G`MCdhCYkVG7~6kon7{rL za>>6y@z3lXkgNZrKX#cgwyuO-sjzE@(jF#O8DkHVm4nJ5)dJDAzME%6pgKoT? zyMH_G`S}_|<kBoe?%QRia zf1T38g7bsL8-qInDKx`oVx$A=K0IXpCC-(W0mYh;EAZl16i@DB*I)&@Io6YJ=}ync zC}rWkvVT+_b?Yd0oCZJW6XA#H3SCZIm}1Y$PF8_Qk%Ku;xBCb5x`>7i9``LtSYxO)o>3%Nwq7DDd0K^ZgCaEoI3KD+)(Ol-Mh&2fD0cZ-vy0|zGmTgob+mkaT(B+d7Ny| zF1qK<#A$t~Z09a4iMGmphhlwmj+FEaD^qg6Gt-5>TwTql>s76f!Yo)lT6M-F9=PyW zMTQ9^lBLl6S10U4l{NVRf6FU)HP}LSTU9wxV;b(mbI3N5$>dV1`rA>@ zYiCPSk9FWpubh(TZ|$dic(B&~4brGenBHS(=&3WESvxqQ15Le-?o$ab{a8CVtOtGX z>m#lhyp_5sy>)%J!bEQI`ZtK~dzD}$vX5s6B%FW%NUDrmw&&e4thj{K&I1jOMscCz zBhkp6I&2Bf!b>fJr1Lw@%lIS5cj2LTk|-PnmduQ@RS7L+>~W{recZc1@wE?~#wKz7 zB)qvj>B!U^H$Ua$AUZTTEUL}>`C|ce^t-S`w?j6FbH5f5^1S1NUeV|sHBi$Gq-_Y5 zEQ^7RjnU?h4a`uRa(7_%Z7Wz@j0J4UR1Y!MXOjT(k@jFbBB`Kmn?+ggHo+ z!;(N@_q!fL7a$~q&GeoRyry!z3q*u^;h*jD5n~$zpTOR{79D@$jm^w)|G^V$t&ET8RWs#8s=5&B#*nQ}^1 zd)sCL{EOjN_~b1Rw2_7A<=5KOZosgm5O-^;Iis&kS8Z#j+0(r2&1Hfvq?DKo0fDy>@Uh3{kK*rKQ-?_zj zKMRlESBh$ZKQ!4~*CjAD2B=N8NLjDP_gc0;IvOG;%L#@g2B(jt`5rHiKG==8d%eKo z@_s3o6!>gf7;{|3;3hbwdDf6R0>eP}6l0v6VIX_Gp4~@;i0c)c5$z^|^xKgI9`Y%c z*_^m{YIKn{nY1E!I%!(=dG*=V9YudEhXwJz_1fq{D!5x^N)-lkyV!PD*9ql)Dgq+| zV3m8R1_+?b(%zBlm~U>hRAN(`R4*GdUui9YQed2g60JAK#nX7zSMwsg2*Hz+MM4$&M>A%@Tw3VYdh-ophF-ga z*vLSED)7`J+@`IhB>hmiBjhn)74cI61<)XLw#shB{)`G*4RGHi$^g{kbO1ZsO_)kS zs1a5&pTd8omo;4JOIZeM3{3-ypH^9i!fa;OkXcj@~;OxJc#I1O}US|17HwSrHb-zs?z;)qKLDft%(u78!>5%>*ClRMsTWJDoA z%Hk*K%YRE=`0qTXV+f8Kf$e{Tyvw`t>v8f~MXvZEjFH*9B?p}e8RpBT*CH~wxu}N= zv-cmm-_;Edp6y%Zh?>81#_p_h=Z06>84bRK8+K6wyPY@OAp6Or*}-+a7zL7vw>cAI z4cy6nHfB#-qn)RyH5Yi2R0eYv6L0qM)2!xSiV|8$FA@x}Fb%zZ|Au;QWf0!^)&(-K zrs9Je{2CLP5?$W~>h+n#>WICUDVNr%hIDa69t>pec3d%#_MqA25~%XI5R1dRE-4aq zGcs%kXf8R7a0KU;R=e-j-;N@F#hn~xTT*|+>EV}8`GnE}^9Albm4hYi;?nf=)zZE6 zUpHs#ShMpFglO<)W;P}1C^fBtf|K5--gYmW%mP-oZ9=4^Qb9kyFw?+W>JZ1*Vhj^( zIy4r`w2?1kCdl@A-q>^!-0bfrl#|iyE4bfpb+M{A(gg7uH}x{!Iqw)#pBtsGuc!2r zuQ<}IZ}{!6Jc|*AU4wV&nz$o^?>@XvChdnUAM$tya`XxC8hAt=-~~U(LU+`xg#$sJ 
zPxw%)&3dnKfqpMF%U&QhA+RmxetdOh2zx3CB^QkvT&9Z0p26}j@~ILgcm*~q53Fio z4^od8(!yr(cJcQwTJ-WW^ z!m+>!J==eLY`TJl9=q>=V5}>!p38Q6z@}>o;MN)80nV@=ZTos=GU+)_zCKv+5>V&( zl!cng567tMC!rZ#z~`Wppf@*axJGtd3XFyD1{zq7n@PFpm2{lAQAU9iQ0Uew^I^+l znGNc|W=#3~(m;QYBttJ6T93l&pF0NUO?_Z<^f-sX3+BmZnWq>b@Gn<}HWlSI0(_9$ zL}kz)@<3^G0vylPt9A}bb7-du^AVFkZY|U=%xG!JKN6XIkzH zUkt+`4eUG>P?bvj=c5@Ya1eAyeV}3pECX90}sDo}qpd*%ezkGO}E0&<6_DTW&Q?z{x zWb)~5_Rs|7U@5Z~|4N1X__*FzS*%`7$Ho_fF0M}(H`kl1U3OFn#iz!Jyd;`+5EW53 z_m)RrJmZOSejgYzLuwfX$62OWj2u;xNUGr`-$}c&?mO`?#&~OIr0VE!x8P2Sk5K5X z&w?)0@wq+XDKBy=&!Ac_`umw~cpGL|G%p8thEMf(*uhB}zo?jTmfujBzw;=hlZJeD zeXB{3N%Ei?XRi8EFl5S6g>YQK#SzOEuADewDe9p{lD|SkE0fQb>_WEts7!+)n?wF-nbYXjFy3u(&e9(}b9*QJOpKjvuClp@Sb~p1|=g#P5(kgji!FJT@d&hH+jc04G3zlwK4oW%dycrFj zm&`(3P@qqUt2qy{)+Q zPF06g`rI9*ckRZ>9quH+_r-ai>1QKiiSMv`^uWg36bkSYhkbYQzTHM;sP9_qb0z+Y z%>H#-R`ah2HwV_>WV`)<&BQS${s~auWxTPhGN92A(duFPprmi7c4a4Gw2HnZg1p{I zh+(yD{+%$G#K91*?`^yGJ-{VW-Q<2>=O}tQ-P%4(9mNEm`t#F7d$@D@71Uc;ShhR5Mrs(Yh3MQZ%NsqIz#q};m0qakR^jUfKyvpv`sh%61ZV?uf_kBRh^JyH6n>`)RBZ`)-sx>VY}af2Q5Pzi zbT4=+ZrbL#d$XfNCafvVD2R@HsV^MB92PS6#G7Q2;x-IkssMmlCu2fg& zfkeTLZO8sp%)Q#r=|?jsX1YBcg zeqlOdyq}xwxXfrXiy-ZxyCDE+a=-@haj=i!2tw(VN>8Jh3bAAC&7B9JrIthql0=5;g8@-(aG(5;w65u*8k)O^l9aG}mI$}o-s&t0` zbiP&wqWaU|)Su25^!%QiP~}zV$*PM@zS&mq`FncpzuLe5-t%T4HeUHLHr943|HWe3 z>COk0gpy8k@++mDEn<$mE4{wu=X+;Bh+#jOB>oDQ-gBGP)Q_bx?7sKTMzK87W_m^! z`>UD1BKjItRu=0vYNE#MVr51|ppa?JPptGAzx0c`a47pQ{oHA!Q}8Zi5+(5x%p|Z< z5RNePj_rQe7L~tV1r*~hcr<@N1nK@cdxw)oxqA=h&m6!q$O_vaS4r`?Bv3Y=OAbXt z(i6Be21fuYV%4Z31JSVNZLHaSTMmG+hbh+QZ`)QIgM!IZC16t%yD=Ar^tginU0Z^x zF256;=;Q91(sNWLl$baR3OK)9!`bPOo*+1><@6}5%twL zwX-4hiyQ!ttt8^a4Ym!aIJ0o=<&hGIR@0#Y6>=NO&b52JmBF02elQX%nD5~#eDZGn z#t7NufGzbVSaw5ju<=_&+tjaF#=Ss8H$ef1zlVF$5=P1a?FaN`!G{4r5lJowD9=?! 
z`^jOLu`DXP-WP3~|90)>^z2E+B{@18O9_8~9v88Ug!LzF{8%fveI(ed(5LnkEOcxs z)NFIwG4bni_d+t67!tL!5F35~54}F>>7C@>)PqfcH&qh_L$;Bta0U)!Z?bJV3`jl7 zd)`li|Df?p-2Pc>+VKd>WO{fWs3u8*0Vz>n zyQy(Ob*^hIcz>ex4YCIcg(@2bvyu$dn5!m6yYVtWeB_gbTtg)}wpgXc2z?q<`K^Vh zC1%#f{P?p1_!n#=xi)Xgi(9V^Eu~@}cOXKnoDc$oOzyW+zpw=eF~HRf^mO$Q6TU0} zgQj|$qpMeMQ*f3iH5T{EN1?jpv+E_wfbV4j?i30&2e||Qe|gc>4T&kgUvMFp${Oe6 zZ3~dO=Z~Bv#Cx0lgur7k4J2A)d-Qb`qou9{D6rgb`?QazieK^^ zQ{_h3K^mwCj+9MF@nJjq0l7V>a8!{7m0528SfrJ%x*OHkk2c{_F$Rgc<1pIPFxom zXAhTaUz*|S4#ObA0&IEkRKd1ZO81}mcR2KQ=a12mHQ6NUAG>$jLkoZ2 zUA`e0IL~_QZ_IHNNR=R1Z_ULz(&d7)=Q(6|GC?;;b_+X`3iLCQ?*jqQ6(c$w5J%M% zQhnYdJ&}gI3uftYWwR!H*(xtzW56zHUQx^}U(e2KYO(NmrWG5rz;})e6jn~6(2=Hr z)UnhNP^akDqc0wNw&ak0=_6KFh1E;k!HiVS3Q-HHZIG$DO!=pCA%L z<7pBWwc5>&mwQ?+0_WJ@UbS^PE01}7Zsnl=X~fOtMt5gW3&WI!{RY|L50qqboH$?p zq5pkpPv~^xqm)-Gk1Sqmtg^j7NT5+i3b@hGF7}p(7d{@OV_N0C4!^XEW9)UDTniNg+9z}Yq7GzD2|IJCR#j?QeEHWTi-9& zD=0k9a{S6Wj}?CxAzy5EoXmI=d8yht7_uc>vU%P$0DG^n2BDC*j*3xjS!gI&d(y!| zz)?u#V}?6wSyv(67#^HjRb5m;p>IBzY4%VtH}hkpQ6l}?-ip`qiQyEX^UeclunQ@3 z$a{1N`nUIna_nB*L-0)VJ!16>Xl|nxp6BZ#Xw0_IXmz^wHur#Bo7Lq>$MyJq>V$y# zkehEp%vjHnF~tD*k-#ZGeS5-hVbal+9tY)J$$rDmp1FupOHP3aPZWb5ON(CB)9E<^_8EDfjWtq|+ z*jA?FFPsff@Z2G7Wquv>kXMi-*k0se$+ePIi<0`Ybf)k57H!QeU1y`@;_7;Xd-TI!sW0sO4iEsYqS#nzhsfwpLNx0q z67n<7Bh*3I0X*K)o+3e9r3h-+<~l=)%twr*N!Q1;*uDnkY5GO=rbE1VQS{8PiOGGn zsy8>~KSle4xPnc%jBS{}m zjd8C%K~^+F|E-VtEv5<1yfIdB8$Lv~z}3Um*^G$+^1m*v!V+p=D z!+P=S825}H(J;o}^o>-N?|>IiJ;{*dUilHH={dn^R0;mB$Isuuwg27!&6blk7i!nT zXDp?Qce~KrM*GcMd^rEAmV#iyAJ~jufY9JuXgZ%97o0d1s?^_w0x~(nW(ow4pJ08f zDXLcBt%kOTQ^)x3Y`^*S|EIe3KjV9U7aF*@IB$B&75T5xa*ry#{_<$U|5>4rU7rwp z^?3u34FGil@I$(pU2$r~0RyN;-&I=!0`mNe3Pj;j9~o2$=`&pe)rtoAOZqPP>fmE| zNY;881IG1VK5wZEKjM_56=F}AI=1-+(cG+wxxV*|plM#-B82CCaM)*q_nD?NR{JbZ zBfE50S$p`hxMvX2w+qc#M+^lJ(s?EL`XToxA%w|PH~Mm)mGu&fK4AO6g?pg%>M0HL z9SOBCAnBsz=1h|$N?k@RQY3+%-;4b74I2s6hCb&Bq0 zmUI|5#48WfM0)Khr5e9~afGm6=L>Z7d-y?!gD92Ni7?-?o1}zb(dIS}eatPow!5}O z@@!H-VYa#SVkv^!+VSFx)!V)-0(Di%!o-#Nyb6sFx4ew7Dk*k9Zp@y$mDJ$Zsh1Tm 
z)vjhg^vz+3>wZ|lWgybl2Qw)y^W({tYY)(fj(VYsVC1H$+dU0zBv1Nq{JXH-*^TM&00FKnGWChqj<(PrT_ z(S^r^E-&momiGwr;j?`c)!Ze@m*2@J+%Dv~#HScAE|AUB&I!IZs#<>Jk)P6BX=qm5 z7af;Zaiaa!nw=s~KZ*N891>YKsg5(A(<^_7RxH)FYMhCFe620&)U0zBo^=ziUSeu+ zve+YUr~dHvLILIMx7SxtcI9}NkJlVRtIX)#9pYE7J1*1j`xoS{=IdSp8j7&6#swp=xT#%i38GHJQG5=;-Q8q zKEi-|{S6{^g0d(>&62RL0A=xvUjoTkqZJUtcG1GtqvgH=`g0;L9LZh*z_7R@G0;t< zN#$s!-yuh@p|>-gGcMMJUh**7;E@9s_6-s#rv@|u;5?F)tNmj?6pi{_RqpAVBV3SC z*aKkod`QxMYnBA#H%LoT2E5#W?L^@2mp7K^f?+nJu?PJ90I#e4<$c3{{g6&zo8%oF z&R7|!sZslmW`#@o_kq@cyYXxAs`WPffUDyhL?4!1@WY>i>;GB#f&UKoIlJnD71uZh z?FB76K|R5;8aj4VsfQpbwkT?~W-!c!%E=%(hopRbh@hJO*Dzyp7fDi&)47UZ3#lZ3v{9J+}*` zdY#Xednaeh)+TZAZafeDr?!N0yva?!x~7|*vX}RnaC_4NTSGGfsEWec_(KC!SrZ?A zWVIhHpZC+G|J=bb{HT2J`g~PM<&mzw!iPkS2g;Ss{#R6_`lBTo|EVFe4akNr{Fn_T z`UUEy#Ci99HNl^GtPg^*Z;%-@3=I0r>YRng`v#>8E~8pM1_6C&P-S*7v#I=i5#pF+qneM=`yB7zZHy-u4!VLbtRM zupi-4hCzRL!hSz_|2kItr{_mI@;4A+h$Ii4*eqL7?z?*p$G364{fcjHaGhcS%!Adg zFS5f$|Rm+S&4{HQF-kz4yfKQMAm;F}lcB+tu;mICnL=KWay;-@cLe zMzBk4R-I45p@fGcVDiYLZx?ocG2XtBbvs9|-I}|%Gm2R{2q7d|q%g+gV;PRvV9&8k ze|)Z4JS18nv-v8{>wr;h^bceoUX7jdeniL^3RBv+mWJ{an`$@q-g#^cln@@Vx<4(}yWh@2Q}husZ!Y#U@$J!WZih>q%l=4y z@&^A?M6HLGAVuu?5jX9Y5d4etv<&%Y_;AMbjJ}R5Ull+&Z!3{E1Tj2)O)5Ww+k7!R zlixobT|vBTaUE97Pw&Oc!zI4+nVI5wI|1t%VkdQpRplvh97odu_E;|4Qp1UkCw)KE z-eeIr(FEi{rniKTXG`EX%J`wXF%wDeoAx91>)F%r>r0MIuFAC-qbJJI7FV=mvy6$S&#?N1_}dVPZ_$JImG_WmZ|x|NU<0BVs2SgN1Qg#aP0{ zH{pt{es?x+j{vH&a%w%D&MPW4U8ls`5ZZdqk&k#(v_}*DHN+dc*cWgQ)*K77jJ9Bw z4<#R?%wumybqZ#FeG?ud%ZV$evS}I!xAm~QkoGu2=ga+g1-5(l#Bn;^nVxt|4vwj_ z0%3TODjQs`%Psfa&slb@VOVl$)-U-iuAX1&%4Fs(v1wV7GMMHyzHiqy&%(k|li|KEa1jHeP1Ud;#8BqkAzC)heR()nr1#A%C{B>prcUi0am&^hsE(ByzKoM z_Pr5i`%mfK>h^>}OI#-nZ&CjVyz6_M?C)Y(1N^^xDM^kI1%WQ?&NqmCh2k9cyJ+^e z4F116=QjKe5>7Y9{?Mome7+d#=v6k4^?=QTaE_|u=VQ5_FX8m|{0YOB`m!MaNRhReJD}rRACJaEFxVW5nZl030^<6Vh9vS7ddt<<1tX(uF9%mu-syJSz>f# z5x%Hm6relrQUcurro|uP-ewuEcRYIJ019-dL>>#M?AyG9ZSWY%)}G}L+a3ii1+%oK zyfY@&a1Oc&^NF9Io;L5ooYr_&m>308`&_Lk^n=U-OoOpeG7P0@jJD1yP&O~%OF!iY4G`ij(d){su9aI#2;s 
zZv8Ke!S9p5(LcYiD#=taaiu#hKgR+PHu{xOB%t^}efXHJDo;=4#j%S&C>1PmPZV9n zfe*T>&%#%8k`^VIq9*ve2{8>1VYk!}VO0{a*#nhsErR$0|DkoXy%RJy3#E+bLB^A6wY<7)wTqG ze!aeQdBk?}%Qt#@z?KFW4N zC42@ZC_I2T8uX@2hX%#NH@+G`Kj)ujHjI#lt-b;yT<BXH6LXLwid zE9CiiSjI~mSQZS|ePPBMu*pk_*Vq^t0Ob_Su9qGAe0r(7u;a(DsRt8+W8L$xx3;iC%%b;2L*4Edi~|b$&q%I}>~Kv8FHl9K6YIT7^ylnovqn$8)i4cun1r6$F}b0UxIG%=wC!z z5X18Zt|e?{WPCopDeTeS1Qs4B7A9@n^%9rZox#NtT%q11jY#rPD?9mv5l_CJxZF79 zT{|olucvT#T&{!QKnKLK_5~m1(MVpw@%}wzQw1n;`XbA+Bb>ds@eNYnKG>14%mONZ z6Hjj(I`;K#n!*NsF2R}5UhvtCq?Ea2`12a5(Sa?`;SZL|Vl3}CltrKors*mq=B<6o zr5Iz-GinbVlLCj7+a8&M?gFpV;rMxd@K2Vx%IB3-Dlu?OSs<_wqU_mwv4?%kn#<( zRB}0d2+TjrX=*ThY7VA4urSQz8v^>R!#}H`&Zw+m#Ge9X!O-h_Lt&r+g<{44R}ZKK z5jrvpH#sWn&KQuvmta=HI4{5^aSydp0khd-s({pT(>2S<+G5J#Lk`u0BpZBCD)fhU zo4>iI{xhMO{|fig)b}<=?9=EZEb^w_Mz(j8vv< zlA?S*Convv&6wx&#EbMaYNBepA-0R-a~D}*)Djiq0!7<>C~xNFkCDFKYTI5inhFIo z12wG{ZcdgYgOz+Le5bQlp$zU-zSuPB=Pzn6195N6 z`$BE^n%gwY+1G(=z z1=^+RL!!E}0gQr({(EYXkcu4LiG)#2zcUt>{AS$D%xB)VnQx$D zPn#V#;!mqiPvRjyf4!5?D6%AsjQ z*``;YjQPHcpkTMC_PCB_x4MOw)FfwYAx)JFs@(fl+w6m1KrzaE+a9!RYt?@VlRlY-J;!!*zaih`H&I_ zyLZWFeZYD{R8KzVP5#%}o26w^xere-StOLe^^>Yhu(i_3g3hzZW~9X+~2>uuW1aPm>=WbCn!%8IJK^%0L3 z36*!N9E(4GR4%wGd7a~FQo@r?QLUfBBO6_dGHzRjP&2E}He#M21&$%1T#>~UAPNbr z$5t{yUTDhks5W6QkBXkF)>=KyA!n9(G9FP*H_i@a$v*>7e}QuU;b-!|626IsftZ2uR4SjvW5RyvCnQj7pDvzUU^F8qOY*^8n7d7@UnTYS+`buSz3;eQ)_D)e{VOdt=A8U(QMR+dG?WqB6OrEDBb*3O>^Sl|TZ z1I|+~y!=?%bTABrYfuI3B-XSz^B3}Cc13KGb(UT(b)j3~)gVrTMsJh*ZLZ?a3G%-C zA&60LiMo$tr)Pmmy)&xa=Je#YjD^h9>ZLi#STBjMGf!w=CAV^vCle{5y2ubpV_vGk zF`kymg38i7yIBdJb?Ke@?qe>b*y1mc45@@~u=}tXQJX9GsUx78jf#mJJ@n~WVCMtJ zSG7Im>Q*rvUe|^0SJ^nK@96Dm?$rc5dB|4-xPonE<_M7MxN>ihZ|(?DHotf=->!zy zoa4QELGLX!!Fn&s9ZUa{K2R?*b-wQdH69NNlk_z_Z}x^Oy(m4rk(n?R@Wy-fcqs|Q z>qL-sF@Pt{r+7R5)aGsyHl-^KIzQ>vp@UV>TIvOd)sGjvff|Nr+)@2@TFY($2&SJA zsu}INAAiCqGjB*#)GlF^gfMzQ$)bm|0H9pYZZ$0Kd&;kZ%54zh@lVejRjjRNfm^)P zdJBA^>wv^u9=(@2wsUjZYxKzZaP{P4oal65lAd1%fIuDt{1g0hxL|YZpr|RP7KG&# z!P{L3^rFb{C!{}gR6_7hg_!b=#)(FUON?a! 
zM_?g0W|KVx>7rf++YpIy>fota-34q~po`X%l9iwF5b3;zdjc$6na~p)|M(2-Xd8jt z)DPE~#o5C_uT=GY^>d0;R3><4%$c~-!OhrV+kZ0;<~3)0o=H(Z2!aKD-nu_$5*G7BV)BflOlba{l#U0#uCtV7fci$ z$g^6v(f!1K|6`$m+cGuiPI1F6DJ1|r01QN35Fq*BkatgJloQPnmJjsMF> zt}SQjKcY5faJv@@#G+Z8VlBcqSg@qY8aeEf>?_p%T6A7$stc6AmJ_{;a-*`}Z zZ)3)caLu<~V&KK2O#;h=7ij`}-Hq0a9eq`~b;?EAuS{C-e2$tW-5Gu~D_`Oo55wN^ z=jDCjwsa!MvpD~%s^wW^Tdw34q=3*26B$+c6t|f=6V1RbqrH5-Zm&mP3f_of!xtiu zGWf_e+^Gz4=F>VYrk7KnUe7&{7;j@6iWqLtV$D97C!X^agw#&p5+*m=qBXRW?jfp` z7E*%O+7?&YZxr%WauV9lzqubqrU)6!Pi)7EO`gzPuFP>|6oK@-5OBk7KJ0&VVKnlm z${w|3EM4wb#pAZLLAsv~ACQq9@k;PomM0tJ(acOHQhsmIv7gJWGI)YW1(h(e)$Anz z2ji`UIf>F+Y)Q;JhLp=N=k9`R6G_ElxA!RopzpX<(IyKTh`>H2ZDuBYs$sOLa1T!f zZ6TFqAokWL(87FL>h)e(;EsZRay#Lv@hj%Zq--_x=7AX_ggr|)H`?W}v};2PC=6Ex zD20ciHOp|14UV^f!D>d|OE93~qv^P_1^BSR+XsL!yqB~_HfN4261shASI*6rge+eNc=4=ZwAt34A*+s=fH*c5D z#SacfRKJ<-*5}-&lfRArWI94FKX#gyH5{1#D_ossn>B?OlDM0PxuKo?Nq36L55gK! z70TaAwoXBCR5--B?5bok!*LbzS~I*7w;+a=7Prn`l3?(8;=OS7=4w1-DYHPd812V4 z(C|shl9Gq5qsL9hN1!EVRw3dHPP;SV+8d%Aju3wOVRhnHx`Ru|dxb&q*Llxve)(Fb zc~Dq~$CTRZBQ6q*!cgZdsiWbVHG{KZlOyB$*$0>X$^;)Ec2J{vsw%Wgk&EXH^uJn6 z*d&*&Ka3npite<|ne)B*G%^;KFoO~Y*`sYpIe3j~#(A<-&L4rzrGz3v0MG7L1Q7Jm%oogijPDHhS%V><1|HhTyPzTSowqRQx~ z2KV_lgEI5Jd>)mn;xol%Mwzs>>^4VKYs^{g!UqfX3SHF}YLZl!`f{iG;KkFW z{#Tu8F#f9NEtYuJqt&jxNawtN)p}#_bH`Ijsb`KMK;y1;x&#PCTEd7IVzrOhUt5#1 zDg-n%cfKA$szrBI8x;3l{a@_81#n%fP1IkuM<(OgJ|;PB)V>VqeqFpDRheWUX`MF0vG$&N*T$AlSbj(#FFq3L&X5rq3`UYqQ6GB;6|K*Y$+cT07gq zAsNl@5XnLRVd;S#C{l0WrWAk%CFkPI={JEdGN~)JLU@e|`qgw+znac;EF(|FWX)*7 zLbv{!<@{>?Q(b}L_yiEeE-LVh3c!xb#XFnD+WV%+{I7)`|BLWv{}zs* z3s@_9lX2!H56IH>mxXQHL{s(@K39vtK0zvf~F3!buz`S4KiK z8+R{|<>bE!<10%W0DO|xUI3S*-N)%gK$0fBGsIm!lz$)I|F09+E8zl+Fg%lAE`$2N z@7a8qGs+oeJe*C_4%~`gvFw&i?&&)wQlje_0-|b^{~pJ)x@d| zp{)r%cUdD@w=)`5?Er}V0z&roF6;bJWd`QuqbLSC<)#cBkWzVeVdIvhIlYS+9&*G( z+`cWjh&5il6e_KW1U+WhF@P^Y5A=xvQc6;$1B~opA#&?sJH|(p*EnCbFTFp9;X^=n z#|PG(xEW=gLwg3THbPt7gpJF<%o1N_v>Vq@12m!Vcl3OrejB1GJ$Er4{v{*<-||~n 
z3|94l6A6(C3B)Xw%N#FUM)jQud+VQW=aN3|C>7Lm$hLWrfBW#}I09GsX!Ee&3gm>U zL~yAT#;iP@d~MOi9vmFpqtkUpzNcrn(X!2+Z|q4tRokda&9R%#kC90AqeXa|6nNhG zQ^JbA48$k^GV`z5#(%H4)ZfiEx>cRM6sqQGVT&a-SIx`IJSVA}7cH%tiukCZOHENj6#MBQ_wG{{m9(&%_idaB*!e>1oTi)QPcj)z)xv8UG+kSi zpYco)&~NnZ3e=CL^DnV=^tYP-or~>nI4b))t;>DLXTL{fdJXP`7ng28=TiV?{@dw) z#_wMihnZ~!V2R?hG$72y1Uzuq_Nk_J&O!2vzVDXXuzwon=BKPKiVga~H|+h0+BlC5 zV=Mbofibg+QJX@Rd=w^_<4>tNDi}YG=h^%0mYM*g|@G6t@A) zrsi=FL5HkROq$7^NSOqg;GH@ZLb#KDy}q6Rb#@$4yU+nC1C`I-QpU` zh0_b+JsR?B^C_Oeba(()T1vayp4D?dZsMz1DPWz4@e9f2+Io?xZCR8DQT4d~XZ7lV zX0`h?$NLVxyMYWyn#I7G@+PMl1X{t0CrVIV`5#Upwx zmwK$i^mT>|!jq#GMi4*i{av6)_v@*egKTe((7%o4)WSvpb^gPvp?&C|N1gP4!qSNU zm34sQ{@u4bUq+3T;{UQg26#i66lS%gqA#O&NzhK)pwZ|FfWBYDoD2?Zb02^@hLjt) zfgdC;SJMS3`QH=LV7mJJVBqdYCEjgbmoSVjYzjsbjch7L27YnO81kn2w*&N_1NraZ z;{UXfTmfZCkTW2Tl@4P}62KCq@HSQp@P=wVT;@D;%)hyKZTFQ<@_pp3F7?_^c;f=N zrvE%$Y%c>YeR8bzxC@xpV{Lm9t4ri89ggP#Ce`DWW1#wa`RrwPE%)2#FV28ub=R+C zpmmZwGubidL|)EQfK^JW07MP#i}Nf1_%mBRAzuKJf9FvC0`Q0QhvO~IwOGetpHLC3#BW&goY@y{3w{()y;02bDJkOI=y%|{<4 z(^NYF!U+g*Xz^R{^0MM{k9m>95p;yZ7OU+}1J*20cAB++&4&#E>=Nn^dI}6fR!ogn zH}kXQG{lt=_JRx?krYzj5TLkFqEfd*mM+$Or~|_gvS^}qsg}%<0D7-0NumfPChk8NmD3v;K|Q^KZ51|HD3T;2&ZLa6Ob9P5t30ddsV} z{WI-SR&!H*b?9hwI@#wHboVk;{Z!zXmE>dqe_QUr?JuVIsIa>~iEaOKl$7cIC2|EI zrvJ0Z+^^Tev@W*+af|u!n{t72?gv0H4)<45w|5|bPZsJAHChM&y%%d`=Kl-Z_20nD z`FFO%CJ&ifvlpuA981v_C1(33v0TpiM+J@cU}WXyzdQytz+3aU_%k)U@T$^!_H|C^ z6D!By`U;A%E&j8%L%k^`c=#|jW1Dhj%C;*<%JXs>uK`dFX&mAJ0=T|jW7Gsg3Oirt z)u{2GZqxu$frqonvKHY@@Moc5yyXn~?;>D98a|J)*L}Pw>tMFS+wfK=PSJVU#LZ}| z)#^(gX`**#eTWHT47Pk2?)wE>zc`&X+{LW4HX(9Y@||gEjH<4arOEYx=)hx_@WeAiNfo|ETJ2^AnwejF8xEcK*Sg^Vn17bKoIO2&MWWBma>J!ZL>v7^ zrdNABpawpFmex%1DYbw^&wypNpFUk0CbuuBJJSyjS{>mGe5yO4?~~#-`$f-(KGUM* zmFw&#@-{g8JzXgUZbym`M6cNl{}I}@fr4*=4=A3L`G^&lXo~u0Jo|aJ_-7+!IYe-j z5uNF()VWqvpaFgHY;j%f$)*4$oX!KYqQCcZmWZ)f1Hw)~04s|79~kZaaR3f7&>-7K zlvTbAPv?ZA4H>W+MOFX=uz%dcv|UB{#k3@yXmb1cDZGATV*;Y~pUb?H@IVf6`|TL*+x_BzOsE-$%0v%wS^3g6yw8OI6;LMFk4sf^N%4(e 
z5AH}O!-WQtFy9pDj_qGs;J=;bgHS3pW4ec#B+y339B(Y*UA))29(dDj;vfS}eH(8A zOtwX@QN4^-y~YV2LkUcuGYL!?2c)-~YHweHn>xW1zul~Lm z*9_sZ7jIW@lK`@D1wlX}Bt*QPQSu1fxhvkH$jf5c7vYJNb8!X_J?7cg(&jKOq^Ps( zqTy_lXaM16WxZ7q3{Zb)e>(1u4K3~G9K=JV?Mur*($yJlJo%8Q7wW7wCljTl+pAuM zluj(LR&yk;)h%x(6s@UT`JMHp0RZOTS2o&%oVO1B%^Bd21%TGSjc!Q7%(l&fA(vFV zT_PsiL=9}kd$auA6{2^5g$Ls(fb|$&x5c<2KnrgRT8~=*l4bz2=rSyz!G33Nnk#W7 z1J!%h@!Ajm1%iK?@iM1I8+PyGQcPx|%*0E5fERf@Swy>eqzpR0xx}P7RVpzs+*4Lzs-a<;gZc31tcHRyRinS z9hQq1FE<&%8#&V!PoRqz4{FTzHCxjePq+!f!+e4DbGQj`avp*M*n3MNuUx~T!AnXJ zt!A9zSL7zL7xo;{dq7C(NrYm?RpYyt?HFKjP$mL(8;MHgYhRi(0I~{*e(OB=+XE2n zOq2;v`!AI3YR(Eex?zY0s2f2X6p;Xk`}G@&)(6SAobDLlBS9~J(ib8g`h-`du)?>C za*;y-zvlR|yBGVT`DqTGFy35bwey`-s|}czz#jm24#@C)06?jfNN?H(AVIvFaF^Ka zmzU)k575j90LM-$pgEgc65e>2k0U$qE(_&1bMda`6`Q@LpXM_s`LI6&a9&=!CHK+* zyil)ddMllY|)BthpAIr5w|$2uN+?cpZ? zeJ0a7vZ)gbhe$E=-a@UUipA6z+cesxPJet)`0`bw-ogbPc^l-s1GV6{$<3B#7rhke z=44@f_pt;{)SHtQWf^z!pRcuLO6*9Io?(}(w$}H1MM7|Qv z`t^}^G2dOd`T#zX0??Ehcll!Cw?@keQ47Gb=UgO7OoWjtE*CId9s|8`^QW-cpcfk( z<|DPG16CAFOB!lz9UBZ}acK7A32l^^o6ci3<#e=Z%X7=KpD2M%&iZjma!ARae}1z1 z`jTlhm0NSMmSzT6w@!}lOheN2wUL*ram_P>e#@KPY&=BOqJ;$+RyJ4`M^Ty-&kIb@^atLE8Bdrg0ttRg zn$x8v+Uh{x-E9-X6*av$LhC`^v@dIjQxRoNA*EeI^}OatB`2S+D}A3C>xmrLGNH=d zH7qNaXc9~u++3U(`N5m}=6XZQ_J{@`P!S&3xnOb-BQ5gl8ipP_=Vn1P zxQs;RwnIC3DOBS7g``Ztc>XWKUd0b7->TBb$Vy7}e8tkVsGoaQ18&Dj>*&`?oEcST znB(U}a?DUcrk*5>!>m&MPdxgu@5lK=+I6y2zL`d^F9plFWQh^s>MU$RpvE0GATY{y zZ!VM3&=}T7U!mC8((H(xX($W;j|1JX@7S~9#sXKXV&T1b{t~>9j%oXWn>!ND&_O$_ z{=-DvVDbjL*S+srB@R#1<=kxS+(vvkiDmZ zxz<_akT*LAWywPODqh^D|M>1wc*Hl}Pp{oCy_SF6kZBJj12Xwv^SAuVPxY_w_AeRu zmkj*RkOAQLMHmB1eLH(w109QB|EzRPVOZE<=wN>Tp=YFL``b~bza8b`f}s_$vb2Z! 
z+aCrPdPcwLS^VbK#yWH`^z^^a)BfJ2hoS!;m&@2%>B$?|!>9pXBP0Stt6<=44@3L=u>ybo z3jX;k4x{nw{eRYonfcEe{jJFVzU;rN#0W$GtCs(7HThMs|5Zh2q5Dr3{dYx}U>JVg z_ur}_|NFMVZ}I-sA%2OEf{V4mpB;c!-a*&?_wKKQbTG66I(7!Xy9TYKqKKlZ3MId- ziH^CX0zh0W^{n(wER6u-sc&FuZ({F4Ee1m?WMHQU5K$dV`(Jn20R)@%k7m;VyvIt> z(gbjm0SxOO9pZn8^KT*jua}2fz{*_zKUFBE-?H?7FVwH*{4HO9_Oagr_g^ZMppLnT zuC2-MV*Mw+6x)ALsK0vepN0DU>HbzI5fgKOY67&KIY8qG8T|gXv|kM@jqHtK*#6Qo zf9gW#R1f)KOFy~~E!Q7;@&cij`Jj*misTfxCIXZ1Ksz+$a&>tgc)oArTgD`b zx5Ez0t8SBeGNKgORK7_7wO#_ToD8bwae9Yv9@%{Xijx?8nMsxy&R9O?j0ocB>OIgAB zkgn8@En7khUo6rRs=k1tYucUx^H2lHZF~#-XM$h_v5>?8)JU-p;yl1YN@7FugbJaB zc;ZP&v${iJ$e-(-iK@OKkXb_7Uy?~A5C%6#eHPA|k&k}G!?KG$G%&VD$C|!@u5Hgc zjM+K46{UzO5tnoG+Y!+4;AwhwZzWIx%9Tu7ms!EipHXkhyTHhIdE6l02ytQ7n z*?+04g`n~3nLu3gzo#t)KO6t|0}|`6;pHD{#lOa(zgjW171~{)_Z5hT@!a#-@&44q zusUVbG6Eh<3-{b-c)%6xZ0>8@N{Piv>4X4Q^>2 zZmsj2f@i!Otp5Fbn#;35#ohbidwWXSy~lM~(z*M-+c<0&6ajcG0m5Om5>=#C_}iwH z%=HI>t(``RJ!;S6qdZ7p9*B0zJq^#>m6-FlUAgZk)KJu3wr{CP)x4K&cklwNL)N1m zk20HOSLmH>z|Xzc=(JkwIeRw>3mM@uqbI}Vsm$_hmtJNcTrXKGc)c!1;v`mZ?pVGz zJ#{`_5=-n;iMP&QP@F`H4&2%4;K6j#QgQhUxMP7M4S4{!(;l)k@|&%G4*!j||9v+6x(Yt^l27KQUM>w3c3p128KE4CR%QWWo+HH-h?vpu+~ z@3L)pRBm?t2!uF93|Mm@6c6cfe=(vVNoYvTsZ%0|b) zdLY_`aI;-el#}C=<&#LMJ?;3)Fv)a$bl=Tv&+%!GBJ=pnGJGj{``i0^xOuUL@>rF(graUbnq&R66_k=KdhHm2k-lef(XmhqgL= z(Qk3x;63is54AuNAnuZYxphBWpY&7Vz{%%}R}7C3aPw}2hdvt+>CXACdsp{Qo~Tyc z(%T5mLr5>=Z!lhg@4?bYU|LZ^V!o)Itm5&}89y?j0|#^-a0%9bc-aA$1r1v>tK-b~ zXh4^$j$8NwoL;orT@KMaKwA+f(XHtZfVL`GL1{ z>5a=1v*HHwg}P=@D!XLVLmZwxBh2sor#^xSams*3u3w9&lr6H+c4}%Q<-Ul`=nA$a zyA!h$?fh74ug8GLP}+u6H*0^fJAqbD{v#Z3NJghtFK?Gurx(Fj^fgo-6h)iL2B{O& zcWErLG|KP<60y-KoGIKxoJR11>jYy~SESpg4e5s26(~#ArZk01f-AKvwak4?W0rY7 zZ@EF179aIMPO-`gIqWY1Ta^bG84&92A}lc|`egIo{Tt%O1cz2@k~df59eEMA3N z0d22nXDxCw7q>fVvVxrSpKC8hv}|4<;O|qrN!57AD{`LzL0Xvn4Q0c!xB=K5q>dFx zISGNw0&(QbqXTkm8!M9pD%K8Zq8D}J17g>`=LQOw%mhf{HaaN!GxgC2(ikZwX>08b z4ya2jL(*)MM$$>pMwf$-6^#PK3%}b&MeBZx14eturGRIKwf~htE1h>{MYR zw*;RCpY@R&?@unl&GW54c)+jk>+Gv<64yk%gC>7Jva8=)13#hxM|cKmg3}0!5TJcW 
zkZ$l^jx4Iu8;n>HvxL66*JC2MS19{XZ0vbv%Y@Jxmi_~bNq(pUqkel`(5qb7ZlGty z)>6t5cw-Zx!xwm=3FcvZw6B6ZW}NY8oIBt0;Kx#@pJ`Ds#Lc<#V!gC*UQ0<0Q5uC5 z8u6BYrLQp&HVniIPkI08R2o9Fq5MY`WbkxC48<_92=n0je6^)V`zv4Ar-|2{uLX&V z#o0f(5fll>dGfi}x}-ugJo12z#$G#<0$lPY?MUXf9SX{%h24c1);x`!8PNW@SoZZmzX1jo0D-FJs zwFFcHJcBDnv*-i5uM6>`BZz<#D*AMy;Lh6iA?QxVmd%2nGXaa<&hhyHI-D>-6gfDL z=i{mKLdlN7DVd9BGh??DfA@mkQm@V_vfEd8;8XY};k(9JlZaY^! zPS)yRp6)oGeFQ5+bvq_!Lv+hC+_{Ff`$?#J8zF1{)HBPK2A;Jx0|CAB9B5S*=1%Sn z)o856DjOoMCndhEL~$EuES2S%g^2ks!tEN;8fC-xJtEPGJM*Et6;G>m zgwSdzyH#0^1dX{{+cd-u2Os*pK4*IC&$AlMkBrO*k-ugH`Nm4Qior*yS-X);Yide~ z3t>U%!B}u5oX+1Aew!4J9wc_fX7T1>!nNkCHW4GvU9YHmKb?KrYieK)f0=N7S>&-)S- zLYD?;o>-1&g)lIYdYkP5efOQd_;5x}JBtM3ZCBfEOZw}gBKj}7M(5xP^(0yQk!ywx~_o1a878FZq+fXC0V$BtouGBhHunawKdUa zJ%b&{%5mKvXKgf;b z&L@K^l!g*t**`&KK=DA8cM+q3Tp<09`<#@um!)q|CWcB}qIn5hY1VBKWs!!d$I}x*xfOy*9p2H^H}u#YJRW_vjB>o?cvS z(x=;svpk0kILZmV0oHW`O};xnsndzmsd3Au^qhnC$HASzY${^DGG=Gl=}ByZH)vmM zR7un?RC-9fOKc;@qam2K@+MA7GdbZ{olQlu;&HFd!hQ0N%znzzxYanu@nDQNzVVCB zv269Tyx{<-yug(AYrm`ISTd5(FH_93ppZXJRG5LjH+n-Nzyw9};{Uwt@L`@k)Q>v2 zegee^wO3=-#&;?>yq|wD7|ljc&v1C#3_SN>erBb*g>Id(+Dfe}MmN$^4o{>vy&2I* zX4Su6VZOinL~6dyV>b6%jJt4=FD_xl>orQtsr-JHzlak)3}AaybsEGWY^){Rn~zMg zIlRcG-(6*X@CiM~T&?j!keMqw0h>!TSrsz3QWIxr4 zWyB!7AFE#dv}*z%SdDT)`any2-s-envz~GEwsF3AUd2!|wP6_LloiKcGuAK>U||X- zs-UUJ6e#Z`o(&&^KRZFxV`tl8k-*2B!|_Rekx%dX3Yk<-5!D>((1!M|jRI!i?)yUU zIsHi&%7C>?kt|sam@2mtZ@LNF8a%4<{HnULn#w-HOc$PGl3LT@IwG?h#3Segxa<)@ z4zer-Fbw09Wm|{e`A*CA++FKtvR#=vyZ(y5pE4C*Me@4m^G4Nbx6!J+hK*Zej$wDK z%_qxd>meKu!Az(eqbj`3l#VIo$%R#lia?_*lkIS(y(6ejiZVfpO>M|vzn=u#9WYJE z_D|FuU+Na>rpWDD9v@^|i>1m1Z);|nZ_a3p>1%3=@`m<*Qh#DI9?!Z&M=Stu;+b)k zXd+;`Ms_}PX^5dB3ia7mz{n!jhZe=M2IXQ17W?)Mg!KCZalLFVw#6K;XdJx8#Jv(m zPxAM3F1Y|sr}Mnhb;w>Ev@a2W-p^4Y)-lSL$&afT{LFdU6)`ObGU?~$$5>A*gJ zH-EOq(F`3SFdhX$X552xJi=4S^_#E?(L5OeI_h-HjEGgoFrww)6mvQmG92yOc|Gor<{Dt9d%USZtJtF zem1x_+8qDf-?da+ht5ruEqV*WofpbGjsr^N>8rm$LD#jJ2*5`@h%lXmi*(yE!NpPq z=A?4xIYsw7h1=;UlOmkuar4y-Hq%Wigouj-r8S;o+yOtVO_0yc8u--vIK$(^zaXoG 
zZ;AE$!H?E9r;p$(M}Ok;SQHJBC1}&@r#8>-v}cRTss@=5V^`7eP7 zCFIz?uJ>p|aoP4{LerNYv)(x%(+}Z>-Hm{%-i*Gpd^v9Pit@=XO!) zn&`mLF(74@p``nL`Tl7t_jXoWrLyIE#_RrmN0lrFTRhDPXq!bp?U}IZS?(ovZ={C) z_wH{E17CXe_e)SQIMG!K6HC(KqDL@rDT$LRITlOuq2g7@E9CTlc72hjYWP(9xa7Aq zy>y&xjpP*dn$NW#a2U=M6$3WgCypvTHHf+OXBq~(x_jHNu0zIe=YEG}vb8QXd zD!9uzxPlJO&kO@otSDg(biqDd1oYqriKASLg2-?ZEgM@Bzl6*@MM9bt(`k6GL`WyL z^_}oW*A*T}^nSu@z*3a7EiU|?6`v|`g-t5oA?>YktS7*;boJV?Dqe}&?#t+0nFds}8^#9B6V5bzj%OR`S!>{8#nsYsI&Wk^?@ zH1#%)qzk+K9Hc?VQemfJ8HLtVZzEk}y&JsC!`Zy%@xa^$rnI%2l-7fgBV|RETNC;u z(mL#jE>4Ew+>|!q7_3+<>gTyjbsRL833>`y?#~K)mT}jrGL#2Ew{)#485`wZyod!@ z*e`d!O`Xg*9QIG-GMqt$R+Vm?6RJ73Nv9;st9AY6{pOiyTQwYFIE7M*`vuLj(u#w5 z97IoF3#CxOA+6k$em1Gd6>O_8D*4HhesO5wSYQz<*g1AV-luP$HDNA9P^h-diq!Vky3WNg4MkPvgmhyM(7B# z)qaR*V*~4FlwcsLMr6%IzM4uOf(=R2?stezfy?K4Q&%`y+oC$lr=1LLL*xzryb~4i z)VbSh-R2v;gCrO{zmY7n-J7?xx-u~f9|mpCE7R@+>fmF3W(rYekNQJ#-5oX2(Lz(( zCs9`kr*?lO_81I8(I#(|#W=G$FBz3I)OqW=>9NR}K2hj%1{DQO9?J$k>le6=W|rO|7}Go&jc0Sm=dAMT`POHew&#p* zJqgTbWlucMBkvxut?bXQ6@%5~o$Vcc_Yd(u8P}Jt`;p{Z?n>VK2K|(4H7h*xCYvUl z%}0o@-y2*R%hqkhug9B=){eqrQyFoW&z)h=x!?0oTFT#(Ja!K6j?vw%yQjv_Ty~}V zd3&U=y{>OhtqggehCl5R6Sz-~-Xp4A@2$Y!r`I3W(*clW_>CkR{>ZjJd|RRXqT`jZ zr5O1RN9Li>I^;Kz*t)NU)^^+ZF-eb(_3!w{s}AqdfD-s`Xg< zJQU+r1LH#cRx>)0{pc5r!x%m0qafRi%)=+1S!AgbZOx-Dt!=*iT$9cq9amPpfnOnc zb=YLKY4`kEKPZ5b{-y8u8qgJaYrGcTH)Wi|>#q^U@+d&Y{pjL_#ixMcg(X3neeV_q zna&$>T{b^#vZ?OpIH;%2w^+v^qkyrK*y@t1=#q)ewJ|`E=;PH9gjocrh32p$v)XWZ zCB7%_vvl^H^(<6`&)-SlnXQn(o7rnQeF-GC^_w9n%7H>#O%h%$*uQ5Bwsid87PCIa9xO-=Dn>TGY#&Pq*TmwSzp_W`QU~%%2H!g()PBl1ObnMhzAYHVzsLVF{~$ zX#4Ul8|WO>vMQL^(;qfIw*Lh@_J|%6zU;c2!lo30Ga5GToE#9eo6CBz;>|8 zbz@mrgZ9&DTEP`DTuvLh)eo)A`)(ps_bYEu;mygn)Fk`M7H%hB7jBFn^}AAzbRdkt zlQm}whLg-j0aA!}-^@lGRuyg{44&chrVKpC$PO+*XNwW^!I{4@l&KbZCq1Fpo)oDE zYYxTv5!vW^7pCOL;44GI>2kqsxQV+irws3nsk!y{TC_JXyRq^Z-EP44hqYWron%~W zNlbU=!6&(omoA=mwdJ+i(Rfr9?d;aMaH>App=i7{!snDzan=e?LC|8cUz+9>kB@o~ zlmH#K0C_aKp{ac1t64_0(B-<9@G5`GjN?W6i}Tkk9Wz4vzK0m}>9;-2gLCO?2@ 
zs?{|yKf*ihXYJQpJXzIZPNY4;^u#`4-Inb9H1uqHY&~omxQZ;;KepU$28cV6TK4cd zIfPzAN!C@)BtsT3IxHaUbKQ{gPy#Fj=6aASMrU#ax-4KZ2Xtkho*z>RgFhMki(1_K zee9~*UrH#S12bB^192W0%P^iyXMLJ8-i~j2Gs@jgX#6Le{+k@=qRiMtRiE zT$!Fv(`XIammV16)+2%5(;=>hgJG-(tovav=@wHFX}I@We|e`$Q*2* zVjISv58k{RPR}i12i$QOKUhU1yoc~Q-#s0dZY75k!dv&o1kI{BtxwaWZ}29q*LU^m zjnH?dj~e^Zo4(K#R0KP@<>i?r!ML! ze1GH{q|w1T;*Tu}fVcUoHy>}4A%A^$4(hoMkn(wWsa`O&jvsMe0P*8Zfepg(CQfFo zO4@e>9x%ha@E;YY@5<$HpN`|>jX)OP4`Ki#x!38m?1azg8Pj_O?{Wj8w3o3(75`Y48IzfwAyY=mC(4%^t^drq5^&IW`X}I2`Q|Q|$ zUj??RxRuMeFJgK&ZWC8rUMuayU+o&6V}1_bZArX8;A+m%mtW_<#@8@^{l;k7z2~=h zzHErlMZ*yv2|7=i&4_3=)3Rdqz};q=9!h?Mb{q6X!RJE?9CkMr!y*<#5<*hfHy8D2 zAyYI)ZlDN1q^}OZd)G*XYI&)Lpoxc|qZW?lq%17je{6NmYx2$OGFi~?pq0Zey63$g zUmu~d>$e*`T3UPCdd3hfU<{Mh+|+Bh)u*uCffi`&~G0IpVT!r z@k;itDNh^BXzsc_#xgOKlA@Y0=WuV_IuqAPRDYZG;C>6FXa;QOXRBq+m_+iNlr>Ip z-LtzxVdLlFC!v5be^4QtHyPZG<33VIhIU8}*tm1^a}{s<#qAJtMrukK@0sm3=)CU+ z4AaMeUbVgdBJ*9wjc8ew{%!oc_w zKZMBrnOM20*qEjG69+;@ef0ecSfauc((y`S+!nMCgIqVRJf7E%mQAXy6qtE?U^1Iu zpKl38@8n5MNc5)gLv}!k{E-QDqvEhBU6f!-a>4Y8$b}N|!D*z4!?weiORD6m!VIv2 z>0y!*lsi<_9h5m$2#m425MB2lbwlU8E0#ZheYS16 zVw+w+IzAd1W4ce4{TihIxeq>w&44y|;04Aa?d}ubPZrfrr#nLrv8`Q`0~%fB73KCN z^}{Tm2WszpB5d^GSk)PJAf6u)ZauGnoXYLXTf*JGAg}fuTHl}qDSrqmSv-9O!{k#* zIYJM7a-C$t@Vc(c!?GX~xJH!Qs96x1h|a_qZmZYkT0 zbJx}C7|G&?gBUdB=FzRaD(~j`>M)WT2tU%xX!uMiHk<}R;ei;n;x=N6H(l0QDaR&4 zym`-wO;I}ftg*frQ{bRLlz8qio~hdJ%@4P-3Tb-%Hv1K2;kBR^Gf@X6n+kDezoUbn z4OkA0wDQlkuu=0_MZB9x8$=#x02Br$AKN-lyBqHAmvhFG<4 zaM5!>pg$FDWKpTgXm18AJ5!VK)xw!l@J4MuMJ)F%tC~xgTd;JJ+!y3y$xA1#)CuOi zv2w?~OHZ!mmQ1f$WV73l06N{-Zr{(C{=x$hE#8uw5eo$>6 zXf&XMoQ-=<_s7e6kXCcsWFi^=LS!Ee#MflPXHO3fSkSiQc3_C+Q-x2XN+6O?f}1!y zd`Ks(ec#g-irreH_=vi$InHar&5_;?J8=8W5NJL?iHNmySchMS!D4KRWXU#u%h*cZ zOhQXUO2ukpvz6EY&JNTCunaAtW)P!#%q2b_eAV;qO2~8JW{RCfQOW})QDM-^W+%2! 
zB%-)E5UY$id=RsTI>tnVtN*wGLK-6=B?2quXlIlx=WWhGm}E$Jw`HAmP{L>!WN^WH zALb}1JrMe&tZGswQ!JX_rekW!(Chpoq6h+CnHjhnZ?x;vMF;EUfDvbvn;PS5$F(({=qK?+?}s-IoiW6BMsEwq}EStBaZpqxU%Ouwom1^t#ow*O8jXN6SPb zM^vHyum$Y-&$O61k(Zj;T3fpos&H(dVZbC|M=i2$O4v4W(cHf2-#UKyneVIp}?)i+Q5x zq6rj~ILe&!MW0gd7c(3kIoj>rH)r_k`Zc7=hj;DSnR{^vf-AH>t}>PLLs5kAen992 z`AQ6^g7)MTKNOfuc2JZs-GM#FPNMK9`cM{KZVpqbQ)jW<7%D5=r0$`UY*Rym;1R(k zY+6mZ6{V4n=-`LUY=L}5fImFpDEa{zef*FK&e;|4@=Wc zqkK=tPS?bXWd(y%WJ#@+@(3E{{?5R+GNhjm`e4kaa10`K9FWl+Gv^R) zug-eIy87#c8+hSXrLuvV%{y~FLY?@kvW=o&87{LIPF&Hnoz{gH%9_aQK>1yEv^%oo z(omjwr7@hpF9(kI2g>e}8Z{}qQ4n*q54GMF- zYbSM9j1RMYk^H+uMdQD2CFdT3qbIDL*btFlPSDfTdB=k1QfKynM4La1#z^8D0Gq&) zWeKrG3;evZrzhY0A_tl_JGfbh6gRykA0sc9u#C2Z&c5fm4_02Mq1WE$lIE#+SvA99 znR9CREV6I$w6v8X=d^h4uy!1}t9ZZ@T*DcSKzYbe5h0_pFp7APA!6r^xbUow)5tFH znY_JKx`fDI-d8L0WgL#M_lc}BF+*>Jd)s!NC1vbmlZhcFe4NG}tL}~XN6r%#0wn4n zVSVu(_Rm`iHb^tU0#s9^<}z~apw1=2o(AL~dJuUq z)jSCFepx%P5UqOTKNV957%phAV;hKNSUM4K9k06Z2D_dlSup=vp#a(eB-LCvB_!>r zHa)S5vTQ4zSIs=bdv0ZF$u{)0r^RhK+Rg>99zlr_zQiyB;IlMl(4&T>XOf++yd7cc zZgpD7y@m^kDgt&nH?x5r&M{cTb!l%H|8#K;%u?8KKqV#ZHOaHhIYp*sl4InqH0%bGHDiZ=YR+8`DFkqILu4D1T_g==*g<> zLzD}DF5abo0~yf|j4&QuShrZIDs$zPu-jI$KKjBPI>-yl)ca2!CJPu|vDYJ)oWm=x zGh5O5)Mbgxqg1Ymp?uW5VLh4N9QVK2cI~S@7 zhqW!}YfP0)c2hY?DBS1oFFgmtRI_^xc>S;2uerVLJ?ne>FTB~>bXo-lECrYD0-@w0 z3Zb5}xb)dqE^f1^gr<=gvMC-4+`NP$Blv=TYpG6NH)Pgddj~QN)pv&|Ek_qQrwT>AqTrz2M*PPJ^drOw?Q?hPO-62=I==YCYE$1lG7 zY(!(09FkW(szV`=Y3sPxbPd^%4*Hewl!-8RUwTt`TDt3jp&M>IkGcO#_AV{w7?{tr z{l}#FYEqziuY5laRDWJL~yK!!HcEY(jmaPxf+B(O2H67JRhi%S5goKB--={WgA!E^^T=qqxPDcbFg zpo;)wsDNk~M!%m4&6g^wf(eb@GiwfJ0EFl65wsKk;NGDb3^i=JbkA|JlzxXydWfIKF9!6CB>IF0#RVd=KhwELPn!qyL!Oa_)} zPUU4(#?0M>=395Tfe{K8e`5LqlLB}5N~Fj}V$Pe*ZBrmJ?iO=dE;tu;q~=Gth2afq zJ|9PhcKaz}KRn%UM^Ex(yfe7jm858j2Ax77OYT*BP%*y?SRpwlF&_UHPQf zd|z=A@s4VbA*TsLH{8jK z-jK+$&VOWRR4=u^xW06^X*(s1LU)cpF%U-*7T!Ts%#odJFug7+lH3vU&uI$In6tI< zZROL6O0Hp;lv!5DcHCO^rbs~(UFoseK@fl{mGy3D zjdwL)NQ9cWGJl_L!N>39cI@{Nf|W7c`+g{uDWTdlI=UabJP$n5V!;=6{A|d;aL^1Q 
z0g*M~nhcUHNT0_IRD<$!5U(oF_@!;dre`;z+I*FS5hdiPI|SsZ434*r@qRMmgQ7#q zSkN^dGUfo9x`At0QAH@+sps{lR1Ds&7YUo2=`-{}-7*4B+u0(6lndB=Cx>;(!hE<~ zOU|FTsd;a6=WzFkj1;Q~L$_&;%D3RovF<)yg;kQ34bg&v2%L4jOxk4hNElL18XotZ zT-AeI!`DvIm&Y$<&nBY9B0F}*=c8_B)mjICh!N72 z*TagQO}MZ$rbg^W<(K1K{1NP)rDWAzR{=`MpXx2(Na63J!GsMNz^51O0N<~6e3SEZ zn67OG2<~KsQ4<^gEa}MFH+qrduv1H6gxk+0lMVF?R?VD_bPO~VN9yJj<{DC%-SF`% z0P2_PQWj~$@#;KWzuKHGXSG@8XfQI;YVdeAF~JR=TZ`7<^ZlBEX`=5;;}=X5gh|xs z^O^Y4MN>?ND4CxOOE?c^!0xL1`g+(bv$p>@@ofUd{k{RYEw^~c3AXL@^}ZT57yAC? z`St(e>>8UZVY+Q>%*4sWnb?}xwr$(CZ96CSBoo`VZJgLnPVhce_aEFZ-Mc^T?&@9D ztM^*V=fAsSioJ*OT;fgEhlne+#FY##M2rAIG5eq^U8IC7Vy56D|F+JSJ^E;p&=-|W zqtQSS#M3aj6F{_^uIHGb>5SSu$2jFTF7f7~M2#off{PigM~%8Z2sC@Xzt;u{?iP$C z-p>Q4pO5duo}q#CpMN%1)A&xmwO*|{>z1OEQWZaGL<=cJW>c@>yH&lJT-zRBdQfG6 z`~LXF!7E~J(5)cZ1x;i%L@Ny71yo00odYV}Yo%UIU$JYMZzRv(v#Na1k z%HlCR%svbAaR`7eWsq8N2aaq^i zoVK?tvqAh#bszdbaHX(Ntj>~_rq33-)KSqhrf34lWJVpn%%=s7vr$T;7&D-RrnGge z`(XFn-dC7mFVVdl64v6??}t|-ZiHyyw|fn;9=ee$K032GuJ_ZHqiS{;eifeu=aj|z zx?im=n{=c2v~iZ|&!6O$tr%X=o5W^eKkIp2E^WBL9HPn;ds~SBq3JVXc}^%@|NISE z7xjDKrs=EuwQ4Nnf`!OF04G$zXv_FJEKF>^>hx`%JS+!cm?WP&K<}0GTWsIFeSPQD z_SpbWpUS`Q=a_dxPMg@0whj6P0@|j)m4j>!W(LQ>&s#AGgns2 z!eh5!SdjkNKor8&1KoOPlF2yq7q3eLdqxb`!*Xr?H+8j+hWb8k$PKd1!)9l_?Q zMf(G%Otb&`e$SQ&#*~xavfi^^3xN*b4~^tnB9_8T!CMvEq;-rP8U-q^)SoLl*{AFN zE`CXKO3Wjjq|N-w%|%z)$+{kGfYUC}%9+D@gy#;&IC9;X%lac;c!nj!%|ripNb52Y z*MK3K$>y_dSnb=s#=J?@b`m2P4fZ)?e=N}2nu&eQaN}CAtOhTZ6iFNfn~4;Tl@y2I ziP`yqdEtTC|AdyGZeO@kpENl{j7ilyYp4W$c>4{>L-9F5mFUt5EE8&hR?XDPf1XqB z1g01~o`FwZ5ebPS7sxO8b-kyD6?V<8n(JaqQSYNPv zt!tFw9Te89uLiG#<60@anX_u-ALXNG$Qr}ACg>bwTm$xb20HUmMF3v>5B|Sibxv7F zbUwnuvY5;yO$JhVr2M4nyXykrD(e$S$S%pci;J-iIO7RmlRRe*oHWkQ64%d2#0!SO$8)wRl zoCYXt^wCs>c2VgQhLOLzQ=dYk^T2|`(+1%)23k{j@ZvDJ5}>_{(2rjRt(7p?GE;H1 z^=>|O(h;QzrMW{Y?>LlB%CXZ#6fY&2Ty;3pOk%_>4vetZWTh`-0@|+2r^6|f~TX$ZEZHT(u5~WL+iIa@`tb7Ft5^#cqF`k0dmg(4N zRcu|_#0JtP@N+I%SHoCU3D_x=D~P2h(Oy6UQa4ddySse9mjt}?J4t-?U4wnREsb5v 
ze4ISZT}#^*Jc4uK&c_n+o&^oN03F$!0ZZ*tyG7`FCuB-$!k{r$*L2NXzux|E=?Hkg z1lTpN@*Y6Q8z33hvT{WmgcXS_pzu zAz&i>>{0xSCME0G#TJ*~bU0Z_SKltKnd?Fh<;M;CwbGw!in_>#pBC+=u3NL|Ow+hn zTwDllf{>J%3MU5Zw$ffW5)SRbEEy$SD@@1j6%29@}j5pUOkE~9E<8;>3TxL_sSvTWlqA4 z7KiOuit4dvApS;Zs}fiSE^}RQUg3mdZ{kmFwD+v%#80bE{FaQ3O%afYZ)DLj9JV_4 zn7h1-xY{KdZe7HQXo>x^vBS)u<0xj0(vjB=5S~HnK5tZZe#joBQQz zCz#_yWotIIWmVLoT9>`WR5{S8aa&#vhtB}V-!@Bc)7!wPK~$n+!megFrKho&ZPaQq z9fr=QN8L=X6AMa5gTbkoLgx?K;$d_2F3gtH7t&3*x(bX?1R7f_2eF2gKXX2Xb|oHF zOO`nfJe6OIC#N|zR9qZh_TysYIW#S*l91?-ljpSJ8{!n=sdDnN3_8Tq(o-}xm6p}7 z1xJ%ZyD7WrZ4mzRt5WW+sp5QS=)>OlPbR@_$X5~)eSw$1S8+Rj#BneG6jPFW0{utD z?9QMMu=p$SzC&e%NU74lmjt!~DPBN6hqXs*C^?pjFcJqD`wP~)%_-37Fc3gc9CM1x zQDzb4x_dMN&PoykPWiXpe<5?0%Ay^;Tv3UHtV~#3HY_$1A;!Rs(B1!XitA0-qT2=E z60c@w+_O1lTFD}FA-XY=e;c<{!JTF2(zI(GcS4m;LGd{k*T+1&B;mz5#m8RVPD%(-L{^$lY8fwNI3sA_HYfGs-7lzsCC-u?2iE-Jq4^w2yY<+x0Sf zcl-ICU18>JF&z-_*_X2VccbMqJ8cth5M3G4T;bP}KVmKD)<4#vyZ7pG;0CqMH=Xu- z`%7c@9ca^A(&)}7yp=)c?l`M|6hRF{RBZ%KP2Ec0vH!w(0pN9e`_rjX8x~|>)eEs|V(8TkH@eWu zPU?AO{>(*Zps>YctXt-s-ovy67^BL>|C)p#Fk6PK-uxW>FpUa`>D>lAJoq1&iCme9 zoSTK*$Yreyb2pvi_^pcKSG=GPH4cS$A>NeyG~5bqMf39kBIU7Nfr;Yy5Kmp=T>N6^ zU(<22nby?m`I~iUfgaf0G1>%PQJ!!IA90$fjG`1{*olM>p#-~aom%MNP{ASx*}Hop z-LW;P%;^V&7W}BHu&#W$1We8P1e^ zt0Ep*VG;b}P0Yu72hnH4`A=q`AvkYiqUKUCmlv0MbWN(Qmx5z!skDdABx2`ZVA!@e zIHDtyNSsOOm~_;vPnLp?zB&32xsEIn~ zK@C8~B}T6*7?9yPKus!F$MXKKTt6X+Dzb}4ni~8r=Z)T%rx)-Yiv991>KXjX836U{ zxx@AZ=jZJI>hSxJ9% zXRi?ZPJ&(9;<~MwCvLB;1A~Qvx2EK_<5&Dv=l{|fU;qj~SUrrjXg;>5Qz8q(e^tHsPnE!a zt+K%07mc(+G$(agX{s(Qiu(E0;ia5!${H0r_3E7lA}Uo~RPV_ydP1~;)Hm8UJ!q?n z#t~mZR7n{F*BwJkUaCkVSv3AcCHNc@N-b=BWDg&k1x*7eXT~+!1`!WqFvyq{_<&Xv1^B0 zeGaNFPv}4LG7stTSzG(~qiw#_K9h@xRTnH}J=7G8z=N7QiT_ja<;t&6zjA|-t98un zMyY*vt#|&4976~sV_G>pBp(NA=k%koI)*cH%9&*3rE$NO-<&ZZm39=DusiRTY|}YI z7d0ujxb~~n-7T!FYWY|UDr)&c#Wgx#y4%A7P724c0klH7F?e1{w=~DHf^rXIh+SCi zXY?vk$;ixPh!#!83eG%KPpufsm+tubJYYCP|QgvncDmH_aJ4$Y)i)W}ER zV~(V)GK5~5xtWO>xx+Wvw*%Yjo=)s0?ZOIz0g>w89CmYHBy=c!L7(;q(f@^ljVywf 
zSbP7#%Aq~!uJl^LTA|zM`b?;UI-uL=`^>=IX_GWs!0=|+@ob;ZF#=Krsy@hXwRAl- zIDb-tbLFcBC$$YYjccktDmCj27_;CnER#`pQ8$x|DHHqPSIRM59%MaGjNx<8;SJ<+ z=yNABJg-o5XEF@va>_ooY2pu=kvBd&AMKfwVWs+%3bGg@m;DiQ4CNzUcnE~^5e()2 zsrLx}vy4nUvyM?X3sXg%49`z>2%%l{6kYmfrF6q4C2pe(i+P6ZJ{g?KCD$pqw zi1`6KX_Acb_JDV0HR;Z0@0n$7om?__6g*rh-{riDN?v_dZeAwIc;n`Bqju`rn*)?> zVh~%ZftD)^>M0(=Jng2MFZANGsymK0Y~8k?u1z1^@2%u|^kbycpzG5o8h+F^e}7(a zY(?>O-j4<$)CC+SS-6vz13Hh&3E~Rtx@!tfNsLr2jx4!UnmUrSA#9eXfzb+eqHD6fr?(jf+?RPs!U9Lx!+rffC<|7k*vlEth5tYTs@O{UPR6-8ZY$4gpzJL5QtLZ8vvVn54XcW{L@EO#+Y8t0n zgnmDMK(M4pC^?Ld-i|taEd&02Z}e{8JGI!{%sgGVUu$>zQ{L?*Z)%u+FZi80a2(*Y z^gbQy^n#Xi90W~PEwFFl3lKP>_kH4k**g*f5=6xhPj}Cp)Ug>>9@DelX$C@+NkTROmE4+02c6^^INi}{rqi~`rMNx!8qJ(g5A;oTu zAF2YhaNY+ILW=2z;79;yAw&segoDPS0zRQRB){3^tc?%hM27!7_96uM(s^$|U7_6} z(4LpCY52JrQweMdWay^P{Zxwnz~g@pt&S$e}Q+Fk>i6gs4RYldIY3;SU3x4l+^U zIxy(hKtiq9iQ9!mw~iXMQHehH5pHwZSr4zzWjgm#9^GY}mppw(FL3I}_uo5HM@Bo9 zv-JP9{Dem&|0KGyp+!1$bQ-&bs)n7Lw9!QRZJF+>9`TWiCgS96MkQZ|z^ubNR3!UI z75mBW44>@0du2ozd*IwQe{Fe{SUHPB?q@93uao3{X1`tRVw}WBC?s8p(JxFyWvss@$C><7 zHMPmcISa49lsuZo{1uL739;hXq$e~4Q&x>(wno1j@5#7$Nn_2o?4U-&F1AI^Q=po;6on}?*Znqw+hWPn4F_x`ylhd)+ ze1X!NFdN07&290@HEw9#ce-pMp{J*v*}k{o)gQG;i6SkI$ZPGi>{VT+u<3i}lG=)O z(nAIufTRjUJoDj@W1U~QP=nLD5G$o1)()J|CrB#WsrnrG4YnbSV>DfBjPmA2eyA#B z?);}*KQ#K^CmnNHwz2FWmlR=PtYuQVq}>8VltjUYZPG^R=fY1Z(@DvP3#kNFV>3n> zGb!3KaRBT+t*ipeMzJ*}X7iF*#%ir>snUi7RuM5_o5?k5jnA{a-QsA#qbEU3v&X7Z z;c-=!@r^?hM!sraZ0jNuz~+a~EV)jE;Nz{BTZW!~z4>(Qb|+uf!rIF8d%FuEd87UC z0&soP{;H?5kW{vtv9DU7@Ou|&w+TDUifIG8g2E{Qz`)9D9=i1(tpgT&lCU?N&;J)3 z6ZRge+8aG+*nyhpix4ynK!B+z{AsQ(8@SF0&Vg9}bc6(&ETgm6o3*0a-h}V7s8lvr0;Gk0P5*M8_shcQ$XN?Y&P6ml zx;@dxo#H5T%;+*5nTOj=$KUS?_>}84L2Cs+FfzQdyFWNeX-I4jWp)-TVZ=6f5mQCE zQ$NpVdB)>)TrM*E%OK``fVR+3xRni&VwmvJbcUoDjqFcpS*WOIM1&AkCEQR~c1Oc_ z;KigGf2GB;x7Dfxw{_?jgZo50`ms)0X`W`N>P6XU!h)%@q?M}h-UhkRA#3y&@%oW|pc`Zr_DbNq`J39o_+-!z~H zbFTV}ogflmx%zWC0+~{WCN$Hupsm17R;5(as9E#}+plTpPbSE=pxwf$LZqsaNj)n6 
zSfQ%llhk5QG||zV!pg_zm_lkoJf<>AqEc#{p<&X}8ZHqcI&wO52K#G^r{7-Yo?mnInk{-MgUfDD9u#MN6(6!*PP8wCc-3$O z0No!a5-MT+Qye~x8n<_iMUBR{m%w#TpqcMV3_C_#z9?9iwi(e)r8xYfvO)9Mf z4=(WLiScGS&fWM;201Z;t`t^c%)Rk=i`LyWNLlZCI2DU%-q$Ya^4GT z$ox6dmri;$_bRoHTV2bPe-PXP_z>7Rd$7d>F_RPwd9A+>-dQc$$-}|#%3;dilu=qN z_H1VX<-NIG;QJp?fxRKj%x_N&3kQmAzMkrEpY#ZyT>G$%#)7{5#q%fpl35`%HYue#w{^KqGSX(gxOls`$yJk4ze{ozcr*>%A?345l>K04g$i3uJT!h?d%RiZp z@?u+X?Oi2eG5Hx?4NtZ^^?)7ftz17pN{5JCA~ZsMJf|4PEv9F!C^;u;{9S-DnO$`Y0>mi5%H9TD~g%O@TC?a|GDb-Wf@gR$1TLO$wAM}(LwC3 zvxNO*b^31$-+iz|U{_t=yRlcY=49)E`YsR@Q4&EX`|P zOvyjj#dw0~j*9h&FcJa&acFS@`;O6EhL7_k7V;!N2bmA+n=Wc*n*Tk-G2SwI;)25y zCBC<1Q1g-(No6y&CZ~Z}6N3tERujaux-LO-(HytLs2l3vrNCeK8=zceho?!oyYRT- z9kNS~8e#9&EC+dHq_McsxXHiO|cZ zt(HZ1X`{7YqXhE~8~OZVTO%u?W;@3i(-oo*DN36}a_x&1kD}TbE&sS&!KHftdew zt~Peg)D+w+tS%8c!1&>0>Q>@c#|6AtyY(3PsQGuW%D%rGMhImR0Ssl-)u-^%>FHu~ zaN#Tq?K~OV9$I?Mjjr4;-B5n`n!CS6nMLu7xKm#HCGS=_rgpefV`y)h=p6m+4P@xm zBFw2-{`K@lT?k4w>oNP9E+W4z3eY&sNa9%H+cALNjrHA&Y4=+qD$r!hB?$E&bI6tq zTfXg?M77L zj~&QSdH3vA>Y03NQmv5Gqv}WyF-wSIcx``>L*imG(T(^0!F3zud!i}PnuzMdB|E#D zKr-Xc*|nBBWp5yK#8Eya9ntzHyaAEsc8Z+oxk5)DCYI>W7^q%^(2BFla6~TdK z*AFDB)vf!P?$c~L+{Rx(n{X@Q8KHhv8k&}~as54*06GRJcI`j45K*;$f5oY6@S~MV zZ}uyFR>9(1BR2IE#r}2R$&mQ8x3j1W0H*{-b7d-6Y{C{?qMtkS6W19{uNUzo(zIrT z17Y&IU~Gw=jh)ID$yzoDZ7fCvEzId|$p@+orf1$wMr= z*59m2cJqIu1VCS@ue@t`T$%sw5%pHC&|XeXtoE~S<&4;K7xVpmy6IQGE*}ydIe!G% zMsR1QM2^Oo^gz+=gN1t3?`GVB{G9wBr|MjwQi;Z~fp41+V*W7|-rTVrlH}S$p{czz z>CsVV^;gV4ZIr4w!qTqUNM}b|P3=mfAwD4vAYb*2cOq1L)wB0#w|WRK93r z6Ru;>L=-CUx7oE1?ppQU8+S9@6w+P;@^FOg<@z$R6D9K9s9*#p1n-*3;bshfw z%f>T>*sE%*tIUiU!Zg<~j1j389Mo41q>ZTPL)t^~DnqGX&nW|SC0$HZg9t#}EI{eV zz?@F1_aqsnbeKadi*hX6sOP9y88!M4luMO|uW7AwVcT5S5QxvL=Z){?YGV2r?M4U~ zI5x>Fle%M`)jHri<*UcswrU@5ij09n-}M3KpP(5=Du4GLdiH=x3sKyz_^S7C;Oif( z{E|oX8YzgO`?I`KoL7@jT zTcIZ`x*fc} zk!2(;1n@Zgc3nxGTT^&ZmzLG{X|kC;>Il8E$Z5xFr@2}k)>mRAqCPHsyE^{o+qO(W zWIt1Vo2_>*Z>HDSZWHz?pTdAi3V!5%_QTQl$TYF&KT#-XROTtmZ zmqJJ}Fr*7uB-4b#1h$n30Ty&D*MC$+q5!!SNYIdJiv7xZF0kRX%CRZa(uInpRXg+; 
zNhyBA=+Eaxym?qweg^&;QrQ*fTN5i23ZxN!bAw-HuyqLYX&SmtVa{O>Dl=*g-na8N zBzkV!rz_(%zw_zOU1s1#V5%li^-`W64EWq-fUi4bo%yWogQ3}YrT)KX9xqebpUlr? zwNwDv0E$p!8u=&Gup<(0^WGAO~CokP877KFG zJgt)@3 zx%51jfjyyf2fdJIRMok;M(>1kmhRfYp?lNX_ax`4n(DX8A@!Yy1%%}~j{Iw3J!^vVYrmenAU$ep z{SJQWI2_v5_L%#vU;fYpGXXJz1MM1TfhMPfwWwMjj>WVDA#J5FpLW{`^Soc6!$tR@ z0JQ^76C@>Ad*}}1$geo+{<=M2^m7!5pHPC#wk*5AJNvsD^NjwfORzC0!brcgTcY|2 zp#$St;-`J5I9uo^@szS3GJOarq%DACix4Ic0z}57AkyX^zizW7#U%?)aNYfL_DbH% zr*d||@NfS+jBntHA0n!t2lo`cx2jU5xfO++49hh~f^5kJGt$yaCN}2^ewB_%>$S<2OWc{NZ zv&gd`qqpF33>OVquHuuVU50dJxz;OZ6yq{4>QR8`DR90Ma4pi8jp;LBkNo!DGa7m; zJc`I-13!C#CMfVD_BzGdLn&$A{YzQJLV4~+Q~_h^M5rst?-J-UOO}r> z2#XNd(+Ar}V~FC2poo}CYA4)Lb)<2N^iXz$dK-QMy+m?P;kCjrIM~2<#s3hAj$OVe z7Z82Sc{<|c9AK0$roYnZcu+KM;e6Jts@IZW1l7 zQDufjJ1||jUSGwJwPpY^?fRLkrg;SloJ|cXTA*=$h$^%vN6%03j1ISXdB!4%pC?G4 z5M!xnY}z>r#mhu<&>Q8d#m{9u`TmX~UVE)lzOoiMJeFV^&iI!v<@07pCJD|_7O)fT zSWoQ{gu;3Hg;zhfoB5(g8&}@PL?+~Cwd)nL6AI20-Pc|+S-5vWE!W>jL;ADYV^hs5 z06rf&L|}UaDD((zAEpgYtQZ^$*_I(%0s&l}jKSP4wfLg|%N$@Aad?$+{Ht!V@}K_QR4+8|3Odz`$L8n~wR#R~{Q9U_82P>D-lb>8&IuiSMob0T${H^nOLi}`7l zLU`r6AZtrq7i9*uwc865dYW!5iqAFq`S*uu66%-w1h?codNL#DC4Pl!1>&JFOJAeK z0P5&VBvMLIrV3HQL`UL&VGI+hiypB&U(8JU`_O390BaIOF)E)i+X7i8Dcbw6_0S#8 zfZS0Rv2HDXU4ZCGLBKY`x+|?Iiqsdtnm~EAh<7(k7j{W}XQh2bgnWwZJ%Knv3-J1LyDV)U&JSf9zLkeBF-7vly=^=6YZKMa1Ac&#pj}SV2Y!po z7W0CohRiX5Ss7#XS1F+&3u;X{vELJ!waB_EHyzn1U}7J#|ojY!pEwW)O=uZ>uP;V@@)j_dljVPhlOn;YKw+tjy^b=FkMGG7fA9a zme+av-(Wne(dmuPW0AjIcfPa={IJ1KCf+|0&DMEE zNzCe=uugxQF^Vl!_l1C6QVtM$TrApAw(ZhxFK(cA3G1GmU7tNay7T4h*%qAT?cWRW z2yBTA;%H-jK%Tj~P$HY0_V#rf;9ITcbE1?f6S;An!1JzMNOtnadoznF9I!dhX3fEYwa zF~#MzvQK?1xo#q8lq7;8um^=BcE_$eht_;4)1Iy)jKRa7?z{$F^sM48ps?9$h?SUCb_ivOIH|Z1iq^*^Q0` z7|UMW7<%Ky1FsKEVNGz(Ozv8T??$czyy*l+aJId2zcj9{Id2AdV&7R~Q=8M0YBLB6 zVev=(dHG&8Hx{4b9qbeqWpsVGX33lie4g z?EY`36c{FaN|^ zMj#u(ZJX;4J@5~xXQoS|v4c>;ur2v{^e#fwbI2#59>8+_h(+`Hs+52UaO2(= z51A_r*k@%K>A`V9+<(iw|QmgBaDaM_5)bc^i?DU+#ASWm8VPVE7 zN(qnDHT?9vC&3Hrd*OLGo&_@3el7XKJ4S+rw?XunTkOom*v-7u23r+)6`K#hnzXLB 
zf@rKed${YmHeYI%6Qm>8!~Xd11=@QpSBh*}%(>U9@|}1at`#<}^Hiu?_PS2ibySTI zJ)z-w)F!L_$|hSrYtaoNtJlhvyKdA~eihjwen|5@OE;onl{uk!(=fIdIn(M!L(>4? zuY2*mwDH<2iFL$aoBz1HZc(AQaVaeGrQN0>A>_LBp-BgGFB*&nq|Xqe|}FfR4!BgwXodCNSX_}wvcb9bX?9ptjS;jwR^7~ewlZduzkTFgV@(GX2P1fbf_{Rd&ulTeCllF`f$ZPWNa9@1d9m2Cf zpM<(`13T_LeGvV<(|>)RStxBUAKAXS|IL7`eAF?@DU<)o#?~YC+%EUwr$j+{NHA!_ zABFW8d&PG2OJ{V~5A3*i*z_Z@u>d+|~)P+;S1yh-cX9{LKL&SS!(I>kOE@1@0u=c5(4 zX_mU5&23oOw+;t@`g~iei8OW@ z-Xu=0`DXr1`MAe>P1CJCHR5L8n)-%rtVjC14z@6YS9H0C;~&z?RPvX@%hb@C=$rz@ z_-7Z_2LHWfZvp-+ESp|uto4%IT#SU!4k>awV@R8pedA0aLm?|zy9V`3sOQa5XVY4% z(mCVLS1HQ?VgH-*tIWpj^DJS6feUeiwcZ4$&!6wLCqAxuUetE00%H0vbGES>QhwZO zp7sUv*IVknsa^SCj}Q^rlc&OmP;GUEq*n>4H+o3j*X(!++`kUWwtGAr6d|q?HZN$C zzXR(g0vFBllMGIc@QzpDU$z2IbeG0uvjD8X{Kb?Kqj>rJPn^zC#mfD#Yc=^+??$WA zFX)Lv5e51b(0yZG$IU26VZ`I~_pC9E)5-NasNw|v$c6f5wEY5nZkuqPA^F`{_S%N) z0{m(zF63ns?+k^XHmP>C&0Dooe!u2tQyo_u5FB{d*!_WWRpPK_gTPY&FjvC8IUHwC zay>tix<0%DW8`|Tsm~k1qd7ff)9DR6=d~!%KTx&cm0&i(Fl+QT$5F29OdYkH6*?_) zQw?t6nm)T(1>4FH=koLA-!g#BZvc_KFuG^>q1*qRBzTbtgzg)D_Uo~rZ?1`2)XM5O=c}Xf+@GW!6xJ~|SGP%irvp8}(^#kqC6X8$LRsd=K+Bc9L<%9249 zcsW>jz}H^zXE@<(df>`JvDS*`dz4$f&Sh`a%^(DR1a(ua3O$h>E96z$qAL^dr9ZvY zI|Bf-$G*rLvyR(fgq-*(IKKINmUGO>OtI||gYypw@SpafkW;4IHNDJv204WB=OJr#Vj_V4mt-_|@|)mQg7G%u^bZq{5@Zw6qa&%4q?+;h@v z4zgFeIr1>UX~n8tpSEE}h!5^5ZM(0!R35Z{Hu)@VO9WRi>h;i8>i$~Z14jqS^vqZb z2psTPFZ3i)O=OZj9U#uwnSazyp!4z$2883bu>DUfqpsY8m+v4qMXtfNzislf3-C-g z&98gXcz*fT)QoUg0{c~*|Hh`=W%{}1LSQ;jz7mr!HU|w>;+7JUhoZjG0cf7#f~}l< z=(9N;pXu9bN$mGe)Q7yyEOuxl^eK&N5m>(_cyl(Zdq+(@`~e($D5d7=C9Knn<@y{> zE%uKE&0%}?O7Q{0A2JJ3!tIAG?0YV9Gl0zztwMHCN{olg%X#k{fVzDb_CVg0!K+8! 
z5wPd)E~J~88yVpH0{++?2Q~HV*IqvFVhHDJ4f=%`jX6fX(_T#D-V5D@#yQK}Jjxfi zZup0MQOvNMYZ3RahyB%p8`J(z1Vh59^#Y>;LeC#<+49NP5q*>f0|Eus|Gv?dLfX4w z?P33WHiXQY95jAjB^lYCvZ=gf$=Z}@$Gz?;7?za?7b`a6H^zfF$hO>l zzGf~EE)8~EK0SFpJoyWgde+&L6Ch=3{^swmr~}EWmdcbd?C_vq3jZ279i!GrbR!hB z)I=EVk}u3cw}FmOo<2euo^z!sy|oL8x}f>jB+~L3Pr(&1)6Nf>Zyu zhVJR^2rn=_t~SH?!y3AJ3zRHH4jz6nVA{OxgQt2F;U^qvd51N(Hn)gV%qnd9$(^^0 zBX39g^P=Hg<%06owqc z)ugLtixEkbsitTk32I$87l!}MQ>F;B%_>DZurcxqzgtb$fVQrs?;Wz&fr)C}9~}nY zo9)sVe6ZpwKYsY3Fj0Rdqa!b^HFlB6GF|8v?%_jI-xCg0=Olj@a^h3WL$;*X?S@~d-MXo zIdR%@HP#(VUJq{e**X9$4SaLl4yw??Sf zm}{r+g5!SmJ5l|-F#{=#OCO{@1m zKC8MfdCX*cT@+n`GTRqi=~MNW$_aX2Ym_BSQ^JD)SvFVPDcWluV($afj$Tq;e*lDr zFt%0O)X0Y>46dmo6axtL@D7P(j5y@~GrR_%Aq31v^A}5dM2);_B!A!q5fM2t%BvjY4qqoO+l)IfnMXa3ywLk$;k$ND!JftLU_ZMYnzDv zfu$w4r9&PDp7m|_AqZxCt(g)2gIh!KrMW3ts-1p865Q$hv&%3ZE_3{xvGQPd(d1I< zHhAQnlkb!+XEU<4BVXU0$2RAFtz);SPr1gXzSWyl$+#9UddpyqjnRO};?GqZ{mK=F z8J66nJY7T~SkZBqtZeejJN?n1UT;}aDXeNvR$7}!wUi6zN??nUt{z~ViV^S=u;wvD z`Ac~Ii#9v~^JanDkf$KpV(q*Amm1>}_2yrv|aE*1`(9 zc4tq4`kZXApxjIGiOxzm9MmZ=23Xh3=YggSyr3IM+8_!r-EbQyDzwf(0|=i=qC|)h z$TH8rxZ_Qc8aXmmDTAGn8bgRnnCxE-o=OV_(I@17QXj$#c?DqwokI`PVN!V4qkh7f z|D9wL{tMs1Ix?S$l#QpVt;9CD6a9Z#I`z5k*$ZNCioG0XjpLB6i+9Qf&ziqWE{s~G zNma@uS0*p^1&aAna{*N|9krC2ivGvgI|o_PbPK=F^t5f;wr$(C?P+V;wr$&-wr$(i zZui{o=e_5ACvKeh;{K6YnX6W=wX#;$j@Y|u=TEp$GIB2t9gMU)al;u*+KZy? 
z$|Lnzk@3m$yS_*Ku3b34I5vP6)+=`hO#A%;uwh+3ENp>Fz`pgC0L=4 zWx+ho=*4qtPl7QoL9?$x(`s>+)qY5=Dc&eAYyvWsMjGFOc~zRbt|>l_Ven|Oh5j7R zKu^zd`2~Y|z*;|G6S~791Ugg!760V&`|txfx!J_Q=+V;c|627bf1LFZYVl@FTme4= zjyN>5$(U5Hj6JZ|2b`YIqkC-gwaW41@hBo9Rj;Si`hvz<1uNvC^JsNK$x&GfXe90D zw}1bA_KfZ9@7kkvU_;3z=)Rum$bds7)=D&)Bw3H|Z%BMAmH)X+fufv`{5xA3W{x-X zPNw)Lg`*`g**MTzf``;jmt=I#UeWHy|_oSJ2DQBy%nt!jbwvNhI{j@77eQ8xjXv{Jr^7dF-qlWy~Vt{Psc)jH-aQ&3u=O z$b7N20m>JQO=`R;`8EZy$)qG)b4##f{=uC$+^>y>M|MC63RX z4PUQ!bxv7wjDA{ z83_|@>kVP?@w8?6HA6-5X&)|&$a#n?*Vv_nm#2A5`XjT=Kp@3=Ns;LyGn@*sU0%o^Sv`BQcQT*=tnc`fg zyqeD@U4{+32kxDds7A}%C1vEWpZFg3GP#T1j;b+f@qn(v4)USY781zod|;Oh_E_sg z$%_j3xYmivb9ctpT`OsEstP=_fTz67`cnl)O#u)ZVRi8r^nHizCEit( zxn!2!*L$<2&1G^rHcE>cN+p$S(5+KDge+Nm?Jplhnw2=sMtbboebMD>S8$!!HNiPfXHLNI0vheUANX7Y;^To2OkUHTZA;BKVwE`4Mm>cYw z*7w6BHhm_p41Xzy4;BW!Le>Zn3VW;K0eEVv8lF*;Vu24m$z|l0{|nIbe?pD_SD>zt zgzz^K_xl{<|Bb8rrKjZX?oww3L~B!p!Q?Eeq8 z_Y2yq2Fb$03Q4E-h3lpNM)tCN&xRh7k&W#OI?W9E&86k|lC!e1ezlmGnEq4PzuC0k zI^UA-<9DI|8u10rWn}u-B;yyh`Fk`Y2PD(~tIGh%^bOx-V)-)2K+nMPuhnnMY>bTm zmhUzz13e_$m$h%%w^bJU?|8n0WdHK@ujZHZKk|5d)-e;>b{>p-$Id^P?P4D;9E|6GxO zRlXzr9{Q#J9hD9w2LmL>m+DuagBg;6?fX(NuyH^#u>WTe!s!}ygc zItk}5;IWZ_t(mpSSDbW02KM467G~yOx(r{-O6hAct3rNF>0cjT%;K*b;9mgl|AZ~G zebbl!YZCuguJM1+&0nkhzlpd(@snR%W9Z>)ZjtECJVFS)ion=n*#!0rAtTR!#HB-+ z1(8quTl8v|qnkoR1N7gnbFy~q^>~wuGt{4tKvjAN)~3wC_U)Y^)qjRe0Shv2mYpv5 zwVYXD1@g9~4@11pH44QZQ+qx2Iyf?|bD!k-ci?-uU}~GeTHm+Tbn4(#eFWo947kZ7 zOADO~OgDL#dFm4s+elsRZ!+6?Pvz_xHUA-A3rJV^9Xz`Psci!bf3Q&#a46@4@&nx) z;W%-z(b%K+R4JSAtip99j61|>6spnHxCn$KPx7;brXJKpJMJ~xQL7P{tb}sOWza;2 zKw_zyDy%}1kia}#i`b5zW%P`Nxg%igd10@95NJK7^g8s4Xz8HvP9a!}a8x59ftFx& z1_OzS)Kqc;U0iUYQNT=aB9MGFiiiYw!u6bp#@7pD;-0;%z{w}x-oqgoHDlqh|&>)iW~?CvOPQ3ihaPZU33{G#-$5IM@K{Cc5IB25Gd`X zt31sgK4$a7i+9`C2ckw#&BbN8LhZVe@*nVj%xlqsYxNkf%Xi9ndwlV%n*Sk3DxOU9 zX?uP7^y=jMf#1dTQEyfJQQLjj@=x;eKOT753@&qtLTEisAb)aso@Jjf)OWQ{M*HrT z)u*x@UR0o7FFn_w=vQFn84l}LQ=}2GE3QG*ol%m_w0SLhv48P7;Qq;mtM#^@+9-4S 
zWO-JCOP2e|z7-1)!VCe!mzfA+_FLH{7B(A#Z7_qLr~4u9fsM%5v18zta*%+3K}yyO zMHZUl{96FO4~~6KEDy&P3@tMFaR6d<$De^dG<2mHTs3a8&b^Lqhr1Q!DvU?gHq>VL zxaH5iJrKonhj=a+^dP`UpVEuK3-Yl}Cby4~Uce4rf}0*flzGH*lLgTu(?yTruGs zC2SzPHY8iG=pI`3aL4+tZYUy?4ZLMxNnMEm{TJ=wr53TdUur-iSPHDDpl*90+7Fr{ zHimgJkWL%~L+~1DvW~azX;*LsjBdwm)vm%V@7d4Uppnzn>_Wr7e_xnPF5aw=@$}c_ z^6*vk$BzkdIktll`3p0~4e;qd5jS+t$+Y1#0si!mo}Ww-WCHOK*uEjJr-JSBZb-L_U@=!TEcq9kCZU(Ywt^k{z)kf^jm-8C4uNKT2U&D4?j|gqSmlHgEjIvK`F#X&&vA z>jB?JXsk<6ogA?p;&&iKUr-+kdvHbI*MHX!E;J1~Zpv?p?S-GU&HFX(!!h<2_nP)_ z_I~c+-nyIuojSgEiFs&RCh4ZgN|l&Uf*mlo31^e=k^5GTNd7Wutrv8cdXdc%niD5b z#0`YyTZrLI6=+dPX$@^O5aC4V4JM-Nb4L%-yv0$5QpiN3@{De5E7OxUup0|riM-Ym z$@YW1GgBv4`cakd2JRXMcS3M0_rUFz!Hn4R>}MbJfC$qI+bC*vlJEdA2M3s;B~!VhwVkC}MfuqotgM82@cP!|ChA)4P{3fHE8UP>8q z@)Nm(OjO&!S#-p-6oWN+uu1 z3?pa*)%2&_{8VtTeV2zuC~P5{7XqROb7951bn6)quO6t`@Y`4qczs7c z8&YUQLhHrb#m^gvAZiDGQ@iDI895GHdG*Qc#TvD(Obbx$fbZ^e(2|hzKmnNTq%0LI zT8gCe`c22KGsvnjtOb1<|U4N$u<@ zuX2@)#sgnw$9GKJz>f#uk10NWM0OWMo$(&4JYK1+Hi(H7c2;UxNG#J1iUfhYxVLev z?pDgvGZFF7u>va9P}p3XB;rJblT0%CehY=8d$>n1a6U_Imp>kMbciVS2iK$r!r?*okat^Wf9WKwX-PeLg@BnF)?*VmF< zdAq1JZ9y9`djopcXR?`a3)s?)ZPHPup|1mC%DBKk<2P4?mRhxCUH`!l;eFywq6Au$ z9V+Q!124A7irrt+>!uKIvxsL0v&m-@oSb4vDzIl$Pz{H}F_Ye-C4_x0UYI&7ZyukY z+eI8rQB+oS=PDxu?Fs@HQCrG<-Dw+vI-slKE2Vq;8JM_#EHTYMGMemrZX%N!FdZy` zs4@%=Sn_oI{p#pk!GV4TP>IFBg``&~^*<)OmRvgbj>Ros0I&r9w#l_4UpI2?MQ&O& zk=RZoN3$tSbMOD^ZBU(Wo!g37Ys+avuV)nktqY% z-x2N75@sm!#LfU<3NC1(r@E7K>)X_3+YYym6Q}d0 zIu-dn2DLTWJo?6xUOA>MXaA|v*1&>YsXSyI>xLNt{U^@_FQHxLtkrMlVP2 zOwKMdDZ-}`z!4DG7E$>mcgguto63jc4nn6lC+N3aHp&T<(m3Omm9@MphX!r5av=2_ zq2Len0gy*bNwReS+s;=(mX;(yIUXl94B)TXm~@p33-Qn# zLF~^*vuj>uy0p4jzsTyHHhf?2ux*o|CESt?(e!GOyxftk737fZxZ&G~O2owM?+w$s?a1{LGN zX`DZ7hrLr6F)Ylj_?ArVgt}KNF352T{R5pWf^fvtWXskMA3RPL%&t~kM^{LysETvc z5Tc{m1-n0h5}-X!%1+K_9sYK*z3v=YM8;zl4(udCGkUo*%jL+HoMXxF;x7jhVhlqI2|nLKk@SA%lpDMJN)N2lHp|Ilsy z!Z<&-?}vXggOQ(G^3%!X1r)uc=Drk&-B{HDRuI;(UdZ|7h;T~m;8C;_>%n1~vb;oo z#^0d$oU(ov841Z>#J~x(SCm&IOXx&YH}EZ2DH9wD+^4H=PnJAeQb492(imi{&%j 
zFa2^HUQ$sg6@$yRcr8I^Gf+$>#cV!>L!>*W(!f-97h1zG~ucnx;# zSjl%NV|pmWBQmO~tlSbjAr6pV+;6@Vj+tJLExYh|mk;X9{f5u8)IP2-YQAE!KT^9! zC9WBaKc?FRhEMbyvPgoEOlNUG$Cv`4L{lD-Lt-@|ei)ZG^6K~l6@pA$)Ca$675 z4<{x3yCTL{!>NZ%cx#K|C?1F!R}^P!1wciPv=EX-h)K9AdaJ!QgigsS!+DrI654zY zV>on-AH+VreK)7qdux&!qEtLIm&4a*h1b0DhF~yNVkW{_GZoq9Vj1yRJ}&N`LikK`{2W;0jT3z8@h=cadU{d2ZP+@C1%9I$TK2t~uZ zKEueOSWdH4+MHwhJCvp|SD3ixbWbW?chR>q?^InVp~m~fuk2&CL6tzMydt=9S35a( z3&p{y;iCOG+2xU;aHXO=ndeDcy9!{AFc}b?XrZXnV41q zYl|X+$CLDdvN|a*)~|;+kq~@tewL5ccH2|t!F?@58MXU?8v}8Z=8~@~9&{b9PThpZ z(TCBI7vxfk0h_t}7%d)S2_z^dY*<7As*Q!ChdpuM!L&W3&7$m0fz72uXSWjDEF9DM z5I(oe-RWSEG3os79+w4)iAg4%BH+^;Mf0{=ba__?t7Gc4z4bi3dcD28x4KBjLQPE- zK7gXARF>x9Xj7*>Dx4`|P+jN(MrJr-53+Zk22>lRRl{)@^;)vVnW+I;-~4t8*);5| z4q+a-n9R)0L`e5?;r9qyF>PKw^jIa764Zl8J+h#Y+EM|RQfB8y?G88zWZUke^9`OW zbq2lX-W&_euW2mo4`xe(muX%_x8BzF-oY9>vMC#p4^fQePU1_so|EXXc|6)NPsiEB zl5}cHl$;n|LW}*WYd+D0wI>lY^P}Lr4_ZoNRtv?8&@bd|!9Sfw&^qN+_Ek65K~Z-< zE@^gY=AQzR3l1~xX+WxhrWfZH*AyjG6p?G#2G417g9M`s52SvHV|{J$!Ix2J$q4Pe zFU}P!WRIK_Qn#@iVP9F$sefhX=B*>VXsMosV6`cXg<(p>+J705TQb5{pQ+>A3y5-| z_WJp*AmT44)BSmPiQ{JXGOYLMuHXg>)YcGmMqmB>2+>I%{y3F!<}48h4-2 zZ`x5qc5qm(zyRGCwJ{UGh=G}?WF=|ZM}CbL&If*brnVH}z}-$_{mC}a)8yO7IGuGy zOIES2+XIv%R2qLZev4gKYn$s;JY4}HRIU5!shhT*H(q; zMS^xkzj8~qnEHuSJUU8frMiSP?cy16q^XrMIaoW+rahGjBn3%VOCu8p2@s)^g3U*E zg((_S*2=OZ3~tPdZX&_!6<85SMRnoe3XB0*(NGF_V+Wu`<~TCGowu*aL$o}_Fl`QZ zU6*$8?{L2BMmq!L<>R7{?$T>H$#2#VBYJop4(lW?L1}vpB~+DDb-r8O-q*|}Nuwi| z=sMVgaq0R|4%w7p;ps$NOeBhwJmJAAbHEH>_hdTb4F2UKE_-w zqk$8uU{L(C5eXVltcb*tCDrrk^uVTki(b_=HX8buJs)sdL6GU0~jlltzlx>j=LU*J1{H#DT5YX&;(Je zv1d16<*fz+(0p2hQ_pVcvEnmqfwXXpwaAFWD4iIn*^dn+tBR=PNbBP`4=##DF1DeN zUWeq-u?7Oo75FP$Ipq{xl-8T;Cp1MlnXmELX~QXDY9xIw2-T8Z;9`9PFtU-^&>Lui zONs%3Qn{27QQ6pW03k*p0JGFrWrZXHRXr47$op;Ekx-AmXVm5mT*K(!x z0qPKYw|DZE#)ZoFc6uzw%B>)zoNB+HnKRD1xe4tWUDWzmdA?Tr6+Ru2#+*6Akvggt zInts{vTB5RUh%VWOTAli#QEYe<<7=FOju<`hjeEx$MGGdy*tXDtzVw(T{?0YQ_r~s z@1@*b0oZy@BA|s&<}JIjXt{;1kD*HNcICGbqCV^L?@g1q&;e*s_7J0jwZkYsWAXl? 
z!8rnnu|&)wjyVMhZR)(kLZDJ*x0rvJ`w8GR(|AmOd%#E$Y7@9h$sTPC$d|hdE-|B( znLJbxzRZ0lG>LJl;$AFr<<|*w`A2!qw;R&v z8p|y5&2}%%>YaN)${<~87ULEV8p>1kJ~#xkt7H^Smp~18HeJ3Nt@(Vkx+*?dqb!?i zJsvlqJ^nfsxW&2T`2hdWeph}|Y?)X_Jz<+u*^64E^oCIx$|#=G*mJfC=8e%MqWrMP zTG6YRA1>Q(ylvl`m#RcqlZrI&dbF${ly4xnO1~=pFb?2rE0@)aWPZ~3Eb~gev->*P+DzBg?KAo;-A>Kx@~EM zQbtp^C5m=ILSCM(N*YQl08e3BZUQu@6`q4IL0DvpQy?HR*@$QbRd-=}4tTSW<9$S9 z9Vpr0vUC=vRDw2%lUyi4-KFL>flQxh3<5A`vM+)3!zhOcp?E)Cd*ocS# z%6-==fP6eXl{}SH{+JEp!%60p_vo{HcIIbc8ekfS5=KdT+;)nGp%;`v{l1% zOUr(M1jCa~0&h-f-T}{s31&w&i||GIAbxOxy72DaCp7KBl%O*5r>%v)Vw$3+y)V{U z22{R+)KGFN7{+zfu3MCww)Y25)+&S&A&wz07Lk|dDwcy|-a)yOgjx>}V%4dW_JkwQ z&Rg{uVA@(TLpVuC$T(+|Eeh|eYA+(48n{Y7eC*}Ou)n?WrCWiY7B1C>|9pPqh(dO@y|j#!Bi02 z6+z?{Ow$K9tg=VqEVMbJeXPajBkL^hu7MQD6OID!S~NJT>Qi;exG7zu=B`TLI~8*m z3B)|){$Mv&mmDKx9-$4d0J365*DSRPj2?or)5$;>K-5((C+DSfOEOF^c_=;{leZ*C zo2?5!$}by(=j2~4ypY!|zMh>vMK~0*94mMk(>L+2*LF`Gn~MQu-qqE<>ZAyvi3q1Kw*k>Z*1mJB)(jzmJR;|i+%BZn)u(7De1lQw*3 z)Ki%(cl}X+!w92y&Y7r-!CUrq@4@EOk^aM+Q>NwE=4YR3h~!IV&01FTjhxvAW=u7v zoAaQP5b^_Qbq$55@oV^IAzvFsJN>J&vz{}-DPVOfE63~k zc|YVtD};4wRb_$7L&RC>Vd}&hn%L|eJ!;UfL@v|NOJ89TVgYC<;ClXx#=!D6!G$_Yf0<5g|VZ7TaE+fqbSo zPoDuF>FMaizUv*EAx@k9+X0M_S=aLfPs~(Bo16Y1t3?ml!*0R2rYYyf<#qV9C{~B; z6_;1A$Z&KTnkXHpEqE`)ez4>^{yMu!vPH6c2f)uo%dO@yw|Km~(o-d7Yg5yzs)?dp z$EmfFHe-Bib8XYL`_l591;FMq4`k!-tNZF`)8=c@Ug@r#zG{kN(zVt;&_4C*Sz!#L zIYApSXwB3?qbnyHzA1vKv#z~x7@!|o2ZeDAx;Pep$X&Zavv8+sxy9P1VVk6vAmYB7 zNe>Y$)z6IW`0GGZwMA;mF=||paD9L1*4liy>K{!%&~q2SaVSyTra|re!XI?ap_ed- zEH)Bi=c3)(euw0W)Pc#*v&+G0-g&pp%FWr1tu(IJJqs0IPfyjwTK`qgXz{m@F6Vyl z>*7c4cA{5?vMmKX%WXpFN!)F_n;_G$!HC~RIbqxRSI7y=k@o-&&AF!|K0Rtf$)lmf ziA$<;zU%uK!>x9<-{r{_YZWO|s>Ba!sh z(vM(-MD?P9Iint=)T|~fK{eCuyzeoUzJZ5?T{H1nNjTMw!hJ3tSw+68GH%Hxmd}{u zO&V0xR1H_lc(vTAN%^PgB2)4GKAU#8#uOR`n}mqh>N&e`qNZ;n*4wIhkC!D%1SnOp z7{BM0p+ zNb1_=TA6OKdNP@cyBuJNc>#UsY>AI}ZI<40^`2-%|3%l8Qbbq*^zsE)Nl?M+*7GHo3VBuqmfD@7&OE%`4i|rF=&kQ- z#^WYTp-!HJCGGvQWYSW}a~&|!?aNB5^)M+kAkr!Mpg-oatixAY*h*&M^LVDh;KuoC 
z@_=(^u(o!%92p8N>Ee3H14!qac$sgs)2nCJQH)c;7^O&aeNkegd*4wMO!Y!3kXajE z?l{8SMXXY$P71biY8+g{l!*%yb`tid%(c|DK{kuL#U6?7qCmQ53mMjeh&pceILB7KJo{g`;?oq;!Aj6 zy33hsCO(3zb1;1Nm2|XMv((|@?+_c(0Ht@1)k}2VTu(*s!bXr{#8UOCR)Tl*uegC z_&?BO^P^8?(6)Cm(1U{^2X^BG|FAgL_`b@)|uHa1*wW zdv`p$jrpzD+Y2;FfI~Ftq`+;s=WnV+IHsRc4BG4#s$dFfC^)ik!c%x4WGLDp@F@UU zpp`!@#uw@n`XfZ>`G}d66Ws-td$I4=iU8&yqww5%>pDU48PNbld#g~p-fG^a)hm{} zRGb~74e@!P%p9mmLi5xEew*41&_LBef_71Mq>3$~sw5Q)ji3kBLJim;VporwzfWGb zuy#=mO3T^$K+8bSc`~k{+y|>egAc!#g0GCSbNMK#h`dPxc`4$~gk-M4AG|_z3j+bx zv;h3JSk6q=OyLQ5X+O=!gZ=urz334a>%m_wn6=<y~&Wg|I=6--bC6I)l9M!Hjdp z6Y@Zo>0d-JXjz<8R`O#zKn%9#FW)L(?Jq+8!$whC%c~vK2Hr}F2mXy>UVRi@#qs?bQZ{bymRKl=%Z=Rz5uIus8S-383^Q@jpH z`a6-j8oC0O^>`cX7@vkDEG@0{D=0*6=M0h~jLKC5$6_SFp47O{$it_Xs^gvKl(&w&e35@*WUOTzrfvp z)1%Mbu+7K&wb@whGq^vStiq-fyVPH^v~;D(Yv7`zDNAoN;I}o)mTA)6S-xd7?&xVO zCz4DjWT8G>u*KZ!gvV|~7HL~NhFf-AqIj(CUdg+}w4{~;_)Vx5+C|f)M5zcC>XO`^ z_@nGRk&AiLT4es`kIX=w_&r0XI77x&{-@xDKbg?+26VZC)m1^r=%$Qds7#OIHPQDS zWtQSvo{MHe44BcwedzlQDsbkO_%JN{F!Rjr>AaInlWa^fmVdN^YkuROXQBg$`tLR< zPW-2ZKgUD=2J1|V7F@p3ZdkAr{jRoXbr^jplPdbS6k8UF5%!J5jq3I__(0Wk+pgu% z`gPIuCVQkayt7iFfb@O?XEflZ)z$FzO9JaW$GlV4HRfN?_TKl-5BKd3z%MVo5xr8q zLa$w3f|3X6(^K3F?2EF;#SF9bB;6_8Y+PQ0ZszS~X-!M5UEi4(KeHUC8nFgP2$?AQ z_tbfmx@0Z&C_bXTx=Dn(3o^o!5vG!UU{F?&pX&lIn2GZrC!E4w$Pbi(i}CP@>MKzs z%oCE8{>mf=cVI(LJRMY<^o3O?>RJd|uU72Z$a=rTuv^eZ<_8G%_Bw%)IFtGF$ip}D z^)%=(Snu0KQebnANUQUWYP=9W!rj)gKN5X(SS=scF6C_6>|ICkazyqC5rYo6|5T2c zA8Zx|Zd5X~v!5S&bZudzBu9~Bk}=!J4(&m&Nrk3*wh?{y3i{sb|MLBdlYX?fXK1Ks zq%IS_7_5}%300Y_-A6oV`uh)!w5wn_pSRay8Uia$j1=_Kz-?sg@H078a@w7=cQF$! 
zjiZn^xpEvj{!y^*gfG|E4x$tu<=0aXF=BEeIh!s&Z(F#eUiG*vUgA%cl5N^*vkwd^ ztw=sy7JNLW08rJf*IUEaTjBec5&M%u8V&-OFCV_r5p0F2ATo5~yYyM4X>DUR zR(7&}BrL9gk*>|`uZmK&eB{uO?q1HpBsi4qc1ggEwA(dC8E{ZYVFwgBSg*`v+8XgL zMH|aGbH+_}h$PGpuVmRM;tNY|I1o^drlvA*8CeCV8bFl=526YLlT{kba(HsspvPm; z=EPl>128aHJ98lb3|f=kiY{Dmt6Ac}&Gw9jwQ)=aU|H5jR`kX}G3NMX^J67Nlv8c~HMv55^C zVFg3M?kR3pHl^ec^`7W(qkLTErT07pvo3fV-_V494m4MCPL@X?3pZ)Wj)U=HH0z|WD?`{xeoAlCLK72mZ9Wwac%$6@BY`|CNI4f z7$XmpF3E&?Li{-|(J^~E(H+GWW#jOQK7F~?PU;XxjjolkrFhyM#jX>9r(R?Nr`vh7 z^y2s;iRg@%`=gaU^xVfeaAkxj6p?Wum7C97gVQCQ#;KjAiTa8Z6vFgQkbezHCVjo@^?JrEV**tvZ#CZR z(QQ+Q+ElvUYgPIu?mp%pkj=9l!TNRM=>7M;R-izqy;d#aYVdTfB(BdeL?-sMX71oh z540is1g+bb!#*G-+dspR(ML~UKrPNhn=`;o=$wgWA(@|ZOiRb=eV;4q7ndOkQ{^A( zhKn?(fBlh$xweB6`Ug|+B8;dIFO)@#^4Ah*tHai5)}@yxGEYI~!%dS%CdS=KV%Scf zsAd95fLF8-99t`yu?P4G23?ktb%b8aPhX5`PT~>_HCA4yToNGMU#BC&=FFy$T7-K( zo*$y3=6zedh8D1_?mgO>1hZ{-BiQ)fS7_{zkRZ4n-%jsyHux$mx34Q7hC;47->To5 zH^U~{QJAJY8qaZ8OpsT_Nz06pEUv{~IX-25h|>w(3#~b~82Ski1eLU5&BAx`2ZmD9 zY01;8J2u+E{uCaUrz1>6bC<`ZiZUBBag`({;b`oy9~C%xiFrR@;obUEE$<(hLap06 zB^2210X!@ABkj_)5?2=bJNCmAxsm$W+pFSnSdDj+#_m-fK&kmha&3_h!GHZ?>>%Mv z=3}uKuYEX)+X=n5tRauV?XBIJuljjR-BaXEa|!qp82(9>-0U0mTXeM>2IbbEd%EDy zyLWC~7EXw+jd?#yySmkqX47(`9W#ZJ)dH%;daWR$F$l2&Kb`5#2lfw4deV3j4Fs3+ z-06dQ^s(qr9~Oq{hW*l`Gj~@e$RsCQg**5On_D6JJzYXkxg1RDqihd6Gjous8R6~@ z0FGAgr+v#{@A7ff`MVBbxIUWV0k=*1d+BH6X8@r7v%5M55fj8x!ArA>TQ-YK%CjkE zfc3WVxr-V`CAfV4;Cw)3&(tBJQDUpmTJceOmI5tIL@`gh6uF!vnX^uIj)8R1S!R($ z2g!JrIE;~t+KRC(oc5$$!a6Sn>qnVz)x;M9V^L71&6okc`SO%a?Jp^UVS~fl<#2{q zsRA{e4DHe26X(MD#WcxuM15XYtrQeVbW`mF(h=*xul_xXm_2oYcVqZ8_^zxCWN)6= zkEm30T(bN89-E$2uU5iWdRK2(Rfgx7>fWZU1!7K&5Tg4`U+`S`MH=JeB-h z_|g4o;3Q8@fnLOdN=>_>XN5|tD>(Oig2dsH)9 z1ShFNa*_knm$(!&30*j~!%n7hFVzY(#NDGMi*qTf>$a0K&pU;d`Fqia>{%8M;(~V1 zZ48}!v4M>yxF}pE(}#hGS3B#P5~S-hO_rOZM+HeMHaYTY9Y?XZ(`Vaq^%$ok|Egd) ziQ8gnd9zZ(2yy%7i;u<6X1a^`37HI`I!)t-(ZD=Pk~Z9(Vi@trtju?i`Qi-6N!1~} zh(WKRzO+$=2ghSg_NlTQ^tM(*;)0DC4$>$l2$(E&Jf3)-e8(9T5p(jwjBV=`X6gh} 
zZtkYz8qo|eKcM*~)LHm*!_b%CJMB~CGvT#Uqg_*nfKCAfEJZLO zrEY|TvRaa4=8IUBRIDjvFYAFQcef|tIrnD4bU9CNnsbT-;pAONK(aSrAn?v@Xe$fG z$L47Amh2lR{_yn}F6y&39(Y;LVeqo$tMW*13Ll{=0PB8Hdytbdgxh-D`MuuE{fv)w zJvlo*0K=}!bwrjCS8&calTmt|Blc&q>ou=PDoTn#)^(D9{MOrUfiOWm!;d&Nua(m+0^tDeBJm7Nc zYYz>utJQFzHfa8n-r>*OxE=w*^rXo6VhJ@V!7!&zmtdD@r3ID@j(KT2hT)0EHSP)N zNgC&FgFX>9oYXm7YJbTX+I?~N`O=4n? zyz!u;jh~V1cy>2%VIg@V5Q$e~_3TvXxz(#&<<)HgIh|DJVP4=4X}n0WW3cs663&xF zA6VzZS-!g0aU_OU!BTH(UfWIAfRi5k$RhG-S8O8!?DqCGjzjZ@T&=h#;SW8&jboVvy^;X)kyonj3BR6HU||IRi&{R#-Lg{T5Gk# zG*xT6eVeDmF6q-{kY_z0kS1c3W=o};3lfjgnl`zMAO-L3i6M?b`$&;h_17aMTud76D_yci$7=T7&Za~c! zwUk_=O+pRMhcbF$k44J>}2MKK%&077dFs zX5f$n#&_*zBe_eaz_peLWdIgtRN$smWCf1#&jBQQ!n{{6|B3O43dcpD(@J~5lk`f{vBG;y%=NYnX`<_EG32x){g@bF1|A zDmog>UZ$xahEbvlP&I$5E!o{TT0iT`md= zOb$}MD&iAYjQwK@P1YORdsfdNQQ<1x-Ux~_S~dex zC$`qr?X>o>+H@JsFkPTj5?NuY-uM--oY-@hXa}{9=5|~ch^XqpYL@C{Z806h){E-g ziUpGPcYl!e_+DWhC%8tWt``eawaO$LwXEoC)zz3==$5FzI#kvksac+Ma>lJN!1Eet z$7LxDQ8wEgeJF=09IzfX6%=$dcC~hQc66=HbA>v-JP@I6A^8P%1_YR??a3{8Cq}Qpv z$L*VcpbHoK3j*EJ|57Og6lX)gDLX2YoY9f-JD|-|aGHnG{W{#k7rHh0bvjXbd zdx!0iKaT>aOM0AN>SFM8!0>3B(PAr9oS%sCM1FG0T7?v_1YH8*JX>gmStLBegFxoBny=Fv}{xDwO5yjd8Iir>;MB6EtpLQKLT$nx5n!B_Md zFQ}F)$`s0DKZE@5L=6fi)MBDu29JrLWb(#d`L5tC@qVNzHTnB@bsuT zM}$e5QgHnPZ9bwWE#>6>ylW)2nywa&y{!sqF_jx z0|jODjG}>bIDubf9kVpGMaWlDPDEZogK5bK4h;!FNddd}VyL$`2i!shox%_%BxGg4 zL{*-|b!n#_v<49o!HsXXV-KXCYRci^=|`}2cwCVR-a-6i~Ygt;zaa6~q_+m&jOK7T194v+J(RicWyg!2r>pq12U3IeW&fciUXvHz-i zXKse8fSVm2-on68QG{pX@1BIGXQn$u1jnGCU8^y?81mI`N%ru_Jh4dg(v9ExAC7tK zbWF#evq|y&y@x@vQQ5BWN%|{=@#LjA_!p!wCFE(#^X}l(m8*1BJ3_*%CtW9rcQ#Mn zC?tVQPS+|#_#+KZsw=ZJKM7(5mZooeh#dx5mtd!}K~PsQ0-O{n{>J zXvlEPQoa`UrW@y+XXS5Ir0GE0%HO2Dq&n74x&b@5zToE@|qTAy1^aA_7(-brC;mYbBBC-=+gPwSUtGTK!|N zh8;|$#raB3j}4#;YKZVhc7SwNB51}rtxN{|5v5KcQ2u{vdke6*mTg^>#@&K@kl@nYG;V=F z0t5+8a1X)VEhG>;1cEyOLh#`3?u6j(?rwdXtiATxo9uJ$efPab(O=CPJ<3Lns-E8* z-T!Y_h5xY%$rPj07W=HJ{K&nEeA~}if)AVj!~(U8e+c1{m#R?|+knOUSV5aAn~Tg) 
zk8XfKR1(LYft$9kr9toIQP3fBFM^$Jr_uD!mvoF^Oim1-4590M`=Xyp zh69UtAMfOZ1J2^R0i9H->Q!X(HZ3-Ia|)hROC*5QLGhG3z@qtkGZHN61^XDdyMkSs zq1}>w+#DK2T-~P{y(?JhcQEl4UoU8Uwcyh+5Tx{3mQFJ;ItUA5u z0(w*T!1?+BgXVtoDf+tp&W6ZtEqrhTm!{EGmm{rTpbLqGo%r+M!&^*_ZKe?Eq?LBk z8y3qJoA58=8A;wq67A;(-wudLSakd^)F|df@>>r-+GJv}zB72sO0b}f-RiQx@tzl? zIQOKqxJavwvMZ!=QGQ^&(mdvZC-N(02`4=br;Ol+xHt6C4CxDt8`4%x0g-E)Z78GI ze57*|cO6v8U682taHhvm{4zM27BPxs+vPEv6q8Cwm(;1!NJc227Sk5fpyG`fXSXkv zA*)J=HPkfw@S_$>3wP?#D)P2SUYEA?#;-sh7y6sntGWSFeH;lj3zgv_jPa543R>uXczU=uMRzij&b^X1beCXyb@3_hIw5-KYJ4{Q9fgLS(0ef<;--*X(= zIZSL#KewaOo#YADy1l3iuo|PBFBX#v!xDOx`V`voq;`qjQ1FDT<=(4xge}7=WkB>| zgh7-me&}*MtrKgO|fed_jMEO(w{TD9cT+I=uNyG(7&nq2BrF4sJ zdTb6|5x)9Qc(ZEfY!yrQOI)7_BNCJ`)D*j^v5+I<(mycbzT`rxe*`+-!xhWO2h1H!8K(1i=BW7j3t=)-IM!a4-C?gAzk zn@mnu^7UO-ir%cDNBiT!f`&Q8yKpzi#SJCih!F~LsUX85px3L{%{e3!q7??*2NyWk zgd&peHvX$`WUs#KHSe1cnIH&mVUVKEE;aRr;1bd12O|`fb^_N(E1RA2Q{+Doe?mvc zrF%)-7N~g6BcZnl=u65mxH({$5tG%A=OS1w#PSM&8!!2xfq5xTK8nUz1=W1AIpkk? zoJI|$2nad7@Me99=h95@IN)65{q!rUL*ZPcCof|4lbnFwjDRx1hRkN$OVc{eAmP5L zLlOBlA0VLr!g|6ohJ#PPB5 znJc*gMoSbblz?^N$;}z~qf2R>FW{DLilgn~M&x}C?p=!K7@mfxsQNr!+o0jKU`!V1 zo3U8Pa5i!3b!cTH2JI4jS!-6wE#`Z-H1MYa#k6mVVxk>aB^SCmKcaES+a@QR=~1EK z*?I^!%)3p`GnYxCj{PQ%shdzJZ9f@|7IE~6LTxs#Q{e{oY2R-qL8sGLW77wLPt*;4 zXcb!8R}^>XrySy+)?oq$25XxllNj^$2DSzJse5ExI*tcUUG>^&H1~vxpoO9H$&+}W z?LQ%AVPYaL&6fy^uY;}-{Qdfj^DlB%!MAt#p#i8zxoQ+)Wu7-be^>_MBRgWp@Gcz> z(nK`JAH8AHZ<+#$%>|Is6rF?9s3ku7ap9+it?XWNZ8VJ^h8Bsldc-ykt}`;PC$FQ- zBOUUWJr4w$QepWGRAml0k4+CaNoyTBSP5)OZE|`YLlt~$j`3Rn@7JNQc$&RDoC#qw zHp$QWphGEe$M?ogp=}cltf2;qg;6kE4r2zTIJM^$UOu`1dG$x=hlSRf_>aTn$pY`s z9S$b7P>X0RA1NC=r7b)$aud6mJ-dE1-y768I42l1JdnG0c&@-MF%f$1BPqr#c*@we zjq4M!n|N67-bR9u)BvzU>}|zUq>^t8Ull>!I9E6qzkmm93WXW;ZQ^@EIo`XmQC-Fh zoj{48;&+FsOBZBt+y`-wHg}UGZaVq+Q=!N%1CY;;Pekin2p1MUH$Hlmi*`O&MQ|DaeYguQQ-^ z84gVnLn{yVJ%GIx_3h%~HdPta%N33qRId&;fZfcJjLmZ&f_Ul!d z5^jwCdTs-~$;+ROtHC!bmkK7FRD+0$^Ph*L_wTgCgm)PS!s}y4$LX<>PcK3n70fe! 
zb2ATYMC%Q{K(`ST0h~hRQv`e2Lk8VEKMF0E6*X7$Av2)X<0YXA;&F(A#~F6mR8Noq z!koPhA6iTH54Gl@N^BRhB*%5tZ1b%@uzGpQ+H;TOyY#oogrL^#H>Z!vLQwSACLs+P zwRMS=)gO!48*wCt2NYV9Tq`c5cMKW=ms?egtnH~k6cx-yPRL4|SV;8nTJAHRy)g8$ z&R>!cy`T(@ycS2E5BylvuFzZ&TU^e3g>&WucK|WggLg65R_@d5Mw;o=5X`4L7ym|F zEAd#|#4vSBe39P!n(A(^-&0|YfIB3jNuz=|+a}Oy3ZRj*t+r=HkS zWs=OEEZ^jgWm7%!d^lC}+Q=e)l5&juGET4)F{xqijXJB*x#JG+Wy)*+I#SYka+&;6 z=vzzw`Axqeq(zbzGttI5QV)iby=uP2<($;R%jEIR{da|ifPKz}Cm|wQpkRI6~eV))A_C8AcKA>-@D!%(Z`9oOK9I7QLliOU+$nywUwiW zx;Q9G)iN%~RGfF5pYyHNU7=q#;0GK6f0S2KGF5g_FLE@9xElN@c#W0$g^;NzB4cO9 z_jXJ?yAE%W>BZ&3m2~ES!mzz8Qk=>iAPJ2 zS9u=Z*rMyi+?Y!~r49LfuZd!puQg+On~1@Y2!r?FwysQ*3tCkpf`09UD6af4Plf%* z`J5NyqRZT9&e6Z*efM9=`}%$wID>1q>&kOT&jv5lV#Ycm=bQ26fa$jok^$SKd*h_V z4fBIThqAlgLE(De2X`?wQm=i)Iyt>~XGn`hZdgx#IU(m4U2|?+Fo>ABAm5efu2 zBj;03#bcK#M9>O_@oyRR`D7I7`o`V*P_92t2|GZk>#XDDVSnZQc-Xo_Q(v^C5Ao!= zel_>i^29i*7Oqqh(wKT5r4Qqz!j4iMl<4sCuwkNhm~qL=;l&AQz39{J<&V&b^u9Np z4Vl!P*UMgXl*6}&d`~LSrgvKH_n$T5v~7mF_SxBU?IW;~J~&EoZQPAx?6^VI!5@rGEyp;3Ukmy~;Wv?6xF z8{Swkk!={tt9JK5Y1^IwFz|pVRKLk6Ol>dk8sw9figIet`E`>7@!&eQN6+AkCs0tY z#VbWR-fwl2#HCmmgtV6H(#*0-BSM@;bgL@&LnMB>hqv8O2^jZTLm2zCr&ps9u|)~4 zC0{fPej-6W!g${7Y>(R1jU(0NiD2HcZ4K%oygYa$WWtXR;%4o8ydv4xjl{I(Ay2*R z08ca-;_gItM2NK8>9w-G$`^>% zHzxIY8!(bwEbR$sbYeujY)~^4Uci+1_TR1aLQ0&}3>vVL1BDAbnx`K_9XN?3!D;Pt zaV8P%q+ANSNNABK={)P05TS|?6j-E}v!ssL&02KfSUXgwuF|L`^4g|TWEDk$C}DMM z`lm}_i78g&+^~r#XQx~Ka1AS1L`)AUBUS7)g77JD(xyZVB@OMDpe1_{FD|~{B_SGnS#p~-l7o( zlMPUHv>OBX#y;b)cK2kv^<&mX$CTCB<%#zx42RY0x&q5r;C1^uEF!_As7wBv)WX@r zTlX?$>6@R&C_#{FFdts5i%6gY(R{Go)chm6d`LqfYTo5!lAa&ftg0czi`8cwFEi|! 
zzL~o>_6nwDEFkUX@max9U};<`@PhD@P4l_+Q|M>XYqkEJkIn6H=DLppCv@Mh?ti>?cnQlI-9VR2|WdgsAzZnqLq$ z#ZeMCwr&}VbnM4*-l$%%p7{^It0B174Bk&_^Om>2q4cMpkMewZgw*N$eo+1MnlUtJ z|ITI|ud#Y-d?9w7;f&HBnr8pz>QM7zMgykc&dj|-D&Dh&e5rh+H&6_H8wMbVnP1$vuZ&|X9@uiF^z9+{Q+ z_bf~VzQmcBq()|ofyJwtl#PlqF}=U#Kfh7;G$R^})Yxf1OP!p;5;PKIHn%}hJ z6oRHf9&u~vBXyy?6iT1x^NkKw=dnLv?!4u$ICdgA$ThLO#9_JVE(A%z-u+{s)}X6- zG1BYUW<<1oQhg5gy)dK4k?O}YSHJjBpY9-RZ^}+sqdnWG)ZoQ^`kW*hZfs3D^l+lo zyH1Dn7XG>l&hatCPS1aE?z!UfrJ$br1@NIb8*&Q@+kQ)JaTT zM+P|x*kU_$JCx1uhG5s6*Yvn)2YMvdFqZjEB@3${9n6k5-*`=gG#D(-v4Z!_mn&8W zkS6aSn(ZEi=mj_V;&){G<4RM52odlS)}$ffU`WhV zV#%m(m_m|Ij~UDP{;bAZ&Q7KER9EEEwqww|DAC*!Klgd2ADJ`p9LijzEqEvf1N9nh zN{M}^;ql&r;B_Sl^<9YLdCD8e4ePLv>eZb@dE>m(QtOS-_r2<)`pX8%MhCtR)!XW> zx-6W;b8Fu6yAt$HgGa-X=WmrU&dps$sD6!SC^+r^N?CJC*z%VlxS1KWrT#jbj_+HQ z`s$nk#a4IwM0SU0aADdOdC&U2=+=qy3`#e`5QXo&kAPRq7P#7GdH?Yht2un5-?5K_ zpU8_Dc!Z9Oa{)-N||b zgTVO)sAsO*W6L?M(Cb#>lts@jk6WPLg~NAy@8c!d_f_hL@A}`?XIxQ^nw(}_@3Zd- zuM?^2F@B@)k~w7@L;4(UJ3COc`AS~Y@jRyQ7=ZFNY0dt|?3eo6llL+RUP;q?H*a-Y zw*_?tJqO*077K6J#_d(Dr{A`fdyy|MgwzerQ|{+nx`l-7gGQ-7pKnBr>aLw%)StPm z>&Mz2pNeoH^uhCrhU?u8>2&VuY2e5mLM!af34I_pqBK6XqBqc#o9}Y@>0X|CNzIL{ z`Ul4ulAo+asT|H}ZpKD%Ep)V>_0tpknIXDRTsY543|sZt3f!S2-Tq<<$Nhr&h`sh@ z8HI^@@GmLm9w4F)iNWGd=c_}9jGk1yf;P@UZI?Z@kp%?oMfjO&ulxh73RF&r?=dJ@g zFIeX4W*67wf_Kv%QG9#3mHX=x1mT@{p#|>&J{P(+!cEr@pBE|t95ZLn@fimtP+RO- zx=L1^Z!g%c4nP1GG^5u9>nlQ=n%e&KZJ{eaBlNqHYOi`a`XUK(fFIfdvf{JnwiHO{ zzVEk%S_Yy%t*`P6<_9V>9I;U%yuXn^H@{ixC%r_eE@nyGUlC{hY|6sslUf6K8gt{6 z06@&RePTRM4>;x!Y>*~2P%r$-Mhc^vG`=DL*`rIF+KrA@qZQxg&IGfB-;q0i)80%e zeZ(G4M;uOH`wG2LEZcI`vZz{~?3?E8_Kf1}ZpehT zQVb%W^X3!2s?=M4MWj*e=v)xUCcBlclFt~E&&a<2r&VSgzIu@&)`d2~-D{SsK$V53 zPhf4W#%e{d5pADu^HRP$&=?WvQNz28(S=$8b?pxRkO@N8XifUcI<`q?)w9gxb2@ot z>=iZ1&(90Ho;We%tkeEt+bA8@F6vR0|0Sl;u4=DZ`1Gl5^JrK3nar%rCf|BMe$*3h zdeiDpZF(W+)iv!{Nnq%QUQy>uPD+lxkDBYUkMDxrILf7N9?=TY+=WXm;x7eP$)llq z(;g*Z9Qws&@Eph``H%`B&0#iJjyL43ONyX~&*_!O=3N-JI|+{M2%LP|)==FQVEO(z z_uGY^YseT)a3i1yrIEq(4dn$HAAAZP!U@L(R>_A7f9U9HWji;o-C=gsGp%O++R#${ 
zNkBH*^x%nCVp8dzsn=^yz0jX;;~K2TivsrBwQt>p9gz;rm)?;STbu-|1o7o$m8Z5c z$@0eMiYHZS@qfnim>SAy`$z($D=-}mO*8`G$+bN)8u>_Om-&kXtYlz^Oxg9V>0CXj zrQ{2y-xxw%(?>po>(>RNoOW`aI`0DP`dHMFJ4IG$DOxeoU83{kC~{_f#V912fchk^ zDV}^H7(ytXSQj-%q;SZ7nHzFfX&e3-ut0y-Um4R?bcRBf2x>7cf+TV zX#udpdnNj`7h;i$=m@`)1T!Yn@07)r220Vjn35#HTm%MLs1kSqu!072%Xe>NfLh;E zAWAz0frHFnnPMOFeMzw|C^O2Q=MiQla zh+4e#leb4f2eRbGPT8ix8c>cE95HOwF5#$rIBu?VIR574p~K0$C9_=B!&l$cLoWz$ zK2eQY)c~hX+Q%m6ZrMTa$-*D5jh1Y3sXt{`SA49F7 z-+Wqp`t`UMQ;#v|RM}-zMS}}J=qhy@@p&Wa<2IMbRZl&`7_oXPT8revp3%+*%{_b- z<(w&9JN8wM+nf_(o@;o>-=E_y;Qty4RG8JjdMbFDC}!c+Du43QjAK?eYx*=|!_8_c z9m^EclROTw!7*f*75NxBUs;b%>0>?JF) ztT~dArPmtr(u=v(8t`t&)h>QYVHx9ig)kUg#;fSCynqJd{aF~NbnhQ4^5uZh5pJN> z8qmFIoINlSR{g$9H`_Tv6??u^6Hi@v%GqQYQD6htz7ybHVJ$BH>b>>Qh$9Atlj?SI z^p>TU;G5(E8>4L+h;6%tcW~YgA)pTwi0Q?hmF1OC{Sumc$9~JR0u6sBd3@2-buGr5 z;kJum8YDs~im@DRog*_rwcPr(m*|-MO5^1H9s1HpM(oM{2Kg!Y&=YqqCvyK03!0Ws zb61F+!v~Rk#uL8__If>^m!jW93q|ohG#23+vZhMp zve}rK=quYin{7}%2^>*u5l{YoDR}UqTrwTL^CxC^W zX#@B}t&3y0yi#ZThiS^!LWzLWka{P#0^>pAf{YsH{_h(Zg(=P!7Wu5yOhVS>9X0u4 z=XWPxvy(SJ6fx-bS&T*vSRkDV=vt|I68SmaIGz%iPf2ttoS!Lu-HNYxq18TcWK}Cb zW!qKNIZ>}*TvFO`D1D%F za(;Y#7^76H2eY6W({!{6u~c7>)H2}iYjVHh^Kc#EmEt^;cRJF{zgTOpJ}a55|D3Qs z${wBsnv?WAk#SU>oCzJFB`+g+Blu){`-^IFde)?j>^Lcl5xL5<8tka5!k65A+px=Q z{L+N|_-8s%>=;k9R9-99nq>;|d-y^4cwIYk4fjb2;-To^hXXmv`CuNi0+>kv5Y zQ^vEn_+;=B8_V9;p!=#XoiWu=#C3u!UvS0%>z5NrRAQkh&4mDp&G+rNuiMGOo88-M zi`X7f)w!j6`p<0&t)2^Zb};MPZn#dS{W&kumM=1*=ZGYRpYf z4^@QPpI)TcDU#MnSSuR8@vu5I+07s)2yk)Jxo)xO4GOEtXZNVdpQ_h;>7ZQF=Z{lZ z>V5ZZ0oBR7906)8lIGjk9y1DU8f@|x@%Q(?^&|;BedMGm=TV#g{EUXt5AJi1<4KFn za$Rb4kIj#&DzIMMQ+>?%A!I&&=k_uZnIX33@2oUG?kvR)lniJpYo55Rut*&k!HwB1 zeVfgX8C-f}trI=YiBP@9j%`%kV_8%vcXU0X_1Gj_z%r(Dkt8U*?fL7{VN0Rut-$>( zpzD^AQ1n#4(Pw1C+hZiax)>*V;=FnI$-tz=Ta^|~=QhLo67N$ohnmK3MtgZ9uQo7{ zruRFhiZZC>Po-c{(pnYevdT%7o7M# z4*TD@?SEb0gPH%0oPLi?u5%B}{x>4K)^Ff;7{WNL_B~27m#(uhW4!-sem~-S+5PLU zao@jtf5itwn*SSi{r+9Pf55K8#P=$HSG>Qz2aW%$Sshpv7)ZE@H4HQVEjsVLt^ag~ z|2;G$*bH=y);fcU>>@;?Hl|K(o) 
z3Y4y+IQ+eZ3)5>~eP#HXCRrrHkEw;~L^sJ>Y}_<+VdS$D&h<69;49$JP;0lee8%}{ z`jb>Ks9iDElMNyM4&X~myw^Lzx&1^*m778K75+goAyx66?4uz@(z;*D6nz{8A_p@YHOw#<}I1yGZi5uxbF$Ao0)? zXyo4tBnz?p_FR<+pD_A)kj&$1&ISz?t4cBEi;J}{x>7rBY*a^cZWl)RjGoC)0xlF+ z&v5{fRyu!fd>CB*|Dk>UY1w}wlkdj6xhnsv>1Zr>8F7?JG}H=kd@)(pVlEwT_SiWi z3lzg^*481d8Ai%QpkiT`CWTNd9!!-yua4#;N=EA=)(nqXRrUP@K{i#pYW@*P#=GU_?0Qh#_Hss-9BZKCP= z@9TZZ>XRNT$Q6uux#N0D%CB^}3iQcd-H7Z#kCKku%Fl?S37-WCp;W)!{{B`^NNBwO zjP)5>9r83;28JEj=x>SjZ;nRZ>Gb^=1`~!6P(U>63J5 z#q;Z5WBkVIm9I#g7|^LOsNT%u3W*tq@wb(ulEf}8 zl9+6tYhBVmhyzJYH9sCvi|MMETrRefyUso<&tcDr+4|+Gf~ zQhV9gZxDpr>H^1p);o#{H=X&{UE>V5?)ameNuA|q)=qd(dzi$WSS{tq7Z?xGmBm{i z*S5aRv^pUX9++wQ<@vc{MEQh!+>TuRjQ=O2Fy&dQ+0w1slsxAUwtJ-Wh^!~bd6=~< zQ8#P+PBxNQKzPJGc`l9jGQVC9M&)xtwVPgl0`wBU-T;d@&ELx2cnw_mI>3$}uO3ak zA;5r8SA3VmK%nV~g~R*G>V1(Z&X&z@iW_`L)eLzqA)4va{Q^B(-COy*=bB?ikEVZ! zpSz5i((NBg73P!~UxzC~sMcoG=9L8Kl7OvjxR}dSmEde%Fj;^R&hG(oqf4e zzV4bpv7}z8ZFQ;p`FTF!%SBAXmZcA2F!k5{O*7%GPq?h-Vh$+Mwr5IQtA|apl_d#|yk%TDcP;hOv6^($dE#?m;Jt zY{NT~^`$l}W{$!Pl@S|^9Lwh~7l|fk_BfG)nhn2^45Tzz;^=!Ic3#EzrAu>1aiklK z6@5{6y>V^GD|1ITec5O`HOlOrTAA-?-~Bb{QAet}P~kUQ%ctJD)&tckz|28Xh<)3E zfkYtYfb)6(R&wN=>~{PVyT|~ZMMQ8YYUKw__V2Vzta_}dRVSgo*4;zs7}qjdtY*(I znZtee4Aq-A`DV*`J zYft=5J?bVajN<~5!^*h#7JK&9lGjHho4yngYrmdNqV8Hge4R<_P(Jg$t3h*=jI_O< z$$GQCd`fNDql}AOX8#7S0R6-CYFSM}ik@z+0?Xm*I{(j+o9qbt{*seo?OD_C)dcqO zm*X|BODiCp77jywVy2E+YMLVbIeDuyxtjep*18#;CR<)*LiL01Zf7q(=bf;zbLtI_ zx)v{WCqF`+P1qaK?tdOcTts=Z=cwS;^(bbKPv!?RXNf~1-`wyMUGkz&MV#z%g{xCM zIQ_;FHY|tD&ciQro~G=)Y%D}HHr9Gh-^@xUM;SS=2Vj?rVG+f3*LgiFXOpnGmDpZu z(=c+`ZWmi&lprvrlwxFs^6T9#OGCi@+ zyNirgP;gHX<(LVs4>!ieUN&L01xH|BT6G=!1b1omuVwSXejJsEPeyMfzx2P(wTb?z zy=|EM_29fL4r=|@t7rssaQ`LDK-&uS4GuLasGKR{!g*x8RsgB9%&`0vJaDUPk| z@Z4S&(bkVJW5sf>yoj1a6Y*N$R z8ZF>Nd6QFP-f^t-N1lIO^X)ii7Eu(jN2icf#om02f!yn9UJE19HuoIOF3G+)`S`No zTnl;7lYs^@zu&E<>m z3Ac$8Ly0#vqRR4HiMzZJr4l*fR*12bMz6su&nEBIIHur^OS`E*be%BZ57MOhBl9ifLw5CVwzD?YHuTz*^DCEI+T6KoR{6Uw3-IiUDy#Uyw0)fo(csUP7; 
zwjy)ph|XND^72-r&YT^M;a~<^I9NT}u176E;9$@8+l#^vda|y$pi@o$m(t{NV{3=W zk{nYM-5mzADY_~Hygnxph7mmZnGRKT&!2thn7}TQ({mX9jRhXuVG_c$vcpWhqcUKPr0h#gaRz z<0m4^w2X5}g3sG<+*+RX)eq<8!u{Fe93>$wmkK?5vU+y7RG;; z30b>9im6-H`cf2lEp?8bXcM87kGGiTkwbs7mFg&8B0OdtX6L*cX^asO5_BVLaM?ZW z@zo_PbSGhsj#(%Ci?=;}x6g4JXC69-ZW4&jdGys5mGxb|_t7+~Zsg2lmD}r~SzVhD zoaxqTygGkZ+eWUtV{OYyw|9H`G3-=y$ww15L6@_*MR=NL2m4UbR}*OqS~09|s!a&w zqJKyY+gpIzzv~U#kHsnT>3V%Mn$7JZlk5GGqSVaXhiOV>@y?z)HwJEf@aw9blc6RVZ8FEcKTgQs9gt-zl@z$R{=@vW$O%2bpSvUvHwj#j=&yLuER zEFmE{Wq@FwpA|>+-J2zP$r-pU6cW~v%xuE!r0x=_ycVLjVR$HOOI3S{jU0ENNAEP6*CMe@+acil}rwlS>cP|ra zBDJ@AqbHboSMyA`VZUIg>ydocBcKNPhHX$_sMA^VLA52=gxw^|e&F>OLOwrtZ_Bot z+Hh>X>Q<)8Ad4!+s4D4835{)nFtW)8jVsqenHcb>Gt1!Ti1KDo{;J)zkQ%5mD$XR|L8DcZ zx5Sf;Gmn@RH#qjay*&Xd18bsrS;o)epHmS^Jky&p?5E0WpR>6?Nw|q_RB)sljA@U( zB{TO53^-2m*D%RX$ychT8e>xvVb$7$_+XCTK#zUef7U@qykGatdsY4*uvg?k`k2Am zR(-?H^qOvzXO)+Zfxlet^4E)Q;~T@T#z1f6aQNrA*JxuAO*l;k3%CcuARPGl=1cYi zEEv!=*#-Iq{ssJm&DUpKB7vu*2f2|mA4&#a>K`B0i+eg?3 z?8EQF*&x^eY~XF+iV%tbMTU5_e(JtDO}%kv490tix`@tjoN&{A>wfFLW`0&pV3K3= z1!lfVcm;R`I4xhE1-k{{!Yh670jfS+fz$|G0JbIQGct$@nFk&Rz~1C9rR39ryZ|_< zrLMxMBG$l}L6`~Ka{+AomN${jdeC@5=>F zG3qp4Tb@sVujD$B0_t8vvVz=p$f9j2jo}89W{dn4-9u6j=j+kp_=W0|$EiPE61ipASHR5)NntG{QH= z{R~d+XT^WMCd<+i85mvuLeoF`*GCwHg=+Dh*xGKB)rKvr zZAc_nkR7;Y9x)%$y6v;I*xG5AHbC2qKAJ0r&VkgHeaN!yngq(dbQ_u_b;I3(Pj@HY z0g<}l>oBDAW?82Gs1w$g_sKrX?9s8UrT12PT+67}@FS&EBjMhu4)-TpH!A+WdPX!8 zHE>FKeoz^f7gHHq=?gd2q&iV=>yEjP3Fp^px|EESjQtpUrj|3;T$AiXzO6RKo}UOi zL)3m)8_zb?h-3&jA#Dqc*^hz7s_}ZzR!Nc-a(nDdAz8ta9hyhWRYM!J5o6kEb04Fg z=V4z6Dq@Vn`5>&p#ljII_#nYt4=b<@RiBRwlLQ?TfQ;bp>(>NFr)=6q=wxF~$S z8Z^kokz$Cr*b|twY))}zEXN*WPhsB%wb7g}#T zjEbZ8I{#clp)|$N%0#%^{aty2tQ}*X9JfIZF^Sz#*I?&b9|8vg2NIdDHVw+PEyn_& z11{6g!L}PsOvpB-jYw6Mur;dY&RjLUKYy1^l_7uCK8-*%HT26DxG3MCCPaN9TPj7& zP=sJ#aeX#LxD^C9F$Ef|5EnqM82TqndVqWy$`Iv7K<^GZy}KGv;fH*2cxiZv_cA=R z=jy{Lc$;Y(b(^tpDuDxVuycFh&%j7*| zEaYoEGouGQf|lFlJiL}Riqr3=g>=XQ!r0!tt!j80v|c5SMXOfxlPtp!eT`_$ZCh)r 
zE~e(GQ%@J2lc8R+;?=JpX(vB{52aMO+rMhXOmPZjcU~=g+g4xCRfg(nEEibi9?IwE z=4z^xR;Vr$@0!#OEIEN?`orjPLlu{2Z~p zy$;~B8Wy+P(4{qNAL$ZPJEHYVX|up~@Fb=dVUBbUqyDEB$BDdy!lArX{L*Qy*Bk~P z)YBz?d|GLZ-6__Uu`=3~v4+u=vDl{$_K-$bw!|f=c1UYfX(L&MsiOv??sDss${lh# zf%mU$?ko!Vl4nStEuy|mt$1|@M*W)l+A7^coAYtEyP| z%8mpqNKJ*TGI179A+!7@@p_Plu(e*&ho!N0<`P!J@eb=?RmP5ikHQ4xq2-prgo%bJ zL2m3)$s}O9>?cxTVUZPE2W?f?#BEc`yD24;&Q2;7UZGwYIywf#RJ9&rew}mMJMVzQ zk21}kZl3h7G?1X? z!>`p6;x%VXG2}VMS{dbjwJQnNt2B7XWN#SAIvvT!Z&?s73HanG+V`HgUZ&<|*SlWY z_egV$Mag6&q0itZD2>k4%bLI&qH7)T*RGy|N+6RpkVc$M!gLw&ZF6^d5o4{@PUQP6pB$3)<* zD4gx_G|`_=R)5o%{{7_n|0bj&0b>a@v^W3#Nd5nxkP3`s^nVvp;Q{{_(Jh4QJ~HvI zaf06s{5i+}F;4LB)B2Bb0w5O%ogE0Hg5`t7F8Cn-MyCoR+vU7(|DIcw9duv%Z|PKd zz`*|;CHVc&`(L93eD^V-`+0z2tM-o=2#j9#FBym*0^?u(Tl#mP3=A9A-?G2SSV15j z;D2fU9YX;DLI2dee~5nH4+PB5^|$a}pTPg+CyEAUCJyMFvgXDP_mQRFiv;|O$Q5+2 zt%MHxO9-3beF+wdy3e@(D*X;PDZ#qXn&G9b?%b%8v8 zwg-Yh{?OwF0UyY~+MaoAIQMm{JfwCGA;-g{~vRM03X(c_4|iiU@|Zl{~zsf^Zsr7f363Fhx=i_Ur34MzA3DAGU(Y{@A}DJ|OsyK43oi zXCFL&?rAQbKYSO=#q+0Maq<5#Uoa51NdInU*gW|Dtji7lV=cklFsJlGT z^#DK61M+eI;p4Eq&;8IhfP4@j|ARdU)8l_=dk_~)@qz!r>Vo+n$Ut2DTo2a>rpE_* zU;_}4oBv_Iu)3g!egONd0R2%HcKSV>FH8@%3IE+z_cHGPAme#3-+Mj2KV`5(_K)`d z?JRM$H-HhU+oKB!!R`#q-A!P380efzHa3nhPWSuIM^0&LQycVuA>lT#cl`b72 0 - task_arg = remains[0] - if len(remains) == 1: - return task_arg[0](param=task_arg[1], value=value) - return task_arg[0](param=task_arg[1], value=_call_task(remains[1:])) - - def _task_recurse(): - return _call_task(tasks_args) - - return _task_recurse - - -def _tag_n_use_if_and(*and_conds, template=None): - assert template is not None - assert len(and_conds) % 2 == 0 - tasks_args = [] - conds_iter = iter(and_conds) - for par, is_true in zip(conds_iter, conds_iter): - if is_true: - tasks_args.append((task_return_value_if_param_true, par)) - else: - tasks_args.append((task_return_value_if_param_false, par)) - - def _call_task(remains): - assert len(remains) > 0 - task_arg = remains[0] - if len(remains) == 1: - return task_arg[0]( - 
param=task_arg[1], - value=task_replace_tag_in_template( - storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id, - template_str=str(template) - ) - ) - return task_arg[0](param=task_arg[1], value=_call_task(remains[1:])) - - def _task_recurse(): - return _call_task(tasks_args) - - return _task_recurse - - -info_flow_ecs_task_details = { - "sim-prep-info-aws": ECSTaskDetail( - OCSMESH_CLUSTER, OCSMESH_TEMPLATE_2_ID, "odssm-info", "info", - [ - '--date-range-outpath', - _tag('{tag}/setup/dates.csv'), - '--track-outpath', - _tag('{tag}/nhc_track/hurricane-track.dat'), - '--swath-outpath', - _tag('{tag}/windswath'), - '--station-data-outpath', - _tag('{tag}/coops_ssh/stations.nc'), - '--station-location-outpath', - _tag('{tag}/setup/stations.csv'), - _use_if(param_past_forecast, True, '--past-forecast'), - _use_if(param_past_forecast, True, "--hours-before-landfall"), - _use_if(param_past_forecast, True, param_hr_prelandfall), - param_storm_name, param_storm_year, - ], - "hurricane info", - 60, 20, []), - "sim-prep-mesh-aws": ECSTaskDetail( - OCSMESH_CLUSTER, OCSMESH_TEMPLATE_1_ID, "odssm-mesh", "mesh", [ - param_storm_name, param_storm_year, - "--rasters-dir", 'dem', - # If subsetting flag is False - _use_if(param_subset_mesh, False, "hurricane_mesh"), - _use_if(param_subset_mesh, False, "--hmax"), - _use_if(param_subset_mesh, False, param_mesh_hmax), - _use_if(param_subset_mesh, False, "--hmin-low"), - _use_if(param_subset_mesh, False, param_mesh_hmin_low), - _use_if(param_subset_mesh, False, "--rate-low"), - _use_if(param_subset_mesh, False, param_mesh_rate_low), - _use_if(param_subset_mesh, False, "--transition-elev"), - _use_if(param_subset_mesh, False, param_mesh_trans_elev), - _use_if(param_subset_mesh, False, "--hmin-high"), - _use_if(param_subset_mesh, False, param_mesh_hmin_high), - _use_if(param_subset_mesh, False, "--rate-high"), - _use_if(param_subset_mesh, False, param_mesh_rate_high), - _use_if(param_subset_mesh, False, 
"--shapes-dir"), - _use_if(param_subset_mesh, False, 'shape'), - _use_if(param_subset_mesh, False, "--windswath"), - _tag_n_use_if( - param_subset_mesh, False, 'hurricanes/{tag}/windswath' - ), - # If subsetting flag is True - _use_if(param_subset_mesh, True, "subset_n_combine"), - _use_if(param_subset_mesh, True, 'grid/HSOFS_250m_v1.0_fixed.14'), - _use_if(param_subset_mesh, True, 'grid/WNAT_1km.14'), - _tag_n_use_if( - param_subset_mesh, True, 'hurricanes/{tag}/windswath' - ), - # Other shared options - "--out", _tag('hurricanes/{tag}/mesh'), - ], - "meshing", - 60, 180, []), - "sim-prep-setup-aws": ECSTaskDetail( - OCSMESH_CLUSTER, OCSMESH_TEMPLATE_2_ID, "odssm-prep", "prep", [ - # Command and arguments for deterministic run - _use_if(param_ensemble, False, "setup_model"), - _use_if_and( - param_use_parametric_wind, True, param_ensemble, False, - value="--parametric-wind" - ), - _use_if(param_ensemble, False, "--mesh-file"), - _tag_n_use_if( - param_ensemble, False, 'hurricanes/{tag}/mesh/mesh_w_bdry.grd' - ), - _use_if(param_ensemble, False, "--domain-bbox-file"), - _tag_n_use_if( - param_ensemble, False, 'hurricanes/{tag}/mesh/domain_box/' - ), - _use_if(param_ensemble, False, "--station-location-file"), - _tag_n_use_if( - param_ensemble, False, 'hurricanes/{tag}/setup/stations.csv' - ), - _use_if(param_ensemble, False, "--out"), - _tag_n_use_if( - param_ensemble, False, 'hurricanes/{tag}/setup/schism.dir/' - ), - _use_if_and( - param_use_parametric_wind, True, param_ensemble, False, - value="--track-file" - ), - _tag_n_use_if_and( - param_use_parametric_wind, True, param_ensemble, False, - template='hurricanes/{tag}/nhc_track/hurricane-track.dat', - ), - _use_if(param_ensemble, False, "--cache-dir"), - _use_if(param_ensemble, False, 'cache'), - _use_if(param_ensemble, False, "--nwm-dir"), - _use_if(param_ensemble, False, 'nwm'), - # Command and arguments for ensemble run - _use_if(param_ensemble, True, "setup_ensemble"), - _use_if(param_ensemble, True, 
"--track-file"), - _tag_n_use_if( - param_ensemble, True, 'hurricanes/{tag}/nhc_track/hurricane-track.dat', - ), - _use_if(param_ensemble, True, "--output-directory"), - _tag_n_use_if( - param_ensemble, True, 'hurricanes/{tag}/setup/ensemble.dir/' - ), - _use_if(param_ensemble, True, "--num-perturbations"), - _use_if(param_ensemble, True, param_ensemble_n_perturb), - _use_if(param_ensemble, True, '--mesh-directory'), - _tag_n_use_if( - param_ensemble, True, 'hurricanes/{tag}/mesh/' - ), - _use_if(param_ensemble, True, "--sample-from-distribution"), -# _use_if(param_ensemble, True, "--quadrature"), - _use_if(param_ensemble, True, "--sample-rule"), - _use_if(param_ensemble, True, param_ensemble_sample_rule), - _use_if(param_ensemble, True, "--hours-before-landfall"), - _use_if(param_ensemble, True, param_hr_prelandfall), - _use_if(param_ensemble, True, "--nwm-file"), - _use_if(param_ensemble, - True, - "nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb" - ), - # Common arguments - "--date-range-file", - _tag('hurricanes/{tag}/setup/dates.csv'), - "--tpxo-dir", 'tpxo', - _use_if(param_wind_coupling, True, "--use-wwm"), - param_storm_name, param_storm_year], - "setup", - 60, 180, ["CDSAPI_URL", "CDSAPI_KEY"]), - "schism-run-aws-single": ECSTaskDetail( - SCHISM_CLUSTER, SCHISM_TEMPLATE_ID, "odssm-solve", "solve", [ - param_schism_dir, - param_schism_exec - ], - "SCHISM", - 60, 240, []), - "viz-sta-html-aws": ECSTaskDetail( - VIZ_CLUSTER, VIZ_TEMPLATE_ID, "odssm-post", "post", [ - param_storm_name, param_storm_year, - _tag('hurricanes/{tag}/setup/schism.dir/'), - ], - "visualization", - 20, 45, []), - "viz-cmb-ensemble-aws": ECSTaskDetail( - SCHISM_CLUSTER, SCHISM_TEMPLATE_ID, "odssm-prep", "prep", [ - 'combine_ensemble', - '--ensemble-dir', - _tag('hurricanes/{tag}/setup/ensemble.dir/'), - '--tracks-dir', - _tag('hurricanes/{tag}/setup/ensemble.dir/track_files'), - ], - "Combine ensemble output files", - 60, 90, []), - "viz-ana-ensemble-aws": ECSTaskDetail( - 
SCHISM_CLUSTER, SCHISM_TEMPLATE_ID, "odssm-prep", "prep", [ - 'analyze_ensemble', - '--ensemble-dir', - _tag('hurricanes/{tag}/setup/ensemble.dir/'), - '--tracks-dir', - _tag('hurricanes/{tag}/setup/ensemble.dir/track_files'), - ], - "Analyze combined ensemble output", - 60, 90, []), -} - -def helper_call_prefect_task_for_ecs_job( - cluster_name, - ec2_template, - description, - name_ecs_task, - name_docker, - command, - wait_delay=60, - wait_attempt=150, - environment=None): - - additional_kwds = {} - if environment is not None: - env = additional_kwds.setdefault('env', []) - for item in environment: - env.append({ - "name": item, - "value": EnvVarSecret(item, raise_if_missing=True)} - ) - - - # Using container instance per ecs flow, NOT main flow - thisflow_run_id = task_get_flow_run_id() - with ContainerInstance(thisflow_run_id, ec2_template): - - result_ecs_task = task_start_ecs_task( - task_args=dict( - name=f'Start {description}', - ), - command=task_format_start_task( - template=shell_run_task, - cluster=cluster_name, - docker_cmd=command, - name_ecs_task=name_ecs_task, - name_docker=name_docker, - run_tag=thisflow_run_id, - **additional_kwds) - ) - - result_wait_ecs = task_client_wait_for_ecs( - waiter_kwargs=dict( - cluster=cluster_name, - tasks=task_pylist_from_jsonlist(result_ecs_task), - WaiterConfig=dict(Delay=wait_delay, MaxAttempts=wait_attempt) - ) - ) - - task_retrieve_task_docker_logs( - tasks=task_pylist_from_jsonlist(result_ecs_task), - log_prefix=name_ecs_task, - container_name=name_docker, - upstream_tasks=[result_wait_ecs]) - - # Timeout based on Prefect wait - task_kill_task_if_wait_fails.map( - upstream_tasks=[unmapped(result_wait_ecs)], - command=task_format_kill_timedout.map( - cluster=unmapped(cluster_name), - task=task_pylist_from_jsonlist(result_ecs_task) - ) - ) - - result_docker_success = task_check_docker_success( - upstream_tasks=[result_wait_ecs], - cluster_name=cluster_name, - tasks=task_pylist_from_jsonlist(result_ecs_task)) - - 
return result_docker_success - - - -@task -def _task_pathlist_to_strlist(path_list, rel_to=None): - '''PosixPath objects are not picklable and need to be converted to string''' - return [str(p) if rel_to is None else str(p.relative_to(rel_to)) for p in path_list] - - - -def make_flow_generic_ecs_task(flow_name): - - task_detail = info_flow_ecs_task_details[flow_name] - - with LocalAWSFlow(flow_name) as flow: - ref_task = helper_call_prefect_task_for_ecs_job( - cluster_name=task_detail.name_ecs_cluster, - ec2_template=task_detail.id_ec2_template, - description=task_detail.description, - name_ecs_task=task_detail.name_ecs_task, - name_docker=task_detail.name_docker, - wait_delay=task_detail.wait_delay, - wait_attempt=task_detail.wait_max_attempt, - environment=task_detail.env_secrets, - command=[ - i() if callable(i) else i - for i in task_detail.docker_args]) - - flow.set_reference_tasks([ref_task]) - return flow - - -def make_flow_solve_ecs_task(child_flow): - - - ref_tasks = [] - with LocalAWSFlow("schism-run-aws-ensemble") as flow: - - result_is_ensemble_on = task_check_param_true(param_ensemble) - with case(result_is_ensemble_on, False): - rundir = task_replace_tag_in_template( - storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id, - template_str='hurricanes/{tag}/setup/schism.dir/' - ) - - ref_tasks.append( - flow_dependency( - flow_name=child_flow.name, - upstream_tasks=None, - parameters=task_bundle_params( - name=param_storm_name, - year=param_storm_year, - run_id=param_run_id, - schism_dir=rundir, - schism_exec=task_return_this_if_param_true_else_that( - param_wind_coupling, - 'pschism_WWM_PAHM_TVD-VL', - 'pschism_PAHM_TVD-VL', - ) - ) - ) - ) - - with case(result_is_ensemble_on, True): - result_ensemble_dir = task_replace_tag_in_template( - storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id, - template_str='hurricanes/{tag}/setup/ensemble.dir/' - ) - - run_tag = task_get_run_tag( - 
storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id) - - # Start an EC2 to manage ensemble flow runs - with ContainerInstance(run_tag, WF_TEMPLATE_ID) as ec2_ids: - - task_add_ecs_attribute_for_ec2(ec2_ids, WF_CLUSTER, run_tag) - ecs_config = task_create_ecsrun_config(run_tag) - coldstart_task = flow_dependency( - flow_name=child_flow.name, - upstream_tasks=None, - parameters=task_bundle_params( - name=param_storm_name, - year=param_storm_year, - run_id=param_run_id, - schism_dir=result_ensemble_dir + '/spinup', - schism_exec='pschism_PAHM_TVD-VL', - ), - run_config=ecs_config, - ) - - hotstart_dirs = Glob(pattern='runs/*')( - path=task_convert_str_to_path('/efs/' + result_ensemble_dir) - ) - - flow_run_uuid = create_flow_run.map( - flow_name=unmapped(child_flow.name), - project_name=unmapped(PREFECT_PROJECT_NAME), - parameters=task_bundle_params.map( - name=unmapped(param_storm_name), - year=unmapped(param_storm_year), - run_id=unmapped(param_run_id), - schism_exec=unmapped( - task_return_this_if_param_true_else_that( - param_wind_coupling, - 'pschism_WWM_PAHM_TVD-VL', - 'pschism_PAHM_TVD-VL', - ) - ), - schism_dir=_task_pathlist_to_strlist( - hotstart_dirs, rel_to='/efs' - ) - ), - upstream_tasks=[unmapped(coldstart_task)], - run_config=unmapped(ecs_config) - ) - - hotstart_task = wait_for_flow_run.map( - flow_run_uuid, raise_final_state=unmapped(True)) - - - ref_tasks.append(coldstart_task) - ref_tasks.append(hotstart_task) - - flow.set_reference_tasks(ref_tasks) - return flow diff --git a/prefect/workflow/flows/jobs/pw.py b/prefect/workflow/flows/jobs/pw.py deleted file mode 100644 index 51096c8..0000000 --- a/prefect/workflow/flows/jobs/pw.py +++ /dev/null @@ -1,367 +0,0 @@ -from prefect import unmapped, case, task -from prefect.tasks.secrets import EnvVarSecret -from prefect.tasks.control_flow import merge -from prefect.tasks.files.operations import Glob - -from conf import PW_S3, PW_S3_PREFIX -from tasks.params import ( - 
param_storm_name, param_storm_year, param_run_id, - param_subset_mesh, param_ensemble, - param_mesh_hmax, - param_mesh_hmin_low, param_mesh_rate_low, - param_mesh_trans_elev, - param_mesh_hmin_high, param_mesh_rate_high, - param_use_rdhpcs_post, - param_wind_coupling, -) -from tasks.jobs import ( - task_submit_slurm, - task_format_mesh_slurm, - task_format_schism_slurm, - task_wait_slurm_done, - task_run_rdhpcs_job) -from tasks.data import ( - task_download_s3_to_luster, - task_format_copy_s3_to_lustre, - task_upload_luster_to_s3, - task_format_copy_lustre_to_s3, - task_upload_to_rdhpcs, - task_format_s3_upload, - task_download_from_rdhpcs, - task_format_s3_download, - task_delete_from_rdhpcs, - task_format_s3_delete) -from tasks.infra import ( - task_start_rdhpcs_cluster, - task_stop_rdhpcs_cluster) -from tasks.utils import ( - task_check_param_true, - task_bundle_params, task_get_run_tag, - task_replace_tag_in_template, - task_convert_str_to_path, - task_return_value_if_param_true, - task_return_value_if_param_false, - task_return_this_if_param_true_else_that, -) -from flows.utils import ( - LocalPWFlow, RDHPCSMeshFlow, RDHPCSSolveFlow, flow_dependency) - - -def helper_mesh_args(argument, is_true): - if is_true: - return lambda: task_return_value_if_param_true( - param=param_subset_mesh, - value=argument) - return lambda: task_return_value_if_param_false( - param=param_subset_mesh, - value=argument) - - -def helper_mesh_arglist(*args): - return [i() if callable(i) else i for i in args] - - -@task -def _task_pathlist_to_strlist(path_list, rel_to=None): - '''PosixPath objects are not picklable and need to be converted to string''' - return [str(p) if rel_to is None else str(p.relative_to(rel_to)) for p in path_list] - -def make_flow_mesh_rdhpcs_pw_task(): - with RDHPCSMeshFlow(f"sim-prep-rdhpcs-mesh-cluster-task") as flow: - - result_run_tag = task_get_run_tag( - param_storm_name, param_storm_year, param_run_id) - - # 1. 
Copy files from S3 to /luster - result_s3_to_lustre = task_download_s3_to_luster( - command=task_format_copy_s3_to_lustre( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - # 2. Call sbatch on slurm job - result_mesh_slurm_submitted_id = task_submit_slurm( - command=task_format_mesh_slurm( - storm_name=param_storm_name, - storm_year=param_storm_year, - kwds=helper_mesh_arglist( - "--tag", lambda: result_run_tag, - helper_mesh_args("hurricane_mesh", False), - helper_mesh_args("--hmax", False), - helper_mesh_args(param_mesh_hmax, False), - helper_mesh_args("--hmin-low", False), - helper_mesh_args(param_mesh_hmin_low, False), - helper_mesh_args("--rate-low", False), - helper_mesh_args(param_mesh_rate_low, False), - helper_mesh_args("--transition-elev", False), - helper_mesh_args(param_mesh_trans_elev, False), - helper_mesh_args("--hmin-high", False), - helper_mesh_args(param_mesh_hmin_high, False), - helper_mesh_args("--rate-high", False), - helper_mesh_args(param_mesh_rate_high, False), - helper_mesh_args("subset_n_combine", True), - helper_mesh_args("FINEMESH_PLACEHOLDER", True), - helper_mesh_args("COARSEMESH_PLACEHOLDER", True), - helper_mesh_args("ROI_PLACEHOLDER", True), - ), - upstream_tasks=[result_s3_to_lustre])) - - # 3. Check slurm job status - result_wait_slurm_done = task_wait_slurm_done( - job_id=result_mesh_slurm_submitted_id) - - # 4. Copy /luster to S3 - result_lustre_to_s3 = task_upload_luster_to_s3( - command=task_format_copy_lustre_to_s3( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX, - upstream_tasks=[result_wait_slurm_done])) - return flow - -def make_flow_mesh_rdhpcs(mesh_pw_task_flow): - - - with LocalPWFlow(f"sim-prep-mesh-rdhpcs") as flow: - - result_run_tag = task_get_run_tag( - param_storm_name, param_storm_year, param_run_id) - - result_pw_api_key = EnvVarSecret("PW_API_KEY") - - # 1. 
COPY HURR INFO TO S3 USING LOCAL AGENT FOR RDHPCS - result_upload_to_rdhpcs = task_upload_to_rdhpcs( - command=task_format_s3_upload( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - # 2. START RDHPCS MESH CLUSTER - result_start_rdhpcs_cluster = task_start_rdhpcs_cluster( - upstream_tasks=[result_upload_to_rdhpcs], - api_key=result_pw_api_key, - cluster_name="odssmmeshv22" - ) - - # NOTE: Using disowned user bootstrap script instead - # 3. START PREFECT AGENT ON MESH CLUSTER -# result_start_prefect_agent = task_run_rdhpcs_job( -# upstream_tasks=[result_start_rdhpcs_cluster], -# api_key=result_pw_api_key, -# workflow_name="odssm_agent_mesh") - - # Note: there's no need to wait, whenever the tasks that need - # cluster agent will wait until it is started - # 4. RUN RDHPCS PREFECT TASK - # 5. WAIT RDHPCS PREFECT TASK - # TODO: Use dummy task dependent on taskflow run added in main! - after_mesh_on_rdhpcs = flow_dependency( - flow_name=mesh_pw_task_flow.name, - upstream_tasks=[result_start_rdhpcs_cluster], - parameters=task_bundle_params( - name=param_storm_name, - year=param_storm_year, - run_id=param_run_id, - mesh_hmax=param_mesh_hmax, - mesh_hmin_low=param_mesh_hmin_low, - mesh_rate_low=param_mesh_rate_low, - mesh_cutoff=param_mesh_trans_elev, - mesh_hmin_high=param_mesh_hmin_high, - mesh_rate_high=param_mesh_rate_high, - subset_mesh=param_subset_mesh, - ) - ) - - # 6. STOP RDHPCS MESH CLUSTER? FIXME -# result_stop_rdhpcs_cluster = task_stop_rdhpcs_cluster( -# upstream_tasks=[result_wait_rdhpcs_job], -# api_key=result_pw_api_key, -# cluster_name="odssm_mesh_v2_2" -# ) - - # 7. 
COPY MESH FROM S3 TO EFS - result_download_from_rdhpcs = task_download_from_rdhpcs( -# upstream_tasks=[result_wait_rdhpcs_job], - upstream_tasks=[after_mesh_on_rdhpcs], - command=task_format_s3_download( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - # NOTE: We remove storm dir after simulation is done - - return flow - - -def make_flow_solve_rdhpcs_pw_task(): - with RDHPCSSolveFlow(f"run-schism-rdhpcs-schism-cluster-task") as flow: - - result_run_tag = task_get_run_tag( - param_storm_name, param_storm_year, param_run_id) - - result_is_ensemble_on = task_check_param_true(param_ensemble) - - # 1. Copy files from S3 to /luster - result_s3_to_lustre = task_download_s3_to_luster( - command=task_format_copy_s3_to_lustre( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - # 2. Call sbatch on slurm job - # 3. Check slurm job status - with case(result_is_ensemble_on, False): - result_rundir = task_replace_tag_in_template( - storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id, - template_str='hurricanes/{tag}/setup/schism.dir/' - ) - result_after_single_run = task_submit_slurm( - command=task_format_schism_slurm( - run_path=result_rundir, - schism_exec=task_return_this_if_param_true_else_that( - param_wind_coupling, - 'pschism_WWM_PAHM_TVD-VL', - 'pschism_PAHM_TVD-VL', - ), - upstream_tasks=[result_s3_to_lustre])) - - result_wait_slurm_done_1 = task_wait_slurm_done( - job_id=result_after_single_run) - - with case(result_is_ensemble_on, True): - result_ensemble_dir = task_replace_tag_in_template( - storm_name=param_storm_name, - storm_year=param_storm_year, - run_id=param_run_id, - template_str='hurricanes/{tag}/setup/ensemble.dir/') - - result_after_coldstart = task_submit_slurm( - command=task_format_schism_slurm( - run_path=result_ensemble_dir + '/spinup', - schism_exec='pschism_PAHM_TVD-VL', - upstream_tasks=[result_s3_to_lustre])) - result_wait_slurm_done_spinup = 
task_wait_slurm_done( - job_id=result_after_coldstart) - - - hotstart_dirs = Glob(pattern='runs/*')( - path=task_convert_str_to_path('/lustre/' + result_ensemble_dir) - ) - - # TODO: Somehow failure in coldstart task doesn't fail the - # whole flow due to these mapped tasks -- why? - result_after_hotstart = task_submit_slurm.map( - command=task_format_schism_slurm.map( - run_path=_task_pathlist_to_strlist( - hotstart_dirs, rel_to='/lustre'), - schism_exec=unmapped(task_return_this_if_param_true_else_that( - param_wind_coupling, - 'pschism_WWM_PAHM_TVD-VL', - 'pschism_PAHM_TVD-VL', - )), - upstream_tasks=[unmapped(result_wait_slurm_done_spinup)])) - result_wait_slurm_done_2 = task_wait_slurm_done.map( - job_id=result_after_hotstart) - - result_wait_slurm_done = merge( - result_wait_slurm_done_1, result_wait_slurm_done_2 - ) - - - # 4. Copy /luster to S3 - result_lustre_to_s3 = task_upload_luster_to_s3( - command=task_format_copy_lustre_to_s3( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX, - upstream_tasks=[result_wait_slurm_done])) - return flow - - -def make_flow_solve_rdhpcs(solve_pw_task_flow): - - - with LocalPWFlow(f"run-schism-rdhpcs") as flow: - - result_run_tag = task_get_run_tag( - param_storm_name, param_storm_year, param_run_id) - - result_pw_api_key = EnvVarSecret("PW_API_KEY") - result_is_rdhpcspost_on = task_check_param_true( - param_use_rdhpcs_post) - - # NOTE: We should have the mesh in S3 bucket from before, but we - # need the hurricane schism setup now - - # 1. COPY HURR SETUP TO S3 USING LOCAL AGENT FOR RDHPCS - result_upload_to_rdhpcs = task_upload_to_rdhpcs( - command=task_format_s3_upload( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - # 2. 
START RDHPCS SOLVE CLUSTER - result_start_rdhpcs_cluster = task_start_rdhpcs_cluster( - upstream_tasks=[result_upload_to_rdhpcs], - api_key=result_pw_api_key, - cluster_name="odssmschismv22" - ) - - # NOTE: Using disowned user bootstrap script instead - # 3. START PREFECT AGENT ON SOLVE CLUSTER -# result_start_prefect_agent = task_run_rdhpcs_job( -# upstream_tasks=[result_start_rdhpcs_cluster], -# api_key=result_pw_api_key, -# workflow_name="odssm_agent_solve") - - # Note: there's no need to wait, whenever the tasks that need - # cluster agent will wait until it is started - # 4. RUN RDHPCS SOLVE JOB - # 5. WAIT FOR SOLVE JOB TO FINISH - # TODO: Use dummy task dependent on taskflow run added in main! - after_schism_on_rdhpcs = flow_dependency( - flow_name=solve_pw_task_flow.name, - upstream_tasks=[result_start_rdhpcs_cluster], - parameters=task_bundle_params( - name=param_storm_name, - year=param_storm_year, - run_id=param_run_id, - ensemble=param_ensemble, - couple_wind=param_wind_coupling, - ) - ) - - - # 6. STOP RDHPCS SOLVE CLUSTER? FIXME -# result_stop_rdhpcs_cluster = task_stop_rdhpcs_cluster( -# upstream_tasks=[result_wait_rdhpcs_job], -# api_key=result_pw_api_key, -# cluster_name="odssm_schism_v2_2" -# ) - - # 7. COPY SOLUTION FROM S3 TO EFS - with case(result_is_rdhpcspost_on, False): - result_download_from_rdhpcs = task_download_from_rdhpcs( -# upstream_tasks=[result_wait_rdhpcs_job], - upstream_tasks=[after_schism_on_rdhpcs], - command=task_format_s3_download( - run_tag=result_run_tag, - bucket_name=PW_S3, - bucket_prefix=PW_S3_PREFIX)) - - with case(result_is_rdhpcspost_on, True): - # TODO: - pass - - # 8. DELETE STORM FILES FROM RDHPCS S3? 
FIXME -# result_delete_from_rdhpcs = task_delete_from_rdhpcs( -# upstream_tasks=[result_download_from_rdhpcs], -# command=task_format_s3_delete( -# storm_name=param_storm_name, -# storm_year=param_storm_year, -# bucket_name=PW_S3, -# bucket_prefix=PW_S3_PREFIX)) - - return flow diff --git a/prefect/workflow/flows/utils.py b/prefect/workflow/flows/utils.py deleted file mode 100644 index 321f72d..0000000 --- a/prefect/workflow/flows/utils.py +++ /dev/null @@ -1,164 +0,0 @@ -from contextlib import contextmanager -from functools import partial - - -from dunamai import Version -from slugify import slugify -import prefect -from prefect import Flow, case, task -from prefect.tasks.prefect import StartFlowRun -from prefect.tasks.prefect.flow_run import create_flow_run, wait_for_flow_run -from prefect.backend.flow_run import FlowRunView -from prefect.tasks.prefect.flow_run_cancel import CancelFlowRun -from prefect.storage import S3 -from prefect.engine.results.s3_result import S3Result -from prefect.executors import LocalDaskExecutor -from prefect.triggers import all_finished -from prefect.run_configs.ecs import ECSRun - -from conf import ( - S3_BUCKET, PW_S3, PW_S3_PREFIX, pw_s3_cred, - PREFECT_PROJECT_NAME, - WF_CLUSTER, WF_IMG, WF_ECS_TASK_ARN, - ECS_TASK_ROLE, ECS_EXEC_ROLE, ECS_SUBNET_ID, ECS_EC2_SG, - run_cfg_local_aws_cred, - run_cfg_local_pw_cred, - run_cfg_rdhpcsc_mesh_cluster, - run_cfg_rdhpcsc_schism_cluster) - -LocalAWSFlow = partial( - Flow, - storage=S3(bucket=S3_BUCKET), - run_config=run_cfg_local_aws_cred) - - -@contextmanager -def LocalPWFlow(flow_name): - ver = Version.from_git() - flow = Flow( - name=flow_name, - result=S3Result( - bucket=PW_S3, - location=f'{PW_S3_PREFIX}/prefect-results/{{flow_run_id}}' - ), - storage=S3( - key=f'{PW_S3_PREFIX}/prefect-flows/{slugify(flow_name)}/{ver.commit}{".mod" if ver.dirty else ""}', - bucket=PW_S3, - client_options=pw_s3_cred - ), - run_config=run_cfg_local_pw_cred - ) - with flow as inctx_flow: - yield inctx_flow 
- - inctx_flow.executor = LocalDaskExecutor(scheduler="processes", num_workers=10) - -@contextmanager -def RDHPCSMeshFlow(flow_name): - ver = Version.from_git() - flow = Flow( - name=flow_name, - result=S3Result( - bucket=PW_S3, - location=f'{PW_S3_PREFIX}/prefect-results/{{flow_run_id}}' - ), - storage=S3( - key=f'{PW_S3_PREFIX}/prefect-flows/{slugify(flow_name)}/{ver.commit}{".mod" if ver.dirty else ""}', - bucket=PW_S3, - client_options=pw_s3_cred - ), - run_config=run_cfg_rdhpcsc_mesh_cluster - ) - with flow as inctx_flow: - yield inctx_flow - - -@contextmanager -def RDHPCSSolveFlow(flow_name): - ver = Version.from_git() - flow = Flow( - name=flow_name, - result=S3Result( - bucket=PW_S3, - location=f'{PW_S3_PREFIX}/prefect-results/{{flow_run_id}}' - ), - storage=S3( - key=f'{PW_S3_PREFIX}/prefect-flows/{slugify(flow_name)}/{ver.commit}{".mod" if ver.dirty else ""}', - bucket=PW_S3, - client_options=pw_s3_cred - ), - run_config=run_cfg_rdhpcsc_schism_cluster - ) - with flow as inctx_flow: - yield inctx_flow - - -@task(name="Create ECSRun config") -def task_create_ecsrun_config(run_tag): - ecs_config = ECSRun( - task_definition_arn=WF_ECS_TASK_ARN, - # Use instance profile instead of task role -# task_role_arn=ECS_TASK_ROLE, -# execution_role_arn=ECS_EXEC_ROLE, - labels=['tacc-odssm-ecs'], - run_task_kwargs=dict( - cluster=WF_CLUSTER, - launchType='EC2', -# networkConfiguration={ -# 'awsvpcConfiguration': { -# 'subnets': [ECS_SUBNET_ID], -# 'securityGroups': ECS_EC2_SG, -# 'assignPublicIp': 'DISABLED', -# }, -# }, - placementConstraints=[ -# {'type': 'distinctInstance'}, - {'type': 'memberOf', - 'expression': f"attribute:run-tag == '{run_tag}'" - } - ], - ) - ) - - return ecs_config - -@task(name="Check if child flow is still running", trigger=all_finished) -def _task_is_childflow_still_running(flow_run_id): - flow_run_vu = FlowRunView.from_flow_run_id(flow_run_id) - logger = prefect.context.get("logger") - logger.info("*****************") - 
logger.info(flow_run_vu.state) - logger.info(type(flow_run_vu.state)) - logger.info("*****************") - return False - - -def flow_dependency(flow_name, parameters, upstream_tasks, **kwargs): - flow_run_uuid = create_flow_run( - flow_name=flow_name, - parameters=parameters, - project_name=PREFECT_PROJECT_NAME, - upstream_tasks=upstream_tasks, - task_args={'name': f'Start "{flow_name}"'}, - **kwargs) - - task_wait_for_flow = wait_for_flow_run( - flow_run_uuid, raise_final_state=True, - task_args={'name': f'Wait for "{flow_name}"'} - ) - - # TODO: Check for fail wait state and call cancel if still running -# child_flow_running = _task_is_childflow_still_running( -# flow_run_uuid, -# upstream_tasks=[task_wait_for_flow], -# ) -# with case(child_flow_running, True): -# task_cancel_flow = CancelFlowRun()(flow_run_id=flow_run_uuid) - - return task_wait_for_flow - -# Deprecated -FlowDependency = partial( - StartFlowRun, - wait=True, - project_name=PREFECT_PROJECT_NAME) diff --git a/prefect/workflow/main.py b/prefect/workflow/main.py deleted file mode 100644 index e25fb97..0000000 --- a/prefect/workflow/main.py +++ /dev/null @@ -1,294 +0,0 @@ -# Run from prefect directory (after terraform vars gen) using -# prefect run --name sim-prep --param name=florance --param year=2018 - - -# For logging, use `logger = prefect.context.get("logger")` within tasks -import argparse -import warnings -import pathlib - -from prefect import case -from prefect.utilities import graphql -from prefect.client import Client -from prefect.tasks.control_flow import merge - -from conf import PREFECT_PROJECT_NAME, INIT_FINI_LOCK -from tasks.params import ( - param_storm_name, param_storm_year, - param_use_rdhpcs, param_use_rdhpcs_post, param_run_id, - param_use_parametric_wind, param_subset_mesh, param_ensemble, - param_mesh_hmax, - param_mesh_hmin_low, param_mesh_rate_low, - param_mesh_trans_elev, - param_mesh_hmin_high, param_mesh_rate_high, - param_ensemble_n_perturb, param_hr_prelandfall, - 
param_ensemble_sample_rule, - param_past_forecast, - param_wind_coupling, - ) -from tasks.data import ( - task_copy_s3_data, - task_init_run, - task_final_results_to_s3, - task_cleanup_run, - task_cache_to_s3, - task_cleanup_efs) -from tasks.utils import ( - task_check_param_true, - task_bundle_params, - task_get_flow_run_id, - task_get_run_tag, - FLock) -from flows.jobs.ecs import ( - make_flow_generic_ecs_task, - make_flow_solve_ecs_task - ) -from flows.jobs.pw import( - make_flow_mesh_rdhpcs_pw_task, - make_flow_mesh_rdhpcs, - make_flow_solve_rdhpcs_pw_task, - make_flow_solve_rdhpcs) -from flows.utils import LocalAWSFlow, flow_dependency - - -# TODO: Later add build image and push to ECS logic into Prefect workflow - -# TODO: Use subprocess.run to switch backend here -# TODO: Create user config file to be session based? https://docs-v1.prefect.io/core/concepts/configuration.html#environment-variables - -def _check_project(): - client = Client() - print(f"Connecting to {client.api_server}...") - - qry = graphql.parse_graphql({'query': {'project': ['name']}}) - rsp = client.graphql(qry) - - prj_names = [i['name'] for i in rsp['data']['project']] - if PREFECT_PROJECT_NAME in prj_names: - print(f"Project {PREFECT_PROJECT_NAME} found on {client.api_server}!") - return - - print(f"Creating project {PREFECT_PROJECT_NAME} on {client.api_server}...") - client.create_project(project_name=PREFECT_PROJECT_NAME) - print("Done!") - - -def _make_workflow(): - # Create flow objects - flow_sim_prep_info_aws = make_flow_generic_ecs_task("sim-prep-info-aws") - flow_sim_prep_mesh_aws = make_flow_generic_ecs_task("sim-prep-mesh-aws") - flow_sim_prep_setup_aws = make_flow_generic_ecs_task("sim-prep-setup-aws") - flow_mesh_rdhpcs_pw_task = make_flow_mesh_rdhpcs_pw_task() - flow_mesh_rdhpcs = make_flow_mesh_rdhpcs(flow_mesh_rdhpcs_pw_task) - flow_schism_single_run_aws = make_flow_generic_ecs_task("schism-run-aws-single") - flow_schism_ensemble_run_aws = 
make_flow_solve_ecs_task(flow_schism_single_run_aws) - flow_solve_rdhpcs_pw_task = make_flow_solve_rdhpcs_pw_task() - flow_solve_rdhpcs = make_flow_solve_rdhpcs(flow_solve_rdhpcs_pw_task) - flow_sta_html_aws = make_flow_generic_ecs_task("viz-sta-html-aws") - flow_cmb_ensemble_aws = make_flow_generic_ecs_task( - "viz-cmb-ensemble-aws" - ) - flow_ana_ensemble_aws = make_flow_generic_ecs_task( - "viz-ana-ensemble-aws" - ) - - - with LocalAWSFlow("end-to-end") as flow_main: - - result_flow_run_id = task_get_flow_run_id() - - result_run_tag = task_get_run_tag( - param_storm_name, param_storm_year, result_flow_run_id) - - result_is_rdhpcs_on = task_check_param_true(param_use_rdhpcs) - result_is_ensemble_on = task_check_param_true(param_ensemble) - result_is_rdhpcspost_on = task_check_param_true(param_use_rdhpcs_post) - - with FLock(INIT_FINI_LOCK, task_args={'name': 'Sync init'}): - result_copy_task = task_copy_s3_data() - result_init_run = task_init_run( - result_run_tag, upstream_tasks=[result_copy_task]) - - result_bundle_params_1 = task_bundle_params( - name=param_storm_name, - year=param_storm_year, - rdhpcs=param_use_rdhpcs, - rdhpcs_post=param_use_rdhpcs_post, - run_id=result_flow_run_id, - parametric_wind=param_use_parametric_wind, - ensemble=param_ensemble, - hr_before_landfall=param_hr_prelandfall, - past_forecast=param_past_forecast, - couple_wind=param_wind_coupling, - ) - - result_bundle_params_2 = task_bundle_params( - name=param_storm_name, - year=param_storm_year, - rdhpcs=param_use_rdhpcs, - run_id=result_flow_run_id, - subset_mesh=param_subset_mesh, - mesh_hmax=param_mesh_hmax, - mesh_hmin_low=param_mesh_hmin_low, - mesh_rate_low=param_mesh_rate_low, - mesh_cutoff=param_mesh_trans_elev, - mesh_hmin_high=param_mesh_hmin_high, - mesh_rate_high=param_mesh_rate_high - ) - - result_bundle_params_3 = task_bundle_params( - name=param_storm_name, - year=param_storm_year, - run_id=result_flow_run_id, - parametric_wind=param_use_parametric_wind, - 
ensemble=param_ensemble, - ensemble_num_perturbations=param_ensemble_n_perturb, - hr_before_landfall=param_hr_prelandfall, - couple_wind=param_wind_coupling, - ensemble_sample_rule=param_ensemble_sample_rule, - ) - - after_sim_prep_info = flow_dependency( - flow_name=flow_sim_prep_info_aws.name, - upstream_tasks=[result_init_run], - parameters=result_bundle_params_1) - - # TODO: Meshing based-on original track for now - # TODO: If mesh each track: diff mesh - - - with case(result_is_rdhpcs_on, True): - after_sim_prep_mesh_b1 = flow_dependency( - flow_name=flow_mesh_rdhpcs.name, - upstream_tasks=[after_sim_prep_info], - parameters=result_bundle_params_2) - with case(result_is_rdhpcs_on, False): - after_sim_prep_mesh_b2 = flow_dependency( - flow_name=flow_sim_prep_mesh_aws.name, - upstream_tasks=[after_sim_prep_info], - parameters=result_bundle_params_2) - after_sim_prep_mesh = merge(after_sim_prep_mesh_b1, after_sim_prep_mesh_b2) - - after_sim_prep_setup = flow_dependency( - flow_name=flow_sim_prep_setup_aws.name, - upstream_tasks=[after_sim_prep_mesh], - parameters=result_bundle_params_3) - - with case(result_is_rdhpcs_on, True): - after_run_schism_b1 = flow_dependency( - flow_name=flow_solve_rdhpcs.name, - upstream_tasks=[after_sim_prep_setup], - parameters=result_bundle_params_1) - with case(result_is_rdhpcs_on, False): - after_run_schism_b2 = flow_dependency( - flow_name=flow_schism_ensemble_run_aws.name, - upstream_tasks=[after_sim_prep_setup], - parameters=result_bundle_params_1) - after_run_schism = merge(after_run_schism_b1, after_run_schism_b2) - - - with case(result_is_ensemble_on, True): - with case(result_is_rdhpcspost_on, False): - after_cmb_ensemble = flow_dependency( - flow_name=flow_cmb_ensemble_aws.name, - upstream_tasks=[after_run_schism], - parameters=result_bundle_params_1) - after_ana_ensemble = flow_dependency( - flow_name=flow_ana_ensemble_aws.name, - upstream_tasks=[after_cmb_ensemble], - parameters=result_bundle_params_1) - - with 
case(result_is_rdhpcspost_on, True): - # TODO: - pass - - with case(result_is_ensemble_on, False): - after_sta_html = flow_dependency( - flow_name=flow_sta_html_aws.name, - upstream_tasks=[after_run_schism], - parameters=result_bundle_params_1) - after_gen_viz = merge(after_ana_ensemble, after_sta_html) - - # TODO: Make this a separate flow? - after_results_to_s3 = task_final_results_to_s3( - param_storm_name, param_storm_year, result_run_tag, - upstream_tasks=[after_gen_viz]) - - after_cleanup_run = task_cleanup_run( - result_run_tag, upstream_tasks=[after_results_to_s3]) - - with FLock(INIT_FINI_LOCK, upstream_tasks=[after_cleanup_run], task_args={'name': 'Sync cleanup'}): - after_cache_storage = task_cache_to_s3( - upstream_tasks=[after_cleanup_run]) - task_cleanup_efs( - result_run_tag, - upstream_tasks=[after_cache_storage]) - - flow_main.set_reference_tasks([after_cleanup_run]) - - all_flows = [ - flow_sim_prep_info_aws, - flow_sim_prep_mesh_aws, - flow_sim_prep_setup_aws, - flow_mesh_rdhpcs_pw_task, - flow_mesh_rdhpcs, - flow_schism_single_run_aws, - flow_schism_ensemble_run_aws, - flow_solve_rdhpcs_pw_task, - flow_solve_rdhpcs, - flow_sta_html_aws, - flow_cmb_ensemble_aws, - flow_ana_ensemble_aws, - flow_main - ] - - return all_flows - -def _regiser(flows): - # Register unregistered flows - for flow in flows: - flow.register(project_name=PREFECT_PROJECT_NAME) - -def _viz(flows, out_dir, flow_names): - flow_dict = {f.name: f for f in flows} - for flow_nm in flow_names: - flow = flow_dict.get(flow_nm) - if flow is None: - warnings.warn(f'Flow with the name {flow_nm} NOT found!') - flow.visualize(filename=out_dir/flow.name, format='dot') - -def _list(flows): - flow_names = [f.name for f in flows] - print("\n".join(flow_names)) - - -def _main(args): - - _check_project() - all_flows = _make_workflow() - if args.command in ["register", None]: - _regiser(all_flows) - - elif args.command == "visualize": - _viz(all_flows, args.out_dir, args.flowname) - - elif 
args.command == "list": - _list(all_flows) - - else: - raise ValueError("Invalid command!") - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - subparsers = parser.add_subparsers(dest="command") - - reg_parser = subparsers.add_parser('register') - viz_parser = subparsers.add_parser('visualize') - list_parser = subparsers.add_parser('list') - - viz_parser.add_argument('flowname', nargs='+') - viz_parser.add_argument( - '--out-dir', '-d', type=pathlib.Path, default='.') - - _main(parser.parse_args()) diff --git a/prefect/workflow/pw_client.py b/prefect/workflow/pw_client.py deleted file mode 100755 index f5fa0d9..0000000 --- a/prefect/workflow/pw_client.py +++ /dev/null @@ -1,112 +0,0 @@ -import requests -import json -import pprint as pp - -class Client(): - - def __init__(self, url, key): - self.url = url - self.api = url+'/api' - self.key = key - self.session = requests.Session() - self.headers = { - 'Content-Type': 'application/json' - } - - def upload_dataset(self, filename, path): - req = self.session.post(self.api + "/datasets/upload?key="+self.key, - data={'dir': path}, - files={'file':open(filename, 'rb')}) - req.raise_for_status() - data = json.loads(req.text) - return data - - def download_dataset(self, file): - url=self.api + "/datasets/download?key=" + self.key + '&file=' + file - #print url - req = self.session.get(url) - req.raise_for_status() - return req.content - - def find_datasets(self, path, ext=''): - url = self.api + "/datasets/find?key=" + self.key + "&path=" + path + "&ext=" + ext - #print url - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data - - def get_job_tail(self, jid, file, lastline): - url = self.api + "/jobs/"+jid+"/tail?key=" + self.key + "&file=" + file + "&line="+str(lastline) - try: - req = self.session.get(url) - req.raise_for_status() - data = req.text - except: - data = "" - return data - - def start_job(self,workflow,inputs,user): - inputs = json.dumps(inputs) 
- req = self.session.post(self.api + "/tools",data={'user':user,'tool_xml': "/workspaces/"+user+"/workflows/"+workflow+"/workflow.xml",'key':self.key,'tool_id':workflow,'inputs':inputs}) - req.raise_for_status() - data = json.loads(req.text) - jid=data['jobs'][0]['id'] - djid=str(data['decoded_job_id']) - return jid,djid - - def get_job_state(self, jid): - url = self.api + "/jobs/"+ jid + "?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data['state'] - - def get_job_credit_info(self, jid): - url = self.api + "/jobs/"+ jid + "/monitor?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - # return data['info'] - return data - - def get_resources(self): - req = self.session.get(self.api + "/resources?key=" + self.key) - req.raise_for_status() - data = json.loads(req.text) - return data - - def get_resource(self, name): - req = self.session.get(self.api + "/resources/list?key=" + self.key + "&name=" + name) - req.raise_for_status() - try: - data = json.loads(req.text) - return data - except: - return None - - def start_resource(self, name): - req = self.session.get(self.api + "/resources/start?key=" + self.key + "&name=" + name) - req.raise_for_status() - return req.text - - def stop_resource(self, name): - req = self.session.get(self.api + "/resources/stop?key=" + self.key + "&name=" + name) - req.raise_for_status() - return req.text - - def update_resource(self, name, params): - update = "&name={}".format(name) - for key, value in params.items(): - update = "{}&{}={}".format(update, key, value) - req = self.session.post(self.api + "/resources/set?key=" + self.key + update) - req.raise_for_status() - return req.text - - def get_account(self): - url = self.api + "/account?key=" + self.key - req = self.session.get(url) - req.raise_for_status() - data = json.loads(req.text) - return data - \ No newline at end of file diff --git 
a/prefect/workflow/tasks/__init__.py b/prefect/workflow/tasks/__init__.py deleted file mode 100644 index 8b13789..0000000 --- a/prefect/workflow/tasks/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/prefect/workflow/tasks/data.py b/prefect/workflow/tasks/data.py deleted file mode 100644 index 954ffde..0000000 --- a/prefect/workflow/tasks/data.py +++ /dev/null @@ -1,210 +0,0 @@ -import pathlib -import shutil -import subprocess -import json -from datetime import datetime, timezone - -import boto3 -import prefect -from prefect import task -from prefect.tasks.shell import ShellTask -from prefect.tasks.templates import StringFormatter -from prefect.engine.signals import SKIP - -from conf import LOG_STDERR, RESULT_S3, STATIC_S3, COMMIT_HASH, DOCKER_VERS - -task_copy_s3_data = ShellTask( - name="Copy s3 to efs", - command='\n'.join([ - f"aws s3 sync s3://{STATIC_S3} /efs", - "chown ec2-user:ec2-user -R /efs", - "chmod 751 -R /efs", - ]) -) - -@task(name="Initialize simulation directory") -def task_init_run(run_tag): - root = pathlib.Path(f"/efs/hurricanes/{run_tag}/") - root.mkdir() - - # Get current time with local timezone info - # https://stackoverflow.com/questions/2720319/python-figure-out-local-timezone - now = datetime.now(timezone.utc).astimezone() - - # Log run info and parameters - info_file_path = root / 'run_info.json' - run_info = {} - run_info['start_date'] = now.strftime("%Y-%m-%d %H:%M:%S %Z") - run_info['run_tag'] = run_tag - run_info['git_commit'] = COMMIT_HASH - run_info['ecs_images'] = DOCKER_VERS - run_info['prefect'] = {} - run_info['prefect']['parameters'] = prefect.context.parameters - run_info['prefect']['flow_id'] = prefect.context.flow_id - run_info['prefect']['flow_run_id'] = prefect.context.flow_run_id - run_info['prefect']['flow_run_name'] = prefect.context.flow_run_name - - with open(info_file_path, 'w') as info_file: - json.dump(run_info, info_file, indent=2) - - for subdir in ['mesh', 'setup', 'sim', 'nhc_track', 'coops_ssh']: - 
(root / subdir).mkdir() - - - - -task_format_s3_upload = StringFormatter( - name="Prepare path to upload to rdhpcs S3", - template='; '.join( - ["find /efs/hurricanes/{run_tag}/ -type l -exec bash -c" - + " 'for i in \"$@\"; do" - + " readlink $i > $i.symlnk;" - # Don't remove actual links from the EFS -# + " rm -rf $i;" - + " done' _ {{}} +", - "aws s3 sync --no-follow-symlinks" - + " /efs/hurricanes/{run_tag}/" - + " s3://{bucket_name}/{bucket_prefix}/hurricanes/{run_tag}/", - ])) - -task_upload_to_rdhpcs = ShellTask( - name="Copy from efs to rdhpcs s3", -) - -task_format_s3_download = StringFormatter( - name="Prepare path to download from rdhpcs S3", - template='; '.join( - ["aws s3 sync" - + " s3://{bucket_name}/{bucket_prefix}/hurricanes/{run_tag}/" - + " /efs/hurricanes/{run_tag}/", - "find /efs/hurricanes/{run_tag}/ -type f -name '*.symlnk' -exec bash -c" - + " 'for i in \"$@\"; do" - + " ln -sf $(cat $i) ${{i%.symlnk}};" - + " rm -rf $i;" - + " done' _ {{}} +", - ])) - -task_download_from_rdhpcs = ShellTask( - name="Download from rdhpcs s3 to efs", -) - - -task_format_copy_s3_to_lustre = StringFormatter( - name="Prepare path to Luster from S3 on RDHPCS cluster", - template='; '.join( - ["aws s3 sync" - + " s3://{bucket_name}/{bucket_prefix}/hurricanes/{run_tag}/" - + " /lustre/hurricanes/{run_tag}/", - "find /lustre/hurricanes/{run_tag}/ -type f -name '*.symlnk' -exec bash -c" - + " 'for i in \"$@\"; do" - + " ln -sf $(cat $i) ${{i%.symlnk}};" - + " rm -rf $i;" - + " done' _ {{}} +", - ])) - -task_download_s3_to_luster = ShellTask( - name="Download data from RDHPCS S3 onto RDHPCS cluster /lustre", - return_all=True, - log_stderr=LOG_STDERR, -) - -task_format_copy_lustre_to_s3 = StringFormatter( - name="Prepare path to S3 from Luster on RDHPCS cluster", - template='; '.join( - ["find /lustre/hurricanes/{run_tag}/ -type l -exec bash -c" - + " 'for i in \"$@\"; do" - + " readlink $i > $i.symlnk;" - # Don't remove the actual links from the luster -# + " rm -rf 
$i;" - + " done' _ {{}} +", - "aws s3 sync --no-follow-symlinks" - + " /lustre/hurricanes/{run_tag}/" - + " s3://{bucket_name}/{bucket_prefix}/hurricanes/{run_tag}/" - ])) - -task_upload_luster_to_s3 = ShellTask( - name="Upload data from RDHPCS cluster /lustre onto RDHPCS S3", - return_all=True, - log_stderr=LOG_STDERR, -) - -task_format_s3_delete = StringFormatter( - name="Prepare path to remove from rdhpcs S3", - template="aws s3 rm --recursive" - + " s3://{bucket_name}/{bucket_prefix}/hurricanes/{run_tag}/") - -task_delete_from_rdhpcs = ShellTask( - name="Delete from rdhpcs s3", -) - -@task(name="Copy final results to S3 for longterm storage") -def task_final_results_to_s3(storm_name, storm_year, run_tag): - s3 = boto3.client("s3") - src = pathlib.Path(f'/efs/hurricanes/{run_tag}') - prefix = f'{storm_name}_{storm_year}_' - - aws_rsp = s3.list_objects_v2(Bucket=RESULT_S3, Delimiter='/') - - try: - top_level = [k['Prefix'] for k in aws_rsp['CommonPrefixes']] - except KeyError: - top_level = [] - - old_runs = [i.strip('/') for i in top_level if i.startswith(prefix)] - run_numstr = [i[len(prefix):] for i in old_runs] - run_nums = [int(i) for i in run_numstr if i.isnumeric()] - - next_num = 1 - if len(run_nums) > 0: - next_num = max(run_nums) + 1 - # Zero filled number - dest = f'{prefix}{next_num:03d}' - - for p in src.rglob("*"): - # For S3 object storage folders are meaningless - if p.is_dir(): - continue - - # Ignore thsese - ignore_patterns = [ - "max*_*", - "schout_*_*.nc", - "hotstart_*_*.nc", - "local_to_global_*", - "nonfatal_*" - ] - if any(p.match(pat) for pat in ignore_patterns): - continue - - s3.upload_file( - str(p), RESULT_S3, f'{dest}/{p.relative_to(src)}') - -@task(name="Cleanup run directory after run") -def task_cleanup_run(run_tag): - # Remove the current run's directory - base = pathlib.Path('/efs/hurricanes/') - src = base / run_tag - shutil.rmtree(src) - - -task_cache_to_s3 = ShellTask( - name="Sync all cached files with static S3", - 
command='\n'.join([ - "mkdir -p /efs/cache", # To avoid error if no cache! - f"aws s3 sync /efs/cache s3://{STATIC_S3}/cache/" - ]) -) - -@task(name="Cleanup EFS after run") -def task_cleanup_efs(run_tag): - - base = pathlib.Path('/efs/hurricanes/') - - # If there are no other runs under hurricane cleanup EFS - if any(not i.match("./_") for i in base.glob("*")): - # This means there are other ongoing runs or failed runs that - # may need inspections, so don't cleanup EFS - raise SKIP("Other run directories exist in EFS, skip cleanup!") - - for p in pathlib.Path('/efs').glob("*"): - shutil.rmtree(p) diff --git a/prefect/workflow/tasks/infra.py b/prefect/workflow/tasks/infra.py deleted file mode 100644 index 404ec04..0000000 --- a/prefect/workflow/tasks/infra.py +++ /dev/null @@ -1,309 +0,0 @@ -import time -import json -import multiprocessing, time, signal - -import boto3 -import prefect -from prefect.tasks.shell import ShellTask -from prefect.tasks.aws.client_waiter import AWSClientWait -from prefect import task -from prefect.triggers import all_finished -from prefect.tasks.templates import StringFormatter -from prefect.engine.signals import SKIP -from prefect import resource_manager -from prefect.agent.ecs.agent import ECSAgent - -import pw_client -from conf import LOG_STDERR, PW_URL, WORKFLOW_TAG_NAME - -shell_list_cluster_instance_arns = " ".join([ - "aws ecs list-container-instances", - "--cluster {cluster}", - "--output json" -]) - -task_format_list_cluster_instance_arns = StringFormatter( - name="Format list cluster instance ARNs", - template=shell_list_cluster_instance_arns -) - -task_list_cluster_instance_arns = ShellTask( - name="List cluster instance arns", - return_all=True, # To get stdout as return value - log_stderr=LOG_STDERR, -) - - -# TODO: Check if instance is running (e.g. 
vs exist but STOPPED) -@task(name="Check if EC2 is needed") -def task_check_if_ec2_needed(rv_shell): - - aws_rv = json.loads("\n".join(rv_shell)) - ec2_arn_list = aws_rv.get('containerInstanceArns', []) - - is_needed = len(ec2_arn_list) == 0 - - return is_needed - -task_client_wait_for_ec2 = AWSClientWait( - name='Wait for ECS task', - client='ec2', - waiter_name='instance_status_ok' -) - -@task(name="Check for instance shutdown") -def task_check_cluster_shutdown(rv_shell): - - task_arns = json.loads("\n".join(rv_shell)) - can_shutdown = len(task_arns) == 0 - - return can_shutdown - -shell_term_instances = " ".join([ - "aws ec2 terminate-instances", - "--instance-ids {instance_id_list}" - ]) - -task_format_term_ec2 = StringFormatter( - name="Format term ec2 command", - template=shell_term_instances) - -task_term_instances = ShellTask( - name="Stop cluster instances" -) - - -shell_spinup_cluster_ec2 = " ".join([ - "aws ec2 run-instances", - "--launch-template LaunchTemplateId={template_id}", - "--query Instances[*].InstanceId", - "--output json", -]) -task_format_spinup_cluster_ec2 = StringFormatter( - name="Format EC2 spinup command", - template=shell_spinup_cluster_ec2, - ) - -task_spinup_cluster_ec2 = ShellTask( - name="EC2 for cluster", - return_all=True, # Multi line for list of tasks as json - log_stderr=LOG_STDERR, -) - -shell_list_cluster_tasks = " ".join([ - "aws ecs list-tasks", - "--cluster {cluster}", - "--query taskArns", - "--output json" -]) -task_format_list_cluster_tasks = StringFormatter( - name="Format list cluster tasks command", - template=shell_list_cluster_tasks) - -task_list_cluster_tasks = ShellTask( - name="List cluster tasks", - return_all=True, # To potentially multiline json string - log_stderr=LOG_STDERR, -) - -shell_list_cluster_instance_ids = " ".join([ - "aws ecs list-container-instances", - "--cluster {cluster}", - "--query containerInstanceArns", - "--output text", - "| xargs", - "aws ecs describe-container-instances", - "--cluster 
{cluster}", - "--query containerInstances[*].ec2InstanceId", - "--output text", - "--container-instances" # THIS MUST BE THE LAST ONE FOR XARGS -]) - -task_format_list_cluster_instance_ids = StringFormatter( - name="Format list cluster instance ids command", - template=shell_list_cluster_instance_ids -) - -task_list_cluster_instance_ids = ShellTask( - name="List cluster instance IDs", - return_all=False, # To get single line text output list - log_stderr=LOG_STDERR, -) - - -@task(name="Create EC2 instance with unique tag") -def task_create_ec2_w_tag(template_id, run_tag): - ec2_resource = boto3.resource('ec2') - ec2_client = boto3.client('ec2') - - ec2_instances = ec2_resource.create_instances( - LaunchTemplate={'LaunchTemplateId': template_id}, - MinCount=1, MaxCount=1 - ) - - instance_ids = [ - ec2_inst.instance_id for ec2_inst in ec2_instances] - - waiter = ec2_client.get_waiter('instance_exists') - waiter.wait(InstanceIds=instance_ids) - - ec2_resource.create_tags( - Resources=instance_ids, - Tags=[{'Key': WORKFLOW_TAG_NAME, 'Value': run_tag}] - ) - - return instance_ids - -@task(name="Destroy EC2 instance by unique tag") -def task_destroy_ec2_by_tag(run_tag): - - ec2_client = boto3.client('ec2') - - filter_by_run_tag = [{ - 'Name': f'tag:{WORKFLOW_TAG_NAME}', 'Values': [run_tag]}] - - response = ec2_client.describe_instances(Filters=filter_by_run_tag) - instance_ids = [ - instance['InstanceId'] - for rsv in response.get('Reservations', []) - for instance in rsv.get('Instances', []) - ] - - - if len(instance_ids) == 0: - raise SKIP( - message="Could NOT find any instances tagged for this run") - - response = ec2_client.terminate_instances( - InstanceIds=instance_ids) - - -@task(name="Add run tag attribute to ECS instance") -def task_add_ecs_attribute_for_ec2(ec2_instance_ids, cluster, run_tag): - - ecs_client = boto3.client('ecs') - response = ecs_client.list_container_instances(cluster=cluster) - all_ecs_instance_arns = response['containerInstanceArns'] - if 
len(all_ecs_instance_arns) == 0: - raise FAIL( - message=f"Could NOT find any instances associated with cluster {cluster}") - - response = ecs_client.describe_container_instances( - cluster=cluster, - containerInstances=all_ecs_instance_arns) - - ecs_instance_arns = [] - for container_instance_info in response['containerInstances']: - ec2_instance_id = container_instance_info['ec2InstanceId'] - if ec2_instance_id in ec2_instance_ids: - ecs_instance_arns.append(container_instance_info['containerInstanceArn']) - break - - for inst_arn in ecs_instance_arns: - ecs_client.put_attributes( - cluster=cluster, - attributes=[ - { - 'name': 'run-tag', - 'value': run_tag, - 'targetType': 'container-instance', - 'targetId': inst_arn - }, - ] - ) - -@resource_manager -class ContainerInstance: - def __init__(self, run_tag, template_id): - self.tag = run_tag - self.template = template_id - - def setup(self): - "Create container instances for the run specified by tag" - - ec2_client = boto3.client('ec2') - - ec2_instance_ids = task_create_ec2_w_tag.run(self.template, self.tag) - - waiter = ec2_client.get_waiter('instance_status_ok') - waiter.wait(InstanceIds=ec2_instance_ids) - - response = ec2_client.describe_instances(InstanceIds=ec2_instance_ids) - instance_ips = [ - instance['PublicIpAddress'] - for rsv in response.get('Reservations', []) - for instance in rsv.get('Instances', []) - ] - - logger = prefect.context.get("logger") - logger.info(f"EC2 public IPs: {','.join(instance_ips)}") - - return ec2_instance_ids - - def cleanup(self, ec2_instance_ids): - "Shutdown the container instance" - - # NOTE: We destroy by tag - task_destroy_ec2_by_tag.run(self.tag) - - -@task(name="Start RDHPCS cluster") -def task_start_rdhpcs_cluster(api_key, cluster_name): - c = pw_client.Client(PW_URL, api_key) - - # check if resource exists and is on - cluster = c.get_resource(cluster_name) - if cluster: - if cluster['status'] == "on": - return - - # if resource not on, start it - time.sleep(0.2) - 
c.start_resource(cluster_name) - - else: - raise ValueError("Cluster name could not be found!") - - while True: - time.sleep(10) - - current_state = c.get_resources() - - for cluster in current_state: - if cluster['name'] != cluster_name: - continue - - if cluster['status'] != 'on': - continue - - state = cluster['state'] - - if 'masterNode' not in cluster['state']: - continue - - if cluster['state']['masterNode'] == None: - continue - - ip = cluster['state']['masterNode'] - return ip - - -@task(name="Stop RDHPCS cluster", trigger=all_finished) -def task_stop_rdhpcs_cluster(api_key, cluster_name): - - c = pw_client.Client(PW_URL, api_key) - - # check if resource exists and is on - cluster = c.get_resource(cluster_name) - if cluster: - if cluster['status'] == "off": - return - - # TODO: Check if another job is running on the cluster - - # if resource not on, start it - time.sleep(0.2) - c.stop_resource(cluster_name) - - else: - raise ValueError("Cluster name could not be found!") diff --git a/prefect/workflow/tasks/jobs.py b/prefect/workflow/tasks/jobs.py deleted file mode 100644 index 28c6855..0000000 --- a/prefect/workflow/tasks/jobs.py +++ /dev/null @@ -1,276 +0,0 @@ -import subprocess -import time -import json -from functools import partial - -import boto3 -import prefect -from prefect import task -from prefect.tasks.shell import ShellTask -from prefect.tasks.templates import StringFormatter -from prefect.tasks.aws.client_waiter import AWSClientWait -from prefect.triggers import any_failed, all_finished -from prefect.engine.signals import FAIL - -import pw_client -from conf import LOG_STDERR, PW_URL, WORKFLOW_TAG_NAME, log_group_name - - - -shell_run_task = " ".join([ - "aws ecs start-task", - "--cluster {cluster}", - "--task-definition {name_ecs_task}", - "--overrides '{overrides}'", - "--query tasks[*].taskArn", - "--output json", - "--container-instances {instance_ids}" -# "--count 5" # TEST: run and stop multiple tasks - ]) - -@task(name="Prepare run 
command") -def task_format_start_task(template, **kwargs): - aux = {} - cluster = kwargs['cluster'] - env_list = kwargs.get('env', []) - if len(env_list) > 0: - aux['environment'] = env_list - - run_tag = kwargs.pop("run_tag") - if run_tag is None: - raise FAIL(message="Run tag is NOT provided for the task!") - - overrides_storm = json.dumps({ - "containerOverrides": [ - { - "name": f'{kwargs["name_docker"]}', - "command": [f'{seg}' for seg in kwargs['docker_cmd'] if seg is not None], - **aux - } - ], - }) - - ec2_client = boto3.client('ec2') - filter_by_run_tag = [{ - 'Name': f'tag:{WORKFLOW_TAG_NAME}', 'Values': [run_tag]}] - response = ec2_client.describe_instances(Filters=filter_by_run_tag) - ec2_instance_ids = [ - instance['InstanceId'] - for rsv in response.get('Reservations', []) - for instance in rsv.get('Instances', []) - ] - - if len(ec2_instance_ids) == 0: - raise FAIL( - message="Could NOT find any EC2 instances tagged for this run") - - ecs_client = boto3.client('ecs') - response = ecs_client.list_container_instances(cluster=cluster) - all_ecs_instance_arns = response['containerInstanceArns'] - if len(all_ecs_instance_arns) == 0: - raise FAIL( - message=f"Could NOT find any instances associated with cluster {cluster}") - - response = ecs_client.describe_container_instances( - cluster=cluster, - containerInstances=all_ecs_instance_arns) - - ecs_instance_arns = [] - for container_instance_info in response['containerInstances']: - ec2_instance_id = container_instance_info['ec2InstanceId'] - if ec2_instance_id in ec2_instance_ids: - ecs_instance_arns.append(container_instance_info['containerInstanceArn']) - break - - if len(ecs_instance_arns) == 0: - raise FAIL( - message="Could NOT find any container instances tagged for this run") - - formatted_cmd = template.format( - overrides=overrides_storm, - instance_ids=" ".join(ecs_instance_arns), - **kwargs) - - return formatted_cmd - -task_start_ecs_task = ShellTask( - name='Run ECS task', - return_all=True, # 
Need json list - log_stderr=LOG_STDERR, -) - -# NOTE: We cannot use CLI aws ecs wait because it timesout after -# 100 attempts made 6 secs apart. -task_client_wait_for_ecs = AWSClientWait( - name='Wait for ECS task', - client='ecs', - waiter_name='tasks_stopped' -) - - -@task(name="Retrieve task logs", trigger=all_finished) -def task_retrieve_task_docker_logs(log_prefix, container_name, tasks): - - logger = prefect.context.get("logger") - logs = boto3.client('logs') - ecs = boto3.client('ecs') - - task_ids = [t.split('/')[-1] for t in tasks] - - get_events = partial( - logs.filter_log_events, - logGroupName=log_group_name, - logStreamNames=[ - f"{log_prefix}/{container_name}/{task_id}" - for task_id in task_ids - ], - interleaved=True - ) - - response = get_events() - events = response['events'] - for e in events: - logger.info(e['message']) - - while len(events) > 0: - response = get_events(nextToken=response['nextToken']) - events = response['events'] - for e in events: - logger.info(e['message']) - - - -shell_kill_timedout = " ".join([ - "aws ecs stop-task", - "--cluster {cluster}", - "--reason \"Timed out\"", - "--task {task}" - ]) -task_format_kill_timedout = StringFormatter( - name="Prepare kill command", - template=shell_kill_timedout) -task_kill_task_if_wait_fails = ShellTask( - name='Kill timed-out tasks', - return_all=True, - log_stderr=LOG_STDERR, - trigger=any_failed -) - -@task(name="Check docker success") -def task_check_docker_success(tasks, cluster_name): - ecs = boto3.client('ecs') - response = ecs.describe_tasks(cluster=cluster_name, tasks=tasks) - logger = prefect.context.get("logger") - exit_codes = [] - try: - for task in response['tasks']: - for container in task['containers']: - try: - exit_codes.append(container['exitCode']) - except KeyError: - logger.error(container['reason']) - raise FAIL(message="A task description doesn't have exit code!") - except KeyError: - logger.error(response) - raise FAIL(message="ECS task decription cannot be 
parsed!") - - if any(int(rv) != 0 for rv in exit_codes): - raise FAIL(message="Docker returned non-zero code!") - - -# Using workflow-json on RDHPCS-C -@task(name="Run RDHPCS job") -def task_run_rdhpcs_job(api_key, workflow_name, **workflow_inputs): - c = pw_client.Client(PW_URL, api_key) - - # get the account username - account = c.get_account() - - user = account['info']['username'] - - job_id, decod_job_id = c.start_job(workflow_name, workflow_inputs, user) - return decod_job_id - - -@task(name="Wait for RDHPCS job") -def task_wait_rdhpcs_job(api_key, decod_job_id): - - c = pw_client.Client(PW_URL, api_key) - while True: - time.sleep(5) - try: - state = c.get_job_state(decod_job_id) - except: - state = "starting" - - if state == 'ok': - break - elif (state == 'deleted' or state == 'error'): - raise Exception('Simulation had an error. Please try again') - - -@task(name="Prepare Slurm script to submit the batch job") -def task_format_mesh_slurm(storm_name, storm_year, kwds): - return " ".join( - ["sbatch", - ",".join([ - "--export=ALL", - f"KWDS=\"{' '.join(str(i) for i in kwds if i is not None)}\"", - f"STORM=\"{storm_name}\"", - f"YEAR=\"{storm_year}\"", - ]), - "~/mesh.sbatch"] - ) - - -task_submit_slurm = ShellTask( - name="Submit batch job on meshing cluster", - return_all=False, # Need single line reult for job ID extraction - log_stderr=LOG_STDERR, -) - - -@task(name="Wait for slurm job") -def task_wait_slurm_done(job_id): - - logger = prefect.context.get("logger") - logger.info(f"Waiting for job with ID: {job_id}") - while True: - time.sleep(10) - - result = subprocess.run( - ["sacct", "--format=State", - "--parsable2", f"--jobs={job_id}"], - capture_output=True, - text=True) - - # A single job can have sub-jobs (e.g. srun calls) - stdout = result.stdout - stderr = result.stderr - # Skip header ("State") - status = stdout.strip().split('\n')[1:] - - # TODO: Add timeout? 
- if any(st in ('RUNNING', 'PENDING', 'NODE_FAIL') for st in status): - # TODO: Any special handling for node failure? - continue - - # If it's not running or pending we can safely look at finalized - # log, whether it's a failure or finished without errors - logger.info('Fetching SLURM logs...') - with open(f'slurm-{job_id}.out') as slurm_log: - logger.info(''.join(slurm_log.readlines())) - - if all(st == 'COMPLETED' for st in status): - break - - raise RuntimeError(f"Slurm job failed with status {status}") - -task_format_schism_slurm = StringFormatter( - name="Prepare Slurm script to submit the batch job", - template=" ".join( - ["sbatch", - "--export=ALL,STORM_PATH=\"{run_path}\",SCHISM_EXEC=\"{schism_exec}\"", - "~/schism.sbatch"] - ) -) diff --git a/prefect/workflow/tasks/params.py b/prefect/workflow/tasks/params.py deleted file mode 100644 index a42d103..0000000 --- a/prefect/workflow/tasks/params.py +++ /dev/null @@ -1,25 +0,0 @@ -from prefect import Parameter - -# Define parameters -param_storm_name = Parameter('name') -param_storm_year = Parameter('year') -param_use_rdhpcs = Parameter('rdhpcs', default=False) -param_use_rdhpcs_post = Parameter('rdhpcs_post', default=False) -param_use_parametric_wind = Parameter('parametric_wind', default=False) -param_run_id = Parameter('run_id') -param_schism_dir = Parameter('schism_dir') -param_schism_exec = Parameter('schism_exec') -param_subset_mesh = Parameter('subset_mesh', default=False) -param_past_forecast = Parameter('past_forecast', default=False) -param_hr_prelandfall = Parameter('hr_before_landfall', default=-1) -param_wind_coupling = Parameter('couple_wind', default=False) -param_ensemble = Parameter('ensemble', default=False) -param_ensemble_n_perturb = Parameter('ensemble_num_perturbations', default=40) -param_ensemble_sample_rule = Parameter('ensemble_sample_rule', default='korobov') - -param_mesh_hmax = Parameter('mesh_hmax', default=20000) -param_mesh_hmin_low = Parameter('mesh_hmin_low', default=1500) 
-param_mesh_rate_low = Parameter('mesh_rate_low', default=2e-3) -param_mesh_trans_elev = Parameter('mesh_cutoff', default=-200) -param_mesh_hmin_high = Parameter('mesh_hmin_high', default=300) -param_mesh_rate_high = Parameter('mesh_rate_high', default=1e-3) diff --git a/prefect/workflow/tasks/utils.py b/prefect/workflow/tasks/utils.py deleted file mode 100644 index 77ab16d..0000000 --- a/prefect/workflow/tasks/utils.py +++ /dev/null @@ -1,105 +0,0 @@ -import fcntl -import json -from dataclasses import dataclass -from functools import partial -from typing import List -from pathlib import Path - -import prefect -from prefect import task -from prefect import resource_manager - - -@task(name="List from JSON") -def task_pylist_from_jsonlist(json_lines): - return json.loads("\n".join(json_lines)) - - -@task(name="Check parameter is true") -def task_check_param_true(param): - return param in [True, 1, 'True', 'true', '1'] - -@task(name="Return flag if boolean parameter is true") -def task_return_value_if_param_true(param, value): - if param in [True, 1, 'True', 'true', '1']: - return value - return None - -@task(name="Return flag if boolean parameter is false") -def task_return_value_if_param_false(param, value): - if param in [True, 1, 'True', 'true', '1']: - return None - return value - -@task(name="Return flag if boolean parameter is true") -def task_return_this_if_param_true_else_that(param, this, that): - if param in [True, 1, 'True', 'true', '1']: - return this - return that - - -@task(name="Create param dict") -def task_bundle_params(existing_bundle=None, **kwargs): - par_dict = kwargs - if isinstance(existing_bundle, dict): - par_dict = existing_bundle.copy() - par_dict.update(kwargs) - return par_dict - -@task(name="Get run ID") -def task_get_flow_run_id(): - return prefect.context.get('flow_run_id') - - -@task(name="Get run tag") -def task_get_run_tag(storm_name, storm_year, run_id): - return f'{storm_name}_{storm_year}_{run_id}' - -@task(name="Add tag prefix 
to localpath") -def task_add_tag_path_prefix(storm_name, storm_year, run_id, local_path): - return f'{storm_name}_{storm_year}_{run_id}' / localpath - -@task(name="Replace tag in template") -def task_replace_tag_in_template(storm_name, storm_year, run_id, template_str): - return template_str.format(tag=f'{storm_name}_{storm_year}_{run_id}') - - -@task(name="Convert string to path object") -def task_convert_str_to_path(string): - return Path(string) - - -@task(name="Info printing") -def task_print_info(object_to_print): - logger = prefect.context.get("logger") - logger.info("*****************") - logger.info(object_to_print) - logger.info("*****************") - -@resource_manager(name="File mutex") -class FLock: - def __init__(self, path): - self.path = path - - def setup(self): - "Create a locked file in the specified address and returns file object" - file_obj = open(self.path, 'w') - fcntl.flock(file_obj.fileno(), fcntl.LOCK_EX) - return file_obj - - def cleanup(self, file_obj): - "Removes the lock" - fcntl.flock(file_obj.fileno(), fcntl.LOCK_UN) - file_obj.close() - -@dataclass(frozen=True) -class ECSTaskDetail: - name_ecs_cluster: str - id_ec2_template: str - name_ecs_task: str - name_docker: str - docker_args: List - description: str - wait_delay: float - wait_max_attempt: int - env_secrets: List diff --git a/rdhpcs/clusters/mesh_cluster.json b/rdhpcs/clusters/mesh_cluster.json deleted file mode 100644 index 715c3e5..0000000 --- a/rdhpcs/clusters/mesh_cluster.json +++ /dev/null @@ -1,24 +0,0 @@ -{ -"availability_zone":"us-east-1b", -"controller_image":"latest", -"controller_net_type":false, -"export_fs_type":"xfs", -"image_disk_count":1, -"image_disk_name":"snap-04f8963f5d94148b6", -"image_disk_size_gb":250, -"management_shape":"c5n.2xlarge", -"partition_config":[ - { - "availability_zone":"us-east-1b", - "default":"YES", - "elastic_image":"latest", - "enable_spot":false, - "instance_type":"r5n.16xlarge", - "max_node_num":102, - "name":"compute", - 
"net_type":false, - "architecture":"amd64" - } -], -"region":"us-east-1" -} diff --git a/rdhpcs/clusters/mesh_init.sh b/rdhpcs/clusters/mesh_init.sh deleted file mode 100644 index 3278bda..0000000 --- a/rdhpcs/clusters/mesh_init.sh +++ /dev/null @@ -1,44 +0,0 @@ -DISOWN - -# We want the disowned script only on head node -if [ "$(hostname | grep -o mgmt)" != "mgmt" ]; then - exit -fi - -export PATH=$PATH:/usr/local/bin - -sudo yum update -y && sudo yum upgrade -y - -# TODO: Use lustre instead of home -sudo yum install -y tmux -cp -v /contrib/Soroosh.Mani/configs/.vimrc ~ -cp -v /contrib/Soroosh.Mani/configs/.tmux.conf ~ - -cd ~ -cp -v /contrib/Soroosh.Mani/scripts/hurricane_mesh.py ~ -cp -v /contrib/Soroosh.Mani/scripts/mesh.sbatch ~ - -cp -v /contrib/Soroosh.Mani/pkgs/odssm-mesh.tar.gz . -mkdir odssm-mesh -pushd odssm-mesh -tar -xf ../odssm-mesh.tar.gz -rm -rf ../odssm-mesh.tar.gz -bin/conda-unpack -popd - -cp -v /contrib/Soroosh.Mani/pkgs/odssm-prefect.tar.gz . -mkdir odssm-prefect -pushd odssm-prefect -tar -xf ../odssm-prefect.tar.gz -rm -rf ../odssm-prefect.tar.gz -bin/conda-unpack -popd - -aws s3 sync s3://noaa-nos-none-ca-hsofs-c/Soroosh.Mani/dem /lustre/dem -aws s3 sync s3://noaa-nos-none-ca-hsofs-c/Soroosh.Mani/shape /lustre/shape -aws s3 sync s3://noaa-nos-none-ca-hsofs-c/Soroosh.Mani/grid /lustre/grid -date > ~/_initialized_ - -# This is executed only for head (ALLNODES not specified at the top) -source odssm-prefect/bin/activate -prefect agent local start --key `cat /contrib/Soroosh.Mani/secrets/prefect.key` --label tacc-odssm-rdhpcs-mesh-cluster --name tacc-odssm-agent-rdhpcs-mesh-cluster --log-level INFO diff --git a/rdhpcs/clusters/mesh_lustre.json b/rdhpcs/clusters/mesh_lustre.json deleted file mode 100644 index 29f38ac..0000000 --- a/rdhpcs/clusters/mesh_lustre.json +++ /dev/null @@ -1,5 +0,0 @@ -{ -"fsxcompression":"LZ4", -"fsxdeployment":"SCRATCH_2", -"storage_capacity":"1200" -} diff --git a/rdhpcs/clusters/schism_cluster.json 
b/rdhpcs/clusters/schism_cluster.json deleted file mode 100644 index c4f2388..0000000 --- a/rdhpcs/clusters/schism_cluster.json +++ /dev/null @@ -1,23 +0,0 @@ -{ -"availability_zone":"us-east-1b", -"controller_image":"latest", -"controller_net_type":false, -"export_fs_type":"xfs", -"image_disk_count":1, -"image_disk_name":"snap-04f8963f5d94148b6", -"image_disk_size_gb":250, -"management_shape":"c5n.2xlarge", -"partition_config":[ - { - "availability_zone":"us-east-1b", - "default":"YES", - "elastic_image":"latest", - "enable_spot":false, - "instance_type":"c5n.18xlarge", - "max_node_num":102, - "name":"compute", - "net_type":true - } -], -"region":"us-east-1" -} diff --git a/rdhpcs/clusters/schism_init.sh b/rdhpcs/clusters/schism_init.sh deleted file mode 100644 index 6d255a2..0000000 --- a/rdhpcs/clusters/schism_init.sh +++ /dev/null @@ -1,39 +0,0 @@ -DISOWN - -# We want the disowned script only on head node -if [ "$(hostname | grep -o mgmt)" != "mgmt" ]; then - exit -fi - -export PATH=$PATH:/usr/local/bin - -sudo yum update -y && sudo yum upgrade -y - -# TODO: Use lustre instead of home -sudo yum install -y tmux -cp -v /contrib/Soroosh.Mani/configs/.vimrc ~ -cp -v /contrib/Soroosh.Mani/configs/.tmux.conf ~ - -cd ~ -cp -v /contrib/Soroosh.Mani/scripts/schism.sbatch ~ -cp -v /contrib/Soroosh.Mani/scripts/combine_gr3.exp ~ - -cp -L -r /contrib/Soroosh.Mani/pkgs/schism . - -echo "export PATH=\$HOME/schism/bin/:\$PATH" >> ~/.bash_profile -echo "export PATH=\$HOME/schism/bin/:\$PATH" >> ~/.bashrc - -cp -v /contrib/Soroosh.Mani/pkgs/odssm-prefect.tar.gz . -mkdir odssm-prefect -pushd odssm-prefect -tar -xf ../odssm-prefect.tar.gz -rm -rf ../odssm-prefect.tar.gz -bin/conda-unpack -popd - -# No static files is needed for run! 
-date > ~/_initialized_ - -# This is executed only for head (ALLNODES not specified at the top) -source odssm-prefect/bin/activate -prefect agent local start --key `cat /contrib/Soroosh.Mani/secrets/prefect.key` --label tacc-odssm-rdhpcs-schism-cluster --name tacc-odssm-agent-rdhpcs-schism-cluster --log-level INFO diff --git a/rdhpcs/clusters/schism_lustre.json b/rdhpcs/clusters/schism_lustre.json deleted file mode 100644 index 29f38ac..0000000 --- a/rdhpcs/clusters/schism_lustre.json +++ /dev/null @@ -1,5 +0,0 @@ -{ -"fsxcompression":"LZ4", -"fsxdeployment":"SCRATCH_2", -"storage_capacity":"1200" -} diff --git a/rdhpcs/scripts/combine_gr3.exp b/rdhpcs/scripts/combine_gr3.exp deleted file mode 120000 index 01ba28b..0000000 --- a/rdhpcs/scripts/combine_gr3.exp +++ /dev/null @@ -1 +0,0 @@ -../../docker/schism/docker/combine_gr3.exp \ No newline at end of file diff --git a/rdhpcs/scripts/compile_schism.sh b/rdhpcs/scripts/compile_schism.sh deleted file mode 100755 index a8cc8ee..0000000 --- a/rdhpcs/scripts/compile_schism.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -## helper script for compiling schism -## moghimis@gmail.com - -prev_dir=$PWD - -commit=0741120 - -pkg_dir='/contrib/Soroosh.Mani/pkgs' -src_dir='/tmp/schism/sandbox' -install_dir="$pkg_dir/schism.$commit" -link_path="$pkg_dir/schism" - -function _compile { - # Download schism - git clone https://github.com/schism-dev/schism.git $src_dir - - ## Based on Zizang's email - module purge - - module load cmake - module load intel/2021.3.0 - module load impi/2021.3.0 - module load hdf5/1.10.6 - module load netcdf/4.7.0 - - - #for cmake - export CMAKE_Fortran_COMPILER=mpiifort - export CMAKE_CXX_COMPILER=mpiicc - export CMAKE_C_COMPILER=mpiicc - export FC=ifort - export MPI_HEADER_PATH='/apps/oneapi/mpi/2021.3.0' - # - - export NETCDF='/apps/netcdf/4.7.0/intel/18.0.5.274' - - export NetCDF_C_DIR=$NETCDF - export NetCDF_INCLUDE_DIR=$NETCDF"/include" - export NetCDF_LIBRARIES=$NETCDF"/lib" - export 
NetCDF_FORTRAN_DIR=$NETCDF - - export TVD_LIM=VL - # - cd ${src_dir} - git checkout $commit - - #clean cmake build folder - rm -rf build_mpiifort - mkdir build_mpiifort - - #cmake - cd build_mpiifort - cmake ../src \ - -DCMAKE_Fortran_COMPILER=$CMAKE_Fortran_COMPILER \ - -DCMAKE_CXX_COMPILER=$CMAKE_CXX_COMPILER \ - -DCMAKE_C_COMPILER=$CMAKE_C_COMPILER \ - -DMPI_HEADER_PATH=$MPI_HEADER_PATH \ - -DNetCDF_C_DIR=$NetCDF_C_DIR \ - -DNetCDF_INCLUDE_DIR=$NetCDF_INCLUDE_DIR \ - -DNetCDF_LIBRARIES=$NetCDF_LIBRARIES \ - -DNetCDF_FORTRAN_DIR=$NetCDF_FORTRAN_DIR \ - -DTVD_LIM=$TVD_LIM \ - -DUSE_PAHM=TRUE \ - -DCMAKE_C_FLAGS="-no-multibyte-chars" \ - -DCMAKE_CXX_FLAGS="-no-multibyte-chars" - - #gnu make - make -j 6 - - mkdir -p $install_dir - cp -L -r bin/ $install_dir - - rm -rf * - cmake ../src \ - -DCMAKE_Fortran_COMPILER=$CMAKE_Fortran_COMPILER \ - -DCMAKE_CXX_COMPILER=$CMAKE_CXX_COMPILER \ - -DCMAKE_C_COMPILER=$CMAKE_C_COMPILER \ - -DMPI_HEADER_PATH=$MPI_HEADER_PATH \ - -DNetCDF_C_DIR=$NetCDF_C_DIR \ - -DNetCDF_INCLUDE_DIR=$NetCDF_INCLUDE_DIR \ - -DNetCDF_LIBRARIES=$NetCDF_LIBRARIES \ - -DNetCDF_FORTRAN_DIR=$NetCDF_FORTRAN_DIR \ - -DTVD_LIM=$TVD_LIM \ - -DUSE_PAHM=TRUE \ - -DUSE_WWM=TRUE \ - -DCMAKE_C_FLAGS="-no-multibyte-chars" \ - -DCMAKE_CXX_FLAGS="-no-multibyte-chars" - - #gnu make - make -j 6 - - cp -L -r bin/ $install_dir - - if [ -f $link_path ]; then - rm $link_path - fi - ln -sf $install_dir $link_path - - rm -rf $src_dir - - cd $prev_dir -} - -if [ -d "$install_dir/bin" ]; then - echo "SCHISM commit $commit is alread compiled!" 
-else - _compile -fi diff --git a/rdhpcs/scripts/hurricane_mesh.py b/rdhpcs/scripts/hurricane_mesh.py deleted file mode 100755 index 099a9cf..0000000 --- a/rdhpcs/scripts/hurricane_mesh.py +++ /dev/null @@ -1,555 +0,0 @@ -#!/usr/bin/env python - -# Import modules -import logging -import os -import pathlib -import argparse -import sys -import warnings - -import numpy as np - -from fiona.drvsupport import supported_drivers -from shapely.geometry import box, MultiLineString -from shapely.ops import polygonize, unary_union, linemerge -from pyproj import CRS, Transformer -import geopandas as gpd - -from ocsmesh import Raster, Geom, Hfun, JigsawDriver, Mesh, utils -from ocsmesh.cli.subset_n_combine import SubsetAndCombine - - -# Setup modules -# Enable KML driver -#from https://stackoverflow.com/questions/72960340/attributeerror-nonetype-object-has-no-attribute-drvsupport-when-using-fiona -supported_drivers['KML'] = 'rw' -supported_drivers['LIBKML'] = 'rw' - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -logging.basicConfig( - stream=sys.stdout, - format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s', - datefmt='%Y-%m-%d:%H:%M:%S') - - -# Helper functions -def get_raster(path, crs=None): - rast = Raster(path) - if crs and rast.crs != crs: - rast.warp(crs) - return rast - - -def get_rasters(paths, crs=None): - rast_list = list() - for p in paths: - rast_list.append(get_raster(p, crs)) - return rast_list - - -def _generate_mesh_boundary_and_write( - out_dir, mesh_path, mesh_crs='EPSG:4326', threshold=-1000 - ): - - mesh = Mesh.open(str(mesh_path), crs=mesh_crs) - - logger.info('Calculating boundary types...') - mesh.boundaries.auto_generate(threshold=threshold) - - logger.info('Write interpolated mesh to disk...') - mesh.write( - str(out_dir/f'mesh_w_bdry.grd'), format='grd', overwrite=True - ) - - -def _write_mesh_box(out_dir, mesh_path, mesh_crs='EPSG:4326'): - mesh = Mesh.open(str(mesh_path), crs=mesh_crs) - 
domain_box = box(*mesh.get_multipolygon().bounds) - gdf_domain_box = gpd.GeoDataFrame( - geometry=[domain_box], crs=mesh.crs) - gdf_domain_box.to_file(out_dir/'domain_box') - - -# Main script -def main(args, clients): - - cmd = args.cmd - logger.info(f"The mesh command is {cmd}.") - - clients_dict = {c.script_name: c for c in clients} - - io = pathlib.Path('/lustre') - - storm_name = str(args.name).lower() - storm_year = str(args.year).lower() - tag = args.tag - if tag is None: - tag = f'{storm_name.lower()}_{storm_year}' - - logger.info(f"The simulation tag is {tag}.") - - dem_dir = pathlib.Path(io / 'dem') - out_dir = io / 'hurricanes' / tag / 'mesh' - out_dir.mkdir(exist_ok=True, parents=True) - - final_mesh_name = 'hgrid.gr3' - write_mesh_box = False - - - - if cmd == 'subset_n_combine': - final_mesh_name = 'final_mesh.2dm' - write_mesh_box = True - - args.rasters = [i for i in (dem_dir / 'gebco').iterdir() if i.suffix == '.tif'] - args.out = out_dir - args.fine_mesh = io / 'grid' / 'HSOFS_250m_v1.0_fixed.14' - args.coarse_mesh = io / 'grid' / 'WNAT_1km.14' - args.region_of_interset = io / 'hurricanes' / tag / 'windswath' - - elif cmd == 'hurricane_mesh': - final_mesh_name = 'mesh_no_bdry.2dm' - - if cmd in clients_dict: - clients_dict[cmd].run(args) - else: - raise ValueError(f'Invalid meshing command specified: <{cmd}>') - - #TODO interpolate DEM? 
- if write_mesh_box: - _write_mesh_box(out_dir, out_dir / final_mesh_name) - _generate_mesh_boundary_and_write(out_dir, out_dir / final_mesh_name) - - -class HurricaneMesher: - - @property - def script_name(self): - return 'hurricane_mesh' - - def __init__(self, sub_parser): - - this_parser = sub_parser.add_parser(self.script_name) - - this_parser.add_argument( - "--nprocs", type=int, help="Number of parallel threads to use when " - "computing geom and hfun.") - - this_parser.add_argument( - "--geom-nprocs", type=int, help="Number of processors used when " - "computing the geom, overrides --nprocs argument.") - - this_parser.add_argument( - "--hfun-nprocs", type=int, help="Number of processors used when " - "computing the hfun, overrides --nprocs argument.") - - this_parser.add_argument( - "--hmax", type=float, help="Maximum mesh size.", - default=20000) - - this_parser.add_argument( - "--hmin-low", type=float, default=1500, - help="Minimum mesh size for low resolution region.") - - this_parser.add_argument( - "--rate-low", type=float, default=2e-3, - help="Expansion rate for low resolution region.") - - this_parser.add_argument( - "--contours", type=float, nargs=2, - help="Contour specification applied to whole domain; " - "contour mesh size needs to be greater that hmin-low", - metavar="SPEC") - - this_parser.add_argument( - "--transition-elev", "-e", type=float, default=-200, - help="Cut off elev for high resolution region") - - this_parser.add_argument( - "--hmin-high", type=float, default=300, - help="Minimum mesh size for high resolution region.") - - this_parser.add_argument( - "--rate-high", type=float, default=1e-3, - help="Expansion rate for high resolution region") - - - def run(self, args): - - nprocs = args.nprocs - - geom_nprocs = nprocs - if args.geom_nprocs: - nprocs = args.geom_nprocs - geom_nprocs = -1 if nprocs == None else nprocs - - hfun_nprocs = nprocs - if args.hfun_nprocs: - nprocs = args.hfun_nprocs - hfun_nprocs = -1 if nprocs == None else 
nprocs - - io = pathlib.Path('/lustre') - - storm_name = str(args.name).lower() - storm_year = str(args.year).lower() - tag = args.tag - if tag is None: - tag = f'{storm_name.lower()}_{storm_year}' - - dem_dir = pathlib.Path(io / 'dem') - shp_dir = pathlib.Path(io / 'shape') - hurr_info = pathlib.Path( - io / 'hurricanes' / tag / 'windswath') - out_dir = pathlib.Path( - io / 'hurricanes' / tag / 'mesh') - out_dir.mkdir(exist_ok=True, parents=True) - - coarse_geom = shp_dir / 'base_geom' - fine_geom = shp_dir / 'high_geom' - - gebco_paths = [i for i in (dem_dir / 'gebco').iterdir() if str(i).endswith('.tif')] - cudem_paths = [i for i in (dem_dir / 'ncei19').iterdir() if str(i).endswith('.tif')] - all_dem_paths = [*gebco_paths, *cudem_paths] - tile_idx_path = f'zip://{str(dem_dir)}/tileindex_NCEI_ninth_Topobathy_2014.zip' - - - # Specs - wind_kt = 34 - filter_factor = 3 - max_n_hires_dem = 150 - - - # Geom (hardcoded based on prepared hurricane meshing spec) - z_max_lo = 0 - z_max_hi = 10 - z_max = max(z_max_lo, z_max_hi) - - # Hfun - hmax = args.hmax - - hmin_lo = args.hmin_low - rate_lo = args.rate_low - - contour_specs_lo = [] - if args.contours is not None: - for c_elev, m_size in args.contours: - if hmin_lo > m_size: - warnings.warn( - "Specified contour must have a mesh size" - f" larger than minimum low res size: {hmin_low}") - contour_specs_lo.append((c_elev, rate_lo, m_size)) - - else: - contour_specs_lo = [ - (-4000, rate_lo, 10000), - (-1000, rate_lo, 6000), - (-10, rate_lo, hmin_lo) - ] - - const_specs_lo = [ - (hmin_lo, 0, z_max) - ] - - cutoff_hi = args.transition_elev - hmin_hi = args.hmin_high - rate_hi = args.rate_high - - contour_specs_hi = [ - (0, rate_hi, hmin_hi) - ] - const_specs_hi = [ - (hmin_hi, 0, z_max) - ] - - - # Read inputs - logger.info("Reading input shapes...") - gdf_fine = gpd.read_file(fine_geom) - gdf_coarse = gpd.read_file(coarse_geom) - tile_idx = gpd.read_file(tile_idx_path) - - logger.info("Reading hurricane info...") - gdf = 
gpd.read_file(hurr_info) - gdf_wind_kt = gdf[gdf.RADII.astype(int) == wind_kt] - - # Simplify high resolution geometry - logger.info("Simplify high-resolution shape...") - gdf_fine = gpd.GeoDataFrame( - geometry=gdf_fine.to_crs("EPSG:3857").simplify(tolerance=hmin_hi / 2).buffer(0).to_crs(gdf_fine.crs), - crs=gdf_fine.crs) - - - # Calculate refinement region - logger.info(f"Create polygon from {wind_kt}kt windswath polygon...") - ext_poly = [i for i in polygonize([ext for ext in gdf_wind_kt.exterior])] - gdf_refine_super_0 = gpd.GeoDataFrame( - geometry=ext_poly, crs=gdf_wind_kt.crs) - - logger.info("Find upstream...") - domain_extent = gdf_fine.to_crs(gdf_refine_super_0.crs).total_bounds - domain_box = box(*domain_extent) - box_tol = 1/1000 * max(domain_extent[2]- domain_extent[0], domain_extent[3] - domain_extent[1]) - gdf_refine_super_0 = gdf_refine_super_0.intersection(domain_box.buffer(-box_tol)) - gdf_refine_super_0.plot() - ext_poly = [i for i in gdf_refine_super_0.explode().geometry] - - dmn_ext = [pl.exterior for mp in gdf_fine.geometry for pl in mp] - wnd_ext = [pl.exterior for pl in ext_poly] - - gdf_dmn_ext = gpd.GeoDataFrame(geometry=dmn_ext, crs=gdf_fine.crs) - gdf_wnd_ext = gpd.GeoDataFrame(geometry=wnd_ext, crs=gdf_wind_kt.crs) - - gdf_ext_over = gpd.overlay(gdf_dmn_ext, gdf_wnd_ext.to_crs(gdf_dmn_ext.crs), how="union") - - gdf_ext_x = gdf_ext_over[gdf_ext_over.intersects(gdf_wnd_ext.to_crs(gdf_ext_over.crs).unary_union)] - - filter_lines_threshold = np.max(gdf_dmn_ext.length) / filter_factor - lnstrs = linemerge([lnstr for lnstr in gdf_ext_x.explode().geometry]) - if not isinstance(lnstrs, MultiLineString): - lnstrs = [lnstrs] - lnstrs = [lnstr for lnstr in lnstrs if lnstr.length < filter_lines_threshold] - gdf_hurr_w_upstream = gdf_wnd_ext.to_crs(gdf_ext_x.crs) - gdf_hurr_w_upstream = gdf_hurr_w_upstream.append( - gpd.GeoDataFrame( - geometry=gpd.GeoSeries(lnstrs), - crs=gdf_ext_x.crs - )) - - - gdf_hurr_w_upstream_poly = gpd.GeoDataFrame( - 
geometry=gpd.GeoSeries(polygonize(gdf_hurr_w_upstream.unary_union)), - crs=gdf_hurr_w_upstream.crs) - - logger.info("Find intersection of domain polygon with impacted area upstream...") - gdf_refine_super_2 = gpd.overlay( - gdf_fine, gdf_hurr_w_upstream_poly.to_crs(gdf_fine.crs), - how='intersection' - ) - - gdf_refine_super_2.to_file(out_dir / 'dmn_hurr_upstream') - - logger.info("Selecting high resolution DEMs...") - gdf_dem_box = gpd.GeoDataFrame( - columns=['geometry', 'path'], - crs=gdf_refine_super_2.crs) - for path in all_dem_paths: - bbox = Raster(path).get_bbox(crs=gdf_dem_box.crs) - gdf_dem_box = gdf_dem_box.append( - gpd.GeoDataFrame( - {'geometry': [bbox], - 'path': str(path)}, - crs=gdf_dem_box.crs) - ) - gdf_dem_box = gdf_dem_box.reset_index() - - lo_res_paths = gebco_paths - - # TODO: use sjoin instead?! - gdf_hi_res_box = gdf_dem_box[gdf_dem_box.geometry.intersects(gdf_refine_super_2.unary_union)].reset_index() - hi_res_paths = gdf_hi_res_box.path.values.tolist() - - - # For refine cut off either use static geom at e.g. 200m depth or instead just use low-res for cut off polygon - - - # Or intersect with full geom? 
(timewise an issue for hfun creation) - logger.info("Calculate refinement area cutoff...") - cutoff_dem_paths = [i for i in gdf_hi_res_box.path.values.tolist() if pathlib.Path(i) in lo_res_paths] - cutoff_geom = Geom( - get_rasters(cutoff_dem_paths), - base_shape=gdf_coarse.unary_union, - base_shape_crs=gdf_coarse.crs, - zmax=cutoff_hi, - nprocs=geom_nprocs) - cutoff_poly = cutoff_geom.get_multipolygon() - - gdf_cutoff = gpd.GeoDataFrame( - geometry=gpd.GeoSeries(cutoff_poly), - crs=cutoff_geom.crs) - - gdf_draft_refine = gpd.overlay(gdf_refine_super_2, gdf_cutoff.to_crs(gdf_refine_super_2.crs), how='difference') - - refine_polys = [pl for pl in gdf_draft_refine.unary_union] - - gdf_final_refine = gpd.GeoDataFrame( - geometry=refine_polys, - crs=gdf_draft_refine.crs) - - - logger.info("Write landfall area to disk...") - gdf_final_refine.to_file(out_dir/'landfall_refine_area') - - gdf_geom = gpd.overlay( - gdf_coarse, - gdf_final_refine.to_crs(gdf_coarse.crs), - how='union') - - domain_box = box(*gdf_fine.total_bounds) - gdf_domain_box = gpd.GeoDataFrame( - geometry=[domain_box], crs=gdf_fine.crs) - gdf_domain_box.to_file(out_dir/'domain_box') - - geom = Geom(gdf_geom.unary_union, crs=gdf_geom.crs) - - - logger.info("Create low-res size function...") - hfun_lo = Hfun( - get_rasters(lo_res_paths), - base_shape=gdf_coarse.unary_union, - base_shape_crs=gdf_coarse.crs, - hmin=hmin_lo, - hmax=hmax, - nprocs=hfun_nprocs, - method='fast') - - logger.info("Add refinement spec to low-res size function...") - for ctr in contour_specs_lo: - hfun_lo.add_contour(*ctr) - hfun_lo.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for const in const_specs_lo: - hfun_lo.add_constant_value(*const) - - # hfun_lo.add_subtidal_flow_limiter(upper_bound=z_max) - # hfun_lo.add_subtidal_flow_limiter(hmin=hmin_lo, upper_bound=z_max) - - - logger.info("Compute low-res size function...") - jig_hfun_lo = hfun_lo.msh_t() - - - logger.info("Write low-res size function to disk...") - 
Mesh(jig_hfun_lo).write( - str(out_dir/f'hfun_lo_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - # For interpolation after meshing and use GEBCO for mesh size calculation in refinement area. - hfun_hi_rast_paths = hi_res_paths - if len(hi_res_paths) > max_n_hires_dem: - hfun_hi_rast_paths = gebco_paths - - logger.info("Create high-res size function...") - hfun_hi = Hfun( - get_rasters(hfun_hi_rast_paths), - base_shape=gdf_final_refine.unary_union, - base_shape_crs=gdf_final_refine.crs, - hmin=hmin_hi, - hmax=hmax, - nprocs=hfun_nprocs, - method='fast') - - # Apply low resolution criteria on hires as ewll - logger.info("Add refinement spec to high-res size function...") - for ctr in contour_specs_lo: - hfun_hi.add_contour(*ctr) - hfun_hi.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for ctr in contour_specs_hi: - hfun_hi.add_contour(*ctr) - hfun_hi.add_constant_value(value=ctr[2], lower_bound=ctr[0]) - - for const in const_specs_hi: - hfun_hi.add_constant_value(*const) - - # hfun_hi.add_subtidal_flow_limiter(upper_bound=z_max) - - logger.info("Compute high-res size function...") - jig_hfun_hi = hfun_hi.msh_t() - - logger.info("Write high-res size function to disk...") - Mesh(jig_hfun_hi).write( - str(out_dir/f'hfun_hi_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - jig_hfun_lo = Mesh.open(str(out_dir/f'hfun_lo_{hmin_hi}.2dm'), crs="EPSG:4326").msh_t - jig_hfun_hi = Mesh.open(str(out_dir/f'hfun_hi_{hmin_hi}.2dm'), crs="EPSG:4326").msh_t - - - logger.info("Combine size functions...") - gdf_final_refine = gpd.read_file(out_dir/'landfall_refine_area') - - utils.clip_mesh_by_shape( - jig_hfun_hi, - shape=gdf_final_refine.to_crs(jig_hfun_hi.crs).unary_union, - fit_inside=True, - in_place=True) - - jig_hfun_final = utils.merge_msh_t( - jig_hfun_lo, jig_hfun_hi, - drop_by_bbox=False, - can_overlap=False, - check_cross_edges=True) - - - logger.info("Write final size function to disk...") - hfun_mesh = Mesh(jig_hfun_final) - hfun_mesh.write( - 
str(out_dir/f'hfun_comp_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - - hfun = Hfun(hfun_mesh) - - logger.info("Generate mesh...") - driver = JigsawDriver(geom=geom, hfun=hfun, initial_mesh=True) - mesh = driver.run() - - - utils.reproject(mesh.msh_t, "EPSG:4326") - mesh.write( - str(out_dir/f'mesh_raw_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - mesh = Mesh.open(str(out_dir/f'mesh_raw_{hmin_hi}.2dm'), crs="EPSG:4326") - - dst_crs = "EPSG:4326" - interp_rast_list = [ - *get_rasters(gebco_paths, dst_crs), - *get_rasters(gdf_hi_res_box.path.values, dst_crs)] - - # TODO: Fix the deadlock issue with multiple cores when interpolating - logger.info("Interpolate DEMs on the generated mesh...") - mesh.interpolate(interp_rast_list, nprocs=1, method='nearest') - - logger.info("Write raw mesh to disk...") - mesh.write( - str(out_dir/f'mesh_{hmin_hi}.2dm'), - format='2dm', - overwrite=True) - - # Write the same mesh with a generic name - mesh.write( - str(out_dir/f'mesh_no_bdry.2dm'), - format='2dm', - overwrite=True) - - - -if __name__ == '__main__': - - parser = argparse.ArgumentParser() - parser.add_argument( - "--tag", "-t", - help="storm tag used for path creation", type=str) - parser.add_argument( - "name", help="name of the storm", type=str) - parser.add_argument( - "year", help="year of the storm", type=int) - - subparsers = parser.add_subparsers(dest='cmd') - subset_client = SubsetAndCombine(subparsers) - hurrmesh_client = HurricaneMesher(subparsers) - - args = parser.parse_args() - - logger.info(f"Mesh arguments are {args}.") - - main(args, [hurrmesh_client, subset_client]) diff --git a/rdhpcs/scripts/mesh.sbatch b/rdhpcs/scripts/mesh.sbatch deleted file mode 100644 index 3d1f669..0000000 --- a/rdhpcs/scripts/mesh.sbatch +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 - -# Wiating for _initialized_ indicating cluster is properly initialized -while [ ! 
-f ~/_initialized_ ]; -do - echo "Waiting for cluster initialization..." - sleep 10s -done - -# To redirect all the temp file creations in OCSMesh to luster file sys -export TMPDIR=/lustre/.tmp -mkdir -p $TMPDIR - -source ~/odssm-mesh/bin/activate -echo Executing: python \"~/hurricane_mesh.py ${KWDS} ${STORM} ${YEAR}\"... -python ~/hurricane_mesh.py ${STORM} ${YEAR} ${KWDS} diff --git a/rdhpcs/scripts/schism.sbatch b/rdhpcs/scripts/schism.sbatch deleted file mode 100644 index 6dafe58..0000000 --- a/rdhpcs/scripts/schism.sbatch +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash -#SBATCH --parsable -#SBATCH --exclusive -#SBATCH --mem=0 -#SBATCH --nodes=5 -#SBATCH --ntasks-per-node=36 - -# Wiating for _initialized_ indicating cluster is properly initialized -while [ ! -f ~/_initialized_ ]; -do - echo "Waiting for cluster initialization..." - sleep 10s -done - -PATH=~/schism/bin/:$PATH - -module purge - -module load cmake -module load intel/2021.3.0 -module load impi/2021.3.0 -module load hdf5/1.10.6 -module load netcdf/4.7.0 - -export MV2_ENABLE_AFFINITY=0 -ulimit -s unlimited - -echo "Starting solver..." -date - -set -ex - -pushd /lustre/${STORM_PATH} -mkdir -p outputs -#srun --mpi=pmi2 pschism_TVD-VL 4 -mpirun --ppn ${SLURM_TASKS_PER_NODE} ${SCHISM_EXEC} 4 - -if [ $? -eq 0 ]; then - echo "Combining outputs..." 
- date - # NOTE: Due to new IO, there's no need for combining main output -# pushd outputs -# times=$(ls schout_* | grep -o "schout[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) -# for i in $times; do -# combine_output11 -b $i -e $i -# done -# popd - # Combine hotstart - pushd outputs - if ls hotstart* >/dev/null 2>&1; then - times=$(ls hotstart_* | grep -o "hotstart[0-9_]\+" | awk 'BEGIN {FS = "_"}; {print $3}' | sort -h | uniq ) - for i in $times; do - combine_hotstart7 --iteration $i - done - fi - popd - - expect -f ~/combine_gr3.exp maxelev 1 - expect -f ~/combine_gr3.exp maxdahv 3 - mv maxdahv.gr3 maxelev.gr3 -t outputs -fi - - -echo "Done" -date diff --git a/terraform/backend/backend.tf b/terraform/backend/backend.tf deleted file mode 100644 index e0d880e..0000000 --- a/terraform/backend/backend.tf +++ /dev/null @@ -1,87 +0,0 @@ -provider "aws" { - region = "us-east-1" -} - - -################### -resource "aws_s3_bucket" "odssm-s3-backend" { - bucket = "tacc-nos-icogs-backend" - - tags = { - Name = "On-Demand Storm Surge Modeling" - Phase = "Development" - POCName = "saeed.moghimi@noaa.gov" - Project = "NOAA ICOGS-C" - LineOffice = "NOS" - DivisionBranch = "CSDL-CMMB" - Reason = "terraform" - } -} - - -################### -resource "aws_s3_bucket_acl" "odssm-s3-backend-acl" { - bucket = aws_s3_bucket.odssm-s3-backend.id - acl = "private" -} - - -################### -resource "aws_s3_bucket_public_access_block" "odssm-s3-backend-accessblock" { - bucket = aws_s3_bucket.odssm-s3-backend.id - - block_public_acls = true - block_public_policy = true - ignore_public_acls = true - restrict_public_buckets = true -} - - -################### -resource "aws_kms_key" "odssm-kms-s3-backend" { - description = "This key is used to encrypt bucket objects" - deletion_window_in_days = 10 -} - - -################### -resource "aws_s3_bucket_server_side_encryption_configuration" "odssm-s3-backend-encrypt" { - bucket = aws_s3_bucket.odssm-s3-backend.bucket - - 
rule { - apply_server_side_encryption_by_default { - kms_master_key_id = aws_kms_key.odssm-kms-s3-backend.arn - sse_algorithm = "aws:kms" - } - } -} - - -################### -resource "aws_s3_bucket_versioning" "odssm-s3-backend-versioning" { - bucket = aws_s3_bucket.odssm-s3-backend.id - versioning_configuration { - status = "Enabled" - } -} - - -################### -resource "aws_s3_bucket_lifecycle_configuration" "odssm-s3-backend-lifecycle" { - # Must have bucket versioning enabled first - depends_on = [aws_s3_bucket_versioning.odssm-s3-backend-versioning] - - bucket = aws_s3_bucket.odssm-s3-backend.id - - rule { - id = "statefile" - - filter {} - - noncurrent_version_expiration { - noncurrent_days = 60 - } - - status = "Enabled" - } -} diff --git a/terraform/main.tf b/terraform/main.tf deleted file mode 100644 index ad8d550..0000000 --- a/terraform/main.tf +++ /dev/null @@ -1,1100 +0,0 @@ -terraform { - backend "s3" { - bucket = "tacc-nos-icogs-backend" - key = "terraform/state" - region = "us-east-1" - } -} - -locals { - common_tags = { - Name = "On-Demand Storm Surge Modeling" - Phase = "Development" - POCName = "saeed.moghimi@noaa.gov" - Project = "NOAA ICOGS-C" - LineOffice = "NOS" - DivisionBranch = "CSDL-CMMB" - } - docker_user = "ondemand-user" - task_role_arn = "arn:aws:iam::${var.account_id}:role/${var.role_prefix}_ECS_Role" - execution_role_arn = "arn:aws:iam::${var.account_id}:role/${var.role_prefix}_ECS_Role" - ec2_profile_name = "${var.role_prefix}_ECS_Role2" - ecs_profile_name = "${var.role_prefix}_ECS_Role" - subnet_idx = 3 - ec2_ami = "ami-0d5eff06f840b45e9" - ecs_ami = "ami-03fe4d5b1d229063a" - # TODO: Make these to be terraform variables - ansible_var_path = "../ansible/inventory/group_vars/vars_from_terraform" - prefect_var_path = "../prefect/vars_from_terraform" - pvt_key_path = "~/.ssh/tacc_aws" - pub_key_path = "~/.ssh/tacc_aws.pub" - dev = "soroosh" -} - -################### -provider "aws" { - region = "us-east-1" -} - - 
-################ -data "aws_region" "current" {} - -################ -data "aws_caller_identity" "current" {} - -################ -data "aws_availability_zones" "available" { - state = "available" -} - -################ -data "aws_s3_object" "odssm-prep-ud" { - bucket = aws_s3_bucket.odssm-s3["statics"].bucket - key = "userdata/userdata-ocsmesh.txt" -} - -################ -data "aws_s3_object" "odssm-solve-ud" { - bucket = aws_s3_bucket.odssm-s3["statics"].bucket - key = "userdata/userdata-schism.txt" -} - -################ -data "aws_s3_object" "odssm-post-ud" { - bucket = aws_s3_bucket.odssm-s3["statics"].bucket - key = "userdata/userdata-viz.txt" -} - -################ -data "aws_s3_object" "odssm-wf-ud" { - bucket = aws_s3_bucket.odssm-s3["statics"].bucket - key = "userdata/userdata-wf.txt" -} - - -################### -resource "local_file" "odssm-ansible-vars" { - content = yamlencode({ - ansible_ssh_private_key_file = abspath(pathexpand(local.pvt_key_path)) - aws_account_id = data.aws_caller_identity.current.account_id - aws_default_region: data.aws_region.current.name - ec2_public_ip = aws_instance.odssm-local-agent-ec2.public_ip - ecs_task_role = local.task_role_arn - ecs_exec_role = local.execution_role_arn - efs_id = aws_efs_file_system.odssm-efs.id - prefect_image = aws_ecr_repository.odssm-repo["odssm-workflow"].repository_url - }) - filename = local.ansible_var_path -} -################### -resource "local_file" "odssm-prefect-vars" { - content = yamlencode({ - S3_BUCKET = aws_s3_bucket.odssm-s3["prefect"].bucket - OCSMESH_CLUSTER = aws_ecs_cluster.odssm-cluster-prep.name - SCHISM_CLUSTER = aws_ecs_cluster.odssm-cluster-solve.name - VIZ_CLUSTER = aws_ecs_cluster.odssm-cluster-post.name - WF_CLUSTER = aws_ecs_cluster.odssm-cluster-workflow.name - OCSMESH_TEMPLATE_1_ID = aws_launch_template.odssm-prep-instance-1-template.id - OCSMESH_TEMPLATE_2_ID = aws_launch_template.odssm-prep-instance-2-template.id - SCHISM_TEMPLATE_ID = 
aws_launch_template.odssm-solve-instance-template.id - VIZ_TEMPLATE_ID = aws_launch_template.odssm-post-instance-template.id - WF_TEMPLATE_ID = aws_launch_template.odssm-workflow-instance-template.id - ECS_TASK_ROLE = local.task_role_arn - ECS_EXEC_ROLE = local.execution_role_arn - ECS_SUBNET_ID = aws_subnet.odssm-subnet.id - ECS_EC2_SG = [ - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - WF_IMG = "${aws_ecr_repository.odssm-repo["odssm-workflow"].repository_url}:v0.4" - WF_ECS_TASK_ARN = aws_ecs_task_definition.odssm-flowrun-task.arn - }) - filename = local.prefect_var_path -} - -################### -resource "aws_key_pair" "odssm-ssh-key" { - key_name = "noaa-ondemand-${local.dev}-tacc-prefect-ssh-key" - public_key = file("${local.pub_key_path}") - tags = local.common_tags -} - - -################### -resource "aws_s3_bucket" "odssm-s3" { - for_each = { - statics = "tacc-nos-icogs-static" - prefect = "tacc-nos-icogs-prefect" - results = "tacc-icogs-results" - } - bucket = "${each.value}" - - tags = merge( - local.common_tags, - { - Reason = "${each.key}" - } - ) -} - - -################### -resource "aws_s3_bucket_acl" "odssm-s3-acl" { - for_each = aws_s3_bucket.odssm-s3 - bucket = each.value.id - acl = "private" -} - - -################### -resource "aws_s3_bucket_public_access_block" "odssm-s3-accessblock" { - for_each = aws_s3_bucket.odssm-s3 - bucket = each.value.id - - block_public_acls = true - block_public_policy = true - ignore_public_acls = true - restrict_public_buckets = true -} - - -################### -resource "aws_s3_bucket" "odssm-s3-website" { - bucket = "tacc-icogs-results-website" - - tags = merge( - local.common_tags, - { - Reason = "website" - } - ) -} - - -################### -resource "aws_s3_bucket_acl" "odssm-s3-website-acl" { - bucket = aws_s3_bucket.odssm-s3-website.id - acl = "public-read" -} - -################### -resource "aws_s3_bucket_website_configuration" 
"odssm-s3-website-config" { - bucket = aws_s3_bucket.odssm-s3-website.bucket - - index_document { - suffix = "index.html" - } - -} - -################### -resource "aws_efs_file_system" "odssm-efs" { - tags = local.common_tags -} - - -################### -resource "aws_efs_mount_target" "odssm-efs-mount" { - file_system_id = aws_efs_file_system.odssm-efs.id - subnet_id = aws_subnet.odssm-subnet.id - security_groups = [ - aws_security_group.odssm-sg-efs.id - ] -} - - -################### -resource "aws_vpc" "odssm-vpc" { - assign_generated_ipv6_cidr_block = false - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true - tags = local.common_tags -} - - -################### -resource "aws_subnet" "odssm-subnet" { - vpc_id = aws_vpc.odssm-vpc.id - - assign_ipv6_address_on_creation = false - - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - - cidr_block = "172.31.0.0/20" - - tags = local.common_tags -} - - -################### -resource "aws_security_group" "odssm-sg-default" { - name = "default" - description = "default VPC security group" - vpc_id = aws_vpc.odssm-vpc.id - - egress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 0 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "-1" - security_groups = [] - self = false - to_port = 0 - } - ] - ingress = [ - { - cidr_blocks = [] - description = "" - from_port = 0 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "-1" - security_groups = [] - self = true - to_port = 0 - } - ] - - tags = local.common_tags -} - - -################### -resource "aws_security_group" "odssm-sg-ecsout" { - name = "ecs" - description = "Allow ecs to access the internet" - vpc_id = aws_vpc.odssm-vpc.id - - egress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 0 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "-1" - security_groups = [] - self = false - to_port = 0 - } - ] - ingress = [] 
- - tags = local.common_tags -} - - -################### -resource "aws_security_group" "odssm-sg-efs" { - name = "efs" - description = "Allow EFS/NFS mounts" - vpc_id = aws_vpc.odssm-vpc.id - - egress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 2049 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "tcp" - security_groups = [] - self = false - to_port = 2049 - } - ] - ingress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 2049 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "tcp" - security_groups = [] - self = false - to_port = 2049 - } - ] - - tags = local.common_tags -} - -################### -resource "aws_security_group" "odssm-sg-ssh" { - name = "ssh-access" - description = "Allow SSH" - vpc_id = aws_vpc.odssm-vpc.id - - egress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 22 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "tcp" - security_groups = [] - self = false - to_port = 22 - } - ] - ingress = [ - { - cidr_blocks = [ - "0.0.0.0/0" - ] - description = "" - from_port = 22 - ipv6_cidr_blocks = [] - prefix_list_ids = [] - protocol = "tcp" - security_groups = [] - self = false - to_port = 22 - } - ] - - tags = local.common_tags -} - - -################### -resource "aws_ecr_repository" "odssm-repo" { - for_each = { - odssm-info = "Fetch hurricane information" - odssm-mesh = "Mesh the domain" - odssm-prep = "Setup SCHISM model" - odssm-solve = "Run SCHISM model" - odssm-post = "Generate visualizations" - odssm-workflow = "Run SCHISM model" - } - name = "${each.key}" - - image_tag_mutability = "IMMUTABLE" - - image_scanning_configuration { - scan_on_push = true - } - - - tags = merge( - local.common_tags, - { - Description = "${each.value}" - } - ) -} - -################### -resource "aws_ecs_cluster" "odssm-cluster-prep" { - - name = "odssm-ocsmesh" - - setting { - name = "containerInsights" - value = "disabled" - } - - tags = 
merge( - local.common_tags, - { - Description = "Cluster used for model preparation" - } - ) -} - - -################### -resource "aws_ecs_cluster" "odssm-cluster-solve" { - name = "odssm-schism" - - setting { - name = "containerInsights" - value = "disabled" - } - - tags = merge( - local.common_tags, - { - Description = "Cluster used for solving the model" - } - ) -} - -################### -resource "aws_ecs_cluster" "odssm-cluster-post" { - name = "odssm-viz" - - setting { - name = "containerInsights" - value = "disabled" - } - - tags = merge( - local.common_tags, - { - Description = "Cluster used for generating visualizations" - } - ) -} - - -################### -resource "aws_ecs_cluster" "odssm-cluster-workflow" { - name = "odssm-wf" - - setting { - name = "containerInsights" - value = "disabled" - } - - tags = merge( - local.common_tags, - { - Description = "Cluster used for running Prefect Flows on ECS" - } - ) -} - - -################### -resource "aws_ecs_task_definition" "odssm-info-task" { - - family = "odssm-info" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - task_role_arn = local.task_role_arn - execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "info" - image = "${aws_ecr_repository.odssm-repo["odssm-info"].repository_url}:v0.11" - - essential = true - - memoryReservation = 2000 # MB - mountPoints = [ - { - containerPath = "/home/${local.docker_user}/app/io/output" - sourceVolume = "efs_vol" - } - ] - logConfiguration = { - logDriver = "awslogs", - options = { - awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, - awslogs-create-group = "true", - awslogs-region = data.aws_region.current.name, - awslogs-stream-prefix = "odssm-info" - } - } - }]) - - volume { - name = "efs_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/hurricanes" - } - } - - tags = local.common_tags -} - - -################### -resource 
"aws_ecs_task_definition" "odssm-mesh-task" { - - family = "odssm-mesh" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - task_role_arn = local.task_role_arn - execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "mesh" - image = "${aws_ecr_repository.odssm-repo["odssm-mesh"].repository_url}:v0.11" - - essential = true - - memoryReservation = 123000 # MB - mountPoints = [ - { - containerPath = "/home/${local.docker_user}/app/io" - sourceVolume = "efs_vol" - }, - ] - logConfiguration = { - logDriver = "awslogs", - options = { - awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, - awslogs-create-group = "true", - awslogs-region = data.aws_region.current.name, - awslogs-stream-prefix = "odssm-mesh" - } - } - }]) - - volume { - name = "efs_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/" - } - } - - tags = local.common_tags -} - - -################### -resource "aws_ecs_task_definition" "odssm-prep-task" { - - family = "odssm-prep" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - task_role_arn = local.task_role_arn - execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "prep" - image = "${aws_ecr_repository.odssm-repo["odssm-prep"].repository_url}:v0.17" - - essential = true - - memoryReservation = 2000 # MB - mountPoints = [ - { - containerPath = "/home/${local.docker_user}/app/io/" - sourceVolume = "efs_vol" - } - ] - logConfiguration = { - logDriver = "awslogs", - options = { - awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, - awslogs-create-group = "true", - awslogs-region = data.aws_region.current.name, - awslogs-stream-prefix = "odssm-prep" - } - } - }]) - - volume { - name = "efs_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/" - } - } - - tags = local.common_tags -} - - 
-################### -resource "aws_ecs_task_definition" "odssm-solve-task" { - - family = "odssm-solve" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - task_role_arn = local.task_role_arn - execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "solve" - image = "${aws_ecr_repository.odssm-repo["odssm-solve"].repository_url}:v0.10" - - essential = true - - environment = [ - { - name = "SCHISM_NPROCS" - value = "48" - } - ] - - linuxParameters = { - capabilities = { - add = ["SYS_PTRACE"] - } - } - - memoryReservation = 50000 # MB - mountPoints = [ - { - containerPath = "/home/${local.docker_user}/app/io/hurricanes" - sourceVolume = "hurr_vol" - } - ] - logConfiguration = { - logDriver = "awslogs", - options = { - awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, - awslogs-create-group = "true", - awslogs-region = data.aws_region.current.name, - awslogs-stream-prefix = "odssm-solve" - } - } - }]) - - volume { - name = "hurr_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/hurricanes" - } - } - - tags = local.common_tags -} - - -################### -resource "aws_ecs_task_definition" "odssm-post-task" { - - family = "odssm-post" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - task_role_arn = local.task_role_arn - execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "post" - image = "${aws_ecr_repository.odssm-repo["odssm-post"].repository_url}:v0.7" - - essential = true - - memoryReservation = 6000 # MB - mountPoints = [ - { - containerPath = "/home/${local.docker_user}/app/io/hurricanes" - sourceVolume = "hurr_vol" - } - ] - logConfiguration = { - logDriver = "awslogs", - options = { - awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, - awslogs-create-group = "true", - awslogs-region = data.aws_region.current.name, - awslogs-stream-prefix = "odssm-post" 
- } - } - }]) - - volume { - name = "hurr_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/hurricanes" - } - } - - tags = local.common_tags -} - - -################### -resource "aws_ecs_task_definition" "odssm-flowrun-task" { - - family = "odssm-prefect-flowrun" - network_mode = "bridge" - requires_compatibilities = [ "EC2" ] - # Use the instance profile the task si running on instead -# task_role_arn = local.task_role_arn -# execution_role_arn = local.execution_role_arn - - container_definitions = jsonencode([ - { - name = "flow" - image = "${aws_ecr_repository.odssm-repo["odssm-workflow"].repository_url}:v0.4" - - essential = true - - memoryReservation = 500 # MB - mountPoints = [ - { - containerPath = "/efs" - sourceVolume = "efs_vol" - } - ] -# logConfiguration = { -# logDriver = "awslogs", -# options = { -# awslogs-group = aws_cloudwatch_log_group.odssm-cw-log-grp.name, -# awslogs-create-group = "true", -# awslogs-region = data.aws_region.current.name, -# awslogs-stream-prefix = "odssm-prefect-flowrun" -# } -# } - }]) - - volume { - name = "efs_vol" - efs_volume_configuration { - file_system_id = aws_efs_file_system.odssm-efs.id - root_directory = "/" - } - } - - tags = local.common_tags -} - -################### -resource "aws_instance" "odssm-local-agent-ec2" { - - ami = local.ec2_ami - - associate_public_ip_address = true - - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - - iam_instance_profile = local.ec2_profile_name - - instance_type = "t3.small" # micro cannot handle multiple workflow runs, t2 network is slow - - key_name = aws_key_pair.odssm-ssh-key.id - - subnet_id = aws_subnet.odssm-subnet.id - - vpc_security_group_ids = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - - tags = merge( - local.common_tags, - { - Role = "Workflow management 
agent" - } - ) - -} - - -################### -resource "aws_launch_template" "odssm-prep-instance-1-template" { - - name = "odssm-ocsmesh-awsall" - description = "Instance with sufficient memory for meshing process" - update_default_version = true - - image_id = local.ecs_ami - - key_name = aws_key_pair.odssm-ssh-key.key_name - - instance_type = "m5.8xlarge" - - iam_instance_profile { - name = local.ecs_profile_name - } - - network_interfaces { - associate_public_ip_address = true - subnet_id = aws_subnet.odssm-subnet.id - security_groups = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - } - - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 300 - } - } - - ebs_optimized = true - - placement { - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.common_tags, - { - Role = "Run model preparation tasks" - } - ) - } - - user_data = base64encode(data.aws_s3_object.odssm-prep-ud.body) -} - - -################### -resource "aws_launch_template" "odssm-prep-instance-2-template" { - - name = "odssm-ocsmesh-hybrid" - description = "Instance for pre processing in when meshing is done on HPC" - update_default_version = true - - image_id = local.ecs_ami - - key_name = aws_key_pair.odssm-ssh-key.key_name - - instance_type = "m5.xlarge" - - iam_instance_profile { - name = local.ecs_profile_name - } - - network_interfaces { - associate_public_ip_address = true - subnet_id = aws_subnet.odssm-subnet.id - security_groups = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - } - - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 300 - } - } - - ebs_optimized = true - - placement 
{ - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.common_tags, - { - Role = "Run model preparation tasks" - } - ) - } - - user_data = base64encode(data.aws_s3_object.odssm-prep-ud.body) -} - - -################### -resource "aws_launch_template" "odssm-solve-instance-template" { - - name = "odssm-schism" - description = "Instance with sufficient compute power for SCHISM" - update_default_version = true - - image_id = local.ecs_ami - - key_name = aws_key_pair.odssm-ssh-key.key_name - - instance_type = "c5.metal" - - iam_instance_profile { - name = local.ecs_profile_name - } - - network_interfaces { - associate_public_ip_address = true - subnet_id = aws_subnet.odssm-subnet.id - security_groups = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - } - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 30 - } - } - - ebs_optimized = true - - # For ensemble runs where we need many instances, we need to spread - # over multiple az -# placement { -# availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] -# } - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.common_tags, - { - Role = "Run model preparation tasks" - } - ) - } - - user_data = base64encode(data.aws_s3_object.odssm-solve-ud.body) -} - - -################### -resource "aws_launch_template" "odssm-post-instance-template" { - - name = "odssm-viz" - description = "Instance for generating visualization" - update_default_version = true - - image_id = local.ecs_ami - - key_name = aws_key_pair.odssm-ssh-key.key_name - - instance_type = "c5.xlarge" - - iam_instance_profile { - name = local.ecs_profile_name - } - - network_interfaces { - associate_public_ip_address = true - subnet_id = 
aws_subnet.odssm-subnet.id - security_groups = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - } - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 30 - } - } - - ebs_optimized = true - - placement { - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.common_tags, - { - Role = "Run visualization generation tasks" - } - ) - } - - user_data = base64encode(data.aws_s3_object.odssm-post-ud.body) -} - -################### -resource "aws_launch_template" "odssm-workflow-instance-template" { - - name = "odssm-wf" - description = "Instance for running Prefect Flows as ECSRun" - update_default_version = true - - image_id = local.ecs_ami - - key_name = aws_key_pair.odssm-ssh-key.key_name - - instance_type = "c5.4xlarge" - - # The workflow ECSRun instance needs to be able to create its own instances - iam_instance_profile { - name = local.ec2_profile_name - } - - network_interfaces { - associate_public_ip_address = true - subnet_id = aws_subnet.odssm-subnet.id - security_groups = [ -# aws_security_group.odssm-sg-default.id, - aws_security_group.odssm-sg-efs.id, - aws_security_group.odssm-sg-ecsout.id, - aws_security_group.odssm-sg-ssh.id, - ] - } - - block_device_mappings { - device_name = "/dev/xvda" - - ebs { - volume_size = 30 - } - } - - ebs_optimized = true - - placement { - availability_zone = data.aws_availability_zones.available.names[local.subnet_idx] - } - - tag_specifications { - resource_type = "instance" - - tags = merge( - local.common_tags, - { - Role = "Run ECSRun tasks" - } - ) - } - - user_data = base64encode(data.aws_s3_object.odssm-wf-ud.body) -} - -resource "aws_cloudwatch_log_group" "odssm-cw-log-grp" { - name = "odssm_ecs_task_docker_logs" - -# kms_key_id = - - tags = merge( - 
local.common_tags, - { - Role = "Watch ECS logs" - } - ) -} diff --git a/terraform/outputs.tf b/terraform/outputs.tf deleted file mode 100644 index 5c5789a..0000000 --- a/terraform/outputs.tf +++ /dev/null @@ -1,30 +0,0 @@ -output "ec2_ip" { - description = "IP of the EC2 instance" - value = aws_instance.odssm-local-agent-ec2.public_ip -} - -output "efs_id" { - description = "ID of the EFS instance" - value = aws_efs_file_system.odssm-efs.id -} - -output "ecr_url" { - description = "URL of the ECR Repositories" - value = toset([ - for repo in aws_ecr_repository.odssm-repo: repo.repository_url - ]) -} - -output "ansible_var_path" { - description = "Path of the Ansible variable file written to local disk" - value = local_file.odssm-ansible-vars.filename -} - -output "prefect_var_path" { - description = "Path of the Prefect variable file written to local disk" - value = local_file.odssm-prefect-vars.filename -} - -output "account_id" { - value = data.aws_caller_identity.current.account_id -} diff --git a/terraform/ud/userdata-ocsmesh.txt b/terraform/ud/userdata-ocsmesh.txt deleted file mode 100644 index 5b98656..0000000 --- a/terraform/ud/userdata-ocsmesh.txt +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo ECS_CLUSTER=odssm-ocsmesh >> /etc/ecs/ecs.config diff --git a/terraform/ud/userdata-schism.txt b/terraform/ud/userdata-schism.txt deleted file mode 100644 index 0dca752..0000000 --- a/terraform/ud/userdata-schism.txt +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo ECS_CLUSTER=odssm-schism >> /etc/ecs/ecs.config diff --git a/terraform/ud/userdata-viz.txt b/terraform/ud/userdata-viz.txt deleted file mode 100644 index c27ec09..0000000 --- a/terraform/ud/userdata-viz.txt +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo ECS_CLUSTER=odssm-viz >> /etc/ecs/ecs.config diff --git a/terraform/ud/userdata-wf.txt b/terraform/ud/userdata-wf.txt deleted file mode 100644 index a51b9a2..0000000 --- a/terraform/ud/userdata-wf.txt +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - 
-echo ECS_CLUSTER=odssm-wf >> /etc/ecs/ecs.config diff --git a/terraform/variables.tf b/terraform/variables.tf deleted file mode 100644 index ec9a0ed..0000000 --- a/terraform/variables.tf +++ /dev/null @@ -1,9 +0,0 @@ -variable account_id { - type = string - description = "AWS account ID number used for the system" -} - -variable role_prefix { - type = string - description = "Prefix used for the name of the roles created for this system" -} From 96a78a28ac35d0f837402469dd06a5feadc51ffc Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 14 Jun 2024 20:04:00 +0000 Subject: [PATCH 37/54] Move files to their new organization --- {singularity/post/files => stormworkflow}/Tidal_validation.py | 0 {singularity/post/files => stormworkflow}/__init__.py | 0 {singularity/prep/files => stormworkflow}/analyze_ensemble.py | 0 {singularity/prep/files => stormworkflow}/combine_ensemble.py | 0 {singularity/post/files => stormworkflow}/defn.py | 0 {singularity/prep/files => stormworkflow}/download_data.py | 0 {singularity/post/files => stormworkflow}/generate_viz.py | 0 {singularity/info/files => stormworkflow}/hurricane_data.py | 0 {singularity/post/files => stormworkflow}/hurricane_funcs.py | 0 {singularity/ocsmesh/files => stormworkflow}/hurricane_mesh.py | 0 {singularity/post/files => stormworkflow}/max_ele_vs_hwm.py | 0 {singularity/post/files => stormworkflow}/maxelev_diff.py | 0 {singularity/prep/files => stormworkflow}/refs/param.nml | 0 {singularity/prep/files => stormworkflow}/refs/wwminput.nml | 0 .../solve/files => stormworkflow/scripts}/combine_gr3.exp | 0 {singularity/solve/files => stormworkflow/scripts}/entrypoint.sh | 0 {singularity => stormworkflow}/scripts/input.conf | 0 {singularity => stormworkflow}/scripts/workflow.sh | 0 {singularity/prep/files => stormworkflow}/setup_ensemble.py | 0 {singularity/prep/files => stormworkflow}/setup_model.py | 0 {singularity/scripts => stormworkflow/slurm}/mesh.sbatch | 0 {singularity/scripts => 
stormworkflow/slurm}/post.sbatch | 0 {singularity/scripts => stormworkflow/slurm}/prep.sbatch | 0 {singularity/scripts => stormworkflow/slurm}/schism.sbatch | 0 {singularity/prep/files => stormworkflow}/wwm.py | 0 {singularity/scripts => tests}/test.sh | 0 26 files changed, 0 insertions(+), 0 deletions(-) rename {singularity/post/files => stormworkflow}/Tidal_validation.py (100%) rename {singularity/post/files => stormworkflow}/__init__.py (100%) rename {singularity/prep/files => stormworkflow}/analyze_ensemble.py (100%) rename {singularity/prep/files => stormworkflow}/combine_ensemble.py (100%) rename {singularity/post/files => stormworkflow}/defn.py (100%) rename {singularity/prep/files => stormworkflow}/download_data.py (100%) rename {singularity/post/files => stormworkflow}/generate_viz.py (100%) rename {singularity/info/files => stormworkflow}/hurricane_data.py (100%) rename {singularity/post/files => stormworkflow}/hurricane_funcs.py (100%) rename {singularity/ocsmesh/files => stormworkflow}/hurricane_mesh.py (100%) rename {singularity/post/files => stormworkflow}/max_ele_vs_hwm.py (100%) rename {singularity/post/files => stormworkflow}/maxelev_diff.py (100%) rename {singularity/prep/files => stormworkflow}/refs/param.nml (100%) rename {singularity/prep/files => stormworkflow}/refs/wwminput.nml (100%) rename {singularity/solve/files => stormworkflow/scripts}/combine_gr3.exp (100%) rename {singularity/solve/files => stormworkflow/scripts}/entrypoint.sh (100%) rename {singularity => stormworkflow}/scripts/input.conf (100%) rename {singularity => stormworkflow}/scripts/workflow.sh (100%) rename {singularity/prep/files => stormworkflow}/setup_ensemble.py (100%) rename {singularity/prep/files => stormworkflow}/setup_model.py (100%) rename {singularity/scripts => stormworkflow/slurm}/mesh.sbatch (100%) rename {singularity/scripts => stormworkflow/slurm}/post.sbatch (100%) rename {singularity/scripts => stormworkflow/slurm}/prep.sbatch (100%) rename 
{singularity/scripts => stormworkflow/slurm}/schism.sbatch (100%) rename {singularity/prep/files => stormworkflow}/wwm.py (100%) rename {singularity/scripts => tests}/test.sh (100%) diff --git a/singularity/post/files/Tidal_validation.py b/stormworkflow/Tidal_validation.py similarity index 100% rename from singularity/post/files/Tidal_validation.py rename to stormworkflow/Tidal_validation.py diff --git a/singularity/post/files/__init__.py b/stormworkflow/__init__.py similarity index 100% rename from singularity/post/files/__init__.py rename to stormworkflow/__init__.py diff --git a/singularity/prep/files/analyze_ensemble.py b/stormworkflow/analyze_ensemble.py similarity index 100% rename from singularity/prep/files/analyze_ensemble.py rename to stormworkflow/analyze_ensemble.py diff --git a/singularity/prep/files/combine_ensemble.py b/stormworkflow/combine_ensemble.py similarity index 100% rename from singularity/prep/files/combine_ensemble.py rename to stormworkflow/combine_ensemble.py diff --git a/singularity/post/files/defn.py b/stormworkflow/defn.py similarity index 100% rename from singularity/post/files/defn.py rename to stormworkflow/defn.py diff --git a/singularity/prep/files/download_data.py b/stormworkflow/download_data.py similarity index 100% rename from singularity/prep/files/download_data.py rename to stormworkflow/download_data.py diff --git a/singularity/post/files/generate_viz.py b/stormworkflow/generate_viz.py similarity index 100% rename from singularity/post/files/generate_viz.py rename to stormworkflow/generate_viz.py diff --git a/singularity/info/files/hurricane_data.py b/stormworkflow/hurricane_data.py similarity index 100% rename from singularity/info/files/hurricane_data.py rename to stormworkflow/hurricane_data.py diff --git a/singularity/post/files/hurricane_funcs.py b/stormworkflow/hurricane_funcs.py similarity index 100% rename from singularity/post/files/hurricane_funcs.py rename to stormworkflow/hurricane_funcs.py diff --git 
a/singularity/ocsmesh/files/hurricane_mesh.py b/stormworkflow/hurricane_mesh.py similarity index 100% rename from singularity/ocsmesh/files/hurricane_mesh.py rename to stormworkflow/hurricane_mesh.py diff --git a/singularity/post/files/max_ele_vs_hwm.py b/stormworkflow/max_ele_vs_hwm.py similarity index 100% rename from singularity/post/files/max_ele_vs_hwm.py rename to stormworkflow/max_ele_vs_hwm.py diff --git a/singularity/post/files/maxelev_diff.py b/stormworkflow/maxelev_diff.py similarity index 100% rename from singularity/post/files/maxelev_diff.py rename to stormworkflow/maxelev_diff.py diff --git a/singularity/prep/files/refs/param.nml b/stormworkflow/refs/param.nml similarity index 100% rename from singularity/prep/files/refs/param.nml rename to stormworkflow/refs/param.nml diff --git a/singularity/prep/files/refs/wwminput.nml b/stormworkflow/refs/wwminput.nml similarity index 100% rename from singularity/prep/files/refs/wwminput.nml rename to stormworkflow/refs/wwminput.nml diff --git a/singularity/solve/files/combine_gr3.exp b/stormworkflow/scripts/combine_gr3.exp similarity index 100% rename from singularity/solve/files/combine_gr3.exp rename to stormworkflow/scripts/combine_gr3.exp diff --git a/singularity/solve/files/entrypoint.sh b/stormworkflow/scripts/entrypoint.sh similarity index 100% rename from singularity/solve/files/entrypoint.sh rename to stormworkflow/scripts/entrypoint.sh diff --git a/singularity/scripts/input.conf b/stormworkflow/scripts/input.conf similarity index 100% rename from singularity/scripts/input.conf rename to stormworkflow/scripts/input.conf diff --git a/singularity/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh similarity index 100% rename from singularity/scripts/workflow.sh rename to stormworkflow/scripts/workflow.sh diff --git a/singularity/prep/files/setup_ensemble.py b/stormworkflow/setup_ensemble.py similarity index 100% rename from singularity/prep/files/setup_ensemble.py rename to 
stormworkflow/setup_ensemble.py diff --git a/singularity/prep/files/setup_model.py b/stormworkflow/setup_model.py similarity index 100% rename from singularity/prep/files/setup_model.py rename to stormworkflow/setup_model.py diff --git a/singularity/scripts/mesh.sbatch b/stormworkflow/slurm/mesh.sbatch similarity index 100% rename from singularity/scripts/mesh.sbatch rename to stormworkflow/slurm/mesh.sbatch diff --git a/singularity/scripts/post.sbatch b/stormworkflow/slurm/post.sbatch similarity index 100% rename from singularity/scripts/post.sbatch rename to stormworkflow/slurm/post.sbatch diff --git a/singularity/scripts/prep.sbatch b/stormworkflow/slurm/prep.sbatch similarity index 100% rename from singularity/scripts/prep.sbatch rename to stormworkflow/slurm/prep.sbatch diff --git a/singularity/scripts/schism.sbatch b/stormworkflow/slurm/schism.sbatch similarity index 100% rename from singularity/scripts/schism.sbatch rename to stormworkflow/slurm/schism.sbatch diff --git a/singularity/prep/files/wwm.py b/stormworkflow/wwm.py similarity index 100% rename from singularity/prep/files/wwm.py rename to stormworkflow/wwm.py diff --git a/singularity/scripts/test.sh b/tests/test.sh similarity index 100% rename from singularity/scripts/test.sh rename to tests/test.sh From 3432f29d01ceda41a78700dd2991520ada563441 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Fri, 14 Jun 2024 20:05:34 +0000 Subject: [PATCH 38/54] Remove singularity image and add env definition files --- environment.yml | 53 +++++++++++++--- requirements.txt | 5 ++ singularity/info/environment.yml | 14 ----- singularity/info/info.def | 33 ---------- singularity/ocsmesh/environment.yml | 29 --------- singularity/ocsmesh/ocsmesh.def | 56 ----------------- singularity/post/environment.yml | 97 ----------------------------- singularity/post/post.def | 28 --------- singularity/prep/environment.yml | 41 ------------ singularity/prep/prep.def | 60 ------------------ singularity/scripts/build.sh | 10 --- 
singularity/scripts/combine_gr3.exp | 52 ---------------- singularity/solve/solve.def | 81 ------------------------ 13 files changed, 48 insertions(+), 511 deletions(-) create mode 100644 requirements.txt delete mode 100644 singularity/info/environment.yml delete mode 100644 singularity/info/info.def delete mode 100644 singularity/ocsmesh/environment.yml delete mode 100644 singularity/ocsmesh/ocsmesh.def delete mode 100644 singularity/post/environment.yml delete mode 100644 singularity/post/post.def delete mode 100644 singularity/prep/environment.yml delete mode 100644 singularity/prep/prep.def delete mode 100755 singularity/scripts/build.sh delete mode 100644 singularity/scripts/combine_gr3.exp delete mode 100644 singularity/solve/solve.def diff --git a/environment.yml b/environment.yml index 0af1717..73e02e4 100644 --- a/environment.yml +++ b/environment.yml @@ -1,14 +1,47 @@ -name: odssm +name: stormworkflow channels: - conda-forge - - defaults dependencies: - - python=3.10 - - prefect=1.4, <2 - - cloudpickle - - ansible-core - - terraform + - cartopy + - cf-python + - cfdm + - cfgrib + - cfunits + - colored-traceback + - cmocean + - esmf + - esmpy + - fiona + - gdal + - geoalchemy2 + - geopandas>=0.13 + - geos + - hdf5 + - matplotlib + - mpi4py + - netcdf4 + - numpy + - numba + - ocsmesh==1.5.3 + - pandas + - pip + - proj + - pyarrow + - pygeos + - pyproj + - python<3.11 + - pytz + - shapely>=2 + - rasterio - requests - - dnspython - - boto3 - - dunamai + - rtree + - scipy + - seawater + - typing-extensions + - tqdm + - udunits2 + - utm + - xarray==2023.7.0 + - pip: + - --no-deps + - -rrequirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..5cc4a85 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +chaospy>=4.2.7 +stormevents==2.2.3 +pyschism>=0.1.15 +coupledmodeldriver>=1.6.6 +ensembleperturbation>=1.1.2 diff --git a/singularity/info/environment.yml b/singularity/info/environment.yml deleted file mode 100644 
index 53230db..0000000 --- a/singularity/info/environment.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - cartopy - - cfunits - - gdal - - geopandas>=0.13 - - geos - - proj - - pygeos - - pyproj - - python=3.9 - - shapely>=1.8 diff --git a/singularity/info/info.def b/singularity/info/info.def deleted file mode 100644 index a133ce7..0000000 --- a/singularity/info/info.def +++ /dev/null @@ -1,33 +0,0 @@ -BootStrap: docker -From: continuumio/miniconda3:24.3.0-0 - -%files - environment.yml - files/hurricane_data.py /scripts/ - -%environment - export PYTHONPATH=/scripts - -%post - apt update && apt upgrade -y && apt install -y git - - conda install mamba -n base -c conda-forge - conda install libarchive -n base -c conda-forge - mamba update --name base --channel defaults conda - mamba env create -n info --file /environment.yml - mamba clean --all --yes - - conda run -n info --no-capture-output \ - pip install stormevents==2.2.3 - - - conda clean --all - apt remove -y git - - -%runscript - conda run -n info --no-capture-output python -m $* - - -%labels - Author "Soroosh Mani" diff --git a/singularity/ocsmesh/environment.yml b/singularity/ocsmesh/environment.yml deleted file mode 100644 index f8e2d6d..0000000 --- a/singularity/ocsmesh/environment.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - python<3.11 - - gdal - - geos - - proj - - netcdf4 - - udunits2 - - pyproj - - shapely - - rasterio - - fiona - - pygeos - - geopandas - - utm - - scipy - - numba - - numpy>=1.21 - - matplotlib - - requests - - tqdm - - mpi4py - - pyarrow - - pytz - - geoalchemy2 - - colored-traceback - - typing-extensions diff --git a/singularity/ocsmesh/ocsmesh.def b/singularity/ocsmesh/ocsmesh.def deleted file mode 100644 index 12ea35c..0000000 --- a/singularity/ocsmesh/ocsmesh.def +++ /dev/null @@ -1,56 +0,0 @@ -BootStrap: docker -#From: centos:centos7.8.2003 -From: 
continuumio/miniconda3:23.3.1-0-alpine - -%files - environment.yml - files/hurricane_mesh.py /scripts/ - -%environment - export PYTHONPATH=/scripts - -%post - ENV_NAME=ocsmesh - - apk update && apk upgrade && apk --no-cache add \ - git \ - gcc \ - g++ \ - make \ - cmake \ - libstdc++ \ - libarchive - - conda install mamba -n base -c conda-forge - mamba update --name base --channel defaults conda - mamba env create -n $ENV_NAME --file /environment.yml - mamba clean --all --yes - - git clone https://github.com/dengwirda/jigsaw-python.git - git -C jigsaw-python checkout f875719 - conda run -n $ENV_NAME --no-capture-output \ - python3 jigsaw-python/setup.py build_external - cp jigsaw-python/external/jigsaw/bin/* $ENV_PREFIX/bin - cp jigsaw-python/external/jigsaw/lib/* $ENV_PREFIX/lib - conda run -n $ENV_NAME --no-capture-output \ - pip install ./jigsaw-python - rm -rf jigsaw-python - git clone https://github.com/noaa-ocs-modeling/ocsmesh - git -C ocsmesh checkout cc0b82a #subset fix branch - conda run -n $ENV_NAME --no-capture-output \ - pip install ./ocsmesh - - conda clean --all && apk del \ - git \ - gcc \ - g++ \ - make \ - cmake - - -%runscript - conda run -n ocsmesh --no-capture-output python -m hurricane_mesh $* - - -%labels - Author "Soroosh Mani" diff --git a/singularity/post/environment.yml b/singularity/post/environment.yml deleted file mode 100644 index 357ae8b..0000000 --- a/singularity/post/environment.yml +++ /dev/null @@ -1,97 +0,0 @@ -name: odssm-post-env -channels: - - conda-forge - - defaults -dependencies: - - appdirs - - python>=3.9 # because of searvey - - pygeos - - geos - - gdal - - proj - - pyproj - - cartopy - - udunits2 - - shapely>=1.8.0 - - arrow - - attrs - - backcall - - beautifulsoup4 - - bokeh - - branca - - brotlipy - - bs4 - - certifi - - cffi - - cftime - - cfunits - - cfgrib - - chardet - - click - - click-plugins - - cligj - - cryptography - - cycler - - decorator - - f90nml - - fiona - - folium - - gdal - - geopandas - - geos - - 
geotiff - - glib - - icu - - idna - - ipython - - ipython_genutils - - jedi - - jinja2 - - kiwisolver - - krb5 - - lxml - - markupsafe - - matplotlib - - munch - - netcdf4 - - hdf5 - - numpy - - olefile - - packaging - - pandas - - parso - - pexpect - - pickleshare - - pillow - - prompt-toolkit - - ptyprocess - - pycparser - - pygeos - - pygments - - pyopenssl - - pyparsing - - pyproj - - pysocks - - python-wget - - pytz - - pyyaml - - readline - - requests - - retrying - - rtree - - setuptools - - shapely - - six - - searvey - - soupsieve - - tbb - - tiledb - - tk - - tornado - - traitlets - - typing_extensions - - wcwidth - - wheel - - zstd - - pip: - - pyschism diff --git a/singularity/post/post.def b/singularity/post/post.def deleted file mode 100644 index be0df3a..0000000 --- a/singularity/post/post.def +++ /dev/null @@ -1,28 +0,0 @@ -BootStrap: docker -#From: centos:centos7.8.2003 -From: continuumio/miniconda3:23.3.1-0-alpine - -%files - environment.yml - files/*.py /scripts/ - -%environment - export PYTHONPATH=/scripts - -%post - ENV_NAME=post - - apk update && apk upgrade - - conda install mamba -n base -c conda-forge - mamba update --name base --channel defaults conda - mamba env create -n $ENV_NAME --file /environment.yml - mamba clean --all --yes - - -%runscript - conda run -n post --no-capture-output python -m generate_viz $* - - -%labels - Author "Soroosh Mani" diff --git a/singularity/prep/environment.yml b/singularity/prep/environment.yml deleted file mode 100644 index 0d24986..0000000 --- a/singularity/prep/environment.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: icogsc -channels: - - conda-forge -dependencies: - - python<3.10 - - pip - - gdal - - geos - - proj - - netcdf4 - - hdf5 - - cartopy - - cfunits - - cf-python - - cfgrib - - cmocean - - esmf - - esmpy - - cfdm - - udunits2 - - pyproj - - shapely>=2 - - rasterio - - fiona - - geopandas>=0.11.0 - - rtree - - pandas - - utm - - scipy - - numpy - - matplotlib - - requests - - tqdm - - mpi4py - - 
pyarrow - - pytz - - geoalchemy2 - - seawater - - xarray==2023.7.0 - - pip: - - chaospy>=4.2.7 diff --git a/singularity/prep/prep.def b/singularity/prep/prep.def deleted file mode 100644 index 2677bef..0000000 --- a/singularity/prep/prep.def +++ /dev/null @@ -1,60 +0,0 @@ -BootStrap: docker -From: continuumio/miniconda3:24.3.0-0 - -%files - environment.yml - files/*.py /scripts/ - files/refs/* /refs/ - -%environment - export PYTHONPATH=/scripts - -%post - ENV_NAME=prep - - apt update && apt upgrade && apt install -y \ - git \ - libarchive - - conda install mamba -n base -c conda-forge - mamba update --name base --channel defaults conda - mamba env create -n $ENV_NAME --file /environment.yml - -# git clone https://github.com/noaa-ocs-modeling/ensembleperturbation -# cd ensembleperturbation -# git fetch origin pull/139/head:rmax -# git checkout rmax -# conda run -n $ENV_NAME --no-capture-output \ -# pip install ./ -# cd .. -# rm -rf ensembleperturbation - - conda run -n $ENV_NAME --no-capture-output \ - pip install "pyschism>=0.1.15" - conda run -n $ENV_NAME --no-capture-output \ - pip install "coupledmodeldriver>=1.6.6" - conda run -n $ENV_NAME --no-capture-output \ - pip install stormevents==2.2.3 - conda run -n $ENV_NAME --no-capture-output \ - pip install "ensembleperturbation>=1.1.2" - conda run -n $ENV_NAME --no-capture-output \ - pip uninstall -y pygeos geopandas # We use shapely 2 - - mamba install -y -n $ENV_NAME -cconda-forge \ - --force-reinstall geopandas geopandas-base - - git clone https://github.com/schism-dev/schism - cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/Relocate/relocate_source_feeder.py /scripts - cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/feeder_heads_bases_v2.1.xy /refs -# cp -v schism/src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/relocate_florence.reg /refs - rm -rfv schism - - mamba clean --all --yes && apt 
remove -y git - - -%runscript - conda run -n prep --no-capture-output python -m $* - - -%labels - Author "Soroosh Mani" diff --git a/singularity/scripts/build.sh b/singularity/scripts/build.sh deleted file mode 100755 index a51aa35..0000000 --- a/singularity/scripts/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -L_DEF_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/ -L_IMG_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/imgs - -mkdir -p $L_IMG_DIR -for i in info; do - pushd $L_DEF_DIR/$i/ - sudo singularity build $L_IMG_DIR/$i.sif $i.def -# singularity build --fakeroot $L_IMG_DIR/$i.sif $i.def - popd -done diff --git a/singularity/scripts/combine_gr3.exp b/singularity/scripts/combine_gr3.exp deleted file mode 100644 index ac4b0b3..0000000 --- a/singularity/scripts/combine_gr3.exp +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/expect -f -# -# This Expect script was generated by autoexpect on Tue Dec 21 16:42:59 2021 -# Expect and autoexpect were both written by Don Libes, NIST. -# -# Note that autoexpect does not guarantee a working script. It -# necessarily has to guess about certain things. Two reasons a script -# might fail are: -# -# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet, -# etc.) and devices discard or ignore keystrokes that arrive "too -# quickly" after prompts. If you find your new script hanging up at -# one spot, try adding a short sleep just before the previous send. -# Setting "force_conservative" to 1 (see below) makes Expect do this -# automatically - pausing briefly before sending each character. This -# pacifies every program I know of. The -c flag makes the script do -# this in the first place. The -C flag allows you to define a -# character to toggle this mode off and on. 
- -set force_conservative 0 ;# set to 1 to force conservative mode even if - ;# script wasn't run conservatively originally -if {$force_conservative} { - set send_slow {1 .1} - proc send {ignore arg} { - sleep .1 - exp_send -s -- $arg - } -} - -# -# 2) differing output - Some programs produce different output each time -# they run. The "date" command is an obvious example. Another is -# ftp, if it produces throughput statistics at the end of a file -# transfer. If this causes a problem, delete these patterns or replace -# them with wildcards. An alternative is to use the -p flag (for -# "prompt") which makes Expect only look for the last line of output -# (i.e., the prompt). The -P flag allows you to define a character to -# toggle this mode off and on. -# -# Read the man page for more info. -# -# -Don - - -set timeout -1 -spawn combine_gr3 -match_max 100000 -expect -exact " Input file name (e.g.: maxelev):\r" -send -- "[lindex $argv 0]\r" -expect -exact " Input # of scalar fields:\r" -send -- "[lindex $argv 1]\r" -expect eof diff --git a/singularity/solve/solve.def b/singularity/solve/solve.def deleted file mode 100644 index 6fe9773..0000000 --- a/singularity/solve/solve.def +++ /dev/null @@ -1,81 +0,0 @@ -BootStrap: docker -#From: centos:centos7.8.2003 -From: ubuntu:22.10 - -%files - files/entrypoint.sh /scripts/ - files/combine_gr3.exp /scripts/ - - -%post - apt-get update && apt-get upgrade -y && apt-get install -y \ - git \ - gcc \ - g++ \ - gfortran \ - make \ - cmake \ - openmpi-bin libopenmpi-dev \ - libhdf5-dev \ - libnetcdf-dev libnetcdf-mpi-dev libnetcdff-dev \ - python3 \ - python-is-python3 - - - # Install SCHISM - git clone https://github.com/SorooshMani-NOAA/schism.git - git -C schism checkout a0817a8 - mkdir -p schism/build - PREV_PWD=$PWD - cd schism/build - cmake ../src/ \ - -DCMAKE_Fortran_COMPILER=mpifort \ - -DCMAKE_C_COMPILER=mpicc \ - -DNetCDF_Fortran_LIBRARY=$(nc-config --libdir)/libnetcdff.so \ - -DNetCDF_C_LIBRARY=$(nc-config 
--libdir)/libnetcdf.so \ - -DNetCDF_INCLUDE_DIR=$(nc-config --includedir) \ - -DUSE_PAHM=TRUE \ - -DCMAKE_Fortran_FLAGS_RELEASE="-O2 -ffree-line-length-none -fallow-argument-mismatch" - make -j8 - mv bin/* -t /usr/bin/ - rm -rf * - cmake ../src/ \ - -DCMAKE_Fortran_COMPILER=mpifort \ - -DCMAKE_C_COMPILER=mpicc \ - -DNetCDF_Fortran_LIBRARY=$(nc-config --libdir)/libnetcdff.so \ - -DNetCDF_C_LIBRARY=$(nc-config --libdir)/libnetcdf.so \ - -DNetCDF_INCLUDE_DIR=$(nc-config --includedir) \ - -DUSE_PAHM=TRUE \ - -DUSE_WWM=TRUE \ - -DCMAKE_Fortran_FLAGS_RELEASE="-O2 -ffree-line-length-none -fallow-argument-mismatch" - make -j8 - mv bin/* -t /usr/bin/ - cd ${PREV_PWD} - rm -rf schism - - - apt-get remove -y git - apt-get remove -y gcc - apt-get remove -y g++ - apt-get remove -y gfortran - apt-get remove -y make - apt-get remove -y cmake - apt-get remove -y python3 - apt-get remove -y python-is-python3 - apt-get remove -y libopenmpi-dev - apt-get remove -y libhdf5-dev - apt-get remove -y libnetcdf-dev libnetcdf-mpi-dev libnetcdff-dev - - apt-get install -y libnetcdf-c++4-1 libnetcdf-c++4 libnetcdf-mpi-19 libnetcdf19 libnetcdff7 netcdf-bin - apt-get install -y libhdf5-103-1 libhdf5-cpp-103-1 libhdf5-openmpi-103-1 - apt-get install -y libopenmpi3 - DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata - apt-get install -y expect - - apt-get clean autoclean - apt-get autoremove --yes -# rm -rf /var/lib/{apt,dpkg,cache,log}/ - - -%labels - Author "Soroosh Mani" From fa92784eb8b4364c8238ea07a526656709867926 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Sat, 15 Jun 2024 02:43:20 +0000 Subject: [PATCH 39/54] Update conda env, gitignore and readme. 
Add python packaging --- .gitignore | 8 +- README.md | 379 +----------------------------------------------- environment.yml | 3 - pyproject.toml | 83 +++++++++++ 4 files changed, 91 insertions(+), 382 deletions(-) create mode 100644 pyproject.toml diff --git a/.gitignore b/.gitignore index b0ca438..1f0be4a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,3 @@ -.terraform* -terraform.tfstate* -*.tfvars* -vars_from_terraform __pycache__/ -ansible_collections/ -*.sif +*.egg-info/ +_version.py diff --git a/README.md b/README.md index 252aaeb..8ec0ca3 100644 --- a/README.md +++ b/README.md @@ -1,381 +1,14 @@ -# On-demand Storm Surge Model Infrastructure +# Probabilistic Hurricane Storm Surge Model Workflow +This is a Python based workflow for getting probabilistic results +from an ensemble of storm surge simulations during tropical cyclone +events. -## AWS On-Demand +TO BE COMPLETED... -This workflow uses ERA5 atmospheric forcing from Copernicus project -for hindcast mode. - -In case images are being rebuilt, to upload Docker images to the ECR, -first login to AWS ECR for docker when your AWS environment is set up. - -``` -aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com -``` - -** IMPORTANT ** -In this document it is assumed that no infrastructure has -been set up before hand. If this repo is being used as a part of -a collaboration, please check with the "admin" who originally sets it up -for the project in order to avoid overriding existing infrastructure. - -**The infrastructure set up at this point is not intended to be used -by multiple people on the same project. One "admin" sets it up and -then the rest of the collaborators can use Prefect to launch jobs** - -Also the names are not *yet* dynamic. Meaning that for separate projects -user need to modify names in Terraform file by hand! In later iterations -this issue will be addressed! 
- - -### Setting up accounts -To be able to administer the On-Demand workflow on AWS, first you need -to setup your accounts - -#### For AWS -Make sure you added MFA device in the AWS account. Then create a -API key. From AWS Console, go to "My Security Credentials" from -the pull-down and then create "access key". Make sure you note -what your access key is. This will be used later in setting up the -environment. (refer to AWS documentation) - -#### Prefect -You need to create a Prefect account. - -After creating the account create a **project** named `ondemand-stormsurge`. -You could also collaborate on existing project in other accounts -if you're added as collaborator to the team on Prefect. - -Create an **API key** for your account. Note this API key as it is -going to be used when setting up the environment. - - -### Setting up the environment - -Next to use the infrastructure you need to setup the local environment -correcly: - -#### For AWS -**Only on a trusted machine** -Use `aws configure` to configure your permanent keys. This includes -- permanent profile name -- aws_access_key_id -- aws_secret_access_key -- mfa_serial -- region=us-east-1 - -Using `aws-cli` execute the following code (replace the parts in the -brackets < and >, also remove the brackets themselves) - -```sh -# aws --profile sts get-session-token --serial-number arn:aws:iam:::mfa/ --token-code <6_DIGIT_MFA_PIN> -``` - -If everything is setup correctly, you'll receive a response with the -following items for a temporary credentials: -- AccessKeyId -- SecretAccessKey -- SessionToken -- Expiration - -Note that temporary credentials is **required** when using an -AWS account that has MFA setup. - -Copy these (the first 3) values into your `~/.aws/credentials` file. 
-Note that the the values should be set as the following in the -`credentials` file - -```txt -[temp profile name] -aws_access_key_id = XXXXXXXXXXXXX -aws_secret_access_key = XXXXXXXXXXXXX -aws_session_token = XXXXXXXXXXXXX -``` - -also set these values in your shell environment as (later used by -ansible and prefect): - -```sh -export AWS_ACCESS_KEY_ID=XXXXXXXXXXXXX -export AWS_SECRET_ACCESS_KEY=XXXXXXXXXXXXX -export AWS_SESSION_TOKEN=XXXXXXXXXXXXX -``` - -also for RDHPCS - -```sh -export RDHPCS_S3_ACCESS_KEY_ID=XXXXXXXXXXXXX -export RDHPCS_S3_SECRET_ACCESS_KEY=XXXXXXXXXXXXX -export PW_API_KEY=XXXXXXXX -``` - -and for ERA5 Copernicus -```sh -export CDSAPI_URL=https://cds.climate.copernicus.eu/api/v2 -export CDSAPI_KEY=: -``` - -Now test if your environment works by getting a list of S3 buckets -on the account: - -```sh -aws s3api list-buckets -``` - -You will get a list of all S3 buckets on the account - -#### For Prefect - -The environment for Prefect is setup by authenticating your local -client with the API server (e.g. Prefect cloud) using: - -```sh -prefect auth login --key API_KEY -``` - -#### Packages -Using `conda` create a single environment for Terraform, Prefect, and -Ansible using the environment file. From the repository -root execute: - -```sh -conda env create -f environment.yml -``` - -#### Misc -Create a **keypair** to be used for connecting to EC2 instaces -provisioned for the workflow. - - -### Usage - -The workflow backend setup happens in 3 steps: - -1. Terraform to setup AWS infrastructure such as S3 and manager EC2 -2. Ansible to install necessary packages and launch Prefect agents - on the manager EC2 - - Since temporary AWS credentials are inserted into the manager EC2 - the Ansible playbook needs to be executed when credentials expire -3. Prefect to define the workflow and start the workflow execution - -#### Step 1 -Currently part of the names and addresses in the Terraform -configurations are defined as `locals`. 
The rest of them need to be -found and modified from various sections of the Terraform file for -different projects to avoid name clash on the same account. - -In the `locals` section at the top of `main.tf` modify the following: -- `pvt_key_path = "/path/to/private_key"` used to create variable file for Ansible -- `pub_key_path = "/path/to/public_key.pub"` used to setup EC2 SSH key -- `dev = "developer_name"` developer name later to be used for dynamic naming - -In the `provider` section update `profile` to the name of the -temporary credentials profile created earlier - -After updating the values go to `terraform/backend` directory and call - -```sh -terraform init -terraform apply -``` - -Verify the changes and if correct type in `yes`. After this step -the Terraform sets up AWS backend and then creates two variable files: -One used by Ansible to connect to manager EC2 (provisioned by Terraform) -and another for Prefect. - -Now that the backend S3 bucket is provisioned go up to `terrafrom` -directory and execute the same commands for the rest of the -infrastructure. - -Before applying the Terraform script, you need to set `account_id` and -`role_prefix` variables in `terraform.tfvars` file (or pass by -`-var="value"` commandline argument) - -```sh -terraform init -terraform apply -``` - -Note that the Terraform state file for the system backend is stored -locally (in `backend` directory), but the state file for the rest of -the system is encrypted and stored on the S3 bucket defined in -`backend.tf`. - - -#### Step 2 - -Now that the backend is ready it's time to setup the manager EC2 -packages and start the agents. To do so Ansible is used. 
First -make sure that the **environment** variables for AWS and Prefect are -set (as described in the previous section), then go to the -`ansible` directory in the root of the repo and execute - -```sh -conda activate odssm -ansible-galaxy install -r requirements.yml -ansible-playbook -i inventory/inventory ./playbooks/provision-prefect-agent.yml -``` - -Note that Prefect agent Docker images are customized to have -AWS CLI installed. If you would like to further modify the agent -images, you need to change the value of `image` for the "Register ..." -Ansible-tasks. - -Wait for the Playbook to be fully executed. After that if you -SSH to the manager EC2 you should see 3 Prefect agents running on -Docker, 1 local and 2 ECS. - -Note that Currently we don't use Prefect ECS agents and all the logic -is executed by the "local" agent. Later this might change when -Prefect's ECSTask's are utilized. - - -#### Step 3 - -Now it's time to register the defined Prefect workflow and then run it. -From the shell environment. First activate the `prefect` Conda -environment: - -```sh -conda activate odssm -``` - -Then go to `prefect` directory and execute: - -```sh -python workflow/main.py register -``` - -This will register the workflow with the project in your Prefect -cloud account. Note that this doesn't need to be executed everytime. - -Once registered the workflow can be used the next time you set up -the environment. Now to run the workflow, with the `prefect` Conda -environment already activated, execute: - -```sh -prefect run -n end-to-end --param name= --param year= -``` - -For the Ansible playbook to work you also need to set this environment -variable: - -```sh -export PREFECT_AGENT_TOKEN= -``` - - -### Remarks -As mentioned before, current workflow is not designed to be set up -and activated by many users. For backend, one person needs to take -the role of admin and create all the necessary infrastructure as -well as agents and AWS temporary authentication. 
Then the Prefect -cloud account whose API key is used in the backend setup can start -the process. - -Also note that for the admin, after the first time setup, only the -following steps need to be repeated to update the expired -temporary AWS credentials: -- Setting AWS temporary credentials locally in ~/.aws/credentials -- Setting AWS temporary credentials in local environment -- Setting API key in local environment -- Executing Ansible script - -Note that the person executing the Ansible script needs to have -access to the key used to setup the EC2 when terraform was executed. - -If the role of admin is passed to another person, the tfstate files -from the current admin needs to be shared with the new person -and placed in the `terraform` directory to avoid overriding. - -The new admin can then generate their own keys and the Terraform -script will update the EC2 machine and launch templates with the -new key. - -The static S3 data is duplicated in both AWS infrastructure S3 as well -as PW S3. - - -## Dockerfiles created for on-demand project - -### Testing -Install Docker and Docker-compose on your machine. Usually Docker is installed with `sudo` access requirement for running containers - -#### To test if your environment is setup correctly - -Either inside `main` branch test the Docker image for fetching hurricane info - -In `info/docker` directory -```bash -sudo docker-compose build -``` - -modify `info/docker-compose.yml` and update the `source` to an address that exists on your machine. - -Then call -```bash -sudo docker-compose run hurricane-info-noaa elsa 2021 -``` - -This should fetch the hurricane info for the hurricane specified on the command line. The result is creation of `windswath` and `coops_ssh` directories with data in them as well as empty `mesh`, `setup`, and `sim` directories inside the address specified as `source` in the compose file. 
- - -#### To test the full pipeline -First, setup the environment variables to the desired hurricane name and year. You can also configure this in `main/.env` file, however if you have the environments set up, they'll always override the values in `.env` file -```bash -export HURRICANE_NAME=elsa -export HURRICANE_YEAR=2021 -``` - -Update `source` addresses in `main/docker-compose.yml` to match existing address on your machine. Note that each `service` in the compose file has its own mapping of sources to targets. Do **not** modify `target` values. Note that you need to update all the `source` values in this file as each one is used for one `service` or step. - -To test all the steps, in addition to this repo you need some static files -- Static geometry shapes (I'll provide `base_geom` and `high_geom`) -- GEBCO DEMs for Geomesh -- NCEI19 DEMs for Geomesh -- TPXO file `h_tpxo9.v1.nc` for PySCHISM -- NWM file `NWM_channel_hydrofabric.tar.gz` for PySCHISM - -Then when all the files and paths above are correctly set up, run - -```bash -sudo -E docker-compose run hurricane-info-noaa -``` - -Note that this time no argument is passed for hurricane name; it will be picked up from the environment. - -After this step is done (like the previous test) you'll get a directory structure needed for running the subsequent steps. - -Now you can run ocsmesh -```bash -sudo -E docker-compose run ocsmesh-noaa -``` -or you can run it in detached mode -```bash -sudo -E docker-compose run -d ocsmesh-noaa -``` - -When meshing is done you'll see `mesh` directory being filled with some files. After that for pyschism run: - -```bash - sudo -E docker-compose run pyschism-noaa - ``` - - or in detached mode - - ```bash - sudo -E docker-compose run -d pyschism-noaa - ``` - -When pyschism is done, you should see `/setup/schism.dir` that contains SCHISM. -In `main/.env` update `SCHISM_NPROCS` value to the number of available physical cores of the machine you're testing on, e.g. 
`2` or `4` and then run: - - ```bash - sudo -E docker-compose run -d schism-noaa - ``` ## References +- Daneshvar, F., et al. Tech Report (TODO) - Pringle, W. J., Mani, S., Sargsyan, K., Moghimi, S., Zhang, Y. J., Khazaei, B., Myers, E. (January 2023). _Surrogate-Assisted Bayesian Uncertainty Quantification for diff --git a/environment.yml b/environment.yml index 73e02e4..882357c 100644 --- a/environment.yml +++ b/environment.yml @@ -42,6 +42,3 @@ dependencies: - udunits2 - utm - xarray==2023.7.0 - - pip: - - --no-deps - - -rrequirements.txt diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..6c110b7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,83 @@ +[build-system] +requires = ["setuptools>=64", "setuptools_scm>=8"] +build-backend = "setuptools.build_meta" + +[tool.setuptools_scm] +version_file = "stormworkflow/_version.py" + +[project] +name = "storm-workflow" +dynamic = ["version"] + +authors = [ + {name = "Soroosh Mani", email = "soroosh.mani@noaa.gov"}, + {name = "William Pringle", email = "wpringle@anl.gov"}, + {name = "Fariborz Daneshvar", email = "fariborz.daneshvar@noaa.gov"}, +] +maintainers = [ + {name = "Soroosh Mani", email = "soroosh.mani@noaa.gov"} +] + +readme = {file = "README.txt", content-type = "text/markdown"} + +description = "A set of scripts to generate probabilistic storm surge results!" 
+ +license = {file = "LICENSE"} + +requires-python = ">= 3.8, < 3.11" + +[tool.setuptools.packages.find] +where = ["stormworkflow"] + +[tool.setuptools.package-data] +slurm = ["*.sbatch"] +scripts = ["*.sh", "*.conf", "*.exp"] +refs = ["*.nml"] + +dependencies = [ + "cartopy", + "cf-python", + "cfdm", + "cfgrib", + "cfunits", + "chaospy>=4.2.7", + "coupledmodeldriver>=1.6.6", + "colored-traceback", + "cmocean", + "ensembleperturbation>=1.1.2", + "fiona", + "geoalchemy2", +# "geopandas>=0.13", + "geopandas", + "matplotlib", + "mpi4py", + "netCDF4", + "numpy", + "numba", + "ocsmesh==1.5.3", + "pandas", + "pyarrow", + "pygeos", + "pyproj", + "pyschism>=0.1.15", + "pytz", + "shapely>=2", + "stormevents==2.2.3", + "rasterio", + "requests", + "rtree", + "scipy", + "seawater", + "typing-extensions", + "tqdm", + "utm", + "xarray==2023.7.0", +] + +[project.urls] +#Homepage = "https://example.com" +#Documentation = "https://readthedocs.org" +Repository = "https://github.com/oceanmodeling/ondemand-storm-workflow.git" + +#[project.scripts] +#run_ensemble = "stormworkflow:main_cli" From 7d44cc7523fd8199ea1d6523ff870e6500bab0a5 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Sat, 15 Jun 2024 03:10:48 +0000 Subject: [PATCH 40/54] Reorg python scripts --- stormworkflow/{ => post}/Tidal_validation.py | 0 stormworkflow/{ => post}/analyze_ensemble.py | 0 stormworkflow/{ => post}/combine_ensemble.py | 0 stormworkflow/{ => post}/defn.py | 0 stormworkflow/{ => post}/generate_viz.py | 0 stormworkflow/{ => post}/hurricane_funcs.py | 0 stormworkflow/{ => post}/max_ele_vs_hwm.py | 0 stormworkflow/{ => post}/maxelev_diff.py | 0 stormworkflow/{ => prep}/download_data.py | 0 stormworkflow/{ => prep}/hurricane_data.py | 0 stormworkflow/{ => prep}/hurricane_mesh.py | 0 stormworkflow/{ => prep}/setup_ensemble.py | 0 stormworkflow/{ => prep}/setup_model.py | 0 stormworkflow/{ => prep}/wwm.py | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename stormworkflow/{ => 
post}/Tidal_validation.py (100%) rename stormworkflow/{ => post}/analyze_ensemble.py (100%) rename stormworkflow/{ => post}/combine_ensemble.py (100%) rename stormworkflow/{ => post}/defn.py (100%) rename stormworkflow/{ => post}/generate_viz.py (100%) rename stormworkflow/{ => post}/hurricane_funcs.py (100%) rename stormworkflow/{ => post}/max_ele_vs_hwm.py (100%) rename stormworkflow/{ => post}/maxelev_diff.py (100%) rename stormworkflow/{ => prep}/download_data.py (100%) rename stormworkflow/{ => prep}/hurricane_data.py (100%) rename stormworkflow/{ => prep}/hurricane_mesh.py (100%) rename stormworkflow/{ => prep}/setup_ensemble.py (100%) rename stormworkflow/{ => prep}/setup_model.py (100%) rename stormworkflow/{ => prep}/wwm.py (100%) diff --git a/stormworkflow/Tidal_validation.py b/stormworkflow/post/Tidal_validation.py similarity index 100% rename from stormworkflow/Tidal_validation.py rename to stormworkflow/post/Tidal_validation.py diff --git a/stormworkflow/analyze_ensemble.py b/stormworkflow/post/analyze_ensemble.py similarity index 100% rename from stormworkflow/analyze_ensemble.py rename to stormworkflow/post/analyze_ensemble.py diff --git a/stormworkflow/combine_ensemble.py b/stormworkflow/post/combine_ensemble.py similarity index 100% rename from stormworkflow/combine_ensemble.py rename to stormworkflow/post/combine_ensemble.py diff --git a/stormworkflow/defn.py b/stormworkflow/post/defn.py similarity index 100% rename from stormworkflow/defn.py rename to stormworkflow/post/defn.py diff --git a/stormworkflow/generate_viz.py b/stormworkflow/post/generate_viz.py similarity index 100% rename from stormworkflow/generate_viz.py rename to stormworkflow/post/generate_viz.py diff --git a/stormworkflow/hurricane_funcs.py b/stormworkflow/post/hurricane_funcs.py similarity index 100% rename from stormworkflow/hurricane_funcs.py rename to stormworkflow/post/hurricane_funcs.py diff --git a/stormworkflow/max_ele_vs_hwm.py b/stormworkflow/post/max_ele_vs_hwm.py 
similarity index 100% rename from stormworkflow/max_ele_vs_hwm.py rename to stormworkflow/post/max_ele_vs_hwm.py diff --git a/stormworkflow/maxelev_diff.py b/stormworkflow/post/maxelev_diff.py similarity index 100% rename from stormworkflow/maxelev_diff.py rename to stormworkflow/post/maxelev_diff.py diff --git a/stormworkflow/download_data.py b/stormworkflow/prep/download_data.py similarity index 100% rename from stormworkflow/download_data.py rename to stormworkflow/prep/download_data.py diff --git a/stormworkflow/hurricane_data.py b/stormworkflow/prep/hurricane_data.py similarity index 100% rename from stormworkflow/hurricane_data.py rename to stormworkflow/prep/hurricane_data.py diff --git a/stormworkflow/hurricane_mesh.py b/stormworkflow/prep/hurricane_mesh.py similarity index 100% rename from stormworkflow/hurricane_mesh.py rename to stormworkflow/prep/hurricane_mesh.py diff --git a/stormworkflow/setup_ensemble.py b/stormworkflow/prep/setup_ensemble.py similarity index 100% rename from stormworkflow/setup_ensemble.py rename to stormworkflow/prep/setup_ensemble.py diff --git a/stormworkflow/setup_model.py b/stormworkflow/prep/setup_model.py similarity index 100% rename from stormworkflow/setup_model.py rename to stormworkflow/prep/setup_model.py diff --git a/stormworkflow/wwm.py b/stormworkflow/prep/wwm.py similarity index 100% rename from stormworkflow/wwm.py rename to stormworkflow/prep/wwm.py From b7819e3999653afbdc4b4ba5ba5853f02603e1a6 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Sat, 15 Jun 2024 03:11:52 +0000 Subject: [PATCH 41/54] Adding __init__ --- stormworkflow/post/__init__.py | 0 stormworkflow/prep/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 stormworkflow/post/__init__.py create mode 100644 stormworkflow/prep/__init__.py diff --git a/stormworkflow/post/__init__.py b/stormworkflow/post/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/stormworkflow/prep/__init__.py 
b/stormworkflow/prep/__init__.py new file mode 100644 index 0000000..e69de29 From f783a6c51d290a054647322bc5a492674ac6e9ec Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 17 Jun 2024 15:35:42 +0000 Subject: [PATCH 42/54] Fix packaging --- pyproject.toml | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6c110b7..fff3765 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,11 +2,8 @@ requires = ["setuptools>=64", "setuptools_scm>=8"] build-backend = "setuptools.build_meta" -[tool.setuptools_scm] -version_file = "stormworkflow/_version.py" - [project] -name = "storm-workflow" +name = "stormworkflow" dynamic = ["version"] authors = [ @@ -26,14 +23,6 @@ license = {file = "LICENSE"} requires-python = ">= 3.8, < 3.11" -[tool.setuptools.packages.find] -where = ["stormworkflow"] - -[tool.setuptools.package-data] -slurm = ["*.sbatch"] -scripts = ["*.sh", "*.conf", "*.exp"] -refs = ["*.nml"] - dependencies = [ "cartopy", "cf-python", @@ -74,6 +63,20 @@ dependencies = [ "xarray==2023.7.0", ] +[tool.setuptools_scm] +version_file = "stormworkflow/_version.py" + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +where = ["."] + +[tool.setuptools.package-data] +slurm = ["*.sbatch"] +scripts = ["*.sh", "*.conf", "*.exp"] +refs = ["*.nml"] + [project.urls] #Homepage = "https://example.com" #Documentation = "https://readthedocs.org" From 924d0a0f742f8be740226bf36f5c079f002db58d Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 17 Jun 2024 16:15:35 +0000 Subject: [PATCH 43/54] Add datafiles --- pyproject.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fff3765..1353b23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,12 +70,13 @@ version_file = "stormworkflow/_version.py" include-package-data = true [tool.setuptools.packages.find] +namespaces = true where = ["."] 
[tool.setuptools.package-data] -slurm = ["*.sbatch"] -scripts = ["*.sh", "*.conf", "*.exp"] -refs = ["*.nml"] +"stormworkflow.slurm" = ["*.sbatch"] +"stormworkflow.scripts" = ["*.sh", "*.conf", "*.exp"] +"stormworkflow.refs" = ["*.nml"] [project.urls] #Homepage = "https://example.com" From 969b87e22566b5b846e44de95d6217269595d988 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 17 Jun 2024 20:29:54 +0000 Subject: [PATCH 44/54] Update workflow for local env -- up to mesh --- pyproject.toml | 7 ++-- stormworkflow/main.py | 49 ++++++++++++++++++++++++++++ stormworkflow/prep/download_data.py | 6 ++-- stormworkflow/prep/hurricane_data.py | 8 +++-- stormworkflow/prep/hurricane_mesh.py | 8 +++-- stormworkflow/scripts/input.conf | 5 ++- stormworkflow/scripts/workflow.sh | 19 +++++------ stormworkflow/slurm/mesh.sbatch | 2 +- 8 files changed, 80 insertions(+), 24 deletions(-) create mode 100644 stormworkflow/main.py diff --git a/pyproject.toml b/pyproject.toml index 1353b23..a6ccc37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,5 +83,8 @@ where = ["."] #Documentation = "https://readthedocs.org" Repository = "https://github.com/oceanmodeling/ondemand-storm-workflow.git" -#[project.scripts] -#run_ensemble = "stormworkflow:main_cli" +[project.scripts] +run_ensemble = "stormworkflow.main:main" +hurricane_data = "stormworkflow.prep.hurricane_data:cli" +hurricane_mesh = "stormworkflow.prep.hurricane_mesh:cli" +download_data = "stormworkflow.prep.download_data:main" diff --git a/stormworkflow/main.py b/stormworkflow/main.py new file mode 100644 index 0000000..c486dbd --- /dev/null +++ b/stormworkflow/main.py @@ -0,0 +1,49 @@ +import subprocess +import logging +import os +from importlib.resources import files +from argparse import ArgumentParser + +import stormworkflow + + +_logger = logging.getLogger(__file__) + +def main(): + + parser = ArgumentParser() + parser.add_argument('stormname', type=str) + parser.add_argument('stormyear', type=int) + 
parser.add_argument('--suffix', type=str, required=False) + args = parser.parse_args() + + scripts = files('stormworkflow.scripts') + slurm = files('stormworkflow.slurm') + + wf = scripts.joinpath('workflow.sh') + + run_env = os.environ.copy() + run_env['L_SCRIPT_DIR'] = slurm.joinpath('.') + + run_args = [wf, args.stormname, str(args.stormyear)] + if args.suffix is not None: + run_args.append(args.suffix) + + ps = subprocess.run( + run_args, + env=run_env, + shell=False, + check=True, + capture_output=False, + ) + +# if ps.returncode != 0: +# _logger.error(ps.stderr) +# +# _logger.info(ps.stdout) + + + +if __name__ == '__main__': + + main() diff --git a/stormworkflow/prep/download_data.py b/stormworkflow/prep/download_data.py index 204f8b6..56e565e 100644 --- a/stormworkflow/prep/download_data.py +++ b/stormworkflow/prep/download_data.py @@ -13,7 +13,9 @@ logger.setLevel(logging.INFO) -def main(args): +def main(): + + args = parse_arguments() out_dir = args.output_directory dt_rng_path = args.date_range_file @@ -86,4 +88,4 @@ def parse_arguments(): if __name__ == '__main__': - main(parse_arguments()) + main() diff --git a/stormworkflow/prep/hurricane_data.py b/stormworkflow/prep/hurricane_data.py index cdd8249..3b8d40c 100644 --- a/stormworkflow/prep/hurricane_data.py +++ b/stormworkflow/prep/hurricane_data.py @@ -346,9 +346,7 @@ def main(args): coops_ssh[['x', 'y']].to_dataframe().drop(columns=['nws_id']).to_csv( sta_loc_out, header=False, index=False) - -if __name__ == '__main__': - +def cli(): parser = argparse.ArgumentParser() parser.add_argument( @@ -419,3 +417,7 @@ def main(args): args = parser.parse_args() main(args) + +if __name__ == '__main__': + cli() + diff --git a/stormworkflow/prep/hurricane_mesh.py b/stormworkflow/prep/hurricane_mesh.py index 7af8cec..bf93ff3 100644 --- a/stormworkflow/prep/hurricane_mesh.py +++ b/stormworkflow/prep/hurricane_mesh.py @@ -526,8 +526,7 @@ def run(self, args): overwrite=True) - -if __name__ == '__main__': +def cli(): 
parser = argparse.ArgumentParser() parser.add_argument( @@ -546,3 +545,8 @@ def run(self, args): logger.info(f"Mesh arguments are {args}.") main(args, [hurrmesh_client, subset_client]) + + +if __name__ == '__main__': + cli() + diff --git a/stormworkflow/scripts/input.conf b/stormworkflow/scripts/input.conf index b888d99..083ce03 100644 --- a/stormworkflow/scripts/input.conf +++ b/stormworkflow/scripts/input.conf @@ -1,6 +1,7 @@ # Parameters storm=$1 year=$2 +suffix=$3 subset_mesh=1 # Other params hr_prelandfall=-1 @@ -8,7 +9,7 @@ past_forecast=1 hydrology=0 use_wwm=0 pahm_model='gahm' -num_perturb=3 +num_perturb=2 sample_rule='korobov' spinup_exec='pschism_PAHM_TVD-VL' hotstart_exec='pschism_PAHM_TVD-VL' @@ -30,8 +31,6 @@ L_DEM_LO=$DATA/dem/GEBCO/*.tif L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 L_MESH_LO=$DATA/grid/WNAT_1km.14 L_SHP_DIR=$DATA/shape -L_IMG_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/imgs -L_SCRIPT_DIR=/nhc/Soroosh.Mani/sandbox/ondemand-storm-workflow/singularity/scripts # Environment export SINGULARITY_BINDFLAGS="--bind /nhc" diff --git a/stormworkflow/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh index c626530..847b967 100755 --- a/stormworkflow/scripts/workflow.sh +++ b/stormworkflow/scripts/workflow.sh @@ -19,8 +19,7 @@ mkdir -p $TMPDIR function version { logfile=$1 - echo -n "`basename $2`: " >> $logfile - singularity run $2 pip list | grep $3 >> $logfile + pip list | grep $2 >> $logfile } function add_sbatch_header { @@ -50,10 +49,10 @@ function init { done logfile=$run_dir/versions.info - version $logfile $L_IMG_DIR/info.sif stormevents - version $logfile $L_IMG_DIR/prep.sif stormevents - version $logfile $L_IMG_DIR/prep.sif ensembleperturbation -# version $logfile $L_IMG_DIR/ocsmesh.sif ocsmesh + version $logfile stormevents + version $logfile stormevents + version $logfile ensembleperturbation + version $logfile ocsmesh echo "SCHISM: see solver.version each outputs dir" >> $logfile echo $run_dir @@ -61,13 
+60,11 @@ function init { uuid=$(uuidgen) tag=${storm}_${year}_${uuid} -suffix=$3 if [ ! -z $suffix ]; then tag=${tag}_${suffix}; fi run_dir=$(init $tag) echo $run_dir -singularity run $SINGULARITY_BINDFLAGS $L_IMG_DIR/info.sif \ - hurricane_data \ +hurricane_data \ --date-range-outpath $run_dir/setup/dates.csv \ --track-outpath $run_dir/nhc_track/hurricane-track.dat \ --swath-outpath $run_dir/windswath \ @@ -107,7 +104,7 @@ sbatch \ --output "${run_dir}/slurm/slurm-%j.mesh.out" \ --wait \ --job-name=mesh_$tag \ - --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year,IMG=$L_IMG_DIR/ocsmesh.sif \ + --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year \ $run_dir/slurm/mesh.sbatch @@ -115,7 +112,7 @@ echo "Download necessary data..." # TODO: Separate pairing NWM-elem from downloading! DOWNLOAD_KWDS="" if [ $hydrology == 1 ]; then DOWNLOAD_KWDS+=" --with-hydrology"; fi -singularity run $SINGULARITY_BINDFLAGS $L_IMG_DIR/prep.sif download_data \ +download_data \ --output-directory $run_dir/setup/ensemble.dir/ \ --mesh-directory $run_dir/mesh/ \ --date-range-file $run_dir/setup/dates.csv \ diff --git a/stormworkflow/slurm/mesh.sbatch b/stormworkflow/slurm/mesh.sbatch index 5543bf3..757b9cd 100644 --- a/stormworkflow/slurm/mesh.sbatch +++ b/stormworkflow/slurm/mesh.sbatch @@ -6,4 +6,4 @@ set -ex -singularity run ${SINGULARITY_BINDFLAGS} ${IMG} ${STORM} ${YEAR} ${MESH_KWDS} +hurricane_mesh ${STORM} ${YEAR} ${MESH_KWDS} From edcd510800a521d7aec911ef6fb5ce631b3d687b Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 18 Jun 2024 14:53:11 +0000 Subject: [PATCH 45/54] Full workflow - caveat geopandas install --- pyproject.toml | 5 ++++- stormworkflow/main.py | 11 +++++------ stormworkflow/post/analyze_ensemble.py | 7 +++++-- stormworkflow/post/combine_ensemble.py | 7 +++++-- stormworkflow/prep/download_data.py | 9 +++++---- stormworkflow/prep/setup_ensemble.py | 17 +++++++++++------ stormworkflow/scripts/workflow.sh | 1 - stormworkflow/slurm/post.sbatch | 6 ++---- 
stormworkflow/slurm/prep.sbatch | 2 +- 9 files changed, 38 insertions(+), 27 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a6ccc37..90ee3fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,4 +87,7 @@ Repository = "https://github.com/oceanmodeling/ondemand-storm-workflow.git" run_ensemble = "stormworkflow.main:main" hurricane_data = "stormworkflow.prep.hurricane_data:cli" hurricane_mesh = "stormworkflow.prep.hurricane_mesh:cli" -download_data = "stormworkflow.prep.download_data:main" +download_data = "stormworkflow.prep.download_data:cli" +setup_ensemble = "stormworkflow.prep.setup_ensemble:cli" +combine_ensemble = "stormworkflow.post.combine_ensemble:cli" +analyze_ensemble = "stormworkflow.post.analyze_ensemble:cli" diff --git a/stormworkflow/main.py b/stormworkflow/main.py index c486dbd..690f435 100644 --- a/stormworkflow/main.py +++ b/stormworkflow/main.py @@ -33,15 +33,14 @@ def main(): run_args, env=run_env, shell=False, - check=True, +# check=True, capture_output=False, ) -# if ps.returncode != 0: -# _logger.error(ps.stderr) -# -# _logger.info(ps.stdout) - + if ps.returncode != 0: + _logger.error(ps.stderr) + + _logger.info(ps.stdout) if __name__ == '__main__': diff --git a/stormworkflow/post/analyze_ensemble.py b/stormworkflow/post/analyze_ensemble.py index ed9aba3..00a2932 100644 --- a/stormworkflow/post/analyze_ensemble.py +++ b/stormworkflow/post/analyze_ensemble.py @@ -399,11 +399,14 @@ def _analyze(tracks_dir, analyze_dir, mann_coef): pyplot.show() -if __name__ == '__main__': - +def cli(): parser = ArgumentParser() parser.add_argument('-d', '--ensemble-dir', type=Path) parser.add_argument('-t', '--tracks-dir', type=Path) parser.add_argument('-s', '--sequential', action='store_true') main(parser.parse_args()) + + +if __name__ == '__main__': + cli() diff --git a/stormworkflow/post/combine_ensemble.py b/stormworkflow/post/combine_ensemble.py index ad82b74..fffdbd2 100644 --- a/stormworkflow/post/combine_ensemble.py +++ 
b/stormworkflow/post/combine_ensemble.py @@ -21,11 +21,14 @@ def main(args): ) -if __name__ == '__main__': - +def cli(): parser = ArgumentParser() parser.add_argument('-d', '--ensemble-dir', type=Path) parser.add_argument('-t', '--tracks-dir', type=Path) parser.add_argument('-s', '--sequential', action='store_true') main(parser.parse_args()) + + +if __name__ == '__main__': + cli() diff --git a/stormworkflow/prep/download_data.py b/stormworkflow/prep/download_data.py index 56e565e..eab58a4 100644 --- a/stormworkflow/prep/download_data.py +++ b/stormworkflow/prep/download_data.py @@ -13,9 +13,7 @@ logger.setLevel(logging.INFO) -def main(): - - args = parse_arguments() +def main(args): out_dir = args.output_directory dt_rng_path = args.date_range_file @@ -86,6 +84,9 @@ def parse_arguments(): return args + +def cli(): + main(parse_arguments()) if __name__ == '__main__': - main() + cli() diff --git a/stormworkflow/prep/setup_ensemble.py b/stormworkflow/prep/setup_ensemble.py index 24eedab..71d2218 100644 --- a/stormworkflow/prep/setup_ensemble.py +++ b/stormworkflow/prep/setup_ensemble.py @@ -37,11 +37,13 @@ from stormevents import StormEvent from stormevents.nhc.track import VortexTrack -import wwm -from relocate_source_feeder import ( - relocate_sources, - v16_mandatory_sources_coor, -) +import stormworkflow.prep.wwm +# TODO: Later find a clean way to package this module from SCHISM from +# src/Utility/Pre-Processing/STOFS-3D-Atl-shadow-VIMS/Pre_processing/Source_sink/Relocate/ +#from relocate_source_feeder import ( +# relocate_sources, +# v16_mandatory_sources_coor, +#) REFS = Path('/refs') @@ -335,5 +337,8 @@ def parse_arguments(): return args -if __name__ == '__main__': +def cli(): main(parse_arguments()) + +if __name__ == '__main__': + cli() diff --git a/stormworkflow/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh index 847b967..2d0509d 100755 --- a/stormworkflow/scripts/workflow.sh +++ b/stormworkflow/scripts/workflow.sh @@ -121,7 +121,6 @@ download_data 
\ echo "Setting up the model..." -PREP_KWDS="setup_ensemble" PREP_KWDS+=" --track-file $run_dir/nhc_track/hurricane-track.dat" PREP_KWDS+=" --output-directory $run_dir/setup/ensemble.dir/" PREP_KWDS+=" --num-perturbations $num_perturb" diff --git a/stormworkflow/slurm/post.sbatch b/stormworkflow/slurm/post.sbatch index 5d0436e..4ef59f8 100644 --- a/stormworkflow/slurm/post.sbatch +++ b/stormworkflow/slurm/post.sbatch @@ -5,12 +5,10 @@ set -ex -singularity run ${SINGULARITY_BINDFLAGS} ${IMG} \ - combine_ensemble \ +combine_ensemble \ --ensemble-dir $ENSEMBLE_DIR \ --tracks-dir $ENSEMBLE_DIR/track_files -singularity run ${SINGULARITY_BINDFLAGS} ${IMG} \ - analyze_ensemble \ +analyze_ensemble \ --ensemble-dir $ENSEMBLE_DIR \ --tracks-dir $ENSEMBLE_DIR/track_files diff --git a/stormworkflow/slurm/prep.sbatch b/stormworkflow/slurm/prep.sbatch index cd1fe8d..8beb298 100644 --- a/stormworkflow/slurm/prep.sbatch +++ b/stormworkflow/slurm/prep.sbatch @@ -6,4 +6,4 @@ set -ex -singularity run ${SINGULARITY_BINDFLAGS} ${IMG} ${PREP_KWDS} ${STORM} ${YEAR} +setup_ensemble ${PREP_KWDS} ${STORM} ${YEAR} From 07b9be965863f7ec67b15f2f0f5143f9f1e6caa7 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 25 Jun 2024 19:42:13 +0000 Subject: [PATCH 46/54] Use yaml conf file --- pyproject.toml | 5 ++-- stormworkflow/main.py | 29 +++++++++++++++------ stormworkflow/refs/input.yaml | 40 ++++++++++++++++++++++++++++ stormworkflow/scripts/input.conf | 43 ------------------------------- stormworkflow/scripts/workflow.sh | 11 ++++---- 5 files changed, 69 insertions(+), 59 deletions(-) create mode 100644 stormworkflow/refs/input.yaml delete mode 100644 stormworkflow/scripts/input.conf diff --git a/pyproject.toml b/pyproject.toml index 90ee3fa..0ae1b68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,7 @@ dependencies = [ "pyproj", "pyschism>=0.1.15", "pytz", + "pyyaml", "shapely>=2", "stormevents==2.2.3", "rasterio", @@ -75,8 +76,8 @@ where = ["."] 
[tool.setuptools.package-data] "stormworkflow.slurm" = ["*.sbatch"] -"stormworkflow.scripts" = ["*.sh", "*.conf", "*.exp"] -"stormworkflow.refs" = ["*.nml"] +"stormworkflow.scripts" = ["*.sh", "*.exp"] +"stormworkflow.refs" = ["*.nml", "*.yaml"] [project.urls] #Homepage = "https://example.com" diff --git a/stormworkflow/main.py b/stormworkflow/main.py index 690f435..9d22134 100644 --- a/stormworkflow/main.py +++ b/stormworkflow/main.py @@ -1,10 +1,16 @@ import subprocess import logging import os +import shlex from importlib.resources import files from argparse import ArgumentParser import stormworkflow +import yaml +try: + from yaml import CLoader as Loader, CDumper as Dumper +except ImportError: + from yaml import Loader, Dumper _logger = logging.getLogger(__file__) @@ -12,25 +18,32 @@ def main(): parser = ArgumentParser() - parser.add_argument('stormname', type=str) - parser.add_argument('stormyear', type=int) - parser.add_argument('--suffix', type=str, required=False) + parser.add_argument('--configuration', '-c', type=str, required=False) args = parser.parse_args() scripts = files('stormworkflow.scripts') slurm = files('stormworkflow.slurm') + refs = files('stormworkflow.refs') + + infile = args.configuration + if infile is None: + _logger.warn('No input configuration provided, using reference file!') + infile = refs.joinpath('input.yaml') + + with open(infile, 'r') as yfile: + conf = yaml.load(yfile, Loader=Loader) wf = scripts.joinpath('workflow.sh') run_env = os.environ.copy() run_env['L_SCRIPT_DIR'] = slurm.joinpath('.') - - run_args = [wf, args.stormname, str(args.stormyear)] - if args.suffix is not None: - run_args.append(args.suffix) + for k, v in conf.items(): + if isinstance(v, list): + v = shlex.join(v) + run_env[k] = str(v) ps = subprocess.run( - run_args, + [wf, infile], env=run_env, shell=False, # check=True, diff --git a/stormworkflow/refs/input.yaml b/stormworkflow/refs/input.yaml new file mode 100644 index 0000000..1e8141e --- /dev/null +++ 
b/stormworkflow/refs/input.yaml @@ -0,0 +1,40 @@ +--- +input_version: 0.0.1 + +storm: "florence" +year: 2018 +suffix: "" +subset_mesh: 1 +hr_prelandfall: -1 +past_forecast: 1 +hydrology: 0 +use_wwm: 0 +pahm_model: "gahm" +num_perturb: 2 +sample_rule: "korobov" +spinup_exec: "pschism_PAHM_TVD-VL" +hotstart_exec: "pschism_PAHM_TVD-VL" + +hpc_solver_nnodes: 3 +hpc_solver_ntasks: 108 +hpc_account: "" +hpc_partition: "" + +RUN_OUT: "" +L_NWM_DATASET: "" +L_TPXO_DATASET: "" +L_LEADTIMES_DATASET: "" +L_TRACK_DIR: "" +L_DEM_HI: "" +L_DEM_LO: "" +L_MESH_HI: "" +L_MESH_LO: "" +L_SHP_DIR: "" + +TMPDIR: "/tmp" +PATH_APPEND: "" + +L_SOLVE_MODULES: + - "intel/2022.1.2" + - "impi/2022.1.2" + - "netcdf" diff --git a/stormworkflow/scripts/input.conf b/stormworkflow/scripts/input.conf deleted file mode 100644 index 083ce03..0000000 --- a/stormworkflow/scripts/input.conf +++ /dev/null @@ -1,43 +0,0 @@ -# Parameters -storm=$1 -year=$2 -suffix=$3 -subset_mesh=1 -# Other params -hr_prelandfall=-1 -past_forecast=1 -hydrology=0 -use_wwm=0 -pahm_model='gahm' -num_perturb=2 -sample_rule='korobov' -spinup_exec='pschism_PAHM_TVD-VL' -hotstart_exec='pschism_PAHM_TVD-VL' - -hpc_solver_nnodes=3 -hpc_solver_ntasks=108 -hpc_account='' -hpc_partition='' - -RUN_OUT=/nhc/Soroosh.Mani/runs/ -DATA=/nhc/static_data -# Paths as local variables -L_NWM_DATASET=$DATA/nwm/NWM_v2.0_channel_hydrofabric/nwm_v2_0_hydrofabric.gdb -L_TPXO_DATASET=$DATA/tpxo -L_LEADTIMES_DATASET=$DATA/lead.json -L_TRACK_DIR=INVALD #$DATA/tracks_adj_rmw -L_DEM_HI=$DATA/dem/NCEI_1_9th/*.tif -L_DEM_LO=$DATA/dem/GEBCO/*.tif -L_MESH_HI=$DATA/grid/stofs3d_atl_v2.1_eval.gr3 -L_MESH_LO=$DATA/grid/WNAT_1km.14 -L_SHP_DIR=$DATA/shape - -# Environment -export SINGULARITY_BINDFLAGS="--bind /nhc" -export TMPDIR=/nhc/.tmp # redirect OCSMESH temp files - -# Update PATH -export PATH=$PATH:/nhc/bin/ - -# Modules -L_SOLVE_MODULES="intel/2022.1.2 impi/2022.1.2 netcdf" diff --git a/stormworkflow/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh 
index 2d0509d..5c33f66 100755 --- a/stormworkflow/scripts/workflow.sh +++ b/stormworkflow/scripts/workflow.sh @@ -1,11 +1,8 @@ #!/bin/bash set -e -# User inputs... -THIS_SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -source $THIS_SCRIPT_DIR/input.conf - -if [ $use_wwm == 1 ]; then hotstart_exec='pschism_WWM_PAHM_TVD-VL'; fi +export PATH=$PATH:$PATH_APPEND +export TMPDIR # Processing... mkdir -p $TMPDIR @@ -16,6 +13,7 @@ mkdir -p $TMPDIR # CHECK BIN # combine_hotstart7 # pschism ... +input_file=$1 function version { logfile=$1 @@ -50,11 +48,12 @@ function init { logfile=$run_dir/versions.info version $logfile stormevents - version $logfile stormevents version $logfile ensembleperturbation version $logfile ocsmesh echo "SCHISM: see solver.version each outputs dir" >> $logfile + cp $input_file $run_dir/input.yaml + echo $run_dir } From 21499b3af0b125a1d79e0242e79f1168bc0d8752 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Tue, 25 Jun 2024 20:25:50 +0000 Subject: [PATCH 47/54] Use local natural earth shape --- stormworkflow/main.py | 3 ++- stormworkflow/prep/hurricane_data.py | 11 +++++++++-- stormworkflow/scripts/workflow.sh | 1 + 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/stormworkflow/main.py b/stormworkflow/main.py index 9d22134..e649f6c 100644 --- a/stormworkflow/main.py +++ b/stormworkflow/main.py @@ -4,6 +4,7 @@ import shlex from importlib.resources import files from argparse import ArgumentParser +from pathlib import Path import stormworkflow import yaml @@ -18,7 +19,7 @@ def main(): parser = ArgumentParser() - parser.add_argument('--configuration', '-c', type=str, required=False) + parser.add_argument('configuration', type=Path) args = parser.parse_args() scripts = files('stormworkflow.scripts') diff --git a/stormworkflow/prep/hurricane_data.py b/stormworkflow/prep/hurricane_data.py index 3b8d40c..5e3382a 100644 --- a/stormworkflow/prep/hurricane_data.py +++ b/stormworkflow/prep/hurricane_data.py @@ 
-143,12 +143,13 @@ def main(args): hr_before_landfall = args.hours_before_landfall lead_times = args.lead_times track_dir = args.preprocessed_tracks_dir + countries_shpfile = args.countries_polygon if hr_before_landfall < 0: hr_before_landfall = 48 - ne_low = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres')) - shp_US = ne_low[ne_low.name.isin(['United States of America', 'Puerto Rico'])].unary_union + ne_low = gpd.read_file(countries_shpfile) + shp_US = ne_low[ne_low.NAME_EN.isin(['United States of America', 'Puerto Rico'])].unary_union logger.info("Fetching hurricane info...") event = None @@ -414,6 +415,12 @@ def cli(): help="Existing adjusted track directory", ) + parser.add_argument( + "--countries-polygon", + type=pathlib.Path, + help="Shapefile containing country polygons", + ) + args = parser.parse_args() main(args) diff --git a/stormworkflow/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh index 5c33f66..15b137e 100755 --- a/stormworkflow/scripts/workflow.sh +++ b/stormworkflow/scripts/workflow.sh @@ -73,6 +73,7 @@ hurricane_data \ --hours-before-landfall "$hr_prelandfall" \ --lead-times "$L_LEADTIMES_DATASET" \ --preprocessed-tracks-dir "$L_TRACK_DIR" \ + --countries-polygon "$L_SHP_DIR/ne_110m_cultural/ne_110m_admin_0_countries.shp" \ $storm $year From c9a4fe202c5312f78fefe3ff4550acc7303b9713 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Wed, 26 Jun 2024 13:42:53 +0000 Subject: [PATCH 48/54] Move ROC script to the correct location --- singularity/prep/files/ROC_single_run.py | 300 ------------------ .../post}/ROC_single_run.py | 0 2 files changed, 300 deletions(-) delete mode 100644 singularity/prep/files/ROC_single_run.py rename {singularity/post/files => stormworkflow/post}/ROC_single_run.py (100%) diff --git a/singularity/prep/files/ROC_single_run.py b/singularity/prep/files/ROC_single_run.py deleted file mode 100644 index fbb768d..0000000 --- a/singularity/prep/files/ROC_single_run.py +++ /dev/null @@ -1,300 +0,0 @@ -import 
argparse -import logging -import os -import warnings -import numpy as np -import pandas as pd -import xarray as xr -import scipy as sp -import matplotlib.pyplot as plt -from pathlib import Path -from cartopy.feature import NaturalEarthFeature - -os.environ['USE_PYGEOS'] = '0' -import geopandas as gpd - -pd.options.mode.copy_on_write = True - - -def stack_station_coordinates(x, y): - """ - Create numpy.column_stack based on - coordinates of observation points - """ - coord_combined = np.column_stack([x, y]) - return coord_combined - - -def create_search_tree(longitude, latitude): - """ - Create scipy.spatial.CKDTree based on Lat. and Long. - """ - long_lat = np.column_stack((longitude.T.ravel(), latitude.T.ravel())) - tree = sp.spatial.cKDTree(long_lat) - return tree - - -def find_nearby_prediction(ds, variable, indices): - """ - Reads netcdf file, target variable, and indices - Returns max value among corresponding indices for each point - """ - obs_count = indices.shape[0] # total number of search/observation points - max_prediction_index = len(ds.node.values) # total number of nodes - - prediction_prob = np.zeros(obs_count) # assuming all are dry (probability of zero) - - for obs_point in range(obs_count): - idx_arr = np.delete( - indices[obs_point], np.where(indices[obs_point] == max_prediction_index)[0] - ) # len is length of surrogate model array - val_arr = ds[variable].values[idx_arr] - val_arr = np.nan_to_num(val_arr) # replace nan with zero (dry node) - - # # Pick the nearest non-zero probability (option #1) - # for val in val_arr: - # if val > 0.0: - # prediction_prob[obs_point] = round(val,4) #round to 0.1 mm - # break - - # pick the largest value (option #2) - if val_arr.size > 0: - prediction_prob[obs_point] = val_arr.max() - return prediction_prob - - -def plot_probabilities(df, prob_column, gdf_countries, title, save_name): - """ - plot probabilities of exceeding given threshold at obs. 
points - """ - figure, axis = plt.subplots(1, 1) - figure.set_size_inches(10, 10 / 1.6) - - plt.scatter(x=df.Longitude, y=df.Latitude, vmin=0, vmax=1.0, c=df[prob_column]) - xlim = axis.get_xlim() - ylim = axis.get_ylim() - - gdf_countries.plot(color='lightgrey', ax=axis, zorder=-5) - - axis.set_xlim(xlim) - axis.set_ylim(ylim) - plt.colorbar(shrink=0.75) - plt.title(title) - plt.savefig(save_name) - plt.close() - - -def calculate_hit_miss(df, obs_column, prob_column, threshold, probability): - """ - Reads dataframe with two columns for obs_elev, and probabilities - returns hit/miss/... based on user-defined threshold & probability - """ - hit = len(df[(df[obs_column] >= threshold) & (df[prob_column] >= probability)]) - miss = len(df[(df[obs_column] >= threshold) & (df[prob_column] < probability)]) - false_alarm = len(df[(df[obs_column] < threshold) & (df[prob_column] >= probability)]) - correct_neg = len(df[(df[obs_column] < threshold) & (df[prob_column] < probability)]) - - return hit, miss, false_alarm, correct_neg - - -def calculate_POD_FAR(hit, miss, false_alarm, correct_neg): - """ - Reads hit, miss, false_alarm, and correct_neg - returns POD and FAR - default POD and FAR are np.nan - """ - POD = np.nan - FAR = np.nan - try: - POD = round(hit / (hit + miss), 4) # Probability of Detection - except ZeroDivisionError: - pass - try: - FAR = round(false_alarm / (false_alarm + correct_neg), 4) # False Alarm Rate - except ZeroDivisionError: - pass - return POD, FAR - - -def main(args): - storm_name = args.storm_name.capitalize() - storm_year = args.storm_year - leadtime = args.leadtime - prob_nc_path = Path(args.prob_nc_path) - obs_df_path = Path(args.obs_df_path) - save_dir = args.save_dir - - # *.nc file coordinates - thresholds_ft = [3, 6, 9] # in ft - thresholds_m = [round(i * 0.3048, 4) for i in thresholds_ft] # convert to meter - sources = ['model', 'surrogate'] - probabilities = [0.0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] - - # attributes of 
input files - prediction_variable = 'probabilities' - obs_attribute = 'Elev_m_xGEOID20b' - - # search criteria - max_distance = 1000 # [in meters] to set distance_upper_bound - max_neighbors = 10 # to set k - - blank_arr = np.empty((len(thresholds_ft), 1, 1, len(sources), len(probabilities))) - blank_arr[:] = np.nan - - hit_arr = blank_arr.copy() - miss_arr = blank_arr.copy() - false_alarm_arr = blank_arr.copy() - correct_neg_arr = blank_arr.copy() - POD_arr = blank_arr.copy() - FAR_arr = blank_arr.copy() - - # Load obs file, extract storm obs points and coordinates - df_obs = pd.read_csv(obs_df_path) - Event_name = f'{storm_name}_{storm_year}' - df_obs_storm = df_obs[df_obs.Event == Event_name] - obs_coordinates = stack_station_coordinates( - df_obs_storm.Longitude.values, df_obs_storm.Latitude.values - ) - - # Load probabilities.nc file - ds_prob = xr.open_dataset(prob_nc_path) - - gdf_countries = gpd.GeoSeries( - NaturalEarthFeature(category='physical', scale='10m', name='land',).geometries(), - crs=4326, - ) - - # Loop through thresholds and sources and find corresponding values from probabilities.nc - threshold_count = -1 - for threshold in thresholds_m: - threshold_count += 1 - source_count = -1 - for source in sources: - source_count += 1 - ds_temp = ds_prob.sel(level=threshold, source=source) - tree = create_search_tree(ds_temp.x.values, ds_temp.y.values) - dist, indices = tree.query( - obs_coordinates, k=max_neighbors, distance_upper_bound=max_distance * 1e-5 - ) # 0.01 is equivalent to 1000 m - prediction_prob = find_nearby_prediction( - ds=ds_temp, variable=prediction_variable, indices=indices - ) - df_obs_storm[f'{source}_prob'] = prediction_prob - - # Plot probabilities at obs. 
points - plot_probabilities( - df_obs_storm, - f'{source}_prob', - gdf_countries, - f'Probability of {source} exceeding {thresholds_ft[threshold_count]} ft \n {storm_name}, {storm_year}, {leadtime}-hr leadtime', - os.path.join( - save_dir, - f'prob_{source}_above_{thresholds_ft[threshold_count]}ft_{storm_name}_{storm_year}_{leadtime}-hr.png', - ), - ) - - # Loop through probabilities: calculate hit/miss/... & POD/FAR - prob_count = -1 - for prob in probabilities: - prob_count += 1 - hit, miss, false_alarm, correct_neg = calculate_hit_miss( - df_obs_storm, obs_attribute, f'{source}_prob', threshold, prob - ) - hit_arr[threshold_count, 0, 0, source_count, prob_count] = hit - miss_arr[threshold_count, 0, 0, source_count, prob_count] = miss - false_alarm_arr[threshold_count, 0, 0, source_count, prob_count] = false_alarm - correct_neg_arr[threshold_count, 0, 0, source_count, prob_count] = correct_neg - - pod, far = calculate_POD_FAR(hit, miss, false_alarm, correct_neg) - POD_arr[threshold_count, 0, 0, source_count, prob_count] = pod - FAR_arr[threshold_count, 0, 0, source_count, prob_count] = far - - ds_ROC = xr.Dataset( - coords=dict( - threshold=thresholds_ft, - storm=[storm_name], - leadtime=[leadtime], - source=sources, - prob=probabilities, - ), - data_vars=dict( - hit=(['threshold', 'storm', 'leadtime', 'source', 'prob'], hit_arr), - miss=(['threshold', 'storm', 'leadtime', 'source', 'prob'], miss_arr), - false_alarm=( - ['threshold', 'storm', 'leadtime', 'source', 'prob'], - false_alarm_arr, - ), - correct_neg=( - ['threshold', 'storm', 'leadtime', 'source', 'prob'], - correct_neg_arr, - ), - POD=(['threshold', 'storm', 'leadtime', 'source', 'prob'], POD_arr), - FAR=(['threshold', 'storm', 'leadtime', 'source', 'prob'], FAR_arr), - ), - ) - ds_ROC.to_netcdf( - os.path.join(save_dir, f'{storm_name}_{storm_year}_{leadtime}hr_leadtime_POD_FAR.nc') - ) - - # plot ROC curves - marker_list = ['s', 'x'] - linestyle_list = ['dashed', 'dotted'] - threshold_count = -1 - 
for threshold in thresholds_ft: - threshold_count += 1 - fig = plt.figure() - ax = fig.add_subplot(111) - plt.axline( - (0.0, 0.0), (1.0, 1.0), linestyle='--', color='grey', label='random prediction' - ) - source_count = -1 - for source in sources: - source_count += 1 - plt.plot( - FAR_arr[threshold_count, 0, 0, source_count, :], - POD_arr[threshold_count, 0, 0, source_count, :], - label=f'{source}', - marker=marker_list[source_count], - linestyle=linestyle_list[source_count], - markersize=5, - ) - plt.legend() - plt.xlabel('False Alarm Rate') - plt.ylabel('Probability of Detection') - - plt.title( - f'{storm_name}_{storm_year}, {leadtime}-hr leadtime, {threshold} ft threshold' - ) - plt.savefig( - os.path.join( - save_dir, f'ROC_{storm_name}_{leadtime}hr_leadtime_{threshold}_ft.png' - ) - ) - plt.close() - - -def entry(): - parser = argparse.ArgumentParser() - - parser.add_argument('--storm_name', help='name of the storm', type=str) - - parser.add_argument('--storm_year', help='year of the storm', type=int) - - parser.add_argument('--leadtime', help='OFCL track leadtime hr', type=int) - - parser.add_argument('--prob_nc_path', help='path to probabilities.nc', type=str) - - parser.add_argument('--obs_df_path', help='Path to observations dataframe', type=str) - - # optional - parser.add_argument( - '--save_dir', help='directory for saving analysis', default=os.getcwd(), type=str - ) - - main(parser.parse_args()) - - -if __name__ == '__main__': - warnings.filterwarnings('ignore') - # warnings.filterwarnings("ignore", category=DeprecationWarning) - entry() diff --git a/singularity/post/files/ROC_single_run.py b/stormworkflow/post/ROC_single_run.py similarity index 100% rename from singularity/post/files/ROC_single_run.py rename to stormworkflow/post/ROC_single_run.py From b417653c17f7b01d1f3c876192e422262be90b67 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Wed, 26 Jun 2024 13:46:51 +0000 Subject: [PATCH 49/54] Add cli entrypoint for ROC --- pyproject.toml | 1 
+ stormworkflow/post/ROC_single_run.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0ae1b68..73a1263 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -92,3 +92,4 @@ download_data = "stormworkflow.prep.download_data:cli" setup_ensemble = "stormworkflow.prep.setup_ensemble:cli" combine_ensemble = "stormworkflow.post.combine_ensemble:cli" analyze_ensemble = "stormworkflow.post.analyze_ensemble:cli" +storm_roc_curve = "stormworkflow.post.ROC_single_run:cli" diff --git a/stormworkflow/post/ROC_single_run.py b/stormworkflow/post/ROC_single_run.py index fbb768d..26dbd2c 100644 --- a/stormworkflow/post/ROC_single_run.py +++ b/stormworkflow/post/ROC_single_run.py @@ -273,7 +273,7 @@ def main(args): plt.close() -def entry(): +def cli(): parser = argparse.ArgumentParser() parser.add_argument('--storm_name', help='name of the storm', type=str) @@ -297,4 +297,4 @@ def entry(): if __name__ == '__main__': warnings.filterwarnings('ignore') # warnings.filterwarnings("ignore", category=DeprecationWarning) - entry() + cli() From 503b5c88799ea8f7a0d9926f01487e782050ea46 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 1 Jul 2024 16:33:54 +0000 Subject: [PATCH 50/54] Better output redirect --- stormworkflow/scripts/workflow.sh | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/stormworkflow/scripts/workflow.sh b/stormworkflow/scripts/workflow.sh index 15b137e..e9fab6c 100755 --- a/stormworkflow/scripts/workflow.sh +++ b/stormworkflow/scripts/workflow.sh @@ -30,6 +30,7 @@ function init { local run_dir=$RUN_OUT/$1 mkdir $run_dir mkdir $run_dir/slurm + mkdir $run_dir/output mkdir $run_dir/mesh mkdir $run_dir/setup mkdir $run_dir/nhc_track @@ -74,7 +75,7 @@ hurricane_data \ --lead-times "$L_LEADTIMES_DATASET" \ --preprocessed-tracks-dir "$L_TRACK_DIR" \ --countries-polygon "$L_SHP_DIR/ne_110m_cultural/ne_110m_admin_0_countries.shp" \ - $storm $year + $storm $year 2>&1 | tee 
"${run_dir}/output/head_hurricane_data.out" MESH_KWDS="" @@ -101,7 +102,7 @@ fi MESH_KWDS+=" --out ${run_dir}/mesh" export MESH_KWDS sbatch \ - --output "${run_dir}/slurm/slurm-%j.mesh.out" \ + --output "${run_dir}/output/slurm-%j.mesh.out" \ --wait \ --job-name=mesh_$tag \ --export=ALL,MESH_KWDS,STORM=$storm,YEAR=$year \ @@ -117,7 +118,7 @@ download_data \ --mesh-directory $run_dir/mesh/ \ --date-range-file $run_dir/setup/dates.csv \ --nwm-file $L_NWM_DATASET \ - $DOWNLOAD_KWDS + $DOWNLOAD_KWDS 2>&1 | tee "${run_dir}/output/head_download_nwm.out" echo "Setting up the model..." @@ -137,7 +138,7 @@ PREP_KWDS+=" --pahm-model $pahm_model" export PREP_KWDS # NOTE: We need to wait because run jobs depend on perturbation dirs! setup_id=$(sbatch \ - --output "${run_dir}/slurm/slurm-%j.setup.out" \ + --output "${run_dir}/output/slurm-%j.setup.out" \ --wait \ --job-name=prep_$tag \ --parsable \ @@ -154,7 +155,7 @@ SCHISM_SHARED_ENV+=",MODULES=$L_SOLVE_MODULES" spinup_id=$(sbatch \ --nodes $hpc_solver_nnodes --ntasks $hpc_solver_ntasks \ --parsable \ - --output "${run_dir}/slurm/slurm-%j.spinup.out" \ + --output "${run_dir}/output/slurm-%j.spinup.out" \ --job-name=spinup_$tag \ -d afterok:$setup_id \ --export="$SCHISM_SHARED_ENV",SCHISM_EXEC="$spinup_exec" \ @@ -166,7 +167,7 @@ for i in $run_dir/setup/ensemble.dir/runs/*; do jobid=$( sbatch --parsable -d afterok:$spinup_id \ --nodes $hpc_solver_nnodes --ntasks $hpc_solver_ntasks \ - --output "${run_dir}/slurm/slurm-%j.run-$(basename $i).out" \ + --output "${run_dir}/output/slurm-%j.run-$(basename $i).out" \ --job-name="run_$(basename $i)_$tag" \ --export="$SCHISM_SHARED_ENV",SCHISM_EXEC="$hotstart_exec" \ $run_dir/slurm/schism.sbatch "$i" @@ -177,7 +178,7 @@ done # Post processing sbatch \ --parsable \ - --output "${run_dir}/slurm/slurm-%j.post.out" \ + --output "${run_dir}/output/slurm-%j.post.out" \ --job-name=post_$tag \ -d afterok${joblist} \ 
--export=ALL,IMG="$L_IMG_DIR/prep.sif",ENSEMBLE_DIR="$run_dir/setup/ensemble.dir/" \ From 1dd2b6d7e21b9a7742e2f7e098b8c92cb00e4369 Mon Sep 17 00:00:00 2001 From: SorooshMani-NOAA Date: Mon, 1 Jul 2024 17:24:27 +0000 Subject: [PATCH 51/54] Update importlib metadata to fix esmpy issue --- environment.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment.yml b/environment.yml index 882357c..28cf939 100644 --- a/environment.yml +++ b/environment.yml @@ -17,6 +17,7 @@ dependencies: - geopandas>=0.13 - geos - hdf5 + - importlib_metadata<8 # Fix issue with esmpy Author import - matplotlib - mpi4py - netcdf4 From 3298e4fa6ca86a76673c73a9944e9ff3997e1d0a Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Mon, 1 Jul 2024 18:38:25 +0000 Subject: [PATCH 52/54] update stormevent version to 2.2.4 in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 73a1263..3273d44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,7 @@ dependencies = [ "pytz", "pyyaml", "shapely>=2", - "stormevents==2.2.3", + "stormevents==2.2.4", "rasterio", "requests", "rtree", From 6f6179dc12c24a985cdce7e6b7d92324c8f35616 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar Date: Tue, 2 Jul 2024 15:49:57 +0000 Subject: [PATCH 53/54] use geopandas==0.14 to avoid AttributeError in analyze_ensemble.py --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3273d44..251bbeb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,8 +36,8 @@ dependencies = [ "ensembleperturbation>=1.1.2", "fiona", "geoalchemy2", -# "geopandas>=0.13", - "geopandas", + "geopandas==0.14", # to address AttributeError. 
Should be fixed later in EnsemblePerturbation +# "geopandas", "matplotlib", "mpi4py", "netCDF4", From 24963aa2b128f04bf0a8be6066f8ec88e3ce0273 Mon Sep 17 00:00:00 2001 From: Fariborz Daneshvar <132295102+FariborzDaneshvar-NOAA@users.noreply.github.com> Date: Wed, 3 Jul 2024 09:51:53 -0700 Subject: [PATCH 54/54] Enhance/post runtime (#55) * update combine_ensemble.py to only combine elevations * update analyze_ensemble.py to only use one manning's n coefficient * add dask clustering to analyze_ensemble.py * add dask and dask-jobqueue to pyproject.toml --- pyproject.toml | 2 ++ stormworkflow/post/analyze_ensemble.py | 16 +++++++++++++++- stormworkflow/post/combine_ensemble.py | 1 + 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 251bbeb..8a8a1e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,8 @@ dependencies = [ "coupledmodeldriver>=1.6.6", "colored-traceback", "cmocean", + "dask", + "dask-jobqueue", "ensembleperturbation>=1.1.2", "fiona", "geoalchemy2", diff --git a/stormworkflow/post/analyze_ensemble.py b/stormworkflow/post/analyze_ensemble.py index 00a2932..44a78a3 100644 --- a/stormworkflow/post/analyze_ensemble.py +++ b/stormworkflow/post/analyze_ensemble.py @@ -38,6 +38,9 @@ ) from ensembleperturbation.utilities import get_logger +from dask_jobqueue import SLURMCluster +from dask.distributed import Client + LOGGER = get_logger('klpc_wetonly') @@ -51,7 +54,7 @@ def main(args): def analyze(tracks_dir, analyze_dir): - mann_coefs = [0.025, 0.05, 0.1] + mann_coefs = [0.025] #[0.025, 0.05, 0.1] for mann_coef in mann_coefs: _analyze(tracks_dir, analyze_dir, mann_coef) @@ -409,4 +412,15 @@ def cli(): if __name__ == '__main__': + cluster = SLURMCluster(cores=16, + processes=1, + memory="500GB", + account="compute", + walltime="08:00:00", + header_skip=['--mem'], + interface="eth0") + cluster.scale(6) + client = Client(cluster) + print(client) + cli() diff --git a/stormworkflow/post/combine_ensemble.py 
b/stormworkflow/post/combine_ensemble.py index fffdbd2..107220f 100644 --- a/stormworkflow/post/combine_ensemble.py +++ b/stormworkflow/post/combine_ensemble.py @@ -15,6 +15,7 @@ def main(args): output = combine_results( model='schism', adcirc_like=True, + filenames=['out2d_*.nc'], #only combine elevations. output=ensemble_dir / 'analyze', directory=ensemble_dir, parallel=not args.sequential,