From 8ac992696564547de95655ec250d16dc4bd41782 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Sat, 30 Oct 2021 00:00:43 -0400 Subject: [PATCH 01/65] Replace the `fewshot_description` API with a `description_dict` based interface --- lm_eval/base.py | 18 +++-- lm_eval/evaluator.py | 26 +++--- lm_eval/tasks/anli.py | 4 - lm_eval/tasks/arc.py | 4 - lm_eval/tasks/cbt.py | 4 - lm_eval/tasks/coqa.py | 5 +- lm_eval/tasks/drop.py | 4 - lm_eval/tasks/glue.py | 17 ---- lm_eval/tasks/headqa.py | 4 - lm_eval/tasks/hellaswag.py | 5 -- lm_eval/tasks/hendrycks_math.py | 3 - lm_eval/tasks/lambada.py | 4 - lm_eval/tasks/lambada_cloze.py | 3 - lm_eval/tasks/logiqa.py | 4 - lm_eval/tasks/mathqa.py | 4 - lm_eval/tasks/mc_taco.py | 3 - lm_eval/tasks/mutual.py | 4 - lm_eval/tasks/naturalqs.py | 4 - lm_eval/tasks/openbookqa.py | 4 - lm_eval/tasks/piqa.py | 4 - lm_eval/tasks/prost.py | 8 +- lm_eval/tasks/pubmedqa.py | 5 -- lm_eval/tasks/qa4mre.py | 3 - lm_eval/tasks/quac.py | 5 -- lm_eval/tasks/race.py | 4 - lm_eval/tasks/sat.py | 5 -- lm_eval/tasks/sciq.py | 5 +- lm_eval/tasks/squad.py | 4 - lm_eval/tasks/storycloze.py | 6 -- lm_eval/tasks/superglue.py | 32 -------- lm_eval/tasks/translation.py | 6 -- lm_eval/tasks/triviaqa.py | 5 -- lm_eval/tasks/truthfulqa.py | 8 +- lm_eval/tasks/unscramble.py | 3 - lm_eval/tasks/webqs.py | 7 +- lm_eval/tasks/wikitext.py | 6 +- lm_eval/tasks/winogrande.py | 4 - lm_eval/tasks/wsc273.py | 4 - main.py | 18 +++-- scripts/cost_estimate.py | 2 +- scripts/fewshot_description_experiment.py | 79 ------------------- scripts/get_prompts.py | 1 - scripts/write_out.py | 15 +++- task-guide.md | 54 +++++++++---- .../test_description_option/descriptions.json | 3 + .../test_description_option.py | 39 +++++++++ tests/test_evaluator.py | 4 +- tests/test_version_stable.py | 2 +- 48 files changed, 147 insertions(+), 318 deletions(-) delete mode 100644 scripts/fewshot_description_experiment.py create mode 100644 tests/test_description_option/descriptions.json create mode 100644 tests/test_description_option/test_description_option.py diff --git a/lm_eval/base.py b/lm_eval/base.py index 317c70a67e..000f2f2765 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -2,6 +2,7 @@ import random import numpy as np import re +from lm_eval import tasks from lm_eval.metrics import mean, perplexity, weighted_perplexity, weighted_mean @@ -224,11 +225,15 @@ def higher_is_better(self): pass def fewshot_description(self): + import warnings + warnings.warn( + "`fewshot_description` will be removed in coming versions. 
Pass " \ + "any custom descriptions to the `evaluate` function instead.", + DeprecationWarning) return "" - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): - raw_description = self.fewshot_description() - description = (raw_description + "\n===\n\n") if provide_description and raw_description else "" + def fewshot_context(self, doc, num_fewshot, rnd, description=None): + description = description + "\n\n" if description else "" if num_fewshot == 0: labeled_examples = "" @@ -295,16 +300,13 @@ class PerplexityTask(Task, abc.ABC): def has_training_docs(self): return False - def fewshot_description(self): - return "" - def fewshot_examples(self, k, rnd): assert k == 0 return [] - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, rnd, description=None): assert num_fewshot == 0 - assert not provide_description + assert description is None return "" def higher_is_better(self): diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index f4c7dee84c..63679d854b 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -1,5 +1,6 @@ import collections import itertools +import json import random import lm_eval.metrics import lm_eval.models @@ -7,7 +8,7 @@ import lm_eval.base import numpy as np -def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000): +def simple_evaluate(model, model_args, task_names, description_path=None, num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000): random.seed(1234) np.random.seed(1234) @@ -19,7 +20,12 @@ def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=Non lm = lm_eval.base.CachingLM(lm, 'lm_cache/' + model + '_' + model_args.replace('=', '-').replace(',', '_').replace('/', '-') + '.db') task_dict = lm_eval.tasks.get_task_dict(task_names) - results = evaluate(lm, task_dict, False, num_fewshot, limit) + description_dict = {} + if description_path: + with open(description_path, 'r') as f: + description_dict = json.load(f) + + results = evaluate(lm, task_dict, num_fewshot, limit, description_dict) # add info about the model and few shot config results["config"] = { @@ -28,6 +34,8 @@ def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=Non "num_fewshot": num_fewshot, "batch_size": batch_size, "device": device, + # TODO (jon-tow): Should we add the description info to `results["config"]`? + # "description_dict": description_dict, "no_cache": no_cache, "limit": limit, "bootstrap_iters": bootstrap_iters @@ -36,9 +44,7 @@ def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=Non return results -def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_iters=100000): - assert not provide_description # not implemented. 
todo: implement proper description-providing system - +def evaluate(lm, task_dict, num_fewshot, limit, description_dict=None, bootstrap_iters=100000): # TODO: completely refactor this entire function to not be a huge mess, ideally breaking it down into smaller pieces task_dict_items = [(name, task) for name, task in task_dict.items() if(task.has_validation_docs() or task.has_test_docs())] @@ -73,16 +79,16 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i rnd.seed(42) rnd.shuffle(task_docs) + description = description_dict[task_name] if description_dict and task_name in description_dict else "" + for doc_id, doc in enumerate(itertools.islice(task_docs, 0, limit)): docs[(task_name, doc_id)] = doc - ctx = task.fewshot_context( doc=doc, - provide_description=provide_description, num_fewshot=num_fewshot, - rnd=rnd + rnd=rnd, + description=description ) - reqs = task.construct_requests(doc, ctx) if not isinstance(reqs, (list, tuple)): reqs = [reqs] for i, req in enumerate(reqs): @@ -168,4 +174,4 @@ def make_table(result_dict): # todo: make latex table look good # print(latex_writer.dumps()) - return md_writer.dumps() \ No newline at end of file + return md_writer.dumps() diff --git a/lm_eval/tasks/anli.py b/lm_eval/tasks/anli.py index 1304c5da2b..13c4044560 100644 --- a/lm_eval/tasks/anli.py +++ b/lm_eval/tasks/anli.py @@ -33,10 +33,6 @@ def test_docs(self): if self.has_test_docs(): return self.data["test_r" + str(self.SPLIT)] - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): # OA does this a bit weirdly: they prepend "anli 1: anli 1: " to the beginning # of the prompt (yes, repeating it!). also, " True, False, or Neither?" is directly diff --git a/lm_eval/tasks/arc.py b/lm_eval/tasks/arc.py index a0d13abc59..2a8a999842 100644 --- a/lm_eval/tasks/arc.py +++ b/lm_eval/tasks/arc.py @@ -29,10 +29,6 @@ def _convert_standard(self, doc): } return out_doc - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/cbt.py b/lm_eval/tasks/cbt.py index 8837caff6d..e239a630b4 100644 --- a/lm_eval/tasks/cbt.py +++ b/lm_eval/tasks/cbt.py @@ -17,10 +17,6 @@ class CBTBase(HFTask): VERSION = 0 - def fewshot_description(self): - # TODO: Figure out description. - return "" - def detokenize(self, text): text = text.replace(" '", "'") text = text.replace(" \n", "\n") diff --git a/lm_eval/tasks/coqa.py b/lm_eval/tasks/coqa.py index beba53a663..095220bfe6 100644 --- a/lm_eval/tasks/coqa.py +++ b/lm_eval/tasks/coqa.py @@ -36,10 +36,7 @@ def validation_docs(self): def test_docs(self): pass - - def fewshot_description(self): - return "Given a passage and a conversation so far, answer the next question in the conversation." - + def doc_to_text(self, doc): # Given a passage p, the conversation history {q1, a1, . . . 
qi−1, ai−1} # and a question qi, the task is to predict the answer ai diff --git a/lm_eval/tasks/drop.py b/lm_eval/tasks/drop.py index aaf4201abd..5e1e85529e 100644 --- a/lm_eval/tasks/drop.py +++ b/lm_eval/tasks/drop.py @@ -40,10 +40,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out description - return "" - def _load_docs(self, docs): for doc in docs: for qa in doc["qa_pairs"]: diff --git a/lm_eval/tasks/glue.py b/lm_eval/tasks/glue.py index a9243dafbb..df8194eea6 100644 --- a/lm_eval/tasks/glue.py +++ b/lm_eval/tasks/glue.py @@ -21,10 +21,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO - return "" - def doc_to_text(self, doc): return "{}\nQuestion: Does this sentence make sense?\nAnswer:".format(doc["sentence"]) @@ -69,9 +65,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - return "Indicate if the sentiment of each sentence is positive or negative." - def doc_to_text(self, doc): return "{}\nQuestion: Is this sentence positive or negative?\nAnswer:".format( general_detokenize(doc["sentence"]), @@ -342,9 +335,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - return "Indicate if both sentences mean the same thing." - def doc_to_text(self, doc): return "Sentence 1: {}\nSentence 2: {}\nQuestion: Do both sentences mean the same thing?\nAnswer:".format( general_detokenize(doc["sentence1"]), @@ -395,9 +385,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - return "Indicate if both questions ask the same thing." - def doc_to_text(self, doc): return "Question 1: {}\nQuestion 2: {}\nQuestion: Do both questions ask the same thing?\nAnswer:".format( doc["question1"], @@ -448,10 +435,6 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_description(self): - return "Indicate if both sentences mean the same thing from a scale of 0-5, " \ - "where 5 means identical and 0 means unrelated." - def doc_to_text(self, doc): return "sentence 1: {}\nsentence 2: {}\nAnswer:".format( doc["sentence1"], diff --git a/lm_eval/tasks/headqa.py b/lm_eval/tasks/headqa.py index 3c66dc064b..c258ff1d07 100644 --- a/lm_eval/tasks/headqa.py +++ b/lm_eval/tasks/headqa.py @@ -25,9 +25,5 @@ def _convert_standard(self, doc): } return out_doc - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/hellaswag.py b/lm_eval/tasks/hellaswag.py index 762ce47337..56450cf3e6 100644 --- a/lm_eval/tasks/hellaswag.py +++ b/lm_eval/tasks/hellaswag.py @@ -35,10 +35,5 @@ def _convert_standard(self, doc): } return out_doc - def fewshot_description(self): - return "Label for the relevant action: Sentences describing the " \ - "context, with an incomplete sentence trailing\nanswer that " \ - "plausibly completes the situation." - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/hendrycks_math.py b/lm_eval/tasks/hendrycks_math.py index 379e727d61..bb864e86a4 100644 --- a/lm_eval/tasks/hendrycks_math.py +++ b/lm_eval/tasks/hendrycks_math.py @@ -55,9 +55,6 @@ def validation_docs(self): def test_docs(self): return self._load_docs(self.DATASET_PATH / "test" / self.get_file_info()) - def fewshot_description(self): - return "Given a mathematics problem, determine the answer. 
Simplify your answer as much as possible." - def doc_to_text(self, doc): return "Problem: " + doc["problem"] + "\nAnswer:" diff --git a/lm_eval/tasks/lambada.py b/lm_eval/tasks/lambada.py index bcb4ae019c..f9514ff920 100644 --- a/lm_eval/tasks/lambada.py +++ b/lm_eval/tasks/lambada.py @@ -47,10 +47,6 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " + doc['text'].rsplit(' ', 1)[1] - - def fewshot_description(self): - # TODO: figure out description - return "" def construct_requests(self, doc, ctx): ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) diff --git a/lm_eval/tasks/lambada_cloze.py b/lm_eval/tasks/lambada_cloze.py index 90bd4f10ca..dc1d4b168b 100644 --- a/lm_eval/tasks/lambada_cloze.py +++ b/lm_eval/tasks/lambada_cloze.py @@ -13,6 +13,3 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " + doc['text'].rsplit(' ', 1)[1] - - def fewshot_description(self): - return "Fill in blank:\n" diff --git a/lm_eval/tasks/logiqa.py b/lm_eval/tasks/logiqa.py index ac544ca7c5..36b8b99e4f 100644 --- a/lm_eval/tasks/logiqa.py +++ b/lm_eval/tasks/logiqa.py @@ -80,9 +80,5 @@ def validation_docs(self): def test_docs(self): return self._load_docs(self.DATASET_PATH / "Test.txt") - def fewshot_description(self): - # TODO: figure out actual description - return "" - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/mathqa.py b/lm_eval/tasks/mathqa.py index 84e5ab9eca..a02a5b59bb 100644 --- a/lm_eval/tasks/mathqa.py +++ b/lm_eval/tasks/mathqa.py @@ -29,9 +29,5 @@ def _convert_standard(self, doc): } return out_doc - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/mc_taco.py b/lm_eval/tasks/mc_taco.py index c9b2dd91fc..64a36a01f7 100644 --- a/lm_eval/tasks/mc_taco.py +++ b/lm_eval/tasks/mc_taco.py @@ -39,9 +39,6 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_description(self): - return "Determine whether the candidate answer is plausible (\"yes\") or not (\"no\")" - def doc_to_text(self, doc): return f"{doc['sentence']}\nQuestion: {doc['question']}\n"\ f"Answer: {doc['answer']}\nPlausible:" diff --git a/lm_eval/tasks/mutual.py b/lm_eval/tasks/mutual.py index 17274a46fd..5fe6ffcdb9 100644 --- a/lm_eval/tasks/mutual.py +++ b/lm_eval/tasks/mutual.py @@ -70,10 +70,6 @@ def validation_docs(self): def test_docs(self): return NotImplemented - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def doc_to_text(self, doc): return self.detokenize(doc["article"]) diff --git a/lm_eval/tasks/naturalqs.py b/lm_eval/tasks/naturalqs.py index f31875240f..e7a381dcd4 100644 --- a/lm_eval/tasks/naturalqs.py +++ b/lm_eval/tasks/naturalqs.py @@ -21,10 +21,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out description - return "" - def training_docs(self): # Cache training for faster few-shot. # Data is too large to fit in memory. 
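The `fewshot_description` overrides removed throughout these task files are replaced by a single caller-supplied mapping; a minimal sketch of the new call pattern under this commit's `evaluate` signature (the task names and description strings below are illustrative, reusing text from the deleted overrides):

```python
# Sketch only (not part of the patch): supply per-task descriptions at
# evaluation time instead of hard-coding them in each Task subclass.
import lm_eval.models
import lm_eval.tasks
from lm_eval.evaluator import evaluate

lm = lm_eval.models.get_model("dummy")()
task_dict = lm_eval.tasks.get_task_dict(["coqa", "sst"])
description_dict = {
    # Strings reused from the fewshot_description() overrides deleted above.
    "coqa": "Given a passage and a conversation so far, answer the next question in the conversation.",
    "sst": "Indicate if the sentiment of each sentence is positive or negative.",
}
results = evaluate(lm, task_dict, num_fewshot=0, limit=None, description_dict=description_dict)
```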
diff --git a/lm_eval/tasks/openbookqa.py b/lm_eval/tasks/openbookqa.py index 40fc7a026b..5f87d8a8ec 100644 --- a/lm_eval/tasks/openbookqa.py +++ b/lm_eval/tasks/openbookqa.py @@ -25,9 +25,5 @@ def _convert_standard(self, doc): } return out_doc - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/piqa.py b/lm_eval/tasks/piqa.py index 8b43d1af03..bdf3ec35dc 100644 --- a/lm_eval/tasks/piqa.py +++ b/lm_eval/tasks/piqa.py @@ -18,10 +18,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def _convert_standard(self, doc): out_doc = { "goal": doc["goal"], diff --git a/lm_eval/tasks/prost.py b/lm_eval/tasks/prost.py index 1a634d17c8..f905de7266 100644 --- a/lm_eval/tasks/prost.py +++ b/lm_eval/tasks/prost.py @@ -36,13 +36,9 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, rnd, description=None): assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.' - return super().fewshot_context(doc, num_fewshot, provide_description, rnd) + return super().fewshot_context(doc, num_fewshot, rnd, description) def _convert_standard(self, doc): out_doc = { diff --git a/lm_eval/tasks/pubmedqa.py b/lm_eval/tasks/pubmedqa.py index 14335a5de0..c597064f0c 100644 --- a/lm_eval/tasks/pubmedqa.py +++ b/lm_eval/tasks/pubmedqa.py @@ -23,11 +23,6 @@ def test_docs(self): # HF is labelled as train but its really just for testing return self.data["train"] - def fewshot_description(self): - # Average ctx length in labelled dataset is 238.9 - # 2 few-shot exmamples pushes it beyond context window - return "" - def doc_to_text(self, doc): ctxs = "\n".join(doc["context"]["contexts"]) return "Abstract: {}\nQuestion: {}\nAnswer:".format( diff --git a/lm_eval/tasks/qa4mre.py b/lm_eval/tasks/qa4mre.py index 42e1b98c51..448710317e 100644 --- a/lm_eval/tasks/qa4mre.py +++ b/lm_eval/tasks/qa4mre.py @@ -67,9 +67,6 @@ def load_docs(self, textfilename, tfds=False): out_doc['source'] = src yield out_doc - def fewshot_description(self): - return "" - def test_docs(self): return self.load_docs(f"data/qa4mre/QA4MRE-{self.YEAR}-EN_GS.xml") diff --git a/lm_eval/tasks/quac.py b/lm_eval/tasks/quac.py index bb02b1c4e3..c7ce752233 100644 --- a/lm_eval/tasks/quac.py +++ b/lm_eval/tasks/quac.py @@ -51,11 +51,6 @@ def validation_docs(self): def test_docs(self): raise NotImplementedError("QuAC has no test docs.") - def fewshot_description(self): - # TODO: figure out fewshot description - desc = "TITLE: Title of the context passage - subtitle of the passage\nPARAGRAPH: Passage describing the relevant information for answering questions.\n\nQ: Text of a question.\n\nA: Answer to the question, based on the passage. 
If it cannot be answered based on the passage, write CANNOTANSWER" - return desc - def load_doc(self, myjson): docs = [] for item in myjson: diff --git a/lm_eval/tasks/race.py b/lm_eval/tasks/race.py index d379884669..7145fa6f14 100644 --- a/lm_eval/tasks/race.py +++ b/lm_eval/tasks/race.py @@ -63,10 +63,6 @@ def validation_docs(self): def test_docs(self): return self._collate_data("test") - def fewshot_description(self): - # TODO: figure out description - return "" - @classmethod def get_answer_option(cls, problem): answer = cls.letter_to_num[problem['answer']] diff --git a/lm_eval/tasks/sat.py b/lm_eval/tasks/sat.py index e4411edfd8..d75d7923b5 100644 --- a/lm_eval/tasks/sat.py +++ b/lm_eval/tasks/sat.py @@ -61,10 +61,5 @@ def validation_docs(self): } yield doc - - def fewshot_description(self): - # TODO: figure out actual description - return "" - def doc_to_text(self, doc): return "{} is to {} as".format(*doc['query']) diff --git a/lm_eval/tasks/sciq.py b/lm_eval/tasks/sciq.py index 8993251d78..7e24f03ff6 100644 --- a/lm_eval/tasks/sciq.py +++ b/lm_eval/tasks/sciq.py @@ -50,9 +50,6 @@ def load_docs(self, textfilename): for record in docs: yield self._convert_standard(record) - def fewshot_description(self): - return "" - def training_docs(self): return self.load_docs("data/sciq/SciQ dataset-2 3/train.json") @@ -63,4 +60,4 @@ def test_docs(self): return self.load_docs("data/sciq/SciQ dataset-2 3/test.json") def doc_to_text(self, doc): - return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"]).strip() \ No newline at end of file + return "{}\nQuestion: {}\nAnswer:".format(doc["source"], doc["query"]).strip() diff --git a/lm_eval/tasks/squad.py b/lm_eval/tasks/squad.py index 72e1a19b0e..2a69a67c7b 100644 --- a/lm_eval/tasks/squad.py +++ b/lm_eval/tasks/squad.py @@ -41,10 +41,6 @@ def training_docs(self): def validation_docs(self): return self.data["validation"] - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): return 'Title: ' + doc['title'] + '\n\n' + 'Background: ' + doc['context'] + '\n\n' + 'Question: ' + doc['question'] + '\n\n' + 'Answer:' diff --git a/lm_eval/tasks/storycloze.py b/lm_eval/tasks/storycloze.py index 3e178facb4..2cc16cf66d 100644 --- a/lm_eval/tasks/storycloze.py +++ b/lm_eval/tasks/storycloze.py @@ -27,18 +27,12 @@ def load_doc(self, filename): filereader = csv.reader(file) return list(filereader) - def validation_docs(self): return self.load_doc("data/storycloze/cloze_test_val__winter2018-cloze_test_ALL_val - 1 - 1.csv") def test_docs(self): return self.load_doc("data/storycloze/cloze_test_test__winter2018-cloze_test_ALL_test - 1.csv") - - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def doc_to_text(self, doc): return ' '.join([*doc[1:5]]) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index 33598f2301..c6d0bcfa2c 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -26,10 +26,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "Read the following passages and answer each question with a yes or a no." 
- def doc_to_text(self, doc): return f"{doc['passage']}\nQuestion: {doc['question']}\nAnswer:" @@ -78,11 +74,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "Given a premise and a hypothesis, classify whether the author of the premise is committed" \ - "to the truth of the hypothesis. The three possible labels are true, false or neither." - def doc_to_text(self, doc): return "{}\nQuestion: {}. True, False or Neither?\nAnswer:".format( doc["premise"], @@ -150,11 +141,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "Given a premise and one alternative with a causal relation to the premise and another without," \ - "choose the more plausible alternative" - def doc_to_text(self, doc): # Drop the period connector = { @@ -215,10 +201,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "READING COMPREHENSION ANSWER KEY" - def doc_to_text(self, doc): return f"{doc['paragraph']}\nQuestion: {doc['question']}\nAnswer:" @@ -270,10 +252,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "" - def training_docs(self): # In ReCoRD, each doc manifests multiple "examples" in the context of few shot example packing. # Each doc consists of multiple answer candidates, each of which is scored yes/no. @@ -363,10 +341,6 @@ def has_validation_docs(self): def has_test_docs(self): return False - def fewshot_description(self): - # TODO: figure out actual description - return "" - def doc_to_text(self, doc): return "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the" \ " two sentences above?\nAnswer:".format( @@ -432,12 +406,6 @@ def training_docs(self): ] return self._training_docs - def fewshot_description(self): - return "Final Exam with Answer Key\n" \ - "Instructions: Please carefully read the following passages. " \ - "For each passage, you must identify which noun the pronoun marked in *bold*" \ - " refers to.\n=====" - def doc_to_text(self, doc): raw_passage = doc["text"] # NOTE: HuggingFace span indices are word-based not character-based. diff --git a/lm_eval/tasks/translation.py b/lm_eval/tasks/translation.py index de02946334..2e70b03a84 100644 --- a/lm_eval/tasks/translation.py +++ b/lm_eval/tasks/translation.py @@ -166,12 +166,6 @@ def higher_is_better(self): "ter": False, } - def fewshot_description(self): - language_codes = self.sacrebleu_language_pair.split("-") - src_lang = code_to_language(language_codes[0]) - tar_lang = code_to_language(language_codes[1]) - return f"Translate these {src_lang} phrases to {tar_lang}." 
- def __str__(self): language_codes = self.sacrebleu_language_pair.split("-") src_lang = code_to_language(language_codes[0]) diff --git a/lm_eval/tasks/triviaqa.py b/lm_eval/tasks/triviaqa.py index e61a40bdde..0579cd4f5f 100644 --- a/lm_eval/tasks/triviaqa.py +++ b/lm_eval/tasks/triviaqa.py @@ -36,10 +36,6 @@ def validation_docs(self): def test_docs(self): raise NotImplementedError() - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def doc_to_text(self, doc): return f"Question: {doc['Question']}\nAnswer:" @@ -56,7 +52,6 @@ def _remove_prefixes(self, aliases): ret.append(alias) return ret - def construct_requests(self, doc, ctx): ret = [] diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index e3c621c4e2..7380c65555 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -85,9 +85,9 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd) + return super().fewshot_context(doc, num_fewshot, rnd, description) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of @@ -213,9 +213,9 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd) + return super().fewshot_context(doc, num_fewshot, rnd, description) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of diff --git a/lm_eval/tasks/unscramble.py b/lm_eval/tasks/unscramble.py index dc742a2cee..5e90dd358c 100644 --- a/lm_eval/tasks/unscramble.py +++ b/lm_eval/tasks/unscramble.py @@ -45,9 +45,6 @@ def validation_docs(self): file = self.BASE_PATH / self.FILENAME return (json.loads(line) for line in open(file).read().splitlines()) - def fewshot_description(self): - return "Please unscramble the letters into a word, and write that word:" - def doc_to_text(self, doc): return doc["context"] diff --git a/lm_eval/tasks/webqs.py b/lm_eval/tasks/webqs.py index 51ed016758..ebab7c8968 100644 --- a/lm_eval/tasks/webqs.py +++ b/lm_eval/tasks/webqs.py @@ -17,10 +17,6 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_description(self): - # TODO: figure out description - return "" - def doc_to_text(self, doc): return "Question: " + doc['question'] + '\nAnswer:' @@ -40,7 +36,6 @@ def _remove_prefixes(self, aliases): ret.append(alias) return ret - def construct_requests(self, doc, ctx): ret = [] @@ -62,4 +57,4 @@ def aggregation(self): def higher_is_better(self): return { "acc": True - } \ No newline at end of file + } diff --git a/lm_eval/tasks/wikitext.py b/lm_eval/tasks/wikitext.py index 24f9ec3507..3ef0b4b6ed 100644 --- a/lm_eval/tasks/wikitext.py +++ b/lm_eval/tasks/wikitext.py @@ -49,10 +49,6 @@ def download(self): download_file("https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip", "data/wikitext/wikitext-2-raw-v1.zip", 
"ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11") sh("cd data/wikitext/ && unzip wikitext-2-raw-v1.zip") - def fewshot_description(self): - # TODO: figure out fewshot description - return "" - def has_validation_docs(self): return True @@ -87,4 +83,4 @@ def doc_to_target(self, doc): def count_words(self, doc): # count number of words in *original doc before detokenization* - return len(re.split(r"\s+", doc)) \ No newline at end of file + return len(re.split(r"\s+", doc)) diff --git a/lm_eval/tasks/winogrande.py b/lm_eval/tasks/winogrande.py index 106c826a69..2e188d7b30 100644 --- a/lm_eval/tasks/winogrande.py +++ b/lm_eval/tasks/winogrande.py @@ -29,10 +29,6 @@ def has_test_docs(self): def doc_to_text(self, doc): return self.partial_context(doc, doc["option" + doc["answer"]]) - def fewshot_description(self): - # TODO: redo description - return "Winograd schema sentence including a either a ___ blank with a missing word, making the pronoun ambiguous, or the same with the word filled in." - @classmethod def partial_context(cls, doc, option): # Substitute the pronoun in the sentence with the specified option diff --git a/lm_eval/tasks/wsc273.py b/lm_eval/tasks/wsc273.py index 20dd5175b6..505557b15c 100644 --- a/lm_eval/tasks/wsc273.py +++ b/lm_eval/tasks/wsc273.py @@ -53,10 +53,6 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_description(self): - # TODO: redo description - return "Winograd schema sentence with correct continuation. True. Winograd schema sentence with incorrect continuation. False." - def fewshot_examples(self, k, rnd): # NOTE: `super().fewshot_examples` samples from training docs which are # not available for this test-set-only dataset. diff --git a/main.py b/main.py index 7a973e197b..efdf29ba2c 100644 --- a/main.py +++ b/main.py @@ -13,7 +13,7 @@ def parse_args(): parser.add_argument('--model', required=True) parser.add_argument('--model_args', default="") parser.add_argument('--tasks', default="all_tasks") - parser.add_argument('--provide_description', action="store_true") + parser.add_argument('--description_path', default=None) parser.add_argument('--num_fewshot', type=int, default=0) parser.add_argument('--batch_size', type=int, default=None) parser.add_argument('--device', type=str, default=None) @@ -26,8 +26,6 @@ def main(): args = parse_args() - assert not args.provide_description # not implemented - if args.limit: print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. 
REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.") @@ -36,7 +34,17 @@ def main(): else: task_names = args.tasks.split(",") - results = evaluator.simple_evaluate(args.model, args.model_args, task_names, args.num_fewshot, args.batch_size, args.device, args.no_cache, args.limit) + results = evaluator.simple_evaluate( + args.model, + args.model_args, + task_names, + args.description_path, + args.num_fewshot, + args.batch_size, + args.device, + args.no_cache, + args.limit + ) dumped = json.dumps(results, indent=2) @@ -46,7 +54,7 @@ def main(): with open(args.output_path, "w") as f: f.write(dumped) - print(f"{args.model} ({args.model_args}), limit: {args.limit}, provide_description: {args.provide_description}, num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}") + print(f"{args.model} ({args.model_args}), limit: {args.limit}, num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}") print(evaluator.make_table(results)) if __name__ == "__main__": diff --git a/scripts/cost_estimate.py b/scripts/cost_estimate.py index 4339b8dbd2..5f15079315 100644 --- a/scripts/cost_estimate.py +++ b/scripts/cost_estimate.py @@ -51,7 +51,7 @@ def main(): values = [] for taskname in task_list.split(","): lm.tokencost = 0 - evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, False, 0, None, bootstrap_iters=10) + evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, 0, None, bootstrap_iters=10) print(taskname, lm.tokencost) values.append([taskname, lm.tokencost, lm.tokencost / 1000 * 0.0008, lm.tokencost / 1000 * 0.0012, lm.tokencost / 1000 * 0.006, lm.tokencost / 1000 * 0.06]) diff --git a/scripts/fewshot_description_experiment.py b/scripts/fewshot_description_experiment.py deleted file mode 100644 index e6ad97b340..0000000000 --- a/scripts/fewshot_description_experiment.py +++ /dev/null @@ -1,79 +0,0 @@ -import json -import numpy as np -import random -import logging -from lm_eval import models, tasks, evaluator, base - -logging.getLogger("openai").setLevel(logging.WARNING) - - -fewshot_descriptions = [ - "foo", - "bar" -] - -task = "lambada" -num_fewshot = 0 -model = "gpt2" -model_args = "" -limit = None -no_cache = False - - -class CustomDescTask: - def __init__(self, task, desc): - self.task = task - self.desc = desc - - def fewshot_description(): - return self.desc - - self.task.fewshot_description = fewshot_description - - def __getattr__(self, attr): - return getattr(self.task, attr) - - -def main(): - random.seed(42) - np.random.seed(42) - - lm = models.get_model(model).create_from_arg_string(model_args) - - if limit: - print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. 
REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.") - - if not no_cache: - lm = base.CachingLM(lm, 'lm_cache/' + model + '_' + model_args.replace('=', '-').replace(',', '_') + '.db') - - task_dict = tasks.get_task_dict([task]) - - for desc in fewshot_descriptions: - custom_task_dict = {k: CustomDescTask(v, desc) for k, v in task_dict.items()} - - results = evaluator.evaluate(lm, custom_task_dict, True, num_fewshot, limit) - - dumped = json.dumps(results, indent=2) - - print('Description:', desc) - print(dumped) - - # MAKE TABLE - from pytablewriter import MarkdownTableWriter - - writer = MarkdownTableWriter() - writer.headers = ["Task", "Metric", "Value"] - - values = [] - - for k, dic in results.items(): - for m, v in dic.items(): - values.append([k, m, '%.4f' % v]) - k = "" - writer.value_matrix = values - - print(writer.dumps()) - - -if __name__ == "__main__": - main() diff --git a/scripts/get_prompts.py b/scripts/get_prompts.py index cdda6dc43e..56a9ff79f4 100644 --- a/scripts/get_prompts.py +++ b/scripts/get_prompts.py @@ -9,7 +9,6 @@ print('#', tname) docs = islice(task.validation_docs() if task.has_validation_docs() else task.test_docs(), ct) print() - print('**Zero-Shot Prompt**:', "\n```\n" + task.fewshot_description() + "\n```\n") for i in range(ct): print() doc = next(docs) diff --git a/scripts/write_out.py b/scripts/write_out.py index b7eb30c15a..bd63168acf 100644 --- a/scripts/write_out.py +++ b/scripts/write_out.py @@ -1,5 +1,6 @@ import argparse import numpy as np +import json import os import random from lm_eval import tasks @@ -12,7 +13,7 @@ def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--output_base_path', required=True) parser.add_argument('--tasks', default="all_tasks") - parser.add_argument('--provide_description', action="store_true") + parser.add_argument('--description_path', default=None) parser.add_argument('--sets', type=str, default="val") # example: val,test parser.add_argument('--num_fewshot', type=int, default=1) parser.add_argument('--seed', type=int, default=42) @@ -29,6 +30,12 @@ def main(): else: task_names = args.tasks.split(",") task_dict = tasks.get_task_dict(task_names) + + description_dict = {} + if args.description_path: + with open(args.description_path, 'r') as f: + description_dict = json.load(f) + os.makedirs(args.output_base_path, exist_ok=True) for task_name, task in task_dict.items(): rnd = random.Random() @@ -47,14 +54,16 @@ def main(): docs = join_iters(iters) + description = description_dict[task_name] if description_dict and task_name in description_dict else "" + with open(os.path.join(args.output_base_path, task_name), "w") as f: for i, doc in zip(range(args.num_examples), docs) if args.num_examples > 0 else enumerate(docs): f.write(EXAMPLE_DIVIDER.format(i=i)) ctx = task.fewshot_context( doc=doc, - provide_description=args.provide_description, num_fewshot=args.num_fewshot, - rnd=rnd + rnd=rnd, + description=description ) f.write(ctx + "\n") diff --git a/task-guide.md b/task-guide.md index 5ea43fc2f4..7bee0ab2c2 100644 --- a/task-guide.md +++ b/task-guide.md @@ -87,8 +87,7 @@ There are 2 standard approaches we follow for downloading data: ``` These methods return `True`/`False` whether or not your task dataset provides documents for each split type. __Note__: if the test set doesn't have publicly available labels, please do not put it down as having a test set. - Lastly, we need to load the documents. In our terminology, a document (`doc`) is a single natural language data example stored in a Python `dict`. 
E.g.:
- `{“question”: “What is the capital of France?”, “answer”: “Paris”}`. Override the following methods to load your data splits from their storage location in `DATASET_PATH`:
+ Lastly, we need to load the documents. In our terminology, a document (`doc`) is a single natural language data example stored in a Python `dict`. E.g.: `{“question”: “What is the capital of France?”, “answer”: “Paris”}`. Override the following methods to load your data splits from their storage location in `DATASET_PATH`:
 ```python
 def training_docs(self):
     return #...
@@ -117,7 +116,7 @@ class TaskName(..., MultipleChoiceTask):
 This will require you to format your documents such that they contain `gold` and `choices` fields. They can also have other fields, but those will be ignored by `MultipleChoiceTask`. `choices` should be a list of possible continuations, and `gold` should be an integer specifying the index of the correct completion.
 
-See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this.
+See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this.
 
 You can now skip ahead to registering your task.
 
@@ -125,17 +124,9 @@ You can now skip ahead to registering your task
+In the case your task is _not_ multiple-choice, override the following methods for your task class: -In the case your task is not multiple-choice, override the following methods for your task class: - -Put the natural language task description as a single line (no `\n`s) string here. E.g. `"Translate English to French:"` - -```python -def fewshot_description(self): - return "" -``` - -Format your document into a single query prompt __without the answer__ here. This method takes a single `doc` example (in dictionary form) . You should concatenate its members into a nicely formatted prompt. +Format your document into a single query prompt __without the answer__ here. This method takes a single `doc` example of type `dict` with `str` key-value members. You should concatenate these `doc` item values together into a neatly formatted prompt. ```python def doc_to_text(self, doc): @@ -151,6 +142,41 @@ def doc_to_target(self, doc): Understand that the strings from `doc_to_text` and `doc_to_target` will be concatenated together to build up labeled examples in the k-shot setting where k > 0. Design with that in mind 👍. +### Formatting Prompts + +If you'd like to prepend your few-shot examples with a natural language description or provide a lone custom prompt under a zero-shot setting, you can do this on a per-task basis via the `description_dict` arg of `evaluator.evaluate` which is accessible through the `evaluator` module. This `description_dict` must adhere to the following key-value structure: + +- **key**: the task name as specified in the lm-eval-harness task registry (see the following section on task registry). +- **value**: the corresponding description/prompt for the task identified by **key**. + +E.g. + +```python +description_dict = { + "task_name_1": "task_name_1 custom prompt or few-shot task description", + "task_name_2": "task_name_2 custom prompt or few-shot task description", + ... +} +``` + +At a higher level, one can interface with `evaluator.evaluate` by simply passing a JSON file path to the `description_path` arg of the command-line interface program, `main.py`. The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g. for some file at `/your/path/descriptions.json` you might have: + +```json +{ + "cycle_letters": "Please unscramble the letters into a word, and write that word:", + "copa": "Given a premise and one alternative with a causal relation to the premise and another without, choose the more plausible alternative" +} +``` + +which can then be hooked up to the evaluator through the `main.py` CLI as: + +```python +python main.py \ +--tasks cycle_letters,copa \ +--description_path /your/path/descriptions.json \ +... +``` + ### Registering Your Task Now's a good time to register your task to expose it for usage. All you'll need to do is import your task module in `lm_eval/tasks/__init__.py` and provide an entry in the `TASK_REGISTRY` dictionary with the key as the name of your benchmark task (in the form it'll be referred to in the command line) and the value as the task class. See how it's done for other tasks in the [file](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/__init__.py). 
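To make the registry step above concrete, an entry might look like the following sketch (`your_task` and `YourTaskName` are placeholder names for your own module and class, not part of this patch):

```python
# lm_eval/tasks/__init__.py — illustrative registry entry with placeholder names.
from . import your_task

TASK_REGISTRY = {
    # ... existing task entries ...
    "your_task_name": your_task.YourTaskName,
}
```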
@@ -161,7 +187,7 @@ After registering your task, you can now check on your data downloading and veri ```bash python -m scripts.write_out \ - --task \ + --tasks \ --output_base_path \ --sets \ --num_fewshot K \ diff --git a/tests/test_description_option/descriptions.json b/tests/test_description_option/descriptions.json new file mode 100644 index 0000000000..6bace5dac5 --- /dev/null +++ b/tests/test_description_option/descriptions.json @@ -0,0 +1,3 @@ +{ + "hellaswag": "Label for the relevant action:\nSentences describing context, with an incomplete sentence trailing answer that plausibly completes the situation." +} diff --git a/tests/test_description_option/test_description_option.py b/tests/test_description_option/test_description_option.py new file mode 100644 index 0000000000..fd6b7b16f9 --- /dev/null +++ b/tests/test_description_option/test_description_option.py @@ -0,0 +1,39 @@ +import json +import argparse +import lm_eval.tasks +import lm_eval.models +from lm_eval.evaluator import evaluate + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--description_path', default=None) + parser.add_argument('--num_fewshot', type=int, default=0) + parser.add_argument('--limit', type=int, default=None) + return parser.parse_args() + + +def main(): + args = parse_args() + + task_names = ['hellaswag', 'copa'] + task_dict = lm_eval.tasks.get_task_dict(task_names) + lm = lm_eval.models.get_model('dummy')() + + description_dict = {} + if args.description_path: + with open(args.description_path, 'r') as f: + description_dict = json.load(f) + + num_fewshot = args.num_fewshot + results = evaluate( + lm, + task_dict, + num_fewshot, + args.limit, + description_dict + ) + + +if __name__ == '__main__': + main() diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py index 8b6ba9c2e3..0936d77a0a 100644 --- a/tests/test_evaluator.py +++ b/tests/test_evaluator.py @@ -47,8 +47,8 @@ def ll_perp_fn(reqs): lm.loglikelihood_rolling = ll_perp_fn limit = 10 - e1 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10) - e2 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10) + e1 = evaluator.evaluate(lm, task_dict, 0, limit, description_dict=None, bootstrap_iters=10) + e2 = evaluator.evaluate(lm, task_dict, 0, limit, description_dict=None, bootstrap_iters=10) # check taht caching is working assert e1 == e2 diff --git a/tests/test_version_stable.py b/tests/test_version_stable.py index 12933d61c8..d1e7a46f1a 100644 --- a/tests/test_version_stable.py +++ b/tests/test_version_stable.py @@ -81,5 +81,5 @@ def greedy_until(reqs): lm.greedy_until = greedy_until limit = None - res = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10) + res = evaluator.evaluate(lm, task_dict, 0, limit, description_dict=None, bootstrap_iters=10) assert_target(f"{taskname}-v{Task.VERSION}-res", res) From 1d04c42d2c9b92094c0fd6b7497120ce76984590 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 18:18:55 -0500 Subject: [PATCH 02/65] Merge --- lm_eval/base.py | 17 +++++++++++++---- lm_eval/evaluator.py | 10 +++++----- main.py | 3 ++- scripts/write_out.py | 8 +++++--- 4 files changed, 25 insertions(+), 13 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index 0ad023335b..e9661164d0 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -452,12 +452,17 @@ def higher_is_better(self): def fewshot_description(self): import warnings warnings.warn( - "`fewshot_description` will be removed in coming versions. 
Pass " \
+            "`fewshot_description` will be removed in future versions. Pass " \
             "any custom descriptions to the `evaluate` function instead.",
             DeprecationWarning)
         return ""
 
-    def fewshot_context(self, doc, num_fewshot, rnd, description=None):
+    def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None):
+        assert not provide_description, (
+            "The `provide_description` arg will be removed in future versions. To provide "
+            "custom descriptions on a per-task basis, supply the `description_dict` "
+            "arg with your task-to-description dictionary."
+        )
         description = description + "\n\n" if description else ""
 
         if num_fewshot == 0:
@@ -531,9 +536,13 @@ def fewshot_examples(self, k, rnd):
         assert k == 0
         return []
 
-    def fewshot_context(self, doc, num_fewshot, rnd, description=None):
+    def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None):
         assert num_fewshot == 0
-        assert description is None
+        assert not provide_description, (
+            "The `provide_description` arg will be removed in future versions. To provide "
+            "custom descriptions on a per-task basis, supply the `description_dict` "
+            "arg with your task-to-description dictionary."
+        )
         return ""
 
     def higher_is_better(self):
diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index c84e4528f7..dc2dae4e05 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -48,12 +48,13 @@ def simple_evaluate(model, model_args, task_names,
     )
     task_dict = lm_eval.tasks.get_task_dict(task_names)
+
     description_dict = {}
-    if description_path:
-        with open(description_path, 'r') as f:
+    if description_dict_path:
+        with open(description_dict_path, 'r') as f:
             description_dict = json.load(f)
 
-    results = evaluate(lm, task_dict, num_fewshot, limit, description_dict)
+    results = evaluate(lm, task_dict, False, num_fewshot, limit, description_dict=description_dict)
 
     # add info about the model and few shot config
     results["config"] = {
@@ -62,8 +63,6 @@ def simple_evaluate(model, model_args, task_names,
         "num_fewshot": num_fewshot,
         "batch_size": batch_size,
         "device": device,
-        # TODO (jon-tow): Should we add the description info to `results["config"]`?
- # "description_dict": description_dict, "no_cache": no_cache, "limit": limit, "bootstrap_iters": bootstrap_iters @@ -140,6 +139,7 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i ctx = task.fewshot_context( doc=doc, num_fewshot=num_fewshot, + provide_description=provide_description, rnd=rnd, description=description ) diff --git a/main.py b/main.py index 1466e6a26a..e55015b4a2 100644 --- a/main.py +++ b/main.py @@ -12,13 +12,14 @@ def parse_args(): parser.add_argument('--model', required=True) parser.add_argument('--model_args', default="") parser.add_argument('--tasks', default="all_tasks") - parser.add_argument('--description_path', default=None) + parser.add_argument('--provide_description', action="store_true") parser.add_argument('--num_fewshot', type=int, default=0) parser.add_argument('--batch_size', type=int, default=None) parser.add_argument('--device', type=str, default=None) parser.add_argument('--output_path', default=None) parser.add_argument('--limit', type=int, default=None) parser.add_argument('--no_cache', action="store_true") + parser.add_argument('--description_dict_path', default=None) return parser.parse_args() diff --git a/scripts/write_out.py b/scripts/write_out.py index bd63168acf..b39fd64541 100644 --- a/scripts/write_out.py +++ b/scripts/write_out.py @@ -13,11 +13,12 @@ def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--output_base_path', required=True) parser.add_argument('--tasks', default="all_tasks") - parser.add_argument('--description_path', default=None) + parser.add_argument('--provide_description', action="store_true") parser.add_argument('--sets', type=str, default="val") # example: val,test parser.add_argument('--num_fewshot', type=int, default=1) parser.add_argument('--seed', type=int, default=42) parser.add_argument('--num_examples', type=int, default=1) + parser.add_argument('--description_dict_path', default=None) return parser.parse_args() @@ -32,8 +33,8 @@ def main(): task_dict = tasks.get_task_dict(task_names) description_dict = {} - if args.description_path: - with open(args.description_path, 'r') as f: + if args.description_dict_path: + with open(args.description_dict_path, 'r') as f: description_dict = json.load(f) os.makedirs(args.output_base_path, exist_ok=True) @@ -62,6 +63,7 @@ def main(): ctx = task.fewshot_context( doc=doc, num_fewshot=args.num_fewshot, + provide_description=args.provide_description, rnd=rnd, description=description ) From ee53be219bf3f1ef485a725db6b7047b09d0eb63 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 18:26:19 -0500 Subject: [PATCH 03/65] Add `provide_description` arg for backward compat --- lm_eval/evaluator.py | 9 ++++++-- lm_eval/tasks/hendrycks_ethics.py | 6 ----- lm_eval/tasks/hendrycks_test.py | 4 ---- lm_eval/tasks/prost.py | 4 ++-- lm_eval/tasks/truthfulqa.py | 8 +++---- scripts/cost_estimate.py | 2 +- task-guide.md | 15 ++++++------ .../descriptions.json | 0 .../test_description_dict.py} | 23 +++++++++---------- tests/test_evaluator.py | 4 ++-- 10 files changed, 35 insertions(+), 40 deletions(-) rename tests/{test_description_option => test_description_dict}/descriptions.json (100%) rename tests/{test_description_option/test_description_option.py => test_description_dict/test_description_dict.py} (52%) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index dc2dae4e05..4c224edef3 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -11,7 +11,8 @@ def simple_evaluate(model, model_args, task_names, 
num_fewshot=0, batch_size=None, device=None, - no_cache=False, limit=None, bootstrap_iters=100000): + no_cache=False, limit=None, bootstrap_iters=100000, + description_dict_path=None): """Instantiate and evaluate a model on a list of tasks. :param model: str @@ -32,6 +33,8 @@ def simple_evaluate(model, model_args, task_names, Limit the number of examples per task (only use this for testing) :param bootstrap_iters: Number of iterations for bootstrap statistics + :param description_dict_path: + Path to a JSON file containing `task_name: description` key-values for custom prompts :return Dictionary of results """ @@ -71,7 +74,7 @@ def simple_evaluate(model, model_args, task_names, return results -def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_iters=100000): +def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_iters=100000, description_dict=None): """Instantiate and evaluate a model on a list of tasks. :param lm: obj @@ -86,6 +89,8 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i Limit the number of examples per task (only use this for testing) :param bootstrap_iters: Number of iterations for bootstrap statistics + :param description_dict: + Dictionary of task descriptions of the form: `task_name: description` :return Dictionary of results """ diff --git a/lm_eval/tasks/hendrycks_ethics.py b/lm_eval/tasks/hendrycks_ethics.py index 50e94a508c..75369ed237 100644 --- a/lm_eval/tasks/hendrycks_ethics.py +++ b/lm_eval/tasks/hendrycks_ethics.py @@ -237,9 +237,6 @@ def process_doc(self, docs): for doc in docs: yield {"activity": doc[0], "baseline": doc[1], "rating": ""} - def fewshot_description(self): - return "Rate how pleasant each of the following activities is on a scale from 1 (very unpleasant) to 10 (very pleasant).\n\n" - def fewshot_examples(self, k, rnd): # Overwriting fewshot examples as k can be max 5 assert k <= 5, "There are only 5 possible shots for this task. Refer to the V2 for more." @@ -350,9 +347,6 @@ class EthicsVirtue(Ethics): def get_prefix(self): return "virtue/virtue" - def fewshot_description(self): - return "The following is a list of sentences and traits, along with whether the trait is exhibited in that sentence.\n\n" - def process_doc(self, doc): # Append identifiers before shuffling to calculate exact matches lateron & skip the first element of headers return [x + [i] for i, x in enumerate(doc[1:])] diff --git a/lm_eval/tasks/hendrycks_test.py b/lm_eval/tasks/hendrycks_test.py index 46c0306fcd..a505f3a134 100644 --- a/lm_eval/tasks/hendrycks_test.py +++ b/lm_eval/tasks/hendrycks_test.py @@ -114,9 +114,5 @@ def fewshot_examples(self, k, rnd): return rnd.sample(list(self._fewshot_docs), k) - def fewshot_description(self): - subject = self.subject.replace("_", " ") - return f"The following are multiple choice questions (with answers) about {subject}." - def doc_to_text(self, doc): return doc["query"] diff --git a/lm_eval/tasks/prost.py b/lm_eval/tasks/prost.py index f905de7266..d53ece825b 100644 --- a/lm_eval/tasks/prost.py +++ b/lm_eval/tasks/prost.py @@ -36,9 +36,9 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_context(self, doc, num_fewshot, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.' 
- return super().fewshot_context(doc, num_fewshot, rnd, description) + return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) def _convert_standard(self, doc): out_doc = { diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index b02595d223..9fe941f614 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -85,9 +85,9 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, rnd, description) + return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of @@ -217,9 +217,9 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, rnd, description) + return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of diff --git a/scripts/cost_estimate.py b/scripts/cost_estimate.py index 5f15079315..4339b8dbd2 100644 --- a/scripts/cost_estimate.py +++ b/scripts/cost_estimate.py @@ -51,7 +51,7 @@ def main(): values = [] for taskname in task_list.split(","): lm.tokencost = 0 - evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, 0, None, bootstrap_iters=10) + evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, False, 0, None, bootstrap_iters=10) print(taskname, lm.tokencost) values.append([taskname, lm.tokencost, lm.tokencost / 1000 * 0.0008, lm.tokencost / 1000 * 0.0012, lm.tokencost / 1000 * 0.006, lm.tokencost / 1000 * 0.06]) diff --git a/task-guide.md b/task-guide.md index 7bee0ab2c2..bf63dac7d4 100644 --- a/task-guide.md +++ b/task-guide.md @@ -144,7 +144,7 @@ Understand that the strings from `doc_to_text` and `doc_to_target` will be conca ### Formatting Prompts -If you'd like to prepend your few-shot examples with a natural language description or provide a lone custom prompt under a zero-shot setting, you can do this on a per-task basis via the `description_dict` arg of `evaluator.evaluate` which is accessible through the `evaluator` module. This `description_dict` must adhere to the following key-value structure: +If you'd like to prepend your few-shot examples with a natural language description or provide a lone custom prompt for a zero-shot task, you can do so on a per-task basis via the `description_dict` arg of `evaluator.evaluate` which is accessible from the `evaluator` module. This `description_dict` must adhere to the following key-value structure: - **key**: the task name as specified in the lm-eval-harness task registry (see the following section on task registry). - **value**: the corresponding description/prompt for the task identified by **key**. @@ -153,13 +153,13 @@ E.g. 
 ```python
 description_dict = {
-    "task_name_1": "task_name_1 custom prompt or few-shot task description",
-    "task_name_2": "task_name_2 custom prompt or few-shot task description",
+    "task_name_1": "fewshot description",
+    "task_name_2": "fewshot description",
     ...
 }
 ```
 
-At a higher level, one can interface with `evaluator.evaluate` by simply passing a JSON file path to the `description_path` arg of the command-line interface program, `main.py`. The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g. for some file at `/your/path/descriptions.json` you might have:
+One can also interface with `evaluator.evaluate` from a higher level by simply passing a JSON file path to the `description_dict_path` arg of the command-line interface program, `main.py`. The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g. for some file at `/your/path/descriptions.json` you might have:
 
 ```json
 {
@@ -173,7 +173,7 @@ which can then be hooked up to the evaluator through the `main.py` CLI as:
 
 ```python
 python main.py \
 --tasks cycle_letters,copa \
---description_path /your/path/descriptions.json \
+--description_dict_path /your/path/descriptions.json \
 ...
 ```
 
@@ -187,11 +187,12 @@ After registering your task, you can now check on your data downloading and veri
 
 ```bash
 python -m scripts.write_out \
-    --tasks <task-names> \
     --output_base_path <path> \
+    --tasks <task-names> \
     --sets <train | val | test> \
     --num_fewshot K \
-    --num_examples N
+    --num_examples N \
+    --description_dict_path <path>
 ```
 
 Open the file specified at the `--output_base_path <path>` and ensure it passes
diff --git a/tests/test_description_option/descriptions.json b/tests/test_description_dict/descriptions.json
similarity index 100%
rename from tests/test_description_option/descriptions.json
rename to tests/test_description_dict/descriptions.json
diff --git a/tests/test_description_option/test_description_option.py b/tests/test_description_dict/test_description_dict.py
similarity index 52%
rename from tests/test_description_option/test_description_option.py
rename to tests/test_description_dict/test_description_dict.py
index fd6b7b16f9..ad6dc13b6b 100644
--- a/tests/test_description_option/test_description_option.py
+++ b/tests/test_description_dict/test_description_dict.py
@@ -5,15 +5,13 @@
 from lm_eval.evaluator import evaluate
 
 
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--description_path', default=None)
-    parser.add_argument('--num_fewshot', type=int, default=0)
-    parser.add_argument('--limit', type=int, default=None)
-    return parser.parse_args()
-
-
-def main():
+def test_cli_description_dict_path():
+    def parse_args():
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--description_dict_path', default=None)
+        parser.add_argument('--num_fewshot', type=int, default=0)
+        parser.add_argument('--limit', type=int, default=None)
+        return parser.parse_args()
     args = parse_args()
 
     task_names = ['hellaswag', 'copa']
@@ -21,14 +19,15 @@
     lm = lm_eval.models.get_model('dummy')()
 
     description_dict = {}
-    if args.description_path:
-        with open(args.description_path, 'r') as f:
+    if args.description_dict_path:
+        with open(args.description_dict_path, 'r') as f:
             description_dict = json.load(f)
 
     num_fewshot = args.num_fewshot
     results = evaluate(
         lm,
         task_dict,
+        False,
         num_fewshot,
         args.limit,
         description_dict
     )
 
 
 if __name__ == '__main__':
-    main()
+    test_cli_description_dict_path()
diff --git a/tests/test_evaluator.py
b/tests/test_evaluator.py
index 070590d3f8..85e1449f08 100644
--- a/tests/test_evaluator.py
+++ b/tests/test_evaluator.py
@@ -48,8 +48,8 @@ def ll_perp_fn(reqs):
     lm.loglikelihood_rolling = ll_perp_fn
 
     limit = 10
-    e1 = evaluator.evaluate(lm, task_dict, 0, limit, description_dict=None, bootstrap_iters=10)
-    e2 = evaluator.evaluate(lm, task_dict, 0, limit, description_dict=None, bootstrap_iters=10)
+    e1 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None)
+    e2 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None)
 
     # check that caching is working
     assert e1 == e2

From d7a8ab24ab8778351ae8651093d5883ef72f1ec7 Mon Sep 17 00:00:00 2001
From: Jonathan Tow
Date: Wed, 15 Dec 2021 22:37:27 -0500
Subject: [PATCH 04/65] Add basic `description_dict` test

---
 tests/test_description_dict.py                | 47 +++++++++++++++++++
 tests/test_description_dict/descriptions.json |  3 --
 .../test_description_dict.py                  | 38 ---------------
 3 files changed, 47 insertions(+), 41 deletions(-)
 create mode 100644 tests/test_description_dict.py
 delete mode 100644 tests/test_description_dict/descriptions.json
 delete mode 100644 tests/test_description_dict/test_description_dict.py

diff --git a/tests/test_description_dict.py b/tests/test_description_dict.py
new file mode 100644
index 0000000000..bd4148477c
--- /dev/null
+++ b/tests/test_description_dict.py
@@ -0,0 +1,47 @@
+import random
+import lm_eval.tasks
+import lm_eval.models
+
+
+def test_description_dict():
+    seed = 42
+    num_examples = 1
+    task_names = ["hellaswag", "winogrande"]
+    description_dict = {
+        "hellaswag": "Label for the relevant action:\nSentences describing context, with an incomplete sentence trailing answer that plausibly completes the situation.",
+        "winogrande": "Winograd schema sentence including a either a ___ blank with a missing word, making the pronoun ambiguous, or the same with the word filled in."
+    }
+
+    task_dict = lm_eval.tasks.get_task_dict(task_names)
+    for task_name, task in task_dict.items():
+        rnd = random.Random()
+        rnd.seed(seed)
+
+        if task.has_training_docs():
+            docs = task.training_docs()
+        elif task.has_validation_docs():
+            docs = task.validation_docs()
+        elif task.has_test_docs():
+            docs = task.test_docs()
+
+        description = (
+            description_dict[task_name]
+            if description_dict and task_name in description_dict
+            else ""
+        )
+
+        for _, doc in (
+            zip(range(num_examples), docs)
+            if num_examples > 0
+            else enumerate(docs)
+        ):
+            ctx = task.fewshot_context(
+                doc=doc,
+                num_fewshot=1,
+                provide_description=False,
+                rnd=rnd,
+                description=description,
+            )
+            print(ctx + "\n\n")
+            assert description in ctx
+test_description_dict()
\ No newline at end of file
diff --git a/tests/test_description_dict/descriptions.json b/tests/test_description_dict/descriptions.json
deleted file mode 100644
index 6bace5dac5..0000000000
--- a/tests/test_description_dict/descriptions.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-    "hellaswag": "Label for the relevant action:\nSentences describing context, with an incomplete sentence trailing answer that plausibly completes the situation."
-} diff --git a/tests/test_description_dict/test_description_dict.py b/tests/test_description_dict/test_description_dict.py deleted file mode 100644 index ad6dc13b6b..0000000000 --- a/tests/test_description_dict/test_description_dict.py +++ /dev/null @@ -1,38 +0,0 @@ -import json -import argparse -import lm_eval.tasks -import lm_eval.models -from lm_eval.evaluator import evaluate - - -def test_cli_description_dict_path(): - def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument('--description_dict_path', default=None) - parser.add_argument('--num_fewshot', type=int, default=0) - parser.add_argument('--limit', type=int, default=None) - return parser.parse_args() - args = parse_args() - - task_names = ['hellaswag', 'copa'] - task_dict = lm_eval.tasks.get_task_dict(task_names) - lm = lm_eval.models.get_model('dummy')() - - description_dict = {} - if args.description_dict_path: - with open(args.description_dict_path, 'r') as f: - description_dict = json.load(f) - - num_fewshot = args.num_fewshot - results = evaluate( - lm, - task_dict, - False, - num_fewshot, - args.limit, - description_dict - ) - - -if __name__ == '__main__': - test_cli_description_dict_path() From 3fdff22f5986a895766bcb905493886b3156b781 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 22:38:43 -0500 Subject: [PATCH 05/65] Remove `print` from test --- tests/test_description_dict.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_description_dict.py b/tests/test_description_dict.py index bd4148477c..7d6c34ecfc 100644 --- a/tests/test_description_dict.py +++ b/tests/test_description_dict.py @@ -42,6 +42,5 @@ def test_description_dict(): rnd=rnd, description=description, ) - print(ctx + "\n\n") assert description in ctx test_description_dict() \ No newline at end of file From 09cd76c76be8f22d1b80e656069028306ee73e1b Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 23:07:06 -0500 Subject: [PATCH 06/65] Fix assertion error string --- lm_eval/base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index e9661164d0..db45068929 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -459,9 +459,9 @@ def fewshot_description(self): def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert not provide_description, ( - "The `provide_description` arg will be removed in future versions. To provide " - "custom descriptions on a per-task basis, supply the `description_dict` " - "arg with your task-to-description dictionary." + "The `provide_description` arg will be removed in future versions. To prepend " + "a custom description to the context, supply the corresponding string via the " + "`description` arg." 
) description = description + "\n\n" if description else "" From d1319950d7836102b9a2eaa359319143b2f1e00c Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 23:25:59 -0500 Subject: [PATCH 07/65] Add newline to end of file --- tests/test_description_dict.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_description_dict.py b/tests/test_description_dict.py index 7d6c34ecfc..e94028e868 100644 --- a/tests/test_description_dict.py +++ b/tests/test_description_dict.py @@ -9,7 +9,7 @@ def test_description_dict(): task_names = ["hellaswag", "winogrande"] description_dict = { "hellaswag": "Label for the relevant action:\nSentences describing context, with an incomplete sentence trailing answer that plausibly completes the situation.", - "winogrande": "Winograd schema sentence including a either a ___ blank with a missing word, making the pronoun ambiguous, or the same with the word filled in." + "winogrande": "Winograd schema sentence including a either a ___ blank with a missing word, making the pronoun ambiguous, or the same with the word filled in.", } task_dict = lm_eval.tasks.get_task_dict(task_names) @@ -31,9 +31,7 @@ def test_description_dict(): ) for _, doc in ( - zip(range(num_examples), docs) - if num_examples > 0 - else enumerate(docs) + zip(range(num_examples), docs) if num_examples > 0 else enumerate(docs) ): ctx = task.fewshot_context( doc=doc, @@ -43,4 +41,6 @@ def test_description_dict(): description=description, ) assert description in ctx -test_description_dict() \ No newline at end of file + + +test_description_dict() From 10dd7d38535a3695546fc3d762fc37596950ad93 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Wed, 15 Dec 2021 23:36:47 -0500 Subject: [PATCH 08/65] Make `evaluate` and `simple_evaluate` description args consistent --- lm_eval/evaluator.py | 13 ++++--------- main.py | 6 ++++++ task-guide.md | 4 ++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 4c224edef3..9963bb5eb0 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -12,7 +12,7 @@ def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000, - description_dict_path=None): + description_dict=None): """Instantiate and evaluate a model on a list of tasks. 
     :param model: str
 
@@ -33,8 +33,8 @@ def simple_evaluate(model, model_args, task_names,
         Limit the number of examples per task (only use this for testing)
     :param bootstrap_iters:
         Number of iterations for bootstrap statistics
-    :param description_dict_path:
-        Path to a JSON file containing `task_name: description` key-values for custom prompts
+    :param description_dict:
+        Dictionary of custom task descriptions of the form: `task_name: description`
     :return
         Dictionary of results
     """
@@ -52,11 +52,6 @@ def simple_evaluate(model, model_args, task_names,
 
     task_dict = lm_eval.tasks.get_task_dict(task_names)
 
-    description_dict = {}
-    if description_dict_path:
-        with open(description_dict_path, 'r') as f:
-            description_dict = json.load(f)
-
     results = evaluate(lm, task_dict, False, num_fewshot, limit, description_dict=description_dict)
 
     # add info about the model and few shot config
@@ -90,7 +85,7 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i
     :param bootstrap_iters:
         Number of iterations for bootstrap statistics
     :param description_dict:
-        Dictionary of task descriptions of the form: `task_name: description`
+        Dictionary of custom task descriptions of the form: `task_name: description`
     :return
         Dictionary of results
     """
diff --git a/main.py b/main.py
index e55015b4a2..c7f9ce6d8b 100644
--- a/main.py
+++ b/main.py
@@ -35,6 +35,11 @@ def main():
     else:
         task_names = args.tasks.split(",")
 
+    description_dict = {}
+    if args.description_dict_path:
+        with open(args.description_dict_path, 'r') as f:
+            description_dict = json.load(f)
+
     results = evaluator.simple_evaluate(
         model=args.model,
         model_args=args.model_args,
@@ -44,6 +49,7 @@ def main():
         device=args.device,
         no_cache=args.no_cache,
         limit=args.limit,
+        description_dict=description_dict
     )
 
     dumped = json.dumps(results, indent=2)
diff --git a/task-guide.md b/task-guide.md
index bf63dac7d4..c649aba331 100644
--- a/task-guide.md
+++ b/task-guide.md
@@ -159,7 +159,7 @@ description_dict = {
 }
 ```
 
-One can also interface with `evaluator.evaluate` from a higher level by simply passing a JSON file path to the `description_dict_path` arg of the command-line interface program, `main.py`. The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g. for some file at `/your/path/descriptions.json` you might have:
+One can also interface with `evaluator.evaluate`/`evaluator.simple_evaluate` from a higher level by simply passing a JSON file path to the `description_dict_path` arg of the command-line interface (CLI) programs, `main.py` and `write_out.py`. The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g.
for some file at `/your/path/descriptions.json` you might have:
 
 ```json
 {
@@ -168,7 +168,7 @@ One can also interface with `evaluator.evaluate` from a higher level by simply p
 }
 ```
 
-which can then be hooked up to the evaluator through the `main.py` CLI as:
+which can then be used, for example, in the `main.py` CLI as:
 
 ```python
 python main.py \

From e3ddcfc277a449b4974159c4b275c0fd0cfc72c5 Mon Sep 17 00:00:00 2001
From: Jonathan Tow
Date: Thu, 16 Dec 2021 00:04:55 -0500
Subject: [PATCH 09/65] Remove needless call

---
 tests/test_description_dict.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/test_description_dict.py b/tests/test_description_dict.py
index e94028e868..aaec13fbce 100644
--- a/tests/test_description_dict.py
+++ b/tests/test_description_dict.py
@@ -41,6 +41,3 @@ def test_description_dict():
             description=description,
         )
         assert description in ctx
-
-
-test_description_dict()

From 09e1de936d7deb7938a313679b45057dfe31ef82 Mon Sep 17 00:00:00 2001
From: Jonathan Tow
Date: Thu, 16 Dec 2021 00:19:00 -0500
Subject: [PATCH 10/65] Fix assertion error message

---
 lm_eval/base.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lm_eval/base.py b/lm_eval/base.py
index db45068929..442d8d5d8e 100644
--- a/lm_eval/base.py
+++ b/lm_eval/base.py
@@ -452,7 +452,7 @@ def higher_is_better(self):
     def fewshot_description(self):
         import warnings
         warnings.warn(
-            "`fewshot_description` will be removed in futures versions. Pass " \
+            "`fewshot_description` will be removed in future versions. Pass "
             "any custom descriptions to the `evaluate` function instead.",
             DeprecationWarning)
         return ""
@@ -539,9 +539,9 @@ def fewshot_examples(self, k, rnd):
     def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None):
         assert num_fewshot == 0
         assert not provide_description, (
-            "The `provide_description` arg will be removed in future versions. To provide "
-            "custom descriptions on a per-task basis, supply the `description_dict` "
-            "arg with your task-to-description dictionary."
+            "The `provide_description` arg will be removed in future versions. To prepend "
+            "a custom description to the context, supply the corresponding string via the "
+            "`description` arg."
         )
         return ""

From 3f06b6032194e75ec3b6b9fdbd08597d37af6a55 Mon Sep 17 00:00:00 2001
From: Jonathan Tow
Date: Thu, 16 Dec 2021 00:25:05 -0500
Subject: [PATCH 11/65] Remove unused import

---
 lm_eval/evaluator.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py
index 9963bb5eb0..e8b7c030c4 100644
--- a/lm_eval/evaluator.py
+++ b/lm_eval/evaluator.py
@@ -1,6 +1,5 @@
 import collections
 import itertools
-import json
 import random
 import lm_eval.metrics
 import lm_eval.models

From e54380d882436c5136a2b474e341dfb26cd672a1 Mon Sep 17 00:00:00 2001
From: Jonathan Tow
Date: Thu, 16 Dec 2021 00:53:30 -0500
Subject: [PATCH 12/65] Fix task example link

---
 task-guide.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/task-guide.md b/task-guide.md
index c649aba331..7f43de59d3 100644
--- a/task-guide.md
+++ b/task-guide.md
@@ -116,7 +116,7 @@ class TaskName(..., MultipleChoiceTask):
 
 This will require you to format your documents such that they contain `gold` and `choices` fields. They can also have other fields, but those will be ignored by `MultipleChoiceTask`. `choices` should be a list of possible continuations, and `gold` should be an integer specifying the index of the correct completion.
-See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this. +See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See this [task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this. You can now skip ahead to
registering your task. From 744482bb183deab3abf5c2206b217422221aaee1 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Thu, 16 Dec 2021 00:54:25 -0500 Subject: [PATCH 13/65] Fix task example link --- task-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/task-guide.md b/task-guide.md index 7f43de59d3..c750ab5a1c 100644 --- a/task-guide.md +++ b/task-guide.md @@ -116,7 +116,7 @@ class TaskName(..., MultipleChoiceTask): This will require you to format your documents such that they contain `gold` and `choices` fields. They can also have other fields, but those will be ignored by `MultipleChoiceTask`. `choices` should be a list of possible continuations, and `gold` should be an integer specifying the index of the correct completion. -See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See this [task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this. +See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/105fa9741ff660f6a62c2eef0d2facfde36dda41/lm_eval/tasks/sat.py#L56) for an example. When used in combination with `HFTask`, it may be useful to override [`_convert_standard`](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/common.py#L28), which will be applied to every document in the HF dataset. See [this task](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/headqa.py) for an example of this. You can now skip ahead to registering your task. 
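For concreteness, here is a minimal sketch of a document in the shape that the guide text patched above describes for `MultipleChoiceTask`: `choices` lists the candidate continuations and `gold` is the integer index of the correct one. The concrete field values, and the extra `query` field, are invented purely for illustration; only the `gold`/`choices` contract comes from the guide.

```python
# Hypothetical document in the format `MultipleChoiceTask` expects.
# `choices` holds candidate continuations; `gold` indexes the correct one.
# Any other fields (e.g. `query`) are ignored by the base class.
doc = {
    "query": "The Eiffel Tower is located in",  # illustrative extra field
    "choices": [" Rome", " Paris", " Berlin"],
    "gold": 1,  # i.e. " Paris" is the correct continuation
}
```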
From 1bc6cdb160efa0bbc3198a15d2cf428a06158aec Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Thu, 16 Dec 2021 03:44:05 -0500 Subject: [PATCH 14/65] Add type info to doc-string --- lm_eval/evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index e8b7c030c4..6feb356788 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -32,7 +32,7 @@ def simple_evaluate(model, model_args, task_names, Limit the number of examples per task (only use this for testing) :param bootstrap_iters: Number of iterations for bootstrap statistics - :param description_dict: + :param description_dict: dict[str, str] Dictionary of custom task descriptions of the form: `task_name: description` :return Dictionary of results @@ -83,7 +83,7 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i Limit the number of examples per task (only use this for testing) :param bootstrap_iters: Number of iterations for bootstrap statistics - :param description_dict: + :param description_dict: dict[str, str] Dictionary of custom task descriptions of the form: `task_name: description` :return Dictionary of results From acf76b50b6f69917a09379415111c0af3f00c5b3 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Thu, 16 Dec 2021 19:53:12 -0500 Subject: [PATCH 15/65] Add `description_dict` docs and update `task-guide` --- README.md | 3 +- docs/description_guide.md | 49 ++++++++++++++++++++++++++++ docs/img/fewshot_example_gpt3.png | Bin 0 -> 315681 bytes task-guide.md => docs/task_guide.md | 35 -------------------- 4 files changed, 50 insertions(+), 37 deletions(-) create mode 100644 docs/description_guide.md create mode 100644 docs/img/fewshot_example_gpt3.png rename task-guide.md => docs/task_guide.md (88%) diff --git a/README.md b/README.md index fc6c4e4ba7..112595d8da 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ To evaluate mesh-transformer-jax models that are not available on HF, please inv ## Implementing new tasks -To implement a new task in eval harness, see [this guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/task-guide.md). +To implement a new task in eval harness, see [this guide](./docs/task_guide.md). ## Cite as @@ -298,7 +298,6 @@ To inspect what the LM inputs look like, you can run the following command: ```bash python write_out.py \ --tasks all_tasks \ - --provide_description \ --num_fewshot 5 \ --num_examples 10 \ --output_base_path /path/to/output/folder diff --git a/docs/description_guide.md b/docs/description_guide.md new file mode 100644 index 0000000000..c97a82ae0c --- /dev/null +++ b/docs/description_guide.md @@ -0,0 +1,49 @@ +# Description Guide + +![fewshot-example](./img/fewshot_example_gpt3.png) +(Figure from [Brown et al., 2020](https://arxiv.org/pdf/2005.14165.pdf)) + +Task descriptions provide in-context task instruction for your language model. If you'd like to prepend a natural language description to your few-shot examples and prompt, you can do so on a per-task basis via the `description_dict` arg of [`evaluator.evaluate`](../lm_eval/evaluator.py). This `description_dict` must adhere to the following key-value structure: + +- **key**: the task name (`str`) as specified in the lm-eval-harness task registry (see the following section on task registry). +- **value**: the corresponding (`str`) description/prompt for the task identified by **key**. + +```python +description_dict = { + "task_name_1": "description", + "task_name_2": "description", + ... 
+}
+```
+
+Note that a task's description will be separated from its following few-shot examples and prompt by a new line as such:
+
+```python
+"""
+<description>
+
+<few-shot examples>
+
+<prompt>
+"""
+```
+
+## Descriptions in File
+
+One can also interface with the aforementioned [`evaluator.evaluate`](../lm_eval/evaluator.py) (or `evaluator.simple_evaluate`) method from a higher level by simply passing a JSON file path to the `description_dict_path` arg of the command-line interface (CLI) program, `main.py`. The JSON file pointed to should be structured the same as the `description_dict`. E.g. for some file at `/your/path/descriptions.json` you may have:
+
+```json
+{
+    "cycle_letters": "Please unscramble the letters into a word, and write that word:",
+    "copa": "Given a premise and one alternative with a causal relation to the premise and another without, choose the more plausible alternative"
+}
+```
+
+which can then be supplied to the CLI as:
+
+```python
+python main.py \
+--tasks cycle_letters,copa \
+--description_dict_path /your/path/descriptions.json \
+...
+```
diff --git a/docs/img/fewshot_example_gpt3.png b/docs/img/fewshot_example_gpt3.png
new file mode 100644
index 0000000000000000000000000000000000000000..b199736867aff6d1aa8ad407ba31810f4016fb75
GIT binary patch
literal 315681
zMq{Qk2z{M=V6!fk>c8)Du(({8-;q`8`D|*fid)U9KkKfr?>gzmRQSR53W>g-ezehU zqpH`}12cyr*^RhakpSVL<#8?Ft_}4oi>#izTjmon#%T4f`?g<3D#irH*o{n9!rix#sj zwk#TOCUv~1vGOkeQ~uI`V%8IJFaH@B-l)e}`wM~$c3Sp}y2u}c*X9e%liE#}M2A*( z1E-n&V?$#IPOlqyz3VpEPnzENE)^6;K@wpreBFpky3VRtOJ^Qf;2$rNR@x=^ z5KVk>;KC2!UV^XiVZ~IVcR&(*7+Hjfs{Ehl^6u-tPAeM1$IEW_fn^a`Gm7j9Qq zh-0wH#^n3KJ;F@~xHoxB1KH?QE!?eI?Yd}Fuo`#rQ{}{!VJFwiMwAHDspgZbCb%s1 zXhq+={Q$uKh_KL+vs6-oV*<*kaES24a7aK29{7mCliZeN;2Gc$e%6EF;KFR+5dZ9> z41C|bVu8=im_NP|;zQw3fdB3TAMb3?uihw-Y=mEB1QDPOPEzBMoE-42VeV>S;pk@V zkfZ#m(mAYQ@eeC@9Fz!NtzS#R~Lbb@Op_ zH}z(9bffv>BEPO9W#MM-YUAu~4ubCX({=6;V z2H9^~*g4ra*l({53>CSl6;ieFwy@WgvT*?R446YqfJ;E+Xa9e+{59i$jMVvSB&Q(X ze;N88O@ANy#LdF>k&^>3r@Po+^!jt&|7`qopa}cTt^dbX{ISnJYk`dxLlElWMzB6ShqQIKDw6rvn^6U=V>56}# z`kX<%BSlh4ozcq%_TU{auK&fAQwm_Rytn56b_cP*@33P*b=Ip%5Y(4J|L!f35`+sg zB^JW}_jDqL2gnp^gckWPrwBrQf%f?S-Ww^fc5>P&C@6T0Hu&|cX-j}tv-r+M0{Y6l z$3=nr^wGmje6NFTa|?V zm~z`|LLF0E;%VMD-L@o^-XX*`7B74=I!hbST2A#5_3Yfx{Re@GPUDey#{2Jw7!Ubk z@V2(L$V_uxHD6r&%zS4R>8b$mL&G^m^QfH5}{r7!X_G#4VT2MV4wmPuw!uY1j!{t7YBp#GTLJDq$c6O(b-4J|pofm_Ur{b05G#fs~_F&`Cy#!QYP z*Hm|`XO);MFWOW3&Bv@G@%j^})f-FZ;*anmrKjBMmc?on>p~Sq3AB&RLN?y`a=Lxg zui}#g?qGg&@{mkKT~tF9!&YkQ9EU%BUH45;%2NNs-ZqwLKKjmwM=XL>YbieO8)a(6 zIxVpI4YY^Oj@$iZBJfJ=0@@XiSPHPuN@A8Y1_p`Kqke3j2FOAI6f_hF zq~K2vG#0yRywf#t>O z9;5%(2zW^cE%m-E>GpqOky}{Uq4mTDZ)(u4C;V1BOJq^SJ^_bCG|2WT@?xv{TPt3? zeqqH+=B$)lw3oWuENzMa+YHINY)Ch@=F&dCW|%D}U~Zg=@BRSYc38Z=k`=-m3wvAb z_#8B;r{6O*(W!Yqz|&&GjyJ?x4mb40Z>>eJ5!Uz5VtN*sFPB^QxL)LWU!I zymKF*OFz8rO)8yDiN0xB?BbtYhJ=i_TB=4&r zTj5IYIeNUEnyFfiUsh^%Al738us@`xZ`00}+hJ=$sbI_gWJr|701X0x*@AoL_-4F~Z( zbQ`Dr0o85KzwbkKC!zqnIo|a*ZF6~-;#XvzWT#amR-qkcAs9Vx4qeLjQ+C`FmT{xS zf3k;jX1o}i7WqMj<4}6b-irRQSvx_$#!zQ_w|V2R(i;tdYrh_&;?ou%`Sa?}KE*fo z6sp(5chrXi7Xis?nSn)ReWdm_yt*D^-|SAFRBToQJeSTKWwf0O-KLe8X8Xk@x9sw( z;Qw4Aga{5}vzd3qQ)qAZWS(I!rx8;&L}kAI#x<}x|6k!ROs!0VE}VO?D^Nzq%Ioi$ zZ>Mzz+ZLT~5(YQQ8L0mSCAR|qM?qo_^;-+Nuk))$H2hM;#PVOt6%I{`36k?^O^lBq zx}!tMVasx04u3e5v2E#G41E4ZKB{`N$a3F5tI74(HvJ`MziLA0R56)1&(}XZr`u@c z^h&la4BJq?_A_Glz;{{dp-e#Z*>W$V}=DU+s=%(B8wDZ(4#|*^|d>U?a zBzu-(Z6tGvVlFwGz8DQ2te!DBs*=>^vql(y6YF^Qokt8^{Vxez!r}9$xLQe z0+{fTNu4(xasSz|K{^hAOnz5AgB_=q+Oa+d7bmsq`}EL$n}$3r_C57@?|*K}FDwcK z@jgk)yQc+1fF%E=9Ka88zC?m9uy6tC{EDt$;)nVLROi>NE{Oe0g8w0z)ri(;h8Rmn zl#2A3z;;)!F{S?^Vi0PqL@6H3VT;U?f3(4|<9v+llwMm55KOkLa!uvG%j2!>LP-Gf zWZHWJU*6J>^rj`^7t1V$++6K~Sl`rt+BUoMH8VE|PzSnTCgI=SCRPF*YpGz;Y3u|% z=Z-4JU+U=#CfJA(GWoW0#;1Cmr})Y;M5;pK!TRM2wXu+=KclLD1(BLPyY~3(u=1-eUEY+Pr`zJ8f^;n6O z{zRdlT>GS0WLbE6dp~txqamr`9hv2~xqj&zlkHHB!^1mmd~)!4zZ`=?y1d9Hrs29P zA-_R#ntU71jWN}OL&pd?Kve&Ft5E)4kf5Md4QYpjVHOLnCzSqZs3sggu-Ls)ru$L9 zqr#LPIkuHEUW4bF` z;Cf5Re-^Hip_GbLM*P`KoN&|rk)CMD&jjeLsE%{{sZ0DqOqPaT-k^@#8Dm@H4ZxIQ z6&nz{STG$qvx$eml7%9x_wk-5DbUwh=x7Nzt{=v;c zQUV`lp%&Hs;xA$Q8`%z1;I!q6TZLdps%GYVd+CPX_$|fXsbjA6q`hzY32kyR0aHI` z4rq)&{5dmi*OwUKpED0=$6u>kVQV)&AUbpDOe*}=dUn=5k_F%mp01mIosG4q*)eaG zr{NEwb>xg4H$)i0$I_2Oa3NyauJ?6*84B1e*b3S5$e=(@9|6=oi7k z-iXiML-Uq+Ss^yzA8F$kz}t~;kw>)5f*xWnBdtfKCiVW4bhkA`Xqxjh9AnUB&H4Wz zbu15r5%{{%44WqW9l>)%ElI$`+G|u_hAtFjTw+j9ldzV&J5-tQFW6Yfp7N2oFUZ) zJ~%*dt-eRQ<}XnH8+3yO5Ct&l^D$%8O}wB(;aU^ZHGY5+Z16K0_^F6)6kgF!MKry{ zoi%pA>RUGdQ(H3qw0^wKdRLBqYN!3K_jT0~H`;0Pr@H)He)1UHfP-#d0%p~oxk34z z{U_VMNBMj3Ob{Ees1!cT#hEec*9J{TaS&=74JDGN zh%!3uFYbt?0L7ti+UHG|L;Eo=)njrS^M*7#QqJ?g9Z;S9b|Dgo&y*=`kUXoLzM2*) zh#KgOzK>DJkTy7rzcK!Ps{Zu+e^o{QLjaK0AT^={&}64hW`M3qvQ-U}_AOiey%385 zo?(FGG;&_70U%*K^Wf?yNXT*RFGSyf#KA$*zY^7w>tgiHf-mo((bO7ZE6lM;=Mzk= z4gaBs{{{^&C4SIKAbFCFLdlDHAs0@xs{Mc3F4;jhVl(4XFwRp&&X}Dd|A7$kDcJDN 
zy8NwY@Jp6&_ftL+h|cCdA&-~&Q|`MnBw}!3xYh(++UxV(N1?%#Ov z_Fu3ys1gDtr^eW{mZ+t^r6)h2;aBcjoJl{x*xYBcFD?)Sf4!qc1O7kj&_{klQQ0YHHI zxd&Y%l+ia`nUt2Kf3^H$|DVZ7pl@cn3>)6gEJ}k_2;E*?<-o(kBQl@%0jT+jyvtjO zz9&q=AMoFz3J{hO6qclZdr2uu67YLG*m$FK*b<^OY%d; zKh5_CG~(MgixUtxt~sv&{0Hg;kZ^?JS$h7KC@UnVc(;c>zdQW5GuN}LlTo6|47bwFNI>$`#mndVy>TZ>4agSY{Sp~yOy^v9B>Xku;=^oN zbbDQsmFLwvp=6pE81|P5zr|^I?A2LB+qZyFAbI{MLF{h$^E$XM0dYeDO%o)zD^EAqV$8 z(v|XdMMLf+BSt!>&-sPKV59=}BT2t;mEQwww)EtJ1$(ZLoizu64rvQsl++QkqE@~K zpT3HtPO94bcglgMy}CbqkCz7=9CAPa(`cpO}0v|y6J-rv|sEVQFNIzcmzuJi#@&NR`<2bG%7mz?ULcISENxUUw zgfgIaAL7C1Bj(c{$=EZOZ8eXM)|gsl;EqJUIQ|1RUC;z_`I1HvAS32!p<6okt&kc? zH31gvF~Hf0e$a0Q_$-kRFq!(d#I}18|U*lXRR(H>e@nOo6F`G|4Tv{z&y?;!w&}~%g z?9&36ry^iYo?DIh_rhQp+Aep#6PWDU$e_6RjO#RjJ~?pagdVTIedCynY-4ZBpR-=S zY@~V^X?o#6{OM!*wTo%I|EYjU=y#EP{FQQi3`N;ak$U0v>7{Qofm!DP1cNtOuIk-C zlDpdvO;jD8V0UP`V@s0%mLIXe)Hncqv6VmeXF0?Fa=}%_U&d>`kXZc7T~=L3gdu!C zhv7g3?KWG!upoJw3Y>Il0+s`Pq|(FR$AgW~b`OrAOh5poh!^69;tTg#1pG>CjB0pg zie`xH&ka|V0;LNs+S_wOXvMJr+nO5RqmZ5z$+IvycJa6N6nGI$2dQclx(&bTP`$!w zuW6=z^gUqum$Lyl*9~Akdn*%Rc>Qt()5qYuZLYF7-*uqRl&AuD2s8eQ5B-wKKV5Ke zfJ{(U@fF}l5j`h=>czV4L#$83r)zWqm;Gl@i9rywi9OiY+`-sxwn=rGEkQVJMuqHYN}OQG8**oY=h~4U0BO*eAwX`~!iyBj9E|(&aBn?LER5 zi)GO_$->?!nKK(Vx-KaKVByMUxTN%3L7cy_kI3^WzcFo+x6-wHuzkbT`q??V6x%uk z+G!??ISH-Dm<3=;nE^rqxFWu$iRv#ljYh^a-$bB$e3CCTco6y9rQ^N<0N5pbc>mnR zp6fb7L2>SnHJ@|AH||)P=g@ivjJ0jYf%NfV{xw|f$%go_Jzd7%yGM!4K&${TCB1U% zh~UYaN28QvB&WX)dHm#0uqzeRsKo_VO?MnrZy}GqS^t)$=_SCNOER51ZR*Z|{C{lLT@-1Cl^HEtL{BSColgKS4`e)hwnupV&XG7gV9YIl80J@n6pK zTy9k`@}45TH%wH#$?BW!0$F{7)|y>M3cRekfI~5+9m50vL;tT$g8*&)`!wpW(*`nqio-%hRpTf7qDIURgef<1d_8(;O9E?|g_ zq4w~DfDn$;IFr@ik^a_(#!7}0m%}Xn!1c(sXklhZrNu!?tY+IoU%bny#H&`gjf;ggo%C*-A& zIMrjFDKl60R32XFH%?4Us11-PYG|YpaXqC(X1pVjYLzAWZi=uhlb^3D#s7j>(ZeIR zu(Y%iL93Mp~Bc<82Z9qy~AuB9L(R3qaBu%x0_{UxVrfaFY=6z(zRVd&ZUU z7e{T-RWQuzR!$&)8< zxcgHaOB&Cga+|eLrmtPP*p}w~IAoi47ikxGx5>)-aXGok?<utj)1s;ZT0dp-$4t;MsF}?BRrC05s_H^OuFX;kK0^` zo1zlt#EF2t!Yd>cTl?7nD3?yHdj%PU)2iRwt&6rZ%{-cjh+ZK)jFHzeAatS2i_j4>OX7e}z~|yI-1#MqY#_++lnT0iV(M-*^Duu2bylo@FZPurfAohP zDrc|QPok&YH#-NW3;Hl753Huq2eqb!&?f?t9Mvt zPs_1$cDyJYIc0Z)`;#>A(4D4Y^)!Mp1^!gFUD(!-CVi`yUIJ;J`*%pCHG09u9uRaQ z-U7Z{n6fgc{ilQV6%i(VG6s#lH*H)}|2#@8%t0cs%Q1o5}b>`=W)wYV9ZtZe7y-EZFN*?z|S|hfN6LP*lKl8ep zqcUDgti0_{r6h-yR!SrmjXv!zmb=iYkK_mJg=RqbbF~x^hPhb?q&2uEL+qy`KURuV zjFq}RF`ap~5&Oi1-LRJS$e{>-cxl$TRnU`o&o#p+wR=5@*(=3&Ti5J0`>Z99&;X0s zjS9GpTXd#{VC3h}RkH_OeNa!du+*K25vy&v-1#Av{Dv#N=}dR5w0a)#px-M~_4V2I zn?m^t+Y!El4dW(zti zmDJyyj5TZ|B?^QTj__1g<%^kBlzCBnn(j0lyY#3T^@%gB2r*yZ79+VAxG4&YAvU04 zE?Axanna4m^H#KJE2(c;oZQfXuUUvoo4+`(d&+zL!9hFoa$2;fhppRE{%!+3l+S0Z zd0CTWZNjXh_jtIfT>#RVaks+G*pP5Lr)7`Wz0?v0g|{>j*T;G3Gmu<_+!p9-pL8Z3 z=r>FlwcbBYw#LD4EIeDk@xBWpvBB~sou=z5lu4&?tMd+?a2eZBz#+J3jv{*P8(nlU z|43}`(FX%kJXlOI%v8*0jLYp53m4WFb>|)h5A^cbJpT<2{oM-x1lO+8eYH%)Q)QT2 zHIe^)P2+EK1y$3$G}}b|w4}3Cyw;sEW?HDe#y;;fOVAaAcG5=bFw^QdA%`S>+V5hN zw1XQvYdWL(J{xdT08zhFylS09ASN&I5^gqC-6Sk1MZBegC~ZUOMJe9SDaW~FofpOW zkkg7LEP?K>3hRod$eKLw5_*N>G~2=Kk1L)$yqT=~@pcG5MvM(Xp6TknMpt^j zH&^murn>~;ckcKMbfrA>HR?(}F}iaq;vCe&j~Lyw0qDhESlJ^N>V7vp8>2w~gqp3Y zmA+^)6pbzFzy+l&u?fFrZh~=-X2Z*glh<&8!-MF)nlaStfJvwE^5msQR&6+i*}5Ia z{G|W*n#K@4GfS%dv!dop+b;YH?}dQjD9rWC&Nbe%x#YeFXLA<_dFJ&&@#K{XZPmUTQlM<^R+0}T(Xvn>(y?8Wluz}Q`PVh1>@;@MMr>Y3{OPl#e(y0KDcdxDe(PJ6 zEZYjfqy3#rP1SZ<1s~9s9GPVrUc% zgfhyA@~O~!e!Ztt6tB~&URP-bide7s9^d=&t!c&FdB5%Law(e-epBj)T0crQ3%>Y| zpU?!5jzv{X0^I`{n&@=?#2$$QOfd9QY(D{a7-EoCBebIFCo+oJ^vanm;@5Gh$l1B{cr zG&Y_bnY(jDzKv7Y&gY9+*M~$5hgB)9I`R>-!UfRA)ps?^k0^v#%Xs5@UYQ$rVQw5L 
zuscA4d5KAMXD)M}_NEIi^mdAQ;v=&-cdrbUYDY8%VCozF=vy{(qn%>3hd0hFoY#Ud5xJi#=tG%}fUWuZ>feVnE8*JV`wyyPd+#$zIq* z?4&N&a_{)jWd!!+>(;Pr%w8gkM%N<{H-5#tU3WnNzBx?xSw}Jd6E2-JuS)1O84H9! z0^CPY?@F!OjaaoQXs>*}cc4Qg+Qszbus*_+Y~-R$_h5*G5vf0_u3B%&x?az7@jqa*N{> zcstWMgX~5$_Igukb&PF9ucN(sgZF)?+M>A&3P8a;dVS|dzE}hD6?S8i8Zk}*>ACX` zPH8rDD$B(Pu4lAR#&Py`KDHHW-}lf_+<8H_?F4=xKT!iq2VIb)ARl#E)b+yGPhl*> zw4aA;v}Qd^)8zn8IF18{1^or-zeNaL&Ug4PrzQz2aVL&T=8L$YB)u*ksIM8WuWu3z zc>y4iHWFV}famMo2F@kX~XRHf}ki}Pa5bNj54x4Pnle1k4kX1OoCo; zdW4IGi-b1mhu};$H8uNf=m)lGcB9x|ea9UJ7{*j|5ed?gnXs1{&1@`?@jhmuTXTIb zetB_z#U#1X&MUPFI5xBRMbh%})HH7E5hJItdjZ;Tmbl%=F9=AUVNb*YPg||bCN5IH z82O3bMX^`)>8l+O5=X9>cKxoxr!CLI)kUP;sCYu>!4@07)1v{&h+tiBNUgzC1Fsng zw`4>XC`$p(GZLRKcd7+6BsP+S&0V33v?HlOXKo)62mtg_o|(=m@!0>^-DNshEhi)N zUL)?fgC{;^yUQi?%8mSY0lC~a|5TAo-2|j@8-)%k~$6SIw0CJi92uiwabSn;v-GfUDV7;;>(mK zLYlO&hvVZ%HOg~Ck|4xl&_xj@2{)_)>bX_)5vmBQEMzIY(`ndJQ$;xEO}?=vC(x0{oo>ZGKy$WkOh&(+ObOG zC4m>`vYXuW0!$1$s>0ABW8$%PFR$a(d+bj1Z?Fq$=@{)18t|Vi*o^ zcsVb=L?$DE2p^+||GPOIfzh^munD6NmE|46=Zt+1pG+B!icx{|A#@XRKhTgo-Nq#8 z)33!(GvgM7q1{BaVIko^2>Qq^9Itn6EOp3wlE;>$vb_+?m8tGwY-Sk*Gq#nz55KW* zW+_$G)NF`JNP45UCKX%fT2C8Z@xF{neaA)yBd zFf4@7AoU)a8FgGV<@Go{@?%$pG$GWV&1vNIC`F5Q~1n6&=W8 zvA$->dyR@W$Ov{aYJmun{-#p;fV@hJ9{pu2abM)_ zlj*9){TfoP_x*m?2L$HbfhA>8{Pt4%>1Xu|-}eI#uTpm&$r}kZiZjKyv`SBB#T@*6 z>|hR* zePx(nDF@`cwf$&$Lx=fdd+f9M-heU^pS#Lu1L0YNC#o3s=Er6>Ax_g5p=Uc|LMR`{ z`_)3u=Ff(E^Vw4@SXE(bY$#TEFdc*o4d7z}91XSrG)JNxN5hT^xxS59;14GEZvy3n z_l{0CU5=K+vi#CgdEB!{?_n0;E|vl)JXish3A~y&mrdPci1ZUF@zb(=MPh$_SZtF= zPO;yUgT(N7k0Fi)$g>$$TtI3t>`Vd?b|ivzqgQRecBB_EwpGLq(_%A7f>u#J@$jbh zrSZK9M!}q)#$mvgjV88;PURXoS5RGdl%Bun*EaF;)fh*DatSZ<*-uSUAk!wcnvkvd z?)dlBpDiCJ^3vvaT=iPbI9m3TYCJ4|{hHz8m^Ve=(Wkpur;IfhBI;K??swLsZRYTO zPfJZp@BWIy8~KCNg`U0)FE-#@Z6Dvxr!N`&PVqiFz65V(L+YO-#rBt1U-Y+Aa^rH=jHY}U>3sR<`W2o7_5 zd_;9)o;rRbHtkSoN8ASTXJ~B@R~k$;tS4NRcBm;XS3Y6)L&KwxlQ}LB$TQ2*b|;1CJukuJ=D7 z9jp3jUK|WVkdKASkUhm3(dkpd{G|J=4$JT+`;?lDEj`JWF$A3Ho6$F{0$rk00h9Mp z{AuRaM%8EHJ~RV_Ix+hsO;hsIVU6BzUm?yCU?iHpV7{b>HKW|tP6(W>bKV_>3z_my zl#{d<5(}GI&(q-GU#~s$gXV)S*i^t|#bD9G>?@qnHy%0(hh<51ecgs(iU^~q!d@K= zZdAyRqsXO-FtpR|2dRB$$N*hbK%g`JSPmo=p(rK-qsjS5$DljkzaLPJB|+gnVTk*h zRi14=Dtg3yKWi=CVaZ@AKe3f{fOQJ({bL-_*LW0(Pirx!iUO;&s2i6R5VR4>`cZU#rgO5 zeqEN;L#R86#S2Eo4T76x)iiRNt|vkOAuRDGeEjf%t5Y$}HuNr;5gcQ@+;blV*Snkc zv+r63nT!OE;k6znc9tcXp=1x$@3!bM&=_+Hdy#*V*X|GCb(ml{_!`sLrD>gEn(KrMxy2uR5hPiWpX)>y4w{5H9mJ>Y_nyYvtn?!+3(xut4x*<< z+r&--P6uAKMW7{jdotsdUsz9KsIcr=Fl!&(f!=7+^6_L=>!*Vv$hT%}XQ_*jE1#7< zt}!Fu2qK|R{W3(_p2>}q!bIuf6c$VK5fdkTeFfeN>6n?3+1C)|jxHya@$-Gs4A0m` z3N{<@gM|s??Qt#35?@(`Au^2Ry%2)Y)T|$uy2DeXcSu*x$29Vzc>)jklPSndGY8!1 zDH0UMno73J*&H+*+w$rY>7APor>48G>0_cwt>zed5bd>1BAei>-h)WWV0SFLH3PLw zP?9(Z1?6OxHhDt4kB62NzK9jinUwrkpFl=l-yC%4a1R}uYHTZ>450K!5zAw}9zwE2 zAARF|H$Cz>m{7?Iw^>xu{n*d>VW4!jGosGbn?c<#Pr?2Rg$as*Zyu%(ESnR4z0CJ& zG`%+jsvFMG6Ng?dviIM^g_V#w6I80afSdfXrD`(pgE$W>ES_~~g1HwXOSDIkLioXz z`6%y1iM>H^+P+t&Ni`%=fb8D8g-h&gE^Ah??}1y_{C%RDCT{31_-GSZX{DmgKzyV{ ziK^eAjD0G!S-1`|Iz)B(cwUiV0eLJ18}byY(sF50k7(wKz9}ApG4mu422Bk- z4HMq;L$1h*U4xh&Eh`w;PL)JXW)$C1S(?BmWI_@*@{z5r=yJ_AP_wU#6z})QQ&A4v z6^>pS9*kwnqI!hBX3?|dN<@y=cn#0u3-|Ik2hv9yS`6I?s)c+iLvtXtE_{kpHxqJ>BH)d$(D zf{+l$q`tt@nM~3!U5+LP4(A*sVU%cRI0^38TxR~o?IvS$ts9V$N%tJwnz>UyA3};T zYx+l{+#Pf4IIUbe>5@S<2jkfyG_v=q2B^n8j)^q1v_~ZkErerhSe?tOYo!GZua+zB z2lO+dRsd-%v$sppfrp!AfM`d0U`UYNSJE=Zx_3doQB{Ep3Xgi;F%=9*+ zMK-KAS)j(O5_MaN=I5VsR<<%iXy+7sVxC=-71%-0b~i_AYNrHc%d}pI=u6^tkN4?j z#jE*e6Mo1(igd++#Tb6qFfA7jea*VeXifoFmX;OW#;D}za_&1>tjU;}R)k^*q_M5M zt!Zy2?2Q;PmIy_v`Qq>H3?#A*TmTOs{tz>t3DY1|#T&4AVj?U3^#?Aj_|#kOb5-i= 
z(bxSQ?1z#f=$aH_1D0=%G+wS$BJ%4Z9T!<%-P1Ff#Gln$W4WG3T24_$@-)KHd`cZ_ z1@dclMc;EWL*JwD)_6JlvM{O18;J#ShR{GEAl6KT2UFd@TpU(4S&#)An4<3b<0(>Y zGehp;!mRXx#Q6g?rZZ}2^oh{r;1#jh%x?e1nZ*|lF+o40ky&d`!w zdiRX0+o_f*dB^5`FsQBw$uk=oisX4_Q8(5O%K)YAyf~D7ZbYAtT1yqH3-)hgf(&Hv z@_mGE(~%qE0}uZ=1B+{LFFF%KcBx@(JBu4)g3LzpLLO1XW3Xlo0jc%XObHG1gz>xW zM~$D4&EOt@+)>uB+~!|2D!jPvP-NW%~AJY3Qo^zdj7aynKhR9#4%=eCM|#8)2J`Ig%G$2FaB z!!Zy)D1H}(&doGh@c8Y1WP}h?KAj4=W3;GeajK|qx)P1NDM5@O3a;5l%Z<@1ogGht zQQH|CJaf%EoTZnBEnW}!nb7i;$t>`60Yy;No$76~i{pTjXE1Aey+^V0d`Ji)VA&Wg z?D+g_k$#}^Y?Y0TiQU-O0b`Uhkhwi`fA53_&ptm>i1#&heVzNek_5D#t5^J0Z|;&D z6DyZaMWj9|+%-S@HZr0^J3m#ku$j1;K=%qVm2%<~coAcHv~I{)qieYna3Z%{-m^S6 zlY=54)dj5vV*L=^M!&RIPMDUQIrkG>cZrk2i|IA9kvyIjXW(V?d%Rd`tNl_dd7Q?# zRoVS0)9Q^Tle*1gr1cce;Jzp#^S*GM_^5%0I&%a``SvSBKiGN)tL(?M^u-H5i8^O@ ziQ)8eCq*D2YwBB2jo}(YOv?Sm3VPpj31t>RgA&_4zlUDnOVTdtTJ`ZPblk14 z&FtIK(v?Hv%5qxvnrmTP-f|M}=gQwtws9Rq+d$1SU(Rw-#}>tCiW7Ju*0XA@4PWBh z%RNwB7L4s}Y+;crIQK%1Pemjz-n}bc$#SWij3K5Y5tQr_hk$|^MZ(`U!wsY?L~zE5 zI50QHcB&AVb;7_C-QCX`J-5g|xe0$P78#Jlf9i6%hoe51!X}u2Gg(_w(qcbTKc?r( zP1e!O=Xid|S&(dAyf6NJe}UzTFRz;#h6vV|lPAKv)KK^v~D;*-Ue zl^?|wmMLgBp(}?`vRz3RX9>j@V8c5>}-pwy$XiVDgTPn?3#nHAVh8S?z|>#$Dpn^rMGWsU1gg@2Q| z&zzLSDU=xhDW^=eiY8B_ZoO1MZrirs+;h)TC_{Hzt8Xf=oMYN9O?QrpY+4qcd+scA z&N;_gg-#p=ER~JVKD*ZZ=tqk>6>G*SU8%&q{PKBL`Rmfiw5PS@z4tCP{rt%>pT4B>F|B5RXz%sMN{O5lz zf&y+|Dm)dW?|=WP=JeC2#Zqpuz2iWG=bv9|9(-__RpPR&{CW~DFDf<1jT>gUFdZt` zb9GeKKKtw%Smdq(gF{x4$bk*+dGPB973Hj(oD%-(t53opiztO{c1f5oD$S{=zwf@+ ztm2sYb@_ALou9F>9#UyLfBr;s>810`F~^L|o=E3aT9g0lzdnLN9IkvSFz95ox$e3% ztfG|9C7%z!%&8pZ^Y@N-oM09BL3DxQ_MUr|n3rDK0GU`%u_V%vTC8_|3d(zW#Issy zo_cDf`R#9C!2ps5YXQ#q$?)v6XW}_MfNL*9sk#@>O)zkP>G>SUp9)kemCrwawpHf3 zvI^u&|7~h&wbR;DR$31S3gy58Jsm22l#YjQDdwxgD&#-@@w1_j9+VD|84r1Ul;1tD7(9b{ptOryevrV|* zf;n~&Nm_Q^mEE!4-F)+tcEAGT$gZbZSBpW~>7c2r)N__wv0rj};Yip_OAnO|1l0o9!T zK?>~hEB|D3L?s4|<97_MLUOgFP z%z@m&)!hHGVduvart9*}XWQq%aOA)jUK2V#htc4>@JzKHBd6*xoh`?tkxdfi=47C( zu11JYzCgXJ&~9Z!}?rhHJ=d^=KULb-qQmoI0qz@7vP?D>gWV6$9o(V%QvFrerl zr$F&CIS8H6I@TEk>@62QH>@*sOeZ)Bj`{RFiV=>V{O#-Jt4TmSN0tam!_ zDA^cKI1c(*>FozT>*?Jeit6h4Av{xDr(f_ToeS$!a0Uxo#v@Vbn+W9A`*|6t>Y*n zw0~7*5ZYSC!S!0h_qsTGvv=)1<6s*T?$L1^n(XM#2|oj$K|gspQ{zC70_%C;6i zK)D9?-JS*Z&DYNFoI;!__s+2l>@$u77n>_0`lF7SDzEjnjbS`-9A#m<&(XDfFSVfW z&>Z?_)h4}^Ub>Y$ci22lXopMhYw}9z-BwkwuOC$MzlB0XqR0*?lF&nx>_4X%&Hii>kWA0j9pmKpnW<>a?FFt$zH1k0@vcFtm%aHA~Qwp+dStt3dv0u)x{{4(Q7w6JrJ#R)k!*>JYWVPox zwl8(4>+GMi{p)plHV^6d30qbqXWOu_+)Td;^|=l8lzqQ8Skl-GBM|dXi{vEx!_2#7 z!#_AC7q-QGcIa3>mJP~*&sJ~RV3z6X3~OL>mWOlg6yo#E;Lryqrp*swEs*B`*rfV) zkd5sU`w@(@H!=mejQ5xF?1ZJ&GJG_mTU z_%T(OU;XL@^S}dd*jxCnVx8k}eIAD$-&efG&4r_~=IpbNHSc-P$ySNTcd+lMKzjM* z4N%Uku!<>$adGp#;w)U`LUYC$(`~+@Zl}Wf zjyqmTS#!E@l#>>rRMee(@)Y~NcGvQ=N1x{x!m7SxG( z^Cp?;(?@~zYCAZ=$u)7}F!S+`pJOfB!|dJl@SciWDo|NZSYN_ZHoNqxYoTK~v7Wek z=~FsHcZ;Db1h@zJ{bw%cB?O60K8n)QO|y86NR?C&C^9d@*ezvPnRps<{Y>BYM$ zIK$!mV=9Vi@%-qcE36f8n7PX@)8@u}2G2#;w8xAYVinL-W@mR5P*iR;Kl|CU*3z40 z!EN5WiH0*OaJc|l{%3Nm$5cvlAO{C61g;Vm%^NW&WC^U8ciZyd`4J=P&5bvn*Et)* z(QN^G;;3UUBF}F^ncQk+U|C&y>G5`|dX_r}6VUOoBnLXm8s=k9;tv@z4_95a07~Ww zICFHGRMh_FH!oY(Y_FWl6kotQsM9VS<>0e--F2s13ut$qd$une@bNf)zdKp^ynpU< z7u#o+Q=Vs%DI5pG(<1$@yI!?S!q*rl+ebRe;o2zf!;aIQbDDlyeKTKi94-F;`JeAa zU!&RbWjY)r#P-IL7Ef^TXv?w2C8i%QvN7P~ZAy}u6!H3^TSSw>3Mum|Rujk^} zP#3MR~Q99_|3)oJp2$x$MgIs#X&mjF&)!Jd_Y=_wRQP%o{ox+DZ^@E zF<%Fn>dg#HdoyN8P2N7lxN_Ks7yo9k*@sY+;?jbST>}ebs7oSUIJ+b1a`} z;4pP$y%~(UIR;bau#C_@&Y26M!gJGLcA%cJ^TB_?=S!EGF)w3UrmsJ}!5l%Iac(Z2 zkuSYxz8Pho4O^n_684leIA?1*y!;5sIyk_Nd<+H6;gIFRSofnNY3mK@%qrC1HE%W9 zSa_`l@lV1i+hg$Ia41Im&A`+(6Hs@o966*_ib0wG?a1@|uRaW*p*9JR%=4X7i2Egx 
zR6ZyRpWk(8JDO4h2s&&VHUVwNU~~xCxn_MnhOL{7npO;zwTB2G*j|jz?Z* zj~!~paVj)C<3mul(zU&JIbd6{6X|V6+Pm>AQCYlpU%I}qO`L}IW!mULW(w9xn}%nK z_fQFW0Bz_S7zrN?Eee$Dly3sUGfX}l`B0{>Wtc+zW;=!WB+Ib3Y3)Z|xYl-a=m$}z zXFZ)x`WfcAkyryA^1-*DG5)9fj*e|J6Bz{Y6xpxvN?>xfwexz=Hg zr}cPO?tol;U$Xp$p}ro2I>CA|5q+F#=r;sPFPo)4_lF!eF^`-Q4teZa;vE>=T4nBA zyxIKwE8F;sJ(a)H@Z8_@j=5$urcC4Sjqp3bf2Z&!{-`}=%7|V)OXSb8*?_*)R&1P( ze+^`BVcAduTpuy8%1p*PVFDCOlkmZ2I<{&%NP<%IeOJm;i1Qg?`@0SwAA)`x>rW;6 za|6v}wD&AOI_@}FWh2Ut3i(Z#j&U814`D5DHnl9rLAZ~7_?L_hct`oNfS4l=N-#rG1Q>+$$lH4**b$#|}h9W%(vn`=8m5!ssZcgIRt zY`C_L`I>?@Y%KcwK_1zE zH|F3gzH9U0SRQ=$QPzzp2lh8MpkKZN{wA&My)8tlPGOV%a+Zdtjf(H!o4pON1Ll+_gUi^s$Ai!x0@ z{o){iq#RkXA9~Ak>&^FH*v!#+{Bac&6zb21Pn=-BaPe#_N50A@sk|s&5y+K5puFA! z1E*+cjNuF6Ypv&G~K(M-%_a1yz$2Kp-7&TF|8ox^j|7sX~|5*5G@G9X|u09 z62Ezd;#&r%g=X5!GnJWCN;wyF13EBW^GK_}nmwCJvZ!}pg|#aE!4Dp@(@?u9lxb=H z~RCIp6lSndYL4=2&arOpLq6mb;IfXnjjdQchd# zI58%^XITcEs+)>hDp9HAbyHji3gtlY&oU%Kr*Px_oLc_R|NMf~bm}~p$&ooN*B^Rl znY9KE>PJvd$c&buRMv9(XgW?ay=~h*t9*0HJjzO|-H(6#EXyoVq8-iKrO$k@-ar2M z3Je-pA5zW+`Q~)ZWAPn@*3L|W`J&Q~d1t=Eb(2b1THJDwMId8zx=f#Sg3kp9j!?ns zo;z3H$ePn+4<1}+>pj8T+kjdJ#GPCh7AVvP=S5fWhcPj_1Wj3lkb23Q9I?g>ywh_ zKmYT4%#j+3rwZp9`%0!>P)@^rhdT{bPRddjR$O_~R$qHjs7A)dfBWqwzhDGaFDorcIXN z!5)m49J%zu2R?8rrjZ|)%8N@gxjld5T!3vxtR$qsM&Y-%wV)BgkI=>j{1EI~sY#I) z*Hvixt7U;r401UVpaT0^Od-AoA8Jw#A3he_HL&Y#1D-mcrvrXadUf-D^B+&IGpnJT zs7B*<7#+L|rVKNGcIH$w3mplDX+mdV`+)}Y!lwP^m#=O$Pi}aNT8Tgkw%eVD_Ny3= z@iFAtKUrvw#fv#tM+Na(bS|I3niY?(*<+sExE~K;dNfrkKBmjo7#)Bsj~Qw%!W1dz z%^YP$Lt#M6z+4>KYO`|JVe?)(QcA3u^HTNgQ8q|9L8`{m2#KslLPoo4*bwW7na4IQG_b{sN~tlVXOzH&EBYSVRw ziCa|ogHGLSeBAxmf(hn=S!3*A3|dq5WykCVYhZg8*!bxE{AbRxYhW`>AsnB**I{+^ z|DIe2MMHyW1M)_c^G8mYfVw!&Dv9_Zda?;et7|^XV@!(|7|Gtj4_TQdZYu2HiWx3Gt*<{l~pL^CbJ;TiVm9s{fPo6!) zTL1O7Tx{arAI`wB2k)g7s2{iC-TW}#%?(s4N7Vtm?v5<|K@opE9z}jKVYT z6k=ZV8NJN=!^^juPv5=F?DrMem_qzStkpzIYrf0h0KeN_*+uf% z&~c;9HTZElcl=;Gh=Yv#n&bQV_E$EU+we}{j0vn8;d;UR?7~NkZ3ojoN_lQD>h2`G zr)!XD{+4E-BBD?ut;-#oAu*gt{=HU@U39O~#@H^KrNBiz}q zD28L(Ml0Dxuu}QUN7tHZ7<@uE3g!N8w4ondILTTRv!3ijJI!~)19-3B`uYxYfX^e| z_eVKCPl)*6E<4t||IF#=>*xM}k;fi8%wy~Jn%C`;M~oYr!_^rz*O>g! 
z+_C0bC?u(LXT8a#onbW1Nb?n}Q~3Ygx5Cu&`M|lM;PI(5rkab;hH=0PmFJrv(=y0% z$E#b+1FQBz7AFjDpyQ3m^JKiQ-hbRUa{+!UoH%)y?GM}Qz5K(rZUcV9E!ld&+`D|I zd2kJ6h$;u9#Q}oX;hlda-uYC-v;Ub(k&t7iDa5IiKL-lz|GH+r9f-_XFWAp`5$}cD zz~+_Bc$brfhX)nPoIY`gc^`hWTs(WU8IL~TfS48@xuT;r{I4O)_ZF=+%lLaY+}J~| z5g45OO$_L{_T*&;}Be5>@1gl(fVY#Q(9~B>& z9M{Ob_10&*#LsZF)*FT2g)?T1wic9BPEz?ur3lx5iI*6m6u)_4!CEvd}C_S#dd zr6%YFlOq<53Zdvowj%iRK^~x(7%-we{ zg%T*O7-gTIa{kGmoME`8t}Bc5-Mv5JZ~B*C-iRrXUplIcC_fdcEE`&Qj=@M}Qw|oO zGI|!ivr)0jx<|$J+O<2e?ZLgf?X%$GWZIkofm75+bFRSD+%Lh>dS#~)IJlcE9Xp<( zl9I}Jmb24gF@4s-DN{y;l#-QDzQ1M_*lxfF!=G}>6v(nLqawL8Mf1JyJ&3il(``#| zzVj;L;DCW?Fm&Vd!a*iDWv7+rn%J8+zXc^O*FsMl|L{)Aecg4ZVGzNw@O5R+`oRGT zVWltQ2IdT- zXb~=$_2AAsU&R{J8*TZ9>ndlN;9A_YE@!*z29IptzTdXZT%(>-!n^6-nJxz!oQdCX z?|a{=w)`Rzwx`+7tXj3peC=!Zqy`D_0=c;QKzTTbfYao2APT1mCl}_KWx;luOkJM2 zXBaB`X$>A`m`#@6Ut^h^itUtGQT0fHT~1So(~6`KQ~fkH97Y97j}jxjs;U9D5#evI zW+=E>QTRA%3h`KVOMyHBR)Gx*>@}VRcCZF^G74;dka`j?>aRfAux#hy;6nnHW-~{a zzrSp*IRgzUNA5p~PRB1^-eMkJ2gNn+$?A9+7Ddl~^b{ztwpe)z&B ze4Od3_{*B6y@(I<(}rX1(~0=7H5S&R17m;u;K!gP=umzAfj7-#RPIq3f_$Ed4_yCp z%?b9S?dE-l&Cj3T0EOjtD3`i^fR2<6jl)f+Of)x~cdWH;4ZpY|?}@lT3%kYW41IUe zI;)_q#1DY3uh@tic^Y|{1LgG%rypZZfs$(;UbvjfFyD?ZMOr);N`cL)#t(yk>*F&Ewt8>+2qN5tpm#SBjzOxxcCXIU7m+RbYDYESy5+BMg9Nu+0)EvPhVJ;yWiCuk==3D$fz*P^q1^Hb~1 zElYOr*L*w>_%ZgEr%g7WK7R%V@o+k$p1BtEW4N~TuU^_@ZpQ$PUHI4#$1T7I`H!78 z$y@-1#mlH~fA$+pRp+$3nBC$(p;W%|l*xF$U}0{1N^xS14#T#Fa(<0dh<8z78)K&s zKXG!G@8Z!hMPFg_@B%*k|HJQJH?JbCTwNVxyzZn4=5OD5ynTM-v@3DbVE(C0dI~>I z?t^l8(dxZscUTeET|^LOHb&cC0tNd=&zOdpDB@3vM#b5~%Xef{VBdNDY4&}&7;9TS z3kBB?U)*H&!jjbfV#+2{=R4}-ryXN1hJtAdf6!(NUxL>-Q0U(=*x_4Gt;3H347y7y z189pkef~T%uv4*_3yNmQ$oIjoUfFD(-Lwz0&R_ss969+g;Dg{7&zx*7!=SG@=-W|w z*q2>q1vaM;Kf|1Z4;gO6exdWoXPeg7_rA8(eBpP?@O2L#1;Kw5mLGZVNt4V!UOF4^ z;!5)-KFr;Z0SLEa;L65*e8#$w6r-hnc*QJpE!KN0u zSg{L(Flb#C&&|L`!uOv%!Ca0(PP4IwA!lof^A6yLGA_~c7}~h&f3Y|i4TU7fpx)nf z;Y{=4Q>WOOX%@k{=6}%6Jc-{Yt!PKOa~xPS2On%cf6+{<>>Wui&kKB3h^I9fld4KhcM9g zR=meKP`N2i#EAE@sl&`?FPUwQ$IL%&MwpPuy*}jcps(Nay7|`^w`5fE4@6()(+ek< zzkBzIsHgGzAH5bz5r{y-1Za7^cQ-yr@7-g+7qU^O!OO@|qs^$%qwz61_cdJ;7P;5a zv&qS3fz9_2+tBV@zQc8wUVg5leKUVR8x{;q>@H5A=(d~8IPw*a@!Wm)tJs#L6jto> zTyez-b}DUW71^EKv>N@+cOI~oevFfj@r=cw%rnlIVW-2UqK*o~uw$80>2%j!OYBs( z;r^Uf-x22ScEc5Z>hgpRvr!~%G z&C0T()%GKgEVqhHCm;8;M5Qus)F>KDb!8re>#&0xXz@;koU0>LgdTU?cxz2Pe?A6; zLV4TOVTPO4JD_yjWUc$1^)Z<+?AzWp!+i9kXC0Mhz$}P~%JcWuIi;jqF!$0{5B=gEb+<(8+qSU$V+OqIvhe;`;NSzr->OhYcSy9A`b? 
z02{V-VaI9rPdaG|W?h(@nzJKMLVOEb*j=70@O z_kZiH&!B&C^x!y_HwPDSZEY&ieaCu1tMMnESlMN;4rSrMgU^5d?Y2Dq`|{!M%7QU6 zpR=)@hb{fQQWjTVaUta@@E~=BFGK|lpUa(7^+p9n!!8JJuB9ar2n(aCt(dDa7*xvNM`s3h`Glg}A>4 z_WAgjo6HpAPJ#VpD6m(A6xa)o8D{<#A23dbh3fYoU1`3Td5cAb5kGE5)f9JmC=I%Ff zZ2;0_`gpMivcOA`*f+5j`Vd~m!G7TAA-HL%$^%ZFo`ZGdIT-Agu`|MSd7%(;ROxKKW)oU1e3+X4a;SFgBt zj`a!k<}4_3KC*DKxf&mc<0$u;{3%+G%TZk)f2rS_O8dDa2ucJtK#KHoeVLHOhbrq(6fKdo!jxZDoA{=d0$A zHJ?6jrs;sf`m1=(pXbaIuI{+DhGAJYtZTgH%E<|Za34E)qWK)!ZcYVnn@V@Bn?0jy zh8#~~Wcq(CT5BF%iRZU-TGG_5jL_CyoN;Npc82F+y?E`y@mBfx-6uCd#?Cu2XyQMw zoo}wd^Hrar6P=1bhXX!;b_((P-!%77f$g4s$Pr8--g^Z$#d#hI?7#c{Yi22?%+1x+ zLBe$jq2JXT9Htzv+l6LZ9U4R4yWQN(gFJ#|L^fN<~vYg8!ivPf?@`^ccsr` z_gshNbpxpRjDvo2d zW1sE`ta<-;crS6#P?u+y@z{3)_KcD@u%8irKQRpcPsi*EU&V8M1^QTV(r48?jOXYv zSeJ(t*dy=(@e^lHMW1Afc^noVpSgb(p102a4HL5RbjpHV7kTNYV;Fp_{lZyOY~N;L z4g+3;2=Mse`QY*r|AK+9zr?^+>-fyjA$t2v@%it}zhz_FX@uzvX4`JGjWZ_=HQz+P z{6c(Gh2?;~-NqB~hl2?-O(D+ptk0h{+I;&RbIdO>Xz|-mZiFn5fpSp}Zgkx7(Y58| zgy97^Fh2>_*nf(Fo$talu5kx-1~+H_%Q9_1fBPSPx5V7K6udaJaw5=o3E1DoP+o7@ya^w%xfBBOg6D4lz9CMSG#Ltw;r5%NeKKu-fsaKnv!na| z8eGGD?s>@?4x!z9Ju)1^Fw!pJm@1o#;Da!4MPrmTTuPu&I2VV@=)kIKQ_sFeKXMj( z#hb|SL)g;m^akhCAbfxNO4p;f;kw?H-AAjspa1+hyB4!k?oi=!{q<*C+7(hiPLpcp~>l%D=PS+Nyp>j3-*WK^V|b=I++ z3ZnG=X*sIQAO5h|D%YqSVjg&n>l0HsMa58HshYmGYuXjo`ul(W=V@4Db2|UvDj9Gc z+>0+hzH9JKAFnT2vIz>hMXBq2W@w$xDVV9C3_B`p@3`Y-YxPZem=42J5lW@)JKuT2 zQA+Q09>X#5zVsu@E56@_?s~q${NfkSnFk+y18D{`y)b<$A}_#q$@93f4C_V;Lc{n%qGaDoFm&;9NjpS8K0?Zt~uE%n<*uolc51K z<#C__t;fTTipUu=#-P0Cpq`G0#Wx38uy2*=*6|j(Io?mJ^Ghx{-YU??jmsuOB`l8*KzaTO1~07Xe2$qu;s^c zpkkPVb3XF_vv(eVQWe)5pN9%cM|!VPr1vILR4{^qqM%|$MeLf0MuS+uB*ywT_HK;9 zUJ&e3rT32X-aAOM!!iw5Bmgp|KShyEjxs}9-FWCJ40pPBLCYnZCb7kXdA$P{E)kT9rQeN{5$VV z6^OkgYRu9fPSHCqa~!}m6!u*_Br>2cb)zU>p^F=6-o~Q3AzXLnJo+K|1}@j2juhmXZgO)lLzn`tLqcO zf>%^W@_+7YE`XihCI!ImDGTfsu@>0!ft_Ojwq}KK z%eI<90^A8>T&X1^F;&L^<=etL9+^GuBf^{23Q9qAuZ8rRw;gx zsU!MTX!bbJUnN=16}`8?!qpvOkttcLY<&gba^Tjme6f`L%i(K$=+sr_?~_)0zT(`Z zO*u5>R4!e>w9tWaL4i$Jcaq_1(ZPR(A_Sl=m)tY@%E4VJmHW+J4NaqpdU5vO3E3dv{$aTQ zmC2%jU$F%+N|AIu9O%>Pub)dUO_K003V`<3G*L1^7pnt5cRomZPNY!IFUQZcZ@d&z zo|UDUY!Cj)UfJq@J5G4cMWtxSvbhd6?-LtB!4xRO0ZE}0erv)C^W60H0;$wdTwTdP z=~j^wW%3mRBTaV{N80Wk+si2s^l zrrg%XRFUR1agIdr0PJn%`cLPZHIla_uT`5glhxB1I(S~VYNr{75{>H&QvP5bDqkqC z@MO_?iBf5^d>hXKxIkei(_E8b9yp`AxmX}Y39$<#+g2QWzcYEId2#X@GaZ^y<_#BU zZxYg6+ClxXa0octAz$9(|;~PLi_0A3vIFmH=Rfg=s4q8Mv+jj2Sm>bS(^EcRf|~xq0 zwR3GT02H?PQ=+1h9zL-9eY(K>=%WxXptbr=fgaCHTxGUt-LiccB^mYIcA9UBh%Sq6 zh5yyb>Nk6tYhq+(MX5e=SDQ-ami9H$Mmg`AC?`Lm=Kf&jI`i(F^>$4N^^)-1&Md!K za$&C!=}V$SRwGq`LR@2+XOJ%e(a;Y3v|Ty$ox{O*#md2$O(w9vfvM!*lUA5}N3HPo zOeF4t+6W4_M%vR<(msoFu*aIIIc7HfUJ7*4^Nb3jgSfw+S>5zKt$MheF1cgn(^_CZ zKS}Zk&0iE;YdIF!?PP&{v;b`8EnG-=7TEKx5*eMKklMY2)`)JkOPPnhS!~w1@k6lDrS5x#d>R(THt7XpAVQ-L2uGswdsXa!<0N8~zUOJyDcN^NT zJtw8tee_T2Pmp_D}^UrhUO%s){0FO{pMVe)}JL@Xp4v^GXT%?Zt`6f<>-=96r+_rrpj@+f{Wxy@;Y z8zGHd++nPj<=hO`Z{$Z~`!m(}Zz@IPyE@e{XRE&)Vh@?7z`4ob;q%P@OuUv9{iz_BWUgZmGd^&8~#dY6<^!vX6@(YO{WsN)3YD)n*3pbd7w zM!vdh_O^?|#dz`jaYFHIR9)wj5F3SS_MZNs9RBaz^M|N=`oZsT*?kf1E%`asd|f~w z*Z+E8e_y0X5z!w5%pjDh`kSUdZ~oVp-&EM4cY&8)8fz_(9T)(xF>qj4OC{^BB`P%W z0Z6cp<(!3X49Hf$emPlCceLPUs58Y)1~3GG<^A`kTVRNYqy@MO$OFZ5<}^G7k2l^J zZzfGz9^TIYZeiKkqeml4Lmf9$dn3a#s}3JNS3vk|bzyH%K%I?+=7kqF6}5HdwgbEx zHR=cR!3WdB$+Mqo+qSwaLtFVky$ow@3<$Yej8uN`ng^*H&j7*(56E!xhxG+(@h6}B z)>CkF->QqPahR*ZpA6V?t-T=6s ztBC05qVwff9)Z`Z1%l5LAidGn&!vN+1b}pGfO;F?7?9ru#uAq8J$s53Rc-)0r4DyD z(PxIGkY|iRBkkT9pdEkUf%7aF9V|bct4&!hrHRQ=blzAyqig_pPoK#ukROZzk38~a z6!km)*KfVGlWE?(N)~m=BbJ4=RtbQ1aAk!J0^k|u17wp`V1B}aA7vkBvAth==I&w$ 
z!V*1bjv!439^-Q!nGt@`z8cGa+#}#e+Lg)PjPp+f2p_F6puKu1K(pVkUpuvFW&qiX zk|7LdpLzD#(P9&TIl-$da>aTbf9jM4Y%{hGyFBDc)z!rf0p#00t+oD4i zHgL+Oln#Ng;M*X79P8ZyHOcC5q_GZK88+-`M~@^G~|G){-DGC!J-Jh`m|~S zqGC5%{)mz9hjg&~!o(Hk&B?1R01>$(kU{`OUr`3P(_vS!{KpHZ&>|G7<^gS>wn-BY ztNQmuFF8>FeAL0C5;{i~$4_0@(E8_2SJxu@{gT5!-ko9oxll3)g~F2K4Wbtwa8BK{ z^ke$%!{012{{)RzHTm%!MDN#kg4;=&|ZE6Gh`}+d3pO z7YEAjS1xX13TktmRoE(e{hc}lU61K5e z7NE{#GooyCZZDs;>a+NsAH8I&8M&MdFtrE;DQ{d|zl<5uy}tF;>MkD(pU7l`4*Tm5 z{!Y$^{Ou|q*tZ;cAJ|x6-z4kR6;d*At%Gm+A}8T@>KC&^hxT~HojP%d41+r;{b+NXMf+X9_0E%$A>S@Dn>6=?g9_D1;KVhpE1P-(a9Eq5n0EX0{h9~%2&h}AKKG#9 zEo{}=9u*|eK8?zl8?H zGp(zfYyo_`4l6G6_ih~$O_gu8cO|nq+5<%O8`6`N@ziTtSQZ!B`dA3`jqPfa9XeOf8TLbtNSs!%ptwXx1qI^$g z|BdLjs!YQ~CH&^NP>#;FASmCXAD`22|v*QFFjzlqFqIzbdJ=Yb2$;s@j+0bJPW_?suvk*&o9!1N%-3 z9o%L0K0!0@(K<9;{-=3An2SOr6Q2t@*uA@DMQiO_QvEhxXkKRw?3O%X#~G!4#QU;L z-W|0EF{YaU*895GGu5(SXhIG_7T6f`S;bVf&o1p*ewJL$kZYW?YLzriq_kLAdpE`i zt@o1-jD9@yll*A!l53*XW@W5vB4yG_YwX{;*OhW<5j!`zD#rU{+)dPh-s_WAna{P? z->-LrNXRgCOXe2?R5iT-!PdoLFfbZ(B>7YxPYyFJ&8NuOe+63?w8{`4p`Wp{yd8}y2)T(O>>GiRM-?4^o@E5I{!*bZ#+?gak}I0ZfRn{Q6A5lH8d_C0OdR2Qvx ziva6Nxi$ImfCaC=|9)vWQO;4Y7^l3H6}ilrv({RN<8Rv^Si1qf05hRrcEFN9j{6>8 zW-f;J8804tj?X@uZ9!1#>H?s7-@d1rs#TF~@&Rjd+G(5iFiz(9?Y-lUGc6$JPl+-S zunPX{vpJ$dUJx4p+S2O10|f=B(~|+MKBw{r&G>TA%6eT49|@Uvf!H zfp=AODKgoSH)8>14B8#gpK%16LFdk=%Hp+-rRa9)#C}G~?%lzN3<_@)2LM4y3*Zt3 z4J@(k`JFEjEU&RR-!44C1$<5S?hVjbMG7Usb5U%*-m}_ciZUK07bw(;L;g^{-*e9& zByT9ZBXxKAF*dRGN5O;m1p10&D1f#)*?{b^poYrb)s?Z;y?Z0mty=>N9tW459P)_z zdXf3$lUddn&-GU)wyKC-wKB+gL zG4$nb+>tIy7Ps8e$@-cPdd{DQ|2zHYsi(eG|KA+G%z6WjJyi6uwEkYC4BHPLd~lex z0B5Wam~VRaY+`BbnX3~8%1j%t)>+LbpUkxFKphxI0NCDKE9OY;ZJd0wO7HOEaejE7 z{)#8c#@>qyU~`uSasu(E^uGpR`|B3_o%;V?0qk50?AYFLc)tO#FBm$@tn^ce>o5%; z*p;O1m9E1Be<>}1Eg#rX6yikC1ZOR7LKJ|PD;aX&`s8`xtDu0-bMKObw zaOCyO%awE~|55=jUoY5T9vrs{6xrKDb5o1X&21`~+s~|(C6La6R3-{>(jbH;@G3eu z?ANN2X(M2i!zC=9U7Og$WfhC^OkK6jd?e8LRUHT!SswbsxdOi*(ZO9wSp{bmnB=#} zTI>1`XPdEp-{U%vtEL08%NmzA7c{OAq6$_a5lbjg9?HhS^%z;NJRtdQJt_%+xPe)Vr48U*`XiL~Z|! 
z{$xo4^D`3E#pBaD-?z z9}$J`8ogxr{T>2r;$yapD1`87O&=qD@81low*e*Q0x8k_YvM}t%?ioJ6-cof$#VY* zf$#No;Cn0u>bX%sOudJ`wN_~1vWG0Nt73soBn59@!x#J0`v*C>w(-XcMe#@VJr(#B z@YkadpC6$RcQHvwf7N6``$x@PEu>*vP9UIP347VDujKIZMfC+>r-wouz<8(1 zkCK*^FZHsWAv7>TbGzCkav&wTrXc^Ty$ zH=nCd))okQhv<1b)fNz`Huth}8HQ#i6?v;bis7Oreoo-avTZ&POi^oUeBUkl`E%ss zIFTC#))oNlYh{7GUqD;bn+yF=;8D*yCC%w_f6z{Twkm7w^v!gth4*Wt*0(PgY&63J za*bV~K8#|73tH!1@7=(3kpkNZIsnbwsa(Y0)+f`0C+*l8>(EB~n%BoHF}HrbBnrUp zto8iKYg(D|Iuv34c2aO--K$|JJu;Ap-QFXa+Rqld%)qaIFmtrl?W27Nd`Q9Y`uf^u zi5aG*=0NsqUM@C<-4_5LuFyI%RN&w9Gu7wx+Z9kysyVN2DRc7~HO(nW0_tAMzm0zb zDa5ts{j^_mSzsT13h@zA%pCCXe7h&H5>wmf)7*ZLHV8FiK`A#~RIQklM2eXbIv6Ud z{RR7K+m4FE{DtN3Zta=qGYu;gHD%?KI;%Ye$`<37Zk4jkI`f+Lvr&_m(!aM&MRTs^ zbNUTs@%I}#2%zo~`HlP6#8u`E`G&UZ6P2Lc+B?53AI%-5_yB`Zyg*kE0#aV#SK_A^pto}6U;umP&b<~)ov(LKH?#mTFTJ$61<&Jwb^bP>Jjen6&IHn7jf5T- zieA5UrB`;X-+K=TBpfeV<+b6Y$QO&ujvec%4*TWndV~4?`z5jfjre=!dB7ihF8Auy z)G7(MbBAz30bhb;77#J!63;=cj^!x6boos{F3p-%wp7+ZesR56!T&;ujD;`O`^d`m znL&fjwS532LEFA3_9@21VkxVA`spl7fedJee6jk5V!la}DtaV7ZFqEg8P=yVfGSqy z4gjQ|TyaI~C_vJaSsq#EFEy{e`h7SV@`n1EKH||PhlCQ2k@*XCzy~xIwJsnx@~u!I znjHXn;I|ptZD;ij09w6zNej3(YE)TCB;I3sW%qJ|;Q(LQ_)vDCkoIw1y3`99;z9@rZEoGXj_#4k5>|M=sXVTBati1u<3N?Q2ORxgY*PaP-44NrN)qrT>7 zBEu@T;6vL#wq1Vi(=Q9!;Q8lA+wsox1j>N_>xK<0XdHOg+_UVgp`M4O0rmx#4}Gt8 z?K0N#Ijgpaq!s!0?z>avk9uKvuA>YnZon4dR|4^ueU~cltK-2iIV2yYRwh8c)>~HQ6!DoRN4y`y+t_!T>hZdvqj%7QUHxbubmJpN-Hp?@n6& z^#JT`0+k;v2p{y-QVZVLJjq*Hep7C2 zrF05~MH*C|gaEc4sHsDhC(dtR8q4zfBpo2CNO9}N!IC}jowmk2HhOvFaEgqz=^1@> zQ*)|l#8vEsfiivf!xqy^R*{zekcCtE<`?k%$2OH^X;;-06)3`icf3FvSW|u_iuOOt zPu~Vni@QNvSobc)D*CKivS!H|~^s2`^T+RHwfZ+^1$l959F zsa+NGCn-MURlNApjl>W7#&e^z*}ql?(0lvH)@#Mn>jiED30RR1>P96{Ka3~bjJ*e<+|3e+m;x4P{bXvyKC!5GL20NWFO8L&Qn8r#K4nexeW$`@nW` zP~sdEKdXLRAH^NXJKkJFIe0mkD{$(XkLQ^s7B~(yh+ZI`Rl|r<{+Iz!2;N_YW@op;wK)y1vbfkA>jB)(Ff1ksPRo(k(Ju!ZvwCTwbP4) zBDuBH79!>!uPPY=Bv+aHwLWcE+lPbtQ{SfL&7B=n!hookgaFZC81hW47CL|qAv z&Ck4Am);P7ecLzMr}%&}0PM&6wzO*kV=SHFpz`^%`sKf8X^ar)x5}feloc+NQpz2j zPOU-=BkEIX{U_&9^;-~}ITLdJI zkY&-GANc@mgiH4bwc%wAN}FCfh^!@$v9!i3A$IH&l?Jq*`pUW=8;2KH_09 zBfSpztT~kg(0wlU>-R({{obv0ZZ3NxS9X=Be%XTNUNN7bM1%!+JULt0s6fl*0^^S#&oyI~ z`IL7gn=W$aaDBUKrlUaYpX2P8k*&4B)?Rq9l(EG-Vr31diuMiW{2X2gUb2wr43BGI_2WFQ#fu4UszLj>u_?5*gBR7%-UImEDQg5{UU{Ia--%-;HXPr+tw{+!@6~vn_}O68E2YrG>7;`T)4cVdK@UAo2GfB&ap%T>Us`t>VVfTds(Mi!AuxHfd3+@Yr?(_4uvCNx>f0<`)8 z?6z%dSc}zi-pF?4y=m4 z?)e4AavczrAW>R+Xe>ejbH4m?js>r_YJI1BQ#PoBi%QuL+G8xW3l~QHGg5ddE8+Vs=NUOt%&0Qz5kd91CE8@d3bSmZje(w`1(gas@X z!~ns_ig@+pmZX3G_HiwO{b@wLYoDSshIihXB87)FVOnM^f&pkzI*1N=q?oWg%Cet+ zU$tsUOT7$`muv{y=LYo;;=T6T_oC2VY4=>n2mlnH>R5{VfyQL=ro3avE-@oU%n4i3 z!ZLv0a)8TN+Vs(*e-N9%#Hj07y7%wjN!g{=*5o~I+)^nlj0=Ou)CJ%27hl|5^5=~p zbHF+opYP+wEe`hy#sIY0SfIPHn)%7hSCHr2x$7*AIF{rD%772)Yp-o5B?J$^&io`- zj+f>11OtV)vx27YQGP7AJ?rDF#~{|v_ul)lrI4rW^x>1FoO;`BXILNBv3YZU&6;iI z#TUof{_5&Wf5GyCbp<>R>kc2*)%FFNo$ zr4Y}xz|N|3q;@|{A?^nvn1JpUfSr>R;yR466ygBqnqd8bL+sb3ec!Zd@i1*{*3-Q| zh4|l@)TfBXd5{i*R>&fP37Jq#K*W9L*EeU%a*M;HWb@m&OBBHZ?!=qtCB(m?(EH^2 z7N(^RaX3(l7dT9ry>4fSLfm(lqzB3fXdK*G6g9FoNd%_*)21%-WYuu(d$Y|_4!N9M zHOK#}x9SG1xfaqy;W(A|$x%zp;O|#@G}*{Uo8VrjmNfm(I7NqLMG`$8b&vDM7iG0~ zKWS+(B5>T*Sw8L_?bB3r)+cIFPFB+P@xXs5Vq6OG76QxvaCj8rq%WcpGw_qy=7ZTA zz1Kn&d#hf&Zk5lx{z|`uHaWygCP4fCl4#=}8ow&iMtZPYg+k_;zO6#a8p))TIS$es zC6MQ4QGjldCAp%4SiM?sPy9b zlC1#ac!$lt==LiXcKF?WRG{Ws`NpzeL*!C@Xg zQUP%C$zgLXfPE-5m!7BsK+AoPl(8)3t-mv6qhR!j=Vi6`z}OX$H9_OMsOaX$_H7}+ z7|Y766K1``lKY=y^l~bn=i!#p^@ei6@X`hKO?CC3cxh3NZx@O}Tx++VLY%&RrQCiD z6diV9QB%cx-d{q-=1)4<_*w@NSH3&fT4&n(sz*W1l{4>XZ>>uG&m61YPmuM+n`4(| z0J9&gQzfQKZea{S}WQ_LUa7NDFgZjza6NDEr) 
z`-aUk@8cpx^M(!Te+}f8Vvv-YPOmQCy+?4&c?HP+ziVY#-uOlB8Fk1O4gwHAkdk3* zvAB4yLA*N*D_1r8wZEQG_d@ZI7ntZ-fXi3NQul#wr{8jT-742^lg@fwv&waVT&h!3w?c`qQ&r))2TB)dtnOv(= zp4qaHIOBtPMj8WAjrd^ansUQb&FWS&L}auyh71CFRTfgW>_}2j!qMCsXEhmL)f>TnXfq z1bDaJv3;9a4~Q*SO$4sxFHk^WQh9UYNgiuNPURnzE^|*Z@BHbAhf9lsalnqCvK`CU zurBm04B1n>t3O<}B)1+M$$A6Ij34jOD?6%f01s%6@hR-A1`oGP^fCTC-3?}SCkyX8 z(>;I}z}uH!%9tSZ-U!eH;Dx1dks<~3T+oV;M^w#I1n?~unEkWelPN0;6a$oX=upe< z!DtWiNk>3#SXBeIW8F-kevKMcvcMP?kHd%05fJ~A^7JyLOy#Ah*QLv;qRp<7G;nsf z<%Mb)|Kt-UEDMiyC++BOWgmg_MY%gZl$E`w zY`|TB#D)zknzN;_h<|lJ;8e@u&&B^8>h6ag8g8xh-Q4N<%WfkI?Xua*2e~sg=|=>* z6|#N4WA9OBy{`t-HWO&DG)5+=|`Y>Y>>t~l23MN-y-PY1p$MdC) zV#mM#J=t2^JCL6-c-w8AtPCP^KvpI2?yrz5#=rdK6Ditw05)KBMHzVf=}(dw0tng#Oe-|hA6Z-VK?)or%nLdn9aePRZEn75v-F?ANHHstezdy~o zBj7r128H@%m$j1ba1Q50CtQ(GXI1sV4oJ?y`hb1h6cs)|MM2u$e}~hy%J9XYeeyU zp(wVwa^c=QUFc#gJhL(ZAXvFEnKEFePI$_ zsRNiVMC*rNIO@j)jDKk804oW!=U^_`fW^pl??}^6^|1-@pbo#D=vmM7Y*b$THNHka zhbH*I9xO0()M7oa4UL_Tb%1_#gK}n2mzpWPfRWxx`4zf<#3Jio&Zb9sw12p#V>Q!X z79;un78Bfy30QhOEUW3qMD=R@X(J2uhwGY1bMefYmLfY@@o;dC@6Dm|WBD^Q>k2#z zJ@IYl*0VI^$);r=j`I&`sLmi(j2$rU;Y_2ldORyDGh=m2|k8j$RW^ zImfB{Z5^DihOF{@&0VH*r&WxR2EEkwW(xo=~BlWP&)<%`u_$!^M=?f9v`_p z%8IL){7{a)zLfxy4y1z zU<-JESrn}23BW#*-4lA~Cx*>2f19{A$^!dR0oeD7c_JMY;t>E_Z6!I|bxto5`9U+tGfCR#;Yl|LN1jw5){1L<&LqqC|1_e`ad|i73mQpzGiCZ*5r#05^{HkXLiX8vy{e zwZOhQWPzQ=jUGyJ_k23f`u+`f4DBI$$vw;M+S61oUeE)Kk`yfcW0>gYpBl5=%#qc+ z9bj}g^}Fk}XL~^YuoF?wCz;I=i?&Yt&qgmyit2MVa}L{Gf1}&^2bKSKg73XV>HUn9 z1M{do{f{S8Ip#Jjmq*C*U_h(|wi0MBg@J!w(Oim$DC<#(^oAXpTb`7{b^!@5NLm(w!ifXKJ+`M^J>k}4hS>pHu z<+$L2CYCDq=nn3)e*1F#TssOSj#Pm{4imGke)Q|w~G9KU(z%M7m&Yk-WR7Wln;H;bC5T$;&zLW6$OP5%kE1!-5PP)uwJtxV0D z)W@HS-kmhZLtze>PCF3j6Ie~3aYjA;ikx}dn{obO?=o@XaN8!3&eZ#W7S&)1aUFhkt*3=AbVwH|UP|ATg$$;Cv7ohEz*v+42Y6BDd$NIrc*NZG z=0*WAd!6;F9zI>b{{LOu%2YVa1G|v|u&)t-z1Ig|H_HJEab$8(K=sHaTg=_E`d{x3 zLM4jovd(#ASi);JV6sX?jR(7u}^WlgMH^Ha!lq z7mBsxP61Uj*X)wdbFcsB)Aa#jJ@`}i24Vk2vClFu%$IMQKg)&0ge5wVS6yt-9PyV< z)vclpbo|K%`XKpnQ(@wX7(_@qwLe`z2Es*7}QIm|(`P z_fh)m+CR=d#at{JGknaZ5@@HbI>2~X{pXJ84`a?Zg?!R|=Q&CyViagbYCY3^@ zF>WA*IO*E~!9T8o?jpbAT_+B~K5Qa!K-oqwZEeY@|MOhg=_Yudg-F?Jpva*#nt<+Ut$X1YoOBvXmY#R+-v5 z98RUbA+lB43$%T5tc4cB*~z-&n@bv5H$jv)FeuxpdS5!nuV|yl`SRv5pUa1ReKFlI zCC7a}AW`Cey=b$1ozFEJ0QW-=cb8x90Ub|~@9JQ1-vQX4%??qBhu$$bm~B_RxOw%$ z26DX+>Nf%T#$KYCc^CV3!6tL1!2K{Ai^@~6Y(X>Z(k7;Y4!?6lt^{%=kUa_f^7GFE z^md!gn>GsI@R;IQQwtU@EUUzlI-t)Cz+^9{-_BD^XD9uREU+Vz2gLlh{!rLrcKvOI zU1BVR>UfUalsHR70Fr?NyIP=?ewk|cQA~52H}96!>lU+Q$!2STiNz*KL7`0`UH?9w z-+&=p^Ir#Oy8ZSuOqD9I`ou2_R;T!y28iFZX-|};Ado5(44^>S2xT0(o#=0_f*2FCZ&c?Eu(bKS(|=mv(kPk&6d9?Gb2} zp>u#=tfgkJUd>IdTB%!20tmkU{&ay^%fbLD`zd?@*R{qYvF#)#n71cd3vkyDs0(s( z7QL>`0b1SJppzf{g73t4_mJUFKm8oFr=~sWD?v2M#9z8}leHY5J$sF9GnYPPahA-` zkXMr8b$PjWq^|&q(?=XgPTZ_RhE=A3?wfu|x5aWAHUL0=SLdwL$zBY6xxe+6XZcLO zN4C@z|KwPjr_#?U0oLx1KmLvVqzwqn5syFKD^OX$M$kWV=dQK1)j#P>5*bhqlocL) z@H~yhM3mnyh3HQJ?7Q#&(6$G6kZF}F#myaeoT+QC?gj)9$+Wsk6q7J1uZdxv4w-7~1}h9czoFp{}AO z2OLMIt8;JR05{z5Z<{A_r5phHmtEF65WwEOd!KpXg)!C#J@E-JYg~3&OIe_wqF>3C z19{V@nPai)rcW_0@Bxn%da{<>wg89y4*+&&{ZalfxUO{p6~>F z{PF+$KCr2OE`S~B1?l((fPK+Bv(2&%k;xXTha9!Q*2D^ceZMTQ7dQ)S)&d>K^lw}) zX!n&{u2yh_&XKSTg*<$reVlIB?*F5%-XQQ&~; zQ&~G*t+p}vDP;ws`~nI7+^UkfM&&A^H2j;fMCox)Rq_G_41ISCTj# zojkz$z;=bmepciYyLN@^mO`?~?u&Wr&F!MX-R1+C+spUwlk!OlkYn>oAx#&RT4I6S zUjX(FU)oJ%q4WAp>Ggq4J$4;9Wd12&>-CAN%r1><8}!jvl)eMbYhX&M-=r#e#>H!s zR+<4{FV^X%HaiGIW2&Egr(fTxrm0*^l*L|7zv+J|wr7F;oyJe{hq_5sN*lQj06sDj$V-B^^8uk)pwQaSrq2m4!Bf`t!jr7n!kQkJuY(`@E|3jg8Bi zy98jz^X1O7Q$=Tf>3eg`b{*>5(|0mbqkh<@v1u!R@~U3a0Xgs0OT7iKVX|-`Y=K?T 
zDkr(ihx4hT%Ln$4W^k+pw(9?~ti6wx1@>QMf$b>7Pf{5#mGAj{g9LtUgUZGH zg)+ij0=P!664hI%_^3`iC0gI^4a!)?lI+a!LHQl6Yk75$`-UtuVtrtDkq_+0(rkfE zb1vGv$K3ShG&6UXUiw^@kY&nS-A^^0<%2GTp&b?(K;yGg%zOCzm0Ij|fJeIO5Tyku z{##a`O~r(gP7pqvaSnWmYk{4Hye<@g{ham?*|)$xARpN41#B*efQgh#eQ13DR;HE| zOWomL8j2fE0x)=%)~7Bn>cHmsP^Y0YNttuJlv8R7P)#*3zL#ly-zkuGvgoPp{HQcb z7tU*G#QVvA`XRmR#0KV}Kp)t;QAifkUx+3f06P^qP@~;CRIfec+h_n=VDFb&^Kh3G zFQ72+&Fr=2@^|Fwz$y5s9<@sqFdwU}OUfcX)j7&CvaH@=hJ3w9j5pqa67y>-DI;B_ z_m*06)p#x;`gX0;e=VBpv!q@7vr| zlFz%`kSl>)31nXa96ImVvCaC#cAyONVWg#UtX+DL-TI-v-!C9XL7+Nzew-aRMSr~SzH<%WP%44-7t73f^VV6g5=&~VssT*Bkad675C{hVAAm$G zB}h_~t3bN=sfBhqH83=*l(W8%l{gmRSWY7YK-C5f%GoXOuG+@8&~Rm^Q0m3!{GKVEEF0M|F)+$k=o zN;}~{7|Z0jGFWkWlZltEXQ@n`Xd~yVyh)SF_B&Sl$+jKT^5e%ZwcsM@Q`R0m8p-0l zp8d|XoC;V9zx?tz>+hT~O+Bd-fHMjfsRpdw2M-<|1^VaDpHF~z*MyeTD0e*b%*Zg! zJTk5>w?kO#rs@NnG6O{4d+#TfMS}xSqHdE|HBIZ^wVp~B|=qQ z$$H+1q!#&xMfJ@$54C-faY4F?EU>q2+hZ-qH|re01sE7Ey|krV9#qz)>r{i#Ea`d zs$VMr?9?o?dy@d{c>w@6 zKCn~YP{!8R{igqJ(dOnJ(MXS2woM1bp*vE#cgW@ltyVamZa|B4m;4;Nc_~r7Jqq#d0Qz6e(N>jaRFzuhSk_3TC1Z*ZN>7#2Z!!u@quzLVtV}b1+I7UAc0@wn;%B9hInO9R> zZT$6%MP?m9b10lH`spDTH8gdyF}6tpFbxXkr{vyb0+!wMK{ZSPDf>OvrKY*0X~mT0 zFITRk@tF|7jv6d(~0V86_y4U_(4#e{d)*5 zua^?$qazo~mCZV{L+KF;s6KbJt!(}%Hy1&N!KA@@GD^%c(G=nl0J~0#7TA=9ePPWZ z-vk5L)pU68DrZX>`Dc&H^3v#~=I`>K9cC**Hc}LO_*}V((4IaU&22OAi!YL6{BJn{ zU?ZFPvf^p~#CT)!`}&v0_O!k&Ol2tz?wwX;EHX5w5nK#*| z6)9TOlr3MuoR}|t?}mq0(!+n6c`cCzHud(e74dRRcf5G{r0+Tw*zf4?fZA5)Sxl~3 z1lEot6%SBj$dKXIq7wNdUq{7UwrplFGG5UEbJniiX|PblKPup}3w&o?=bQ!W(|md- zalo33L*4)+ehXcmsZ*z{^{ebr4oA{-&$-Wt%rXR&uCjnF7U0ejn)JOt_k{O}oB_XD zwybI?)3L}TURDABzK=cjwFQNP;!+-hKPl+T`}Aok8s|z$1EF3jsr?&~h4&o3swYia zA!UL?wr;dBzJD*jyj2)nOq4@(dKuQD{@G_oikkS4ZAYF3kOaWa3?yQWzRv=x3m0yP zlC3AB_-&e~OquUx$eO&g^~IYNJvCJJ-+i}Gz~%-~k?*jUytH|ck95jdq{vC4nl5GA zt!~|N0#E@<6PNp+|NPFzF9H8aV;-$ydn%exZY2JvEf*Jn$)lJ?<@ z+@*~o$e;gwc2e!_Po2Jk1u;IC0rUx6L;0hB|Mu3R+8^J2XB;DA`kn*LsY9nuayxio z(?Dz8M0I3-nK^Tflx@BVgWrdA*56tTVt}VX?ghN`WSlW|g?Z(bap5_M@?$l8!wnq* z%i-z}`OL7s%wK)=d#jwob<)O)`GN}?2U=4zmra_q+`4CA4kB<3+J4GhqXx_eyqyP! 
zM6y7EWYC~b!~K)I*CD-Pp%smtuUU}tvI~Q_|v@2IGF1fYRXGvX&lp|5T zyLRmplLA!aD)_8zhsp=C_^eCxJ&@p=jgAh{=E5`b`6@lb1ks5kTyaK zq7W}yw3zK=Sv-DB-_EtbPH*!CQiuzH1;DPP1L;%_gp(~9hmn>-Tvon|oCUV7)wBS1 zHFI&3iWdA!Hoe1*$8?0{@qQhmtrOMYuDu7$kMbw8c*|b1YWG25Xup`92Y!)j>tA)Y zmJz0r&Lc<27;r2Q_`lk3ZKU`QhA70bR*ok_4udjNh>P|_8_yeBS2i~bz;*y}GIgXL zSYUeqwhjUXINP9!_N-bZ1zKtUjktpBn-jf(DfSr>R;z4Djp4I^6Bjw%eyZ zvqODno_q{%{_$tCS&9IMbkImUBM-`%M~fd~+@OQi>7t%eX9_2Mx&Z8_L+UzFn9%Jk3Mxso77}Nm_q!M`DU%=wVy(L01ENJAqw#zYqWT2Q&wkzJx-vp z1F-YzdjCeH&D}j38tBAhyBcLj$>!r(>&>5J4IZ9blu-lu^!`_`My9+h>{AJ(xmRgg z06UgKTz;u7g?O5P7iDEFupjzD6yozYMga-{*xls=n>a@*#{_Ml8e%+h0jo5~LV&g8<*gWeb^VB@5b@t&{cQT}+gGjuxEtAl_P- zjJ$aE+eMoKDa1dN1$M*oMeN#>Oj<6^I{9M1_v1O{^QA!mc3A;-P@=im$L1$W4zG{N zY=LckU;|)Fam0TBnB7`3E3tQYcGP0?j4Y+MYai)CpBlx?Lzgr)g|j&hX@k|WczJBt zJoC;Xu`B>`(k|NL+^fCP-Cb(Snl`z)(3R#GkUoV)>{6jTaVf+Frj8F;V5c&dQ5Nc0D;B_(JiolGv1umP zJVARJntO}t@E6UyAIUO4OiiM(_0T!B%$2RHh#cw2DcP{+U8?=VXLB}~!CC{In<5TO zPnQMuD;Lx^1>Jjx6KE2j3t|Cm+Er`HbOG3vaslilB$yi|R{~i}fH`LC*3BM>EoE!M zulg)jqPQ#x%W2=n-kZU9YzJ!a`?bLSP_>6qF*{s0IoIy=y&uF5H+b-fFaX59(*up$ znv-?jm`uPD5^!?ZusIgI#@drWTHM1b7i(^SMJR*;a#?@<;lKa=Rp8KiOUVtmO@Nvk zAI;sm$qhylpkXAvJObnoSc_l48>oY^BHbZ&G4h2*+YeS!PQWF=7(hAd@5&HKGx80& zQQxPZ{w@rDM((oJ^OS@Ed7#~O0a-L{TG;}4Sb)aLz{N}V=d;g_GP7o_vB|1vnp2K9 z9Xix9=bhIuNdPapL^Efuw&1-VG^}2|l>AS33iqSP6B(kD-({(kp_BK^6~K$%$N`xV zqQ85ogl@&$^FQM~*Q{9;)3axj01(|vC-Se}i!{gLZ+*dn^=6gW=jb2w6R45dJJE+- z`%pG$u(^KbnYB%qF7@O?oUsu+FBAW7fBV9w@A9Po0vMvakduKi<5@nB3R7)Uj|L4Y zn120Ir+Gf8ccN+2R>_ijRM_$vP?T|buJ-FEo*3+x9?+Kg;L9)PS&Dwzkms(w_B7L~ zRkf5AIH(ss)Ug1j{}P}vr@gy&t#6fLg5*kD@Z7)tHBrxb#$t^34jpQmi!W}WHYKg# zcJ!wQi;aGm;K#f(xy^XAocCHh^LegxI_ci%0tKB##HP@nR`#89>@)a6;{ z%X6_|k=(ZZ;~!tyd5n9^U5y)8wDSsz3jToHrcGNZ#iD6eUfe?_0Qi0To~E&q?EWG{ zK=?-U@y9dGqD31leK~T(-#jwFvOK2%Y=3lGe|g`FYVdo3a^lrvN9-LEW}2^92x2b!e`@pbVAnd< z2X-O=o5P@4(jt9GG`ypiI0|tsRIgnSY=v{utBhKYim zUt~EuX~lN)#>ACoggY?yKSUb*L5(EfwVr%hSJz)9BtE zXYc<`8^*K1zFrjKcgY7fZJ9;2{YfVLPJTbD&#c(C&y4wDlbI%vf4=(6Lj9H&gwH}z zIof{V?;IpeNpvmda(nr}ep3D`4+p>|Whlh26NUJ0ABA{RQF*_1V|ppXu`-{udb=6& z!CW(4Ku}cRJxcd&9Vt|(%HiKcMv$&EA`5>!I!@@tlBlFrrDOqful%t0l)vUw4v+2g zN8ul;6i6ZdYlfL}TN_hF)`&4(AjeAS0PO1p8n21*fqggt_Cqli*kJ&>y8vvxl(BjB z2SAd|V#|4Z>KZfT8_^@VHrDlSQnL9+_qwK}4jfVm%vXE$;*-e-HWcClmAgqH>Lk4k zrXx`P71F5AWPz>uz*=B8!UFq9wW_9H9a=nLePFLO2Sc(xN%>!b4{TX37LlfUY#U@p z4%Fy?4oAl7P~eg8mYCTa@ENVE{;JZ{jCsBA)j2u8X()>7%9_(k3Lveh^`K#eqNbGE zmSSdS{E{?2u=~je_K#r-aVdOBaq1%rV5ew-t@2}mJ@At`W<)#-Y{@sK3ui}sNA(%c z2li4?h%fOJ&_bn2H-JsDKdX%%|9rlAUM^LB)R-WEHD6z^lzFgs)5v^p@1*w6E&+g# zeKya$FiUJVZr;!wbA#yeAGxrRWy?t|#iJ8D5Wqh4tNEy33)AkDlJ+3h_Bl7UJTXSb z8oxtBFE$>JIMJk9I|pEo5P&^E3UFZsNXfX40PL?s<$Y2-AUV~{eDRq^cFND%Eg#M? zUo6-hT>Yel+!pRSTxbI5pL}l1V3U&j#iSF7eT#|4kP@cp9Ha@V6q%?_h^dX3>@ixUc^5!o@ml7d7XJI+%^FL8LC}{)PhY(rrJQ zVNwbhA~zZ+D-xJv+NBe~CQ(^nPrpV0b}ofD(#Q?D5=e^#e$~PL)@_^3wyoRD!2=%6 zFY{C3B1Pn@<)pn4_n!>;LSAaV{(6B~BbGMSY8L>&vGhIl)UpC>ipUZ(uLVBbeg7xm z&ah##%oktG4+ArRKv!MW)|SVW&;LwT-+9m4uk*LJLDS{6>xXLBP>L3x-l2 zu0bQ+sZ;H2fyXYNR6em32dJg4l#g~e>#RB!hy|2QCE$M-%Tg>_9e~YTg;gn(#F^VE zk9?tTvH&8Mqr@d(sf|x?lp5T%RLT(>mt;$nC}^pa{cRc>FZMT4fRawy<)`KqhhSjua6E&ZRK-({@r zKlos}1&+xBC4sZgmOC{mETkeEp2Z~qKrh1(8GNcf&Qxy9X zE!asPqu-&xF>qj4Yb_irQ}Rj_0IOepb-bA|W0h4dATE^Px7^anvL_rb_Y^^aw)ptt zS(=~bTLlE-{G#{ZtFCITdF7PA_KQsiC6pygHd)$yTrLn8%bpc;>r7V6+$Y~*!)9Ao z3Y49|90%A7fL*FoVO`1ITLLZ;-VwWLXeilqyL72*6@5^`@dxO}KmUBL=Bpp9{JBSe zXx_Z40PHsUj@*(dM;?v)r8X_b2K|V6uu`SswqN6hA=NhZmoxS6$n-+}mCo|fxBxbd z7_ZPAitbC&8Y#;X{Kg|7gJ=tE83O%ID8#XdIg$s&zwxec*u?|DzF0o67f1QP{;w>s z+nb8g`eMwbJ6H>B0oV_IzR1keLT+b9ZDbvVcrQ_i9|`MStc6xdOLEMTt>!~n1TGY? 
zgB2pa@;Eb#3Qi2Z4Rh3e{|^D!56B1h6&N7^ zQ!hFb1nv))U)V9rx0zl0uK|2gI$RFqH6hsDg0 zJs;S6%wSRaj+WK4^MQSWDs-a&r2*ZgEvrp_?E1mly-t>tZ%$rq9v{6dZwt}u002M$ zNklHGiGBChP3HDwf<|hu?B|1D z;-|h?V4j+~F0z)Ye_f+?dg9_H$BYH`+HHGH;}@rB)8ZX8Sh;J@I7|R`+ZrB#osPCh zH4WyrEqae0E^u;y_7Bdw7TW6i0v!%m*S|lwsJRlzS^_`)^pn}KeY=3Rtzi%w zE71Z43z|}8%9w%$3TBQJSxTcqT;Mx%nKEB-@4VC(pmLk`dz&}!wpP@O7jL#ewkscb z*Q{CET9?+URW?&%lpWyagAb<3-}YjGLcd1UEw30{ptA1Sv#FKoYp;DTOWbYY_ioC9 z-(CRF8Z|uTI+u5*%zPJuWahga{u&hT(MP|sbp>?dK49vZawiA%Z)yN`hGe(Lg7?FR z&j|zY>_N^sr@sBmoL(M*nCnD6JjGf$ySf74U4Q-Q@^PLVWqY`S3BHiF>pMgXl10$t zu73b*VmS4C=ajd;^MENF@^cn2Jxpfjb5FK%wywX2yWU>lvUH@>mnoib&4cnkvi-+ud9){58F zFZwe>N()=$_k6U^54Yxj?XB;-ubceY1Ky?<_KHb!$dD1%pR{XZ6kP7P=MR=Kfosua z_tJ-TVd&7w){5ML_l#NmaASo`-;EbsdH~PUr?0X~0nU#$eWrHpG6M431zO`XcVVqO zWy*@M;eZ!O&S|Mt84~N)1U_GRbqXm@(C>PdZSU}Hk_;paXh>};|nUA(JsHVpv5xC2R0Vicgq5sO;9=ll;8k3Z%M|ZmTSXE{V4!JWw8B*O^W&p z*6C%WaKiyU{-KFR|A9Dkw>Bwz)NlNakq!!RWn(R{uayP%9-jqvGx;lj^(I+hr;$P& z3+(Upvh?srbImMK?MH>UjqHTwVpHOv;E$&`%BzA7j|ZK3in*X+c{}ccp873aqibvw zfL-sQZ>5Ip17&4_Jy{mm)w1aiHam4ZUS^O0*!^UIy;cZof|sA$*+wx2>gWP#nav3y{siTakI zy<7+QmO>l=TQ6&N46A>>ENa>FtOa%o1wYKF9)@AW;NCX|9t3ApSe-t1kFE}Rz2CYsUqbjfpwKci{&5b{zqcJ zI{^DySzzziOS{cW$pT<|6yilwo)^|?EgL8b@ey$;#4{^l#Fp!~{@b&_j!z+8UI$F+ zP#B~gnO7bWmD}?(HkgB<7*hfUP}hEE@!J z2n4XX5uy;UB@66S=2c3vN3Is)QHVFKTHF;n>XR%1u>0s8Z7%M7SbzxXFR%9zfPHEi zlTXT7yoVf#U@644{(UAT2b8#+1vUp+IYA+w>jV2pv}$f1zefoGV(;9!-E3UHK_J;L z-gZjvjfxd7CZGop&-c!9j0uR@FuMSDyffxl5&Smean3POe{=Mg9;MybdI$Yx`C+Y! zA3VRVLx)Zg7_ux3Rs+srDSG(z0+#kDKQBY9YXLN|Ey}w#=@Sp@ z&z?OShsu&wTX{Lh%FLA~tIx+C`^qX9cmVc)t@X3>TN?{_B~B|tmdV?!mHhbeOT%Cp z&3@&TZLCGEYr9A~9;k{hWc-G^`=xZ&f)8RSkNy2L**c~>UZlKk(1;a7`zqS_i!bKM z8hd}ZoU|dV0$7{}`HA-|u)mH1uxazFuWoArzerw*zolNsJfaU@X2y?SBLCY|omEuS z?-%X|Mi`})5-AapZs`UAX^`$gx?@ObZ~zGr=?cS|?O3`h(;G-v*2t#fg1 zxR?tTeCJ*7ckgF^o@eh@VB^?NqF_3>PUj<{W=9DDJKY~uakx9h$G@w90Kb0of~6_7 z3-Px3_!K`(pY0ZDWgSlp0p@*H+>7d4;(OJOs1Ke2K|uwBn&a&HEJz|r|BH#UR_BYG_%6zF~-o7fULKr*R>Th8jEzv3Wo@IlZvoouk>A9m( z&mp5t?kboVfHSIxPUE_gH${;Lno;{JHBPbx~#=; zfjV~~*BA3@!GpZ+AD@B;{Lj$@;29p~tm95%FedzD!pG##wL;s&4}S@fXJ4_L4C$Aj zk6TW^%<$6(X=^2kyYwWPI1KgGP%PA#w7d3(0q7S*h{5z#INp16fi}lT{81je>zC3r z1iTtpM!>c?E{pMUy-}S>EU`b5PHnrvA5MZjN^b5JtP%Iu%8}Gh0>CjH7ttx?YWYt9 zvOFObte>cj0>HM^@SHb*+gc2oQb0l(lI8EPs+wp9a{ zj17nAe>}x%>aa`-5$fy)4invU>Xc$5-f5v5{?i$Cc&x_S=2m{I9P3Tb$hOAtD>ZW! zd(mW23t;?uqD(}Zf$k}oKjlf9@ec7<1*yEQ=U;Et3d1`FVu~N;+dc%H4b1$fMDk44_v*D-I7t?%-%Ifq{_n%+jsk7AbfZ()a#Sg@w z_E=+idZy_@2Q7$mUHNf&6Ndzt5Vy_)K5Dh{Eiu)J*r8>P(Q3N z2j=z%@&E4T=r(;6PC84koFpt$k2HjqXF?MbL!|P^uc9|R1-PWfhE0fKf{p+SbJ)hp zM0219gre+Ac#7ATF0zVqLv3W$s>_lTui32v{q9c{1{dqhTMZ0kDvKQ}M%G&0y#{k> z?@)PWo~WsyK&M^gB;xpuHpduJ01!!gr@g22nVBu^UEeGXheJkU5#p01zvX%AjlWI? 
z{ z&UipecFI4We}9rGOcV(I;IlhPcVgqHk0#Mk#E0V&;v0su2_=&^W2wuG?MpCH`my!r zNZfJ)M7-{0`_Z!CvK#ym)&Nc8__n|n?S0|$eewh(* z#eozo4Wm(>AUj#=@8#k#7Y2*Sg!nzqg2Ws!mg)S2a`Xv4{(*9`$ImYd7lde~ickCW z;wL+)kkK^Wn2JZsf2xlm4i763YJ=eMO7&i*$qo!bT2HILMs69{WV;R$-0|^p%Z(i~ zFr;Vx;q{$L%)5H`)W>)uqb51g1L-wyaY*M%)BLGF=%15yABAx$(1VgZpU4#xrf~Q>hR8ACO{^!Ki74f_F2EtWMrOS&@P3MeDUUpr#A${ z><00qO>_0^fkpq_1S**-iFYnV20Usa7i;Go#_uKZJ}UA80MycJ>&z_d5C5$P`Fk`; zHoPCWN=F$mj%{(TN@^MW{^Uz}p-DC71Z%Ma@-5<*WrZCX7{Ou?IJ~nV({T z^;1;jeg(Hqs(bN{PTECPw`*`s%=wp4EqwXJ_i8%Z__L7>yL}rWb0w<|oyShGqjnm} zwr&Ie5FWD?(yz=p>?N1Z6MTv@Z7CGq_u)BqwPG?ifgB-+ug1;G{&s3VDwQ>4y0}gx zg?|XWG-P%1dIU-V_bcK`XOD1L1V#6=v28HoMn6X+j6$@%9MT^LB zksTX#nkYNtkhJS`H3+ghONUZb0Nyl z_JOUQ%G@+-+ZF)-lev$iyG&cnIhe%?K(a03p&KUyGH4SAj5}$a*)jC+UF1hCkAKt~ ze-mqb@ArG_F^YWv*qjY;OE*<*+#i>A&75uR?$3L<)>T~nyd10k&)4(2&~>#A5{9efDW z$lsM>&mJgzd9{*MZ$s}G#Ij=0_}g#erws+%d;Zh?3S->q0M2(QF~`JW8ktjvTx081 zi6+Hx%Lv((8+Z`rdsn|Ema(H3DK}SvV1y8Q3H$>BjQ05SH%S!+)_N|23${(8y(P}b zT+3vnBw-1QKxiED=N{>e)Fod$t9&|5b&6?w*yLyPtcIRhf741^!@Gs z%a5^R${P-MeX0p%JAAI9{SM#hki7F?W}z1dQ=;Fwllr!U7@>bltq&eTq~q^r&=shT zMV{_H$17hS!43fDzV*v_N58;r`Wj&DU#EjT9jC$&RF5&fT;xz={0HozSqXr%g@y`a z&)v4EyEjsOn%`1c42Lm5lBCDGBz{Ar!GEKpof-h@gnuR@1oP$&XFM7RlR*+dt8vGd z&Xn%n_)RAO0?c zFcZrhkbqTk>`X%q2v6Z&ci^ODL_auUO>Onz?|t&{#|lAXrlW@RoyHdrqqn4f+cV^v zEKh$kuI0((8c7fLfuh>CqUVYnV)vg)tq4g%;oLVy#lh#J>+rgNy*jLO``rie6@m<*f25cWZgF6>bR`JDQ+c`Hm&# z0>znzSku7b#k#A0r8b5qu66weo%d~xRbH;Vqx!o~zNdY&6-l5U8(bpk%-{#3+^}*m zG=hr#2E&~iQZ=2g{%;!nVj&9J;R6j?ZS~>5CAb&6-k5R+e!vE|oaa?Z$`9A8=a_vN z_{R0+XUXA-&q17NKiHI2*UVE3g5P56l6%Eu5vygpfP0rO6OQMAO2H1JN`1cg#a+Db zC5P9#zpDe;n7A@2py|Yb>dvnT6-yuR4cJ|2P%U6$e&;4P5LXs%N7clv%STR_0=-4i z+XPNPA{}F=mRwBp8=>1B#zAI>)8?eMz?r zX|8Q(wp3dP6jiv1pJKd?d29Rt96%)jtwj{KVoM{~VjwIh3VP}7&>loO>%KscEW4+n zR@bFOd&b`C?=aSia8DwKpU(^&2vV4?%Vy>jXt^X7>c0nIm`2XwBTc&My#$aX3@cOt ztIdt1u!))dao?Dh=zpQ8x!=~fBV_L|R|lnfEn-?@=?BmbH7i@MSod+H@lc3=KFZ@R zQq876C2)>!O3H-2)a7wkYod;F7yotocWITK_fTOK%pemohR?#*d0zB?<}5EdSehD{X@hHlG>3fsa?3uq?6ImtEfab}a7vB5`Gl*`5CjYz;{0 zeXYfIjm{8XhI zH3ckS_U2qkc4%$vnPMm95q)=t6zLM3N+sjWAJR|6gaC)@Li?vXzX&=}J{+Hi#&FHiO!&6`Y*a;Fxnxc96otq4dmHfcZ954;?koz6o zF~|qM*Z?1;>m8!dx*mjzNS9jPKdE@{3VSXxXA_wV>N)KU@iG!_v$p-}JuN!2f-t}V zDNa!kJN|(vHq4T<(Q@YG@!gojr;=!l8=WhM_JZ{$L=$^Y(x z;Dmr#WDa!&#F6(stDO>AllC-rwV|=2&0vjp8IFyR(*jkHB7Ez#pIYpx2|Ml61Dsi! 
zn6YQ1DXbOGEbvEiXCJFy9L$#ag+L4ETnq&F5VS><$ z%boPX`A0t0lWxm$^9rzWzE_sqh$ixATMLDO`_@}>gM`hFs^?3AHR4CYYvz1NrCjgx z^~s)A>GqQ`y5DIVTfISalVTXD=6mBqZ2rVMQ~{quSMj+YQh;C=Np%`5F6S&M#lf7j zH&+q5whlhQi<=^og1xf+kjy+Icga?-xGr)l9;_0g2Ks#_a;GkSs3d{ipX|s%NMRw$)jZ9 zht9vxRC+OkdyX?8Z@OIcw*(657Rv!`FD9q&VA9V-eUAbY7C0{JzdSnvFJ_&~iJfd| zT^X70nB!SSgkt0^3i7#r%LyCC!q4T%;(qGtDW|DT3ndsc-q)-C`(^?V!{?pKo1$2t z3o$J(+2vgs38t{8DQmVN#^IYou1i9v3A5P;2RtCQ zi~VHy0B8&Vd$acH9-A6@Mq<$V_$#n)qYEq;8Hf31XKitAoI+qm+w{PG!9QPba`}^{lqvQsS*J$=H zT{#}Q+SD4a~tEGoc=eR;Gnzd^Tnc< z?c(DNT)oJ7_f{RR{28?mN=W5NK%1~dAr({S;+$@z%14MT>c)BuUh9xibDBWwsy0c{ z^(hLSOneZIv6<>ECGo@<1DTh8!;X zg+b_Hw`MiF_{}V_O$9)VQZ$*}iY~=NnXsI-c0s$z_Ad8;s1a|b5(ozfQN3P;b7?mFz<#eZ#eLu?Zyx^{u#zs|SmC!{@r|$JbWPGv zvBbQy4F?tzb;5J<@9gm)8r2-@xSl9$Q-{v|DILi($stnl#MoKa&kvWe zhIh6re!d$FJzJWt`FVX}sMOdtK({=>)+hYv(;~fdb&u9~Pkj=1H2e|g%D$K|qD?OE z<2#fuHnNOPDk#M(bgYdPE%P!>3A5Rg=wFm-*%!?3w@y>^uUE5WC|ZWBgFWfhnclCy zXmlW-v-y`^oSWl-1bRompYzX^1Q{{C+v_^kwDzq-W4>K%@HbbunmFH}giG1fXTgV{98V&Ol zRqTAl-Ra@DyWOJ7uPs}!A6HQimgJF@t8N4UQ?Nfovo%RP9q^swj*k zI*iyKoJa1w`N}nNa5u!=A-v2sVfubxoBE*-ddyM)awxTEp|Aq2^Tk(A5H*sz%Sld2P5q&~irlOW}THgA`vP2c@~ae*rawFq+W zSvk|nuP+`=S2`OQX{#PeVmmh@w)2u)O&PrJs7D{|?r=X0E8`uo@%m9LhOC)kcaU)PQufT+@@*g}%Wwqg$PzccBZ&;a6BmYH%5JEzsfpy}QY4}% zH5J@s@UQUfO;b}Fs$1*$qv4_h52nn(GmTmYrlu2jHnJ7QcEyaxOG?2_f_#Y$#A|`# zW8W6~h~)NVSB$_t7Xs#C&zosQT`Fni@DC)5EGVM2)@&L#LpNDr`FUtVu%Z{kcq+N@ zcNyhp`PCc1ueADFVR2BYlFzjiXEBoJ!vSZl#B)p<);{yQ-fCm$sVU4($vGDD|A>Aj z)Rb1akMS0p6}}LJNplF`bZJHznYcr8PkI03{~>ij;f}q;X5^TU$CZU z~cFanzs<6P>QFa5F$jT3>j%E}~F!M&@W)2=njVV#R}n4{lj5M+)w zDtPm)OX45Bh14~Tq{a%Q6YWpd!h`BR50ux#Bk)g%pq>mwJ|-}D2OY)2$l0!JTMbSF zrZ2GI>NTWqL$$X$)n4Ci^XIX`?E;*ZL;Sng^=UsxSh~a5!P^J!d9S^B5XNgRO}~$cI?}_*`F`+by zM!!d+Z&>dzf8Ycy9*!W{`yk$#LhXawUD-Qs82kazD^HGln2m>jV2)VC&Ws1JWjyY? z&Wr`jX}$4OykC2SGQfBwd5UeRi_13Zfas2nh4P1&dUtIk)m0L00^+gu*xwJ0A(f4$ zRKK`pT1g^zdc4wr)Bz6D&?{V36c1)|b>@SB(8WU(db$e+55fxnu$2bPT)C9>J3YTG z8xtCkbw3gf*XqpPtnpv5{-sJ!z`>~lp25omwlXD^l6s^AsqKq&!D5tK?R#e2fg3_2 zD!X@`r$dINt`rBeMTCbs$3=f^`Q-%}TLLl!?@pr=6xV8oKcLmsV>DVKio7bcIU=)MIF(AHetQg} zT+&ZU?edv~$aVL~IK|b={xIjH{qzi1gHm$lV{+&i%VUtRfcx8_&}arDma_m{kGwaz zXQIXU!MDp~qpr6We3I8%-~5S+_LP~~XY$^}o?V@q6r0!vah}kr3+;vDdVTwStii0I zdrQH&1SW^6q&U_2@tBWukBfdbt!~qWI;Um)Q1&+xxJ4;?g4Fun`svcyZG7UatsE%* zVBX-Am>g<f?DaH(0Zmiw0Tj)vQ|( zbUmV|;9aE=^Ur79)S+Vj-lY*ld=z%w6OYgGyZw4-u|u!N|2sa7=1GNXwjHv<8_&&$ z<7~S2brB9`i0=4P%P(-|%XQOsiTHU^%}}^^A>E2t-=!-_v@)|-r9MIfAKW`pvDZ;KE`dvL=@8A22C$zME~UuI&pKvIV|rn z4cbeIeO!T*KSGrnubsl38a*@V1;J2zUb-quBDQ1V0=xN29FEj@p0(F_sGLMD*T#5= z+ZkP7s9+l^fk|W%FB=%3E*$EmCBpN#>`D(pueUniLgrTl=4mV=2=`t5^d#9=bG{t^ zsGcNx>$WEPGG_ixSD!XHL8j%;f1UB+#TtF_;jaG?eFE!uMG-n+S`?m{$CrdLlsdE? 
z43#3VGCSHuTJ^C|LY#)yMy=4mvy&n*1($1cx*8P`@dgZA54j5%4UL;BINi=s_y3N37pB=@32Qwwd2j@2 zJQm)8SU$X?n+?X_M55R%B^}v;loc<8yTqrex)L35kS`48R=%vj7K;>tEs9(SBHllc z5PL|%7G_3?q2oW0??)YfI_J$Izb-nCjd71g7JwVo;_V5-w$LNsz)jeA5tHcgf7BlX zh0IQa9?)oBkf0*1f7f_u;LGxMURNbQfhAJzm`9E!D;K|iz}i8i9>R_Va1=fp#N8w~ zgb-ywQs%)Q1WlrDk)_^v2bh02u%}6cJZT%q%f0E!le_VBlr*ynj!F@3A=`CSXhj5tD|hWsAs4ve`})= z|Glinut3R6`ng#a@21;f_*p>AU8}gG5vsPBS_EPEJ`a-ut4~7|w&YiL%tM$e98rCL zaHahGtvMjzGr9-yA~A!Qc#j6#;sLslJxL^Pfv4ePt*&DxJBAy*DwR8_YaDEFRV z*D2sCD-=<_xk+)*)~?#yrfD9D?avN+!SvUj5cR?mg1JQx`?bLn{WJ@{529MH)rX$$&t%YVnC=s$OWE_cAWt0g@MV*=0Bp zn!O0UVxS^hXgg8w30a@vnN57WamE!fZ#((OpT~&XoqXa)l}#jeg8)ySGP?ds#A_3I z`^}4o1oAUS;CJHQVx{4dKXz{LdFA=RXg$J(U~7x!oWOSP!kKINp7}~lXaL%ox+T7r@X=qWjMR}i2Vmnk4<%`T_ zN+w%iJZbN3&XU-d?tGHGQzVd+xxBc}EWPi@#?+eU+T%}jGqd@n*~?hccHDs_jIvuE z6`?#K$o8!Bb~Ul0(DK;HwRL4Ybga4LK3dyI(F`(HJ-1ZC0hsJ*5<~!_-Tk>u(raoDiED7x$+-@p&*hRpCKmCcyajcyRtMxj~L~#+oMc7 z{e zV)0!4&Dq364Fq~-I!d1vmUIR_Tco8}ShM5Rkk8Xr_=MY{Ror){#GH`5Z~2rD+~UOV z&pQbY^%_twq}z`k-Y=_?TpCB2@ZR#3xdoh04SCbq7JszP$y(0}$8Fij=DkiZ{MI@@ z(I=9$*e`-=##fEOQ)PSXAxle&3l}R1DN>JeDk0bb#3X9aJ>9WJ_JSkNt?ysbO$$H1 zPZufkGPA6cd>Hpp7sRv@yzo{!6M61xpy*&f&ba;Kp=<7w7%M)q zx(=EA%1&&3wSvT5a9kr)P%7LuXXnrI&%kq1_uAO1q0vE@`!e9RE`4#Ik4t2zAM-w5 zf$$%;XUy{gCy*h;EgndkgG#4|tmg4H*H-5&-QS54BkDU+DAYNeMB-o~w>yUY{FXt=f>vzYl+ zR=bO*5M(}G&h6e;=%B=U@hed$b#8YSg9;Dcm;~qrS1)&DOeb(MowWZ)woI~Vlw}8L ztX|t@R1RWaQ7%C)?n^KHK$~LWO!M%6dRBC+J8xMvs7%qwD#N4c)t_vT3t$Nitr z?t{1cW||um3W|N4t&gs!^NXm>9$tI=mEIX37(EG`={)9Zc{Lo}+YAKrdf<>Pq!LUd zVpH!0g*}J3H-hV57H-Zj^al38htTr)_Vzz31cg=W zahN6Lmcix%NVrW<6=vI7N5nN$=!#?GrsvgTaq9Q?1KmdAxuKB3iEytQ{YSnr4tKqT zQ2_0!PtKoZ6R3z?267`Jbwb$Wn+^Mlv`|kT)3`0goXeSEPC!xL3j+)#!PRRKdRM{7 z3-b1^tSMtOo`wqJ3YLc%R|kGWcP8PVa2Yml?6k)~z>iOrca>lr>)(l4A`}*KhoHjS zKStgK(0tGA7!G)(S}T6R9ifUHZd_!_zjXe!0FFKyo{jUk2w2^m={>v?g63T8EqX2K)Iyslp*@x}S4q~Gb zatzzk*thiB`S2VV{2&r13SY6&+2N@9fMitWNG)^gQr~Atv;!Yy&>7_D$HwYelqGa5 z4Vl>kss*p0YnoGMR7Tc|D&BWKOuVQaB=of^+*tj$lwdjf@{?uoEkzo}GfAql3!B>$dy(yQsqQ8%J^ z*9Yp@CdMzu!sBlLJ-c7pS`C2h<^CcUM7{H$Oz*aTw;SEY*3lM>T!;EbVN;9m$%;D> zISts87l}4`9e`}YP-5ZcxI(c<79C!nrDo>T2YRUO>)`gZ54@K3%@?3a!L=+ETkZ_wXq=SrIV4TV zBsNe$SUBwPP3G;39X%4P=PZ)gZGnGGG3jHVwTdzJtF9j1IMBFl8!=uD`P_eVtO_g? 
zNP)tzNWB~aoffSVS@~a>#7H0Jhd-D^W7Ewjwqo(_d&Hh<_hYU(A=kQM*FO3N#k$8m#2S1U$8Zhg{`7S&W!;NHf7@{% zRqOZUx~lO5C0`3>FN)Nb^To}<$zPLJ%>|7DXIcca%WQ`+!_jTt&clY^qa5@VKZqsj z^l`4ksC69FGwJ|yNi+G}$7{FOODi<@u2$+XcEdR+Y3NBLePumGQkj16UzLuPmivcp zN`-Fgo6tG7OubT_rk|voLX=!4uAHm$mLX!dpRc?2{zdQPzext-itBBn#H>0$_fa(I zn1<(4a@ecz!0vas>t4*>VvncxaOy+72 zKe=BGNU7Hhi|G$IMLMmo<%P}-RwY~rZxnv>AvrhIe5N{LZWMP*-u)JSz1ewQ*h6UQ z8)oq zohBxRZNY5c9(;F|uIKIld5d_*qJ=p}7;qMeu62&cp&R_+9IZE8mr^|9&E|~JCzGK{ z$D%vn82$;GjJbbnxs=O$T)w1l2c+)z)RrrKv-U)oDvQN0r>$x0?6lIKqv9~l=*V2g zdbUR6j_17w`b3@8lo$1fBDw#6nSVfi1BH29Kl;trPmMGnKd|-_=OuTx+tX4&$RmKjj9O>wjLrvu+geO)E$mu&z5W=u>S5X8q*C5F!bNR~+)@uV+i5TNX>mt_ z>6t;6ekoV?md&=hDczmL^n%~fsKF*K9B&12vtM->?aG4n*}P+$?|0w6btsOY@VjoTkGG;tB#&!Ua#w;$efvo&4E0%cR<+I7 zDE?8|;)MYQBgXrqtS$tB=gmpJv2I>g)}xOo;`okkC33>3QDW#Tl1>roONW4CB;g|C zhwIRETpw93&pwqa^d|YnmRxJs?v%fw&%l4JcTeX35a;Gv(xpesFxbrq{s4)-ehgaE zgcwZBy&RlkVP}$B-t{ox)_{)~*anQ|yw3}#RimHtE;Ei09%uem$JL8YO=r~vquKKC z-HYKJL|nc4McF_xAsp{XsPCsxLbOer{*~0zPqDnBFXy*an_fv?B-o$(q=eYK^nW_l6$1Sd zNAuo92fA*XsMYX{H;*d$;Euys83bTdDY=8F;M<2C(_{%-QpTu6^qHOD$MH z``?l^1h&CZBx}~p4&1i;NfTD^?wB_HH`czP28tE`G)w zB^MaM@XD(RW+O0hB>ssL?u+Wf@(;yBhubz8C-{wTQGEx~N`~P1Aj+*eIGvPZ#kFgj zYX$Vps6-@aLnMH7X^^$4QE&)@f^~R*@?U(oHiK-@7f%Y+rPA-ej4pT-X{KLe6s1y6 zd|p_2^}UWLdW?SK`t-aluC#e(3OCZ*#XcxRhIqi@0S*&P{P~~nTsQf`uY7o=2) zoDQR5s#)g4y@9W%1aW0U@28U^WkOBw(e<#5yr0*LX#I5R&GctWy;gNLgO8a5m%OQ4 zts&ZZzCO)=H)Gk``lS?#C(B;`8R#NTjX$y{>TTOgU&2=}l?|03NO@c&H2O`WlZWoy zCr^AOi#>Hhbp57Cy+k>*o$~NLn3r}!<6YIU%hzw8=$AUun|I2)5QApUjO&rCBeE>{ z!O?#8=3S7)wHOW^>E^wrH3Y7z=^s2=lf4{jPu>o@Gxf6rkRzJ`c!qS4_gV*B8*qI0 z{;&+A6`n~U*kK2E3Px@2sfGadCbg@G{g43B?u#9sx)Z@W$zgpPWyL2S@)VvPzp7lu zbNWmlWB#AiSAPdKSg_UgsTv{_WEL4lwEnz98mj#GKwIGvp_;EhzXnSCMJV#JfF{k$eqtG0oW~4xrFtP+^}6 zx)&{Ct*SoTSo24H5HoKOYYe=lm@8;>C{t(?tGXR*Q7xX%#$&?uuDrcA(gm@djn4kSKgevqBy~&$?L=or!4`PMFgp^rQFwO(;4M|dQNNeY|KOa`maCi ztJ$YgNs*?|XxEm{MO#@H8hsKH_~l0Rwd)M3U+0I!mH+%nb z&X`-JnO<8LzsQ~kkr!C?h#Fx}NS+}o>Z8F&mS9?~p>aXk5lh#j{b=W}?m1m>KeL!4 z(VgqM%cAa9du=0eeer>Jbf=MCqgcQRaemCe#pZntGH~OdHZuM4aIbzF!>>o znH#(Hj}XvwUt59bdkL`yjgun~*R#g!5cjRqVh#SNkfHU7cYtMqDBF{9v7jmWNO|wB z2dbND#JH&~`J@?L>EFShz~_Te^9&Li1oUg9&p+{<$@y*f#~)s|5eXZFors|MQO|!V zqD%88)e6nrUK8F# zwjAH7sslC;6M<1)an5fXx!;Tp^kIs8OuN=qf9|wWa=1z)@$6_d}RIA(CY3jzW7JFWSzedF+)I$(@Pp_*WGi3q%dHN%PLW8*E%8(im?C-g-o z(r?DJiCu^tdp8>_*(!LxWwYn~Zn~^W+xOcZ$D68D$ToTAXNQyM`|f{{f8l*W|JrvD z4f>Rs&fm1WVtt;7uDVEqqQ{OvR@N3dT*OC~&efzxlx_?UA?r|Kvrn!tu3peG$ampd z>vEziN^?B)o{s#MD=$Q-%i}R4kw~y&YBTj4zV+QxuUdlaU=0mpXS$4KBZRl{$o@`_;@;l~wN$FnAKcL*&Wiwx?SVCI9 zm#isAYJUpN?kTQ4yFeYRH%2SzsIuf^>0y&^h6z=?c9vsqV7;ouU8-8 z`gH^)kKiBS;p@!Qq;L5HofDh&HJa$=NkqEHK*8!iIN`~_qv zC)8Ih^=%Y}Z&VZ-kB&`ze!cibe4cdFcx$P13+94o`E`?~uDJmVIwjvdy^V?2=m7|d zhnsA}CALa(Kxq?}qCdIm=4fw9&z1Q)3B;zVPkQ~?M_t#=EcCjKB&qWL-2EHmLfD8; zEah$22tsu;ebN#-BR!-n$Sti0#I|cO0$-Wae1gs{U-g3o<(4KGWD-RT^+5R%y!bW( z6&@M39TxX|9du?68NGF!#6m6+ekTl5*&ms-;ez|K?29RD@3e0)@&q{!Q9vHc@GIuy zt7xWrvV`+~?M0#uz6dG%biR({q1-y$q_SGMmKzJ>H$Hs42XzM;hV(maU@{@W}aGY*x_C0XM^AP(pTU5x? 
zKiJr9LEAcD+kY{a+ojFEz8{h{{a0F^BWW9ofXQlN7F~A{_3tmxu5PP_OIr4FDpG9Y z(}tPT!wC?B84P?Zu z%H<~ccfU$%j!kw|GPP*gTZJvLHZSi~e~O3Dx8z$kkJc&iLA%SkFVS`kK22Ax=&)&T|0gB3qHP1T8(pab3SVfemP{WwhUMm=ey&* zj-B2tGn1!{F4wF1IA0~7uMA+}X1QK1 zSl-HfTU})PWLm^QJY%&Sd^*IcNyDvPG~mhA{MW7QZ}FCdvfJK?Q4zU7JJ}V80~VL# zhu{%||MvLuGNi+UAJG{^oN`-gk7XeF<#XN}AO0I-R&I3lolCa8->Bh4PGN&o;2N{K zgx3$RRnx_rm6$y%AEbLG@O9Ab^86<6<}WLWvnEH!*q-x2`_Z%x;ZItj5zsyMU-&j?PLm=}Opk?qokKW$A1$xh)Pm_pj$AKh&5N;b z@zac*sVZblOSCArPK>~{i3ffx{T;FDZjQ7`Ed0nsY9RZ;e*B?he;y+OuiV)U zmb+irhtTXTvS)TMGtKd$3q7BvVVB@Y|AdGxWy5D==zdH;{-3>((3sOn))3UU0naJg z!ZfN!V*fA;xQxs|Td~0C!*Cjs2bdlDbj3$u+Zegwo^iExn(wEEh$Ityd$^0%Y2b{d zjpluCOd90uyJD^yZoK?RRPjM~$#5gj$4v`QnvSpE-((Vf78s~L%YWIleZZi%XPit< z!Kt_^N39D6=1d5RxbWF>Np+3;^PtA>A)k~vjKIOpo<0U81brjX=mKyqt?;+&mh&{+ zT=oDN#1|fz_;HkTpVu)D?TFPvmd#XUx6(g7@d z+^0TXsCw$ZF?bfB?Y4UQ^hbY4x?hG%jHwaqRVV5I3lR`-urp)C!(h#U-#{&f$*y89 z*c4LT(K54HdmNwgCUTm`HB^gr4X=1orQW3ca zkzRbZL@0!z{3g#bBXDB7Jq_LR2tS;YvMJ}C_~e%@oHNQ|5gOP(=<-rr>jFfc{Ny3k zGL!P5iI2xz?|IdZSpLVC16|~r*h^1K(iHPTBz&xh%eHr#$^S)*k@!mP53PzFj=vAi z;^6oMi{l|=)+S{^JR;5cM@?NTO2DD_g<9ET#1nM?BnC)wYtG*KtV58AtWstx^=S%Z zEBM1>Rc6L^LHCGHt|7{AIE~cIzuR`l4bWQ=g;6`ODYI7H8$P?ve{xNF>JrK4!NI{1 zOl$<9sA>LjEhiJGAGRGx<7GdEGEV+`v)~p^8yD|wvoK8 zqIk~d3?{UY`q%YC%PCXleLtc-JzU;f=9=7^=jrmp2bLACU(7Fwjv-!gz{Ab-?c0|> z&T$Bw9I|1j*7YY6-R*L2fz+bl{`~fUNIDm{idOtkQ~liinYjRD(u0jKF6XgPshWkr zTC?Jv;I3?dlt~o5-Y*PMgQV8f-?o(vC1m2GtE1C1L3%Pgd>X}bvg!9J9N-0Y!$-DC@CqS3NFZ12%4m>vEb6_*>sqxvO`XR> zw=T6FWFfPK1r3jlvfT-!B;qaJT7>~QJ!SC*9G#z&ElhM_-*Tw<7N<5ykf&((f)vUrF+y!;|N<D@Uw}#sjp3dpJV2X5S2dFW8)ZmjI>BOB``eYI;GM+Lw478N|p_Q!ByG!?JWHITg)-<|vM zi)H|-!B2Zpq0CV7w!i@AG+h-pR?O<7(EgGyMl1W6QXvos7O|gFaWtePl58SHFiDoa z$UXPc#4SXqHwl_Okzdy_Rxj#lP$^_VvKJKyoBC_y<|Muut{H793_73k*~sxL^Qss} z|A4^-WeO>g5|SXg6@y}p?y>@QG4dLn6?$(2?o2%c(G^r`wo7^`JQ$f>s3)TEu(zzo zCZ99W@)_Jp#b4A||KKte7H!p0SVYWa&Q`tG5ip)5FjxORIc4u9NV_`l$H%W{*2IeI zpo(_m`r%5cA1M!*;D7-(QhWLF%k*zz*6;iqdG^GX7D z^XDs|?YEc6`cc1OpQEqHLVe_7bNxLK#8#BB^afZoRW=`*#@rE~-+IV$*c*(pFx~-B zjx&vm-w&jb&J=sx{VR*_dW~gv`_Q06Ja=}+BW){cRV?3xjL1C9KFjsV8|eQK_Lo#u zO!tMW^U;bxn#z~&w3=Nv(bKq#4|YoGWw#c`5d-#Ab`_>xC9ZSl8L)ivH9R5^CrPNx*k4i+6;D9e2O~@R`43X7fmKhter2bsORUDlAt|K`b zRbj0I1~IlCiWbAy>PMW`PpWlCVgHA=w*aeZ+x~{7JEc=Vx&)*f2?>#uZYk;R*dWq~ zNOwp`Zo0d>RJu!Y6VlabIh>}iMT9-h~>3B zMtNwmt6tvE1hQCyWQ#mG6;oquv+U;Yj}O|{xjM5d)dV+U;4cfV*i%H{*Gk*!M0um{ zxZ+M)@>>|3yCBTIi7?KsY|esCG1yHPzv|kL$l;cps`R%Y)Jz6*81d`+Re4wR52!bN z7J!$n7)%sT7BEKRw%K&CO925LHUdi3p;r6mwz&S^wp=)C~G1=BqrF(rHS^e}F z#2?M#+G>s#=kX;9E^rgi6>YNk3BoRA4d(U2ro|wGX6T|s2fV#l;GD35ojuO%smhxB ziSlZ%d=GVezlpFOjfTjKv|YhpA|Xve*tNl;#DYUI$B<4HFl=Yc%o74oYuAXRwa(80LjYZ;1>%n|4kP6OYe zgWCD^x!%o)z!S9+?}J`Mf21bC3V*`+>Xc0oM)Zm8W&ioo6h$2=IZXkZ{Wq!x*I`ke z?@lM~>gPN8Ep7zG{p9UBmj+;!SJA~$nbH~~>f({!z{=zvUsLYpJ$#{aL=~p+mg7(4 z+U<$q8)C`GJ5wgA)VEWtd&!k>Cx$hq_2p5}Ne=qJ#8;sV9M{YlpDW7QmxbJ<;C{Be z_IV?0@kx_(k=pAK78A_bEN2NVE071mhPEm3oUg}$XGILZY2ij5DMV<;#zC%&8==Zn znpi%mtRd8NOeq4-^f3avozKA@zvUO(BcKGu_Y2$d1bOPh%Lh^oCCfq_$X{0GFOh86 zkwEeAFkEiur+f2X!d^3$$nCmP^O@urs2nssKf@k$^xR}CdaaFS$K2q%y;iGAUvMEA zBm|GV<^bQi4YdasJgb8hNkaK>$OiOwW%Qc=rKdbw{S?8XGwarchPu(rMsy66Pvj?h zf-@%?oT=-&N=_?VopD(|2lC;|K6mX2i$~b*e9^hW8HWLVrsUjp?{6rg-bLk`w1S9V_0;|nivlQzM2FWn|brV63ZaDoy-g4hBR0jePI>$Zb zXO{OeED>DE=6G|G2uJEuHeaKMt=k?CN|vL*90FXx=ASL_8O2PU>Y(@~Bs!chZ z-V14}L+Fi?;@>4eO05Tq>jGKtsu-^Tdef<-YZLzb*AV45Vap55K`@{P6n%3RfZ{ zd0J{;nlCo;4wPB>=8!Sr7+kXXf&+S!hoJ=iG7_A#DoaNK&D4Na{oK{ov4jZ|Bug$@ z_zPB_^W(jCd{l@fd~2wRi*|R#!<-(E{XMy1;Rb|Z?WWGF5P#Ioq6h>++qX@wK%zP^ ze2jBzV|?{%u!1N^aH;)6TrRwdzR@CtoRuJiH@kq0Y5jSc(g3LPi(T^@xT*zZG;NO! 
zWzgVI54mAoIKo1^3b!Rn>%As!)LpZ^Iy0Q6^DENO)}U!TV1d{Kx}nZNRUhA>wk&Q} zBDoB;`77SbsST`S)H=C&iz24(h!pFX7bKQxo~Q>ah)dHJZ#kt%Z!K~X_rlKA`S!qT zr1EreEt?o!xqjuG(R|!$*Q<=Nc!v>tz;YJX%5RTq*<8m}Vy>5`Lb_B46h+xRT``_U zu21PxJNSH^#vfLpD={6^w3?GB>2Or{#8~6#dJhV*ViPLqcpug-SNRd7rrP=P6SR zZZ&N-8gmLlXYaks{(-A=i(T!p@f_xIywCC(GAB$_#s-3l?}R*ET`bf+ROE{$vP5>&@TO6(Fy|E zmL+aqa!%z|)slTlU|A*v+j?|Lo|l{5cz>E0!Px_%%ha3(HP=S&Am{T$Z9&kzqVmhg z=#it_ijFHpXohR^qpyskyRd$Owp*iTqs8m8{W^l6k10H?JX7Pl&N-eFOP4!)^=Z|` zeQrXvrB6@iT!NiqQZ}_|vwfu1zMsx=%-79qc%zf0V}B3TBSAB^7v7OR6?$QUn{zsM zDpPi?gr3^%*7woXxZY>C62s~;3wvUYijGk)wHLIpfAJy7PE(?M$RjJQ6jhRAD%Rh8 zOtzh4ST2zI3fiOOVA?})bw$VF+llSbU~XH0SGP#*RuXNh>&%e<&?bhuTJ&zik!0f0 zFi^*WF*^dU;iM9;OQZtc2hwQ#rBPsf4RuPD1VPGgnJx2F*kPP1BdC_g=J}Uk) ziaBVI9|K0$GRFtvs{Uk@Gp|0f@yYp_YykoT7}Aj(=HTgYnPyux0k9NoPtk&NoZ6!WST1jR<$^v&0<%%kUZnAVo>p~}+a zL(3cj4TqZcYssbzE}?KLTnx~6d@k2ptO18*BCK~lfG5;|8T_#&2PoK)N$4AeXN@6Y zOEL6S{5yIfCqznW%A!Br@puC6Tx5?7oVkV6U%kG#5{+J1j%GVkPm;`4+MY;A%fqJQ z9R2-fM|9nhu0OM8GfNe+GX5eF(p=ZG-C;uXlzqy)+sbDyLzmI=h-XDDGL0W=)=ePd zG`e8K&goQw>Pd`s#I99*{e|=ZagW7A!g%=FbWhTj4}wDMJueZKEqymnoa>%%*!2a9 zbbc180c(dwd_hy_svW@(+2{J-j(52%*I|!)9(n4LmTExL!nxU|}wK-_)PdXkEv&#AeAoxlNi6B@k`#(;&$I-^g z`C6v9>sXG_)^IXFU}E=XpwOSvgQJAa+lyuz?U>1bRQme;(VO@u`(k>Z(*s}XN|cq6 zISD-cDwdp%ZAC#(v;F$A0C{s+RPo!T?3;LpCF_vWXh#s~u=1#z7ZnpYP877Ose|Dt zun;c4q0J12_$eI;165Infw;d0lRTmn`%L61ZI}(YUZ!cA*ViZDz3Xnvv`)d5$@io5 zzRr9&6Sz8L%I5oQ1Kn}yDLXH<3`E{-Ql^E(&Lulcz*Sr>qzXTbLa{rng)D>)yfx4m z+^!irZhNd_aHi9K<(D_rq(j2uHfvR1^KA&^=3T>Pw6NN15AvV3!y;xowQm~G=P z`w`CgQ>V*DuH z9?9pv*R5yz=HbA^1bsO6ZW8j&v%L45W?2?IBQ3*dxs`3#i+jp!B?R!FDkKC=m5>i6 zJAk+OaqrL0FXLoB^~ak`hVdXMqD*PQiUWdUm>Nu1Zpe;9*)4uQUl@GHE?5F4qB{&AvIQ!y!L~ABK02t-T4_TkkeS|M+Q^ zfhK->I^5v~jTMPoe{q5ZwOAH@J-Diek9;yeBcmE;sH8uq0&8NI1hrwh_bk};M4(E? zvF+-Zq{f+e63hSLn4WO2K!{@}uu6LxVJflxvV-$*r!s4Bl#>U6Yom}5GnSE>kn(8~Hby+Z6x4iEPS-mvnMonM!5Qia=a^Tb& zl~>D{D_m|7gFjB{Ll`rr6*APGunVZq{f8@qC|l-?`o@cl+}MQgo2-mO!)x1 z^8G!VfM*BZEutk({QHClB;K7z#p1?T8`?S%5XHKLoMY-aJmn>E#wSC8gSDCk$U86_ z0#%MB&m?d+5^o6n$YiVEvZhba0#3KP-fyjym&IADH4o#652sCH|erkG*j zlihR0Gc0)J@!ee_*RZMA=9N5exnp8fpt-vFWeQ#l7TU+!D|~!%I;ZkV0x5}Y4wd;; zO0uU^)}>`1(PjX}JES6`j~yQ}d*N~nYF|q1sCErQUT>o)kWAUD5wo9wa z!wtrDlGbQi{!S-}-n>VvzDVo6P#7w7*6l8jW)S+@1rntAQM+~`T3anotwt79Qv_0A zEjb#hT*JYniU~u9QT*FUFN$_&sw*5o9D3GAb-~!KXb!1X=t8!j-Qb;Br}YyljWVx8 zqQl(os@{wA%M(Yd6g!|sxKMPHa!Y6myNV~ITYI#zDAam*x31eal_*|lBX&J%g6_$a z23Ch5cZOYHZ=LxOiUXnb^Pq1CUd?9_%YqSM>X2Cz!6UNng^`T8sCi5!X?F+x^2VC& zlx|nDezB%n_$rvbGRIAt`cl=LTn@|A^VvFfYq3ZD8q zPlIFf*F%8ab||Uw2C@3$s_csti}c{6sX$A6pVSv-65L3vY{IXhJyM^8=ExmVy*GH& z_1}hRfWng==->*Y#<8C^Rrl>&1d(oQ;6&(2JhdgQO0R@+ZFJ_mS6-f$K`WHL*Or`W+TM66G<%u_ zKVOfxWc0PW#-&ja$kTbl3+7LNNPt^RbS-m9^4d09({h=(Sry;KKWjJTwe{ge3z>Wd z#!jj0J0*dMThP7bWX2Jt;%bG&YD>_io}T4i@*`nmgJdofP8Yv}UD{KQ2{JS|uF1dv zm(f~>nHt`8m>P?4M%&_@m2}6NZ6HJH0i6juJ(wN_()}HTd;FG`g&pM)V&+zo41BK! zA>H>usguX}MiGa_XJ*L6o=ZwgM$y(EOMs0g9(-`lRYzl#OEEysDmvVWYZ`RY991d` zkosr@diI7y?h!G0LbIna$Sq;G#1hiK=)+a0_uY1F>B|GY@#Bsw?MYb1J*h>m8~pci z9x(UjKzXtzuUjAO9#HsD7cQ4bBz?3x(!C5N%4zY22hbP|B}~qr=W27+BjwZ(2nSzC zaEGTD6|Gv4eI+4+g*ypVBa1yqjj?g#%A{HujxRXJca`c6m2f?@BsZ{{BK6O_oT(Ys z_7Z0u$AY9KK=Z!V(B~GFIui$+gZLsiuCY~-wnuQzI4G919FXyHThj0A;neHA?$*m! zTn1|{v~Y2&e(r)eiXs7d)>oB zT&K?il)5Jlwnl76Hl5<1Uk#xEZ`B!#c^LMo2Do;3qGgQ(d@hi{~7~TMNV;4Hh^S2lpl<$@>uy? 
z+?d4J5XT`ASAjVwezl^5Q@adn)i^WY?o|4QiGJQCTW35;2QyhA}n+|no zI`eY0CuuFaEo!%-3QpJboBVzsqr>}I^^#=T>u=+1k=U_A#c$gx1!L1rNg3)0NqiVI zH89RmFHgK)&F~|zDwN803T-FsHuA!U;`1rMS2geMe~_Hg=GHPIAhnJ3eLu*?SmD_j zL!Kr~yD~v#QTm}}h)83O7pNC@USy%EpLW^->>k?NKI+~^ zqd1qJOjS{bGpQl3whUpJv-JM{g`Fe7AydLl&w#}7#<9(;s6JxLysO2~ozp1dI00Plqyg06rv`+HoafJg?38QF5U)D!@HSW`etm`ZT5v@z$&u)U&8Ly8 z)Ax|1-Y-r*l-wV#No(w{yXawxR26A>+g6Vt3ozdAvA1j$vbM z-NFo`Yia|sW$J-%D=CB%5o1S->y!In)TWx7qmg%PZ++)86Cic=cI$!_Um>G5Sdu)l zHDV`#U{Letb~tl%OCOD?7aNO&;*4LBY^xzokEF~Vp8+){PUE)IKV1xdpf>Sa@DkJs z(tF&I4B9}#olJl}pAB?N_33aEyr1R9(K&ohz@%g&oSjy?{&k+Ph;X4(1-CjP$;zZF zzlFsgx?blc99J4rvb8=@Vf%MKUZ8R-;_+*Ywn7*WWLul_wNb&rsrGt(B7eyf|GIof zSJ^~F7t2lUptk`|U-t`RWK{Hh46XfD=TSgFimua;g2TKmFrB;K>b0p#tau{p~kIw-ahuS7gWm0VA0lUk2P+w&_DSv9wx{v~2r5i&^atjZ0aF-fsWSRsB zi)ZC|@``G4l;cP+Tu!ah!wLsgfjMeRz>k9lwi=key(kRt6MiMo^mgq16iPhIWpi5U zv-@0-7`&8LAWs!qL!aO6JS;=(T2LMB%KZ?T!;5p?V8olMk~j{r?^Ct1>`|qn&VYG* zX8QfYSNY5Rmn69gZp>*c9q&vuQ7mWmc1R|%n86xMx?F3R_G~#hZY1-uWbtq|+)Yq| z#a}aIb(OzXTuryY44T96$G0!Bj1pNm_~5qEgLg?Lq41YUP>*fVku48XVJu=+ypLl+ zvboW)U%#aR_Qzwj6*?){{#8nM%l5pq>>#D*gbKlKU>SF}*P4&K@T9gtzDGD3r8KUt z1>l#~VlNiS;Y~)6!LKDsTW1BudPW^=_<7u`&7L7Q-W;=&O>K9ymu8gl-dwIZeDs_) z5J(jmPf?3I;bpPzEqnj$*}ffnO|~BVclV~P?zNRz7M-{_t-9-T?K*+EvFt&Cm4S)1 z_ur`z$gh?#-VcNkG8R!L^4i|(Rcm&~$$9WKPLg7`Z!cD!M$Nn0@$#tjrS8@F_({nI z)mzIk+e6sl3^`7EHHFl~`8`-D`R?IUL2Si`^k6E#{LoPzVZHqmWqjV%f&s=w^u5S# zwqfG+7v>H?*(X&_zN{?F*?BDOk_P$gViZiEtS|Qm85q%SheC)@J{B&H`_gA>yHKuW{9X765E$qh_@*lYR?=C9 za0{ZV^MHLIqXl@U)o7~2Kl3D`KQM`vB(W8mc=tjO^$RbG^~Y1&k=HOG8=GmYMAk#u za8=D{YEThtMwGX_fvz{l zjU_et{MmgBFv%zra?pFrNFeDk7CS*HO zkjeRqnU`u+T$(0Cu)a%D;n#Q^9Uga{%&9eK1Qx#<*G6sg8)GNb zZnQ(#9{5Zi&7XU0X$2%-91bkaF?{uo+AvYiSPq35iv_mgueW)vq(z?cD}0zaC7rMY zI2h{i#z6jD>+QA`e3tc0yI2O+o)J)Q;U(tBF7%={{?i@=S`U9=b(2{9;XXU=2)1r9 zQSxK=9v3L}V-*5rUbiFRyq2y@D&uyi8Kil%`Rgr%jMGBT{^hY1kB(f|s=@%=cDWiv z(!kT8QqTF8WqRJNPBur{g9NAq){crtxhKLSVI9dMqzCg7Z}B(h+vm)!rP2A9Lasi; z5f7-{95%V=8IcLxdyZ>kcyn9`RN%O9{xEep6eo})_ZI2}`Tb&^4w5g`I*0FeNb@dO zOFlkx)g21Z8ah2-x!5qbC~}Ee^_tY_FMI;ea=_va1MjEJTNh}KK~uE2mjtOun0MQ) z4HlLM4W9O<*(XvysvM^DL?8`lZSVWwcMMYu-o#1>LEfC@=p|?B~-9py}may zwz0pRq`ar3zwx@rI{6{W&eazV{(k2Sjvy8lCGy?1FP|R^pRFKQz%F}c-wZu(4qKY(42(kDMS|kyd?UUx1kIxxEx?OjfAL+fBD& zF$=R@X7aRvSzG-W)H&T_ZMBF^Qv5-GYd1%D5d~jI#ye4ksI3U&rnEh_kLz3WIwyF~ z433-Jo<(eCUgT7Iu}=UaeVb*qw{}8G)Ig+m%Go?7(PEBwY07%iB>uakZDQzEoXA11 z#lkwqqLo2-?{+?8ajAEFU^ddUoKeL!4@6kLsV1(W#5?R==8f~(bXGmr_3pdFni^e! 
zIuAnx2SZ_WdqIM?r^xdb>~NaHheDlv2f5v%JmIb-(jSuO(Y~_@@jGXs*(JzfV7ZnU zT)M37r`&s%Fr9O5H)p9bz(gxaTL%w7-av^4It7ZM_!|F*m9i4YGVbS{p)yjIXi3=L})2(e3A1B&4YVc;1s#8nb}t-yz%GjTw^rx;{U zRXW`t$KSjDT2_w*>34m!);7hq*cd1yE<~l4Rc_F0aL0?zr8#bpzJMugVDVaw|hO z4>hlM8wBa`#9wXLf`~5F63yRt-L(eEiXiuI+D*5=szz83C4Rjbr1{t&iMcVcUj58z)ehMchs)AeL=-I>Mrv0> z1H3NuUFp~OdUXCX&uv&&RD>hzo0+&I)Cgl@KeEpt8@=qOtoha{OJexR(yu@9sE%WwH3mjE zDI+58MwjT_zIRW z|2@K{3b$a$a+HJB=X(63{KIKF{z{z$jnuap1O!JTFUz4w&wB5LR+p-UvrCcA(r+3v zxG}xV7SpI%LSBJAOxrktTVad1AF&N`WsxT9!*SINaBk~)AyO?|(;*sX6W|(rJ=;BC z)uw&=Fqi>j=qa&wSwm_99>j^|nGjlh%BkGq5k-R4bVdGjZv(??@pYDFxQIfIes6ug?FLdtreGu@S#Pa^7|2AV;vFo<2y#8TKsf!`dY88EO37$EHao{Y!nb zN=hhtFyrCK3jtV5Q~2+Tsa-RPXu($18gX%CLyPea-m_6?7qnJ{R z$L3R1R-nKUF$?WkkJk?_>@v$|cZz<|r%QudNZSW{7Il(@bSW`>pj2jp| zo&E6j6{E%rebTVKIn5~Mo_V8B&PpZey)+;TS%{i6QEyDhZtf#UQ|~|}B{U@_F;tWA zQxIu~EJWpd+gn``&*4G+$5x@Ynu>M*3`aN9WI)}(`zXoBsMumSiS#s_aJ*aFebXQZ zx54!XalTq@BVN5(iMu>vQvnUWeKd2PD#Ky#s82|g=|5HWwo%)YolR-ziplRRjC<^1i8mi1Xl zI5~)OZey8fzE6T0#@nfa}|pAE}P1#ND@ED^>e)GdsvGuASUqm=S$rkPuynJ zP4ri3w~xZ%1+kYqY|ySR1ui@Wc6-anV^kAMVmgw0cVKzF1S_m^s(UWmX@rp$})f{S8oO|s!ReiY`UD~#OCyN_kR&UEL~UYMQn=4palw7 zeD{K$@x10STMqCZndwdt<6*fwnZCF8($n!$38VWtuqEwZRUNT_nhC!?bo9_ya%R)% zP3EIPv8u>1NQE86_>7P*WVg4_8A3Zg0rrTP937=Ko1L6Du(<3@_GvqPR?M+kfC*K1 z4}PHy`?a$EcI_pOy!wcj9&8NK(H^8^F;C<+ajDw=7{Q32%?J%lliYqV@u7x1uKVNG z6Be4Q`8ZuT*)34}x7dJ;y%qOrPJh12M^>7T0gY1}#E5H5^ERnkYLM8jN{Yj?d|bk^ zU^3vH7QjiR`}K-8_c-w*lRDn+!TUqs5vY&iQG8LZ>iFvkP|j z=UgIanR-A%LV{Ynp6SfXf*@T+vbm zvTi>2p2~kmh~%=oo8%sg!*5_Nc)s#HY$}9&TRw5OSN%q85<3~ zi6XA-v5>z>*{5gFD#P&1;C?pfs+?SM)IQ8v=StjJ^4Vf<0iHsJtmCQrGsSm0!_*oE zy`t^%qAE)wP~3j(u+=Gat=Fx)HOsY*bL>odE)AId>8Pz%v`=h(Z25~_S#+vV`zJ_Q zJJ{)-*v|A~0u4qmrpit*w;wunqm=%53_I*8LUnm?x5twEH)3XuGNdY$hH;OKXGtbG zdd=uLif=?^4bJNxBmeF0m5_}wr+%>K+<_RA|7|BwZ#$tP(1cRaMiVo!t55R&X6`|a zaLw-65o>nK$@qvn9P}XZMBKF#9Od*+VFXP}_~vifUul$@2fEqK!yo0+GY%RK1)$+C z_O@^A2EQXuiXm4@moSKzz)y z3B94s!%(`=VngA51y;I!8px2g#Y463GG>@quM}VO@G~Y<6Y0qEh2z!qnhrcY_$Z>n z5gS;#+x(O*1DF^ylXhQ}0)JR`cd2h0`WNEjDX8DQ_H%A-*2A?kFxgj8=({9}CWKatEGR z8%a7_7`a!QztZG<_t|OS^M}=c=~p@(k~&<~ddpCzbzX-zaWn7Pxq!a$JHO1ILMmd1 zy{HAXAtj8}d{P=r2j^^QX5rHYti9gwsUZF2z=Nm0M}|JgE=y^i7YXOEc#~IFv!&gI zEy)wdBPC)h+p+O0d2010IlQrh`k=WA11}QB@}o)E@$ssE0o)*w&HIti{Be9a*h$Ei zL!m`~p~(a2Y!Tn#D(f@SQ1XqY2v)!aAfM4pE5n(rY(iGO_Y>j}fl2P7`|DqlZ`_4% zz{(+@alMILt|X6!@6OFPe}{G87|^+^7iN?wJ0uKgt9UDbCH8RO(aZgLmk1lyR}_PY zjuPn|v9%8^x)_WpbIBZv86S|If^^0{7qtBa3;)*U?Y|-qp_TNd+M+4O;7O9g7Tm8^ zl>}huNdMv-9Ut^31ny27SnnqXi~|l#v~VZzbYK^0P`C`koQQv+N%&R*dkvxLPbK+R zr2Fe+UOgAVDB#= z>pQpZ%m_GRZh;tGnJtlJJCKgrqkg@S`tGx}x>&`J{YTXNXB_-L2II)-8=R?$?h+U!tQUqAH#tPApm7ts>J!5XSq4AzM$if!=F)arCaG9pX6jO@xxYdTv+v?T;CSm|; z?MaGL1Y6d^GM)cU|4;0sRColvGjFZb++;qb?x5d|;zb^%2O?)K<6b#M@^;+VlFOOH zuE)7U2S#YmME(FLs>14y5(Mw$mBj5%+z%}*#TRr8=D>IUR^+2UKZy#q5L(MYA3y$@ zYqc7$8STuf?d@Kd%Zk*tB}3Wyc*AcQ3iefg0qMpnK610W=bb!$A#%mHJVDsJOiDlv z$CWRY!4AbWWnxdEOBix}B{lw?NGBJ`>`Aq1F-MI%BALS_ zA?;4QkB?4pw>=-%&Ee(5HZn?gM*bllF$XQi+Qw13#PS55EJIPPW#XjN_VzNs0em0Q z&+)IzT8i1e^{M}>rMqJ-K-iEs_v+h|dZ6o{Zux(4-0M!vn6c9|;dfaXnnu5){MJ1v zhQiGh;02M&(sQuFdn@-~S?2g&SSa>LgBrcN$;v&*H|7=Si~-P!v)|3P{38=E!+mik zM|tRD@8DX*8xcU4`Fn6DEPxGFhLK1_?lyjGp)wpDWB2S%zWzZr(~Geqq3eskH1sk5 z#0t(Q4}Q4D7CB|eDFsYmlxN!Kf5z7Xtn?qkpx5}6@(z|%Z@d3$a6LmCn8Ah0jQR(7 z`)N5f<pg=dcH;@@w=@FqQzf|s56UNxK%nxs8ZnX+KeJ)xHIc!M)(UTT(m zN438T+K=N;^w2x&sG9nhm!THOk_vAYtKKsLz7wgMKwPobo3-r-d_%%$ikjV*hVwN%wysGsqFyQW5Ky;R4kl0;U>& z3i~NgW|~?dn8EL#3GvZZDebNn0HByE@9*JGKHHk$W+vv&Gm+ub`K<%_G?9uRQWI@pyf3Rjk30L52GFpnr}2r_7Nc^#r&^EIKCM zja@|k)`lZ#&GAU%O*&=Ma`#s|6e0Y-yW1rc6-||!n(3g`7QjdW>ue5C0=a(`; 
zfNoN&Brt^BJ>fsZZ1t%K@(xMs=Ht6&K*_i28;f#~tUi(dz7XX=@>ouTaN-WI{{}O^ zba;ya2!^*`4HnG)LwDSdzF-GKn19fS5c9Fh7U&Iw?>@>9lsn;XxQ7xKET!2 zFxXlKq?3tTVEAXH$AislvHlUb=7~<1_x1Nvx!Di6G*q;8RB$YmO{loDElDR?K8s;n zIL~3$mg7HS_IQ?!e?PUN_lpVu$~Mz&o&S7e0V7b8>iV+(+B&lx`x|69uVsBk4yR;X zaitTsShH6$rZHz(wZQh{`|K9^(ve>r{15tmQoU7U2JrQDCub-1k9g}gfcxio>;JLm z0QfUM!ni*-TRYHui(bII(-5|`sFC$8Qa zE4r+=2pgL8UikVJbOn48mjA0$e%n9^AxTumc$`VmuPbUx%A zu+X~x4WQ?AHpw%cLzMZRcI?PGq(7$DnAHBeQ&lsB01<+vL8Ov7jwj=LbqU%*aqNn&z4fPZ8Tvmu%n67p-N1BR9Usl_TQ+Z% zR=3&c?NlSA+;P)f4+#q}rmq>4CH^P)c?wFQ;z{|{OyvDr$K9SH_5QY|OB4`7756;^ z|B-yFDZbi48>Anxf%)gZ0KZ2Bz!Fs3tqTeO4%|@`&VT9jFR1u=&_bZ0Ux52dj&Pzx zfc-7Cn5-{xKWpm(jwXr68q@E={TmrO#nKs2b`B5im0!GgkzE5=YvVU=VUad~Wo;Co z{K)<>k-@0xsHiOcf9Irv&Q$*iWI=DwrxsCm1c)i1lWaItn+sgqPcl3DsR|pYV<7D7 z{EY&*D#$wmAQkq1>(uXm6+)3dDE~0q`DQ1Z zxjNO}e`n1vfPcUM#Q;uvoM)rS^$PPCr}LMnVuF&9UIVtSVsk(mc_-OzBL998XTIr{ zgzdF*0W!70AGt09TSkdl4T!E@h2Dta+l@HRz2hG<+BLlA<8K+1OS_Na=>NM@HDviM zCLLfuK<7Ht6K8;!WYAspF=PA$v3 zNc^8%uR6JOJO7Iu<1Bb5LQo&eonk3G0K8*-L`SOuNxaF*(k~Coe>MK^2diQ2_;aBC z4lAz@y1t=dHabRC5Z9n-1s7Yn$++a>w+<$W2d_v!3~k2GrSx+m@9yV+=O!H& zv>hXu1F(}CI6oJ0_(Ss!I|qByyj2oH1XG*kfOVb!;#BMLZRMahxPTBp=o*^^=7G$= z6v$BDiN)O@e=&3lGcN*~zwj+@yiaL2N+0otb7^@HgI;o&pSe`b$H^7Ae%$}+RLrik zB%HIlp^|>!JNlGsV>+4gu4A;g|8Z_8?$+NY{>05p1Z-r)RI$G!G{_I@SD?CJ3U^1<43tv+aelE{cVT|`{H4T9aCXt*N**8AI*3iTiP2(m=#f#%mT3}L>U6 zH&*;q=a-@_4B+kd0M`|GjpXdS-$RQe2Apt*l^3)_QPB1BD&@$3c0LGiyH7<%`rGJp z0KYv#t%#0#4_zPqO4<0&4#(_mPl>rxeu-KnF-YMy=&q$_WRy+XdILl*#X#gDn3IzI zBwZuRJxBK`FUQOWuM+Xl@Ofsf=3pZ&UMt^wxdbUJ~=H)(*;D(NMYsma3FKe{B2x zvwsr_Am02PKFM!vKm0vfV0eH_rlD`@UvtuBHBSJPO3eKadHgLwz{<#DAfq!IfW>;O zyl+1*{O?&9Ch$?e^2cAX-b)$)m`#M7F3bQMXY8YstL2OQOKJY2D_u`PMK6AxRd{-9 z+M=A9`(**sR@nBi_>ZLQ5b;Icp*da>YbafBiGV7J`X$+jn1F%4C!RzPw3VxF_Uw=5 z9f*GmTD}1Df|VDl);~!|#{;MZN<_oSOJG@K_ame9pJB=F))8PSlTo{S0KmxKYT*(w z1qR&uN=SJ{cSX4);_Xb@Clqs-Y-O4M(c`!essXkcTx4F`10mFpCIA1zH2i}BxwRI~ zk1~Bc1aT(ofmJ&y`d%l%VR>1Zukf4wzX<==K_u?&!!?kydVbwPeFwEoIt0`6e=XO4 zL&|&o>xTb4c=!OWY8rIMxWD`cz*tcfstx_;kA9v}VEUV6mj*rL{iP-{ z@qY<+oNeenZjqajy1tf{<$`?CADs_iNr?AuDLJLU1@~^=^_AbP+alT+yGoC;qp75b zt$fG7UoQ0Bf#Ji*p5b@Rotn|z*tEX>IwyHcZez$ zZ0o$jXk}sMo~EMO#OKPZiLzbVNhe&1NL5iBT%6K2KN&T5NXF?sq{f?Oo4usCq@+{b zd6Z4wtfi`24$~%jmw8Wzp;C5Nh)2m0L+Z0QDQfVX*;h9j*gmN^#p`XkpsxXx>4
4>ZB&=0{zeJ!tD zD}WcG8=Gv>1o0Idq%NbY-(EN{us4qTZPg2)iRPO(7{-028?+I3T*0K;LzG7{ogj|00G0 zYzQwjFD2Lo)FM;}>s)G0l&w%#lQ*W6k2C!E&rJH!NfBuY5u}|c%_B|gJL;zEWAZ*y=6IY9povN{%LbOcerLy zJdM|ZCVk56^g5Dgay9}VeS^@t!4sM?)3Pn5F@0G=C4TXpDhxjl*M<21OiX%2+l27~ z(5@~k3mr)VyK702_Wb4ic0-8icxAW#`SixQ-M7BQ#;RY+FhSg(+`LH|ZyQGaKWdmv6=o*G|T-%*mz{8j?TUJ3XeGJr^-)QME@uKb;ezc@>mgGd0a<)x25 zKK=%msMlAvS{+y*QwqTOIWXld(hvhFq#z0;hJTGXMCX zE5tnbDsp5&K(RpD;)_Mq^HB@lmxGv-4@>%E7-=VTJ&L|`P+7Xh!EExg-#XHKJgFBgpl6b%md*=TMX=v}2%axLnOmh7dw}rsM8%y9# z+rIiQxaBQ}3))=V<=j>qpO{0!1PacXS?rF&IHQg<2^1;mesa#P5M^ zOzE0DcUmrj)x!-dpDaYxeGr%MX1TDvTkc$)F%g{+E%P!gw3zZ8u1-#L^uhhbbDAoX z+YX(FwTvp~u08BRHYE{RC!n1C-fsZaFUYc5uAZ23hYkvv0Kx0U)td2iv3l;qR$0$6 zKpIa|fui=EY2N3DYil3`vPdAQo`HKwO5!;VC@L7}swKc1!f**ru&kt@1^pf2KvyE^ za+^wrtvlg|gJH*Q4u>Pw;jd1e(vO~~v(@MNC~r=bRk?8szx*Nq@bbZTETH1f>#!^5 zTBhlS1!2i(Mm0&69i`{q6P&#eV%G1}<0QXYjtU1PTir}wEl4S?g1Vj>Kp4>M6aA&O zfWuY<@~7_Zb;6?zs`)+dp9|3+930$uxMu7R5D#w`^nUmNFR zsY;UZHD}Du6_jF6daHcliFMtqt@XGN>wu47kAJ>$AY+zvh%scxdb0y zh4%+w{@)@osJQuZkDrj+IystNxhtrHr_ud53V1d7l+SP<(N{0-5?pfu;svtC&6&${ zw;w29cIWc^Z=t-~z&rS=ix%f6>Yl^>Mj3R;jfkE2P|OLxiL{CbZJgnBJ^eio1lToH z#2u0MVG2p#(}BYOkHIAhXgjm{*w3V}=%Nu}$6AV!&~^|meOq$ixezV0!6?R`KPvq6 zEjmd+BCyVEtYvu50lFPxg4zQy`p*tlj1O0Pm8uF?5`AH(e4$`gqhwj$jb%H44+JsR zK7RlNAJ7i~3BR+kvh(J{JHPwLfUAPlqYPVnnH%~s#Z9MQ(c$&gU<8I`TFp(|StGO%?=O2?>*pca3FG`wsvqOR zA%MP;?-r3QGXy+67O>1n7%s^0waBYi`JRWpw1?Xz%|uof^;Iszf~QY&9=j(D z2Xo^O7>H=PWOw`6YWgn1^^<4OAIixm^D$evWJ~{loV`_4TmSO~TD%l3w78ZarMLxm zE!tAt-7U0ukmBwdoC2jqf;$9vrx4ts6!+ja-~YP5r~7ta&&pXV>zvP-*|TTQ9{YbA zF-mMT@llaUX_2Xn4cQjXER7d)m;Z&A8$!BBBh1BlDxuL1;_M7}>maGpH@O!+E5~2G zJ*GcqLcLlQg$`SG1JNr+HqgwZ+uHA7Ooq4Ah-6dGJ8r#3F;%rZ(VdA7zJF}wYat=$ zQ)H0G#w*#^gRfB!|J_TO5>PkNB@;Ln1>w=c?WIK(p#PNr2PY*f>0l>cRrCnqpAw1T zU2_@j-``$)8GB!xw?HjkSNeHJCIXLKtymAw+IEIV4!-xAU+j+YnD@S1sIiKkYA9ht zKC+~}Twk?+^HnZU)c>Cwy5r=U&UYB)SL=FG%b)`nX&?rI0oLD#eAky9ZZ{5M*8ur!qx1x-xXB;cetX2Q4dcjuc=b@%#-!%(H4IMGYeIj@gMRJLg9(OGabws zU!mBW5}*Vr|xy zSG{E7XvFN_Xn9rn-JYMv2Rz8IYM1MVd@L$tMV>8V`h0W`V|oMJX)U}%bB#!Sbh&jl z!u9x>SUTu4kAJlaLy&5B#zwClIq%P8!^v{_r`Pa7fk(u9Z@Y6=Z}&U-a99ajLYcy6 zEpmS+N=9g8IHp`a8J#3Z*lL3QR^;U&XZm*-$H9fKm1;y|IJVoG>lfT81Is8$RNuL* z70?2yvfOms-HvkLyn{5AIP^rE zKpYx-8#Xxo2mZga6Zp>c`sJxejwaq=?Szr0paDASSV5C_lVF&nm6Z@qVVkiY=v^={ z_JKp$czc46lwBWy+`U=Y+uwf&F#~~WL@y_0!UnI`M!S@Z2eWg?FcNJ%c=p}p@_#ZQ zQN8b8mjwvD13TJb-PC>e(oP6`m%^Po0y_6KZuw&O9?yic*p&@i3Rz!hjOfeD(FvkH zxV2#Kw$MKLRX-$czJp2u;%R2}?izohXm;LSScDV%*_(EXVzlxGEM!E&ju-QPwFMYa zmK9LIyFO^<{okw8_s}K4b0mVBc=JKS2^^1j>2oD4)L(yRpOE86zg{!~E$jiH1wltR z?B>J663%o%Mt=|jjW<_6Uv9q%LWT(B$5oo}&-Z^12>Ed#Uvec(*EEt!&{YzjMl|B; zc*a`Bv>CJ$ljdM(n9aRLRc2}^;h9{$dQhv%IekN($3rFIZ-`GT&K8bOokYy6mT{g$ zw?kX+TU1yGN~(LN-ZcwueYmEHk8wYQb2DIB(eL((2n#O`ZZJ8noL~q`c%T1(9%s-@ zj08eVh1rxs;HO0C7RbkieEv;i!MOrv<5QZ)m&S zg?vU{afj@C9?YpaH}8-nhsRP5CE7Jrmnwla`+Q2ZZx-eP==36}M|ynB?=5o6OQ(yK zAwIt}3cB^o-m<8-m%dDS^B-&_H6Y%!Q5%`E7~r0y>XYn=Z@!S)vx$7g2&lXr2#TYP zm@ba4>eS-E8IY@PYmqekjETlx`fgp0jxT5h8{MPAudpgdRd)Y$fI0$pjEd@;LIhv^ zH{&3eKll#TtiVOQ*%OO>*6UaMq+uyFiq=JqN=X6FV|P=DxIcG&d3Y_42601Hr#8{B zjv{8O!P1MJc76`x&B@CEXyYAB+`#b){6DO^?ntPCdha;-A;S|nvOrG>bWX_;@f>b& zTEgv;2gQa^&NqJ0sbs5nm?yDsN3KcdLb@fKthU!!4iU-xTue%YI?R=4&X#EotPWo* zPP_C)y{!S?d&nRbNLfsZ? 
z;^M1=^9@c0W%~8MfSHa}O7iMC!hZ-;Vn*v6W`EMF#P25`swycZULDMj%vBhSg`srx z-1kC239^jx$dR?gkdTl-hZ7agwGJ>QWl7JS2xvQkUWxs++U>zy#Rqo%I<9XS6{oo` z9p)=j`=Uve7i!|vea%-()oEpzcmsBu?Pi$F+QfXX$=LMppNwtv9RB>W`u9^QnaLk@ zF2;}GgW15$iW<3qqPeo1P#BT2@A1rce zCV#S4^unii&@svI#UCc@NvsclOIsSI^lws047-s978Z>=mui|h>-`Ec%5YGy(nLR} zL5TxX76d)A@IeDW4yHXJ2-^(8lk_~M-mXY=254&#j0WYu!f+1^nWTDy1pF^L!=+3` zn0h;hEu|k~a698D;8&yl-46+J7lV>8byH0=5o)GhF!A;-9m(@;Ya9rl5Bev>3)9#q zjTaqWe%*t0t$~2i9G?dnp@}26SYTeSw3|sk)K#7Bu;GA}{|7ZS&_NL|I$_9A5Wc>1 z;V_$#!)*~Z3IVEnoUA(W1u5a{%Q4b4KdBZbg|O7yArH^{bQAw(`U0mmQ*+IptKxKH zo^8RI>U|^75F=>8Dm_$A{9KHv{W3_UCUfx@ZkxI{jraCz22Cz+bOHFPnl@kAE!LIg$?m!*-Oj2QH#lZ4e1(ZRn&#q`12OGE$tmrZ z)5^t~rD|py<2NizjcfiN^8D}07V1owx+V)`R2KpJ$nB!Gr+^YRnS79jdy8?q&vc`U zNwUNYJ}}`8v-em{k|7g^W6{mBd;HhqWuZeKGs=9*C-3{g}bzg0BaJ!*+#^vx4G~CiUN6mjzK%Q(JgU!=(~$ zqLq=*)zd!dXuv-uPNw!5{b@M}r~BbU^iZIkgO{zuuVb&~Z_Z2Cc7AQ|CzLo)Hv?8n z7v?}#AMZ)?$(dbH@89x%yUXkgZPvqWdTETAp*vri!Z6W01d-kq)G|;|S%)jly z8ABm)P#=yy4LahdU!@j@u60&Akq~c^TY>b`+IO{_uegJ$r-R$^g`EElJNpY;K+V>y z!8_Ku}kP|5{q92nR7*G#RzCoqPYjS-ckKD z3C+_ViW%}yrG-z-gm^Kb8uVrmy6BJ|@t;7=rmtJUuig%xG~8;Ri$B429M?+TY{a?5 zP_i{$d>t6t_k@=yusKoUmWuIo@RkUEZd$X+w5eEOPbb53Bn`qN-n**mQZd_(mCxTvbOGdgjjD^t$v&#ay;YO%}LfDbQ;P=3=PwhQUSdN~WD z5q9=BMQxp5xQ7W;gsHy0b;HjpGlJ&Qksd^`M>~}YqQ@B=!^{`Ndla16$l|}r;39Hk z2kuouuZC{oyNffNw#KQxS2lo7T5axyeUIEsmI`o3A-7hu$(m`)m8=kni#mF^p#9c4 z#B5L3FT5#h>?8sBt6YkcI9I?(>_2pOCLg%PGV})X_rC5I<wny9H`N-H_GrO;IsRB_f#k>jlDs>9iYRQ5IS9H8o?L&RR$D6`&pJEey1h887NPk;{p#$ zJdaQsLEBx#Zdx6#MF zwzcx0ReEw~=z#^Hq1i)QM-C?lnP(%Ad~FGvE)?cyYidvzbOIOko-iy((DeJn`Msm@7{#EM2z6G^aLM8cyX z26-rXy!4KlASgtfo9dZ%G)z}kPEJ|q@Zr5qNqI_tZ;V6&^6)V-t53EexGzwu-zE0O z#m4r87os`~1s8!yJji?c-Yt9iql_QRVjtG_5R73yDs4}mB`5j(ZTdkYPXE3v+;Tvw zj)BcyxMC+}mW7sGYjar1d`CAjo}Ky62U#V@W}e-Z+GM)ZrF(qMw5=N2^YT@&hKIBO z2My1c{&!zZ>u~sEkE- z;1eT^kj;+<^Yl- z$!gqC{W+W7L~&@|vq!25AjoKZA(E47Kw@UpN5^iKkII~IinRw08^Lac#2HvgTC>A5SOXsX>tFT@Q&LEw-6xLde;n{h9m_D^ zTnhJ^@YghKJjILV@2zepJx*-;K`mnTZ=j>cSBqPHy$KnX*B`-T3^%CJIWW(e4u%@6 zy@wXp?2>B@(Ot2WV(ntxGFn;(MUi`dfC9vU^;{Y4c+};pGI$ao9(mG8%!Kg7Jqt?8<6t3uvJsxEtMhOSNQsv=`9=4~bz3y$ zh_OmFf>u^!EwV z9Fh0aCWb6VkBy;6d-SS-MlkA5$np;7#4Seomp;wWpG^0~vv^7=-SBL6wuLk%tV!C1 z(qFx4sq%kV6>GD&#v(o4c{r^)jwMG#9>$Ww?)2L46CA^`9f5}~Ze+B#kYXvVJ)koG zL#}xy=mYfVSoU5p#BD*$WosF-^%Kvridhnu3by?1q>H$uqN)$GoNsi8oW<#lz%?|7 zSt~YKAJMKuy|=vl^FM;&BPJ@K1}Oz)4#)r8`o>bLV$r|mC(FCQ2`ds*$kvaG`=<4e z+o@f?PK+0;H^zHfqL4~4%#nHViTyYTr^Z2ZthH^j_T54)9Cx7+r z-uOgLt{AggZ`uzL{-5M5I!ImzFHuR=Ga6;4o>JQ@{+8G!x>hxL!2ZuK^4bMJjKu9W zalx$vW`D5?8xDU8IoUuKz_V0t!FKxo*Y8Rm@gL;#vE)@NLA!Hn0ngxC??4a_dlr8q zB^ulN5gUM>x1~>S?&6Z)@p#_+Y;Vb}OV*pli8h|5Dm$P1t9cXPPIXie-@wy)FabsF zp#Wav^6`xlKr`4631Q_K7W-|e<_O%j5^vrE-7Ki#FfT`j%jU7`ad*!~Hy@8x~`P_!h9IZ5^P6 z`@|j}+OVlr_)zhN=(}%q!cbohtTSr>=H_h7y-V}g*UETU4_X*zv&~X% zhJP9w>V9dx4k02ncUQNzk9YfY*ZNju=fFzDQs>{N1-#q1b$_Al9*nsf+sn7m!Ou9` zwvF4YAf@Q6={k7~wn7V@WaUCK^B3rgmsh?!rTo5@Qzei6U$(jPqHx}@sWkqM=+?Y1 z@+l_UM>o9VE8|!(W6f#>dv^+Aa%9uM%vYw5?9SQ$+1|CZTNj}m#NE@Jk$nr1Q%K4C zHcfW4{m-Mm*Z`d2)BW!EN(3Q?fFZhJ^F@9e&=-v)Ek5&F-ck<(S5)HVv{zTdSDEb$ z$OG2-*D_j8?6*aR3_mV%Ys(ew=`1c~Lx&z$C>2ZZw7^@xB#Kp-9fR{ml@hC_IN;37 zcBcqzU32uQmaK zdQiz!WVibt!3WQwI{sFxow#6KTnT)CyhSU;gj)p$&N&^_Mb}Oee#;kk9@K1xNo>0p zwRnImrYbl+E!!eTs)k2ei1*rTN8F{0@rdc5sG{sqh@%PH^7p4s1N?PA+JSeXTmJSm zMRbG%g%*fwv6<2W+`o!`AeoN<t2Tk98Czt<9#z5n=OW zaCIfq=f~D^3it`0BfHLa0p|55M_rW#Pu&jV7R_Ck1!jmRZQ^VZ+$#)UUW_=dk2@j$ zu~=X*A~*K;P#;2D;1|u_!BXNpPd(LIW{2^vdC}zS1RBNF(YHQk+3i=^VTfa)M-i$^ z#8x(?L5F^e5mQw^>MP?ySt`v|n0_D3Bl7cG3+w#E_>=Lqm>dL|rof@dHf?wgpKY;} zC4u6g-qH+xevzDPChL~e2fxza34Xl+52$dz%*L^i+){JmwR^NhMg9c!M9lFD>&i9u 
zo?YjDvV*o9{O^}>YLU~WcB(RWyq&~kMMB4qfw%J$WJNlTO35-_H-i1Pj)~W zg(9htZ%$uSpS~&KyeBy4Ie$^X>gRVI1iO2^7P9c(*J>P-?F+34BeUUJi0=A`DCL(P zDxBMIE`gmtydt}r45+ZM{7uS2I|whIGGWfh4qW}HvD!yh#uP^V~wJuvvyHTtx7vgV1y7T zQoQ^)N4(UB4WNike)TtbhuhgC#^pxt?;0LGZKRV)G7fM=`gQzc7NwB!lUu4|w_$u! zLDO`JHb!%!4TDg`dFSJZaOU@06d-7|99Me8 znB4Nqfwk8$F8y*bC^t7MNjU4~Va`T~)_2T7?(dz^Z!tRGB3iBZcME*0u}@yyaorOh zM6F@(W=7z#I8}S^M$Og#u&Vet`Lg!}eL21k?6qTgSHvapB*gef7avxf(&zHyx4~S} z$l~l-V<<7pv%DW~iP`bp5f35~T|C>9TAnDI z{nso*FR9?^tV@#TIWO#_aop?w&8Ggt{)4CV`HkBKg5o1uD@$6L;E+n*ET5f9?XqF# zu4rJ;Hp7s^ouQhVngD=u2uL(}u{Qx^>Ss5z+nOQe8H)^`CbXik=#N3VuI^7{+;XoW z2`oydkr-DGBUf!!7*ibc@5;(3B~ zmt8L8sa?;RsPa8sTZNqq&}`hV%v)tTMHciEc4K>f7f$PcnmiMu?x&=^$dk}kf;*ZQ zbdm^dyfSybd)g2I@RkLsCNBcgyyNYN2#8u@DS-mk-QIb-ZDmd>V-}E75wWb&5^_&S z<9+6R_V(G5OQ(}xp}~0u2uiJ+8Nmn+OU^G{b57a=xayH49UcqxczS_XzRiILEG4}3r<-R6=^d_L-V=;K%1{6s=Ke$`?X9KER@mz_~fo6oj28Og~OW=q~ zg-gc){wz)DACz5cRzjDo_s@Te;&J5#)H`}w-pQ*3`!7YlU!uM+Y6cYtB!{c!d40Nv zSQV3WRG{4(#v|$Fgdj-`R0#FZ%4=TZ*F{FM?1_amf(cZjiWfv`dc%@iS!klz`x-%L z7W(=Wm>>4qKa-`fYQ+px=EkhR3syN$>!)k#2&3&r~QRBMnFC&kt=zmzti)f^%3mR+?)T~u;B5~j(Ru>uh-SJi4IH?e#+gS(LMf`$+i=?K|?60 zsdJRV?G7B0Pk8p#NKE1PVquiww0{S@Q9RF7$D`5Avy=-&zfP*a+AcfM9*&B=FQ(|9 z+FpFSGk!twaVsS1Glu@cY?JJj8GhmwxP6OG($7{p8~r-nn0T`v%cGFI3Pj8~r@;C$ zNonw;bLK3VNIxjEu8ew9`yA@%I2fqf}7LgEBTMM)5Lap}eoGqG(R<6Z{YB*nHa>XJOXOifHU` zzh>9k<(r+4gL{5ewmDn3(U4(PBT7ZlU3M%fs^>PJ^fn=8j{P-Ed$w|rXo%NYTU&B4#5BVgAj2*v&R|oX zUJSukWL=(vN?S7N63X>;~folr1ofYZe%cwwYseFdW=YNsgo0x8tc| zy?*%-S@5u-8fvNzv}`m~7(Ad;@9F;`A_7!98=ZU^?$G`lPo;k`jpgyP-WDhrDwb<7 z_aMkj!zc8J6ugiHv?14PfRilv_Ew6ONiCQbh^!pr{nVyYLJnOzAFUrWF2rSw>NJxQ z0h~%Ne9+dc=;#q=1GEr7qyvo^c-4E4T-E->w~eU)i0r*2=YiFd8a+=|r>jgPk{jDk z#6Ck(x36y20!~g0VtPP{gg?lgh|*K~o<3N@>nP5W3+tYh+ffG{jbjoW`Bk~Bb)bxOEO}iI)|HQ58|Pa% z58$LI7_W>tE#R=Ww{$N}y7|R!Ne$?5&hy`zhZt*!81Kj+--GEL z>b|~?J2>;hhb~fGaRYghKe0#>PfkJd;!9^3=+p1TX`(Ax(OQ`>^y@daF}S%E_K-ndNGJ^xa zxxkxaS>ZXO8d{xi?+}f>cJ$M{Fh)0HA3c8dyp|`wJ|OwZ6&UxlPH^0^scN9pAi>Rf z?PQA++uqikjYYdD#4j=mP=dy0R!ikRI^#8ViI~&HcV0TbbSmqwp~`~w*c1;6+Gm3s&U4QNMd`9pzD*a3pbI6n{HJ2bF&$Z^w8?lr+F9c z;fEBk^+ou8r{f2%9v|Mp%q8KuR3CoUk6Iao#oCS@c9p6xcgen0gliCI`X@ z92s;9!MoZQ+XRyKBkZd+CaW*pM6mTEtRJe!B5!M^S1gdGQ+@j=$dTAs`uD70HGqTMHgFdG{R&c&NJTEZE z^y_b-kO-*7rRfxv5l<+(2}bwNS7+b_dlOI+hJDu<=E=j(c;~$7%liUyW!zk-1*d>J zqRw(yi#$@yu^VT^JV4K8;$vG^bRSUbAw{J>0!Gkd7Gi(MwEv1@OmwZr)y_q2n-Gxw z&AkRgg^|t9Y+L zo&m*!=s$^q;vUu+pxEGO`|4@btQ`~AWwX47J~JcldqJ)i@~wgI)cZAld-SCpFCwvH z)JMJQP)$J`q0oL8`ZV6sGYCd=HotBb>JJH zXfnNBd4WDec>Y|hE<+w8prZUyt{4yFee9|>Qs+weGGS8iM)UOgKR*Y7OP_P zZgMm_(rt7kOtElMeA5-8i7Eo8sBMHIGl$M+%o`3T`k5O~q9TH`Zdx!=|d`1jNP>h+~w9Zob-|nQx zVshSNqRr5q`3z8dh5)L_(*f%?d0=b4priX)f2XyMc6#yOU&^{|rO-w`5>NIHcT6xu zPGHe^(c$QwRH6};s!Ke6Wm?eFPRa#H`d%N}@s$Ky$6?K`GoC3QS#atNx3Ne(YSfAr z)MY4<@KW1{B;z&&$qJ5{xuZ!zVW3kZds9t=Wz3|mSwrh5r=^CgEsMO?>UFU!kvsvX ztbpt-u@b2<@RJ_zLE8x)V6_=L$a;GPAbi1;cC1Q#>=y|C3CD- z+8);*^gm<(U2^xC1x)NCkfP(Gr0i@eE)$n50kISC$+xDnwv(NmZ%sT%>DT`WzWz_O zfn#7y0b=H!Ouq^rb$mJix`tawoqV1RwEOX)3+6-(|I&ooP1d=68Q^T_i zH8ic)Zi(NzUea!s`U)h7`0yu186ufxy3VTn;>%_ANsqAI;sC2C)ifMQU8qwIAv2y- z$)|(x-ONUrdq)&LHw>I#nIwf!_&naOR99NGax6o*Cb&iiC_4mZ<~hX?a)qr3l+qg58C8k+m$TL#gG{wt#&oo?twEwUIV_jjMP-j(qM1IKKWMdem*(Maybe;X*} zwl+1_RZ`|*^8`@MV;@)xC^pb*>Wu6@DyY(^j8zhBR&$d!V;o(ef9S&;OfzJ}WCBr{ z9K|TO2~p|0`N(7@9LYNdEjZyF8 zC%H;*$2(Q^7=F{&(!SBueds9?8kiam(aZ%_Qhj)CX(_&RHe?vN<1c#CFIjHqlP7L- z%K#&Zl&jlISt40T>k`LdrrCnFPttNf5@F$3O<|F`tZ>E$!;DR_9X%}@-a0C_l<9rT zAgQ8bC;qL-oS^;IeApD6UDC3gsC+DR;D!b^l|l1;H~N6;hlLx1a$-b)O3BnKB#`Z7 zN;L3^IWNf8mlX0j%^5>)S8aY?P`@V?b3imJ(29CWY>?Bn^F^v0a|amfoY8!=1Bc=V 
z8rS%2UrDH|qN79*9-bp^i&~aqH2Sp(Ceq=h-xU(}2j+iN`t2OQ9ELz!ERit)2dHqUNh4nOHh3PpG^^I5FJ~6?(==8uFXNqn;c0{K;$RMDF6* zj#*CjI!Ud*v+2K0vjl;Uh{el`1I2S6in_IvezJ_PTc@)?P!)NUp8r|@Qv-}s%#G4!TS2e>xGh>nMfzyPcQOI2tln-}%h*?n0&mkGs=8^sJsO!YU5U7y8;7-_N56Wkcv4+(-Js zvbX(a8PMchW+BkJcs2-efjT6=arL$C-W)|2koB%nFxsCjOluQikn1qP!$DD5PHmJ> zT^8sP-@=%FxG5gnu5_nFC8q3(EP4LF=DlQ~yDpL2z2=Qn4+y9j#V~$Cyd3{qgJv=F znzIje@+GvWoOV|CX zF9^ggk-u)darC3H^LPj)H6}Da?!U_QJx)~15pFKR6x-X4KIyn0&)~74Ny$kI+`q_q z1ZZ?f5zk;nNn%Uq_Y%QN)q2vu=_7q}vhMl?O)Bj9aiurd4UwY&@C;fYtulZR4Yif$ z<>BU=FK8kUi`(H~29m+BiI0h&bSFjaNi?%>nptM5t>v1b-gss47;o<+c$qBEGCHV$ zG9N67NlbTT?j1-T(57dLB~0BIP|z>#=Nl5^PTi>@7rH~aEbe=CMz(=>zj_s>!WQY& zg}bV(mwG==Q!!CACyaTID-BAUyArG924FppJO|${@eHnYQiTFAy9NNNEV=VPTlBVT z?Jz|^Qy#p0)6Xas=@Z)0I0GD`TcN*nAO7La%`N=CtK|$+Js}6Hvjf>5LabiOMT=WgJm{h z1GU|N)jqy%QQ&(;%#!Wx5LzO^wP3j+f<4EiKDx0-rGkowViyLxNDR-CF0VH^0op{l zwPtQXapGtqvytHvy|$}(kik_xzGaJ0>OkiQ60GKl3}7FMof1m`>ROs^QXMl5m0EPjyGRvm3Yw zJ9VqexsE(5!B;^IeW2yATYcQmBbRcx)nMMZfu!ar%(=_2s?_2)6L!tcp`%nA8>YGAq zZ0jv;ytqdv6SQ3L@2s)}y_LEo|zbYf8?C)rJB2uAeVPOpEC*=@*xG%!i11a^kc_9-Ic= zwdx?sXC^i{+l{(zB0UxKA6{}_Y2q!Wt*i?QiUP{t(l(-93T(Rh0}DqFl~=VoA`fIM zS(9}X{(h*O3K&_uXMx(u1;qdVK+BIYsX(@E0qH2FolYo~gbC0oS(Ly4oJ7(hLs zUE1Fe&3s~>gazmA7fMOq24cUro^~34>bl?{E7g0!7`$u)sf_VZ@@sO0^AU=gzo8)p zH&gFDUr3}a0(Nb7?32B7DY%HSS4TU(!s0STfl^tak`3|fXF%5?JoF8EL!b5C}6ww?I+#52$vf^mDX{!J+aq zZh|H*d#Uv}0>d;q0s>kM`zw1N#;-(vvUVgp5q(hhy^IyeNS_Q4mF4Qxv$gm8FgmLq zs)X1_jCHSZ!J`ids6Tajdi>^`-)Of4xRO_$-dymV^EkBSCc z4=cJ%<<40?RaI5{5~F&1tz>+Da~TZ#^d#ILVZTHs(XWvD0GIN#Bs##qF0%20p?Et#(sz^kRBYQu#{vvSSYq0MRZJx&rUiJf>tQ}=!0}Nf@(FY5sH;D<> zeA_jg`+^i_7ExMtEg5tavv|WNHZCxHgBWOGw#!RN*;fBBd4El_B!GSTpRnV(^}ACv zC5LVIYDKE^nwf$3Zl=w{&Nlc?rSoRhH+I5-l5;k34y(y7FbzL~yV@L5Ozw8geG(R? zVYkmKmk;=F2-=sqZisE+)0Zw`_uV?~Xwc%#R^ktNL$-i7;lNB$+#`np+3*YHOxop- zsrf-iw*@9&&I*=4nueSB0@4(oTa=8StlGs;{%E;!P$A{eLD`vq*QQszGoVsq0^_Ww(7c&WWx= zCa*W^;u&j9*s3*SY;*KN6myJL=8)%!$?s%1wObZaZ_@g^3-!#U+5|J+cNFuhWEsOz zqrrw=xQrNr{I(`BVIF4}$@7CA_BL=Hskj4!xda0#VGpHWQK6~|#B&a{0|>^BXN83L*~xl%zQ2Ka z!Lc>su7hkBmMj@!iC&r_8*V*dBvBXge~OAg@C#%?XZi~Sz&B*S%U486@#Hq5vjRrQFRw9dbc!yCPU zO>LdpbbiXcU2BQKa4iYF7Wyf}PX4mJA`+oZ+QZc;@gy)|a0CWc$xp z+hDoNEwW2hk+g&t;vtC3oxxlCD0|}I(jcDkYXe8&uX`&or}Ih#~&R98l&G%#WsZNWwH(S<6Qjn3!npQ zLkJ<1W8#zb=|;F!vO3<-LCgc9ADzVnxTmnLb!hMoH_#03RU-1hIK`a-=FH<>U!d_5~KfG5GeaqQ>E?;NGg^J-BZE6Eo}xRMOv z88BxuR?FW(v3RTt#bz_i9+yFni*@xpR6lZx75}2$w=~ovXXlhO@El9Jn1=btGD=To z4U@~Wnyu3f&liZG2!!*kG|u{kIwJl!k~<9I_HeQR4$dUtdj_}JoYmQd_~QNy8dGVC z3{R7Wmu7tEZ~s7{ok)X+G-c~=7rsNi9|ecJ1nBdRws<(=C@c$CrxmVD^M zj`_ZIvh*j7w9=V+Dy&bVW_O-91*YqGYkSP({SeOIaht{IxxR#IDGq+l#d1uV0xJHi zbrM44FA{#kV>@x>5c3;S6T&y2#{FK2Dr+vC?evYI?&kpKYMfAyaLh=D%extweqh5- z=Z+P?j9uYc1J0Vo!?>8!pjTFee>8`TO6xgx@Xhm_h?4Wa#6w5n^A(PAtcHCyrr!5# z{UN?0vtiNsY1UhzM|p>o&pp5^Ovgzs{j&zsrZs%X7tEdjUL}`CoD@}fjL6|$J#-Z+ zWZ@r%?ozmf`Xet;NDeBwww*Vz#1^mCMTG;Nrmj>F2byl_VkOLq*HT4dZYxo)5oB0b`N>Yv%Tl6e?jeK=_X)V3{&~LG|0}O_|n>?v< z`64H5K`beTP}~b6AGVr%6NnNp;_6>doAs}^;cdiK8e;dtF!;ZbX-;%OdFuZ(4~;HB zicB`@6RnWnU0HNZiV-2LtGXG98-I(@JvPed?E#!79fqsECt@;x^@e`wRB;4j;tkIc z)Wl9O08JAnv-#|j&Zb6v4G&86th0WMb7&M@r|K9d@*(_f3~cVJw0@^8BMUa^qg|vT zuP?*jxYeWLSqNLiUS0D(iz&MnhmBK75{NSHXB*Zm9s0`bhC8XGs{J~P8^iem@LE-m zn3(veLPcR$X6DcH<4M5dO(glF2XUV3kU~=0tVe0TD7LNg;)NnRh^Tr4)Bdw&Q97VT zH5~4Kf%>S?TK{{+j^`Phn2C&N-v06PJ%p)F#%`%~W?hJzo?M;X(Y|!a7f`4a=8h%< zl^kN`iB zvgg)?M#60hRA(sl>kP~BNKRYy_*T%~x&H-@SRq6+=$Nsci6qeQmIb|^J)MdfRK`H` z0A16|qQQXcyLQk(Q(uz2dyKBEoA&PbU@+at@zK&7Yu6$fj%?QrE3^pZqzR;^A?1tb z8ccdDeZC03@eI{jt~IyKr1hJej;?dexIf`^`6AGg;(Q$(LB 
zSR!Wm&(FUm!2JtJT+E~#S65c@8FdO2uf#Z*oVzv;X%!}tno8cfsGX@dllw!QIjB4kBiyV#saQlM3ivZjj(B;~!m>YLtQi6| zJJa3O057%aDn8NUD@ZcARX;X-y0}PzsiE(mUu&@!`+{6iAy7i~;nc8Lf?3QJbR4W? z)1XbwEb>*Hnpa0%guLYpnA#ZrTpKc|6%WJH6ilC&%%<9z2WMn$*=l~NnHhT8kG6d} ze5MZrK+!S@Y5m1iBbX_&Coo^mO+9NLCrFvFeG^DS5GhNpwY>stqD_9OjDPA;_knf{ zxq0#Oi8A-NT;?$c`*rQoJO@U*o!dEzxBgbwl(7s;N+o$sLxDyMM^TBG!ScW$=ptcd zHy?;CsoH>$!?8SEi8l+TcX+b~nS;O!dvws|_ha)c|Ec;g3E|KuNtJURbO1Dy-pXu_ z>gM!_%1w#bE_#ef=?|+V;k?v+aAX zhPpx(#YViJbi&^_Hd}7qaEIl|gu1AKd>q zh(yX-Q88I{&4L43@Snn`MHn?;PJf-HJ4_*!^ZTx+u^;c*D4++R3$uLK`6rwQ-i(U? zJf=ZUR|XukB8#3?u0cPU#vl1dPUT*8@SNbVswpb;2%ot+n;w9|o>z683%8=J|s{R`o0W3YrbwWFF| ziiT_0Jdf*6T6Ith%aF;krI_n_;8YfqkfW+#vLa)q!e@R%RsGHG+f(E`O>Ed@i7I6D zCOU)yOpnwf*ByDs%+QIN{+odz zl%Q0s(S)RJJLF6H{dMs{x-UEpv?+9_GUfN+`Gs3ADy>d?3Z1RPVLradL7DEkT`PY8 zw1UU7P?ekDvYk_qarzfh{) z!$#;qayAGH5*P1yB`%QYFuC(3?7>SN`|wm_w|j^wAu(P?`6~R|clF}v8-Hn8lf&$V=cbAkyBi&unQbRX0NOuZDH%K=#FvN5I z&%O7>eSK!lf;H>>;=A|$>^+A?77caXcb6s#3t<@0ok%wQp_EDVs7?7XNh}w}zD{z> zIWgQk90!ZUh_1oOvJCe?ul?-LD`z0O#J8X0BF5TjNsaC-f{&OsHgLvFj2>ALN)hE~68#tLV?P+>!H_8abE?L|oMH!PPdAHfr zbm%56_E7-s`-R;L25w3s7?;LZ4yXG%9o1S+~9$E$5i6A1^owSbeX*B7_0 zS16=m^Riku3l(~%C@?y*`jL6Z7q{q{Fu8yaE+pO=)OY_xCi}^KeMz@e*xTKY+&5meNsPODTQ-wu!Gp2J>|a-B=<{f!l3h3 zpUG6HXM15LtBl|c+)xI3 zI2!;!Qa_;M8TdzL7k3(VCbHskXj#X56a=ywmS(cz>Cqh%F7|VaE41K(3~UZ3F=jzU z2y*XKbXdf?#z$@UITP4FXudxT;#F-;w~~)?Y#ofb#z^|RvOCvdVD|?kCWnhy(30J= zmS+`jo+{30dO95+Z_jt6)=o%}9Phz4GTH#ub(YA>TF}e$H^US4EdpYH$zKt;W))>YQMro1 zd_Fqjbfl}8JCMymP09SiZQW}yogk2gxz`K18)Uk+G%VUHW(~UVajj{TWd^Xv}X22n}LN(o)I=I z=OG;%?T%hq8|eJ$U%^Yn3W7n-$FqJMX5OCmS_Z>w%TH7GS;?9I(e((ZRSfazH1b#; z8rGNx{^YMuCunvmc&u^EpoRCm5mw7e>p9JE4$xni2M7wl5{!dF^p{bv$#KD6i6B<_ zC7zEVzV^{KmmP9pwdh~B_+i=r%Zc%eB2o0>{MAmo+n)NRZ*T~Yfb~9(a60*}$tIr! zQN@*tD(Nr_yuIWrUPDuZVt%i!npwj( zcWgbURrezS3o-%Q8XXULP>H5;xlEqCSwKutOPxx?!7hu*Y)yt$97u%K3*oUUZLJ}Fam7V<oYXO2Zofu-e zU6VnQ`n{WduR_fnpf{QH`lzCRmlwa;K7naz*w2|4l5$NDsYfOK>GT-wC!e3k$lfQ7y7-aL+){HMGTMidnB{7gaj%71;Cv9(b@IHJM2Y~h}7FpmYI$9ZV~os8^v zH~k-kOir=D=$+!P6f@O`iL+a%XV$oa)!~=sMDYM+0K>YrTuVFf(@hxUV#{Q}-GP+{ zd1I`r=GkG|x-w6`Hyb<5&5~^>o;cZPI)}7GRN23D;4Vo@l6E-n)CHGD?$N9tCUqUn z|Cv9qw1s@F;?ddUp{+aolV_=A9cYZn(TeB6) zvdjl{Z|c$K+2Pf#N)~xcAA%rj;@6}i&8;^z*3k-zpGkAdPixF%k!wb*HHn^C$@S72w|?XAIxCbZK89d>uyq;XleJCl=$HQ+!j-MVp{=m$ z#u+j7t6-t`G4rR-*d=k=u&}+i_^w@r#d$tU*ZZe1>^){M2Jwk^-}j|xTxK~bm-b8S)9 z=d3x#lG1Xw&(^TVT{*CmcIzoHuASJ|ja(0OV;xzhkGyO}h2Un!L&wFTZ~E`=h9XBS zuITO0YN@at9=F6y{7b*&up{dK)&&2JcI>e<6G9WD0)L_%>IY+7o#P&(62SbnqS+S$ zq#gT8RyfE`SAy>X^T`7^_&aTEC9|QYx5~5E6h&@R?e#n0rB@>CiVx3-I^}o+Dy%py z{}m;8Nuw_5z~R@bsZshQ0l4>*p|hnxEWo)5EB;Ig&8HJ*d!yD2p+Fli{#&<|r~zgF zl45T1&zq5P?bQ44d2%-F?CR(<&|M+XY~DT3j{A2~mqQKMw#O#lOxWsXf^X7Ah6OD-bRtdLD$dTlYi-u_b1KZ_=AHnsEN zW{F8eL?kKIxka~S=GXF36ytzRS2p7vUFFgxmu^0}n=PIGMRG3?*Q1JQ@&>1$YTqLsDe4UqnXd|Q)Etz8_T0jJRl_N4 zlv2}18TQ}p+4x1&Q5TxoPUW?ZC1-}1%v1`@bNMacSF^O9$VFFdoZD-s$Lzj z#{_mD&*03PnHN?|*Q#G>avFxV9WFLjoSUjcV7*E3&X6XtP9^iPk(Q0&&#H4sU)shu*%OE$Yc3BI^%@ZhyXA<4FvlBet} z@(JpA$w7G67{kboo#}Ecikdkwcf?b#un_PlM{0vtg_ac~WVl*txlJ7}U~tt4g72>h z^cSC7=7owb{{Gt8+PzN5Qdwf^II2>4HmIkEL6L3328jS6O#wEK%knB-Rf3 zP6L5|#LopL*^7L-l~BhV)eSn-s2?ngkIXYAz4h`}Rsv+q$gwsnP_g2b(ClK+Zy3G; z!{EfYe2_mKiNnfToYG7jqvUQKC+^X!->9NBg&f-{fFGZBltw0t67puz#d1_JRoDpU z@=svjZn6UHYX-xH5|UY*Ez*(nzpxId=gas)nT8I(uQ&}`cV}@F0V_!CqtiJuzKj6z zgkA`z!@Fir4c^TAM`YhsNDtirMWSbkMBT>S-4Al_1#iD@i8LOZ1@buiS8k(3_+Ev> zsfo5OpZWv6O9lxIKEC2?NFpc@OI<8v`O%OPl@YbOfVVnP7Mv(#59?C$1~lc zm;Lfz{97thqZSNy!Cu*(=CWvCLTE_!1|Qwy@w*Hq)TIEfA2Gl=bq`%|2}?-cUobFA zoX6$8!s07}wUQmUXE^@MV3o*6v-6apitR@7tjT8o;?Rx<)9Q4+SpE%~QGUo4nlH6# 
z9a4f3m&%6aEl$MLf04pAFO*R9Fo%khL(F6~K`6p{35!jiqw7cuVCu>N-mrPkiokmD z+LBa*7po@2rMLc1un7^thy!9+2E-HYBwC68%5Gi*tfgWmfr1t?;5Iz#m*e;&EMlgB zK!AXgOpljgC?36vA=}k{0^FS6C}Lw?krTs|fd<8;&@H~^j{<;=2|x|J@Mo{0FQs0U z@27toGE|tZeQRJj`j#0lb}982&IO!jsCbbfkXfhbf*?^`^d`5 z%7dA4Rf^6?1_Xv<4b;*(wQpt*@GCGT4~1W_q!aT~|Fh4?-yw*TYF$3g*2*Pjp1U;z zl*q?C`&xZvk%?gv+yjLlgc95qo z1^M+SHUDSW{wIU~CvWzrAYOo!h@W)*3y|_FYw-_llE__BQq=QIaCe8LQi1Mjm;W|L zzc29V@vBf>xRfc5f#gCLZPd_Z_!o#~U;|nOtIb_?nGOpfsb@p*=#MIr4p5O;2nhPC z5-v9Qko6!xD<~)|&8^EsZ)Jz->+2U~ zNtq>}6ro!%UcLbeBcPQa5WHLj-~%N+Q!M)<29yv%UX&7{Dh8C|6#z>G9Ih`0&xrsB zPW$dZzy9|p%z#Y*a^Jrqc$zuCQEMFc1t3Y;bJ6v*biHY&_DSvn-6H)PDFkA6fVbb* zQ{-B_tsoCcD?lVaQYszlrPqmD`MGeBujF$pvMxl>g4Oar`wW%U(+CsMuL`aP9+hv4 zdJ1T4G;iYf*Z!#5xWJ$sFr*){m7B{5oSWxYpmF6%NpuqeG3mvEcWxjBh@r?{kKWZZ z{R4I?1WB;4T&zZU*&YY6%d6k+IE^a^u=A(MEHg2H!XFR~OL|C>n( zMQ<;HDf5#|kB(YIkaAW5>btk&r)j59>%z*VUh%mAVT}SeQHD8w3cPOde{GxgHfLx< z`sG%UV?gjIsFyDg{PtSY{$UehkN5o8hNZ?e>A3x z(tenTFQZyxsv2p7OYX*Z0n9Oz{7vd=9wAwT7e4w^yO2s(7g9qv41CV(df={Jo&3LM z<$sIn#RUvZ10Zx4AUlPIq|wcMr30eVe`Tlg&>ksU0?Cn9kUrnUl4c%j#J?F_CJD(z zBm)>SJvH@=n3%l0FAvu_^74LwX93`Vgx9S8sQ|+Y^xWdlHlN{srY;Ep>5WWrB96107Q0*0VFl`&Hr%;FBB_WKl$=G65aT~zA%5E^&OC+`tRRl2!4OT z-=8D#20&vddo2NIMfv&N<`BcLQ#w9ZP{<9K6Vmj<%x*PIgrtTRf#x5#m+5U)1x&=) zn0CRtccDdv3c6Q!W*%JT)eRqjcLDDX=(*GM{S}Qi=m&4ScK!gk@Wp=&&TI?zDzAD0 z{CwRwAxI5?$2+rjooUlK8fjMkhbqJ5AnOlwo0>9nnnAmKD(pToF+gQ&0H_!HB1l?T(!k}Wl5qb^uDLVJ3{d0w@Mf!)f3`YM;Xjw58u!#Pc`B$-J z31m8^Uy-qxacOx*fh=HK9jHkD@nJIHCxq{UK=q|COzUQpDks{Re+HH>qKKKVcJE}d zLw6bS*8Yr{z0ipOMg8z<^DH6h{L9l22m~GzLi6eR1F(!uJfegW+vd+P%}@en6qG6; zd7c%i(s3CTeRUBPeKneH_{IW~M#2TO+s2T6rXmfiHKr=C&&gGf71i_6yJF15C1qBG(Mo5 zw=i29o67L;@B^$Oir;sJ-~W4ZGYg~W1(tMXQ|=J!=uLKP>!m-#DFy`^TfkQ={aRei zq2J(ET3KDK&p`qt8L{Rg{|Dfv@7uXeCBXbELMYqSU5){5ThFQm`91W%U(i^B9N3`4 z7;zWXQyxMbR~CRv;8%uYZ|>b^h@3o=2+--xGDtkS!nx>=CLk0xM|)vSer;c7OMng` zt^MjZ`URQQ*zTgyQT=LA}bei`n#RGnN;E^OU{IDlkT*Ak(^Fp!XuE{+CWEs;ty9x*g)8p}G1?jDs&+a##k z?|rYwg8xTQ8D2T`pI`(+hM_5;;fR0kyr1O!+!@}4 zWH_-m0ez2IkY8B#?HgtRb?_a_eKTla50Nd=FOkfmg{|1*Q5A+++E=e{B09y!CfduZ zF{5T~RE}x~P>1D2Zncly6(HYIkiFdUj`T1c`wJzpFm3Q56R^tHP2>jO-Bj#EnYSMweD0E+e)0x(jiSV}K}#1Ww{j4}W@! zHNIJDBfK~HvhmpFbOsk*Q$6e{rb5Qu_YFF*JQ7pMSJ%`Tyf7*JCV!@re`F#QRMM!I z5T7aagH|G2BY7Oz&*JxJN8+N}&Bj_mKRP@Vaiyc!*F?3zhw>b9h zjSG5;DH9Ozj%YyT%P22{y=;6R54qfZamW{om_@Fjf&Ozbk76-rcdAg3e6bR2Ds{G^ z6o#G$Fu%CP*W};kva}Xef!gS;IQ(=AOGFI!GRBwllf^g~)VX`UKvjbIccgUlH z>z<_uR!$oEE+VUx8V1Yi#K>g6Mc<0=>$ZqQNtYv8mTIW1taKexlUIAi^?Qe@UBuW~ zUs)u*1ei1>>Vh;ij>lrhhlFhFogoRgPVP`WLicf~k(5lw*;x2EG?#cS`1$=l^_hD-izlQ3P`w z?sesz!^68)>tAy8*Hx@shwa>?OwuAlYxs?M%e%!q2dssi2He(ORjTlf5IsP~yzf^s z$RQE}hZ0>omT_)l72QAgz$PukBN#bTIf5+KsrPG(3 zoKD|Lp9eC2g!r4j7UkYQ-Z1Qe(kX*;jIf=IzP6a)NUfKP4z#%~>k)7CdO_^gZU8E*jbehQuDz)HnL@^V<#-hSROacB-u&Kj` zrJS|FN=EFax8*Yf(SgdAG^!BEvXa?d*LsT>&$KpZ7}sY! 
zDODefmAOTnE_WzZkt%%Rws$ysEoipx9HIg>Ay;J!d%ZX{HtX&cG<~r6%nFW_Bfq&m z&PL64m+YN`iGlU3&u9AM1HJO~4`Go>z61#pTc+!PjwJHGW~cG!$Jx;#&R*YZ%JkON z5AwIyaYCZ1YZq=w+wDtz$Pob!WN-FtK3!>3T(2K2QqeUC5pm=`&fJtO6cw{Q?6Y}S z>FcQ!Vd(rwk>;g~ER8RS7s~h+bj->Mg@1`*=#oG=$WaB)+Rsq1K%LAJZJs{@DubSs zds7ZZ<)wjc;hnK;_1I$&O^m0B!tXu3r~f*f33Ny}w+2uoP7YTUe!GsJFUv;G&KR;s8oRVYO)biu9^)d(PuFAd7Jv$@@Ws8(2A*fbIV~bsG15V@1B8n&OLL=aP_V2 z9zoApAGfg6V>31zJdybCrO%ckDnPcaAv*Ap+%J3>wvbh&2Fk(Z=G|l^(uH+HJ~PK` z?<0hs%cpIPV7mM9f{Y*}EwGc}vTrK?q?l(qJJjqw-Jqyj3FuaA94s~V5o1(wxgtQt z#i}17hDf7B*|Y%S3ecSq2oSj$f$b!J zrv*BtLGcdQq2kC`2w&jX&c%iW<>>tpF=jzKU-K|;lz^S|p>~V#qsGG@<%prXD<3f7 z+$^mL&J|9+iH7^-dFwigRr?M%jY6Cryr4GO6tZ~TpYh3>(4>3esn#nzMU^i>;d-vQ}npARF7v0A~FA$M`Jt zKzy*uGl2%}dqi$HKvEMgADi+w6-(k5#V z=Lcb5s?O%MCdvz{ru2PwD7>ljP%%9XL(OokBX^G=vU_kAsGS~k#z}acAQ#I)4=hX` zVA)$Akzme0eO@-*$GN!SSK` z8w)C=BW}V6*ySle2e!u5ck*NMDp;7Zv`~tkmBDk{H>bAS+IFT8LQ{`ZuH*K}BA0EJ zyM^RGzIuDUb?9j8~$C(x=$7k63U{5&)1EQmS6vWcVSv0CxC@y9Y;t3xtYoFMQ zXttGA!M+WNY;s)i75JBUO7IRAaZZEgZ7>J(^A$(9HIqzicn%sS*{0YuJRc|Bb^FD7 zvc6zFeW0UXnErIKU=^Tw-*dle|2FN&Fk98+n(r3&$zm`wlsof7FL4$VP$a|;G{xnY z8iSq9$Lk{9LPD5Qi|z|(i1-N|U)aeE-};Z`oys^~^KECo90IKa8T`Z~<_-#(6~i~c zq?Syd2Yh~1b50JX&pz?=CehgKjD+4g|Dr2QHRk1Yj}-uN*vg=WL1TdH?sJexytmFY z-U_;f=BCoN(9OLC-D-4Ads03pD!DdBTH@oo({2`}hgtIo7k7t|3)?EjajBW19? z;g6eQ$UsbyL2Uk*VpMq&ALfQdJYjc0OEhsE%X%u$FRJrV|{?54sf%sE9Fge zwTTUA>VQ?x?l${;I;(RpseWuJ%E+23Y!m0b5&2-MkNd|v4V=EN?&jtSoptMK&eI*i zdb?2$r@e7rvgb{RuNZrrcA{h7tln#ipI#pkyg`PHECi+R*))1&$1l?GxvIi>o(-*M zE%%Yc%+I_;EPe zEdg5o*)vsKy69A2Z8y?+fXmig0C3p~JMv)KqrYQ_aZCU%?=3gF%@WTJz1^Z)7WOd< z=>1W&7fdZ~6KY<;2FKeSNKH7AUb029EEk!$%Lxirbp`8R@Pe{#d<}fHxB;A1WOw2db_YHj6OH8TIC6k`e54;$ zrJ>&m?(dxmqn9Prya@n187V&nT-Ic=gOhmpmK=0B4V~%X#6F4A%J05HMz{(#1l$$sMgqj^a zf1ka@PtH(SIwlIAhz)(l>9%4vg$(#B!4?p-(naz-KRc~@zN)_V_3KurX)WqrU-DgU z#sc|p>1y8hi@zKXX$TPD5*%@K)u~>etcS00F>nY!(Hb2co8ansX4Pwz%Nug~4Zsf} zW84Q>KywW?Fhq%T-T+DXrpU#iijX^`^SJVF_3h*+sr8Jz$S0~{p!&8vfqD<+at7c` zsqbPvbp5(vVK96K`)h#Z5vd>fU8MI4}=VX zV-nC~@sz?lrH}X4%j4N}JQhkRddY=d3YF4?v@j8>rb1UajwNZphhSeT1A;-o$sOfY zReTF}r0>59tleOxy`s=7XNAWIs1~XVo@%=+~AslDu1YH2k~?z`?V%PfJxU z#w8$6sxhO+^{j<;`x;V?1W-LE^$61DGRR>YIGs5hs2cbz+J9Y^@6kS$LKCZ#vfVqw zxef0Gc>05y zzpc zdLMVK4Jf6ft0`?dYk0FR^?8)gs`uDMLHiNW=wcD%6w@G7yljL7Xbdj)WcWVb{^x3A z*F9_IGTi>|*_Wr5Sko>Z8N3+G>Ffs!i7cYB^r%d^&%pQl0cFZp=*bGvx}cvh=hp>3Cck$Z6h0iH55SZ)#-O|I&Ah z?^TkG63X5aUGR89TE|*v&TO1)OmT$E5&9e!}V$+i~GHRoFZZ^$zYg~0rHK&S* zB~caPadre8%w&Z2or-wV(Gn7=#}l@w%OudxoEB{b1UlgTKYDKHo;`Yd7juoBe}S56 zd1kxpG}gRD=*_6mc+!m1HFoHrMw>l#B)J&2HfQ6H1>NX}(aOa(omNW}li4AAZw% zVDUj@@!s&;hnkq1o#nRUXGm4e(?C{v-1N`389p>@ z9;{&=FPpL1+c7b%N${~kn^$$f3qm?kMp7J+<{qD&Lz2cNK(UBGY_BovE@6o6z|Hq9 z=}1zAXpZ4LDx65C@;Bzp+Z(_3j`{P$v!(WsaqZQaHVW45u^RFA1>jGWoKey#wfNq| z3`Q$#)~*;SHk2G-|JaXB(&rd)4{>wFIIp?&={yC)bauL}FQU28Z`MI9}nZ)kyshX=cir=M*K zItPzGmfDq~HSj3*$&LoP1}Rm2xe3MbX0vVF3tO;mPHh(?@D|%U&FbN-@RAky;PRzM zp$fZ&hX;37P57g->w@T#?}JE~XZ=l^k;d!>WYd#{Zhx1Cn?EWFa0XM}LMz=Kj7cYO zKWQ^q5Yr8=kumi&Lc>DnQyWfo1qoQx5Ltm|@!x>p^?9oJW2C+PJj?pZB{Q+AS#pDe z<2ln~lNKpZF}DY#@#7e^jhmlepAXJBU(;5zSPxJi!or{r#FyH{fA5z?#5QO8U|h># z9F$;)#fMlsOCV+sMS;IBc;}Y|VjYUH{hEd^C+8O}Ati;@d+zPJ+{EwRKt6*Nm{wpu z)Aza7X*1b#^RwWr;B1R4M-yA*kDMPpFrQ(!xjZG{Anr8?a8fF8O=bItH= z!w}oI1ZxR?n?bsL5MEgLl4ID9o!GPWXq+a!0~>d;O=ZeQ%oMxc)^18+k`yLmDeG9B z)%)@~Qyj1aKRgVmu@0YKzq6-_$r~afB6Ks8kf0!W1%>eTt}e4C7V!;l%~>%UMc~#u z*Y!bF$krH|+!&hZFb-sU7~bi5PaNiyaubl2dN+Fszu_W>9X8_A6 zbvjg%28^w&Z@6OkWlpBJ_FiX3Pku)f+VbG2qbQbv9ClB7Ck}Kkxz?oGdeVp;#o|E@ zy29-3YwXO^a?D^;$?^0AeA?9?rc14SKelK+`Ao}phG_yZ_;sizY|X&?yw?%ovl)h# zT}^Fe%$`vcfjf#!tfFcwX;Z43E(yFKsGyUo 
z{xm+8K{gSGvsUHUC8n3A&mQznXCPg=3=PxHu!MOe-{`s0($^;oet$imq+j=BGv468*9KR5mp-I!{XGOV7s&yy>knX<(no%0yu_?b=L zVp1^so*e#NWq+UNB~^>3TmMUSy)>OdqK(3v=N4}}6yb3KAp}u#-_@SK&M0^3eQndB zj!M41uG*9!85XSZ@{BS{P+TxKAvL0QEtS!^-(k53b!pJy#n5gJ@-BQRRrFQ7J{<}} z$lGg}E2qn^A;XWbiCokU9%QE-Wp3JzW>!5{R8ggw=l&oZ-QJ`YeB3`%p}&_rGunCA zKB2YtxwcE`L!eK=d)rhAQS!_p-wj=rSJz4EsoMtf4sRW)d6>pMmlVCty|VQ%vpYIU z4r5t*AR>jUct4IRp3;p9jXR1<7X0u52>oFIzZ%bxiCn zn~)y#IU_}pTQLu~Z@e0iG}9o@FdP=_Zxe8Jt@Q0WNY7Q9r|1*Unc#6y4o5Fw5lzWF z&8%EHl8ibv)Yb2MWK45duMl)(e271tV6zq}bF7QaMgB&cz}7wWJAP4#_ww&Fl6Sr_iu2#k>#nP8 zuP?G{nMx%WS}CQqNI$kK#-VH))q{ zyw!khBq~aEb2DNpmw8f^1F_bkKyi0eHSve8XORTZ2T70_uDaso<;2~|y*5zSZgS@n z*;=FK$=X=W%ePtL<>#oLjTSbf7H_X1+%xfwlh&XwF&<-Ei@ipOqM7qGQ9jM=_U^e& zJ}ZS=>Ot_gPld&=?F4;MvZ*zeN8emy z7WFVFRW6jKbRj}?dEniTaI@TL{2(MFxse*O*6~0g7PZt7Hys6s$0x|zUm%rr=gGR2 zHHKUKwPQ;Qq=}Jkp#Ok^ynNW}h3u>>`a5@&=;-Jg#zw`S{$G08`$8|D&F;V+7sexn z3#C%GX=>|}!VYI%8#~ zZZd}Ac1FTxwBD8&3k2=O5#7>wId3VASHO4>+SBF=-J2hIl{_R5<)c1CGmJpMb2%BW ziRK@tQE)?5UP+~zSzw4XP^Xs}jj8c1C%WC$t&ghBFMsa0g}R2>Qs(z(YeT#S`|pbb0+u8uDg(9TMvqHZ}}D$?k>io23RLf*dE- zPjNaPCg`IaglRnHUpb9oQHZ<^B9Zi{~zMM zDk|#mYxg${T?0rrA|=vN(jcvrAPqx+D^w8Y`L+21uLpL1$-#Xv= z&bd2x=k~pswSKeqyPv(Ecr$qH^?CgIYlaUFz(@^6U^dmdP-!_udE%E$K(cQ22mR{) zB7j}W;LqD1m0XEM8rOJ9znrYvhZ5zt=FBEuu^3bwrf_cwS28oVnlL+;EwJ#;yhkgG zk-V(D=dzN{v>Y`t9m3H+46P1)nUv(rHQ0MrGAWuH+EAMFDQX!9P+V?kPCm8!qJ6L! zx~Be$nEjqMr(0cpl+|y8(qi<~@oD{q1A0PVP2TTY9S*KG%!qT@F|JMNt6h)I8tu?C zn`K#H|6joZ4Li-O11CdYJ9pJh8p#HbqP)ENuqurdIlHu8{~f-aisj>+H0F7*l9Rr$ zL8FjrlKn~xDTY;J+oypc$8Ibu1O+FOf*os0ZoQ>!G%UibOe}t0_sQcYJj{Ov>tSB|kq_B8;@=&V)e69r`=X=7`=W#${9 z_EipOL@TcJ?JS`Q`j-A zy!te1hm0q=a_jf%@1_;Ub1j~_`FeqJ;i2C;`S3*NU1QzPFq@R8<)Ce%XQ#sBG!KJh zSH}Yj4zs^z8m@EVx55}+M;SlSq+L?Yov5!p$ZWG)xZ1vo+aPStEn7CPkZ+vVm0uYh z1)R5>pMTA*5*iJc2{kZ0LJKs#uUil!tKJFUatYdfwv*5h`a)bsl&l^5Zs$! 
z?t-BwYK6wDeYsKT!@d%Rllt&bDu!>-K;q=O*yrPpCO^i_5!I%biG#Mk@#Kh0DLJ3B zRUm8lq!LUsIPvQDl8bnGd8gq&OLqw_%f_PCG*Z=Y%q9rjFHJ|3zcp(5s5fnv+eR2w ztp8coRKY!R(1>;xzUDa|i{5IC=Qy@)vu0aVzr0Ux4Juwh@wJY_tabK#yft<=Y}0R3 z)kwk)<%6FLKL2<$T3S|33A&+ETk%M1ij_Z)Dd;9iPM_eA0_-v@mf@4v1;?2cGYE&I2h&UKgl2n%nm;Lc_Mf>%iGPM4OWP>MF2--M{jUUY)Vf;Q|lm z0|T0Pr@H%*TIEWqq$x+!DqU&oOJ5>}{BM}viOw^meWW3x{@BvTA`d$oJEf!y zTgnQdo4kVjv8hNiBG342E(#Q_H?&Nt(Sm_*k{JPrIH z_@);-UvPn2XU0P9;`Pua4OHuucwx@&jP^)Gu_fu8^{_eD$n`u9^icaW=Xzl=I`nSi z?N(p)Ld)^$n$gte7z2XV2qkt~1ZMOtQoNL5ka}v`;j>vY zBt9#n0UA)r#d&-W;#xShboGnMA#pH?s`2Nl`@ab^R^Uw;GV?A!&Ru8YJf!|FxxF%= zCQ9Z?)#~}q%AK-yz=WzD!iE!I1i`17$7%zDaVLHCOaogg#FDDznx<3DC~C2>;np?r z?Wol#PYiyuDYYwNlO2^$zYdG)uO$~|E~dA5FId*H-GWCws3sQirYc5)Gn{o!7%A^e z1kQ(5GTmIn(Ri+Y_mv^_<%>uCNqXN?zHd40#JtLoCIsw9w#-l@yV1VuKb!~(-9MB) zGrv^`RmMnwub-~}x)nf*5JU%ITEOQ|>iV^K^|yUb!_VzPccVfa#ihTrLc{;G+Jk+q z6jENyfq;xHzErk$5sP+-t#(Fy`?x2#?({2^84$*mt6ZV}iT$FHi#CCn3)4TkSOB7i z;Sb|BAhebHHnG2Do%ey%JiA~%K1kr~U)==pk_TQLX}^0-wUywE3_9}+|It; zIDP%ETewhxZ^)Eu>bAP;S2YKoAm~GZw?)Ir?T`4Y&Pun$Pj8fRUo5fazF4*cYJMnI zH7>nqGtz%Qi=uLI0=^A=<0UFMGS?AgeHwCyj_=(XDU;Xf0@#Of05F@7UmGz~O>j~S zMyClJAD){7>pUbJ;#N+g#U_x4V2VjB1K*D+J6`*R&S2?uMCl4Qy9P;>W(4W z;jfW<3ciE3A=UTuu4y%!cytlriI}ucN13H=Yl+90(3ko?kTL(y2cDV6^cWnlTI6#Khp@SVM>0AQE=vi&I`xNaXTV*oa z!PyRrrDlAnC{NebU(&m-mGlUsVV@h3G5#ar+>v11x?pJj?+190XuxHnK}fde^sHsCfq9SgKX z{4OSl39GKjIun%oPz9HgY3Sw8gZ*iG7wEgUmhw5}Thzzwl+ctzt$Nb6=+MBg^aySK z7wA9F#)R#y3eMFO!E@h@nVol@Q$4CE*H-1`w$eFIHZojGkEU$Y_=!|z8owNqi#KM% zpUg8ne`VrJ`gt(8)9>M|$XCNfx^cVVk$qV3FG%TNF!^jzxxHCk8D$wZ2q z$vQfFXi08fDfGSl(}>H#^gXC7VM-(5S1&2-f~E!Ao+p$%&J7 z47d^Jjwy_WF@#El5a!sW0t1 z2kr?wLrr~`s3oTa48(Z%U7Ru9P*>J;=qBT?!+^3z)Yg8{sNlJMN>r7PP3pB9Mba7v z@AG5fs{YAUuG55HG=(tqJYe3bB{_(Vw?Hi^=3M)4)GX;~Q9ZXKw{D0*nyY@P`5Nup zPSQvGEPT(ota8v`vMb5Z&SLs>E|M3W=Z!JXw`sFjxQ{Fg`Dm`*yx^PI#KHCzlns0L zZl_obrfgYglfh5>QMlA8?1sdo)A_{!=&Y>Y1?pJ*mEzg=h~dC^L5bfSFR0&xJVfy| z9B5=Ieuhg*uQy{{TtlD__HzpT+Cbe^S?t$^2V|h< z0setvq9{edhKJr^Eo^3*DbF+C8K!2M5P3~&*NJcc{h zC*q95p5Xb>8&r;-)Gw~X_CWeA+I2fRkVW&+OkL^{_S=pNpvGSn9oD9oQ#q|8k43uiZk0r*$uAn*! z`{dwG?6o^V=#Y#A-Nl%^E7#Cg;gj8h-NIdq&HZI@nS&5TM`23$uWGN-K6x&Fx*8J1 z^wQwya!uITT?xGd0ejd4&`wnfg%bBjk5KOlnwGFAh3Zx(kVD@AA*DS*lo@^7)vYoq z&zZCZtd+LOoK5nl$a0$Q@w4F4qe6&zZm;N!-ZGwePXKBzxWp!7!OO{_*}^vt`D(9$ zwN73$x*+$(JfQ+a((rcy2;aHF!4y(PR^>)BT|n50rSt3_W%BA*RUGd2^}ls?Hrhy? 
zgzQXiVQTh|dV?1cin_Sc@_r4spCNxeQIuJ6h{4Zw zbN>iB>@^g2!(Dujj;9CanYS15AiCY~`@>K4h+T$VHdmk;?XOhTbbMEW02%_)3HA@` z1~(A^OS%9QONGe^KsbrRK+rZY&{UNR2ytG5%{3&2t->t7#Lr~_vAyztKc>DdJawR~ zv_<};5yQtA7V<&k0ZFHJKXx~*K(0AI;x!S%2xPGHSkX1wqoNspV9X{2B)# zGHoC`2)NnJUnZipiCs1KIpx8Wuq9vrq(wET@~`e-0RP0Wi?`__JG7kVfGvJq6RH3!yJN2CrrQoUYVmM zNM10dghk7=I&P!npkcxGAhuc{Tby2=?sGtmrCr3RG9q_L6pS#eAdC^ z)k5BMa>@+e$u%Rr>q+T?48iU`UNUc%Js8Sp(P0k+kiAdnZbmm$c$j(<*zVjM67#ZO z#DUSAs{X6{iVxL__GoIfhOzeI9=9L#@tE`64yjTU7;)eO@@E!BqDyT_I2K#g4&n2g4mcgFF*Q^==`mnc5fnJPh?KI&QXjTmk6 zcmYfAqWo9PNOM4AuG8Qa*MY3g;XSsdqwP?F^~_*Xg!cPRqWyPWB;0Ib7|^`$S#us@ zdd|3^?o4&yiM5T+wlm(0e{V(p#IBr;LMMG8$LY=(+(4TR)_OBtD-s~L zfUb^vV}6+!)(}QkAdciBJwBu&^X34Lr`LL$d9O38zJ6cg1)ioZ3k@~8o>iE5 zJ?LkkgNEUm+T_Kr=Z0tq^*8!kr*P};@m9;zI5aX@EU_$z*$Wq(R()#ok%A+_CRV$a z;?+!~a9Qo&%en!K?!vX~2lVSZ9lfyat?gW+M4C|~vrlL9paM0h9vUxUW>i;r{T)Ap{;<~VC0r!66RRCM8D`|+M z{>|Sm1UknU5e;)az zT~XC-1CP?v$^9HbdbT_j_90f>T^X;dKuucb?oc@5Qs~B2E;Ilga8LXxq!A25in-|M-VCQ!N-*15mc{uoeiUHrzVRc`tNwOcmMz>?wIRC3hn zxW>h^oo$aVeuLe?tmYsS+<(SM7N_TDwv%l^=zxPC^yXo)BF599c4Af{!iQo9z))m- z91loG4delulrwET-u$3!Wu~BaZ0xGvZ?#^MO0OPwbtNC~3;AT|&CQ_a%{l^kJa+FW z=u?PGe9HYbW>HZpb38<&4zxK_xDyc{ti;_5g1|(b7gp?o$~602zTb%;iZ$#f%R8NI6| z*%hNxz!X+9z}goeEow9tY1i{ieHowX?=PIZ>1mO5&157;Jl+&!GEGnBe=n8Sp^eKF z*uoKctuZ8t#EKBX6DLG^(vxbU|CNZtA|HMA&iCx;nq#F+tdjssK2L0@Z~#LWE93`S zvLjdM)1#UQ!$-=Pl6FTMav_d3*Fkcw{vJPlzO#U*ASmnQF9v2Djcs{3{qlKtY?WP- zzk$M=-~VkJh4GmRmeA>_)ipY_SOr_+_dzI-eNlLBMQ52!Q=W%O8VSzq8r0DE4;Br!jL6rk`2zGvB;7ydR7}?iP@W_Irma>_HFrDh zd{XC5qDUbAozJfTIU%COQN$ujqp{tkj7|EE36;Uj44SX>e_FHIA+j>BBeo&d=d~zF z`FUrTi(q}>59)A>iVt?yopLgro5gT^L6`K%Ln7G3t+v)PA9?XugWrodUsv?7d?us9 zWohp|lCE9A!T#wHkCaOzcPu#sYO+`y`$Q9`*_%7!ivb=$83&TJgGW1>dC!9Z3yLMd zZq!~G821Q^$F8XHJI%PhJTqQR%tky1jg|0BUdqrXz=M7}opr|3k$wD+@DoLf!(x)- zi4~~3S$Z=tx-WX_RA1;58+IcT5x~5JRfYQMVPw;9akrBeHcOd0WYku^sKe_0$S3JX z097JAiEQ)=uOGIjll~<{y{nbj@Y`cPn4nh#gzYZdJ4Zgg>+5*qwFmw z^?}}8;Q>a&v%F<|@K4b~MY&I}+xV7zduSDp5=(+u&7*^lRO#_ajP!I}?uvem9}m@1 z4cPx(zJ8*8OI4M`AlH)gCA5_Y=^08)JJU{4U&e|4DqXu)lWIh4&u}^HgFh|8@5dL6 zNXESMr$<>8;q9I7!5_dDVbn7IrX^>Po*l!Jx}=2AI9GMtqh2ELO(DqPx7oFyM)U)g zLXX+zHQ{0A55S^$k%Tg2nD5<2sBD03y|yn9;@`}Wv=PA8j5Bu%VcMm8$-fc?=OgSR z=M8GcwAjInMZTtN;r+F0>BZj~b8=4iRhiQ(M4tnKnWi_t8yfrZ=4ClorRk(e;Gy0R zKAre8NbW@f02}OvD`0jU3c7ZN{Sm>8911UjJ z8=TWWKj(b8!MUQ$8?@tfWh)A2*<}V9Z=cRLD*1Pzd0_09b!SIxT^`q$BBzFqN!M`2 zL#x;Sk$*b2VUP2FS|KEXi5L|xddl9X_;#L=!SdsPdYEOOQNo2Z?RgMN`*`6(JlXF< z!eN+s>5P9&Arz9N(zu`KPe-Y zW((cqCc5^N4zL7+mVrD_1!rE->`JMlgPAeJ=|6!vG zBWdCw>u1x5Hgty1gM9wfJJL0v2R1Q>)KgO`mU&+_x+j=FCDk?Y{-O5gsNk8S>fX=(#?p7AdipCi93t0YVNOFRyK{kieYi`BM>r6mb6A61&Dn9|5m(># z^ydEroX?}Qg9%LLu($v(wva1}iouYJ+~6Lb`{_`N+@sz$gLEGCuhl7|*=`#9CEPq` zc7^7#y%93M{^e^}KfW!qO9`|xr*4W7@V%K8U#|a!FUm%@BD{qKiO8~!vIoKGPbL+p z+WCF~dqg@!#|++%Vh#8!Uxk58{>>&Khc&{e2O8Et2l`grbC&7@8d z>_~jlEJS{8auYG}Ee+R=Rq})LNiFy0@8$2aQftOn*gY3&64pbQgur~C1ch+wv@#zF zPKg7XhjFV8x){0BJhSP#@zu_AzXr}vH0g|rkBvR#NwDRQRz-Hx+hWxYr-tRzluUrR z#=qE!U;K4CaXLQ$9W8l->srb_)?0xo;;uINx&i;l{rM&4kS)QX< zgZ)yQ}UYdS+O|5QNan{%BGJly2<#Izq@BuIe^x>2J#XBsR#A4@}NEdev$fJe# zF<%Klh?mKSJdq&szc?B+fUgTwEs%`V;*UUff#!$h4Nv~;6?QQ}2IA}4qQoOQ3PG@; zx~#PQ?2`k{ek^RL6k(pv*Z9!#!-OB6y(g_Whl^6R;L4M3ng($6`|s0&KrdCe}BcDLcXKTC&P3u7BQ& zD_Rudq|YN)sq|>f;?~pPW3IC=y#S%gSW;Q^1eSM?k5`T9>r z)A_A`BU4{(fxJIOB^2ZiEmkXLt_jG%Wxh2Q4Tx|AN z#U*j=t(CQJ|KAVEuJ*~`=h`dj_)vE~FKoUFGM~34G67ewk6)0 zn(rcQq){#%incc18fS&Lddt!gHq|28!__@Z;nxo!fix+wjOoMJg#uMYwb3t4)njnaweI*(N>8)(`Ne9Q7|zOw|} z6|i>e(a>lCCfgm+b;ouiT3`?e{MOup7Kuq7VJ#1GFg<4GS{?JI=&uGR1JDBgQz#V{ zghZYx-W8c)5SqR6-X{=$Amk|oppm$!6~CISvbg%yU9RST`0J92oqWhQI)$jP~)UB6F-vEY# 
z&i}3#kHvp>3LBEOq&=bEo|8TZ^CsVU8_1HPgIzh43Yb4od_Dhr40;%G(2#zkNrfQ>)MD6UKJ@_DQvhI-cjs`gM^B9;!76?C^+~BG zZ_XG_bpy7&J9UTgLSv5EvlV&qBz7wI6sDK`VL;VP$id7PMv)!}JqR^w%Ja6jwip$K z24l+vgyw#bY)pW1Mj)%1MHI1CR4LttZL0`t9+Mc~Rt5-Lhm~TK1C=V7Nf-X=Y9$~maEE)p0ⅆx*jGP_bj+Lesz_d-^eGNo+77Fj5=yAlgP&|&~iNFE^~4wLHp-36D# z1H4Fqy^7DoUCVW_)`+~pAgfJs--NTNNDQBYz0Ov^q3*yt>1d-+hu@233CAb~#TdR+ zZj!VxS)6W!4)N`wzBp_4T?B<7n1l6roF`E4MPRVCAH1 z21LASfn=oTGpjTMxfi@htw>Q1e!a2#`+dJHVXQ&9`Bu6Kj+bz`ur1ci*sXWmL`#SW}YiOsPIyV#ePWz{W(wH(Ptx zv*cu+TS|+QXL0LytIGk}ZuB0d7j@f!O)$G1#72Kk@{10p!?`opQQI5M|3M_>wU@TH z*rbb#i<<<(Mv~#6q$DJaBRtCnj%#+E{!fV?I_skn#p}z=p+;Tu(}OeD7inoFx|K|i zmdOf2o+qpr&{PCHLWT02;cT{rigaxzOjdkZg()6#^guJF<+S&e^--cV=11rJbUg~N z3wXG`a?^(%ePlvS6KdC2GD+$+C5aRXpx&!7ZfWB?Xhj<~ul`(`l?ER5+@YkKUsDBhPCK8RTq zh>N>tzSG2%9~p)vG6V*)jmg`4hPHOxk-rU#LTXZhF$U)G+>0WAd4#QhGFs%;H^DSE z`zOoBkJrm5jY_Xvxd9Q?RYcX^P!fK|f^Y>8A+jclbwUMm z?9P*>$NI2m^P~M+w=C0$;Y{x(-GZQw)6O)}%VV=f!|e~*rztCN{}?w*l8`A2O5rj{ zdqI$f^nJNvOXHm{iL9!;ZVT;=vo%v~%RD&>$pY1=tu=|a}W&G!WEHS=DQ>pUXB$7*yz z8#|#U@GF5!q{-Z#n4+hA8O3@~;eHzjvwZiGlQEA0Uz$dGWrp>zAx2Qmr(IZuFa650 zhZg#76fnDt4gHrpTm3?gGGGQ!iI4GC~c^uM^x?%#1&-01CGclawJ(hqi)! z68OXGY>&^)?QE$(z#i^ph_)uI-GXYKxjX!t^SP7Luyt$?qBkI|ytlG1!;L0jW_i)8mTx%Oy$INw!eOvV5( zMDUUwWp(0~M$Fq1_WA?sW`}<^-(h z{YQ&Q&v5p?{x%}Xeg+F3@{xu}Cd{CaBSZ^o^M@u(Et~gV!Gg99?t`yj9&C-BTu;=o zmHntr8P?>L7*wMO)f%vJgPz%}_tGV5o+9t8fz zGPy>IbS=qI65o&>L+PeK@Gr0#=g%j#uvGdBSGVwR%YJZ65(8gQ?uS*s{?F9=~$Lz5kxcHocpxqhE>Jx(ci}_4p@td4_00V6w!7G zc)rQ_`InLs%^0I#min6B>EG*FmvYo!?_J9$omNm&mZ8o{(*^B{ovh7^g`Vm^BR=#! z7VFa#ukf=9T-Pckw2%yeh9S7QWo%xr7FkeOo4FGcBplD5%a)yix8E!DGF}>pRmFGp zc5|-(oNS?6`qvwzrwMUzM*efR>{_UI&i(8kh+2_hk<*Hr0iF4|xmBa;v;Rl#7s3_# zK=&Ud^%B8#RcB$~tMv(n4vYzq57a$fzkE)AV1Vaohcbgz5m9ket_%-7TkQuOx{$u* zUh8+!Hz}kPho^9Qblf9}_qeX?VXVtbZySqddg^PPM?ryXA77pb9i-Sx8QTS*??TKL zwp(`R`Xlm~tjm~!rG`8k4(e`G2GHTOZn&-#kUb$3XA;uCobD4>Ak5g|N#=bYNi@^{#BzBJY?Mk$rsZtaoqP)~G5+cv33!@NtDf z4oi|`u6C7Jp5}#*kGOrX9aIt~UGsh!m-?6|WDGrCvEhn!m@<2$AVYn9IzOme1Wg;` zk9lvHHY)M273bmulp0oQPX%r^B-x`5KT@2i$Fh|?LBCnR*g2jF;>yRK2$Rh{Z7%A7 z8Xn0sFsyV0Xn=P~RUmt2KmC7*K^z{dCCht#+WQq`P3k>=@jy)HRMuqH z={~>K@LA;U@(AlKu|VX{PsiY;rf9%=%(8C8)W=o5(AJXG4Zd=^zl26wWKXV^h6N#*#HBwW(WNCPU(=2u!l;b zs~Nm0qm_9bTi?Y0PZ{E4Rs6KocfD~uZ??jK{vGY;yZ48QZGl(`FC*umPh87bH`5>$ zA2{zo_ZUxg2i%mWov7e?71YUaKnYE+Y9;3c`eKVALZo|-Qxw>9BVGG}<;Bj?)mxru z`hpMUX>)hkc!x?b2Fvl3v32QV)@I}K8KYFBj>vZn)u^Q6( zk?9MOz1J5?leurrMW`xO1rdnv#*`^EL#-NW{lUKIOFN!W=^dw5 zXfX120Hr(=80k!Z`<^Z(LgDY%!}jmvWJ@cdKeT|5mYyM9GfUbNf$NicVQZy?O;1<6 z8fPp67>m@6fCQ{}Xb}yvw;zAh;i`svMBI-4nCTKyPUWQ$Bp33#M%s}M?B@{t#L5_5 zZbq}FuJ0zsEgKNJMrEVm^ao+GS$Rj6yvzjY&BT$oBZr0LK5VhF*u@pt>YcTp+ddu7 zYx_tm43B(e7=u9Ej@#RMcwK@hG2s+Kr3ITb^j>e9N7aE?MFR=gxEU409rful8#J6) z$ynq9`4MBAWo$Kp&G4HwD8zw&XY%#I&Z4$&y4Kb@H(hT_?#+jyfNFB-B0D8*JS&sM zvO4k4XH;?XXv&r)SCp8h1r!@9DG@rm{FFwL0wQwM?Wmq$Wh^%vF=t*D4a^nKEwyj~ zUJD`jIJJabKMquL{O69p+Gn{>6<_}Z90u$m;xl-7c$eJS#>Lup7)W^yQY)Xn0r1Te_T(K=vmjMXmg*UICqm^L=2NUD7+9F8@GHkw4^*Q_7N7VQ`73&6#%mUu2xFw}KRLaZeQ`@gV&Vo||2*I!HnLN_(_W5IoJhMs|dw=B*>LUqmX$0Sqq_m7||G z6cV&YZH5uG#eQRy8#F#|--Zic4be^(V19;k=UKMa>snk*Dx)E^zuE(0fB9fL z+3uX-zg)10^gfF^25GqtyV^BV5G3>7RI6y+6o$<+LeqBZegI zGs-_abW#&pv?v_meC%nyU@71QEzuu%I~ey zj7ROt?a)SREgDNa-+f$%1@KE5=CEEW%JB%)$}Oze$&KnLy#A4P#&KvEZPmXiOLh4L zFNA=n0Rshp@4y2h+rIM30rujti+NA(@jgzoc@W?;_7FsfL|*0^uncVP@<03RF?9Wr z(RmiB^ibZ5?KRaB)R{%SouvT|yd6;&Kn@D#Em#LzYXM#ok8q3DT)l(xvzs~o-u&bL zQp1Aj^%B@yee4Jv-|o`bc!GjIS$;X5cfd7rLxL$kn#781teTkD(-24<9h#JtD}?yn z*lT@tM_ZHYzWhUwH>>1xaXq-=Zr-2lAAW*Z&y&-ABu5-)|WIwqst!X zCyNEqo%d7{KRrc=PuuW#FBTyM<#-uWo=p{-&5e&1(W}c;@he;9oPyErJcfH@*2@M8 
zCjNiGE#6;|m{@M+mG)~s{ea&`9bc9=vh589k;c@RR>QsbWqi++CYDVX7iH`9Sx(Phm!<4)4OA7TRJnGB z^{I*~4Du>0N0_a~xYtwln9}|PV7a^VMin12E&Vie5yV&Yo@fYbJ>0UYf4vd?XW7zG zgk$NWy~|yVNl=G`FuvnS%L8dT?GDC2gcdhQ8Ar3kq4R7#ip}-LVD!!59rA+tNZ_0S z3vZr`>R0?uFZ zauy%JbSF5|Xz3gq8=rFh3%i?`%B)%JWAM5P&ll#Wq=3+R{T7kuvs!6IESB0r2O$I#yZ!@qn~#?lMFewo*O&(N}Q+j>QL!=c08}h zLcBxbG|0Ro%Kc$l*-h~xQUjAkmTKc(Zmw@{*))jAvg2fRLOox5+T{J8B#EFvB91PG zP^@!5Rs6l_OIR?*(LCA@qF%js^oZO_uYj?4p$Lr6JN2ueF?7aq9{5MCOu`KF9tuwB zSarpZ9V@IFGD|qBH=C%m0@=>xjf|;Ww$8NA%rZ@vxNB|t?Jv*}58-Toz?a80B--NY zk;ka^Ltz!BX4&688{z;$-FQZGE>v>dRK-oond++px+EL-GPk!n=u**{Rv;S;4$}p7 z3G2ShzE?v0NgU;a{EoT+!*eV^uy=W}ez?Fn^07{onbZzlOR>DNw=<5(&1W#OWoh5O z0L5L{#D>1jQ|4TBX{HfRWm{O7f`WLhd|o+YzY46W0~|0#`NNVDwI$8tcOL=YNoL4D ziT*9b`LrY&;*nGWCoL6eWR!&aM}CwI;OUT#O{*qk#A>Fj?RcVc_ZY2F^u0_yE(V)Y z&pZ$>FQC0VeMuIuAZC@sUtJ9}uU!YsT}N_TGLZ`&4zP57U?c z>z_lAmzqv7LS%i&+>jCdHeNoVM~V zcYiWE44hT`Cz7u1si{d$mAu#5T`|d~WaOp~;0m+z zQB5J6>AF9kY&1T8W?L?7h)*zKc6vb)cfZ1A{H2TUp4prlautv~nMtl7dU2tW+Qj1! zio#;0H2TspY>)k&nsjZpN4&qARg3HQ&a0ts%pXa>G|QZcqXgXah`4qaftE+T0ULnW zZu9m!I|v=lm)mTa`BCu=Lis>_h?eZK28vz8x)FT0Z`~?QVw%u2NH|De=IF83^qrf1 zPn5^UygYBOxE<`D6ocjU9N60hD5Vza?W6&r!$52kK*wy3TeJD3b*cIO=UbEUOUql(>sv8E5jdC?j1eMX4M0j6sHyF|#JE0rg zgN(J1Aj&RZ0!`#r;GRQ!DiB9|-bu_VB{hmIZ##mw3Uw=6&TD=BT%lLm|21lts*Ggz zAq)rljzX374cd&Al@;#q%`EALo~|w>KfhO}<;b(@_75?}@&6rq@E}VTy@s|h0@X4fDy&PrVwR<1|M$#fo^x^ z-LDKFx_%T_EV7J#Tyt(Zm+h|lq5S0-y_w2(d|!=<)E0bYoQ&hF>kXX00ub7aP+M&g zj-s==mSgP4jaz*eoHgX`@%s^m_Q2l*UadxQMM>u+(s$U8%2zA3g1Ocm^D7_Re5n6< z=F~CrM~hkrPVbJ{%nzp|54AXqJL|GjTRobp0{iFk*kq1VOuPdxDElcwj0sNOFn(tX zk+2dpmr!41$4K!>K!@cDf%c@uNj{GEd`@a_eK;C5-1xFH`)7e8w3NE5Ps}@q)5i=$ z`^62EemnK>0UtQ$W{VI3?G=aZg$`#R|6sr#X;s1?d;x!{;fIS49@ ziK0o{YALckdk025E{`X76Num{n19Yv%yh1-ek4T_pFR`EQ4wPL6n% z`q1Zp4x1IPoFo(?xLWq30;}Kf$7ocv5}iyGw(lc93O)LX)X}j$e9whENJOF~TksSx zAheeJ=GNVRCvzN7K@8AkX3<}A zBgNl1Yx9ec<|R1wHo@@WyZIIeGZck5_asX&&KJ<960k53)Sb|s0xb5}2=#e`KIX3t zo0^v^CB@=os-1n`ct9;VqsfU>5X7z*ng|Ee%nl5#FTeA4!oPxu6-$Yr9_-DaRg?t){y9k~934 zq1K6Tkw0jWE~A}5vD3?S<|Y$Ij=qFkLV(7k`VgKcQ?Bol4T6TrHk-ave!ZRZO?fXR z9}9*7c$J7_t$46BtoY zPz-{)@QSw@a|%jzE4f`=u|J`W+DF=->f|)}g&(I$Id0skUj*bQeX*Y{ZhCQBo@?JE z+i>Wdt@i(XD2@cH9p5lacZU?mPOqDQ94f4D^>eQ>d6Fgs1|>g6Z05^F_xsS|;{26B zTk(ATIw=de8MgNt;_zZYiOFOzPCl>A5#Otov47|h0dXlwzCuHQiNQ@>B&DC13JQrT zqp^fSdva?o22hpnRa3g>xzTd6Dw!(gR-AU6l4V-${mtX8qNn_R8UAN83Ol#Hd7kmp z67`x%@{#KlKzjrVRx@uPe6Z#3Sal4fYb9Ki>#B8RgYZupP6yyS&L>+hBnUyU;)tn4 zkFIwX0iAUmRRc{;97#FasK4-&dZ*n=&E&H>_} zh4cQAX`0^xV8JicnS!1J4*#rvHcMf{ECp;+ik)9{E%*SyIWb4r#iVA42>k5^JHO^s zKi})bh)*|Ge-0RrT@SSHD6X2)_+Ss{>b}pbIWw?-w6(aDHeb8P(^dnB=a@q5L53YJ zEP6TVrvt-2-W=#=BV1)K|1CYy&RqTRHOjHrAcwT=HtvBAj^*giPEnV#<&%Ow=kVYM zsLCgOQ47}oRtfrg=A;TVtsRed&l)~F%cIAuCSe!D&vSb#^Q&xRnQd~V;T7a>ZhTb# z+RCrHCDTV|k{V%J#!TBBx-dmApT%A4D*@F_D~d;s@A`c?^)VB^D>s&&=9k|-kK6Ku z)zETR!ai>0chOk$V6n+ZBd$Bn!4xrYK176u2!41>Z}2vZRgHf{F2sOW)L#|4%)QLE zb#?kBd3|25^i<<#G^J{riU_GNh1FvD^fc{-Q${5o;+q49AD-4Y*zgk^Z$_S+AN7yn;dpQ z_MzkQ4b5K35auAl~-V;1%@tU+Bll*IfJ(TwIQXKSPyzP1YTI*8d1b+q%N@8@`*LHkp zt?g5vbe2r_*7kKiMDjRm*8lxqZfB;R;CWs!MPbUfQgA(0kI&VQhd3RZ4RjH+e!E=s z%aVQxGDb=b8m%n+1$}UN!|R1V1A$4Bm-rxgpYQk2mTzQy>y!M($y0FaUQkawz0ONM z;yFG(Cd5>oWC7N3IVDDUO+uqpdox+NRUtQbL3?Iop&eo^b$*_UvwmKRhu>MNLcUvc zp8Hydc8N9r_p;~zAD7+v*CC-?1F%ii9T-F`wuzNi)2rdwTT0+=ffLjEq*Cj#1AB>v z`$_;@`$wfpB)+%Tyktpy_cd12Q9dveK#RnM)&n9e?UNxS$%*CUEoPu#q2a)Cxh@M@ z4}QqjiJ2)$*?SHwlTg&^=E)XIawHCDZKaA1et)jQ9`+x-DLE{Rjs;$7{HIb|hNmA3 zgh@ee|J5If{ZIW7N`l(f=ZSNWd_=cCSW7PBL(OB>s=FWWW=%DD{B~^x9;>nN79P6% zVY8Bx?gvs+JveLjynNMf+rjA}iaCQ>9gu;aH?V|pk$}iB@Ru&Xq9=8PA|2K0mjJ^f 
zSo!MdB64=*7t!*-a}rX@45{#F+I|;lU;k=cNwKzj&AiW8AP)RyJQ!&-P|8IG5KLza zGUx55eAsvF22f^ChG+vAPos1gV5AQFhM~fpiZM?lSwX{*>qENd&2aNVTh6`Ux5juPUlf^rq1VkwAL6!h?mV*wTP5z}xlBF=h-SaPbl3aqgzc$VzEkHney%WD_Xa(F%8)x9*f3ZGyhtx3SDvGT zyI_^xAB=t+fevpQKk_m$US#d3gIlD2n7(1w3%#fO{d4c-A)Y+{_Sl}2#2X*8zQMwK3}rO;CB;H0r@>CP zZj1HXFHxiji{#bhiiZl72jx=awfwz1f~B0o&tmU<#CXzpGQ<6Bm-q zl8WB&`i$Bi&<{RbtCuWrVcBIk2miXRncYPcjX2fHx4)$$?ky%)pK8kg&N3s|`J2Fm z@{3Z)l}K(ZDR1^GcihI>_}43bdozZBW^G3Ik4fnH|HUJ|MZSb_L>v%kSA3ax3_!sk zXp^VR`mB2vzUu0|a@GT1yk>b@dC(+Os{?T#(bYHkb#;J!aSQ^0J=6bM3qIV4i$P8J zc+z{UYcg)x24Ll?zAxP4ldjzJP8>ZKm?v$|9L6^`Pe0>g82KA~0F*fR*~8zWtW7Bp z=Di*U#4A=-Ww;-Wn_#BKREi%Bpq<+Gyi2asd|4D~)2ZNMm~U}9z!35yM|x=10K=(G zeRlf<8;(nJtNmKS9S<%+4d@>m3z-8PVJHlw(U6=v)wPfBi7~HkfG=*;&aIK_Xj?%MsZwmbMXHEypAGTV=Ojsg;=pPL^bDC4}$Kmybxt zF&~a-KBp@4VRHx?8Ljw)!;#sJFn`?5&^m$a*@@x;Oi7~ zdkmE%!-+OZ^>*=PF*!Gj9`VgO*_PV};Y?(Peo~Pb356a{kk*L}dBniI2#S0{yu2XN z^XzBNM@(;c;HAwcz(prOTIaJWlOF?-Y^SU;&}vTy(9U7Pw$xFFW9x@M>EkX>{8=vb zGnqqqekq(T>uH+`X;tmQZfWXXXs4fI-Q~)((h7b=LLQ@$oOVNdqTv)k? zv*>oYY^wa}j>4G7x_HO0h)wtNNy=N| zsIR^=3>1ng}-z^c@KRe)nnp9c2iwq{WaX% z-2rG{FsLv#w-d1Msacm-0-r`0VnN5MDQ`~50r1oUdt%o}oxKGBJRShOPm%fF;-`B_ zcWPujP#~BCrkg}>U<^dw=*ydbUcCqk&G(*)AF4qq9P+a2`1P9|4e{0IT?s~U|HzZ=6oXAsQ7lUxD@p;`ze|=g_10PR*!|@ zr+SK)!1u>ubxBweir(x6=`_&n7s%b4V`!LAQ1z(KVn6`XUOX`X&WZ!8V zGwsc|V1bk|HdT)w;Z6-d9`TdweX*$0#~LDC`R*7_j9;mYPluD^Kz*Rh$6@pP7z<8N z<5d&ukMoaWwx%%F7m}L|Mv~A0XZZBDPP6qkpKIQG)k!?f-AWIgWhlG8#CRq!5^j8y z-QewK?2qQS1BY%v#uXrsfzem}embTl$z44CiR`Jun-0Uj8hvt)XA~FVO$EE5oquNA z=C@?O@n$bwf-eZJ>Y%@N*s%sXCPwG$B1~P9TQjF&P3Ns@L(6ho?36>5Ya#();(H5{ zgpiH3P7=ksG6lm=SfP6-d>N^d(;xdMKRe~s_O8(l?4cO^0y@b~MUGqVDlXd`Lf^6_ zajaQ#b91A#=_kH|YwLZhPOhtrMJ5ZXr0`K)JsI)!Un4Cq4Gi<^{qaw4E8p5dk&If| zSA6_%KV`#>>$gt-ms?x>gK(jNA1Vk7cmWp;b{d8qpAp)+mAil1-Xs_j=^SW}Z;*F$ zCXqP_j1@lS6BP8)Tk!N^pr($ZgG+wElRZ{e&aE;mG5hkCJ4bcl@^p=E7~Ar21F9vA zf!oz8wF!??oR3M-?~&691#>^(!w+P9vs9&>Rk5d~lhLGtj;lNmy~vLP)fg&l~Dx&3*jse&jaIqhj4 z|NKjg-P{vAU3l}V+K@ey9VobvJQe!@IhXvtVe5tOMP8p;1%ZIksv$WY_+=&>Jet~oy;c=g7y{`=!zz(XUcwG4qu$IC^{~~1=>H}HC?zHubr{hxQ zBU7yJ2i^@oE$4N!-vl1DwA*(MZYX3~XW4d$-l{zVa4NBMd2H9}a(u9iW4;wnYEUva z=4lI=JnaO9N}otqxOQX(MW%EGr~jQMMMgD?yf4(x6A&=ud_^FG2e>#rf-Q}m0}=@%cwA4l3K)u#)TIYV*@ASVfz45>78rRA7TG=#-*48ACgWgGfeO3T z3l{jkY1wv#57a-9KI1zhQJ$-GRqAS>R6kQLqzOQB@aUL{a*BHqOUHHeYD3}YmM;p)$u6&hMC;F5R|2{)z6K9(=(k=x7JFt%Mt<(~ zE|`!oBDt*<^AjXr;Ka%PM~=Xb-&}D#eoo&_4GbY9bNwp&#&qo}Ej*Vy`Do%7OA!km zO+RI><~Br8|5Y4^JrG57V-ZS?!$`Fn%CEvsO159gZ^7H22WaM&D(N(d3ddR-cXIUT zYe{f$8@+>h5KYjgwBe|Ve2@3B^4`b1HyaY~YAP1tQx`B&+iMf&J+qED*6&iN+(>_EU))%{1>b+mX$I8Yx%6)Qh zfiQK%_(Jm#04nHZ*aVcSWHv-x8kq`jF#)6^u9AZb&!i2^BTP#qqGhPb!bmAGCpqc} zOL_f3uU*ttI|l9WR3c?-1BkB99PoW~{VmNn5*&m)R1DEdSZ*@?g5~DY^Zjtj=EQYU zqOH|1O(I&OwMD8j5O?{|Li%0gNpF8h;O2 z;D&gxJGH?(s7`)%ffG*|k2N5?rMZ%$9`SRamg0AgtBmp%R>*gByLi zByLQQOU;U`E!j@6RvY54tl_{X z2{RHqnW1!1#YbbIFP|!l=kpd!eg_ufi*@5Khv7%uB>nB_Jmq%wJxkIMjWC5glj!9H7aWovj@YMI8sFu9j|VYqm+uM#I_CqGV!= zP$Y@5%_CRh_}MSo{eWHG!Vbh8r5Y0Up0{6?luN(|4~ofQz+m9w&nVCc7oBK6A0TIp z8d1B%88C)zDb*__bHlT1NQOQM=|>+-@jQ{UOL;!1pe4`1;MB@>VNlm_cpZ|h^{yxe zfBiu(t{^_fMZg0iW|79Q`n`k66``qYF_)vtZke;R8*)Had>i1;9|0bjJRis-S<(lY zG@Xy)>f5gRLvovUd-STq_kz+W7-O1Bg|Y*6Cowj-!(0*L0fLlCMd}<+Vbt`69Rmu? 
z&-rDX*0Et+JWA3oseL#(fD0UqZi^HbHkdsBJ2B1OUaB`&Z6l#>G#;s@B)L}Om#-+w z3b#CBctg%AWDL<}!;cB&&Vg?-V`yNfTdvUd!UCHDOjU1yaazGFhl$@9#=b8@zl}mU z=QAwWAp^M&-kv#lIj#dkyt*{8?kITO8xjcQ=?}$%kNPBTe4WJP8yN#4#!1KFPyqCy z@e-Y^`iG*sJ&c!|2Ue5pOyaIQ1wd*Lv8+R~{-H}7`|eO_9pe@R6!~HTbJoPU>;Z3| zwwa#%u5lZI8x_38iK-m^`?%WA^!jd_iuIJC_;+~io?_?(&j$x+;vb0U^f_IL{fmD^ z7=-yc@N3JPR6C)Q*ZdQPA=A6VCyF)`^c&Pa29u2FuSaGL5VNg5ZRg;&q%YDH7MhDL zJrPUH5pHf-=hDpKSM_qLlHS#db^q*k1q8I1`2#eA{|{j*#v1bFaySi4Ne8)yb|BT= zr3`x47Gp5fW@4vLB;|AL$mDhB5SCk9Ec8f0zZmrF2c9`bOOLVNtr+6&_(#oeFCk|& z5(VV2VoWNKA2qtII2gB+-;{$xTDx#{5qsaIh@bP;*9WxtVMLa;kM)}xR~o|9EnWuD zUI%LN*0vNe$3d8xFo0ztxd!n+S|B#zaU9{RxOS;1wNH9L>SdfwC-IT?#|)vXTFyF`5#XzHiF28lr{-4wrb-~*uxqh;>>@TO=Wc= zjFzzjg^T#4D~LKVav3zP(2+?)D1wRKY%Nad1yPb!xOz~n;yhv3IZ`%pX1p0%L<-?Y z%fN)hdH)&m9!9|%dP2B8L-Jk{dSP>m_m~|mkmcByaE7Q#P=zio$4jjiZ%$JP5dEpF zf%z=2t%o>k-_Q*$Jl4AoZ{jg_Bo|o615nW8$n#LG(v2oT(3`hznDTrN6S4mi>&0o{ zkpL!e@;oc15`St@$~cs)P~4s_E|w8cb-u(mBd5X`3EH{0d0p+9oL+lC*WQONbfo@v`tc=Y+X{N0faofmU&cP~yXr+7V;@6~l2R;UW;u|ve& z4p`msnairINXApO2;(gYoeV3$U{L9-(J4W}3q#90FRN^b7n&g=Wk7;!O5G7edrhT9 zk)^_G{b4;*m8pi92&t$$U^Jsv{K&z)-U|#0PyJMip#}p88&@D``2q)WN<~p=9N5Kk zc2bT6J%+3dW?8P5RBO)0uPa!N&gQ{2i9&Bav$gare}G9=kZ285vAPhLR8S<8;x9$3 zDO++`n<)BVeZq^z_yW8YoEfPD_}fzP^ao3(bz^$4_>6?}zEtY`cAJi)L_W>MosKT- zIo;nX`N>G%2>LyG_$`t?8*FwzgIajIvh8DaB1(ktNf8S~JLf(C1!Gz$GBj)viw9XHAxFg`T;J`2#;m21lPnHYeAc0`-(Aq7Sx2{mjIRGGv=1U_Lbr=AeI zj&<`D-BA~j8v~?>eHCBDDFa}NELwDj6tlI}0sHCQ+9n)r#K34-UCtk&cZ!N-^a{PE zVGf)VZNKJ#~D0++IJt~ikO+14cyuH z%36P2WRQSRVG_67g}@6oy*MNRMYIeveFfX$T;tLEAM&n2l`AF&5dykr?lr5`SZ zM!z6d`*@*jek#0zsA#$18|7J5h|+hGePRHT3-O8Fl94mW&0O-ELn)BZ3v;v^J*TNNK_w(>g`{5GtBI2*5)eKKZQGG#p`-($te+$VcGui+pzJz7K3lT8L>H8@R zAJdUT)=)rkl5j+-dHS0oDU&-Mj5J=7=fwqIMQ-=rYPBJUP$p0V~O>c`#y-I^NrUU>%w> zoVQg5Tw=>;!$pokk~XRiqK}lcBZn3+eo|2l7GHS#waNDN^y5;^*}F|DTlgWWxqE42 z2Ls;2?V4YXjnMgQQi>33rk^h-Jd8AokFAbqzg4g=Q4 z7(VaY-Of`}$9&>~uRZUHf(6!p0r#`Y7kczTzM!4Q~lCY;BnNjY|yr=On#) zc6}cX>SaXzL%O|voiOePK89hu9hb&G)WMw9@AfDkDs8aSaT6aTLp9ue@y|4F?ZcXh z3-IkI^E@r<&twAKL5#yN>Bw%E2cdxuZr2B@Wc#8%Ef~9!cC{2HwNi4{hgtv>4gmg8 z2Ov)%piA^{f_)HYd0z)Vca|O}m}>Lf!4Lv>Be02ppcl)_Pa@!ff!^Cc+41`Ig`Y<> zhehG@jV;fKEUlN0i1r4gGVs4m8(<;(^y2>8ror_&)@GkaiqmPuxwqX?Gu5gf>m>a- zG$U5d)Gzm@c_w7ggM&3Z7Au6SI{yFAm0;$l6lMO~{o)u4lNRfZ2Q{4G(vU};KT2S2 z<*$j<4Q}3ks7PH||4^dCuW4qK7L>dESx55iZ+)U!Z&_wx!~x32>sSXvM+WRu z&;n2zm`saH)~n)Hriqo%)x@TuWR}4(@cA#J^!L_h#l(BrLZP2$NfPv9S$PvwZ!u)+ zK-SZeSgEU?1V|@w!qHHdDaWWlLx;Ia(apXez4w!J-0i{htV|D6R4KVGM)H=?f|(*w zy86p*)S_JR+s(m&oJ-(qvsdo(PySq=Zej<9TBX=O%$+@mIPWoJPIFi91Fl4M= znDnh6dgZtvK?wuMURRCoV7SW{z(NbO=YWnZcD84S!PcmAP69}$#Nx$=c*v>b&_VT;a&A&DIb3lcq8SMc;Q! 
zy-k0C+i;I-4fY0~bJOv1MighSyqPH7deQc4+i5>n@JL{xF2aSAw#Lh&zy096`fs&m z!XTnQ5Y8M8#_*3GF&~p=U#obWcih&tC||d7d1|tU-I8U4?Sgl*Ru2Of~{V@N2 zqoJP(7zVUL96)MsNU}EsN@s~C_>v{t05$NbVAFDc87M9=WJE4| z9UCOi(~pA-O|`==`1ZC!LE0_dmPa2$elrsJ-gzI%B}TU*{^QO1=c~nAzfJ$P17JkB z$lxzt5|?t?9y1^Iq`RwaouPG119J8v9FstD>EM6tF`#-|W{ffeQ<*9@KA-tfC_N(} zLi=Y6ISU-aingX&Ws>`i>X7p5?CAEU-L>77_gtM)(q6Z^t&aBJ+t6W~OgzQj8w|h& z@{TTFZG48MlSa?BCPMNeMObD7>b`?b_w+g=K3zVkikgD7yz*Lx8Bq-_7<*^=?`k&ryadE&~$75xgGwEo#T5=C~B zD7f83ZvV_XyG0n?vH$<3yZGlb|IQ5iz7fz&;fhC-)_s56o{>Q%uh23nx91ukpMn>6 zPz-;?YF88%QY47LhlY-3{l{XHT?+1=m?cWR^b5nRWjeOL_F{QH0wg(xdd$4!Qg}$u zXv7&ZP8Mr;{fC}+`q6U`5jB3c`9mC(0guLVXi@i9W>F{ydAQK~g!XlZ##!q9EN(2& zWXYedY=$SY7q}fDAvZ&5wvp$tz>BSF5~NIKETKO{8ru#e4KYsUW`pQPG>rHIT<62r zTl;!HCA|N;BFWk5kDHQBeP*XN_fd7_Vq^M1%yvZG`whMEE6|jr17*j^19xzmwd6v; zzp))3$MT@eo_va!2wnZCKXvzuNwvYue#VM{^h+#7&{%V>@qhw!ZpPsnwZ1Z$Ir^4} zM0$<8Tl7mZ@tcC;40B9FJ@85kVZ=gd1{@K#a-Cbjx<{(=593mOdjE*?S;z#-}QvBglYbI-OpBD88vA z|JlyAIlu8PCSqhmyu3T+C_$$me%d@XPv~*IRpE{q9niN%wR}TBa9KyJ#>S)ct-QFv9ZL+#O=v-9KG$?kxX3Hai{#_LHE8Wl<4? zaS0^+2{&^}XsUtk1A~VHOGE{a_#)|oDD~tZ@86SCJV$oA`~SMQSJ+ybQw;W=Tx<+L zwgn$9IS!k&r7NuI9GDO0U@TAaSkF-By}eu|&od*zm?O>m_|ZbeP0K z^$esR*z&wy3zAY_S@`2+CICg(t)gk*+Xy*tW$&Hny+dnIcVGWw4>19vxJxXZQj|o8 z6ccdtQVWUW>X9n=7bFcMxhzSPYWi)USaZD;xbUW(Vf2_7(vq0+L!YNPB=b0Uwc^wJ z4ehfR$`xOJbh_SndOSj?gy8sci3|@dx2=wc=aS^_bR`j7RDF9^pfG&XOC9!GB~Y9X zIz<+~4d17Mk42?Z-8~3E76iZX+PkEkC{S)LK%b4!FELEbrF#ipXh9s>$Y|gQA`?*RJ8YDg0OLuUxSTtrlaU4cVtC4sNfFUPQ76{FXZYh1GjR&VdrGN+rdmu6*UDJ?EC3igs%qZ&a1t~zv^wVQl41y zxv^pX`gcoZR-fmiG9F$t0y~Frzg`6r`hVZ5`O(EJ+0h6ZANHq7ttSty5{Q1P=bM9<+khgug`}0oDJG@<@)h|@}1HW$y z%vMVdb)hIc?{-=UJ>_?piZ$;2eqfmnoRes6$P;Ou-~uVmnEf^nco34Z6umW!ax5k} z4YM&-*DAr1$i0JF;(o+JNg@M`o+$Tyw5Hl;~xy?!rkV+OB7CCMZ%> zh?US)DN(!@!kmcBh_@IW|CAMtVrq9$g11V=UU*7nRL$Bj@Arw&_nnu`wZ5~oQ%-d+ z=226khChY+o(i=SANJAKyIGb;UoL$VSBE>T5P+VBI!?;w-u5^Ix(_rK92S~K6xR|D z2BTRK62s-G^buXLZR;2P_KL@**~lu}V9cXVeF8~F5=u|;mHiv7E6qemyE{camd!x1 z5k`o3Lwk#di1NH&>;<|cDhG@cNI8Nm?fEhSB&iOq+XEozk4C?{JTl3c<)@WMXjjo+ zwe)td>bdSi!rD3mxVZL<3p`iem@wRLD-H$ieGGUfipr_|gnNtedb?L}`FNm>NXqRv zf}{`q-D{B02v>*_|06f5ig!WsMm%@>;3Hl)W?Z&6W|y0qylZsxd+e(Y>7>cG=t_!z zDlWUDAn3_?((Sscf*QLva0M4w$+jpP-=%yk=ioG6xP2Ib;H7KZU+SEnsj+g957>Y6 z;`>*pG7@^J-!~U~KwVE}y302&*t6wb3+4o%*ZDt!J3HSa!3P^zlY7^hoz>H$-02t7 zyH#SF@`#I(?EjaGc^{dB(s0J_vJ#=V=&eRR=(W_ZyY`p6<`OSELZ(O5%tB-rJKAco z+nbx0s;b1szD-Cce%aY5%GFXO+`oA8^`)6b+%N3;CTAY&yNfP*e?t7A9j=_?V%+c3 z=tCSL%-rqr3w-`hEb1Wq<-Xf{p#sMtmfoJ+FO^LNYXUW>K%GxS#}j5C4wU+jutjTY zD8i0>Dil5&IW(Wj@U}qt{=pv3=*E|UITDwu`_6zabjG1o9u}P*lYA>a4FC$8SXIrX zlLozC5pS7nYPW(dYg1FgOkGo=@p=T2G7MJNVq}@`UX`L36F&upZ+fh#2U?rO4^=(M z-U`1vm!fmO>m4zQ!TN%{^GQK3w;76rdDQBffx-_uhk|ElYMuHXzfrU%*ccs?b=m&~ z<+7lK!i1#oqzuFE(JgZqz-~kOPhIq`N>P3f$(|{q3QTLMNS8`-hAJ{$n{^lEhO8jw z7PPJSQq=J4W|$S>>`9SO|C$*NCgKF!@b1hUsIjj#!^$J~UT7_~(rI#o6r)~u$I5+{ zsF_hBORP>zVPXl)#+lhrZ>_l9Mc2dEh@|1(q`$pkI4=+AOHgx?6-kZKe$&x1v#`54 z%ET4b5a)fNerLmrN0~ZT>tlh@u4g3X9kMb66&zjSPb3R8`(DgHE5Qtu9RIVKea`XQ;jf8t_hSJS2wd7Z4ZpG67R zH}K!TbD~1JDH*`6Ugfj)5u&uT@Hf(EZPHn?)ytPTqkOk5w}04#zPCRk=cs~H{ZQ?g0w=|1oZO369{pnc9EJ89s8EePIdlAc zRl4+NQJ6{F2EVDHu)9Z@`opn!7l}vD&mi-ge^(_xF~R(E4}I>z1HSTGaYY|?E&k4Qo2VZ)p~;v`j@`If+aavUXkfVk3T8nKh0pMF#~fNw<9y9tl2Zi=Cf$ zOp1{%@m-AOT$P{n7Y8CW5bs((hj)JMX*}B8@LHlUFgVC)z)@)F-&ws08|F&GP~vfq27TZd>)7CaHyC=xD^d!clrFC9U?wOl}PUTT@0+-_5US6CZDWYtEOoN|AUh*LA0pxQ86IbzLl5FH6J%!||!% zcgL%BRe6QPJ$Bs2Mm{y!|cA;nF! 
zuWy1gPgcZ%swo}+1}pC+?7`Fzka3qXs!~{rQ4Z0+f#5veQnn%;oP~lN0lRvr-+}3F zGWf80Mwy;V;qLB(XlnC)SH6rDhE@kQDjZE?TkXF4NZUB~SM!?H&tm-jW!P3Q!^6XY z?c|fOcv6j9pA)1P?sBD!5jG`@8PbW&Vp2CnL7x8Hk&Jkz05xD9uvA4Ls;~BQ_Fqd-e^77xA;*i_$pRa|4 zatJ^JZUmrfCOA26tyt-9cSpa)h$p(PRGxScfZoBI z3=K()Bt##Rn>Z@niMIA`*taS8z2>D|g~GasXyAuSDt|S4qOba*UxVxhmufvy!%6lh z7nO#>&Y8H*Wy8L-NHZ(_PzqRjDj;pJRVBEWi597S(H%6Td2u-_)#ANYU-%|y{)5`S z3-s}R4bgGdGT&8I_t;>mF3;MRFYm_AJ=yPgJxKON^?slj%#7koEG-(FdUUSQ0`oCt z*k;&=J9fu+*LRdd@SIeZYr%_ zwgF#YcN90B^S3UbsL{Zq!V|=mW46S%2?L5|W3DAcF5OLvwW7o1FPx7=Z=$yPrqO)A z_ng;&4xwa)k9?S^dX&S@4BL;V%G?lT$4R#&OL-H@-gI2pr-Af&q9_Og?bLu~OkYi5 zRE8~i^_qC}_9}&Fu5;)e8h#RBcn$J-^ke>HT{YMHr0W)o>k=I{^EO29)y_vkq-;=Y z=zXomvmoIUI=j_*;hwciC=DX=#+hLGjGYf^16f=`lehp;44FzdQ>X6-${>7!YuiDn z$C-46)mL922*2>~pvutgrD!8az;lnMc+Po|0bNMzFe;0<*>^M2qv>Wis6XGZ8A%>) z&^;mAXPSYP3y^#Y@!C4&SG9}_MozPjr*?-sp#9q!5Yxi$gHKIolb@7}57|&`GIGIR z=>2H9RN8kM5_5kGcM-QXan_x%cG+5O-%>y-#VL7>735zK=|;HN)|)l-`b$d{zJM-$ zt~Ew3Nrm)30K4z6@ir3)8;1F;3$oL$UOQC%eyW1|-%l^WJ4}66JdkywRbXa)UIAqOaStqP69b(;Sm%rHVtLYLm2JrJ7YA>JPTh>pGl6lqJ^Jj39TY4}) zIEU3hCcnwPBoD1oqq(#2Zk5UWWc28V4RoKbninvdxB8C zdBXG6Oi#6lW`KK9o%gM;pc8b+wx^R4v~$Cwkm&I4=HV`)XQk3I?&R%qt~Qe=ERQiirLY$zET^-}Jp%{=&4d`#F z?}VJG41t88;HmaH^NtxNOtb0p-f8XB`4tSj?)TE)$T z9-beDMf9`a)`>Ucu5#F^!?=PBX{X%g`q^uIAmlrZV|xhw;p_18E8BL0OR#2zDwgO2 z^dWE50Y?SP+t|X%VvIn%``Hm(KJguV$Q?pGmss60H@Pf!y0S4}BYg9RgAHY#%HZw+ z8DPp%eN!*&_?gg+;Ah#c@@8E_wTj*ax&==%0o}D|x4j01TgGg}AK+0HnQd!U9DZtic zOP1i4(!-qQ03G@>KH&b$R_6Rrha~h@`XYHK-%Xi@@_|Jhiql`VrF@G;_V8u1;QUB0 ziTr+vv@+S~({5UpM)P`^=~WAyoXInUQrmAOAKHbRuLf>{k)hW~$NoxO8_CaD4e}2) zZf8xox`HHN%tv3xl9dGYV9?Y)L_dZLI@o4y(wm;A{%(m_nP577gKGr54e z@4Gkj|N^hUYOLWr{Sa$ zkH*qskPmacrx|q~ie$1l3vGSzX+oC;JP?LjW$@79sSm~?5BEEMEKi06e0;b|m9p$A z)j^+^JWS#?BcNd$TNG-6Ee^UzPi2`UaX8-R)*$Ac|;DGl%n_eQ=5mc99M#kpp)Jw zzbJbWYTKbkm-_8MO^2F|^*7-vcdUcBJGywWs~BG3lC$F#+tWAYiij9EnJ=oT9bL^!3>^*??u)+S zaI=g{-JPGeyKkFiP4HiBtw{4gdL$AZn{PGg;74nD^tS&cLx?pk1~?~#a|rZyA7APF z(+Ii-6+KFcj=V>6Gub-nG9)0OEN>oK*;o=CJ=OeUyc?Q|AP|PVysK|Cf`=ZFHWE_G z5B2s<1EIL%^Qnrslm>||y;{Nhn&+4n9&HAEZ??9HdHxsT>z6kS9cQdEKk$j^w&{DQ ze6CFgWvJmH1U}TFGOv89m&;!4CT*q2g`i zWYa=l?My)});1j)G*3PRve!J5MhQX}bwyj{?d&_HKgoE#VI|Huhm`_Jz&-!c!l?8Kx!Ch=qL=9vAm-Z#78ds(Xw<_J(Y{!Dco%rzVD3&4 zAJ4m}{kqa;dR&7aRBp}&3AY8?bK|(r@;_>CzfRJ zX$qkSJsw}s4{tImEX~4Os|c*0^CA&vK=e5A7gj0&RKh6 ztGxY+=c3c|aM=<)XKwv1=axkPE{*l;Snoj|>FA)eHoariE}uEo{eF~l*+_FRVsvj` zy<@g!KGLbBF~&c>q^G$RBVMjJkN^p>f2Bq*vXrJ*B7KMTA$~~B>H|4GL1)e%e9>d+ zM*_FM42Q}NS@U~k{SArl&cHdOQMP%SSWgK;>ZW^d*=Sesps>wGd-|J9yRcJYt8XwF zOO|?1uui@xHjBVfZf;V5u8_kT?{__(i?Xwq$6^a0#PPIQFu)xw?Hx}tG-~bXDMvf= zO*w)h)@6E=;fM3#?!m=fcXJblskgOOp-8l^rZ8Y!x8rofTp(MMgDLjq+9&J2o`X|b zL{>>N)2{m)N)*Z%snPpU!Kyt^P3O4aGw@W8zQ+5BK$inYEq_y$%V99t88)1WAaP39 zc6>>*oOJT$F8f7Sot(zNDQyu9<4lk6rj)UPAEFHB1)c07M}BmHGmO7=a2$uNH2MM$ z-RdmS;jJwmQI8tNJ4v`IQob~L6n1JQZDU24B4JPeyGNe8 z0b}PFd)~M5OKvjGNy>K5{j}W>mtW#6VhO;~Hj(`WqF9q&_T64s_ULU)0fzLcMFtDL znvS>qDzb!wZXYx+n7T;mk;m;ez9M;MDjOer zka-XwrC^L%rR;(#EAnV5`h#(mN~OS*^)v=r;V=Oiyez+F29I6$)EI)LFBf&+Z;5Dq zLSGH9E$4?rw`|G@*En=Oj48z;+FgSK`iigiCSfkP-8&XjQ*}Ch{i8@riGfpz%w!;P zG6PS_GR_X0v%5);bP_xG<~eGCKGy##IF_;u8QFCDCp?tEEgJFd0ceK`-sBmN{HwNW zJKrrZD3*j3LRfdrYMC%>7Q*fCuvSBK=2P%Vj(F*vdhrDR6qAA5r%Dk^?~QiRYlXP; z&T!(cya&X$^V!Av2ZJTN&!IA9rPw1$W`vS;Z+Af5fd1RE|^~cX5 zDG=KYM|0{U64&-NJ}4S#IhRmy2A;E@ylnbU4(V#M*A%YOdXbt{n{?fk=}Yr!V^liJ zb?4c?yz=#5e$p#dv3cArv1W6 zr5Ppi1>!|KNmBKz;^b3t&wRaGU&P>}3c8Cj1wR$PQ8u!*sbcE;;O?S0Cb)a=#3aH$ zLt17eqz@bDc?ZB7JHNMbbqT={Nm2&j6l?UQ9pRb;Cl!^EcdfnoFxFEtD|HBU%5rU- zZALb`?Lda!wx-lQ?V7jIR-@;6jp>RAZT*liFU-|s>!XF1FGS@EER(hR%S;!34R~~? 
zt!nwI2+oK@>q|R9fTaoIya(WKY}qackO<0(ZvUhxe5wS*xj{f&c8g3jB#Gk#SXgW3 z{z2b}@QSBH7~+AAa{ z>En-UOvr~iET)7G+ELwyX69ra-0uXv)JjuLld1tNciAkW-XEYy{G zL?#@Gzbz5-BcPJdc}8D(o+;8?kn+)k87(w}0Mu&rCJ7>fi!-QUcQUb}=2C0zK@2at zWdeoKv+!H>pd0Zn&rU4q02_+^jOo(Klz#hAWs1WDglK4MDAplwAlW7ZVJ)mY*HxZr z`+mO0!Gx76>SSrTp&s`QEl1&YfmrK|j(WGK^K({{nV)H&aenfqGEEkzevTT}7Iu&R zNC6Zx-D#bO?O{C1xwKinc5Q_oJhxs+S7~R1l@Bd{w%mOl{vskFl6x7wj$M=U_O++n z^M%#0gT&yVoA(SkSeIX7OPCER8<|6o2o<<1{|9w%0TpH2wGB&`bO}gzBMOp&C?X+U z(v8w3T|+l0rP7U3L&p$934*l1&@Bo>Hwb(eD)>D2efzBUeZTKt>;2bav1YEBxz5`A z*n1y)pXW8}elu*~Nqxs{l=Lf1 zR7+4Cd7448Ef3v^@#0#r?|l6ne5^fV|8`4$aLC)UeIV87U}CGg9wYX-cW+g;&pj4LB0{6~IU{-fkla)$$3z!qfY*Y183BDd7{lzos89J1)%HI0_KRw zuUU#sg|<1l>^PQ)d?#_g67VZd4BOY9%roSL7zz$H;uQCoqfWtYV~bGa84j-A3f`?A z&1fQvaAE5^LE4@U@$j0fqvWUFATFPKw^+I6sdv2Cf}Er9>4qV-$Eo3(7TFLMR8B7S z`xv3D_1)>MqmenE8Dpg>$b??~#z3YUeG`gj2JUhD10GN|o28hV8NI#2;QZ?uEJIKq zGgs4Ykx&_%`2gS@-ObJrxDt7QwA>y;50uB znN}RU-EVz5Ijp@V?OOP@+CI;E&Jo1Z;7cjTbhLB?9`9fX+^?srWC72;G$Rr9aOLh- zG{~|_TsS>yn)9NYU_ePM@R3y@a%nbJ(ME9waP)xzNEDj{=IPN#1v|ZmQ(`98mOUsn z7SvqDm_4!iadRJdCeY2GI&=Njk67@mgjUB9Ezi~77U7u%*AX|u{v!%riIfo|>DY5o zid6L3E!r>+&LfMwaE$(Gs@S{a<8S#CUVw zJi5W6TB5X(p9j|Z9nGjeALm>kSAx-rFvhBNRuM_+HJM_>U2zN##KMO44)`wV7HFnT z_RwWFedRG%TZ7Y}Pij!$x`tVAxd#7QP=Eg_X7FB}3*|;px9etfOJCeu zEc=8w>o;K_eevh^%&1-bLvKG&D^ge1q-iRMDv=fnjSN^!D-^0EFB}=(NS_Xxv2nD< z*htT^rK;G8w%2IguRvZa-1Brj(Hdjw`bl_iW+shS`saDChZ?6I_SDt}qBUn+ls?y) z_HLq(#&xGByMso$sK`l$6mhv`R`vgov5F{@8?Vb}7 zakqr38m#b7*U1KxKHop)HnGHxm=%+!i~{9M%CQUw?|+*j=j*28yk499I3H{4Ilb|! zcCx0 zv$RO54DyP>vgZ*Z(!(6NUs^+d+M|p?^1#kkBkNT0xhxZ8Q$!1k%4A{PM?*sXHNY2q5oYhj_%FzdzIrKGjxXwFjR**B&s=h_Hkw-j?TVnSz>h(i=|geG>Jckfah z7G~>w>&kE2%cx1~`ykxr(_o)@7a>dZ%^}xmqPeo=?N$?k_~%JYaw41&E>s=gzH4Px z>e!4uul-!7f{JzE4#ALZu*=k|@->3_^q+fSb+d?uyJXKQOUE1BLYI3#=&(s_$t+;S z6ux`%z{gXe(c4o@c%lFP{y&+>rG_?v=TBKr6y-jjeU>(c7jN|^s>B8uSIQKP4&zt{4*n{!8j_W1tkH-`f$-C>056Ffc0lP7xgE0$Rs zwI9FNw0*J@0Z!t8_q^s%iFA<+oM!-Aj&do4PSrp%Ot0I|rql9!E}~BoTHS0XWqDCd z0&CaMx+=Az>mNqWhOV!WKejvL>ldmxXMHn^Nc6ZO5J>(U_LW=v`(Rsnww-NnKi}W< z2e zT9H^5m~$S#)jfcfeZFQyL^VAt`$m_iYI@qA&$u>s(!8{HN9R+W?IIfWSsDWAyjb+C z#AF{R`AhT4sZ=yag;b)ozlVdS)usv2o7Z?)ZQp5UzTS&>UMi(uTf5;)i5gzU5(eg7 zbUYPXZV5cJu$=dm6!Jv2H1OeM1~ORW&E&AO+ErrhaFEt$uY6GZn1%sJ*NUr+5|;!Z zApFmE7nLDYnBs@9ox(1iBdrCLeV7f8xRc0;ln>U3Rug5{GV@6D3Ik=3aZ9rX*K2rD z!DGv4a4C`kD6(0P4Sar$G+Xsc8wpKt9`xR=OhgL53QdEV1B=xhkv#$=#SPZ@QMK~~; zTd*FV7bm9V^e7$mm&aJawgFgMp9e7?Q~C!=Jpi2=7vhWIkMJn1PxNn_)Ix1? 
z5RmF!ppk2*c4I=07#u;FRmQop>Q*kWsBhY)8zL638&E8tI5c#Cos%SQ>Jor(2-DI0 zJM<==RiBxcPN8B6l|0G4Lpb-mbZVYzH5}t1=>vOSLshYpG&GVNBDrt+*v$kC(LQjK zKDJi_t%w^L=N!3+C&V7Px+u`;3JlMiG_Mt!hT{Bsky6ZOF{DuuIJ^K6Qa43Cq=}Q_uVLSqF`wDbUSp;(T0@e04 z>WH$~t-J$J#f*lKu(L8gxh+>@IFIAE8WL+JzO|-Y3ctt6CKmGybh^x(Fn&Olu8ieR zbT+f#z9PWs4;gWoW(?po%}hURV#M|lx!ym$vlTl#7_BZC5huEtH*xFnJ`Iszwc(Q< z4?(;qk~rLlKE0l-q-Jl^FbY1YeAugYG;DYJny!_EOB$yS>YhiN>m8%WmZ+eg%{+A3 zcw@ML=B$HJiBsIUY-nBM7Je7jMBCStV4NaQ80!@ySNx2#Obv#Q69H0IK{%ZK&ga~h%(s8HCMQs#{nG(HnzzB0$3Co4lN3DN z`-Xq=7`ivjd@j&U^|T=?=aAB#R^Bq_yq#u>IyDG!YM5BX2-4$pe%MzX8hpqhmpj8l zwX!L^Ol7tq=;oO;0ZQaWnO=3W^(N%_+~m!Ss$FYCJ@nz6S%KW-JlMUY^>m}GJMPh0 zZ@i&IC&>v#0;VhK!BDyCB2-A&+Ixtfg3XbK$K{TQst&TCLK^R+qgXxZiv#VrC3bkV zX7uys;QMad5Er@Xg%8&^N9wGSG%M)Yz&_WPCaS5QCM&P=STEWOjCb0Wyk6%d#Y;q- zkPf*yH(1`R=NCBE1h)Aqmd*f^ADxBs^G+pwM_58G*vV)U!!MKfa|w>cp-cuI2K)jA zpXD0DXiibU;6?_i2CT?&^g(IBS=<3D(g`0a%*>5EHh@c%sL!Sl+Y_<^CptPrw zZvx(ovTDtwW4K_#HX&CyX#Jf>E=~ks2nOt;MX>uaTTw@HlJx2*Kd`!{4gruJu4pi` zu3@m?cG@Z%U7<2JWz1TiF0wsHezNd=Pm@CQ;maN)$4nZgcs)^5ylKIpaeGbuIt65n zZl$mXww&R!E5`$*8uKYe!Be$}8(&^?EZ;XrJmX(0L1Y>1Yb`@HZ(cdhmTrx(Ofp~9 z_AA@eJ^qrH0F&Y&`}~!68l?)mq)&QAA!Xgi;3-x#s!M}E32coW%!-;cKLnObYjDl# zc{nA~UDbWkIVCHvzMbyr>T*akUE1rXd~l;;;+`UJPrLhgp3kz_A%9!L2i^>vneZIT zM!VjJeJPEybX_D(`w51wPP8T~i|8 zH7E4xtwU_G%2Zh5q`ymk$-K55?P$`M9pC5lQ_+1lOwV^dv80-m$mf*n>&+Dkii-Bey-O3qtd+Ui!YlU~s?kGyFaC(A z5m>?DCB2wTr9`l`tS{Zd#4y_+l>Itilzkw)oX%}+;2-VrvL9CWKwPI3?EknoUezLD zMtX1i$V(rM{hQYHo`Pm^nc*cxu12iUSHCWQ#C#f)mbAG=W44W5@Ed@_0JI?%GIE za}MN|ZtFp>Qs0{-M2!^gDWW?}X%(Xg;*2!pQS;we-w?8~ZUKE3=HWRIiSt4e*nEW( zeE*#bV1XtJdu!_Hf=M%Y9tT4?JO&fj22E|!x1@!fTEH0=Vc?#rWo$Ly6GlLe3_L`6G`Fdm~+HR&^xS1g_UM>CH#HdLfOdw4|&&%ETE5;{Zk4^iM6vwGqb z9R@A-OEA*8{jxY>p-{W2@a9e$B*?4B%mdp}dNAy{=u{Q|+TI5*vcy=!;}*^}be18_ z#EuprpsJwR$Wd*TyLw;NA1Z%+R}9f~y9wB+Uz<+Z4nzT#1dh*ET~*mMkO!V(F#$6;;MBA%7)V>TK=HKe zLQBgi)M%ZLJkW|0e>$@EtRg8*8?@JWP_aAZwBXUM)M`yWf*0lC5TW1el^=3sH{4@B zVxc*(eN>GRW+ji9g&DBAjnftYx}_K<2VHD3-i#8$?~~atzs`<-uEzpROt+oC{V_1Z zRqV7M!yROq;U4`aiP*-+Qm$6IL-E|-idf|lB$ ze5X&mxvi@Gz^pORwqqyEVZfAs0xDk7GpD$#&MeE+jE=6hn^-L-l&9Z&qU>9XChyt%a!1Hnx|82X?Z@_lM;;#m^d%h zR1S&tIr@|hF}=4eOaR2Op9h~fpZj5RZeUU#c}dMXtgUv>k<|G-9G#L&I^}430z(r! zTVc}nx25BqKOk%n98h_pUyl~u<}sqV+iY&=$2k+IvOdzIK6mnk`CM;>Q)50SSnPIx zabL1Pb`I|B5l~jJQFNb<#_hd}O2u1k86tC(#j6g?TH*ylnX=f)d1sxow- zf-<8mtSR+r$J7()yXAoUuKl}ZgwifMxlxDlEVAS0!k)=t!{30gB$|~nP^a)PxiVK4@gk> zQJb|MacAVzY_xV%Kc#+)6mD6y@!=XRk?o=HQiSbUI_^EKBSSF+OiDJ#x(BmTUi8N? zl{a=ZQ4&X0X?9*yuU6i;MjeLhQn$ys+u=b!vaAz)|A_B~-7F2esl)p_U3};xOj#$x zu1MH7oeTttFlBwaI+iOhHMmXuGPYFDoUss0Ch)BB+G?!#aRMov zRSPArmF0S)_tFy;_~ge2Wo%$wPix^v26sg`3;oLcNIF!nE8g#^`x<<<#^Wk8OJhbG zPfQux$WwX*pIt;}11%n2o&~TEabaN)qmPDRw0b(a6$4eP};__Ix zB%v?0<`CR5yUnQ@QVe~?Yt;|cSdVsWNqfIo zM&Xkd#=|225&gC%(u=~B@HACR9BFfWwZ;*G_CH(#y9%;`M~!F0Zc7o%B>4iic$8az zTKmV&5-*qetkcE<1@JdJ{AC}vWhvpNiBswgIe(D4P`wgF7D&U-Ukgve?^dwvl+fZl zdQ)!0`*Cs6h?uxbYlu5>UmL^qMSzS$HN^(&NT|l$`shI=vgic1v_|V<&cL$vz)(dT z@6*BO&JLiWMxC>qM(pW{Lh6x7h>O#Cw$`;;F!9j)mnWRqcN;ve$zo(Prv%d8!)ohLwcccR4tk>UZFyhDR@(>Q6LXGn$prB-PK1@DfjX zW=s!nGUz)4hxwIoue&|Hz7(SU=%9Cb4uO4?7_v-sGOO%<_dqA4F8~XmI?6#jhaBMN zk$vWT{+16S)Dpaxw^OA$H>1#y5^V8=Ckq&1H;peg5mK2`bhM2IEF<6V}WE^w$b?wi!4v(<4@5Cg$%11A=oVI;zY z;3p`61{nYtcs^pOC&P&1d?x~Hj(?4=5Y%w1l^iD3%%?3_cptKZ=5Z%1j1duy(9S>a zrrmO|gA`IcyN?@CQM+hUh>wj$Oe(*oZ7OyM-Qo``K6aFbF=iKCI9qW72zlAgVVO}x ziC%}On$_NC`-$)B756I+R%An#Acm>d*w3=6#Gp7cvE{fF*Odj|g+O*? 
z5}F9Mmb~ys%puHM!*E0t-89K#d{eet@693@I|xzPtf~O;A@WR9Swl9?Oz7C}8X?t&+{nepQD=0nBrp#z+Hr6$tJ z2DhO^s0X*7@B+n*dp1zYUo+fK4?GKP8>IF8!b=tZNHA+mCyxcUG9K{GmyTXe!ZB)K6_h)(IV2#qW8;uojBJr@p;`eJuL*3WUjTa^7Hs@w>_yJAE+5!e_Q?G z9{-N%?ZQOZygt7Am{^V7E8SbhcSaQA5ULi6eysW-Mn7cJvAcR~%;)V=%GqG3F}(Fq zBe1gg-GPO|M@fyca&?=^8If9m8~y%P-KAX)dzxGC7`keO-eACT;i5-eBZlb*AWi@R z!1O-229Nu-V%Osz1<~pG=)fjBO8yTwkz;mU@4GFC$p0AeN0!&l7bE7@EEb6VStZAd znM-*AMpy~#y$e4o9Fimap8u$l^IKZ^YOfAZZ$`=n!7o`;c5WsY7PBR8SEjHnbqkL| z?`eXjBKW}3)mgwECcB~n=U1Oi@xSgP?>B3#ZKP^hhd|)V^!z{P9+n z)g+X>fFsxiT{#m`$eMNWgP~i!+W}QlgGJKmCP;Gsu*_-%748z+rIKtvS<5K|F7%(Or~PuZ?38k}z` zYZ_4VwLqAMO4Qz29<%@2DU5*VDZg9HQa#=k1|_Bp`w<-d5WX{v#cZfv@Yn=ojzlq| z0;3afU*~Gm=nix}ff7AH9;b!QeQr;39vf@&9KTSy-hJc=t`n z>83YJ(Yu;ytbh}%BLUIB56#tH_ z1=Q#mg(V#&I@5Y%5X0g%wF0@-Fn^&6o`i2S1}pf%?AMB4A!CU6^u?4Upg2#j(!RtP zQ5|@aefy1cc1S3m1uX}d`)tQ*VphYz7`opWoopZ#`3l9Ej!jyEh!}w_^G7N5TgQ-S>5N2xi2uVKJsG-BAfC2&s( z_Ss~aqoe`uIT$L>3a&-1g4pk*Kn8z$jx5B8YG7z_EQ<3x)x0hMCIBg;K@7d{A2SQ` zECYcLPQDAaJs_PYHNb{*f&lg{5b@;efEOP|x>p2%D`@zzO7paywri`ONHxYzosg!H z`6_3gjYpQaYoCE6+Eog(owW%jXRJD|7gfrdM^xOEziDKQ_Sse4k8`w={zrp$cnB+1 zB(~G0O+KEXqP129qR|)Jh?Pm;qnAlZ1O+Q(uW$*Jh zSP4LHQ}D%vb4!4{Jj6j$^Ab8Fuq_Qghl|WMh>jVmA^g;U)QwTb^H-vqg$_~KfKn`X zTR^OBV2Ncb-FRXPtQH0aWQT#HkyS2pGf?uUdghDSu@|!!1G=W`*Hl&7yzXJzaJmNY zv}_y&2Lg%nWb@`CflK7MxbMmp(u8{D1)b8DGX;!AQiu@nLsG8khWLEzJ#wZP$+fko z1-4#kbAU^^M4POMeyRg4TSyHCyH8Pl_uq}zgz9vxXBDCE4^ibm4sJ(B!#B&hJaxqrC_FBap-A81vjFuxWQ=@ z(@D!GxBb4_Rb@SESN{dQvolkbwH{af_wNSDVY-T(UCI~sAS~<}HDyEBoP^HIsHB=L zf%>mqGhy+7L)ot7h!UfX34azdY8*L@_NBP0AyV=EmNgYfUDmu;pzYCKfqY88J|F|h z7yvC#pzgUkcf7sCH(`Z6Ic_sHZXi2b)r2vP<2o?_aiuP7<*TX6!=}aAdRI`sv9in}oSa^2`&N@zAH|CC@gZn^{5Ics4GYoy~@wXcasy{-L_ z`ru&+1b(8^jn0fce#2)e*hOngMJ^4|tIya0@O5GZrD#r^;mbW71KXRsW59VQf~Bj6 zO1Vz)TUU!PH?-ae#e{jdKz?6 z@x~?p9MbM#I7p%5&B9K6(JdBhM9-#JL`cP|X3noT6vTj!u4do}{m6>thgi~a{1lr~ z7m53INED$$I^e&V+sC@zXG{m?vzh@KZLaX31%J$GbFEHg{HKv>d+6)VomV8^M;XS- z%y?&NI5i;JAO1GK!!3Db71XE4786I2mpc-%ZniaU>v!`B}U_|k&R3G8_!tqO4@^P2oop+0^(LUH7x4JQ&O*Ri6qWxVuFNb`JZlY}9B8JIUdO23{4D`n!U6w(~*U`W%fCm}MErcnChbN)DZh6n(RFbeQOFX4$%9yqjV-<<_a0rdP^u2RsTn#Ml3jCY;& zPnZu}chUd`;WGMGqxKgLrdy%|4j$l{j;g9>IL4zmXN0ng{c-*tQz5W{Jv8F9txYAj z17aUu4VY01=u8y{ShkqbghTNOF$Z`n?Pmb;w^h98o%jKhA_|Ig3K47yf}g+p>o$rs zh^A;bH=pf8LTzkpdcX3I``jx);m=!;9B_LSsX>EB*~Ro8u1j3F}%3}InKd_x47VsYX}mm)-?1b~OU>I(+h z8&3_G3+^#Q{!wzAP%aiNzBSM_egwdqvo4iiH>sllEGtAi(j~nf5Q=LFnZM)tzpR>g zD8WzT^w3hH{gMM*1cX}H?Zqqf(3%LhN}UkqOfH6iKlg$Wp%!mL=D17yvh|CcDnvg; zimQ|ZOrNFv@Q+f*@)WTQ$~E?0t@)SQ(!Bf^{ZTLhF{qva`jaA-AR~P_KYueQm)`xW z%l5-!#erkc_7n~70{D~6N2O%Z7wBq=%g(-#`ky4%g%Jo~U5eb|FYi+q1l-5!se#KA zQ(_?vASirWvwZ~-e_d42B7Q$P>|+6;G{Y~+&AEuPl#8e26P{qgT8xvKrVh4hpq_xKRa~gzhjF2@ranjiDV9NC2AG5ArSQcP&dUoWL4fjiW?qC z4%12r|4IcxHGptYl8zBP)&QIj#Zb0Ci-*&z9~P>+sQCRbA|zINW%&vr>F{J|06+Z` z1HdC>h{&$^T`2T__N}2nS!bYY3LuuB16n@3_HX)s87FWV>u~~yyQpNah5k`z7axmr zwK*c^rG;MN!QzFvOa{M*rv8Ws@%UUW^OI%2ahn99H-XAJ`YwqJ~D-B@x+>sfF&Od*#th_9dQot^KY=cRN<36Cn zm{yHF54VIU`Sj#wh2pJL#~^Had6=We#;joNr_U7rh>LaX2snO1{u>|{c*Z|FWk#;fTFyV*EUHSRS;$FlRSXi=uo5lC{qn^1$!*v zM)GMKwTyqZnIYn57Ym2Q)wU|36XL?32P9R_Dq^YnxGn!7@;~tY5tQ4=pY8q#%FlS) z|4t$_pDLp8(+3d)M`6-@7!*ms+8Qj>i2n}}kVN`zOgQ+~7lSi^42Zk?_k2Xyj=+yy z`^;Ge;}^U3>kVq>|DM{2qB2)A2xbIwIXB)63MxQWEo$+&Q!v&39Qn~mAv(~e(hHo* zT}llN^M&slP~`FbF$rucD^*SAkLcO?DE&4GY;16t@Ap?aA6~esbtzA(JT}hczJ1fd z09*%10Go>BZhj!c4dM2LoD?Xa5q!_(0D--saIdy(HY9C!M{fa2U+AHEVi94}fZ zg*+-Ism6lwuf*S93{5{RqCGpf95^@w<%zpUQ{jJ&^!vN&qZ~XShDF>^=V1l=-;mwW zB!T5TEn$oQqog_oh?^6^42?fEe~+c&X0iP*k$&xN$v;TSSS6mdAN#7oI+UWl0L>x` 
zzzYv=j$>+ftcFIslvZ!OlT6WnA_9>yOtg@MZ%B1q$va&P_l35Hn_%S9W5EduRf+%X zRSP}7(00mCG6TQ037QR6r$=X0R2fDhfmO7t|0l!Ti~OFgtEjjvQ8R)$E@=#HU3}~vTKtJ%$gkv-S5e2kYz(Mrt zA|QAnoMd${eHs|2;`RdJB0{-VS9XOVe;o=a0RLGZBBZuYCgTe(#q-iJxsp1DP=MEwevkd}lFGK&| zKlvMw3yA|^P9hNJs7a!E=j8vEP~h}M!0}%g;q(SdjH>g^h<_yS{t5s;?pH0EpBt62t7SL>gwtVw}$tG zlL`$t<#g%AC+u0fvo=);iru=G}k7R1eto z0f{Vn6`|$l9tv1+9`^5ptnXz0JdboSkr#ICpBz7-8oU`G)_mid@L3SZimASM&-3?n z_;n?Q@HfLK-!vNid%e@?f5csg5c-htyEgxGiV?`tqdVO)15BGyA>YS8Csd9Y0dW1k zt!i=A*RcziG)1AhQAtxkt3Q^Vdk2=Kz_>4tY(he}b0NgUH(RIs`z$DcT39d3k zS6{h&VMHU2;#}fTgI5A1t#8e$y}D{20T605->=|{JarLA1@on}UyAb87pLNV^PnT- zu&K&pHJe{OWDx+=2`j4Jl@~}Qxvt2(`ZL+51N1d2yl{~C67vB6UG>tZedBGs>caoD zb^4U7Lgp{T^T__sJ^#y@)V2>6|kF7i25rO%pl{;x~Zgl`P^)Z<{nr<4s& zUdAmp0(paSEp~T<`TK%QX|vs#Z+s$7da7*C5jOG#WEtQ6$$6*?6Q9I{UzBiN4fHY7 z&#a}qndui5w(;qqi&Uyunj34;cf9mc&!I-Mq)MmQBAs&V7l|^7FJLA5ktwcU#aD;R zMa15){zzT_gWUkxJh(8+D#L8~zsvxG-%rkvIdk2JRDuLnO5baVs;X&gJ83>DR^RD; z+(}T5X#w6U$qd#NFb(lKZ~3S`12NazQ8l_+_lkb zp^GUfMOf$b5B9F^*1yvEA9}~+080;W zc~-;k4Sq>x%wymJbD7pF+BhQE$Nb=YZuXs&ardi|4KXqCNS2hA z{f#9lj3{~(XFqmk|7V=G&3RMEX-0jxpv%H?CB1Dulp6B-&5-o*(}WVhdmebN zks*K{P>07IjJ3t;K&yANl7;nZG`~$70R1#>0O-o=r98g$(pXF{d`a11L=Od(SnG*K zVoZ1xd)iTwUW5D7$UJB^sn2C%ErR3tN3rdd_xbAuP~Ip&(*J{|lr^5b?QPWcJ6C!8!t5r# zN7-NhFjeK)vCj`Y%7vFAs?F;ydtUt(d*zLi_n0KGN-vSS^C|D*vcFg50&m^aN||y@ z0^{k2ox9-I$Up7pHdJ}^-J9_7<-Mcr_7t}O{?rWEV^FBkw@qYS!8lqaME5-W$Nq9s z1uCOaeV(xuk0D`kHrFTQEo*{y(5gG|(uR{+w`(0%-En!vbuI1^_mVrMsS*{cI86;w zJrK-27W6D9t7m9%_H0<+^j_i(_NL^xed(X}qO$vF^M9iX|hx^n!=rlhM zwL-tleu;Y(v>%QOtDAntt$8nl%-*OtS3WWCHQ}>|%Sw@rerq&)o@`$RA3Og|O+ZF; zitQUdfO5X;Vz`V5h&VxINN6~BgqM(kI#2uaQua6Jp6Xau7F^18I0r+YCi1s<&xI1o zu_4Z5#X%{oIoF8z<3-s;O)K=Jp_y)(Dj7)XlP(#V^(|}}ZCu>k3MpY)A3Qr85nnj4 zW5Ih7ID(C$)gWdbS)5O+TSp2fY-Fq8ReX?r&7m_h1q4m(oM$t7OZ@d!T|qd7rCY7X zpH|nOnP7_gYD*Y2GM?IirWKd)G7vKqhQu59V5^RdLr<%6SRFWRy2RQ7 zjE${ml-E6#S2p=bBFfO~y%p3~i$XX%no=l#WI;mGq7iZ9n*)!9-{5h3ytdWwF=Z9m zi~W@^s1t(~VpG)o6ErO6xYYdQXhP_|Q;#Y* zVm1EU^6o~NWDErxT|POXPw7-{CEx#lv=jIIK`k$>K92;D7ucFKcoV?+YEex=-iC0lM%)^ zq86?16yla-X9{1mbKiL&7pJ#PrpA=zwF`9`l9M3e-Y) zsUPIrTY=A?>r=TGmQ&0Vb)Cr) zOp88xq{=8FKTJOa;+(aD2gN>9pJ4&Fd%%7HklAB*O+$?%Bc~EwfGMbNeCHErUn#mj zj(YN~iF5+X(s`V#P`05O>21#OdrXSq>gp7c;wCTnqCI~_Bik#ZE)~1`V@D0aB&Iq@ zCq8{iChXPxuxf)0QI{T^33M#0x^A1hG;+WAx20wJ)N!C*>=@CQNCnC{prnb-N7_Gn|8? 
[GIT binary patch: base85-encoded literal blob omitted — not human-readable]
literal 0
HcmV?d00001

diff
--git a/task-guide.md b/docs/task_guide.md similarity index 88% rename from task-guide.md rename to docs/task_guide.md index c750ab5a1c..f3b2c986ba 100644 --- a/task-guide.md +++ b/docs/task_guide.md @@ -142,41 +142,6 @@ def doc_to_target(self, doc): Understand that the strings from `doc_to_text` and `doc_to_target` will be concatenated together to build up labeled examples in the k-shot setting where k > 0. Design with that in mind 👍. -### Formatting Prompts - -If you'd like to prepend your few-shot examples with a natural language description or provide a lone custom prompt for a zero-shot task, you can do so on a per-task basis via the `description_dict` arg of `evaluator.evaluate` which is accessible from the `evaluator` module. This `description_dict` must adhere to the following key-value structure: - -- **key**: the task name as specified in the lm-eval-harness task registry (see the following section on task registry). -- **value**: the corresponding description/prompt for the task identified by **key**. - -E.g. - -```python -description_dict = { - "task_name_1": "fewshot description", - "task_name_2": "fewshot description", - ... -} -``` - -One can also interface with `evaluator.evaluate`/`evaluator.simple_evaluate` from a higher level by simply passing a JSON file path to the `description_dict_path` arg of the command-line interface (CLI) programs, `main.py` and `write_out.py` . The JSON file pointed to should be structured the same way as the aforementioned `description_dict`. E.g. for some file at `/your/path/descriptions.json` you might have: - -```json -{ - "cycle_letters": "Please unscramble the letters into a word, and write that word:", - "copa": "Given a premise and one alternative with a causal relation to the premise and another without, choose the more plausible alternative" -} -``` - -which can then be used, for example, in the `main.py` CLI as: - -```python -python main.py \ ---tasks cycle_letters,copa \ ---description_dict_path /your/path/descriptions.json \ -... -``` - ### Registering Your Task Now's a good time to register your task to expose it for usage. All you'll need to do is import your task module in `lm_eval/tasks/__init__.py` and provide an entry in the `TASK_REGISTRY` dictionary with the key as the name of your benchmark task (in the form it'll be referred to in the command line) and the value as the task class. See how it's done for other tasks in the [file](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/__init__.py). From 70f92733af76ae1c8ad1524f4c1712f7ff9c05ea Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Fri, 17 Dec 2021 01:41:36 -0500 Subject: [PATCH 16/65] Fix doc reference --- docs/description_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/description_guide.md b/docs/description_guide.md index c97a82ae0c..3627e23e6b 100644 --- a/docs/description_guide.md +++ b/docs/description_guide.md @@ -5,7 +5,7 @@ Task descriptions provide in-context task instruction for your language model. If you'd like to prepend a natural language description to your few-shot examples and prompt, you can do so on a per-task basis via the `description_dict` arg of [`evaluator.evaluate`](../lm_eval/evaluator.py). This `description_dict` must adhere to the following key-value structure: -- **key**: the task name (`str`) as specified in the lm-eval-harness task registry (see the following section on task registry). 
+- **key**: the task name (`str`) as specified in the lm-eval-harness [task registry](../lm_eval/tasks/__init__.py). - **value**: the corresponding (`str`) description/prompt for the task identified by **key**. ```python From d34ae3cfc9fba7f17da48c8a33ea87f2a9d27995 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Tue, 21 Dec 2021 15:38:49 -0500 Subject: [PATCH 17/65] Add `description_dict` to results config --- lm_eval/evaluator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 6feb356788..a838dcd853 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -62,7 +62,8 @@ def simple_evaluate(model, model_args, task_names, "device": device, "no_cache": no_cache, "limit": limit, - "bootstrap_iters": bootstrap_iters + "bootstrap_iters": bootstrap_iters, + "description_dict": description_dict } return results From 8ebe36b25f81be9a9cd237156a2037bf750d6189 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Tue, 21 Dec 2021 15:48:03 -0500 Subject: [PATCH 18/65] Add positional arg deprecation decorator --- lm_eval/base.py | 1 + lm_eval/evaluator.py | 12 +++++++++++- lm_eval/tasks/prost.py | 8 +++++++- lm_eval/tasks/truthfulqa.py | 15 +++++++++++++-- lm_eval/utils.py | 17 ++++++++++++++++- scripts/cost_estimate.py | 10 +++++++++- tests/test_evaluator.py | 20 ++++++++++++++++++-- tests/test_version_stable.py | 11 ++++++++++- 8 files changed, 85 insertions(+), 9 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index 442d8d5d8e..5492ba9cfb 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -457,6 +457,7 @@ def fewshot_description(self): DeprecationWarning) return "" + @utils.positional_deprecated def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert not provide_description, ( "The `provide_description` arg will be removed in future versions. To prepend " diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index a838dcd853..02fb71285c 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -6,8 +6,10 @@ import lm_eval.tasks import lm_eval.base import numpy as np +from lm_eval.utils import positional_deprecated +@positional_deprecated def simple_evaluate(model, model_args, task_names, num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000, @@ -51,7 +53,14 @@ def simple_evaluate(model, model_args, task_names, task_dict = lm_eval.tasks.get_task_dict(task_names) - results = evaluate(lm, task_dict, False, num_fewshot, limit, description_dict=description_dict) + results = evaluate( + lm=lm, + task_dict=task_dict, + provide_description=False, + num_fewshot=num_fewshot, + limit=limit, + description_dict=description_dict + ) # add info about the model and few shot config results["config"] = { @@ -69,6 +78,7 @@ def simple_evaluate(model, model_args, task_names, return results +@positional_deprecated def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_iters=100000, description_dict=None): """Instantiate and evaluate a model on a list of tasks. diff --git a/lm_eval/tasks/prost.py b/lm_eval/tasks/prost.py index d53ece825b..7bcbf1e917 100644 --- a/lm_eval/tasks/prost.py +++ b/lm_eval/tasks/prost.py @@ -38,7 +38,13 @@ def has_test_docs(self): def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.' 
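The description workflow documented above can be exercised end to end. Below is a minimal sketch, assuming the harness API as it stands once `provide_description` becomes optional later in this series; the `copa` task name and the description text are illustrative only:

```python
# Minimal sketch of the `description_dict` flow. Assumes the lm-eval-harness
# APIs from this patch series; "copa" and the description text are examples.
from lm_eval import evaluator, models, tasks

lm = models.get_model("gpt2").create_from_arg_string(
    "", {"batch_size": None, "device": None}
)
task_dict = tasks.get_task_dict(["copa"])
description_dict = {
    "copa": "Given a premise and two alternatives, choose the more "
            "plausible alternative."
}

results = evaluator.evaluate(
    lm=lm,
    task_dict=task_dict,
    num_fewshot=0,
    limit=10,
    bootstrap_iters=10,
    description_dict=description_dict,
)
print(results["results"])
```

Loading the same dict from a JSON file is exactly what `--description_dict_path` does in the `main.py` and `write_out.py` CLIs.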
- return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) + return super().fewshot_context( + doc=doc, + num_fewshot=num_fewshot, + provide_description=provide_description, + rnd=rnd, + description=description + ) def _convert_standard(self, doc): out_doc = { diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index 9fe941f614..6322625401 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -87,7 +87,13 @@ def doc_to_target(self, doc): def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) + return super().fewshot_context( + doc=doc, + num_fewshot=num_fewshot, + provide_description=provide_description, + rnd=rnd, + description=description + ) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of @@ -219,7 +225,12 @@ def doc_to_target(self, doc): def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd, description) + return super().fewshot_context( + doc=doc, + num_fewshot=num_fewshot, + provide_description=provide_description, + rnd=rnd, + description=description) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of diff --git a/lm_eval/utils.py b/lm_eval/utils.py index c3d718a500..aca7791181 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -1,6 +1,7 @@ import os import re import collections +import functools class ExitCodeError(Exception): @@ -138,4 +139,18 @@ def get_original(self, newarr): assert all(cov) - return res \ No newline at end of file + return res + +def positional_deprecated(fn): + """ + A decorator to nudge users into passing only keyword args (`kwargs`) to the + wrapped function, `fn`. 
+ """ + @functools.wraps(fn) + def _wrapper(*args, **kwargs): + if len(args) != 0: + print(f"WARNING: using {fn.__name__} with positional arguments is " + "deprecated and will be disallowed in a future version of " + "lm-evaluation-harness!") + return fn(*args, **kwargs) + return _wrapper diff --git a/scripts/cost_estimate.py b/scripts/cost_estimate.py index 4339b8dbd2..f08d2f37f2 100644 --- a/scripts/cost_estimate.py +++ b/scripts/cost_estimate.py @@ -51,7 +51,15 @@ def main(): values = [] for taskname in task_list.split(","): lm.tokencost = 0 - evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, False, 0, None, bootstrap_iters=10) + evaluator.evaluate( + lm=lm, + task_dict={taskname: tasks.get_task(taskname)()}, + provide_description=False, + num_fewshot=0, + limit=None, + bootstrap_iters=10, + description_dict=None + ) print(taskname, lm.tokencost) values.append([taskname, lm.tokencost, lm.tokencost / 1000 * 0.0008, lm.tokencost / 1000 * 0.0012, lm.tokencost / 1000 * 0.006, lm.tokencost / 1000 * 0.06]) diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py index 85e1449f08..ad12b053da 100644 --- a/tests/test_evaluator.py +++ b/tests/test_evaluator.py @@ -48,8 +48,24 @@ def ll_perp_fn(reqs): lm.loglikelihood_rolling = ll_perp_fn limit = 10 - e1 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None) - e2 = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10, description_dict=None) + e1 = evaluator.evaluate( + lm=lm, + task_dict=task_dict, + provide_description=False, + num_fewshot=0, + limit=limit, + bootstrap_iters=10, + description_dict=None + ) + e2 = evaluator.evaluate( + lm=lm, + task_dict=task_dict, + provide_description=False, + num_fewshot=0, + limit=limit, + bootstrap_iters=10, + description_dict=None + ) # check that caching is working assert e1 == e2 diff --git a/tests/test_version_stable.py b/tests/test_version_stable.py index d230112de1..a19cbf3f90 100644 --- a/tests/test_version_stable.py +++ b/tests/test_version_stable.py @@ -99,5 +99,14 @@ def greedy_until(reqs): lm.greedy_until = greedy_until limit = None - result = evaluator.evaluate(lm, task_dict, False, 0, limit, bootstrap_iters=10) + result = evaluator.evaluate( + lm=lm, + task_dict=task_dict, + provide_description=False, + num_fewshot=0, + limit=limit, + bootstrap_iters=10, + description_dict=None + ) + assert_target(f"{taskname}-v{task_class.VERSION}-res", result) From aea963a10c0d4baa43c6585ab11771e1ebce4415 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Tue, 21 Dec 2021 15:49:55 -0500 Subject: [PATCH 19/65] Format for consistency --- lm_eval/tasks/truthfulqa.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index 6322625401..0eeb364e0f 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -230,7 +230,8 @@ def fewshot_context(self, doc, num_fewshot, provide_description, rnd, descriptio num_fewshot=num_fewshot, provide_description=provide_description, rnd=rnd, - description=description) + description=description + ) def construct_requests(self, doc, ctx): """ Uses RequestFactory to construct Requests and returns an iterable of From 5855f482c48f7f23efac13bcd38370e7ffc17470 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Thu, 23 Dec 2021 18:01:53 +0100 Subject: [PATCH 20/65] Allow users to specify en headqa or es --- README.md | 3 ++- lm_eval/tasks/__init__.py | 3 ++- lm_eval/tasks/headqa.py | 
9 +++++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index fc6c4e4ba7..53cb81b019 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,8 @@ To implement a new task in eval harness, see [this guide](https://github.com/Ele |openbookqa |✓ |✓ |✓ | 500|acc, acc_norm | |squad2 |✓ |✓ | | 11873|exact, f1, HasAns_exact, HasAns_f1, NoAns_exact, NoAns_f1, best_exact, best_f1| |race |✓ |✓ |✓ | 1045|acc | -|headqa |✓ |✓ |✓ | 2742|acc, acc_norm | +|headqa_es |✓ |✓ |✓ | 2742|acc, acc_norm | +|headqa_en |✓ |✓ |✓ | 2742|acc, acc_norm | |mathqa |✓ |✓ |✓ | 2985|acc, acc_norm | |webqs |✓ | |✓ | 2032|acc | |wsc273 | | |✓ | 273|acc | diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 561c7fbde0..eb75aea500 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -132,7 +132,8 @@ "squad2": squad.SQuAD2, "race": race.RACE, # "naturalqs": naturalqs.NaturalQs, # not implemented yet - "headqa": headqa.HeadQA, + "headqa_es": headqa.HeadQAEs, + "headqa_en": headqa.HeadQAEn, "mathqa": mathqa.MathQA, "webqs": webqs.WebQs, "wsc273": wsc273.WinogradSchemaChallenge273, diff --git a/lm_eval/tasks/headqa.py b/lm_eval/tasks/headqa.py index 3c66dc064b..d59a88df23 100644 --- a/lm_eval/tasks/headqa.py +++ b/lm_eval/tasks/headqa.py @@ -2,10 +2,9 @@ from lm_eval.base import MultipleChoiceTask -class HeadQA(HFTask, MultipleChoiceTask): +class HeadQABase(HFTask, MultipleChoiceTask): VERSION = 0 DATASET_PATH = "head_qa" - DATASET_NAME = None def has_training_docs(self): return True @@ -31,3 +30,9 @@ def fewshot_description(self): def doc_to_text(self, doc): return doc["query"] + +class HeadQAEn(HeadQABase): + DATASET_NAME = "en" + +class HeadQAEs(HeadQABase): + DATASET_NAME = "es" \ No newline at end of file From cdab2c033c75f22c5a0dc2b1c5e004f8ae4413cc Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Thu, 23 Dec 2021 19:33:00 -0700 Subject: [PATCH 21/65] headqa: maintain backwards compatibility --- lm_eval/tasks/__init__.py | 1 + lm_eval/tasks/headqa.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index eb75aea500..af09702de2 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -132,6 +132,7 @@ "squad2": squad.SQuAD2, "race": race.RACE, # "naturalqs": naturalqs.NaturalQs, # not implemented yet + "headqa": headqa.HeadQAEsDeprecated, # for backwards compat - headqa used to default to es "headqa_es": headqa.HeadQAEs, "headqa_en": headqa.HeadQAEn, "mathqa": mathqa.MathQA, diff --git a/lm_eval/tasks/headqa.py b/lm_eval/tasks/headqa.py index d59a88df23..18bb77519f 100644 --- a/lm_eval/tasks/headqa.py +++ b/lm_eval/tasks/headqa.py @@ -35,4 +35,10 @@ class HeadQAEn(HeadQABase): DATASET_NAME = "en" class HeadQAEs(HeadQABase): - DATASET_NAME = "es" \ No newline at end of file + DATASET_NAME = "es" + +# for backwards compatibility +class HeadQAEsDeprecated(HeadQABase): + DATASET_NAME = "es" + + print("WARNING: headqa is deprecated. Please use headqa_es or headqa_en instead. 
See https://github.com/EleutherAI/lm-evaluation-harness/pull/240 for more info.") \ No newline at end of file From 22c4124fcc91d9552d1b71d9ae9655da7ef8d390 Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Thu, 23 Dec 2021 19:36:22 -0700 Subject: [PATCH 22/65] Add new testdata --- tests/testdata/headqa_en-v0-loglikelihood | 1 + tests/testdata/headqa_en-v0-res.json | 1 + tests/testdata/headqa_es-v0-loglikelihood | 1 + tests/testdata/headqa_es-v0-res.json | 1 + 4 files changed, 4 insertions(+) create mode 100644 tests/testdata/headqa_en-v0-loglikelihood create mode 100644 tests/testdata/headqa_en-v0-res.json create mode 100644 tests/testdata/headqa_es-v0-loglikelihood create mode 100644 tests/testdata/headqa_es-v0-res.json diff --git a/tests/testdata/headqa_en-v0-loglikelihood b/tests/testdata/headqa_en-v0-loglikelihood new file mode 100644 index 0000000000..11f07878fb --- /dev/null +++ b/tests/testdata/headqa_en-v0-loglikelihood @@ -0,0 +1 @@ +09da45119b12a0144e3081f8fb790c2a22af7b9c3aac42f54423d348a711fbf5 \ No newline at end of file diff --git a/tests/testdata/headqa_en-v0-res.json b/tests/testdata/headqa_en-v0-res.json new file mode 100644 index 0000000000..6ac5a9c0b8 --- /dev/null +++ b/tests/testdata/headqa_en-v0-res.json @@ -0,0 +1 @@ +{"results": {"headqa_en": {"acc": 0.23559445660102116, "acc_norm": 0.2447118891320204, "acc_norm_stderr": 0.008211629406841468, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_en": 0}} \ No newline at end of file diff --git a/tests/testdata/headqa_es-v0-loglikelihood b/tests/testdata/headqa_es-v0-loglikelihood new file mode 100644 index 0000000000..9129d834b6 --- /dev/null +++ b/tests/testdata/headqa_es-v0-loglikelihood @@ -0,0 +1 @@ +767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f \ No newline at end of file diff --git a/tests/testdata/headqa_es-v0-res.json b/tests/testdata/headqa_es-v0-res.json new file mode 100644 index 0000000000..0964db9bbb --- /dev/null +++ b/tests/testdata/headqa_es-v0-res.json @@ -0,0 +1 @@ +{"results": {"headqa_es": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_es": 0}} \ No newline at end of file From 7b2b2a23689cab7faf90fdf05105a235713d57ea Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Thu, 23 Dec 2021 20:10:16 -0700 Subject: [PATCH 23/65] Make simple_evaluate take LM and Task objects directly too --- lm_eval/evaluator.py | 30 +++++++++++++++++++----------- lm_eval/tasks/__init__.py | 18 ++++++++++++++++-- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 02fb71285c..10648dcfb1 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -10,18 +10,19 @@ @positional_deprecated -def simple_evaluate(model, model_args, task_names, +def simple_evaluate(model, model_args=None, tasks=[], num_fewshot=0, batch_size=None, device=None, no_cache=False, limit=None, bootstrap_iters=100000, description_dict=None): """Instantiate and evaluate a model on a list of tasks. - :param model: str - Name of model, see lm_eval.models.get_model - :param model_args: str - String arguments for each model class, see LM.create_from_arg_string - :param task_names: list[str] - List of task names + :param model: Union[str, LM] + Name of model or LM object, see lm_eval.models.get_model + :param model_args: Optional[str] + String arguments for each model class, see LM.create_from_arg_string. + Ignored if `model` argument is a LM object. 
+ :param tasks: list[Union[str, Task]] + List of task names or Task objects :param num_fewshot: int Number of examples in few-shot context :param batch_size: int, optional @@ -42,16 +43,23 @@ def simple_evaluate(model, model_args, task_names, random.seed(1234) np.random.seed(1234) - lm = lm_eval.models.get_model(model).create_from_arg_string(model_args, { - 'batch_size': batch_size, 'device': device - }) + assert tasks != [], "No tasks specified" + + if isinstance(model, str): + if model_args is None: model_args = "" + lm = lm_eval.models.get_model(model).create_from_arg_string(model_args, { + 'batch_size': batch_size, 'device': device + }) + else: + assert isinstance(model, lm_eval.base.LM) + lm = model if not no_cache: lm = lm_eval.base.CachingLM( lm, 'lm_cache/' + model + '_' + model_args.replace('=', '-').replace(',', '_').replace('/', '-') + '.db' ) - task_dict = lm_eval.tasks.get_task_dict(task_names) + task_dict = lm_eval.tasks.get_task_dict(tasks) results = evaluate( lm=lm, diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 17736f982e..1f8b9b36f0 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -1,6 +1,8 @@ from pprint import pprint +from typing import List, Union import sacrebleu +import lm_eval.base from . import superglue from . import glue @@ -232,8 +234,20 @@ def get_task(task_name): raise KeyError(f"Missing task {task_name}") -def get_task_dict(task_name_list): +def get_task_name_from_object(task_object): + for name, class_ in TASK_REGISTRY.items(): + if class_ is task_object: + return name + + # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting + return task_object.EVAL_HARNESS_NAME if hasattr(task_object, "EVAL_HARNESS_NAME") else task_object.__name__ + + +def get_task_dict(task_name_list: List[Union[str, lm_eval.base.Task]]): return { task_name: get_task(task_name)() - for task_name in task_name_list + for task_name in task_name_list if isinstance(task_name, str) + } + { + get_task_name_from_object(task_object): task_object + for task_object in task_name_list if not isinstance(task_object, str) } From d86aabc4aa78406ee22114752790287d4fa18fce Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Thu, 23 Dec 2021 20:22:07 -0700 Subject: [PATCH 24/65] more changes --- lm_eval/evaluator.py | 10 ++++++---- lm_eval/tasks/__init__.py | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 10648dcfb1..95fe42d9e0 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -22,7 +22,7 @@ def simple_evaluate(model, model_args=None, tasks=[], String arguments for each model class, see LM.create_from_arg_string. Ignored if `model` argument is a LM object. :param tasks: list[Union[str, Task]] - List of task names or Task objects + List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise. 
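In practice the new polymorphic arguments look as follows — a hedged sketch against the patch-23/24 API. Note from the code above that the caching branch still concatenates the model *name* into a cache path, so an instantiated LM should be paired with `no_cache=True`:

```python
# Sketch: pass an instantiated LM to `simple_evaluate` instead of a name
# string (per patch 23). Unregistered Task objects may also be mixed into
# `tasks`; they report under EVAL_HARNESS_NAME or their class name.
from lm_eval import evaluator, models

lm = models.get_model("gpt2").create_from_arg_string(
    "", {"batch_size": None, "device": None}
)
results = evaluator.simple_evaluate(
    model=lm,          # LM object; `model_args` is ignored in this case
    tasks=["lambada"],
    num_fewshot=0,
    no_cache=True,     # the caching path assumes `model` is a string
)
```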
:param num_fewshot: int Number of examples in few-shot context :param batch_size: int, optional @@ -64,7 +64,6 @@ def simple_evaluate(model, model_args=None, tasks=[], results = evaluate( lm=lm, task_dict=task_dict, - provide_description=False, num_fewshot=num_fewshot, limit=limit, description_dict=description_dict @@ -87,13 +86,13 @@ def simple_evaluate(model, model_args=None, tasks=[], @positional_deprecated -def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_iters=100000, description_dict=None): +def evaluate(lm, task_dict, provide_description=None, num_fewshot=0, limit=None, bootstrap_iters=100000, description_dict=None): """Instantiate and evaluate a model on a list of tasks. :param lm: obj Language Model :param task_dict: dict[str, Task] - Dictionary of tasks + Dictionary of tasks. Tasks will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise. :param provide_description: bool Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method :param num_fewshot: int @@ -111,6 +110,9 @@ def evaluate(lm, task_dict, provide_description, num_fewshot, limit, bootstrap_i # TODO: todo: implement proper description-providing system assert not provide_description # not implemented. + if provide_description is not None: + # nudge people to not specify it at all + print("WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict") task_dict_items = [ (name, task) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 1f8b9b36f0..1759149b43 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -240,7 +240,7 @@ def get_task_name_from_object(task_object): return name # this gives a mechanism for non-registered tasks to have a custom name anyways when reporting - return task_object.EVAL_HARNESS_NAME if hasattr(task_object, "EVAL_HARNESS_NAME") else task_object.__name__ + return task_object.EVAL_HARNESS_NAME if hasattr(task_object, "EVAL_HARNESS_NAME") else type(task_object).__name__ def get_task_dict(task_name_list: List[Union[str, lm_eval.base.Task]]): From a34bbe695d8da0eaf8e488b082efd8ad1ffe1745 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Fri, 24 Dec 2021 02:36:34 -0500 Subject: [PATCH 25/65] Remove more `provide_description` uses --- lm_eval/base.py | 43 +++++++++++++++++++++++++++++++--- lm_eval/evaluator.py | 1 - scripts/write_out.py | 1 - tests/test_description_dict.py | 1 - tests/test_evaluator.py | 2 -- tests/test_version_stable.py | 1 - 6 files changed, 40 insertions(+), 9 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index 5492ba9cfb..c3f897c711 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -1,6 +1,7 @@ import abc from typing import Iterable import numpy as np +import random import re import os import json @@ -458,14 +459,41 @@ def fewshot_description(self): return "" @utils.positional_deprecated - def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): + """ Returns a fewshot context string that is made up of a prepended description + (if provided), the `num_fewshot` number of examples, and an appended prompt example. + + :param doc: str + The document as returned from training_docs, validation_docs, or test_docs. 
+ :param num_fewshot: int + The number of fewshot examples to provide in the returned context string. + :param provide_description: bool + Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method + :param rnd: random.Random + The pseudo-random number generator used to randomly sample examples. + WARNING: If you do not provide a `rnd` arg, a default `random.Random` + object will be created and seeded with this Task's name attribute, `__name__`. + :param description: str + The task's description that will be prepended to the fewshot examples. + :returns: str + The fewshot context. + """ assert not provide_description, ( "The `provide_description` arg will be removed in future versions. To prepend " - "a custom description to the context, supply the corresponding string via the " + "a custom description to the context, supply the corresponding string via the " "`description` arg." ) + if provide_description is not None: + # nudge people to not specify it at all + print("WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict") + description = description + "\n\n" if description else "" + # TODO (jon-tow): Remove this default `rand` behaviour after `provide_description` is removed and remove the respective `rand` arg warning in the docs above. + if rnd is None: + rnd = random.Random() + rnd.seed(self.__name__) + if num_fewshot == 0: labeled_examples = "" else: @@ -537,13 +565,22 @@ def fewshot_examples(self, k, rnd): assert k == 0 return [] - def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0 assert not provide_description, ( "The `provide_description` arg will be removed in future versions. To prepend " "a custom description to the context, supply the corresponding string via the " "`description` arg." ) + if provide_description is not None: + # nudge people to not specify it at all + print("WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict") + + # TODO (jon-tow): Remove this default `rand` behaviour after `provide_description` is removed and remove the respective `rand` arg warning in the docs above. 
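For reference, a keyword-only call against the updated signature — a sketch assuming a registered task (`copa` here) whose data is available locally; the description string is made up:

```python
# Sketch: building a few-shot context with a prepended description
# (API per patch 25).
import random
from lm_eval import tasks

task = tasks.get_task("copa")()       # instantiate a registered task
doc = list(task.validation_docs())[0]
ctx = task.fewshot_context(
    doc=doc,
    num_fewshot=2,
    rnd=random.Random(1234),          # pass `rnd` explicitly; the implicit
                                      # fallback seeding is only a stopgap
    description="Choose the more plausible alternative.",
)
# A positional call such as task.fewshot_context(doc, 2) would instead
# trigger the `positional_deprecated` warning added in patch 18.
print(ctx)
```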
+ if rnd is None: + rnd = random.Random() + rnd.seed(self.__name__) + return "" def higher_is_better(self): diff --git a/lm_eval/evaluator.py b/lm_eval/evaluator.py index 95fe42d9e0..087f2a1313 100644 --- a/lm_eval/evaluator.py +++ b/lm_eval/evaluator.py @@ -159,7 +159,6 @@ def evaluate(lm, task_dict, provide_description=None, num_fewshot=0, limit=None, ctx = task.fewshot_context( doc=doc, num_fewshot=num_fewshot, - provide_description=provide_description, rnd=rnd, description=description ) diff --git a/scripts/write_out.py b/scripts/write_out.py index b39fd64541..2039d3934f 100644 --- a/scripts/write_out.py +++ b/scripts/write_out.py @@ -63,7 +63,6 @@ def main(): ctx = task.fewshot_context( doc=doc, num_fewshot=args.num_fewshot, - provide_description=args.provide_description, rnd=rnd, description=description ) diff --git a/tests/test_description_dict.py b/tests/test_description_dict.py index aaec13fbce..f80f529063 100644 --- a/tests/test_description_dict.py +++ b/tests/test_description_dict.py @@ -36,7 +36,6 @@ def test_description_dict(): ctx = task.fewshot_context( doc=doc, num_fewshot=1, - provide_description=False, rnd=rnd, description=description, ) diff --git a/tests/test_evaluator.py b/tests/test_evaluator.py index ad12b053da..363384a05c 100644 --- a/tests/test_evaluator.py +++ b/tests/test_evaluator.py @@ -51,7 +51,6 @@ def ll_perp_fn(reqs): e1 = evaluator.evaluate( lm=lm, task_dict=task_dict, - provide_description=False, num_fewshot=0, limit=limit, bootstrap_iters=10, @@ -60,7 +59,6 @@ def ll_perp_fn(reqs): e2 = evaluator.evaluate( lm=lm, task_dict=task_dict, - provide_description=False, num_fewshot=0, limit=limit, bootstrap_iters=10, diff --git a/tests/test_version_stable.py b/tests/test_version_stable.py index a19cbf3f90..7dd36a94b6 100644 --- a/tests/test_version_stable.py +++ b/tests/test_version_stable.py @@ -102,7 +102,6 @@ def greedy_until(reqs): result = evaluator.evaluate( lm=lm, task_dict=task_dict, - provide_description=False, num_fewshot=0, limit=limit, bootstrap_iters=10, From 57d0718a718243d6ee64a995f3da8fc2146bbd29 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Fri, 24 Dec 2021 02:38:30 -0500 Subject: [PATCH 26/65] Remove all `provide_description` argument uses --- lm_eval/tasks/truthfulqa.py | 6 ++---- scripts/cost_estimate.py | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index 0eeb364e0f..67259a5a04 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -85,12 +85,11 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." return super().fewshot_context( doc=doc, num_fewshot=num_fewshot, - provide_description=provide_description, rnd=rnd, description=description ) @@ -223,12 +222,11 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): return " " - def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0, "TruthfulQA is intended only for the zero-shot setting." 
return super().fewshot_context( doc=doc, num_fewshot=num_fewshot, - provide_description=provide_description, rnd=rnd, description=description ) diff --git a/scripts/cost_estimate.py b/scripts/cost_estimate.py index f08d2f37f2..d2e60bfa0d 100644 --- a/scripts/cost_estimate.py +++ b/scripts/cost_estimate.py @@ -54,7 +54,6 @@ def main(): evaluator.evaluate( lm=lm, task_dict={taskname: tasks.get_task(taskname)()}, - provide_description=False, num_fewshot=0, limit=None, bootstrap_iters=10, From 0e232f7af6ff6a48bd78b3ff3752549e723056de Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Fri, 24 Dec 2021 02:40:11 -0500 Subject: [PATCH 27/65] Update new `task` arg and task dict getter --- lm_eval/tasks/__init__.py | 7 +++++-- lm_eval/tasks/prost.py | 3 +-- main.py | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 1759149b43..99513d3202 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -244,10 +244,13 @@ def get_task_name_from_object(task_object): def get_task_dict(task_name_list: List[Union[str, lm_eval.base.Task]]): - return { + task_name_dict = { task_name: get_task(task_name)() for task_name in task_name_list if isinstance(task_name, str) - } + { + } + task_name_from_object_dict = { get_task_name_from_object(task_object): task_object for task_object in task_name_list if not isinstance(task_object, str) } + assert set(task_name_dict.keys()).isdisjoint(set(task_name_from_object_dict.keys())) + return {**task_name_dict, **task_name_from_object_dict} diff --git a/lm_eval/tasks/prost.py b/lm_eval/tasks/prost.py index 7bcbf1e917..e972d39ac0 100644 --- a/lm_eval/tasks/prost.py +++ b/lm_eval/tasks/prost.py @@ -36,12 +36,11 @@ def has_validation_docs(self): def has_test_docs(self): return True - def fewshot_context(self, doc, num_fewshot, provide_description, rnd, description=None): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0, 'PROST is designed to probe models in a zero-shot fashion only.' 
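This rewrite of `get_task_dict` also fixes a latent bug from the earlier revision, where the two dict comprehensions were joined with `+` (a `TypeError` for dicts); the merged `{**a, **b}` form plus the disjointness assert replaces it. A sketch of the resulting behaviour — `lambada` is a registered name, and instantiating `HeadQAEn` directly is purely illustrative:

```python
# Sketch: mixing registry names and Task objects in `get_task_dict`
# (per patch 27). A name collision between the two groups now fails
# the disjointness assert instead of silently overwriting an entry.
from lm_eval import tasks

task_dict = tasks.get_task_dict([
    "lambada",                 # resolved through TASK_REGISTRY
    tasks.headqa.HeadQAEn(),   # reported under its class name, "HeadQAEn"
])
print(sorted(task_dict))       # ['HeadQAEn', 'lambada']
```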
return super().fewshot_context( doc=doc, num_fewshot=num_fewshot, - provide_description=provide_description, rnd=rnd, description=description ) diff --git a/main.py b/main.py index c7f9ce6d8b..cf13fe1315 100644 --- a/main.py +++ b/main.py @@ -43,7 +43,7 @@ def main(): results = evaluator.simple_evaluate( model=args.model, model_args=args.model_args, - task_names=task_names, + tasks=task_names, num_fewshot=args.num_fewshot, batch_size=args.batch_size, device=args.device, From 666b615f2f80e213354dc5dc78aa85e639d48ff9 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Fri, 24 Dec 2021 13:04:59 +0100 Subject: [PATCH 28/65] Fix README --- README.md | 292 +++++++++++++++++++++++++++--------------------------- 1 file changed, 146 insertions(+), 146 deletions(-) diff --git a/README.md b/README.md index 987b79573e..18ec4dc342 100644 --- a/README.md +++ b/README.md @@ -128,152 +128,152 @@ To implement a new task in eval harness, see [this guide](https://github.com/Ele |openbookqa |✓ |✓ |✓ | 500|acc, acc_norm | |squad2 |✓ |✓ | | 11873|exact, f1, HasAns_exact, HasAns_f1, NoAns_exact, NoAns_f1, best_exact, best_f1| |race |✓ |✓ |✓ | 1045|acc | -|headqa_es |✓ |✓ |✓ | 2742|acc, acc_norm | -|headqa_en |✓ |✓ |✓ | 2742|acc, acc_norm | -|mathqa |✓ |✓ |✓ | 2985|acc, acc_norm | -|webqs |✓ | |✓ | 2032|acc | -|wsc273 | | |✓ | 273|acc | -|winogrande |✓ |✓ | | 1267|acc | -|anli_r1 |✓ |✓ |✓ | 1000|acc | -|anli_r2 |✓ |✓ |✓ | 1000|acc | -|anli_r3 |✓ |✓ |✓ | 1200|acc | -|ethics_cm |✓ | |✓ | 3885|acc | -|ethics_deontology |✓ | |✓ | 3596|acc, em | -|ethics_justice |✓ | |✓ | 2704|acc, em | -|ethics_utilitarianism_original | | |✓ | 4808|acc | -|ethics_utilitarianism |✓ | |✓ | 4808|acc | -|ethics_virtue |✓ | |✓ | 4975|acc, em | -|math_algebra |✓ | |✓ | 1187|acc | -|math_counting_and_prob |✓ | |✓ | 474|acc | -|math_geometry |✓ | |✓ | 479|acc | -|math_intermediate_algebra |✓ | |✓ | 903|acc | -|math_num_theory |✓ | |✓ | 540|acc | -|math_prealgebra |✓ | |✓ | 871|acc | -|math_precalc |✓ | |✓ | 546|acc | -|arithmetic_2da | |✓ | | 2000|acc | -|arithmetic_2ds | |✓ | | 2000|acc | -|arithmetic_3da | |✓ | | 2000|acc | -|arithmetic_3ds | |✓ | | 2000|acc | -|arithmetic_4da | |✓ | | 2000|acc | -|arithmetic_4ds | |✓ | | 2000|acc | -|arithmetic_5da | |✓ | | 2000|acc | -|arithmetic_5ds | |✓ | | 2000|acc | -|arithmetic_2dm | |✓ | | 2000|acc | -|arithmetic_1dc | |✓ | | 2000|acc | -|hendrycksTest-abstract_algebra |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-anatomy |✓ |✓ |✓ | 135|acc, acc_norm | -|hendrycksTest-astronomy |✓ |✓ |✓ | 152|acc, acc_norm | -|hendrycksTest-business_ethics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-clinical_knowledge |✓ |✓ |✓ | 265|acc, acc_norm | -|hendrycksTest-college_biology |✓ |✓ |✓ | 144|acc, acc_norm | -|hendrycksTest-college_chemistry |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_mathematics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_medicine |✓ |✓ |✓ | 173|acc, acc_norm | -|hendrycksTest-college_physics |✓ |✓ |✓ | 102|acc, acc_norm | -|hendrycksTest-computer_security |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-conceptual_physics |✓ |✓ |✓ | 235|acc, acc_norm | -|hendrycksTest-econometrics |✓ |✓ |✓ | 114|acc, acc_norm | -|hendrycksTest-electrical_engineering |✓ |✓ |✓ | 145|acc, acc_norm | -|hendrycksTest-elementary_mathematics |✓ |✓ |✓ | 378|acc, acc_norm | -|hendrycksTest-formal_logic |✓ |✓ |✓ | 126|acc, acc_norm | -|hendrycksTest-global_facts |✓ |✓ |✓ | 100|acc, 
acc_norm | -|hendrycksTest-high_school_biology |✓ |✓ |✓ | 310|acc, acc_norm | -|hendrycksTest-high_school_chemistry |✓ |✓ |✓ | 203|acc, acc_norm | -|hendrycksTest-high_school_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-high_school_european_history |✓ |✓ |✓ | 165|acc, acc_norm | -|hendrycksTest-high_school_geography |✓ |✓ |✓ | 198|acc, acc_norm | -|hendrycksTest-high_school_government_and_politics|✓ |✓ |✓ | 193|acc, acc_norm | -|hendrycksTest-high_school_macroeconomics |✓ |✓ |✓ | 390|acc, acc_norm | -|hendrycksTest-high_school_mathematics |✓ |✓ |✓ | 270|acc, acc_norm | -|hendrycksTest-high_school_microeconomics |✓ |✓ |✓ | 238|acc, acc_norm | -|hendrycksTest-high_school_physics |✓ |✓ |✓ | 151|acc, acc_norm | -|hendrycksTest-high_school_psychology |✓ |✓ |✓ | 545|acc, acc_norm | -|hendrycksTest-high_school_statistics |✓ |✓ |✓ | 216|acc, acc_norm | -|hendrycksTest-high_school_us_history |✓ |✓ |✓ | 204|acc, acc_norm | -|hendrycksTest-high_school_world_history |✓ |✓ |✓ | 237|acc, acc_norm | -|hendrycksTest-human_aging |✓ |✓ |✓ | 223|acc, acc_norm | -|hendrycksTest-human_sexuality |✓ |✓ |✓ | 131|acc, acc_norm | -|hendrycksTest-international_law |✓ |✓ |✓ | 121|acc, acc_norm | -|hendrycksTest-jurisprudence |✓ |✓ |✓ | 108|acc, acc_norm | -|hendrycksTest-logical_fallacies |✓ |✓ |✓ | 163|acc, acc_norm | -|hendrycksTest-machine_learning |✓ |✓ |✓ | 112|acc, acc_norm | -|hendrycksTest-management |✓ |✓ |✓ | 103|acc, acc_norm | -|hendrycksTest-marketing |✓ |✓ |✓ | 234|acc, acc_norm | -|hendrycksTest-medical_genetics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-miscellaneous |✓ |✓ |✓ | 783|acc, acc_norm | -|hendrycksTest-moral_disputes |✓ |✓ |✓ | 346|acc, acc_norm | -|hendrycksTest-moral_scenarios |✓ |✓ |✓ | 895|acc, acc_norm | -|hendrycksTest-nutrition |✓ |✓ |✓ | 306|acc, acc_norm | -|hendrycksTest-philosophy |✓ |✓ |✓ | 311|acc, acc_norm | -|hendrycksTest-prehistory |✓ |✓ |✓ | 324|acc, acc_norm | -|hendrycksTest-professional_accounting |✓ |✓ |✓ | 282|acc, acc_norm | -|hendrycksTest-professional_law |✓ |✓ |✓ | 1534|acc, acc_norm | -|hendrycksTest-professional_medicine |✓ |✓ |✓ | 272|acc, acc_norm | -|hendrycksTest-professional_psychology |✓ |✓ |✓ | 612|acc, acc_norm | -|hendrycksTest-public_relations |✓ |✓ |✓ | 110|acc, acc_norm | -|hendrycksTest-security_studies |✓ |✓ |✓ | 245|acc, acc_norm | -|hendrycksTest-sociology |✓ |✓ |✓ | 201|acc, acc_norm | -|hendrycksTest-us_foreign_policy |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-virology |✓ |✓ |✓ | 166|acc, acc_norm | -|hendrycksTest-world_religions |✓ |✓ |✓ | 171|acc, acc_norm | -|wmt14-en-fr | | |✓ | 3003|bleu, chrf, ter | -|wmt14-fr-en | | |✓ | 3003|bleu, chrf, ter | -|wmt16-en-ro | | |✓ | 1999|bleu, chrf, ter | -|wmt16-ro-en | | |✓ | 1999|bleu, chrf, ter | -|wmt16-de-en | | |✓ | 2999|bleu, chrf, ter | -|wmt16-en-de | | |✓ | 2999|bleu, chrf, ter | -|wmt20-cs-en | | |✓ | 664|bleu, chrf, ter | -|wmt20-de-en | | |✓ | 785|bleu, chrf, ter | -|wmt20-de-fr | | |✓ | 1619|bleu, chrf, ter | -|wmt20-en-cs | | |✓ | 1418|bleu, chrf, ter | -|wmt20-en-de | | |✓ | 1418|bleu, chrf, ter | -|wmt20-en-iu | | |✓ | 2971|bleu, chrf, ter | -|wmt20-en-ja | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-km | | |✓ | 2320|bleu, chrf, ter | -|wmt20-en-pl | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-ps | | |✓ | 2719|bleu, chrf, ter | -|wmt20-en-ru | | |✓ | 2002|bleu, chrf, ter | -|wmt20-en-ta | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-zh | | |✓ | 1418|bleu, chrf, ter | -|wmt20-fr-de | | |✓ | 1619|bleu, chrf, ter | -|wmt20-iu-en | | |✓ | 2971|bleu, chrf, ter | 
-|wmt20-ja-en | | |✓ | 993|bleu, chrf, ter | -|wmt20-km-en | | |✓ | 2320|bleu, chrf, ter | -|wmt20-pl-en | | |✓ | 1001|bleu, chrf, ter | -|wmt20-ps-en | | |✓ | 2719|bleu, chrf, ter | -|wmt20-ru-en | | |✓ | 991|bleu, chrf, ter | -|wmt20-ta-en | | |✓ | 997|bleu, chrf, ter | -|wmt20-zh-en | | |✓ | 2000|bleu, chrf, ter | -|iwslt17-en-ar | | |✓ | 1460|bleu, chrf, ter | -|iwslt17-ar-en | | |✓ | 1460|bleu, chrf, ter | -|anagrams1 | |✓ | | 10000|acc | -|anagrams2 | |✓ | | 10000|acc | -|cycle_letters | |✓ | | 10000|acc | -|random_insertion | |✓ | | 10000|acc | -|reversed_words | |✓ | | 10000|acc | -|pile_arxiv | |✓ |✓ | 2407|word_perplexity, byte_perplexity, bits_per_byte | -|pile_books3 | |✓ |✓ | 269|word_perplexity, byte_perplexity, bits_per_byte | -|pile_bookcorpus2 | |✓ |✓ | 28|word_perplexity, byte_perplexity, bits_per_byte | -|pile_dm-mathematics | |✓ |✓ | 1922|word_perplexity, byte_perplexity, bits_per_byte | -|pile_enron | |✓ |✓ | 1010|word_perplexity, byte_perplexity, bits_per_byte | -|pile_europarl | |✓ |✓ | 157|word_perplexity, byte_perplexity, bits_per_byte | -|pile_freelaw | |✓ |✓ | 5101|word_perplexity, byte_perplexity, bits_per_byte | -|pile_github | |✓ |✓ | 18195|word_perplexity, byte_perplexity, bits_per_byte | -|pile_gutenberg | |✓ |✓ | 80|word_perplexity, byte_perplexity, bits_per_byte | -|pile_hackernews | |✓ |✓ | 1632|word_perplexity, byte_perplexity, bits_per_byte | -|pile_nih-exporter | |✓ |✓ | 1884|word_perplexity, byte_perplexity, bits_per_byte | -|pile_opensubtitles | |✓ |✓ | 642|word_perplexity, byte_perplexity, bits_per_byte | -|pile_openwebtext2 | |✓ |✓ | 32925|word_perplexity, byte_perplexity, bits_per_byte | -|pile_philpapers | |✓ |✓ | 68|word_perplexity, byte_perplexity, bits_per_byte | -|pile_pile-cc | |✓ |✓ | 52790|word_perplexity, byte_perplexity, bits_per_byte | -|pile_pubmed-abstracts | |✓ |✓ | 29895|word_perplexity, byte_perplexity, bits_per_byte | -|pile_pubmed-central | |✓ |✓ | 5911|word_perplexity, byte_perplexity, bits_per_byte | -|pile_stackexchange | |✓ |✓ | 30378|word_perplexity, byte_perplexity, bits_per_byte | -|pile_uspto | |✓ |✓ | 11415|word_perplexity, byte_perplexity, bits_per_byte | -|pile_ubuntu-irc | |✓ |✓ | 22|word_perplexity, byte_perplexity, bits_per_byte | -|pile_wikipedia | |✓ |✓ | 17511|word_perplexity, byte_perplexity, bits_per_byte | -|pile_youtubesubtitles | |✓ || 1000|acc +|mathqa |✓ |✓ |✓ | 2985|acc, acc_norm | +|headqa_es |✓ |✓ |✓ | 2742|acc, acc_norm | +|headqa_en |✓ |✓ |✓ | 2742|acc, acc_norm | +|webqs |✓ | |✓ | 2032|acc | +|wsc273 | | |✓ | 273|acc | +|winogrande |✓ |✓ | | 1267|acc | +|anli_r1 |✓ |✓ |✓ | 1000|acc | +|anli_r2 |✓ |✓ |✓ | 1000|acc | +|anli_r3 |✓ |✓ |✓ | 1200|acc | +|ethics_cm |✓ | |✓ | 3885|acc | +|ethics_deontology |✓ | |✓ | 3596|acc, em | +|ethics_justice |✓ | |✓ | 2704|acc, em | +|ethics_utilitarianism_original | | |✓ | 4808|acc | +|ethics_utilitarianism |✓ | |✓ | 4808|acc | +|ethics_virtue |✓ | |✓ | 4975|acc, em | +|math_algebra |✓ | |✓ | 1187|acc | +|math_counting_and_prob |✓ | |✓ | 474|acc | +|math_geometry |✓ | |✓ | 479|acc | +|math_intermediate_algebra |✓ | |✓ | 903|acc | +|math_num_theory |✓ | |✓ | 540|acc | +|math_prealgebra |✓ | |✓ | 871|acc | +|math_precalc |✓ | |✓ | 546|acc | +|arithmetic_2da | |✓ | | 2000|acc | +|arithmetic_2ds | |✓ | | 2000|acc | +|arithmetic_3da | |✓ | | 2000|acc | +|arithmetic_3ds | |✓ | | 2000|acc | +|arithmetic_4da | |✓ | | 2000|acc | +|arithmetic_4ds | |✓ | | 2000|acc | +|arithmetic_5da | |✓ | | 2000|acc | +|arithmetic_5ds | |✓ | | 2000|acc | +|arithmetic_2dm | |✓ | | 2000|acc | 
+|arithmetic_1dc | |✓ | | 2000|acc | +|hendrycksTest-abstract_algebra |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-anatomy |✓ |✓ |✓ | 135|acc, acc_norm | +|hendrycksTest-astronomy |✓ |✓ |✓ | 152|acc, acc_norm | +|hendrycksTest-business_ethics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-clinical_knowledge |✓ |✓ |✓ | 265|acc, acc_norm | +|hendrycksTest-college_biology |✓ |✓ |✓ | 144|acc, acc_norm | +|hendrycksTest-college_chemistry |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_mathematics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_medicine |✓ |✓ |✓ | 173|acc, acc_norm | +|hendrycksTest-college_physics |✓ |✓ |✓ | 102|acc, acc_norm | +|hendrycksTest-computer_security |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-conceptual_physics |✓ |✓ |✓ | 235|acc, acc_norm | +|hendrycksTest-econometrics |✓ |✓ |✓ | 114|acc, acc_norm | +|hendrycksTest-electrical_engineering |✓ |✓ |✓ | 145|acc, acc_norm | +|hendrycksTest-elementary_mathematics |✓ |✓ |✓ | 378|acc, acc_norm | +|hendrycksTest-formal_logic |✓ |✓ |✓ | 126|acc, acc_norm | +|hendrycksTest-global_facts |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-high_school_biology |✓ |✓ |✓ | 310|acc, acc_norm | +|hendrycksTest-high_school_chemistry |✓ |✓ |✓ | 203|acc, acc_norm | +|hendrycksTest-high_school_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-high_school_european_history |✓ |✓ |✓ | 165|acc, acc_norm | +|hendrycksTest-high_school_geography |✓ |✓ |✓ | 198|acc, acc_norm | +|hendrycksTest-high_school_government_and_politics |✓ |✓ |✓ | 193|acc, acc_norm | +|hendrycksTest-high_school_macroeconomics |✓ |✓ |✓ | 390|acc, acc_norm | +|hendrycksTest-high_school_mathematics |✓ |✓ |✓ | 270|acc, acc_norm | +|hendrycksTest-high_school_microeconomics |✓ |✓ |✓ | 238|acc, acc_norm | +|hendrycksTest-high_school_physics |✓ |✓ |✓ | 151|acc, acc_norm | +|hendrycksTest-high_school_psychology |✓ |✓ |✓ | 545|acc, acc_norm | +|hendrycksTest-high_school_statistics |✓ |✓ |✓ | 216|acc, acc_norm | +|hendrycksTest-high_school_us_history |✓ |✓ |✓ | 204|acc, acc_norm | +|hendrycksTest-high_school_world_history |✓ |✓ |✓ | 237|acc, acc_norm | +|hendrycksTest-human_aging |✓ |✓ |✓ | 223|acc, acc_norm | +|hendrycksTest-human_sexuality |✓ |✓ |✓ | 131|acc, acc_norm | +|hendrycksTest-international_law |✓ |✓ |✓ | 121|acc, acc_norm | +|hendrycksTest-jurisprudence |✓ |✓ |✓ | 108|acc, acc_norm | +|hendrycksTest-logical_fallacies |✓ |✓ |✓ | 163|acc, acc_norm | +|hendrycksTest-machine_learning |✓ |✓ |✓ | 112|acc, acc_norm | +|hendrycksTest-management |✓ |✓ |✓ | 103|acc, acc_norm | +|hendrycksTest-marketing |✓ |✓ |✓ | 234|acc, acc_norm | +|hendrycksTest-medical_genetics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-miscellaneous |✓ |✓ |✓ | 783|acc, acc_norm | +|hendrycksTest-moral_disputes |✓ |✓ |✓ | 346|acc, acc_norm | +|hendrycksTest-moral_scenarios |✓ |✓ |✓ | 895|acc, acc_norm | +|hendrycksTest-nutrition |✓ |✓ |✓ | 306|acc, acc_norm | +|hendrycksTest-philosophy |✓ |✓ |✓ | 311|acc, acc_norm | +|hendrycksTest-prehistory |✓ |✓ |✓ | 324|acc, acc_norm | +|hendrycksTest-professional_accounting |✓ |✓ |✓ | 282|acc, acc_norm | +|hendrycksTest-professional_law |✓ |✓ |✓ | 1534|acc, acc_norm | +|hendrycksTest-professional_medicine |✓ |✓ |✓ | 272|acc, acc_norm | +|hendrycksTest-professional_psychology |✓ |✓ |✓ | 612|acc, acc_norm | +|hendrycksTest-public_relations |✓ |✓ |✓ | 110|acc, acc_norm | +|hendrycksTest-security_studies |✓ |✓ |✓ | 245|acc, acc_norm | +|hendrycksTest-sociology |✓ |✓ |✓ | 
201|acc, acc_norm | +|hendrycksTest-us_foreign_policy |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-virology |✓ |✓ |✓ | 166|acc, acc_norm | +|hendrycksTest-world_religions |✓ |✓ |✓ | 171|acc, acc_norm | +|wmt14-en-fr | | |✓ | 3003|bleu, chrf, ter | +|wmt14-fr-en | | |✓ | 3003|bleu, chrf, ter | +|wmt16-en-ro | | |✓ | 1999|bleu, chrf, ter | +|wmt16-ro-en | | |✓ | 1999|bleu, chrf, ter | +|wmt16-de-en | | |✓ | 2999|bleu, chrf, ter | +|wmt16-en-de | | |✓ | 2999|bleu, chrf, ter | +|wmt20-cs-en | | |✓ | 664|bleu, chrf, ter | +|wmt20-de-en | | |✓ | 785|bleu, chrf, ter | +|wmt20-de-fr | | |✓ | 1619|bleu, chrf, ter | +|wmt20-en-cs | | |✓ | 1418|bleu, chrf, ter | +|wmt20-en-de | | |✓ | 1418|bleu, chrf, ter | +|wmt20-en-iu | | |✓ | 2971|bleu, chrf, ter | +|wmt20-en-ja | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-km | | |✓ | 2320|bleu, chrf, ter | +|wmt20-en-pl | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-ps | | |✓ | 2719|bleu, chrf, ter | +|wmt20-en-ru | | |✓ | 2002|bleu, chrf, ter | +|wmt20-en-ta | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-zh | | |✓ | 1418|bleu, chrf, ter | +|wmt20-fr-de | | |✓ | 1619|bleu, chrf, ter | +|wmt20-iu-en | | |✓ | 2971|bleu, chrf, ter | +|wmt20-ja-en | | |✓ | 993|bleu, chrf, ter | +|wmt20-km-en | | |✓ | 2320|bleu, chrf, ter | +|wmt20-pl-en | | |✓ | 1001|bleu, chrf, ter | +|wmt20-ps-en | | |✓ | 2719|bleu, chrf, ter | +|wmt20-ru-en | | |✓ | 991|bleu, chrf, ter | +|wmt20-ta-en | | |✓ | 997|bleu, chrf, ter | +|wmt20-zh-en | | |✓ | 2000|bleu, chrf, ter | +|iwslt17-en-ar | | |✓ | 1460|bleu, chrf, ter | +|iwslt17-ar-en | | |✓ | 1460|bleu, chrf, ter | +|anagrams1 | |✓ | | 10000|acc | +|anagrams2 | |✓ | | 10000|acc | +|cycle_letters | |✓ | | 10000|acc | +|random_insertion | |✓ | | 10000|acc | +|reversed_words | |✓ | | 10000|acc | +|pile_arxiv | |✓ |✓ | 2407|word_perplexity, byte_perplexity, bits_per_byte | +|pile_books3 | |✓ |✓ | 269|word_perplexity, byte_perplexity, bits_per_byte | +|pile_bookcorpus2 | |✓ |✓ | 28|word_perplexity, byte_perplexity, bits_per_byte | +|pile_dm-mathematics | |✓ |✓ | 1922|word_perplexity, byte_perplexity, bits_per_byte | +|pile_enron | |✓ |✓ | 1010|word_perplexity, byte_perplexity, bits_per_byte | +|pile_europarl | |✓ |✓ | 157|word_perplexity, byte_perplexity, bits_per_byte | +|pile_freelaw | |✓ |✓ | 5101|word_perplexity, byte_perplexity, bits_per_byte | +|pile_github | |✓ |✓ | 18195|word_perplexity, byte_perplexity, bits_per_byte | +|pile_gutenberg | |✓ |✓ | 80|word_perplexity, byte_perplexity, bits_per_byte | +|pile_hackernews | |✓ |✓ | 1632|word_perplexity, byte_perplexity, bits_per_byte | +|pile_nih-exporter | |✓ |✓ | 1884|word_perplexity, byte_perplexity, bits_per_byte | +|pile_opensubtitles | |✓ |✓ | 642|word_perplexity, byte_perplexity, bits_per_byte | +|pile_openwebtext2 | |✓ |✓ | 32925|word_perplexity, byte_perplexity, bits_per_byte | +|pile_philpapers | |✓ |✓ | 68|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pile-cc | |✓ |✓ | 52790|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pubmed-abstracts | |✓ |✓ | 29895|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pubmed-central | |✓ |✓ | 5911|word_perplexity, byte_perplexity, bits_per_byte | +|pile_stackexchange | |✓ |✓ | 30378|word_perplexity, byte_perplexity, bits_per_byte | +|pile_uspto | |✓ |✓ | 11415|word_perplexity, byte_perplexity, bits_per_byte | +|pile_ubuntu-irc | |✓ |✓ | 22|word_perplexity, byte_perplexity, bits_per_byte | +|pile_wikipedia | |✓ |✓ | 17511|word_perplexity, byte_perplexity, bits_per_byte | +|pile_youtubesubtitles | |✓ || 1000|acc 
|blimp_adjunct_island | |✓ | | 1000|acc |blimp_anaphor_gender_agreement | |✓ | | 1000|acc |blimp_anaphor_number_agreement | |✓ | | 1000|acc From e34c6bd606ab68d3b73f19dc3732bc28465a6fa0 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Fri, 24 Dec 2021 13:06:02 +0100 Subject: [PATCH 29/65] Fix README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 18ec4dc342..f0d910cca8 100644 --- a/README.md +++ b/README.md @@ -273,7 +273,7 @@ To implement a new task in eval harness, see [this guide](https://github.com/Ele |pile_uspto | |✓ |✓ | 11415|word_perplexity, byte_perplexity, bits_per_byte | |pile_ubuntu-irc | |✓ |✓ | 22|word_perplexity, byte_perplexity, bits_per_byte | |pile_wikipedia | |✓ |✓ | 17511|word_perplexity, byte_perplexity, bits_per_byte | -|pile_youtubesubtitles | |✓ || 1000|acc +|pile_youtubesubtitles | |✓ | | 1000|acc |blimp_adjunct_island | |✓ | | 1000|acc |blimp_anaphor_gender_agreement | |✓ | | 1000|acc |blimp_anaphor_number_agreement | |✓ | | 1000|acc From 383605122c428437dae452e543e5a73deadbe63f Mon Sep 17 00:00:00 2001 From: Igor Ostrovsky Date: Sat, 25 Dec 2021 17:49:47 +0000 Subject: [PATCH 30/65] Fix bits_per_byte metric in PerplexityTask bits_per_byte was calculated as average per-byte loglikelihood, which would work if loglikelihood was base-2 log, but it is natural log. To correct for that, bits_per_byte should be divided by math.log(2). Also, it should be true that 2^bits_per_byte == byte_perplexity, which is true after the fix. --- lm_eval/base.py | 6 +++--- lm_eval/metrics.py | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index 927ecb49f0..65d4ae15d7 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -10,7 +10,7 @@ import torch import torch.nn.functional as F -from lm_eval.metrics import mean, weighted_perplexity, weighted_mean +from lm_eval.metrics import mean, weighted_perplexity, weighted_mean, bits_per_byte from lm_eval import utils from abc import abstractmethod @@ -560,14 +560,14 @@ def process_results(self, doc, results): return { "word_perplexity": (loglikelihood, words), "byte_perplexity": (loglikelihood, bytes_), - "bits_per_byte": (-loglikelihood, self.count_bytes(doc)) + "bits_per_byte": (loglikelihood, bytes_), } def aggregation(self): return { "word_perplexity": weighted_perplexity, "byte_perplexity": weighted_perplexity, - "bits_per_byte": weighted_mean + "bits_per_byte": bits_per_byte, } @classmethod diff --git a/lm_eval/metrics.py b/lm_eval/metrics.py index c95d4cd61c..76bfdf7364 100644 --- a/lm_eval/metrics.py +++ b/lm_eval/metrics.py @@ -102,6 +102,9 @@ def weighted_mean(items): def weighted_perplexity(items): return math.exp(-weighted_mean(items)) +def bits_per_byte(items): + return -weighted_mean(items) / math.log(2) + def bleu(items): """The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric From df1fc6c355c771eb090efabb230bb267bbb1e885 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Sun, 26 Dec 2021 12:51:30 +0100 Subject: [PATCH 31/65] Fix multirc --- lm_eval/tasks/superglue.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index 33598f2301..ae21bcedfa 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -228,7 +228,7 @@ def doc_to_target(self, doc): @staticmethod def format_answer(answer, label): label_str = "yes" if 
label else "no" - return f"{label_str}, {answer}" + return f"{answer}\nIs the answer correct? {label_str}" def construct_requests(self, doc, ctx): true_choice = self.format_answer(answer=doc["answer"], label=True) @@ -240,7 +240,8 @@ def construct_requests(self, doc, ctx): return ll_true_choice, ll_false_choice def process_results(self, doc, results): - pred = np.argmax(results) + ll_true_choice, ll_false_choice = results + pred = ll_true_choice > ll_false_choice return { "acc": (pred, doc) } From 23a420674b49476c5be66b0bf4e0d487ca38f1c7 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Sun, 26 Dec 2021 13:00:33 +0100 Subject: [PATCH 32/65] Add capital letters --- lm_eval/tasks/superglue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index ae21bcedfa..f3d07b3483 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -227,7 +227,7 @@ def doc_to_target(self, doc): @staticmethod def format_answer(answer, label): - label_str = "yes" if label else "no" + label_str = "Yes" if label else "No" return f"{answer}\nIs the answer correct? {label_str}" def construct_requests(self, doc, ctx): From b0a123102305a8b55ef3af24b535929a60642b52 Mon Sep 17 00:00:00 2001 From: rokosbasilisk Date: Sun, 26 Dec 2021 21:20:30 +0530 Subject: [PATCH 33/65] add asdiv task --- eval.sh | 1 + lm_eval.egg-info/PKG-INFO | 448 ++++++++++++++++++++++++++ lm_eval.egg-info/SOURCES.txt | 74 +++++ lm_eval.egg-info/dependency_links.txt | 1 + lm_eval.egg-info/requires.txt | 24 ++ lm_eval.egg-info/top_level.txt | 2 + lm_eval/tasks/__init__.py | 2 + lm_eval/tasks/apps.py | 130 ++++++++ lm_eval/tasks/asdiv.py | 135 ++++++++ 9 files changed, 817 insertions(+) create mode 100644 eval.sh create mode 100644 lm_eval.egg-info/PKG-INFO create mode 100644 lm_eval.egg-info/SOURCES.txt create mode 100644 lm_eval.egg-info/dependency_links.txt create mode 100644 lm_eval.egg-info/requires.txt create mode 100644 lm_eval.egg-info/top_level.txt create mode 100644 lm_eval/tasks/apps.py create mode 100644 lm_eval/tasks/asdiv.py diff --git a/eval.sh b/eval.sh new file mode 100644 index 0000000000..68c468f4ba --- /dev/null +++ b/eval.sh @@ -0,0 +1 @@ +python main.py --model gpt2 --model_args pretrained=EleutherAI/gpt-neo-125M --device cuda:0 --tasks math_asdiv diff --git a/lm_eval.egg-info/PKG-INFO b/lm_eval.egg-info/PKG-INFO new file mode 100644 index 0000000000..f4d1cbda8d --- /dev/null +++ b/lm_eval.egg-info/PKG-INFO @@ -0,0 +1,448 @@ +Metadata-Version: 2.1 +Name: lm-eval +Version: 0.1.0 +Summary: A framework for evaluating autoregressive language models +Home-page: https://github.com/EleutherAI/lm-evaluation-harness +Author: Leo Gao +Author-email: lg@eleuther.ai +License: UNKNOWN +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE.md + +# Language Model Evaluation Harness + +![](https://github.com/EleutherAI/lm-evaluation-harness/workflows/Build/badge.svg) +[![codecov](https://codecov.io/gh/EleutherAI/lm-evaluation-harness/branch/master/graph/badge.svg?token=JSG3O2427J)](https://codecov.io/gh/EleutherAI/lm-evaluation-harness) + +## Overview + +This project provides a unified framework to test autoregressive language models (GPT-2, GPT-3, GPTNeo, etc) on a large number of different evaluation tasks. 
+ +Features: + +- 100+ tasks implemented +- Support for GPT-2, GPT-3, GPT-Neo, GPT-NeoX, and GPT-J, with flexible tokenization-agnostic interface +- Task versioning to ensure reproducibility + +## Install + +```bash +pip install lm-eval +``` + +## Basic Usage + +To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. + +```bash +python main.py \ + --model gpt2 \ + --device cuda:0 \ + --tasks lambada,hellaswag +``` +(This uses gpt2-117M by default as per HF defaults, use --model_args to specify other gpt2 sizes) + +Additional arguments can be provided to the model constructor using the `--model_args` flag. Most importantly, the `gpt2` model can be used to load an arbitrary HuggingFace model. For example, to run GPTNeo use the following: + +```bash +python main.py \ + --model gpt2 \ + --model_args pretrained=EleutherAI/gpt-neo-2.7B \ + --device cuda:0 \ + --tasks lambada,hellaswag +``` + +If you have access to the OpenAI API, you can also evaluate GPT-3: + +```bash +export OPENAI_API_SECRET_KEY=YOUR_KEY_HERE +python main.py \ + --model gpt3 \ + --model_args engine=davinci \ + --tasks lambada,hellaswag +``` + +To evaluate mesh-transformer-jax models that are not available on HF, please invoke eval harness through [this script](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py). + +## Implementing new tasks + +To implement a new task in eval harness, see [this guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/task-guide.md). + +## Cite as + +``` +@software{eval-harness, + author = {Gao, Leo and + Tow, Jonathan and + Biderman, Stella and + Black, Sid and + DiPofi, Anthony and + Foster, Charles and + Golding, Laurence and + Hsu, Jeffrey and + McDonell, Kyle and + Muennighoff, Niklas and + Phang, Jason and + Reynolds, Laria and + Tang, Eric and + Thite, Anish and + Wang, Ben and + Wang, Kevin and + Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = sep, + year = 2021, + publisher = {Zenodo}, + version = {v0.0.1}, + doi = {10.5281/zenodo.5371628}, + url = {https://doi.org/10.5281/zenodo.5371628} +} +``` + +### Full Task List + +| Task Name |Train|Val|Test|Val/Test Docs| Metrics | +|---------------------------------------------------------|-----|---|----|------------:|------------------------------------------------------------------------------| +|cola |✓ |✓ | | 1043|mcc | +|mnli |✓ |✓ | | 9815|acc | +|mnli_mismatched |✓ |✓ | | 9832|acc | +|mrpc |✓ |✓ | | 408|acc, f1 | +|rte |✓ |✓ | | 277|acc | +|qnli |✓ |✓ | | 5463|acc | +|qqp |✓ |✓ | | 40430|acc, f1 | +|sst |✓ |✓ | | 872|acc | +|wnli |✓ |✓ | | 71|acc | +|boolq |✓ |✓ | | 3270|acc | +|cb |✓ |✓ | | 56|acc, f1 | +|copa |✓ |✓ | | 100|acc | +|multirc |✓ |✓ | | 4848|acc | +|record |✓ |✓ | | 10000|f1, em | +|wic |✓ |✓ | | 638|acc | +|wsc |✓ |✓ | | 104|acc | +|coqa |✓ |✓ | | 500|f1, em | +|drop |✓ |✓ | | 9536|em, f1 | +|lambada | |✓ | | 5153|ppl, acc | +|lambada_cloze | |✓ | | 5153|ppl, acc | +|wikitext | |✓ |✓ | 62|word_perplexity, byte_perplexity, bits_per_byte | +|piqa |✓ |✓ | | 1838|acc, acc_norm | +|prost | | |✓ | 18736|acc, acc_norm | +|pubmedqa | | |✓ | 1000|acc | +|sciq |✓ |✓ |✓ | 1000|acc, acc_norm | +|qa4mre_2011 | | |✓ | 120|acc, acc_norm | +|qa4mre_2012 | | |✓ | 160|acc, acc_norm | +|qa4mre_2013 | | |✓ | 284|acc, acc_norm | +|triviaqa |✓ |✓ | | 11313|acc | +|arc_easy |✓ |✓ |✓ | 2376|acc, acc_norm | +|arc_challenge |✓ |✓ |✓ | 1172|acc, acc_norm | +|logiqa |✓ |✓ |✓ | 651|acc, acc_norm | +|hellaswag |✓ |✓ | | 
10042|acc, acc_norm | +|openbookqa |✓ |✓ |✓ | 500|acc, acc_norm | +|squad2 |✓ |✓ | | 11873|exact, f1, HasAns_exact, HasAns_f1, NoAns_exact, NoAns_f1, best_exact, best_f1| +|race |✓ |✓ |✓ | 1045|acc | +|headqa |✓ |✓ |✓ | 2742|acc, acc_norm | +|mathqa |✓ |✓ |✓ | 2985|acc, acc_norm | +|webqs |✓ | |✓ | 2032|acc | +|wsc273 | | |✓ | 273|acc | +|winogrande |✓ |✓ | | 1267|acc | +|anli_r1 |✓ |✓ |✓ | 1000|acc | +|anli_r2 |✓ |✓ |✓ | 1000|acc | +|anli_r3 |✓ |✓ |✓ | 1200|acc | +|ethics_cm |✓ | |✓ | 3885|acc | +|ethics_deontology |✓ | |✓ | 3596|acc, em | +|ethics_justice |✓ | |✓ | 2704|acc, em | +|ethics_utilitarianism_original | | |✓ | 4808|acc | +|ethics_utilitarianism |✓ | |✓ | 4808|acc | +|ethics_virtue |✓ | |✓ | 4975|acc, em | +|math_algebra |✓ | |✓ | 1187|acc | +|math_counting_and_prob |✓ | |✓ | 474|acc | +|math_geometry |✓ | |✓ | 479|acc | +|math_intermediate_algebra |✓ | |✓ | 903|acc | +|math_num_theory |✓ | |✓ | 540|acc | +|math_prealgebra |✓ | |✓ | 871|acc | +|math_precalc |✓ | |✓ | 546|acc | +|arithmetic_2da | |✓ | | 2000|acc | +|arithmetic_2ds | |✓ | | 2000|acc | +|arithmetic_3da | |✓ | | 2000|acc | +|arithmetic_3ds | |✓ | | 2000|acc | +|arithmetic_4da | |✓ | | 2000|acc | +|arithmetic_4ds | |✓ | | 2000|acc | +|arithmetic_5da | |✓ | | 2000|acc | +|arithmetic_5ds | |✓ | | 2000|acc | +|arithmetic_2dm | |✓ | | 2000|acc | +|arithmetic_1dc | |✓ | | 2000|acc | +|hendrycksTest-abstract_algebra |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-anatomy |✓ |✓ |✓ | 135|acc, acc_norm | +|hendrycksTest-astronomy |✓ |✓ |✓ | 152|acc, acc_norm | +|hendrycksTest-business_ethics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-clinical_knowledge |✓ |✓ |✓ | 265|acc, acc_norm | +|hendrycksTest-college_biology |✓ |✓ |✓ | 144|acc, acc_norm | +|hendrycksTest-college_chemistry |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_mathematics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-college_medicine |✓ |✓ |✓ | 173|acc, acc_norm | +|hendrycksTest-college_physics |✓ |✓ |✓ | 102|acc, acc_norm | +|hendrycksTest-computer_security |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-conceptual_physics |✓ |✓ |✓ | 235|acc, acc_norm | +|hendrycksTest-econometrics |✓ |✓ |✓ | 114|acc, acc_norm | +|hendrycksTest-electrical_engineering |✓ |✓ |✓ | 145|acc, acc_norm | +|hendrycksTest-elementary_mathematics |✓ |✓ |✓ | 378|acc, acc_norm | +|hendrycksTest-formal_logic |✓ |✓ |✓ | 126|acc, acc_norm | +|hendrycksTest-global_facts |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-high_school_biology |✓ |✓ |✓ | 310|acc, acc_norm | +|hendrycksTest-high_school_chemistry |✓ |✓ |✓ | 203|acc, acc_norm | +|hendrycksTest-high_school_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-high_school_european_history |✓ |✓ |✓ | 165|acc, acc_norm | +|hendrycksTest-high_school_geography |✓ |✓ |✓ | 198|acc, acc_norm | +|hendrycksTest-high_school_government_and_politics |✓ |✓ |✓ | 193|acc, acc_norm | +|hendrycksTest-high_school_macroeconomics |✓ |✓ |✓ | 390|acc, acc_norm | +|hendrycksTest-high_school_mathematics |✓ |✓ |✓ | 270|acc, acc_norm | +|hendrycksTest-high_school_microeconomics |✓ |✓ |✓ | 238|acc, acc_norm | +|hendrycksTest-high_school_physics |✓ |✓ |✓ | 151|acc, acc_norm | +|hendrycksTest-high_school_psychology |✓ |✓ |✓ | 545|acc, acc_norm | +|hendrycksTest-high_school_statistics |✓ |✓ |✓ | 216|acc, acc_norm | +|hendrycksTest-high_school_us_history |✓ |✓ |✓ | 204|acc, acc_norm | +|hendrycksTest-high_school_world_history |✓ |✓ |✓ | 237|acc, acc_norm | 
+|hendrycksTest-human_aging |✓ |✓ |✓ | 223|acc, acc_norm | +|hendrycksTest-human_sexuality |✓ |✓ |✓ | 131|acc, acc_norm | +|hendrycksTest-international_law |✓ |✓ |✓ | 121|acc, acc_norm | +|hendrycksTest-jurisprudence |✓ |✓ |✓ | 108|acc, acc_norm | +|hendrycksTest-logical_fallacies |✓ |✓ |✓ | 163|acc, acc_norm | +|hendrycksTest-machine_learning |✓ |✓ |✓ | 112|acc, acc_norm | +|hendrycksTest-management |✓ |✓ |✓ | 103|acc, acc_norm | +|hendrycksTest-marketing |✓ |✓ |✓ | 234|acc, acc_norm | +|hendrycksTest-medical_genetics |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-miscellaneous |✓ |✓ |✓ | 783|acc, acc_norm | +|hendrycksTest-moral_disputes |✓ |✓ |✓ | 346|acc, acc_norm | +|hendrycksTest-moral_scenarios |✓ |✓ |✓ | 895|acc, acc_norm | +|hendrycksTest-nutrition |✓ |✓ |✓ | 306|acc, acc_norm | +|hendrycksTest-philosophy |✓ |✓ |✓ | 311|acc, acc_norm | +|hendrycksTest-prehistory |✓ |✓ |✓ | 324|acc, acc_norm | +|hendrycksTest-professional_accounting |✓ |✓ |✓ | 282|acc, acc_norm | +|hendrycksTest-professional_law |✓ |✓ |✓ | 1534|acc, acc_norm | +|hendrycksTest-professional_medicine |✓ |✓ |✓ | 272|acc, acc_norm | +|hendrycksTest-professional_psychology |✓ |✓ |✓ | 612|acc, acc_norm | +|hendrycksTest-public_relations |✓ |✓ |✓ | 110|acc, acc_norm | +|hendrycksTest-security_studies |✓ |✓ |✓ | 245|acc, acc_norm | +|hendrycksTest-sociology |✓ |✓ |✓ | 201|acc, acc_norm | +|hendrycksTest-us_foreign_policy |✓ |✓ |✓ | 100|acc, acc_norm | +|hendrycksTest-virology |✓ |✓ |✓ | 166|acc, acc_norm | +|hendrycksTest-world_religions |✓ |✓ |✓ | 171|acc, acc_norm | +|wmt14-en-fr | | |✓ | 3003|bleu, chrf, ter | +|wmt14-fr-en | | |✓ | 3003|bleu, chrf, ter | +|wmt16-en-ro | | |✓ | 1999|bleu, chrf, ter | +|wmt16-ro-en | | |✓ | 1999|bleu, chrf, ter | +|wmt16-de-en | | |✓ | 2999|bleu, chrf, ter | +|wmt16-en-de | | |✓ | 2999|bleu, chrf, ter | +|wmt20-cs-en | | |✓ | 664|bleu, chrf, ter | +|wmt20-de-en | | |✓ | 785|bleu, chrf, ter | +|wmt20-de-fr | | |✓ | 1619|bleu, chrf, ter | +|wmt20-en-cs | | |✓ | 1418|bleu, chrf, ter | +|wmt20-en-de | | |✓ | 1418|bleu, chrf, ter | +|wmt20-en-iu | | |✓ | 2971|bleu, chrf, ter | +|wmt20-en-ja | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-km | | |✓ | 2320|bleu, chrf, ter | +|wmt20-en-pl | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-ps | | |✓ | 2719|bleu, chrf, ter | +|wmt20-en-ru | | |✓ | 2002|bleu, chrf, ter | +|wmt20-en-ta | | |✓ | 1000|bleu, chrf, ter | +|wmt20-en-zh | | |✓ | 1418|bleu, chrf, ter | +|wmt20-fr-de | | |✓ | 1619|bleu, chrf, ter | +|wmt20-iu-en | | |✓ | 2971|bleu, chrf, ter | +|wmt20-ja-en | | |✓ | 993|bleu, chrf, ter | +|wmt20-km-en | | |✓ | 2320|bleu, chrf, ter | +|wmt20-pl-en | | |✓ | 1001|bleu, chrf, ter | +|wmt20-ps-en | | |✓ | 2719|bleu, chrf, ter | +|wmt20-ru-en | | |✓ | 991|bleu, chrf, ter | +|wmt20-ta-en | | |✓ | 997|bleu, chrf, ter | +|wmt20-zh-en | | |✓ | 2000|bleu, chrf, ter | +|iwslt17-en-ar | | |✓ | 1460|bleu, chrf, ter | +|iwslt17-ar-en | | |✓ | 1460|bleu, chrf, ter | +|anagrams1 | |✓ | | 10000|acc | +|anagrams2 | |✓ | | 10000|acc | +|cycle_letters | |✓ | | 10000|acc | +|random_insertion | |✓ | | 10000|acc | +|reversed_words | |✓ | | 10000|acc | +|pile_arxiv | |✓ |✓ | 2407|word_perplexity, byte_perplexity, bits_per_byte | +|pile_books3 | |✓ |✓ | 269|word_perplexity, byte_perplexity, bits_per_byte | +|pile_bookcorpus2 | |✓ |✓ | 28|word_perplexity, byte_perplexity, bits_per_byte | +|pile_dm-mathematics | |✓ |✓ | 1922|word_perplexity, byte_perplexity, bits_per_byte | +|pile_enron | |✓ |✓ | 1010|word_perplexity, byte_perplexity, bits_per_byte | +|pile_europarl | |✓ |✓ | 
157|word_perplexity, byte_perplexity, bits_per_byte | +|pile_freelaw | |✓ |✓ | 5101|word_perplexity, byte_perplexity, bits_per_byte | +|pile_github | |✓ |✓ | 18195|word_perplexity, byte_perplexity, bits_per_byte | +|pile_gutenberg | |✓ |✓ | 80|word_perplexity, byte_perplexity, bits_per_byte | +|pile_hackernews | |✓ |✓ | 1632|word_perplexity, byte_perplexity, bits_per_byte | +|pile_nih-exporter | |✓ |✓ | 1884|word_perplexity, byte_perplexity, bits_per_byte | +|pile_opensubtitles | |✓ |✓ | 642|word_perplexity, byte_perplexity, bits_per_byte | +|pile_openwebtext2 | |✓ |✓ | 32925|word_perplexity, byte_perplexity, bits_per_byte | +|pile_philpapers | |✓ |✓ | 68|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pile-cc | |✓ |✓ | 52790|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pubmed-abstracts | |✓ |✓ | 29895|word_perplexity, byte_perplexity, bits_per_byte | +|pile_pubmed-central | |✓ |✓ | 5911|word_perplexity, byte_perplexity, bits_per_byte | +|pile_stackexchange | |✓ |✓ | 30378|word_perplexity, byte_perplexity, bits_per_byte | +|pile_uspto | |✓ |✓ | 11415|word_perplexity, byte_perplexity, bits_per_byte | +|pile_ubuntu-irc | |✓ |✓ | 22|word_perplexity, byte_perplexity, bits_per_byte | +|pile_wikipedia | |✓ |✓ | 17511|word_perplexity, byte_perplexity, bits_per_byte | +|pile_youtubesubtitles | |✓ | | 1000|acc +|blimp_adjunct_island | |✓ | | 1000|acc +|blimp_anaphor_gender_agreement | |✓ | | 1000|acc +|blimp_anaphor_number_agreement | |✓ | | 1000|acc +|blimp_animate_subject_passive | |✓ | | 1000|acc +|blimp_animate_subject_trans | |✓ | | 1000|acc +|blimp_causative | |✓ | | 1000|acc +|blimp_complex_NP_island | |✓ | | 1000|acc +|blimp_coordinate_structure_constraint_complex_left_branch| |✓ | | 1000|acc +|blimp_coordinate_structure_constraint_object_extraction | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_1 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_2 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_irregular_1 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_irregular_2 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_with_adj_2 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_with_adj_irregular_1 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_with_adj_irregular_2 | |✓ | | 1000|acc +|blimp_determiner_noun_agreement_with_adjective_1 | |✓ | | 1000|acc +|blimp_distractor_agreement_relational_noun | |✓ | | 1000|acc +|blimp_distractor_agreement_relative_clause | |✓ | | 1000|acc +|blimp_drop_argument | |✓ | | 1000|acc +|blimp_ellipsis_n_bar_1 | |✓ | | 1000|acc +|blimp_ellipsis_n_bar_2 | |✓ | | 1000|acc +|blimp_existential_there_object_raising | |✓ | | 1000|acc +|blimp_existential_there_quantifiers_1 | |✓ | | 1000|acc +|blimp_existential_there_quantifiers_2 | |✓ | | 1000|acc +|blimp_existential_there_subject_raising | |✓ | | 1000|acc +|blimp_expletive_it_object_raising | |✓ | | 1000|acc +|blimp_inchoative | |✓ | | 1000|acc +|blimp_intransitive | |✓ | | 1000|acc +|blimp_irregular_past_participle_adjectives | |✓ | | 1000|acc +|blimp_irregular_past_participle_verbs | |✓ | | 1000|acc +|blimp_irregular_plural_subject_verb_agreement_1 | |✓ | | 1000|acc +|blimp_irregular_plural_subject_verb_agreement_2 | |✓ | | 1000|acc +|blimp_left_branch_island_echo_question | |✓ | | 1000|acc +|blimp_left_branch_island_simple_question | |✓ | | 1000|acc +|blimp_matrix_question_npi_licensor_present | |✓ | | 1000|acc +|blimp_npi_present_1 | |✓ | | 1000|acc +|blimp_npi_present_2 | |✓ | | 1000|acc +|blimp_only_npi_licensor_present | |✓ | | 1000|acc +|blimp_only_npi_scope | |✓ | | 
1000|acc +|blimp_passive_1 | |✓ | | 1000|acc +|blimp_passive_2 | |✓ | | 1000|acc +|blimp_principle_A_c_command | |✓ | | 1000|acc +|blimp_principle_A_case_1 | |✓ | | 1000|acc +|blimp_principle_A_case_2 | |✓ | | 1000|acc +|blimp_principle_A_domain_1 | |✓ | | 1000|acc +|blimp_principle_A_domain_2 | |✓ | | 1000|acc +|blimp_principle_A_domain_3 | |✓ | | 1000|acc +|blimp_principle_A_reconstruction | |✓ | | 1000|acc +|blimp_regular_plural_subject_verb_agreement_1 | |✓ | | 1000|acc +|blimp_regular_plural_subject_verb_agreement_2 | |✓ | | 1000|acc +|blimp_sentential_negation_npi_licensor_present | |✓ | | 1000|acc +|blimp_sentential_negation_npi_scope | |✓ | | 1000|acc +|blimp_sentential_subject_island | |✓ | | 1000|acc +|blimp_superlative_quantifiers_1 | |✓ | | 1000|acc +|blimp_superlative_quantifiers_2 | |✓ | | 1000|acc +|blimp_tough_vs_raising_1 | |✓ | | 1000|acc +|blimp_tough_vs_raising_2 | |✓ | | 1000|acc +|blimp_transitive | |✓ | | 1000|acc +|blimp_wh_island | |✓ | | 1000|acc +|blimp_wh_questions_object_gap | |✓ | | 1000|acc +|blimp_wh_questions_subject_gap | |✓ | | 1000|acc +|blimp_wh_questions_subject_gap_long_distance | |✓ | | 1000|acc +|blimp_wh_vs_that_no_gap | |✓ | | 1000|acc +|blimp_wh_vs_that_no_gap_long_distance | |✓ | | 1000|acc +|blimp_wh_vs_that_with_gap | |✓ | | 1000|acc +|blimp_wh_vs_that_with_gap_long_distance | |✓ | | 1000|acc + + +## Usage + +### Evaluate a task + +Additional arguments can be provided to the model constructor using the `--model_args` flag. Most importantly, the `gpt2` model can be used to load an arbitrary HuggingFace model as follows: + + +```bash +python main.py \ + --model gpt2 \ + --model_args pretrained=EleutherAI/gpt-neo-1.3B \ + --device cuda:0 \ + --tasks lambada,hellaswag \ + --num_fewshot 2 +``` + +To inspect what the LM inputs look like, you can run the following command: + +```bash +python write_out.py \ + --tasks all_tasks \ + --provide_description \ + --num_fewshot 5 \ + --num_examples 10 \ + --output_base_path /path/to/output/folder +``` + +This will write out one text file for each task. + +### Code Structure + +There are two major components of the library: + +1. LMs (language models), e.g. GPT-2, GPT-3 +2. Tasks, e.g. MNLI, RTE, SQuAD (coming soon) + +Both LMs (`lm_eval.models`) and Tasks (`lm_eval.tasks`) are kept in a registry data structure, for easy CLI instantiation. + +**If you want to extend either models or tasks, simply add a new LM or Task subclass, and decorate with the registry decorator**. + +The [GPT-3 Evaluations Project](https://github.com/EleutherAI/lm_evaluation_harness/projects/1) tracks our progress implementing new tasks. Right now, we are focused on getting all the datasets loaded so that we can dedupe against the training data. Implementing the actual evaluations is nice but not necessary at the current moment. + +### Task Versioning + +To help improve reproducibility, all tasks have a VERSION field. When run from the command line, this is reported in a column in the table, or in the "version" field in the evaluator return dict. The purpose of the version is so that if the task definition changes (i.e to fix a bug), then we can know exactly which metrics were computed using the old buggy implementation to avoid unfair comparisons. To enforce this, there are unit tests that make sure the behavior of all tests remains the same as when they were first implemented. Task versions start at 0, and each time a breaking change is made, the version is incremented by one. 
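+
+A minimal sketch of what a version bump looks like (illustrative only; real task classes also implement the full `Task` interface):
+
+```python
+from lm_eval.base import Task
+
+class MyTask(Task):
+    # Bumped from 0 to 1 after a breaking fix to the prompt format, so scores
+    # computed before the fix remain attributable to version 0.
+    VERSION = 1
+```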
+
+When reporting eval harness results, please also report the version of each task. This can be done either with a separate column in the table, or by reporting the task name with the version appended as such: taskname-v0.
+
+## Description
+
+### 1. LM Evaluation
+Given an LM, we want to evaluate it on a wide range of NLU tasks. We should at least cover the set of tasks in the GPT-3 paper, and any other tasks/benchmarks that are relevant. We will follow the GPT-3 format of a) zero-shot, b) one-shot, c) few-shot evaluation.
+
+To do this, we need 3 components:
+* Data downloader (shared with later sections, potentially needs to be directly linked to the latter 2 components)
+* Task formatter
+* Task evaluator
+
+The **data downloader** should download data for the relevant tasks.
+* We should heavily rely on Hugging Face's NLP for this. They are already doing most of the work with handling data scripts/caching.
+* Optionally, we can rely directly on HF-NLP's caching, but that makes it awkward to handle non-HF-NLP datasets. Otherwise, we can just write them out to .jsonl. My feeling is that NLU data storage will be a drop in the bucket compared to LM data.
+* Where we're not using HF-NLP, we can keep the data in the raw format (.jsonl, tsv, etc) and let the other components handle transforming it.
+
+The **task formatter** formats the task input data into an LM-usable format.
+* We should potentially support multiple formats for a given task, e.g. some formats may be better or worse suited for LM evaluation. See also: prompt-engineering
+* The task formatter should also support zero/one/few-shot packing of training examples into an input. This may require weird interactions with the tokenizer for dealing with max-token issues.
+
+The **task evaluator** scores a task.
+* In essence, we want to generate output predictions for all our input examples, and feed them into some function that pops out a score (or scores).
+An alternative approach is to collect the output logits and score them against the expected set of outputs.
+* Some tasks have weird evaluation schemes, so we should make this as general as possible.
+* Will thus likely have to be closely tied with the formatter.
+* Likewise, we should take advantage of HF-NLP's metrics.
+We might as well provide a sufficiently general API for the model to support OpenAI API as well. This can double up as an effort to reproduce the OpenAI NLU results.
+
+### 2. Removing val/test data from LM training set
+With the data downloader in place, we simply need to (1) expose the val/test examples, and (2) remove them from the training set.
+
+* Arguably, (2) should be handled by LM preprocessing in a more general way. There are probably non-NLU-eval cases where we want to remove some specific data from training.
+* Depending on how exactly we do the val/test removal, we may want to format the same example multiple ways to ensure that they don't get leaked into the training set in a slightly tweaked format.
+* Thought experiment: SQuAD is based largely on Wikipedia. What exactly would we want to remove from the LM?
+* [GPT-3]: In GPT-3, they attempted to remove val/test from their LM set, but there was a bug that caused leakage. So they ended up doing the opposite: removing overlaps from the LM set from the val/test. Funky.
+* [GPT-3]: See page 30 and Appendix C for details. They do some funky n-gram based search and removal. We should think about whether we want to follow their protocol exactly.
+
+### 3. 
Adding task training data to LM training set +This part is the easiest. I guess we just write out some text files containing the training data? We can let the usual LM preprocessing pipeline handle it from there. + + diff --git a/lm_eval.egg-info/SOURCES.txt b/lm_eval.egg-info/SOURCES.txt new file mode 100644 index 0000000000..71a6a2fdb8 --- /dev/null +++ b/lm_eval.egg-info/SOURCES.txt @@ -0,0 +1,74 @@ +LICENSE.md +README.md +setup.py +lm_eval/__init__.py +lm_eval/base.py +lm_eval/evaluator.py +lm_eval/metrics.py +lm_eval/utils.py +lm_eval.egg-info/PKG-INFO +lm_eval.egg-info/SOURCES.txt +lm_eval.egg-info/dependency_links.txt +lm_eval.egg-info/requires.txt +lm_eval.egg-info/top_level.txt +lm_eval/models/__init__.py +lm_eval/models/dummy.py +lm_eval/models/gpt2.py +lm_eval/models/gpt3.py +lm_eval/tasks/__init__.py +lm_eval/tasks/anli.py +lm_eval/tasks/arc.py +lm_eval/tasks/arithmetic.py +lm_eval/tasks/blimp.py +lm_eval/tasks/cbt.py +lm_eval/tasks/common.py +lm_eval/tasks/coqa.py +lm_eval/tasks/drop.py +lm_eval/tasks/glue.py +lm_eval/tasks/headqa.py +lm_eval/tasks/hellaswag.py +lm_eval/tasks/hendrycks_ethics.py +lm_eval/tasks/hendrycks_math.py +lm_eval/tasks/hendrycks_test.py +lm_eval/tasks/lambada.py +lm_eval/tasks/lambada_cloze.py +lm_eval/tasks/lambada_multilingual.py +lm_eval/tasks/logiqa.py +lm_eval/tasks/mathqa.py +lm_eval/tasks/mc_taco.py +lm_eval/tasks/mutual.py +lm_eval/tasks/naturalqs.py +lm_eval/tasks/openbookqa.py +lm_eval/tasks/pile.py +lm_eval/tasks/piqa.py +lm_eval/tasks/prost.py +lm_eval/tasks/pubmedqa.py +lm_eval/tasks/qa4mre.py +lm_eval/tasks/quac.py +lm_eval/tasks/race.py +lm_eval/tasks/sat.py +lm_eval/tasks/sciq.py +lm_eval/tasks/squad.py +lm_eval/tasks/storycloze.py +lm_eval/tasks/superglue.py +lm_eval/tasks/translation.py +lm_eval/tasks/triviaqa.py +lm_eval/tasks/truthfulqa.py +lm_eval/tasks/unscramble.py +lm_eval/tasks/webqs.py +lm_eval/tasks/wikitext.py +lm_eval/tasks/winogrande.py +lm_eval/tasks/wsc273.py +scripts/__init__.py +scripts/cost_estimate.py +scripts/fewshot_description_experiment.py +scripts/get_prompts.py +scripts/make_gpt2_test_cases.py +scripts/make_table_tasks.py +scripts/write_out.py +scripts/clean_training_data/__init__.py +scripts/clean_training_data/archiver.py +scripts/clean_training_data/generate_13_grams.py +scripts/clean_training_data/janitor.py +scripts/clean_training_data/process_sorted_buckets.py +scripts/clean_training_data/sort_13_gram_buckets.py \ No newline at end of file diff --git a/lm_eval.egg-info/dependency_links.txt b/lm_eval.egg-info/dependency_links.txt new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/lm_eval.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/lm_eval.egg-info/requires.txt b/lm_eval.egg-info/requires.txt new file mode 100644 index 0000000000..c0f5b007a4 --- /dev/null +++ b/lm_eval.egg-info/requires.txt @@ -0,0 +1,24 @@ +black +best_download>=0.0.6 +datasets==1.15.1 +click>=7.1 +scikit-learn>=0.24.1 +torch>=1.7 +transformers>=4.1 +sqlitedict==1.6.0 +pytablewriter==0.58.0 +sacrebleu==1.5.0 +rouge-score==0.0.4 +bleurt@ https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt +pycountry==20.7.3 +numexpr==2.7.2 +lm_dataformat==0.0.20 +pytest==6.2.3 +pybind11==2.6.2 +tqdm-multiprocess==0.0.11 +zstandard==0.15.2 +jsonlines==2.0.0 +mock==4.0.3 +openai==0.6.4 +jieba==0.42.1 +nagisa==0.2.7 diff --git a/lm_eval.egg-info/top_level.txt b/lm_eval.egg-info/top_level.txt new file mode 100644 index 0000000000..3c734591d0 --- /dev/null +++ 
b/lm_eval.egg-info/top_level.txt @@ -0,0 +1,2 @@ +lm_eval +scripts diff --git a/lm_eval/tasks/__init__.py b/lm_eval/tasks/__init__.py index 53d7e88f16..86b61f68d8 100644 --- a/lm_eval/tasks/__init__.py +++ b/lm_eval/tasks/__init__.py @@ -45,6 +45,7 @@ from . import mutual from . import truthfulqa from . import blimp +from . import asdiv ######################################## # Translation tasks @@ -164,6 +165,7 @@ "math_num_theory": hendrycks_math.MathNumberTheory, "math_prealgebra": hendrycks_math.MathPrealgebra, "math_precalc": hendrycks_math.MathPrecalculus, + "math_asdiv": asdiv.Asdiv, # arithmetic "arithmetic_2da": arithmetic.Arithmetic2DPlus, diff --git a/lm_eval/tasks/apps.py b/lm_eval/tasks/apps.py new file mode 100644 index 0000000000..f5d6a42cce --- /dev/null +++ b/lm_eval/tasks/apps.py @@ -0,0 +1,130 @@ +""" +Measuring Coding Challenge Competence With APPS +https://arxiv.org/pdf/2105.09938 + +@article{hendrycksapps2021, + title={Measuring Coding Challenge Competence With APPS}, + author={Dan Hendrycks and Steven Basart and Saurav Kadavath and Mantas Mazeika and Akul Arora and Ethan Guo and Collin Burns and Samir Puranik and Horace He and Dawn Song and Jacob Steinhardt}, + journal={NeurIPS}, + year={2021} +} +""" +from lm_eval.base import Task +from pathlib import Path +from best_download import download_file +import xml.etree.ElementTree as ET +from lm_eval.base import rf +from lm_eval.metrics import mean,perplexity +import numpy as np +from zipfile import ZipFile +import os + +class Apps(Task): + VERSION = 0 + DATASET_PATH = Path("data/asdiv") + + def download(self): + if self.DATASET_PATH.exists(): + return + Path.mkdir(self.DATASET_PATH) + url = "https://people.eecs.berkeley.edu/~hendrycks/APPS.tar.gz" + checksum = "2f71f8003929d605369ad924be4b95c15879fc2bfac0d4d01a81f8aabceaad5c" + zip_path = self.DATASET_PATH / "master.zip" + download_file(url, str(zip_path), checksum) + with ZipFile(zip_path, "r") as zip: + zip.extractall(self.DATASET_PATH) + os.remove(zip_path) + + def _convert_standard(self, problem): + #TODO: include solution-type and formula + out_doc = { + "question" : problem.find('Question').text, + "body" : problem.find('Body').text, + "answer": problem.find('Answer').text + } + return out_doc + + def load_docs(self, textfilename, tfds=False): + tree = ET.parse(textfilename) + root = tree.getroot() + for pid, problem in enumerate(root.iter('Problem')): + out_doc = self._convert_standard(problem) + yield out_doc + + def _strip_bracket(self,test_str): + ret = '' + skip1c = 0 + skip2c = 0 + for i in test_str: + if i == '(': + skip2c += 1 + elif i == ')'and skip2c > 0: + skip2c -= 1 + elif skip1c == 0 and skip2c == 0: + ret += i + return ret + + def has_training_docs(self): + return False + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return False + + def training_docs(self): + raise NotImplementedError("This dataset has no training docs") + + def test_docs(self): + raise NotImplementedError("This dataset has no test docs") + + def validation_docs(self): + data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-master/dataset/ASDiv.xml" + return self.load_docs(data_xml_path) + + def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + assert num_fewshot == 0, "ASDiv is intended only for the zero-shot setting." 
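+        # Zero-shot only: no labeled examples are packed into the context.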
+ return super().fewshot_context(doc, num_fewshot, provide_description, rnd) + + + def fewshot_description(self): + # TODO: add solution-type and formula + desc = "information containing the context of the question\nQuestion: Text of a question.\nAnswer: Answer to the question, based on the passage.\n" + return desc + + def doc_to_text(self, doc): + # TODO: add solution-type + return doc['body'] + '\n' + 'Question:' + doc['question'] + '\n' + 'Answer:' + + def doc_to_target(self, doc): + # TODO: add formula + + answer = self._strip_bracket(doc['answer']) + if len(answer)>0: # check if answer is present only in brackets + return answer + else: + return doc['answer'] + + def construct_requests(self, doc, ctx): + ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) + + return ll, is_greedy + + def process_results(self, doc, results): + ll, is_greedy = results + + return { + 'acc': int(is_greedy) + } + + def aggregation(self): + return { + 'acc': mean + } + + def higher_is_better(self): + return { + 'acc': True + } + diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py new file mode 100644 index 0000000000..dcde1f1e23 --- /dev/null +++ b/lm_eval/tasks/asdiv.py @@ -0,0 +1,135 @@ +""" +ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers +https://arxiv.org/abs/2106.15772 + +@misc{miao2021diverse, + title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers}, + author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su}, + year={2021}, + eprint={2106.15772}, + archivePrefix={arXiv}, + primaryClass={cs.AI} +} +""" +from lm_eval.base import Task +from pathlib import Path +from best_download import download_file +import xml.etree.ElementTree as ET +from lm_eval.base import rf +from lm_eval.metrics import mean,perplexity +import numpy as np +from zipfile import ZipFile +import os + +#currently ignoring formula for answer generation + +# given a subset, splits return the docs +class Asdiv(Task): + VERSION = 0 + DATASET_PATH = Path("data/asdiv") + + def download(self): + if self.DATASET_PATH.exists(): + return + Path.mkdir(self.DATASET_PATH) + url = "https://github.com/chaochun/nlu-asdiv-dataset/archive/refs/heads/master.zip" + checksum = "2f71f8003929d605369ad924be4b95c15879fc2bfac0d4d01a81f8aabceaad5c" + zip_path = self.DATASET_PATH / "master.zip" + download_file(url, str(zip_path), checksum) + with ZipFile(zip_path, "r") as zip: + zip.extractall(self.DATASET_PATH) + os.remove(zip_path) + + def _convert_standard(self, problem): + #TODO: include solution-type and formula + out_doc = { + "question" : problem.find('Question').text, + "body" : problem.find('Body').text, + "answer": problem.find('Answer').text + } + return out_doc + + def load_docs(self, textfilename, tfds=False): + tree = ET.parse(textfilename) + root = tree.getroot() + for pid, problem in enumerate(root.iter('Problem')): + out_doc = self._convert_standard(problem) + yield out_doc + + def _strip_bracket(self,test_str): + ret = '' + skip1c = 0 + skip2c = 0 + for i in test_str: + if i == '(': + skip2c += 1 + elif i == ')'and skip2c > 0: + skip2c -= 1 + elif skip1c == 0 and skip2c == 0: + ret += i + return ret + + def has_training_docs(self): + return False + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return False + + def training_docs(self): + raise NotImplementedError("This dataset has no training docs") + + def test_docs(self): + raise NotImplementedError("This dataset has no test docs") + + def validation_docs(self): + 
data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-master/dataset/ASDiv.xml" + return self.load_docs(data_xml_path) + + def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + assert num_fewshot == 0, "ASDiv is intended only for the zero-shot setting." + return super().fewshot_context(doc, num_fewshot, provide_description, rnd) + + + def fewshot_description(self): + # TODO: add solution-type and formula + desc = "information containing the context of the question\nQuestion: Text of a question.\nAnswer: Answer to the question, based on the passage.\n" + return desc + + def doc_to_text(self, doc): + # TODO: add solution-type + return doc['body'] + '\n' + 'Question:' + doc['question'] + '\n' + 'Answer:' + + def doc_to_target(self, doc): + # TODO: add formula + + answer = self._strip_bracket(doc['answer']) + if len(answer)>0: # check if answer is present only in brackets + return answer + else: + return doc['answer'] + + def construct_requests(self, doc, ctx): + ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) + + return ll, is_greedy + + def process_results(self, doc, results): + ll, is_greedy = results + + return { + 'acc': int(is_greedy) + } + + def aggregation(self): + return { + 'acc': mean + } + + def higher_is_better(self): + return { + 'acc': True + } + From bce9f289f8eee207fbc56288492f1db0807961eb Mon Sep 17 00:00:00 2001 From: rokosbasilisk Date: Sun, 26 Dec 2021 21:30:05 +0530 Subject: [PATCH 34/65] remove apps --- lm_eval/tasks/apps.py | 130 ------------------------------------------ 1 file changed, 130 deletions(-) delete mode 100644 lm_eval/tasks/apps.py diff --git a/lm_eval/tasks/apps.py b/lm_eval/tasks/apps.py deleted file mode 100644 index f5d6a42cce..0000000000 --- a/lm_eval/tasks/apps.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -Measuring Coding Challenge Competence With APPS -https://arxiv.org/pdf/2105.09938 - -@article{hendrycksapps2021, - title={Measuring Coding Challenge Competence With APPS}, - author={Dan Hendrycks and Steven Basart and Saurav Kadavath and Mantas Mazeika and Akul Arora and Ethan Guo and Collin Burns and Samir Puranik and Horace He and Dawn Song and Jacob Steinhardt}, - journal={NeurIPS}, - year={2021} -} -""" -from lm_eval.base import Task -from pathlib import Path -from best_download import download_file -import xml.etree.ElementTree as ET -from lm_eval.base import rf -from lm_eval.metrics import mean,perplexity -import numpy as np -from zipfile import ZipFile -import os - -class Apps(Task): - VERSION = 0 - DATASET_PATH = Path("data/asdiv") - - def download(self): - if self.DATASET_PATH.exists(): - return - Path.mkdir(self.DATASET_PATH) - url = "https://people.eecs.berkeley.edu/~hendrycks/APPS.tar.gz" - checksum = "2f71f8003929d605369ad924be4b95c15879fc2bfac0d4d01a81f8aabceaad5c" - zip_path = self.DATASET_PATH / "master.zip" - download_file(url, str(zip_path), checksum) - with ZipFile(zip_path, "r") as zip: - zip.extractall(self.DATASET_PATH) - os.remove(zip_path) - - def _convert_standard(self, problem): - #TODO: include solution-type and formula - out_doc = { - "question" : problem.find('Question').text, - "body" : problem.find('Body').text, - "answer": problem.find('Answer').text - } - return out_doc - - def load_docs(self, textfilename, tfds=False): - tree = ET.parse(textfilename) - root = tree.getroot() - for pid, problem in enumerate(root.iter('Problem')): - out_doc = self._convert_standard(problem) - yield out_doc - - def _strip_bracket(self,test_str): - ret = '' - skip1c = 0 - skip2c = 0 - for i in test_str: - if 
i == '(': - skip2c += 1 - elif i == ')'and skip2c > 0: - skip2c -= 1 - elif skip1c == 0 and skip2c == 0: - ret += i - return ret - - def has_training_docs(self): - return False - - def has_validation_docs(self): - return True - - def has_test_docs(self): - return False - - def training_docs(self): - raise NotImplementedError("This dataset has no training docs") - - def test_docs(self): - raise NotImplementedError("This dataset has no test docs") - - def validation_docs(self): - data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-master/dataset/ASDiv.xml" - return self.load_docs(data_xml_path) - - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): - assert num_fewshot == 0, "ASDiv is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd) - - - def fewshot_description(self): - # TODO: add solution-type and formula - desc = "information containing the context of the question\nQuestion: Text of a question.\nAnswer: Answer to the question, based on the passage.\n" - return desc - - def doc_to_text(self, doc): - # TODO: add solution-type - return doc['body'] + '\n' + 'Question:' + doc['question'] + '\n' + 'Answer:' - - def doc_to_target(self, doc): - # TODO: add formula - - answer = self._strip_bracket(doc['answer']) - if len(answer)>0: # check if answer is present only in brackets - return answer - else: - return doc['answer'] - - def construct_requests(self, doc, ctx): - ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) - - return ll, is_greedy - - def process_results(self, doc, results): - ll, is_greedy = results - - return { - 'acc': int(is_greedy) - } - - def aggregation(self): - return { - 'acc': mean - } - - def higher_is_better(self): - return { - 'acc': True - } - From 440216db7b1a3faebc0b1cb085e20192e13ce386 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Mon, 27 Dec 2021 11:46:45 +0100 Subject: [PATCH 35/65] Questions in BoolQ don't have interrogation punctuation at the end --- lm_eval/tasks/superglue.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index 33598f2301..c26988107a 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -13,7 +13,7 @@ class BoolQ(HFTask): - VERSION = 0 + VERSION = 1 DATASET_PATH = "super_glue" DATASET_NAME = "boolq" @@ -31,7 +31,7 @@ def fewshot_description(self): return "Read the following passages and answer each question with a yes or a no." def doc_to_text(self, doc): - return f"{doc['passage']}\nQuestion: {doc['question']}\nAnswer:" + return f"{doc['passage']}\nQuestion: {doc['question']}?\nAnswer:" def doc_to_target(self, doc): return " " + yesno(doc['label']) From 97ca18e0854dbf01b68d0f77c249f5ebe64ec057 Mon Sep 17 00:00:00 2001 From: Leo Gao <54557097+leogao2@users.noreply.github.com> Date: Tue, 28 Dec 2021 18:57:52 -0700 Subject: [PATCH 36/65] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f0d910cca8..8a2d43998f 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ pip install lm-eval ## Basic Usage -To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. +To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. 
**When reporting results from eval harness, please include the task versions (shown in `results["versions"]`) for reproducibility.** This allows bug fixes to tasks while also ensuring that previously reported scores are reproducible. ```bash python main.py \ From 0d9d47da05170e3c9629b76360f614104f654056 Mon Sep 17 00:00:00 2001 From: Leo Gao <54557097+leogao2@users.noreply.github.com> Date: Tue, 28 Dec 2021 19:00:01 -0700 Subject: [PATCH 37/65] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8a2d43998f..949cbdb74b 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ pip install lm-eval ## Basic Usage -To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. **When reporting results from eval harness, please include the task versions (shown in `results["versions"]`) for reproducibility.** This allows bug fixes to tasks while also ensuring that previously reported scores are reproducible. +To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. **When reporting results from eval harness, please include the task versions (shown in `results["versions"]`) for reproducibility.** This allows bug fixes to tasks while also ensuring that previously reported scores are reproducible. See the [Task Versioning](https://github.com/EleutherAI/lm-evaluation-harness#task-versioning) section for more info. ```bash python main.py \ From 5a53c3684523cd873bdef18cb8264fecd82a3f21 Mon Sep 17 00:00:00 2001 From: Leo Gao <54557097+leogao2@users.noreply.github.com> Date: Tue, 28 Dec 2021 19:20:10 -0700 Subject: [PATCH 38/65] pile: Switch download over to backup host temporarily We can have fallback instead after @researcher2 implements it in best_download --- lm_eval/tasks/pile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/pile.py b/lm_eval/tasks/pile.py index 68ff7ed9a8..6936905f0c 100644 --- a/lm_eval/tasks/pile.py +++ b/lm_eval/tasks/pile.py @@ -19,8 +19,8 @@ class PilePerplexityTask(PerplexityTask, abc.ABC): def download(self): # TODO: separate pile val/test out by component so we don't have to scan the entire file once per set os.makedirs("data/pile/", exist_ok=True) - download_file("https://the-eye.eu/public/AI/pile/val.jsonl.zst", self.VAL_PATH, "264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") - download_file("https://the-eye.eu/public/AI/pile/test.jsonl.zst", self.TEST_PATH, "0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") + download_file("http://eaidata.bmk.sh/data/pile/val.jsonl.zst", self.VAL_PATH, "264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") + download_file("http://eaidata.bmk.sh/data/pile/test.jsonl.zst", self.TEST_PATH, "0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") def validation_docs(self): rdr = lm_dataformat.Reader(self.VAL_PATH) From f42a8d6619a321ca270b1159702f2d9874cbb375 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Wed, 29 Dec 2021 02:27:47 +0000 Subject: [PATCH 39/65] Add testdata for boolq v1 --- tests/testdata/boolq-v1-loglikelihood | 1 + tests/testdata/boolq-v1-res.json | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/testdata/boolq-v1-loglikelihood create mode 100644 tests/testdata/boolq-v1-res.json diff --git a/tests/testdata/boolq-v1-loglikelihood b/tests/testdata/boolq-v1-loglikelihood new file mode 100644 index 
0000000000..7811121c9f --- /dev/null +++ b/tests/testdata/boolq-v1-loglikelihood @@ -0,0 +1 @@ +6577e0d88572772ef08e64f624c0e3df0953286ae1f118ccef15623b59ffeabf \ No newline at end of file diff --git a/tests/testdata/boolq-v1-res.json b/tests/testdata/boolq-v1-res.json new file mode 100644 index 0000000000..291b9f122d --- /dev/null +++ b/tests/testdata/boolq-v1-res.json @@ -0,0 +1 @@ +{"results": {"boolq": {"acc": 0.5048929663608562, "acc_stderr": 0.00874463623355505}}, "versions": {"boolq": 1}} \ No newline at end of file From 0bde758966b7787d368412cc8ab02869d3463523 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Wed, 29 Dec 2021 02:32:49 +0000 Subject: [PATCH 40/65] Revert "Add capital letters" This reverts commit 23a420674b49476c5be66b0bf4e0d487ca38f1c7. --- lm_eval/tasks/superglue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index f3d07b3483..ae21bcedfa 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -227,7 +227,7 @@ def doc_to_target(self, doc): @staticmethod def format_answer(answer, label): - label_str = "Yes" if label else "No" + label_str = "yes" if label else "no" return f"{answer}\nIs the answer correct? {label_str}" def construct_requests(self, doc, ctx): From d7e3248990acc20e48c2facc34840384acfdc1e0 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Wed, 29 Dec 2021 04:40:09 +0000 Subject: [PATCH 41/65] Pretty sure questions need to be paragraph dependent also --- lm_eval/metrics.py | 7 ++++--- lm_eval/tasks/superglue.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lm_eval/metrics.py b/lm_eval/metrics.py index c95d4cd61c..749159b09a 100644 --- a/lm_eval/metrics.py +++ b/lm_eval/metrics.py @@ -52,13 +52,14 @@ def acc_all(items): docs = list(zip(*items))[1] for doc, pred in zip(docs, preds): + paragraph_id = doc["idx"]["paragraph"] question_id = doc["idx"]["question"] - if question_id not in question_scoring_dict: - question_scoring_dict[question_id] = [] + if (paragraph_id, question_id) not in question_scoring_dict: + question_scoring_dict[(paragraph_id, question_id)] = [] gold_label = doc["label"] == 1 - question_scoring_dict[question_id].append(gold_label == pred) + question_scoring_dict[(paragraph_id, question_id)].append(gold_label == pred) acc = np.mean([int(all(x)) for x in question_scoring_dict.values()]) return acc diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index ae21bcedfa..2ce5810352 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -202,7 +202,7 @@ def convert_choice(choice): class MultiRC(HFTask): - VERSION = 0 + VERSION = 1 DATASET_PATH = "super_glue" DATASET_NAME = "multirc" From 73d0ae5e448c36ff663fa82376c78f6b43a6cee8 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Wed, 29 Dec 2021 04:40:56 +0000 Subject: [PATCH 42/65] Add testdata for multirc v1 --- tests/testdata/multirc-v1-loglikelihood | 1 + tests/testdata/multirc-v1-res.json | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/testdata/multirc-v1-loglikelihood create mode 100644 tests/testdata/multirc-v1-res.json diff --git a/tests/testdata/multirc-v1-loglikelihood b/tests/testdata/multirc-v1-loglikelihood new file mode 100644 index 0000000000..52a89c6f9e --- /dev/null +++ b/tests/testdata/multirc-v1-loglikelihood @@ -0,0 +1 @@ 
+0e793bd6f637a70a04c6f2cda080188fc037961b2f909095fe63f7bdbc4a90c6 \ No newline at end of file diff --git a/tests/testdata/multirc-v1-res.json b/tests/testdata/multirc-v1-res.json new file mode 100644 index 0000000000..938141bbb8 --- /dev/null +++ b/tests/testdata/multirc-v1-res.json @@ -0,0 +1 @@ +{"results": {"multirc": {"acc": 0.046169989506820566, "acc_stderr": 0.006801377886208738}}, "versions": {"multirc": 1}} \ No newline at end of file From 04635731923251fd79500dc0e1e356f43e666df5 Mon Sep 17 00:00:00 2001 From: rokosbasilisk Date: Wed, 29 Dec 2021 22:55:23 +0530 Subject: [PATCH 43/65] remove unrequired files&add pin commit hash --- eval.sh | 1 - lm_eval.egg-info/PKG-INFO | 448 -------------------------- lm_eval.egg-info/SOURCES.txt | 74 ----- lm_eval.egg-info/dependency_links.txt | 1 - lm_eval.egg-info/requires.txt | 24 -- lm_eval.egg-info/top_level.txt | 2 - lm_eval/tasks/asdiv.py | 11 +- 7 files changed, 5 insertions(+), 556 deletions(-) delete mode 100644 eval.sh delete mode 100644 lm_eval.egg-info/PKG-INFO delete mode 100644 lm_eval.egg-info/SOURCES.txt delete mode 100644 lm_eval.egg-info/dependency_links.txt delete mode 100644 lm_eval.egg-info/requires.txt delete mode 100644 lm_eval.egg-info/top_level.txt diff --git a/eval.sh b/eval.sh deleted file mode 100644 index 68c468f4ba..0000000000 --- a/eval.sh +++ /dev/null @@ -1 +0,0 @@ -python main.py --model gpt2 --model_args pretrained=EleutherAI/gpt-neo-125M --device cuda:0 --tasks math_asdiv diff --git a/lm_eval.egg-info/PKG-INFO b/lm_eval.egg-info/PKG-INFO deleted file mode 100644 index f4d1cbda8d..0000000000 --- a/lm_eval.egg-info/PKG-INFO +++ /dev/null @@ -1,448 +0,0 @@ -Metadata-Version: 2.1 -Name: lm-eval -Version: 0.1.0 -Summary: A framework for evaluating autoregressive language models -Home-page: https://github.com/EleutherAI/lm-evaluation-harness -Author: Leo Gao -Author-email: lg@eleuther.ai -License: UNKNOWN -Platform: UNKNOWN -Classifier: Programming Language :: Python :: 3 -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Requires-Python: >=3.6 -Description-Content-Type: text/markdown -License-File: LICENSE.md - -# Language Model Evaluation Harness - -![](https://github.com/EleutherAI/lm-evaluation-harness/workflows/Build/badge.svg) -[![codecov](https://codecov.io/gh/EleutherAI/lm-evaluation-harness/branch/master/graph/badge.svg?token=JSG3O2427J)](https://codecov.io/gh/EleutherAI/lm-evaluation-harness) - -## Overview - -This project provides a unified framework to test autoregressive language models (GPT-2, GPT-3, GPTNeo, etc) on a large number of different evaluation tasks. - -Features: - -- 100+ tasks implemented -- Support for GPT-2, GPT-3, GPT-Neo, GPT-NeoX, and GPT-J, with flexible tokenization-agnostic interface -- Task versioning to ensure reproducibility - -## Install - -```bash -pip install lm-eval -``` - -## Basic Usage - -To evaluate a model, (e.g. GPT-2) on NLU tasks (e.g. LAMBADA, HellaSwag), you can run the following command. - -```bash -python main.py \ - --model gpt2 \ - --device cuda:0 \ - --tasks lambada,hellaswag -``` -(This uses gpt2-117M by default as per HF defaults, use --model_args to specify other gpt2 sizes) - -Additional arguments can be provided to the model constructor using the `--model_args` flag. Most importantly, the `gpt2` model can be used to load an arbitrary HuggingFace model. 
For example, to run GPTNeo use the following: - -```bash -python main.py \ - --model gpt2 \ - --model_args pretrained=EleutherAI/gpt-neo-2.7B \ - --device cuda:0 \ - --tasks lambada,hellaswag -``` - -If you have access to the OpenAI API, you can also evaluate GPT-3: - -```bash -export OPENAI_API_SECRET_KEY=YOUR_KEY_HERE -python main.py \ - --model gpt3 \ - --model_args engine=davinci \ - --tasks lambada,hellaswag -``` - -To evaluate mesh-transformer-jax models that are not available on HF, please invoke eval harness through [this script](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py). - -## Implementing new tasks - -To implement a new task in eval harness, see [this guide](https://github.com/EleutherAI/lm-evaluation-harness/blob/master/task-guide.md). - -## Cite as - -``` -@software{eval-harness, - author = {Gao, Leo and - Tow, Jonathan and - Biderman, Stella and - Black, Sid and - DiPofi, Anthony and - Foster, Charles and - Golding, Laurence and - Hsu, Jeffrey and - McDonell, Kyle and - Muennighoff, Niklas and - Phang, Jason and - Reynolds, Laria and - Tang, Eric and - Thite, Anish and - Wang, Ben and - Wang, Kevin and - Zou, Andy}, - title = {A framework for few-shot language model evaluation}, - month = sep, - year = 2021, - publisher = {Zenodo}, - version = {v0.0.1}, - doi = {10.5281/zenodo.5371628}, - url = {https://doi.org/10.5281/zenodo.5371628} -} -``` - -### Full Task List - -| Task Name |Train|Val|Test|Val/Test Docs| Metrics | -|---------------------------------------------------------|-----|---|----|------------:|------------------------------------------------------------------------------| -|cola |✓ |✓ | | 1043|mcc | -|mnli |✓ |✓ | | 9815|acc | -|mnli_mismatched |✓ |✓ | | 9832|acc | -|mrpc |✓ |✓ | | 408|acc, f1 | -|rte |✓ |✓ | | 277|acc | -|qnli |✓ |✓ | | 5463|acc | -|qqp |✓ |✓ | | 40430|acc, f1 | -|sst |✓ |✓ | | 872|acc | -|wnli |✓ |✓ | | 71|acc | -|boolq |✓ |✓ | | 3270|acc | -|cb |✓ |✓ | | 56|acc, f1 | -|copa |✓ |✓ | | 100|acc | -|multirc |✓ |✓ | | 4848|acc | -|record |✓ |✓ | | 10000|f1, em | -|wic |✓ |✓ | | 638|acc | -|wsc |✓ |✓ | | 104|acc | -|coqa |✓ |✓ | | 500|f1, em | -|drop |✓ |✓ | | 9536|em, f1 | -|lambada | |✓ | | 5153|ppl, acc | -|lambada_cloze | |✓ | | 5153|ppl, acc | -|wikitext | |✓ |✓ | 62|word_perplexity, byte_perplexity, bits_per_byte | -|piqa |✓ |✓ | | 1838|acc, acc_norm | -|prost | | |✓ | 18736|acc, acc_norm | -|pubmedqa | | |✓ | 1000|acc | -|sciq |✓ |✓ |✓ | 1000|acc, acc_norm | -|qa4mre_2011 | | |✓ | 120|acc, acc_norm | -|qa4mre_2012 | | |✓ | 160|acc, acc_norm | -|qa4mre_2013 | | |✓ | 284|acc, acc_norm | -|triviaqa |✓ |✓ | | 11313|acc | -|arc_easy |✓ |✓ |✓ | 2376|acc, acc_norm | -|arc_challenge |✓ |✓ |✓ | 1172|acc, acc_norm | -|logiqa |✓ |✓ |✓ | 651|acc, acc_norm | -|hellaswag |✓ |✓ | | 10042|acc, acc_norm | -|openbookqa |✓ |✓ |✓ | 500|acc, acc_norm | -|squad2 |✓ |✓ | | 11873|exact, f1, HasAns_exact, HasAns_f1, NoAns_exact, NoAns_f1, best_exact, best_f1| -|race |✓ |✓ |✓ | 1045|acc | -|headqa |✓ |✓ |✓ | 2742|acc, acc_norm | -|mathqa |✓ |✓ |✓ | 2985|acc, acc_norm | -|webqs |✓ | |✓ | 2032|acc | -|wsc273 | | |✓ | 273|acc | -|winogrande |✓ |✓ | | 1267|acc | -|anli_r1 |✓ |✓ |✓ | 1000|acc | -|anli_r2 |✓ |✓ |✓ | 1000|acc | -|anli_r3 |✓ |✓ |✓ | 1200|acc | -|ethics_cm |✓ | |✓ | 3885|acc | -|ethics_deontology |✓ | |✓ | 3596|acc, em | -|ethics_justice |✓ | |✓ | 2704|acc, em | -|ethics_utilitarianism_original | | |✓ | 4808|acc | -|ethics_utilitarianism |✓ | |✓ | 4808|acc | -|ethics_virtue |✓ | |✓ | 4975|acc, em | -|math_algebra |✓ | |✓ | 
1187|acc | -|math_counting_and_prob |✓ | |✓ | 474|acc | -|math_geometry |✓ | |✓ | 479|acc | -|math_intermediate_algebra |✓ | |✓ | 903|acc | -|math_num_theory |✓ | |✓ | 540|acc | -|math_prealgebra |✓ | |✓ | 871|acc | -|math_precalc |✓ | |✓ | 546|acc | -|arithmetic_2da | |✓ | | 2000|acc | -|arithmetic_2ds | |✓ | | 2000|acc | -|arithmetic_3da | |✓ | | 2000|acc | -|arithmetic_3ds | |✓ | | 2000|acc | -|arithmetic_4da | |✓ | | 2000|acc | -|arithmetic_4ds | |✓ | | 2000|acc | -|arithmetic_5da | |✓ | | 2000|acc | -|arithmetic_5ds | |✓ | | 2000|acc | -|arithmetic_2dm | |✓ | | 2000|acc | -|arithmetic_1dc | |✓ | | 2000|acc | -|hendrycksTest-abstract_algebra |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-anatomy |✓ |✓ |✓ | 135|acc, acc_norm | -|hendrycksTest-astronomy |✓ |✓ |✓ | 152|acc, acc_norm | -|hendrycksTest-business_ethics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-clinical_knowledge |✓ |✓ |✓ | 265|acc, acc_norm | -|hendrycksTest-college_biology |✓ |✓ |✓ | 144|acc, acc_norm | -|hendrycksTest-college_chemistry |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_mathematics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-college_medicine |✓ |✓ |✓ | 173|acc, acc_norm | -|hendrycksTest-college_physics |✓ |✓ |✓ | 102|acc, acc_norm | -|hendrycksTest-computer_security |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-conceptual_physics |✓ |✓ |✓ | 235|acc, acc_norm | -|hendrycksTest-econometrics |✓ |✓ |✓ | 114|acc, acc_norm | -|hendrycksTest-electrical_engineering |✓ |✓ |✓ | 145|acc, acc_norm | -|hendrycksTest-elementary_mathematics |✓ |✓ |✓ | 378|acc, acc_norm | -|hendrycksTest-formal_logic |✓ |✓ |✓ | 126|acc, acc_norm | -|hendrycksTest-global_facts |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-high_school_biology |✓ |✓ |✓ | 310|acc, acc_norm | -|hendrycksTest-high_school_chemistry |✓ |✓ |✓ | 203|acc, acc_norm | -|hendrycksTest-high_school_computer_science |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-high_school_european_history |✓ |✓ |✓ | 165|acc, acc_norm | -|hendrycksTest-high_school_geography |✓ |✓ |✓ | 198|acc, acc_norm | -|hendrycksTest-high_school_government_and_politics |✓ |✓ |✓ | 193|acc, acc_norm | -|hendrycksTest-high_school_macroeconomics |✓ |✓ |✓ | 390|acc, acc_norm | -|hendrycksTest-high_school_mathematics |✓ |✓ |✓ | 270|acc, acc_norm | -|hendrycksTest-high_school_microeconomics |✓ |✓ |✓ | 238|acc, acc_norm | -|hendrycksTest-high_school_physics |✓ |✓ |✓ | 151|acc, acc_norm | -|hendrycksTest-high_school_psychology |✓ |✓ |✓ | 545|acc, acc_norm | -|hendrycksTest-high_school_statistics |✓ |✓ |✓ | 216|acc, acc_norm | -|hendrycksTest-high_school_us_history |✓ |✓ |✓ | 204|acc, acc_norm | -|hendrycksTest-high_school_world_history |✓ |✓ |✓ | 237|acc, acc_norm | -|hendrycksTest-human_aging |✓ |✓ |✓ | 223|acc, acc_norm | -|hendrycksTest-human_sexuality |✓ |✓ |✓ | 131|acc, acc_norm | -|hendrycksTest-international_law |✓ |✓ |✓ | 121|acc, acc_norm | -|hendrycksTest-jurisprudence |✓ |✓ |✓ | 108|acc, acc_norm | -|hendrycksTest-logical_fallacies |✓ |✓ |✓ | 163|acc, acc_norm | -|hendrycksTest-machine_learning |✓ |✓ |✓ | 112|acc, acc_norm | -|hendrycksTest-management |✓ |✓ |✓ | 103|acc, acc_norm | -|hendrycksTest-marketing |✓ |✓ |✓ | 234|acc, acc_norm | -|hendrycksTest-medical_genetics |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-miscellaneous |✓ |✓ |✓ | 783|acc, acc_norm | -|hendrycksTest-moral_disputes |✓ |✓ |✓ | 346|acc, acc_norm | -|hendrycksTest-moral_scenarios |✓ |✓ |✓ | 895|acc, acc_norm | -|hendrycksTest-nutrition |✓ |✓ 
|✓ | 306|acc, acc_norm | -|hendrycksTest-philosophy |✓ |✓ |✓ | 311|acc, acc_norm | -|hendrycksTest-prehistory |✓ |✓ |✓ | 324|acc, acc_norm | -|hendrycksTest-professional_accounting |✓ |✓ |✓ | 282|acc, acc_norm | -|hendrycksTest-professional_law |✓ |✓ |✓ | 1534|acc, acc_norm | -|hendrycksTest-professional_medicine |✓ |✓ |✓ | 272|acc, acc_norm | -|hendrycksTest-professional_psychology |✓ |✓ |✓ | 612|acc, acc_norm | -|hendrycksTest-public_relations |✓ |✓ |✓ | 110|acc, acc_norm | -|hendrycksTest-security_studies |✓ |✓ |✓ | 245|acc, acc_norm | -|hendrycksTest-sociology |✓ |✓ |✓ | 201|acc, acc_norm | -|hendrycksTest-us_foreign_policy |✓ |✓ |✓ | 100|acc, acc_norm | -|hendrycksTest-virology |✓ |✓ |✓ | 166|acc, acc_norm | -|hendrycksTest-world_religions |✓ |✓ |✓ | 171|acc, acc_norm | -|wmt14-en-fr | | |✓ | 3003|bleu, chrf, ter | -|wmt14-fr-en | | |✓ | 3003|bleu, chrf, ter | -|wmt16-en-ro | | |✓ | 1999|bleu, chrf, ter | -|wmt16-ro-en | | |✓ | 1999|bleu, chrf, ter | -|wmt16-de-en | | |✓ | 2999|bleu, chrf, ter | -|wmt16-en-de | | |✓ | 2999|bleu, chrf, ter | -|wmt20-cs-en | | |✓ | 664|bleu, chrf, ter | -|wmt20-de-en | | |✓ | 785|bleu, chrf, ter | -|wmt20-de-fr | | |✓ | 1619|bleu, chrf, ter | -|wmt20-en-cs | | |✓ | 1418|bleu, chrf, ter | -|wmt20-en-de | | |✓ | 1418|bleu, chrf, ter | -|wmt20-en-iu | | |✓ | 2971|bleu, chrf, ter | -|wmt20-en-ja | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-km | | |✓ | 2320|bleu, chrf, ter | -|wmt20-en-pl | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-ps | | |✓ | 2719|bleu, chrf, ter | -|wmt20-en-ru | | |✓ | 2002|bleu, chrf, ter | -|wmt20-en-ta | | |✓ | 1000|bleu, chrf, ter | -|wmt20-en-zh | | |✓ | 1418|bleu, chrf, ter | -|wmt20-fr-de | | |✓ | 1619|bleu, chrf, ter | -|wmt20-iu-en | | |✓ | 2971|bleu, chrf, ter | -|wmt20-ja-en | | |✓ | 993|bleu, chrf, ter | -|wmt20-km-en | | |✓ | 2320|bleu, chrf, ter | -|wmt20-pl-en | | |✓ | 1001|bleu, chrf, ter | -|wmt20-ps-en | | |✓ | 2719|bleu, chrf, ter | -|wmt20-ru-en | | |✓ | 991|bleu, chrf, ter | -|wmt20-ta-en | | |✓ | 997|bleu, chrf, ter | -|wmt20-zh-en | | |✓ | 2000|bleu, chrf, ter | -|iwslt17-en-ar | | |✓ | 1460|bleu, chrf, ter | -|iwslt17-ar-en | | |✓ | 1460|bleu, chrf, ter | -|anagrams1 | |✓ | | 10000|acc | -|anagrams2 | |✓ | | 10000|acc | -|cycle_letters | |✓ | | 10000|acc | -|random_insertion | |✓ | | 10000|acc | -|reversed_words | |✓ | | 10000|acc | -|pile_arxiv | |✓ |✓ | 2407|word_perplexity, byte_perplexity, bits_per_byte | -|pile_books3 | |✓ |✓ | 269|word_perplexity, byte_perplexity, bits_per_byte | -|pile_bookcorpus2 | |✓ |✓ | 28|word_perplexity, byte_perplexity, bits_per_byte | -|pile_dm-mathematics | |✓ |✓ | 1922|word_perplexity, byte_perplexity, bits_per_byte | -|pile_enron | |✓ |✓ | 1010|word_perplexity, byte_perplexity, bits_per_byte | -|pile_europarl | |✓ |✓ | 157|word_perplexity, byte_perplexity, bits_per_byte | -|pile_freelaw | |✓ |✓ | 5101|word_perplexity, byte_perplexity, bits_per_byte | -|pile_github | |✓ |✓ | 18195|word_perplexity, byte_perplexity, bits_per_byte | -|pile_gutenberg | |✓ |✓ | 80|word_perplexity, byte_perplexity, bits_per_byte | -|pile_hackernews | |✓ |✓ | 1632|word_perplexity, byte_perplexity, bits_per_byte | -|pile_nih-exporter | |✓ |✓ | 1884|word_perplexity, byte_perplexity, bits_per_byte | -|pile_opensubtitles | |✓ |✓ | 642|word_perplexity, byte_perplexity, bits_per_byte | -|pile_openwebtext2 | |✓ |✓ | 32925|word_perplexity, byte_perplexity, bits_per_byte | -|pile_philpapers | |✓ |✓ | 68|word_perplexity, byte_perplexity, bits_per_byte | -|pile_pile-cc | |✓ |✓ | 52790|word_perplexity, byte_perplexity, 
bits_per_byte | -|pile_pubmed-abstracts | |✓ |✓ | 29895|word_perplexity, byte_perplexity, bits_per_byte | -|pile_pubmed-central | |✓ |✓ | 5911|word_perplexity, byte_perplexity, bits_per_byte | -|pile_stackexchange | |✓ |✓ | 30378|word_perplexity, byte_perplexity, bits_per_byte | -|pile_uspto | |✓ |✓ | 11415|word_perplexity, byte_perplexity, bits_per_byte | -|pile_ubuntu-irc | |✓ |✓ | 22|word_perplexity, byte_perplexity, bits_per_byte | -|pile_wikipedia | |✓ |✓ | 17511|word_perplexity, byte_perplexity, bits_per_byte | -|pile_youtubesubtitles | |✓ | | 1000|acc -|blimp_adjunct_island | |✓ | | 1000|acc -|blimp_anaphor_gender_agreement | |✓ | | 1000|acc -|blimp_anaphor_number_agreement | |✓ | | 1000|acc -|blimp_animate_subject_passive | |✓ | | 1000|acc -|blimp_animate_subject_trans | |✓ | | 1000|acc -|blimp_causative | |✓ | | 1000|acc -|blimp_complex_NP_island | |✓ | | 1000|acc -|blimp_coordinate_structure_constraint_complex_left_branch| |✓ | | 1000|acc -|blimp_coordinate_structure_constraint_object_extraction | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_1 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_2 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_irregular_1 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_irregular_2 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_with_adj_2 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_with_adj_irregular_1 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_with_adj_irregular_2 | |✓ | | 1000|acc -|blimp_determiner_noun_agreement_with_adjective_1 | |✓ | | 1000|acc -|blimp_distractor_agreement_relational_noun | |✓ | | 1000|acc -|blimp_distractor_agreement_relative_clause | |✓ | | 1000|acc -|blimp_drop_argument | |✓ | | 1000|acc -|blimp_ellipsis_n_bar_1 | |✓ | | 1000|acc -|blimp_ellipsis_n_bar_2 | |✓ | | 1000|acc -|blimp_existential_there_object_raising | |✓ | | 1000|acc -|blimp_existential_there_quantifiers_1 | |✓ | | 1000|acc -|blimp_existential_there_quantifiers_2 | |✓ | | 1000|acc -|blimp_existential_there_subject_raising | |✓ | | 1000|acc -|blimp_expletive_it_object_raising | |✓ | | 1000|acc -|blimp_inchoative | |✓ | | 1000|acc -|blimp_intransitive | |✓ | | 1000|acc -|blimp_irregular_past_participle_adjectives | |✓ | | 1000|acc -|blimp_irregular_past_participle_verbs | |✓ | | 1000|acc -|blimp_irregular_plural_subject_verb_agreement_1 | |✓ | | 1000|acc -|blimp_irregular_plural_subject_verb_agreement_2 | |✓ | | 1000|acc -|blimp_left_branch_island_echo_question | |✓ | | 1000|acc -|blimp_left_branch_island_simple_question | |✓ | | 1000|acc -|blimp_matrix_question_npi_licensor_present | |✓ | | 1000|acc -|blimp_npi_present_1 | |✓ | | 1000|acc -|blimp_npi_present_2 | |✓ | | 1000|acc -|blimp_only_npi_licensor_present | |✓ | | 1000|acc -|blimp_only_npi_scope | |✓ | | 1000|acc -|blimp_passive_1 | |✓ | | 1000|acc -|blimp_passive_2 | |✓ | | 1000|acc -|blimp_principle_A_c_command | |✓ | | 1000|acc -|blimp_principle_A_case_1 | |✓ | | 1000|acc -|blimp_principle_A_case_2 | |✓ | | 1000|acc -|blimp_principle_A_domain_1 | |✓ | | 1000|acc -|blimp_principle_A_domain_2 | |✓ | | 1000|acc -|blimp_principle_A_domain_3 | |✓ | | 1000|acc -|blimp_principle_A_reconstruction | |✓ | | 1000|acc -|blimp_regular_plural_subject_verb_agreement_1 | |✓ | | 1000|acc -|blimp_regular_plural_subject_verb_agreement_2 | |✓ | | 1000|acc -|blimp_sentential_negation_npi_licensor_present | |✓ | | 1000|acc -|blimp_sentential_negation_npi_scope | |✓ | | 1000|acc -|blimp_sentential_subject_island | |✓ | | 1000|acc -|blimp_superlative_quantifiers_1 | |✓ | | 1000|acc 
-|blimp_superlative_quantifiers_2 | |✓ | | 1000|acc -|blimp_tough_vs_raising_1 | |✓ | | 1000|acc -|blimp_tough_vs_raising_2 | |✓ | | 1000|acc -|blimp_transitive | |✓ | | 1000|acc -|blimp_wh_island | |✓ | | 1000|acc -|blimp_wh_questions_object_gap | |✓ | | 1000|acc -|blimp_wh_questions_subject_gap | |✓ | | 1000|acc -|blimp_wh_questions_subject_gap_long_distance | |✓ | | 1000|acc -|blimp_wh_vs_that_no_gap | |✓ | | 1000|acc -|blimp_wh_vs_that_no_gap_long_distance | |✓ | | 1000|acc -|blimp_wh_vs_that_with_gap | |✓ | | 1000|acc -|blimp_wh_vs_that_with_gap_long_distance | |✓ | | 1000|acc - - -## Usage - -### Evaluate a task - -Additional arguments can be provided to the model constructor using the `--model_args` flag. Most importantly, the `gpt2` model can be used to load an arbitrary HuggingFace model as follows: - - -```bash -python main.py \ - --model gpt2 \ - --model_args pretrained=EleutherAI/gpt-neo-1.3B \ - --device cuda:0 \ - --tasks lambada,hellaswag \ - --num_fewshot 2 -``` - -To inspect what the LM inputs look like, you can run the following command: - -```bash -python write_out.py \ - --tasks all_tasks \ - --provide_description \ - --num_fewshot 5 \ - --num_examples 10 \ - --output_base_path /path/to/output/folder -``` - -This will write out one text file for each task. - -### Code Structure - -There are two major components of the library: - -1. LMs (language models), e.g. GPT-2, GPT-3 -2. Tasks, e.g. MNLI, RTE, SQuAD (coming soon) - -Both LMs (`lm_eval.models`) and Tasks (`lm_eval.tasks`) are kept in a registry data structure, for easy CLI instantiation. - -**If you want to extend either models or tasks, simply add a new LM or Task subclass, and decorate with the registry decorator**. - -The [GPT-3 Evaluations Project](https://github.com/EleutherAI/lm_evaluation_harness/projects/1) tracks our progress implementing new tasks. Right now, we are focused on getting all the datasets loaded so that we can dedupe against the training data. Implementing the actual evaluations is nice but not necessary at the current moment. - -### Task Versioning - -To help improve reproducibility, all tasks have a VERSION field. When run from the command line, this is reported in a column in the table, or in the "version" field in the evaluator return dict. The purpose of the version is so that if the task definition changes (i.e to fix a bug), then we can know exactly which metrics were computed using the old buggy implementation to avoid unfair comparisons. To enforce this, there are unit tests that make sure the behavior of all tests remains the same as when they were first implemented. Task versions start at 0, and each time a breaking change is made, the version is incremented by one. - -When reporting eval harness results, please also report the version of each task. This can be done either with a separate column in the table, or by reporting the task name with the version appended as such: taskname-v0. - -## Description - -### 1. LM Evaluation -Given an LM, we want to evaluate it on a wide range of NLU tasks. We should at least cover the set of tasks in the GPT-3 paper, and any other tasks/benchmarks that are relevant. We will follow the GPT-3 format of a) zero-shot, b) one-shot, c) few-shot evaluation. - -To do this, we need 3 components: -* Data downloader (shared with later sections, potentially needs to be directly linked to the latter 2 components) -* Task formatter -* Task evaluator - -The **data downloader** should download data for the relevant tasks. 
-* We should heavily rely on Hugging Face's NLP for this. They are already doing most of the work with handling data scripts/caching. -* Optionally, we can rely directly on HF-NLP's caching, but that makes it awkward to handle non-HF-NLP datasets. Otherwise, we can just write them out to .jsonl. My feeling is that NLU data storage will be a drop in the bucket compared to LM data. -* Where we're not using HF-NLP, we can keep the data in the raw format (.jsonl, tsv, etc.) and let the other components handle transforming it. - -The **task formatter** formats the task input data into an LM-usable format. -* We should potentially support multiple formats for a given task, e.g. some formats may be better or worse suited for LM evaluation. See also: prompt-engineering. -* The task formatter should also support zero/one/few-shot packing of training examples into an input. This may require weird interactions with the tokenizer for dealing with max-token issues. - -The **task evaluator** scores a task. -* In essence, we want to generate output predictions for all our input examples, and feed them into some function that pops out a score (or scores). -An alternative approach is to collect the output logits and score them against the expected set of outputs. -* Some tasks have weird evaluation schemes, so we should make this as general as possible. -* Will thus likely have to be closely tied with the formatter. -* Likewise, we should take advantage of HF-NLP's metrics. -We might as well provide a sufficiently general API for the model to support OpenAI API as well. This can double up as an effort to reproduce the OpenAI NLU results. - -### 2. Removing val/test data from LM training set -With the data downloader in place, we simply need to (1) expose the val/test examples, and (2) remove them from the training set. - -* Arguably, (2) should be handled by LM preprocessing in a more general way. There are probably non-NLU-eval cases where we want to remove some specific data from training. -* Depending on how exactly we do the val/test removal, we may want to format the same example multiple ways to ensure that they don't get leaked into the training set in a slightly tweaked format. -* Thought experiment: SQuAD is based largely on Wikipedia. What exactly would we want to remove from the LM? -* [GPT-3]: In GPT-3, they attempted to remove val/test from their LM set, but there was a bug that caused leakage. So they ended up doing the opposite: removing overlaps from the LM set from the val/test. Funky. -* [GPT-3]: See page 30 and Appendix C for details. They do some funky n-gram based search and removal. We should think about whether we want to follow their protocol exactly. - -### 3. Adding task training data to LM training set -This part is the easiest. I guess we just write out some text files containing the training data? We can let the usual LM preprocessing pipeline handle it from there.
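(The task-versioning convention described above asks for scores to be reported as `taskname-vN`, using the `versions` mapping the evaluator returns alongside `results`. A minimal sketch of that bookkeeping follows; the helper name and formatting are illustrative only, not part of the harness, and assume the result-dict shape visible in the testdata JSONs elsewhere in this patch series.)

```python
import json

# Illustrative helper (not part of the harness): format scores as
# "taskname-vN" using the "versions" mapping returned next to "results".
def format_versioned_results(results: dict) -> str:
    lines = []
    for task, metrics in sorted(results["results"].items()):
        version = results["versions"].get(task, "?")
        scores = ", ".join(f"{name}={value:.4f}" for name, value in metrics.items())
        lines.append(f"{task}-v{version}: {scores}")
    return "\n".join(lines)

with open("tests/testdata/boolq-v1-res.json") as f:
    print(format_versioned_results(json.load(f)))
# boolq-v1: acc=0.5049, acc_stderr=0.0087
```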
- - diff --git a/lm_eval.egg-info/SOURCES.txt b/lm_eval.egg-info/SOURCES.txt deleted file mode 100644 index 71a6a2fdb8..0000000000 --- a/lm_eval.egg-info/SOURCES.txt +++ /dev/null @@ -1,74 +0,0 @@ -LICENSE.md -README.md -setup.py -lm_eval/__init__.py -lm_eval/base.py -lm_eval/evaluator.py -lm_eval/metrics.py -lm_eval/utils.py -lm_eval.egg-info/PKG-INFO -lm_eval.egg-info/SOURCES.txt -lm_eval.egg-info/dependency_links.txt -lm_eval.egg-info/requires.txt -lm_eval.egg-info/top_level.txt -lm_eval/models/__init__.py -lm_eval/models/dummy.py -lm_eval/models/gpt2.py -lm_eval/models/gpt3.py -lm_eval/tasks/__init__.py -lm_eval/tasks/anli.py -lm_eval/tasks/arc.py -lm_eval/tasks/arithmetic.py -lm_eval/tasks/blimp.py -lm_eval/tasks/cbt.py -lm_eval/tasks/common.py -lm_eval/tasks/coqa.py -lm_eval/tasks/drop.py -lm_eval/tasks/glue.py -lm_eval/tasks/headqa.py -lm_eval/tasks/hellaswag.py -lm_eval/tasks/hendrycks_ethics.py -lm_eval/tasks/hendrycks_math.py -lm_eval/tasks/hendrycks_test.py -lm_eval/tasks/lambada.py -lm_eval/tasks/lambada_cloze.py -lm_eval/tasks/lambada_multilingual.py -lm_eval/tasks/logiqa.py -lm_eval/tasks/mathqa.py -lm_eval/tasks/mc_taco.py -lm_eval/tasks/mutual.py -lm_eval/tasks/naturalqs.py -lm_eval/tasks/openbookqa.py -lm_eval/tasks/pile.py -lm_eval/tasks/piqa.py -lm_eval/tasks/prost.py -lm_eval/tasks/pubmedqa.py -lm_eval/tasks/qa4mre.py -lm_eval/tasks/quac.py -lm_eval/tasks/race.py -lm_eval/tasks/sat.py -lm_eval/tasks/sciq.py -lm_eval/tasks/squad.py -lm_eval/tasks/storycloze.py -lm_eval/tasks/superglue.py -lm_eval/tasks/translation.py -lm_eval/tasks/triviaqa.py -lm_eval/tasks/truthfulqa.py -lm_eval/tasks/unscramble.py -lm_eval/tasks/webqs.py -lm_eval/tasks/wikitext.py -lm_eval/tasks/winogrande.py -lm_eval/tasks/wsc273.py -scripts/__init__.py -scripts/cost_estimate.py -scripts/fewshot_description_experiment.py -scripts/get_prompts.py -scripts/make_gpt2_test_cases.py -scripts/make_table_tasks.py -scripts/write_out.py -scripts/clean_training_data/__init__.py -scripts/clean_training_data/archiver.py -scripts/clean_training_data/generate_13_grams.py -scripts/clean_training_data/janitor.py -scripts/clean_training_data/process_sorted_buckets.py -scripts/clean_training_data/sort_13_gram_buckets.py \ No newline at end of file diff --git a/lm_eval.egg-info/dependency_links.txt b/lm_eval.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789179..0000000000 --- a/lm_eval.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/lm_eval.egg-info/requires.txt b/lm_eval.egg-info/requires.txt deleted file mode 100644 index c0f5b007a4..0000000000 --- a/lm_eval.egg-info/requires.txt +++ /dev/null @@ -1,24 +0,0 @@ -black -best_download>=0.0.6 -datasets==1.15.1 -click>=7.1 -scikit-learn>=0.24.1 -torch>=1.7 -transformers>=4.1 -sqlitedict==1.6.0 -pytablewriter==0.58.0 -sacrebleu==1.5.0 -rouge-score==0.0.4 -bleurt@ https://github.com/google-research/bleurt/archive/b610120347ef22b494b6d69b4316e303f5932516.zip#egg=bleurt -pycountry==20.7.3 -numexpr==2.7.2 -lm_dataformat==0.0.20 -pytest==6.2.3 -pybind11==2.6.2 -tqdm-multiprocess==0.0.11 -zstandard==0.15.2 -jsonlines==2.0.0 -mock==4.0.3 -openai==0.6.4 -jieba==0.42.1 -nagisa==0.2.7 diff --git a/lm_eval.egg-info/top_level.txt b/lm_eval.egg-info/top_level.txt deleted file mode 100644 index 3c734591d0..0000000000 --- a/lm_eval.egg-info/top_level.txt +++ /dev/null @@ -1,2 +0,0 @@ -lm_eval -scripts diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index dcde1f1e23..d84fc81a54 100644 --- a/lm_eval/tasks/asdiv.py +++ 
b/lm_eval/tasks/asdiv.py @@ -32,9 +32,9 @@ def download(self): if self.DATASET_PATH.exists(): return Path.mkdir(self.DATASET_PATH) - url = "https://github.com/chaochun/nlu-asdiv-dataset/archive/refs/heads/master.zip" - checksum = "2f71f8003929d605369ad924be4b95c15879fc2bfac0d4d01a81f8aabceaad5c" - zip_path = self.DATASET_PATH / "master.zip" + url = "https://github.com/chaochun/nlu-asdiv-dataset/archive/55790e5270bb91ccfa5053194b25732534696b50.zip" + checksum = "8f1fe4f6d5f170ec1e24ab78c244153c14c568b1bb2b1dad0324e71f37939a2d" + zip_path = self.DATASET_PATH / "55790e5270bb91ccfa5053194b25732534696b50.zip" download_file(url, str(zip_path), checksum) with ZipFile(zip_path, "r") as zip: zip.extractall(self.DATASET_PATH) @@ -85,7 +85,7 @@ def test_docs(self): raise NotImplementedError("This dataset has no test docs") def validation_docs(self): - data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-master/dataset/ASDiv.xml" + data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-55790e5270bb91ccfa5053194b25732534696b50/dataset/ASDiv.xml" return self.load_docs(data_xml_path) def fewshot_context(self, doc, num_fewshot, provide_description, rnd): @@ -109,11 +109,10 @@ def doc_to_target(self, doc): if len(answer)>0: # check if answer is present only in brackets return answer else: - return doc['answer'] + return " "+doc['answer'] def construct_requests(self, doc, ctx): ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) - return ll, is_greedy def process_results(self, doc, results): From 6653cc509e810752eeda2349c20c315a18c7fd5d Mon Sep 17 00:00:00 2001 From: Igor Ostrovsky Date: Thu, 30 Dec 2021 12:46:12 -0800 Subject: [PATCH 44/65] Bump the version number for all tasks based on PerplexityTask This is due to the change in the bits_per_byte calculation. 
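(For context on this version bump: the v1 testdata added later in this series is consistent with the standard relationship `byte_perplexity == 2 ** bits_per_byte`. For `pile_arxiv`, `2 ** 1.55095665856779e-05` is approximately `1.0000107504701365`, matching the JSON below. A minimal sketch of that standard definition, written here for illustration rather than taken from the harness source:)

```python
import math

# Assumed standard definition, not copied from the harness: negative
# log-likelihood in nats, converted to bits, normalized by byte length.
def bits_per_byte(total_loglikelihood: float, num_bytes: int) -> float:
    return -total_loglikelihood / (num_bytes * math.log(2))

def byte_perplexity(bpb: float) -> float:
    return 2 ** bpb
```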
--- lm_eval/tasks/pile.py | 2 +- lm_eval/tasks/wikitext.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_eval/tasks/pile.py b/lm_eval/tasks/pile.py index 68ff7ed9a8..de16caa3db 100644 --- a/lm_eval/tasks/pile.py +++ b/lm_eval/tasks/pile.py @@ -10,7 +10,7 @@ class PilePerplexityTask(PerplexityTask, abc.ABC): - VERSION = 0 + VERSION = 1 PILE_SET_NAME = None VAL_PATH = 'data/pile/val.jsonl.zst' diff --git a/lm_eval/tasks/wikitext.py b/lm_eval/tasks/wikitext.py index 24f9ec3507..b84cd3838b 100644 --- a/lm_eval/tasks/wikitext.py +++ b/lm_eval/tasks/wikitext.py @@ -41,7 +41,7 @@ def wikitext_detokenizer(string): class WikiText(PerplexityTask): - VERSION = 0 + VERSION = 1 def download(self): if not os.path.exists('data/wikitext/wikitext-2-raw/wiki.valid.raw'): @@ -87,4 +87,4 @@ def doc_to_target(self, doc): def count_words(self, doc): # count number of words in *original doc before detokenization* - return len(re.split(r"\s+", doc)) \ No newline at end of file + return len(re.split(r"\s+", doc)) From 72d7cc0cc545b187b9cb243bc1fb73d350657298 Mon Sep 17 00:00:00 2001 From: rokosbasilisk Date: Fri, 31 Dec 2021 19:48:50 +0530 Subject: [PATCH 45/65] remove _strip_bracket function --- lm_eval/tasks/asdiv.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index d84fc81a54..beee30e813 100644 --- a/lm_eval/tasks/asdiv.py +++ b/lm_eval/tasks/asdiv.py @@ -56,19 +56,6 @@ def load_docs(self, textfilename, tfds=False): out_doc = self._convert_standard(problem) yield out_doc - def _strip_bracket(self,test_str): - ret = '' - skip1c = 0 - skip2c = 0 - for i in test_str: - if i == '(': - skip2c += 1 - elif i == ')'and skip2c > 0: - skip2c -= 1 - elif skip1c == 0 and skip2c == 0: - ret += i - return ret - def has_training_docs(self): return False @@ -105,11 +92,12 @@ def doc_to_text(self, doc): def doc_to_target(self, doc): # TODO: add formula - answer = self._strip_bracket(doc['answer']) + answer = doc['answer'].split(' (')[0] + print(answer) if len(answer)>0: # check if answer is present only in brackets return answer else: - return " "+doc['answer'] + return doc['answer'] def construct_requests(self, doc, ctx): ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) From 83e1a1166cb2df68accf353737e1d0450a719517 Mon Sep 17 00:00:00 2001 From: rokosbasilisk Date: Fri, 31 Dec 2021 19:50:15 +0530 Subject: [PATCH 46/65] removed strip_bracket function --- lm_eval/tasks/asdiv.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index beee30e813..0ff563538e 100644 --- a/lm_eval/tasks/asdiv.py +++ b/lm_eval/tasks/asdiv.py @@ -93,11 +93,7 @@ def doc_to_target(self, doc): # TODO: add formula answer = doc['answer'].split(' (')[0] - print(answer) - if len(answer)>0: # check if answer is present only in brackets - return answer - else: - return doc['answer'] + return answer def construct_requests(self, doc, ctx): ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) From 33315a1f50b163856e3c83f1ea514654c7367deb Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Fri, 31 Dec 2021 17:13:20 -0700 Subject: [PATCH 47/65] pile/wikitext: add testdata --- tests/testdata/pile_arxiv-v1-loglikelihood_rolling | 1 + tests/testdata/pile_arxiv-v1-res.json | 1 + tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling | 1 + tests/testdata/pile_bookcorpus2-v1-res.json | 1 + tests/testdata/pile_books3-v1-loglikelihood_rolling | 1 + 
tests/testdata/pile_books3-v1-res.json | 1 + tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling | 1 + tests/testdata/pile_dm-mathematics-v1-res.json | 1 + tests/testdata/pile_enron-v1-loglikelihood_rolling | 1 + tests/testdata/pile_enron-v1-res.json | 1 + tests/testdata/pile_europarl-v1-loglikelihood_rolling | 1 + tests/testdata/pile_europarl-v1-res.json | 1 + tests/testdata/pile_freelaw-v1-loglikelihood_rolling | 1 + tests/testdata/pile_freelaw-v1-res.json | 1 + tests/testdata/pile_github-v1-loglikelihood_rolling | 1 + tests/testdata/pile_github-v1-res.json | 1 + tests/testdata/pile_gutenberg-v1-loglikelihood_rolling | 1 + tests/testdata/pile_gutenberg-v1-res.json | 1 + tests/testdata/pile_hackernews-v1-loglikelihood_rolling | 1 + tests/testdata/pile_hackernews-v1-res.json | 1 + tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling | 1 + tests/testdata/pile_nih-exporter-v1-res.json | 1 + tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling | 1 + tests/testdata/pile_opensubtitles-v1-res.json | 1 + tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling | 1 + tests/testdata/pile_openwebtext2-v1-res.json | 1 + tests/testdata/pile_philpapers-v1-loglikelihood_rolling | 1 + tests/testdata/pile_philpapers-v1-res.json | 1 + tests/testdata/pile_pile-cc-v1-loglikelihood_rolling | 1 + tests/testdata/pile_pile-cc-v1-res.json | 1 + tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling | 1 + tests/testdata/pile_pubmed-abstracts-v1-res.json | 1 + tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling | 1 + tests/testdata/pile_pubmed-central-v1-res.json | 1 + tests/testdata/pile_stackexchange-v1-loglikelihood_rolling | 1 + tests/testdata/pile_stackexchange-v1-res.json | 1 + tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling | 1 + tests/testdata/pile_ubuntu-irc-v1-res.json | 1 + tests/testdata/pile_uspto-v1-loglikelihood_rolling | 1 + tests/testdata/pile_uspto-v1-res.json | 1 + tests/testdata/pile_wikipedia-v1-loglikelihood_rolling | 1 + tests/testdata/pile_wikipedia-v1-res.json | 1 + tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling | 1 + tests/testdata/pile_youtubesubtitles-v1-res.json | 1 + tests/testdata/wikitext-v1-loglikelihood_rolling | 1 + tests/testdata/wikitext-v1-res.json | 1 + 46 files changed, 46 insertions(+) create mode 100644 tests/testdata/pile_arxiv-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_arxiv-v1-res.json create mode 100644 tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_bookcorpus2-v1-res.json create mode 100644 tests/testdata/pile_books3-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_books3-v1-res.json create mode 100644 tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_dm-mathematics-v1-res.json create mode 100644 tests/testdata/pile_enron-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_enron-v1-res.json create mode 100644 tests/testdata/pile_europarl-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_europarl-v1-res.json create mode 100644 tests/testdata/pile_freelaw-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_freelaw-v1-res.json create mode 100644 tests/testdata/pile_github-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_github-v1-res.json create mode 100644 tests/testdata/pile_gutenberg-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_gutenberg-v1-res.json create mode 100644 
tests/testdata/pile_hackernews-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_hackernews-v1-res.json create mode 100644 tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_nih-exporter-v1-res.json create mode 100644 tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_opensubtitles-v1-res.json create mode 100644 tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_openwebtext2-v1-res.json create mode 100644 tests/testdata/pile_philpapers-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_philpapers-v1-res.json create mode 100644 tests/testdata/pile_pile-cc-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_pile-cc-v1-res.json create mode 100644 tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_pubmed-abstracts-v1-res.json create mode 100644 tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_pubmed-central-v1-res.json create mode 100644 tests/testdata/pile_stackexchange-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_stackexchange-v1-res.json create mode 100644 tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_ubuntu-irc-v1-res.json create mode 100644 tests/testdata/pile_uspto-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_uspto-v1-res.json create mode 100644 tests/testdata/pile_wikipedia-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_wikipedia-v1-res.json create mode 100644 tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling create mode 100644 tests/testdata/pile_youtubesubtitles-v1-res.json create mode 100644 tests/testdata/wikitext-v1-loglikelihood_rolling create mode 100644 tests/testdata/wikitext-v1-res.json diff --git a/tests/testdata/pile_arxiv-v1-loglikelihood_rolling b/tests/testdata/pile_arxiv-v1-loglikelihood_rolling new file mode 100644 index 0000000000..3aa1d8c734 --- /dev/null +++ b/tests/testdata/pile_arxiv-v1-loglikelihood_rolling @@ -0,0 +1 @@ +814f9954e44368559602c00f7e85fa3971acdfd0315f508ec7df6318a79c55ec \ No newline at end of file diff --git a/tests/testdata/pile_arxiv-v1-res.json b/tests/testdata/pile_arxiv-v1-res.json new file mode 100644 index 0000000000..05cbab3873 --- /dev/null +++ b/tests/testdata/pile_arxiv-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_arxiv": {"bits_per_byte": 1.55095665856779e-05, "byte_perplexity": 1.0000107504701365, "word_perplexity": 1.0000819333090385}}, "versions": {"pile_arxiv": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling b/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling new file mode 100644 index 0000000000..b37a91cc2d --- /dev/null +++ b/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling @@ -0,0 +1 @@ +5c17ddfebeab8c41dabadb6fc216ceda91e3fe5dc95aaf1b2c843d7f11828b03 \ No newline at end of file diff --git a/tests/testdata/pile_bookcorpus2-v1-res.json b/tests/testdata/pile_bookcorpus2-v1-res.json new file mode 100644 index 0000000000..967c14934b --- /dev/null +++ b/tests/testdata/pile_bookcorpus2-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_bookcorpus2": {"bits_per_byte": 1.6780040419457868e-06, "byte_perplexity": 1.000001163104447, "word_perplexity": 1.0000066499426599}}, "versions": {"pile_bookcorpus2": 1}} \ No newline at end of file diff --git 
a/tests/testdata/pile_books3-v1-loglikelihood_rolling b/tests/testdata/pile_books3-v1-loglikelihood_rolling new file mode 100644 index 0000000000..b483d3b45b --- /dev/null +++ b/tests/testdata/pile_books3-v1-loglikelihood_rolling @@ -0,0 +1 @@ +0f8f36f705b999b6d55fa72ff89a82793dd1cb568ab1f8727a6a2086a12b9410 \ No newline at end of file diff --git a/tests/testdata/pile_books3-v1-res.json b/tests/testdata/pile_books3-v1-res.json new file mode 100644 index 0000000000..6ff7a51711 --- /dev/null +++ b/tests/testdata/pile_books3-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_books3": {"bits_per_byte": 1.2901280503011222e-06, "byte_perplexity": 1.0000008942490204, "word_perplexity": 1.0000052870063607}}, "versions": {"pile_books3": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling b/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling new file mode 100644 index 0000000000..2fb27786c5 --- /dev/null +++ b/tests/testdata/pile_dm-mathematics-v1-loglikelihood_rolling @@ -0,0 +1 @@ +d5b7967c0ece8b816f3921a8bd0fad23365349e935b491595e2ad1135af42da6 \ No newline at end of file diff --git a/tests/testdata/pile_dm-mathematics-v1-res.json b/tests/testdata/pile_dm-mathematics-v1-res.json new file mode 100644 index 0000000000..192e9066a4 --- /dev/null +++ b/tests/testdata/pile_dm-mathematics-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_dm-mathematics": {"bits_per_byte": 8.910951449933553e-05, "byte_perplexity": 1.0000617679162955, "word_perplexity": 1.0002875035042451}}, "versions": {"pile_dm-mathematics": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_enron-v1-loglikelihood_rolling b/tests/testdata/pile_enron-v1-loglikelihood_rolling new file mode 100644 index 0000000000..57dbe76460 --- /dev/null +++ b/tests/testdata/pile_enron-v1-loglikelihood_rolling @@ -0,0 +1 @@ +4baa6ccdc9e3aa9921675ab4400d5e89d7b546b844a8ea28f6461d649066418a \ No newline at end of file diff --git a/tests/testdata/pile_enron-v1-res.json b/tests/testdata/pile_enron-v1-res.json new file mode 100644 index 0000000000..abe7b45f9a --- /dev/null +++ b/tests/testdata/pile_enron-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_enron": {"bits_per_byte": 0.0004564546920781453, "byte_perplexity": 1.000316440339552, "word_perplexity": 1.00224668051869}}, "versions": {"pile_enron": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_europarl-v1-loglikelihood_rolling b/tests/testdata/pile_europarl-v1-loglikelihood_rolling new file mode 100644 index 0000000000..8027260755 --- /dev/null +++ b/tests/testdata/pile_europarl-v1-loglikelihood_rolling @@ -0,0 +1 @@ +e67d3dbccd47d308bfc5b0e66b76d0dfc5e386ebfa94e056562c2281c395543f \ No newline at end of file diff --git a/tests/testdata/pile_europarl-v1-res.json b/tests/testdata/pile_europarl-v1-res.json new file mode 100644 index 0000000000..b948f0d369 --- /dev/null +++ b/tests/testdata/pile_europarl-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_europarl": {"bits_per_byte": 1.2477664839621123e-05, "byte_perplexity": 1.000008648895605, "word_perplexity": 1.000063506523818}}, "versions": {"pile_europarl": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_freelaw-v1-loglikelihood_rolling b/tests/testdata/pile_freelaw-v1-loglikelihood_rolling new file mode 100644 index 0000000000..7b5771f491 --- /dev/null +++ b/tests/testdata/pile_freelaw-v1-loglikelihood_rolling @@ -0,0 +1 @@ +d77f3f68aadd6cbf1290c2f6737b2ed5d5c2a60e4c81a65c280f207783caabe1 \ No newline at end of file diff --git 
a/tests/testdata/pile_freelaw-v1-res.json b/tests/testdata/pile_freelaw-v1-res.json new file mode 100644 index 0000000000..dd0e0bac36 --- /dev/null +++ b/tests/testdata/pile_freelaw-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_freelaw": {"bits_per_byte": 4.5623635481434923e-05, "byte_perplexity": 1.0000316243943415, "word_perplexity": 1.000203169094218}}, "versions": {"pile_freelaw": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_github-v1-loglikelihood_rolling b/tests/testdata/pile_github-v1-loglikelihood_rolling new file mode 100644 index 0000000000..cf8251e4f6 --- /dev/null +++ b/tests/testdata/pile_github-v1-loglikelihood_rolling @@ -0,0 +1 @@ +df384c3df3d8f53273e97127c5bb84c17e638acad7d6bc9c91f6dee96d43b639 \ No newline at end of file diff --git a/tests/testdata/pile_github-v1-res.json b/tests/testdata/pile_github-v1-res.json new file mode 100644 index 0000000000..cc06a45501 --- /dev/null +++ b/tests/testdata/pile_github-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_github": {"bits_per_byte": 0.00013764216145332133, "byte_perplexity": 1.0000954108274611, "word_perplexity": 1.0009643183931227}}, "versions": {"pile_github": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_gutenberg-v1-loglikelihood_rolling b/tests/testdata/pile_gutenberg-v1-loglikelihood_rolling new file mode 100644 index 0000000000..bd7b15927f --- /dev/null +++ b/tests/testdata/pile_gutenberg-v1-loglikelihood_rolling @@ -0,0 +1 @@ +02a559f74a9105145e7d4d9c5ddea372b5b4938f5368dc8ffafc39cbe3b4c7ef \ No newline at end of file diff --git a/tests/testdata/pile_gutenberg-v1-res.json b/tests/testdata/pile_gutenberg-v1-res.json new file mode 100644 index 0000000000..6d22ed3ff5 --- /dev/null +++ b/tests/testdata/pile_gutenberg-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_gutenberg": {"bits_per_byte": 1.7952329146458065e-06, "byte_perplexity": 1.0000012443614075, "word_perplexity": 1.0000072174665404}}, "versions": {"pile_gutenberg": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_hackernews-v1-loglikelihood_rolling b/tests/testdata/pile_hackernews-v1-loglikelihood_rolling new file mode 100644 index 0000000000..48b767bfe7 --- /dev/null +++ b/tests/testdata/pile_hackernews-v1-loglikelihood_rolling @@ -0,0 +1 @@ +ec1082ee5a5326e0d57aa4e73b634937140c1de9af95f154e8ab57b05d9b422b \ No newline at end of file diff --git a/tests/testdata/pile_hackernews-v1-res.json b/tests/testdata/pile_hackernews-v1-res.json new file mode 100644 index 0000000000..ea135278b7 --- /dev/null +++ b/tests/testdata/pile_hackernews-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_hackernews": {"bits_per_byte": 0.00014672607267878518, "byte_perplexity": 1.0001017079354932, "word_perplexity": 1.0006273924348839}}, "versions": {"pile_hackernews": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling b/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling new file mode 100644 index 0000000000..5f76588a81 --- /dev/null +++ b/tests/testdata/pile_nih-exporter-v1-loglikelihood_rolling @@ -0,0 +1 @@ +520ea6e04e8a39dc0b5f63a837429a78a40e63d39d109096101feb8c5b2cf8d8 \ No newline at end of file diff --git a/tests/testdata/pile_nih-exporter-v1-res.json b/tests/testdata/pile_nih-exporter-v1-res.json new file mode 100644 index 0000000000..0e40fc8268 --- /dev/null +++ b/tests/testdata/pile_nih-exporter-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_nih-exporter": {"bits_per_byte": 0.00035193728014978225, "byte_perplexity": 1.0002439740903082, "word_perplexity": 
1.0016712202288802}}, "versions": {"pile_nih-exporter": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling b/tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling new file mode 100644 index 0000000000..47805d3b5f --- /dev/null +++ b/tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling @@ -0,0 +1 @@ +0f1c23a1f4ddec0c2b1ff34de8d1505b0eb9e2868d8edbcc1b6de13d02f32036 \ No newline at end of file diff --git a/tests/testdata/pile_opensubtitles-v1-res.json b/tests/testdata/pile_opensubtitles-v1-res.json new file mode 100644 index 0000000000..1468294732 --- /dev/null +++ b/tests/testdata/pile_opensubtitles-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_opensubtitles": {"bits_per_byte": 2.1948356082685497e-05, "byte_perplexity": 1.0000152135568616, "word_perplexity": 1.0000856162053249}}, "versions": {"pile_opensubtitles": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling b/tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling new file mode 100644 index 0000000000..22046e4405 --- /dev/null +++ b/tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling @@ -0,0 +1 @@ +5d6c19665f429ab1ccbe027da67f42bdaf219f819ab093673976eee55e015ff4 \ No newline at end of file diff --git a/tests/testdata/pile_openwebtext2-v1-res.json b/tests/testdata/pile_openwebtext2-v1-res.json new file mode 100644 index 0000000000..ca433e3c85 --- /dev/null +++ b/tests/testdata/pile_openwebtext2-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_openwebtext2": {"bits_per_byte": 0.000184802319359215, "byte_perplexity": 1.000128103411166, "word_perplexity": 1.0007951516532847}}, "versions": {"pile_openwebtext2": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_philpapers-v1-loglikelihood_rolling b/tests/testdata/pile_philpapers-v1-loglikelihood_rolling new file mode 100644 index 0000000000..4fbbc241ba --- /dev/null +++ b/tests/testdata/pile_philpapers-v1-loglikelihood_rolling @@ -0,0 +1 @@ +339ba5d8c044c4a3ff9b9a8eaa24da1d6c01b72972074eb671a7da049eeb7047 \ No newline at end of file diff --git a/tests/testdata/pile_philpapers-v1-res.json b/tests/testdata/pile_philpapers-v1-res.json new file mode 100644 index 0000000000..5a2f77678a --- /dev/null +++ b/tests/testdata/pile_philpapers-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_philpapers": {"bits_per_byte": 9.004690592465457e-06, "byte_perplexity": 1.0000062415953748, "word_perplexity": 1.0000409888564146}}, "versions": {"pile_philpapers": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling b/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling new file mode 100644 index 0000000000..d5369ed3c9 --- /dev/null +++ b/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling @@ -0,0 +1 @@ +731fdef4a43949b179ba0c540148ebc2fa41583dd583ef580dd812076c66a451 \ No newline at end of file diff --git a/tests/testdata/pile_pile-cc-v1-res.json b/tests/testdata/pile_pile-cc-v1-res.json new file mode 100644 index 0000000000..bd2772e32a --- /dev/null +++ b/tests/testdata/pile_pile-cc-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_pile-cc": {"bits_per_byte": 0.0001620742639125056, "byte_perplexity": 1.0001123476295946, "word_perplexity": 1.0006738958554477}}, "versions": {"pile_pile-cc": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling b/tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling new file mode 100644 index 0000000000..de5660d60a --- /dev/null +++ 
b/tests/testdata/pile_pubmed-abstracts-v1-loglikelihood_rolling @@ -0,0 +1 @@ +66436569a43163afb2caf422d32c5f329899e74c49865d4d13881fd465fd9976 \ No newline at end of file diff --git a/tests/testdata/pile_pubmed-abstracts-v1-res.json b/tests/testdata/pile_pubmed-abstracts-v1-res.json new file mode 100644 index 0000000000..21b6bb451f --- /dev/null +++ b/tests/testdata/pile_pubmed-abstracts-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_pubmed-abstracts": {"bits_per_byte": 0.0005417858444030858, "byte_perplexity": 1.0003756078534862, "word_perplexity": 1.0025884332779}}, "versions": {"pile_pubmed-abstracts": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling b/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling new file mode 100644 index 0000000000..283109f32e --- /dev/null +++ b/tests/testdata/pile_pubmed-central-v1-loglikelihood_rolling @@ -0,0 +1 @@ +40b39d120d99a145690444e86acc3e3e24d41e6e0538a75e26929ad84926e5e0 \ No newline at end of file diff --git a/tests/testdata/pile_pubmed-central-v1-res.json b/tests/testdata/pile_pubmed-central-v1-res.json new file mode 100644 index 0000000000..4d4a241ace --- /dev/null +++ b/tests/testdata/pile_pubmed-central-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_pubmed-central": {"bits_per_byte": 2.2812488135667854e-05, "byte_perplexity": 1.0000158125368497, "word_perplexity": 1.000123107107861}}, "versions": {"pile_pubmed-central": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling b/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling new file mode 100644 index 0000000000..dcf0e64cf0 --- /dev/null +++ b/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling @@ -0,0 +1 @@ +e524bfb3e21cbdaddc117403a50df598520c7bf5b2c60ad8f2372cfa564e79be \ No newline at end of file diff --git a/tests/testdata/pile_stackexchange-v1-res.json b/tests/testdata/pile_stackexchange-v1-res.json new file mode 100644 index 0000000000..2773302990 --- /dev/null +++ b/tests/testdata/pile_stackexchange-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_stackexchange": {"bits_per_byte": 0.0003302063346758449, "byte_perplexity": 1.0002289077852733, "word_perplexity": 1.0016993562258851}}, "versions": {"pile_stackexchange": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling b/tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling new file mode 100644 index 0000000000..ce04199863 --- /dev/null +++ b/tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling @@ -0,0 +1 @@ +4eb69e314f0864ec8890e2323d7e76f8a8309692c4f090e2b41bf4be681a811d \ No newline at end of file diff --git a/tests/testdata/pile_ubuntu-irc-v1-res.json b/tests/testdata/pile_ubuntu-irc-v1-res.json new file mode 100644 index 0000000000..0e3b1b2597 --- /dev/null +++ b/tests/testdata/pile_ubuntu-irc-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_ubuntu-irc": {"bits_per_byte": 2.3513498942121155e-06, "byte_perplexity": 1.0000016298328778, "word_perplexity": 1.0000108866656874}}, "versions": {"pile_ubuntu-irc": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_uspto-v1-loglikelihood_rolling b/tests/testdata/pile_uspto-v1-loglikelihood_rolling new file mode 100644 index 0000000000..4649d3b9b7 --- /dev/null +++ b/tests/testdata/pile_uspto-v1-loglikelihood_rolling @@ -0,0 +1 @@ +789b2bdb31564d512b70f801316f49320a26c83ba361226bac0afb255341d477 \ No newline at end of file diff --git a/tests/testdata/pile_uspto-v1-res.json 
b/tests/testdata/pile_uspto-v1-res.json new file mode 100644 index 0000000000..599ae44ef4 --- /dev/null +++ b/tests/testdata/pile_uspto-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_uspto": {"bits_per_byte": 0.000174024142670342, "byte_perplexity": 1.00012063161925, "word_perplexity": 1.0007716198916954}}, "versions": {"pile_uspto": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_wikipedia-v1-loglikelihood_rolling b/tests/testdata/pile_wikipedia-v1-loglikelihood_rolling new file mode 100644 index 0000000000..e44bd27628 --- /dev/null +++ b/tests/testdata/pile_wikipedia-v1-loglikelihood_rolling @@ -0,0 +1 @@ +ef9ec0dd408316ca6537228a6812e839f14b30608973081d41efc47c138338da \ No newline at end of file diff --git a/tests/testdata/pile_wikipedia-v1-res.json b/tests/testdata/pile_wikipedia-v1-res.json new file mode 100644 index 0000000000..4f2314e66b --- /dev/null +++ b/tests/testdata/pile_wikipedia-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_wikipedia": {"bits_per_byte": 0.00024287370359008176, "byte_perplexity": 1.0001683613940646, "word_perplexity": 1.001084677949439}}, "versions": {"pile_wikipedia": 1}} \ No newline at end of file diff --git a/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling b/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling new file mode 100644 index 0000000000..81c2e5ed06 --- /dev/null +++ b/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling @@ -0,0 +1 @@ +68263c52adc0086011e2220b619983935cabb1cc1f5f9f8ee1a74ab2a7457967 \ No newline at end of file diff --git a/tests/testdata/pile_youtubesubtitles-v1-res.json b/tests/testdata/pile_youtubesubtitles-v1-res.json new file mode 100644 index 0000000000..fcf2faa8bc --- /dev/null +++ b/tests/testdata/pile_youtubesubtitles-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_youtubesubtitles": {"bits_per_byte": 3.3827117222045906e-05, "byte_perplexity": 1.000023447445816, "word_perplexity": 1.0001529192262875}}, "versions": {"pile_youtubesubtitles": 1}} \ No newline at end of file diff --git a/tests/testdata/wikitext-v1-loglikelihood_rolling b/tests/testdata/wikitext-v1-loglikelihood_rolling new file mode 100644 index 0000000000..f09af45a38 --- /dev/null +++ b/tests/testdata/wikitext-v1-loglikelihood_rolling @@ -0,0 +1 @@ +b6f83e6cf7535ee41b0057c3e2ec2cf7f2fa5a9119b305c479a83091d1142b2c \ No newline at end of file diff --git a/tests/testdata/wikitext-v1-res.json b/tests/testdata/wikitext-v1-res.json new file mode 100644 index 0000000000..122098aec2 --- /dev/null +++ b/tests/testdata/wikitext-v1-res.json @@ -0,0 +1 @@ +{"results": {"wikitext": {"bits_per_byte": 3.202519859941674e-05, "byte_perplexity": 1.0000221984224973, "word_perplexity": 1.000118710696617}}, "versions": {"wikitext": 1}} \ No newline at end of file From 4b3dee673a776de7be988b40a1a9c40b27e04e4d Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Sat, 1 Jan 2022 21:44:56 -0700 Subject: [PATCH 48/65] asdiv: space convention --- lm_eval/tasks/asdiv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index 0ff563538e..ed46445968 100644 --- a/lm_eval/tasks/asdiv.py +++ b/lm_eval/tasks/asdiv.py @@ -93,7 +93,7 @@ def doc_to_target(self, doc): # TODO: add formula answer = doc['answer'].split(' (')[0] - return answer + return " " + answer def construct_requests(self, doc, ctx): ll, is_greedy = rf.loglikelihood(ctx, self.doc_to_target(doc)) From cb3babd3620cc597f3399c17622db7bddec99a39 Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Mon, 3 Jan 2022 00:35:41 -0700 Subject: 
[PATCH 49/65] Improve pile test efficiency --- lm_eval/tasks/pile.py | 7 ++++--- tests/test_tasks.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lm_eval/tasks/pile.py b/lm_eval/tasks/pile.py index 68ff7ed9a8..c7cea5ac97 100644 --- a/lm_eval/tasks/pile.py +++ b/lm_eval/tasks/pile.py @@ -18,9 +18,10 @@ class PilePerplexityTask(PerplexityTask, abc.ABC): def download(self): # TODO: separate pile val/test out by component so we don't have to scan the entire file once per set - os.makedirs("data/pile/", exist_ok=True) - download_file("https://the-eye.eu/public/AI/pile/val.jsonl.zst", self.VAL_PATH, "264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") - download_file("https://the-eye.eu/public/AI/pile/test.jsonl.zst", self.TEST_PATH, "0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") + if not os.path.exists("data/pile/test.jsonl.zst"): + os.makedirs("data/pile/", exist_ok=True) + download_file("https://the-eye.eu/public/AI/pile/val.jsonl.zst", self.VAL_PATH, "264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") + download_file("https://the-eye.eu/public/AI/pile/test.jsonl.zst", self.TEST_PATH, "0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") def validation_docs(self): rdr = lm_dataformat.Reader(self.VAL_PATH) diff --git a/tests/test_tasks.py b/tests/test_tasks.py index 97baeacf8a..46812798a9 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -32,7 +32,7 @@ def test_basic_interface(taskname, task_class): limit = None - if taskname in ["triviaqa"]: + if taskname in ["triviaqa"] or taskname.startswith("pile_"): limit = 10000 if task.has_validation_docs(): arr = list(islice(task.validation_docs(), limit)) From 9d87d47696f73c18597e12c74c16f001f0497ffe Mon Sep 17 00:00:00 2001 From: Leo Gao <54557097+leogao2@users.noreply.github.com> Date: Mon, 3 Jan 2022 00:36:42 -0700 Subject: [PATCH 50/65] Delete test_cache.db --- tests/test_cache.db | Bin 12288 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/test_cache.db diff --git a/tests/test_cache.db b/tests/test_cache.db deleted file mode 100644 index 7477f429bf681485fd985975b0c979298d0a181c..0000000000000000000000000000000000000000 GIT binary patch [base85 payload of the deleted 12288-byte file omitted; the following commit's From header is truncated in this copy] Date: Mon, 3 Jan 2022 22:58:42 -0700 Subject: [PATCH 51/65] Update description_guide.md --- docs/description_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/description_guide.md b/docs/description_guide.md index 3627e23e6b..b3fea0834f 100644 --- a/docs/description_guide.md +++ b/docs/description_guide.md @@ -41,7 +41,7 @@ One can also interface with the aforementioned [`evaluator.evaluate`](../lm_eval which can then be supplied to the CLI as: -```python +```bash python main.py \ --tasks cycle_letters,copa \ --description_dict_path /your/path/descriptions.json \
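Before the `rnd` enforcement patch below, it may help to see what the file behind `--description_dict_path` looks like. This is an illustrative sketch, not the repository's test fixture: keys are task names, values are the description strings prepended to each few-shot context. The two task names come from the CLI example above; the description wording is invented.

```python
# Hypothetical helper that writes a descriptions.json compatible with
# `--description_dict_path`. The description strings here are made up.
import json

descriptions = {
    "cycle_letters": "Please unscramble the letters into a word, and write that word:",
    "copa": "Given a premise, pick the more plausible of two alternatives.",
}

with open("descriptions.json", "w") as f:
    json.dump(descriptions, f, indent=4)
```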
From d2636b4e71d2eb23faf6af547c53ed139b79d376 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Tue, 4 Jan 2022 02:38:18 -0500 Subject: [PATCH 52/65] Enforce `rnd` args with assertions --- lm_eval/base.py | 15 +++------------- 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/lm_eval/base.py b/lm_eval/base.py index c3f897c711..9313fcffba 100644 --- a/lm_eval/base.py +++ b/lm_eval/base.py @@ -471,13 +471,13 @@ def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, Not implemented, and this option is deprecated and will be removed in a future version in favor of a different description providing method :param rnd: random.Random The pseudo-random number generator used to randomly sample examples. - WARNING: If you do not provide a `rnd` arg, a default `random.Random` - object will be created and seeded with this Task's name attribute, `__name__`. + WARNING: This arg is now required, although it defaults to `None` so the signature stays backward compatible. :param description: str The task's description that will be prepended to the fewshot examples. :returns: str The fewshot context. """ + assert rnd is not None, "A `random.Random` generator argument must be provided to `rnd`" assert not provide_description, ( "The `provide_description` arg will be removed in future versions. To prepend " "a custom description to the context, supply the corresponding string via the " @@ -489,11 +489,6 @@ description = description + "\n\n" if description else "" - # TODO (jon-tow): Remove this default `rand` behaviour after `provide_description` is removed and remove the respective `rand` arg warning in the docs above. - if rnd is None: - rnd = random.Random() - rnd.seed(self.__name__) - if num_fewshot == 0: labeled_examples = "" else: @@ -567,6 +562,7 @@ def fewshot_examples(self, k, rnd): def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0 + assert rnd is not None, "A `random.Random` generator argument must be provided to `rnd`" assert not provide_description, ( "The `provide_description` arg will be removed in future versions. To prepend " "a custom description to the context, supply the corresponding string via the " @@ -576,11 +572,6 @@ # nudge people to not specify it at all print("WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict") - # TODO (jon-tow): Remove this default `rand` behaviour after `provide_description` is removed and remove the respective `rand` arg warning in the docs above. - if rnd is None: - rnd = random.Random() - rnd.seed(self.__name__) - return "" def higher_is_better(self):
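With patch 52 applied, every caller must construct and seed its own generator; nothing is seeded implicitly from the task name anymore. A minimal calling sketch under stated assumptions (that the `copa` task is available locally and that the seed and description strings are arbitrary placeholders):

```python
# Hedged sketch of the post-patch calling convention: the caller owns the
# RNG that controls few-shot example sampling.
import random
from lm_eval import tasks

task = tasks.get_task_dict(["copa"])["copa"]      # any task works here
doc = next(iter(task.validation_docs()))          # grab one document

rnd = random.Random()
rnd.seed(1234)                                    # seed value is illustrative

ctx = task.fewshot_context(
    doc=doc,
    num_fewshot=2,
    rnd=rnd,                                      # now asserted to be non-None
    description="A short task description.",      # optional, from a description_dict
)
print(ctx)
```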
From 76dc60938b43ec471149640e7ed954f3229fbc3e Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Sat, 8 Jan 2022 21:37:02 +0100 Subject: [PATCH 53/65] Best-download has a backward compatibility issue --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fdb74c4d79..b6ba841b97 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ python_requires='>=3.6', install_requires=[ "black", - "best_download>=0.0.6", + "best_download>=0.0.6,<0.0.8", "datasets==1.15.1", "click>=7.1", "scikit-learn>=0.24.1", From b2f6bce86c62a40498d863d103bd3c25c6d7f672 Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Sat, 8 Jan 2022 13:37:42 -0700 Subject: [PATCH 54/65] Fix fewshot_context method handling --- lm_eval/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lm_eval/utils.py b/lm_eval/utils.py index aca7791181..2a8c6d17fe 100644 --- a/lm_eval/utils.py +++ b/lm_eval/utils.py @@ -2,6 +2,7 @@ import re import collections import functools +import inspect class ExitCodeError(Exception): @@ -148,7 +149,7 @@ def positional_deprecated(fn): """ @functools.wraps(fn) def _wrapper(*args, **kwargs): - if len(args) != 0: + if len(args) != (1 if inspect.ismethod(fn) else 0): print(f"WARNING: using {fn.__name__} with positional arguments is " "deprecated and will be disallowed in a future version of " "lm-evaluation-harness!") From c65412e530c1780ec7b2b970373e9e83f796ab20 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Sat, 8 Jan 2022 21:45:40 +0100 Subject: [PATCH 55/65] Actually it shouldn't be hard to fix it to be compatible with future versions --- lm_eval/tasks/arithmetic.py | 2 +- lm_eval/tasks/coqa.py | 4 ++-- lm_eval/tasks/drop.py | 2 +- lm_eval/tasks/hendrycks_ethics.py | 2 +- lm_eval/tasks/hendrycks_math.py | 2 +- lm_eval/tasks/hendrycks_test.py | 2 +- lm_eval/tasks/lambada.py | 4 ++-- lm_eval/tasks/lambada_multilingual.py | 4 ++-- lm_eval/tasks/logiqa.py | 2 +- lm_eval/tasks/mutual.py | 4 ++-- lm_eval/tasks/pile.py | 4 ++-- lm_eval/tasks/qa4mre.py | 4 ++-- lm_eval/tasks/sciq.py | 4 ++-- lm_eval/tasks/triviaqa.py | 2 +- lm_eval/tasks/truthfulqa.py | 4 ++-- lm_eval/tasks/unscramble.py | 2 +- lm_eval/tasks/wikitext.py | 2 +- setup.py | 2 +- 18 files changed, 26 insertions(+), 26 deletions(-) diff --git a/lm_eval/tasks/arithmetic.py b/lm_eval/tasks/arithmetic.py index 147b66a175..b3256b5c87 100644 --- a/lm_eval/tasks/arithmetic.py +++ b/lm_eval/tasks/arithmetic.py @@ -21,7 +21,7 @@ def download(self): url = 'https://raw.githubusercontent.com/openai/gpt-3/master/data/' + file_name if not os.path.exists(self.directory): os.makedirs(self.directory) - download_file(url, self.directory+file_name, checksum) + download_file(url, local_file=self.directory+file_name, expected_checksum=checksum) self.set_docs() @abc.abstractmethod diff --git a/lm_eval/tasks/coqa.py b/lm_eval/tasks/coqa.py index beba53a663..d9f6194580 100644 --- a/lm_eval/tasks/coqa.py +++ b/lm_eval/tasks/coqa.py @@ -16,8 +16,8 @@ def download(self): sh ("""mkdir -p data/coqa""") - download_file("http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-train-v1.0.json", coqa_train_filepath, "b0fdb2bc1bd38dd3ca2ce5fa2ac3e02c6288ac914f241ac409a655ffb6619fa6") - download_file("http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json", coqa_dev_filepath,
"dfa367a9733ce53222918d0231d9b3bedc2b8ee831a2845f62dfc70701f2540a") + download_file("http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-train-v1.0.json", local_file=coqa_train_filepath, expected_checksum="b0fdb2bc1bd38dd3ca2ce5fa2ac3e02c6288ac914f241ac409a655ffb6619fa6") + download_file("http://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json", local_file=coqa_dev_filepath, expected_checksum="dfa367a9733ce53222918d0231d9b3bedc2b8ee831a2845f62dfc70701f2540a") def has_training_docs(self): return True diff --git a/lm_eval/tasks/drop.py b/lm_eval/tasks/drop.py index 97d1098327..3b89b7d1ec 100644 --- a/lm_eval/tasks/drop.py +++ b/lm_eval/tasks/drop.py @@ -27,7 +27,7 @@ def download(self): url = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip" checksum = "39d2278a29fd729de301b111a45f434c24834f40df8f4ff116d864589e3249d6" zip_path = self.DATASET_PATH / "drop_dataset.zip" - download_file(url, str(zip_path), checksum) + download_file(url, local_file=str(zip_path), expected_checksum=checksum) with ZipFile(zip_path, "r") as zip: zip.extractall(self.DATASET_PATH) diff --git a/lm_eval/tasks/hendrycks_ethics.py b/lm_eval/tasks/hendrycks_ethics.py index 50e94a508c..55d50ffc5d 100644 --- a/lm_eval/tasks/hendrycks_ethics.py +++ b/lm_eval/tasks/hendrycks_ethics.py @@ -20,7 +20,7 @@ class Ethics(Task): def download(self): if not os.path.exists('data/ethics/done'): sh("mkdir -p data") - download_file("https://people.eecs.berkeley.edu/~hendrycks/ethics.tar", "data/ethics.tar", "40acbf1ac0da79a2aabef394d58889136b8d38b05be09482006de2453fb06333") + download_file("https://people.eecs.berkeley.edu/~hendrycks/ethics.tar", local_file="data/ethics.tar", expected_checksum="40acbf1ac0da79a2aabef394d58889136b8d38b05be09482006de2453fb06333") sh(""" tar -xf data/ethics.tar -C data/ rm data/ethics.tar diff --git a/lm_eval/tasks/hendrycks_math.py b/lm_eval/tasks/hendrycks_math.py index 379e727d61..0c96ba52dc 100644 --- a/lm_eval/tasks/hendrycks_math.py +++ b/lm_eval/tasks/hendrycks_math.py @@ -18,7 +18,7 @@ class Math(Task): def download(self): if not (self.DATASET_PATH / 'test').exists() or not (self.DATASET_PATH / 'done').exists(): sh(f"mkdir -p {self.DATASET_PATH}") - download_file("https://people.eecs.berkeley.edu/~hendrycks/MATH.tar", f"{self.DATASET_PATH}.tar", "01256fd7cd5430596fdf07e6e6a5827111b5235b7ffed679c662a12f898932da") + download_file("https://people.eecs.berkeley.edu/~hendrycks/MATH.tar", local_file=f"{self.DATASET_PATH}.tar", expected_checksum="01256fd7cd5430596fdf07e6e6a5827111b5235b7ffed679c662a12f898932da") sh(f""" tar -xf {self.DATASET_PATH}.tar -C data/ && touch {self.DATASET_PATH / 'done'} rm {self.DATASET_PATH}.tar diff --git a/lm_eval/tasks/hendrycks_test.py b/lm_eval/tasks/hendrycks_test.py index 46c0306fcd..0546c9d9e1 100644 --- a/lm_eval/tasks/hendrycks_test.py +++ b/lm_eval/tasks/hendrycks_test.py @@ -45,7 +45,7 @@ def __init__(self, subject): def download(self): if not (self.DATASET_PATH / 'done').exists(): sh("mkdir -p data") - download_file("https://people.eecs.berkeley.edu/~hendrycks/data.tar", "data/data.tar", "78a804365a59028188fb19bd1adcadc5e0c260b220a9d8b2e33a5ea7d5fbe3b4") + download_file("https://people.eecs.berkeley.edu/~hendrycks/data.tar", local_file="data/data.tar", expected_checksum="78a804365a59028188fb19bd1adcadc5e0c260b220a9d8b2e33a5ea7d5fbe3b4") sh(""" tar -xf data/data.tar -C data/ rm data/data.tar diff --git a/lm_eval/tasks/lambada.py b/lm_eval/tasks/lambada.py index bcb4ae019c..a73602f7f5 100644 --- a/lm_eval/tasks/lambada.py +++ 
b/lm_eval/tasks/lambada.py @@ -14,8 +14,8 @@ def download(self): if not os.path.exists("data/lambada/lambada_test.jsonl"): download_file( "http://eaidata.bmk.sh/data/lambada_test.jsonl", - "data/lambada/lambada_test.jsonl", - "4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226" + local_file="data/lambada/lambada_test.jsonl", + expected_checksum="4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226" ) except: # fallback - for some reason best_download doesnt work all the time here diff --git a/lm_eval/tasks/lambada_multilingual.py b/lm_eval/tasks/lambada_multilingual.py index dd6da10bef..7123ecf01a 100644 --- a/lm_eval/tasks/lambada_multilingual.py +++ b/lm_eval/tasks/lambada_multilingual.py @@ -32,8 +32,8 @@ def download(self): if not os.path.exists(f): download_file( url, - f, - CHECKSUMS[self.LANG] + local_file=f, + expected_checksum=CHECKSUMS[self.LANG] ) except: # fallback - for some reason best_download doesnt work all the time here diff --git a/lm_eval/tasks/logiqa.py b/lm_eval/tasks/logiqa.py index e403623beb..97412738dd 100644 --- a/lm_eval/tasks/logiqa.py +++ b/lm_eval/tasks/logiqa.py @@ -19,7 +19,7 @@ def download(self): ] for split in splits: file = self.DATASET_PATH / f"{split['name']}.txt" - download_file(f"{base_url}/{split['name']}.txt", str(file), split["checksum"]) + download_file(f"{base_url}/{split['name']}.txt", local_file=str(file), expected_checksum=split["checksum"]) def has_training_docs(self): return True diff --git a/lm_eval/tasks/mutual.py b/lm_eval/tasks/mutual.py index 17274a46fd..221c28070f 100644 --- a/lm_eval/tasks/mutual.py +++ b/lm_eval/tasks/mutual.py @@ -36,8 +36,8 @@ def download(self): master_zip = Path("data/master.zip") download_file( "https://github.com/Nealcly/MuTual/archive/master.zip", - str(master_zip), - "bb325cf6c672f0f02699993a37138b0fa0af6fcfc77ec81dfbe46add4d7b29f9") + local_file=str(master_zip), + expected_checksum="bb325cf6c672f0f02699993a37138b0fa0af6fcfc77ec81dfbe46add4d7b29f9") with zipfile.ZipFile(master_zip, 'r') as zip: zip.extractall("data") Path("data/MuTual-master/data").rename(str(self.BASE_PATH)) diff --git a/lm_eval/tasks/pile.py b/lm_eval/tasks/pile.py index 68ff7ed9a8..2ffd8b833a 100644 --- a/lm_eval/tasks/pile.py +++ b/lm_eval/tasks/pile.py @@ -19,8 +19,8 @@ class PilePerplexityTask(PerplexityTask, abc.ABC): def download(self): # TODO: separate pile val/test out by component so we don't have to scan the entire file once per set os.makedirs("data/pile/", exist_ok=True) - download_file("https://the-eye.eu/public/AI/pile/val.jsonl.zst", self.VAL_PATH, "264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") - download_file("https://the-eye.eu/public/AI/pile/test.jsonl.zst", self.TEST_PATH, "0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") + download_file("https://the-eye.eu/public/AI/pile/val.jsonl.zst", local_file=self.VAL_PATH, expected_checksum="264c875d8bbd355d8daa9d032b75fd8fb91606218bb84dd1155b203fcd5fab92") + download_file("https://the-eye.eu/public/AI/pile/test.jsonl.zst", local_file=self.TEST_PATH, expected_checksum="0bb28c52d0b5596d389bf179ce2d43bf7f7ffae76b0d2d20b180c97f62e0975e") def validation_docs(self): rdr = lm_dataformat.Reader(self.VAL_PATH) diff --git a/lm_eval/tasks/qa4mre.py b/lm_eval/tasks/qa4mre.py index 67810ad747..de3d2e56d6 100644 --- a/lm_eval/tasks/qa4mre.py +++ b/lm_eval/tasks/qa4mre.py @@ -32,8 +32,8 @@ def download(self): if not os.path.isfile(f"data/qa4mre/QA4MRE-{year}-{lang}"): download_file( url_path, - 
f"data/qa4mre/QA4MRE-{year}-{lang}_GS.xml", - sha256sums[year], + local_file=f"data/qa4mre/QA4MRE-{year}-{lang}_GS.xml", + expected_checksum=sha256sums[year], ) def has_training_docs(self): diff --git a/lm_eval/tasks/sciq.py b/lm_eval/tasks/sciq.py index b750354a7b..021a79d78f 100644 --- a/lm_eval/tasks/sciq.py +++ b/lm_eval/tasks/sciq.py @@ -13,8 +13,8 @@ def download(self): os.makedirs('data/sciq', exist_ok=True) download_file( 'https://ai2-public-datasets.s3.amazonaws.com/sciq/SciQ.zip', - 'data/sciq/SciQ.zip', - '7f3312f6ac6b09970b32942d106a8c44ec0dad46a0369f17d635aff8e348a87c', + local_file='data/sciq/SciQ.zip', + expected_checksum='7f3312f6ac6b09970b32942d106a8c44ec0dad46a0369f17d635aff8e348a87c', ) with zipfile.ZipFile("data/sciq/SciQ.zip", "r") as zf: zf.extractall("data/sciq/") diff --git a/lm_eval/tasks/triviaqa.py b/lm_eval/tasks/triviaqa.py index e61a40bdde..1b23a1f908 100644 --- a/lm_eval/tasks/triviaqa.py +++ b/lm_eval/tasks/triviaqa.py @@ -12,7 +12,7 @@ class TriviaQA(Task): def download(self): if not os.path.exists('data/triviaqa/unfiltered-web-train.jsonl'): os.makedirs("data/triviaqa/", exist_ok=True) - download_file("http://eaidata.bmk.sh/data/triviaqa-unfiltered.tar.gz", "data/triviaqa/triviaqa-unfiltered.tar.gz", "adc19b42769062d241a8fbe834c56e58598d9322eb6c614e9f33a68a2cf5523e") + download_file("http://eaidata.bmk.sh/data/triviaqa-unfiltered.tar.gz", local_file="data/triviaqa/triviaqa-unfiltered.tar.gz", expected_checksum="adc19b42769062d241a8fbe834c56e58598d9322eb6c614e9f33a68a2cf5523e") sh(""" cd data/triviaqa/ tar -xf triviaqa-unfiltered.tar.gz diff --git a/lm_eval/tasks/truthfulqa.py b/lm_eval/tasks/truthfulqa.py index f0b46196bc..854b63d475 100644 --- a/lm_eval/tasks/truthfulqa.py +++ b/lm_eval/tasks/truthfulqa.py @@ -58,7 +58,7 @@ def download(self): Path.mkdir(self.DATASET_PATH, parents=True) mc_url = "https://raw.githubusercontent.com/sylinrl/TruthfulQA/013686a06be7a7bde5bf8223943e106c7250123c/data/mc_task.json" checksum = "6eb4125d25750c0145c4be2dce00440736684ab6f74ce6bff2139571cc758954" - download_file(mc_url, str(self.DATASET_PATH / "mc_task.json"), checksum) + download_file(mc_url, local_file=str(self.DATASET_PATH / "mc_task.json"), expected_checksum=checksum) def has_training_docs(self): return False @@ -163,7 +163,7 @@ def download(self): Path.mkdir(self.DATASET_PATH, parents=True) url = "https://raw.githubusercontent.com/sylinrl/TruthfulQA/013686a06be7a7bde5bf8223943e106c7250123c/TruthfulQA.csv" checksum = "8d7dd15f033196140f032d97d30f037da7a7b1192c3f36f9937c1850925335a2" - download_file(url, str(self.DATASET_PATH / "TruthfulQA.csv"), checksum) + download_file(url, local_file=str(self.DATASET_PATH / "TruthfulQA.csv"), expected_checksum=checksum) def has_training_docs(self): return False diff --git a/lm_eval/tasks/unscramble.py b/lm_eval/tasks/unscramble.py index dc742a2cee..542df9b4eb 100644 --- a/lm_eval/tasks/unscramble.py +++ b/lm_eval/tasks/unscramble.py @@ -29,7 +29,7 @@ def download(self): if not file.exists(): rawfile = file.parent / (file.name + ".gz") base_url = "https://raw.githubusercontent.com/openai/gpt-3/master/data" - download_file(f"{base_url}/{self.FILENAME}.gz", str(rawfile), self.CHECKSUM) + download_file(f"{base_url}/{self.FILENAME}.gz", local_file=str(rawfile), expected_checksum=self.CHECKSUM) extract_gzip(gz=rawfile, to=file) def has_training_docs(self): diff --git a/lm_eval/tasks/wikitext.py b/lm_eval/tasks/wikitext.py index 24f9ec3507..94a93c0899 100644 --- a/lm_eval/tasks/wikitext.py +++ b/lm_eval/tasks/wikitext.py @@ -46,7 
+46,7 @@ class WikiText(PerplexityTask): def download(self): if not os.path.exists('data/wikitext/wikitext-2-raw/wiki.valid.raw'): os.makedirs("data/wikitext/", exist_ok=True) - download_file("https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip", "data/wikitext/wikitext-2-raw-v1.zip", "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11") + download_file("https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip", local_file="data/wikitext/wikitext-2-raw-v1.zip", expected_checksum="ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11") sh("cd data/wikitext/ && unzip wikitext-2-raw-v1.zip") def fewshot_description(self): diff --git a/setup.py b/setup.py index b6ba841b97..fdb74c4d79 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ python_requires='>=3.6', install_requires=[ "black", - "best_download>=0.0.6,<0.0.8", + "best_download>=0.0.6", "datasets==1.15.1", "click>=7.1", "scikit-learn>=0.24.1", From 02a4def21b4de5f29c6314fb73abe1810d194128 Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Sat, 8 Jan 2022 13:53:50 -0700 Subject: [PATCH 56/65] Update blimp fewshot_context --- lm_eval/tasks/blimp.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lm_eval/tasks/blimp.py b/lm_eval/tasks/blimp.py index e8e7bd9f2b..8a52d888ca 100644 --- a/lm_eval/tasks/blimp.py +++ b/lm_eval/tasks/blimp.py @@ -29,9 +29,18 @@ def download(self): self.data["validation"] = self.data["train"] del self.data["train"] - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0 - assert not provide_description + assert rnd is not None, "A `random.Random` generator argument must be provided to `rnd`" + assert not provide_description, ( + "The `provide_description` arg will be removed in future versions. To prepend " + "a custom description to the context, supply the corresponding string via the " + "`description` arg." 
+ ) + if provide_description is not None: + # nudge people to not specify it at all + print("WARNING: provide_description is deprecated and will be removed in a future version in favor of description_dict") + return "" def doc_to_text(self, doc): From 2d9fc25632fb0e48c8d6f7d183252800ce02afcd Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Sun, 9 Jan 2022 00:26:43 +0100 Subject: [PATCH 57/65] Missed asdiv --- lm_eval/tasks/asdiv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index ed46445968..732d0f1d6c 100644 --- a/lm_eval/tasks/asdiv.py +++ b/lm_eval/tasks/asdiv.py @@ -35,7 +35,7 @@ def download(self): url = "https://github.com/chaochun/nlu-asdiv-dataset/archive/55790e5270bb91ccfa5053194b25732534696b50.zip" checksum = "8f1fe4f6d5f170ec1e24ab78c244153c14c568b1bb2b1dad0324e71f37939a2d" zip_path = self.DATASET_PATH / "55790e5270bb91ccfa5053194b25732534696b50.zip" - download_file(url, str(zip_path), checksum) + download_file(url, local_file=str(zip_path), expected_checksum=checksum) with ZipFile(zip_path, "r") as zip: zip.extractall(self.DATASET_PATH) os.remove(zip_path) From ea3fd79bd2309fec6e2fea74aaa072a967173dc3 Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Tue, 11 Jan 2022 10:32:12 +0100 Subject: [PATCH 58/65] Fix CB --- lm_eval/tasks/superglue.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index 33598f2301..849fb0781b 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -93,14 +93,14 @@ def doc_to_target(self, doc): # True = entailment # False = contradiction # Neither = neutral - return " {}".format({0: "True", 1: "Neither", 2: "False"}[doc["label"]]) + return " {}".format({0: "True", 1: "False", 2: "Neither"}[doc["label"]]) def construct_requests(self, doc, ctx): ll_true, _ = rf.loglikelihood(ctx, ' True') - ll_neither, _ = rf.loglikelihood(ctx, ' Neither') ll_false, _ = rf.loglikelihood(ctx, ' False') + ll_neither, _ = rf.loglikelihood(ctx, ' Neither') - return ll_true, ll_neither, ll_false + return ll_true, ll_false, ll_neither def process_results(self, doc, results): gold = doc["label"] From b421f0529a4d2bf1ef8e68cd74671be44010c37a Mon Sep 17 00:00:00 2001 From: thomasw21 <24695242+thomasw21@users.noreply.github.com> Date: Tue, 11 Jan 2022 12:50:07 +0100 Subject: [PATCH 59/65] Bump version --- lm_eval/tasks/superglue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lm_eval/tasks/superglue.py b/lm_eval/tasks/superglue.py index 849fb0781b..489d18a6ae 100644 --- a/lm_eval/tasks/superglue.py +++ b/lm_eval/tasks/superglue.py @@ -65,7 +65,7 @@ def aggregation(self): class CommitmentBank(HFTask): - VERSION = 0 + VERSION = 1 DATASET_PATH = "super_glue" DATASET_NAME = "cb" From caff4f1099349f599a3bdeb7725bb2add06d741b Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Thu, 13 Jan 2022 02:36:29 -0500 Subject: [PATCH 60/65] Conform `asdiv` to the new description api --- lm_eval/tasks/asdiv.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lm_eval/tasks/asdiv.py b/lm_eval/tasks/asdiv.py index 732d0f1d6c..58bcdcd250 100644 --- a/lm_eval/tasks/asdiv.py +++ b/lm_eval/tasks/asdiv.py @@ -31,7 +31,7 @@ class Asdiv(Task): def download(self): if self.DATASET_PATH.exists(): return - Path.mkdir(self.DATASET_PATH) + Path.mkdir(self.DATASET_PATH, parents=True) url =
"https://github.com/chaochun/nlu-asdiv-dataset/archive/55790e5270bb91ccfa5053194b25732534696b50.zip" checksum = "8f1fe4f6d5f170ec1e24ab78c244153c14c568b1bb2b1dad0324e71f37939a2d" zip_path = self.DATASET_PATH / "55790e5270bb91ccfa5053194b25732534696b50.zip" @@ -75,10 +75,14 @@ def validation_docs(self): data_xml_path = self.DATASET_PATH / "nlu-asdiv-dataset-55790e5270bb91ccfa5053194b25732534696b50/dataset/ASDiv.xml" return self.load_docs(data_xml_path) - def fewshot_context(self, doc, num_fewshot, provide_description, rnd): + def fewshot_context(self, doc, num_fewshot, provide_description=None, rnd=None, description=None): assert num_fewshot == 0, "ASDiv is intended only for the zero-shot setting." - return super().fewshot_context(doc, num_fewshot, provide_description, rnd) - + return super().fewshot_context( + doc=doc, + num_fewshot=num_fewshot, + rnd=rnd, + description=description + ) def fewshot_description(self): # TODO: add solution-type and formula @@ -115,4 +119,3 @@ def higher_is_better(self): return { 'acc': True } - From 1c52e917cf9196a72dee111ff3a107b115e9da96 Mon Sep 17 00:00:00 2001 From: Leo Gao Date: Fri, 14 Jan 2022 21:29:31 -0700 Subject: [PATCH 61/65] cb: add testdata --- tests/testdata/cb-v1-loglikelihood | 1 + tests/testdata/cb-v1-res.json | 1 + 2 files changed, 2 insertions(+) create mode 100644 tests/testdata/cb-v1-loglikelihood create mode 100644 tests/testdata/cb-v1-res.json diff --git a/tests/testdata/cb-v1-loglikelihood b/tests/testdata/cb-v1-loglikelihood new file mode 100644 index 0000000000..ad7e928fe6 --- /dev/null +++ b/tests/testdata/cb-v1-loglikelihood @@ -0,0 +1 @@ +77b11f4348eb8a7f57faf95c531fda01ab4bf0e729f91a82451ed8e71ec8e66d \ No newline at end of file diff --git a/tests/testdata/cb-v1-res.json b/tests/testdata/cb-v1-res.json new file mode 100644 index 0000000000..1cff410b2c --- /dev/null +++ b/tests/testdata/cb-v1-res.json @@ -0,0 +1 @@ +{"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 1}} \ No newline at end of file From 03c15e065e6d4c68e9ecbc9b78c45450bf8a9976 Mon Sep 17 00:00:00 2001 From: Jonathan Tow Date: Sun, 23 Jan 2022 16:11:51 -0500 Subject: [PATCH 62/65] Update `headqa` deprecation warning to display on init only --- lm_eval/tasks/headqa.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lm_eval/tasks/headqa.py b/lm_eval/tasks/headqa.py index 7551f380ac..d9ac2d87c1 100644 --- a/lm_eval/tasks/headqa.py +++ b/lm_eval/tasks/headqa.py @@ -37,4 +37,6 @@ class HeadQAEs(HeadQABase): class HeadQAEsDeprecated(HeadQABase): DATASET_NAME = "es" - print("WARNING: headqa is deprecated. Please use headqa_es or headqa_en instead. See https://github.com/EleutherAI/lm-evaluation-harness/pull/240 for more info.") \ No newline at end of file + def __init__(self): + super().__init__() + print("WARNING: headqa is deprecated. Please use headqa_es or headqa_en instead. 
See https://github.com/EleutherAI/lm-evaluation-harness/pull/240 for more info.") \ No newline at end of file
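The remaining patches wire new model backends into the harness. For orientation, this is roughly how a key added to the registry gets resolved at run time. Only `MODEL_REGISTRY` itself appears in the diffs; the direct-lookup style shown here is an assumption about how callers use it.

```python
# Illustrative lookup over the registry that the next two patches extend.
from lm_eval.models import MODEL_REGISTRY

lm_class = MODEL_REGISTRY["XGLM"]                # key registered below
lm = lm_class(pretrained="facebook/xglm-1.7B")   # default from the patch below
```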
From 1dc6eb013b33cae6e52d8756ff9dbae188e1d69a Mon Sep 17 00:00:00 2001 From: hadyelsahar Date: Wed, 2 Feb 2022 00:45:41 +0100 Subject: [PATCH 63/65] adding support of XGLM model --- lm_eval/models/__init__.py | 2 + lm_eval/models/xglm.py | 84 ++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 lm_eval/models/xglm.py diff --git a/lm_eval/models/__init__.py b/lm_eval/models/__init__.py index a12f68a513..1fba7ce81c 100644 --- a/lm_eval/models/__init__.py +++ b/lm_eval/models/__init__.py @@ -1,12 +1,14 @@ from . import gpt2 from . import gpt3 from . import dummy +from . import xglm MODEL_REGISTRY = { "hf": gpt2.HFLM, "gpt2": gpt2.GPT2LM, "gpt3": gpt3.GPT3LM, "dummy": dummy.DummyLM, + "XGLM": xglm.XGLM, } diff --git a/lm_eval/models/xglm.py b/lm_eval/models/xglm.py new file mode 100644 index 0000000000..1c0da62030 --- /dev/null +++ b/lm_eval/models/xglm.py @@ -0,0 +1,84 @@ +import transformers +import torch +from lm_eval.base import BaseLM + + +class XGLM(BaseLM): + def __init__(self, device='cuda', pretrained='facebook/xglm-1.7B', revision='main', subfolder=None, tokenizer=None, batch_size=1): + super().__init__() + assert isinstance(device, str) + assert isinstance(pretrained, str) + assert isinstance(batch_size, int) + if device: + self._device = torch.device(device) + else: + self._device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + # TODO: update this to be less of a hack once subfolder is fixed in HF + self.xglm = transformers.AutoModelForCausalLM.from_pretrained( + pretrained, + # cache_dir="/users/zyong2/data/zyong2/huggingface/xglm" + ).to(self.device) + print(f"🤖 Loading model {pretrained}") + self.xglm.eval() + # load the tokenizer, falling back to the model's own if a separate one is not given + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + pretrained if tokenizer is None else tokenizer, revision=revision, subfolder=subfolder) + # assert isinstance(self.tokenizer, ( + # transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast, + # transformers.T5Tokenizer, transformers.T5TokenizerFast, + # )), "this tokenizer has not been checked for compatibility yet!" + self.vocab_size = self.tokenizer.vocab_size + # if isinstance(self.tokenizer, (transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast)): + # assert self.tokenizer.encode('hello\n\nhello') == [31373, 198, 198, 31373], \ + # self.tokenizer.encode('hello\n\nhello') + # multithreading and batching + self.batch_size_per_gpu = batch_size # todo: adaptive batch size + # TODO: fix multi-gpu + # gpus = torch.cuda.device_count() + # if gpus > 1: + # self.gpt2 = nn.DataParallel(self.gpt2) + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + @property + def max_length(self): + try: + return self.xglm.config.n_ctx + except AttributeError: + # gptneoconfig doesn't have n_ctx apparently + return self.xglm.config.max_position_embeddings + @property + def max_gen_toks(self): + return 256 + @property + def batch_size(self): + # TODO: fix multi-gpu + return self.batch_size_per_gpu # * gpus + @property + def device(self): + # TODO: fix multi-gpu + return self._device + def tok_encode(self, string: str): + return self.tokenizer.encode(string, add_special_tokens=False) + + def tok_decode(self, tokens): + return self.tokenizer.decode(tokens) + def _model_call(self, inps): + """ + inps: a torch tensor of shape [batch, sequence] + the size of sequence may vary from call to call + returns: a torch tensor of shape [batch, sequence, vocab] with the + logits returned from the model + """ + with torch.no_grad(): + return self.xglm(inps)[0][:, :, :256008] + + def _model_generate(self, context, max_length, eos_token_id): + result = self.xglm.generate( + context, + max_length=max_length, + eos_token_id=eos_token_id, + do_sample=False + ) + return result \ No newline at end of file
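One detail in `_model_call` above deserves a note: the logits are sliced to the first 256008 columns, presumably so that logit columns line up with the tokenizer's token ids even if the output projection is padded wider. A toy illustration follows; only the 256008 comes from the patch, the padded width 256512 is invented for the example.

```python
# Toy illustration of the logit truncation in `_model_call` above.
import torch

vocab_size = 256008                   # hard-coded slice from the patch
padded = torch.randn(1, 4, 256512)    # assumed padded LM-head width
logits = padded[:, :, :vocab_size]    # drop columns past the vocabulary
assert logits.shape == (1, 4, vocab_size)
```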
From 2d9fc25632fb0e48c8d6f7d183252800ce02afcd Mon Sep 17 00:00:00 2001 From: hadyelsahar Date: Thu, 10 Feb 2022 12:21:22 +0100 Subject: [PATCH 64/65] Fix new lines tokenizer issue in XGLM --- lm_eval/models/xglm.py | 20 +++++++++++++++++--- lm_eval/tasks/translation.py | 3 +-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/lm_eval/models/xglm.py b/lm_eval/models/xglm.py index 1c0da62030..3aac8f9ead 100644 --- a/lm_eval/models/xglm.py +++ b/lm_eval/models/xglm.py @@ -1,6 +1,8 @@ import transformers import torch +from lm_eval import utils from lm_eval.base import BaseLM +from tqdm import tqdm class XGLM(BaseLM): @@ -60,10 +63,21 @@ def device(self): # TODO: fix multi-gpu return self._device def tok_encode(self, string: str): + # HACK: to overcome problem of XGLM tokenizer removing new lines + # we replace newline with SEP token + # WARNING: Since typical SEP token == EOS token + # Generation will stop after the first appearance of SEP token, preventing XGLM from + # outputting multi-line generations + string = string.replace("\n", self.tokenizer.sep_token) return self.tokenizer.encode(string, add_special_tokens=False) - + def tok_decode(self, tokens): - return self.tokenizer.decode(tokens) + # HACK: to overcome problem of XGLM tokenizer removing new lines + # replace back the generated sep_tokens with newlines + output = self.tokenizer.decode(tokens) + output = output.replace(self.tokenizer.sep_token, "\n") + return output def _model_call(self, inps): """ inps: a torch tensor of shape [batch, sequence] the size of sequence may vary from call to call @@ -81,4 +95,4 @@ def _model_generate(self, context, max_length, eos_token_id): eos_token_id=eos_token_id, do_sample=False ) - return result \ No newline at end of file + return result diff --git a/lm_eval/tasks/translation.py b/lm_eval/tasks/translation.py index 2e70b03a84..4d65de43fb 100644 --- a/lm_eval/tasks/translation.py +++ b/lm_eval/tasks/translation.py @@ -107,7 +107,7 @@ def doc_to_text(self, doc): language_codes = self.sacrebleu_language_pair.split("-") src_lang = code_to_language(language_codes[0]) tar_lang = code_to_language(language_codes[1]) - return f"{src_lang} phrase: " + doc["src"] + f"\n{tar_lang} phrase:" + return f"\nTranslate {src_lang} to {tar_lang}:\n [{src_lang}] " + doc["src"] + f"\n[{tar_lang}]" def doc_to_target(self, doc): # This shows a single target, though there may be multiple targets in a lang test @@ -132,7 +132,6 @@ def process_results(self, doc, results): if tar_lang_code in NO_SPACE_LANG: doc["ref"] = NO_SPACE_LANG[tar_lang_code]([doc["ref"]])[0] results = NO_SPACE_LANG[tar_lang_code](results) - # These metrics are corpus-level not sentence level, so we'll hide the # results in this dict and compute the corpus score in the aggregate method ref_pred = (doc["ref"], results)
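The newline workaround above is symmetric: encoding maps `\n` to the SEP token so it survives tokenization, and decoding maps SEP back to `\n`. Pulled out of the class, the round trip looks like this (a sketch assuming a tokenizer that defines `sep_token`; the helper names are invented):

```python
# Standalone sketch of the newline <-> SEP round trip from the patch above.
def encode_with_newline_hack(tokenizer, string):
    # SEP stands in for newlines, which the XGLM tokenizer would otherwise drop.
    return tokenizer.encode(string.replace("\n", tokenizer.sep_token),
                            add_special_tokens=False)

def decode_with_newline_hack(tokenizer, tokens):
    # Restore newlines wherever SEP tokens were generated.
    return tokenizer.decode(tokens).replace(tokenizer.sep_token, "\n")
```

Note the trade-off the patch itself warns about: when SEP and EOS coincide, generation stops at the first stand-in newline, so multi-line generations are still cut short.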
From fbdc61c66a86ba12f359df84757043ec743d283e Mon Sep 17 00:00:00 2001 From: hadyelsahar Date: Thu, 17 Feb 2022 14:42:01 +0100 Subject: [PATCH 65/65] integration of bigscience model --- lm_eval/models/__init__.py | 2 + lm_eval/models/bigscience.py | 84 ++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 lm_eval/models/bigscience.py diff --git a/lm_eval/models/__init__.py b/lm_eval/models/__init__.py index 1fba7ce81c..9ffd1ceffb 100644 --- a/lm_eval/models/__init__.py +++ b/lm_eval/models/__init__.py @@ -2,6 +2,7 @@ from . import gpt3 from . import dummy from . import xglm +from . import bigscience MODEL_REGISTRY = { "hf": gpt2.HFLM, @@ -9,6 +10,7 @@ "gpt3": gpt3.GPT3LM, "dummy": dummy.DummyLM, "XGLM": xglm.XGLM, + "bigscience": bigscience.BigScience, } diff --git a/lm_eval/models/bigscience.py b/lm_eval/models/bigscience.py new file mode 100644 index 0000000000..e54b062716 --- /dev/null +++ b/lm_eval/models/bigscience.py @@ -0,0 +1,84 @@ +import transformers +import torch +from lm_eval.base import BaseLM + + +class BigScience(BaseLM): + + def __init__(self, device='cuda', pretrained='bigscience/tr5b-1B3-multilingual-alpha-checkpoints', revision='global_step118500', subfolder=None, tokenizer=None, batch_size=1): + super().__init__() + + assert isinstance(device, str) + assert isinstance(pretrained, str) + assert isinstance(batch_size, int) + + if device: + self._device = torch.device(device) + else: + self._device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') + # TODO: update this to be less of a hack once subfolder is fixed in HF + self.bigscience = transformers.AutoModelForCausalLM.from_pretrained( + pretrained, revision=revision + ).to(self.device) + self.bigscience.eval() + # load the tokenizer, falling back to the model's own if a separate one is not given + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + pretrained if tokenizer is None else tokenizer, revision=revision, subfolder=subfolder) + # assert isinstance(self.tokenizer, ( + # transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast, + # transformers.T5Tokenizer, transformers.T5TokenizerFast, + # )), "this tokenizer has not been checked for compatibility yet!" + self.vocab_size = self.tokenizer.vocab_size + # if isinstance(self.tokenizer, (transformers.GPT2Tokenizer, transformers.GPT2TokenizerFast)): + # assert self.tokenizer.encode('hello\n\nhello') == [31373, 198, 198, 31373], \ + # self.tokenizer.encode('hello\n\nhello') + # multithreading and batching + self.batch_size_per_gpu = batch_size # todo: adaptive batch size + # TODO: fix multi-gpu + # gpus = torch.cuda.device_count() + # if gpus > 1: + # self.gpt2 = nn.DataParallel(self.gpt2) + @property + def eot_token_id(self): + # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence* + return self.tokenizer.eos_token_id + @property + def max_length(self): + try: + return self.bigscience.config.n_ctx + except AttributeError: + # gptneoconfig doesn't have n_ctx apparently + return self.bigscience.config.max_position_embeddings + @property + def max_gen_toks(self): + return 256 + @property + def batch_size(self): + # TODO: fix multi-gpu + return self.batch_size_per_gpu # * gpus + @property + def device(self): + # TODO: fix multi-gpu + return self._device + def tok_encode(self, string: str): + return self.tokenizer.encode(string, add_special_tokens=False) + def tok_decode(self, tokens): + return self.tokenizer.decode(tokens) + def _model_call(self, inps): + """ + inps: a torch tensor of shape [batch, sequence] + the size of sequence may vary from call to call + returns: a torch tensor of shape [batch, sequence, vocab] with the + logits returned from the model + """ + with torch.no_grad(): + return self.bigscience(inps)[0][:, :, :130000] + def _model_generate(self, context, max_length, eos_token_id): + result = self.bigscience.generate( + context, + max_length=max_length, + eos_token_id=eos_token_id, + do_sample=False) + return result
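With both backends registered, a quick smoke test through the Python API might look like the following. This is a sketch: the `simple_evaluate` signature is taken from earlier in this series and assumed unchanged by the later patches, and the task choice is arbitrary.

```python
# Hypothetical smoke test for the newly registered XGLM backend.
from lm_eval import evaluator

results = evaluator.simple_evaluate(
    model="XGLM",                                # key from MODEL_REGISTRY
    model_args="pretrained=facebook/xglm-1.7B",  # default checkpoint from the patch
    task_names=["lambada"],
    num_fewshot=0,
)
print(results["results"])
```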