From cab1629be0d7cf5b8210cbcae6f504b5212638ab Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 07:11:59 +0000 Subject: [PATCH 01/18] Add .gitignore file Signed-off-by: Himanshu Upreti --- .gitignore | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..51a71b8cd --- /dev/null +++ b/.gitignore @@ -0,0 +1,86 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ From e6ebefdd0fcf72c532aa29ad3e471894de621a09 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 07:34:23 +0000 Subject: [PATCH 02/18] DevApp on Cloud AI using Library Signed-off-by: Himanshu Upreti --- app/LLMGenerator.py | 473 +++++++++++++++++++++ app/Readme.md | 15 + app/app.py | 297 +++++++++++++ app/cert.pem | 35 ++ app/demo.css | 108 +++++ app/img/box.png | Bin 0 -> 2135 bytes app/img/full.png | Bin 0 -> 70392 bytes app/key.pem | 52 +++ app/output.log | 987 +++++++++++++++++++++++++++++++++++++++++++ app/qpc.json | 10 + app/requirements.txt | 9 + 11 files changed, 1986 insertions(+) create mode 100644 app/LLMGenerator.py create mode 100644 app/Readme.md create mode 100755 app/app.py create mode 100644 app/cert.pem create mode 100644 app/demo.css create mode 100644 app/img/box.png create mode 100644 app/img/full.png create mode 100644 app/key.pem create mode 100644 app/output.log create mode 100644 app/qpc.json create mode 100755 app/requirements.txt diff --git a/app/LLMGenerator.py b/app/LLMGenerator.py new file mode 100644 index 000000000..824cbd0c1 --- /dev/null +++ b/app/LLMGenerator.py @@ -0,0 +1,473 @@ +import json +import os + +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + +from time import perf_counter +from typing import Dict, List, Optional +import sys +from threading import Thread +from typing import * +import torch +import numpy as np +import torch.nn as nn + +import transformers + +# from aic_infer import QAICInferenceSession + +from QEfficient.generation.aic_infer import QAICInferenceSession + +io_files = [] + + +import io + +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + TextIteratorStreamer, + TextStreamer, +) + + +from transformers import ( + AutoTokenizer, + AutoModelForCausalLM, + LogitsProcessorList, + MinLengthLogitsProcessor, + TopKLogitsWarper, + TemperatureLogitsWarper, + StoppingCriteriaList, + MaxLengthCriteria, +) + + +def print_to_string(*args, 
**kwargs):
+    output = io.StringIO()
+    print(*args, file=output, **kwargs)
+    contents = output.getvalue()
+    output.close()
+    return contents
+
+
+def write_io_files(
+    inputs: Dict[str, np.ndarray],
+    outputs: Dict[str, np.ndarray],
+    write_io_dir: str,
+    write_io_subdir: str,
+):
+    io = []
+    os.makedirs(f"{write_io_dir}/{write_io_subdir}", exist_ok=True)
+
+    for iname, iarray in inputs.items():
+        iarray.tofile(f"{write_io_dir}/{write_io_subdir}/{iname}.raw")
+        io.append(
+            {
+                "path": f"{write_io_subdir}/{iname}.raw",
+                "io-direction": "in",
+                "dims": iarray.shape,
+                "elem-size": iarray.itemsize,
+                "map-to": iname,
+            }
+        )
+
+    for oname, oarray in outputs.items():
+        oarray.tofile(f"{write_io_dir}/{write_io_subdir}/{oname}.raw")
+        io.append(
+            {
+                "path": f"{write_io_subdir}/{oname}.raw",
+                "io-direction": "out",
+                "dims": oarray.shape,
+                "elem-size": oarray.itemsize,
+                "map-to": oname,
+            }
+        )
+
+    io_files.append(io)
+    with open(f"{write_io_dir}/aic_batch_io.json", "w") as fp:
+        json.dump({"IO-files": io_files}, fp, indent=True)
+
+
+class LLMGenerator:
+    def __init__(
+        self,
+        qpc_path,
+        model_name,
+        device_id: Optional[List[int]] = [0],
+        prompt_len: Optional[int] = 32,
+        ctx_len: Optional[int] = 128,
+        streamer: Optional["BaseStreamer"] = None,
+        logits_processor: Optional[LogitsProcessorList] = None,
+        logits_warper: Optional[LogitsProcessorList] = None,
+    ):
+        self.session = None
+        self.tokenizer = None
+        self.streamer = None
+        self.is_first_prompt = False
+        self.model_name = ""
+        self.qpc_path = ""
+        self.device_id = [0]
+        self.curr_cache_index = 0
+        self.ctx_len = ctx_len
+        self.retained_state = True
+        self.write_io_dir = False
+        self.prompt_len = prompt_len
+        self.generated_ids = []
+        self.inputs = None
+        self.stop_indicator = True
+
+        if not os.path.exists(qpc_path):
+            raise OSError(f"{qpc_path} not found!")
+        self.qpc_path = qpc_path
+        self.device_id = device_id
+
+        self.model_name = model_name
+
+        try:
+            self.session = QAICInferenceSession(
+                self.qpc_path, self.device_id, enable_debug_logs=False
+            )
+            if self.retained_state:
+                # Skip the KV-cache buffers; they are retained on-device across calls.
+                self.session.skip_buffers(
+                    [x for x in self.session.input_names if x.startswith("past_")]
+                )
+                self.session.skip_buffers(
+                    [
+                        x
+                        for x in self.session.output_names
+                        if x.endswith("_RetainedState")
+                    ]
+                )
+        except Exception as err:
+            raise RuntimeError(f"Unable to load qpc on device, {err}")
+
+        try:
+            tokenizer = transformers.AutoTokenizer.from_pretrained(
+                model_name, padding_side="left"
+            )
+            if tokenizer.pad_token_id is None:
+                tokenizer.pad_token_id = tokenizer.eos_token_id
+            self.tokenizer = tokenizer
+        except Exception as err:
+            raise RuntimeError(f"Unable to load tokenizer, {err}")
+
+        if streamer:
+            self.streamer = streamer(
+                self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=None
+            )
+
+        # Instantiate default logits processor and warper here.
+        # TODO : change default values with temperature and top_p
+        # self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+        # self.logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
+
+        # instantiate logits processors
+        self.logits_processor = LogitsProcessorList(
+            [
+                MinLengthLogitsProcessor(
+                    15, eos_token_id=2
+                ),  # model.generation_config.eos_token_id
+            ]
+        )
+
+        # instantiate logits warper
self.logits_warper = LogitsProcessorList( + [ + TopKLogitsWarper(50), + TemperatureLogitsWarper(0.7), + ] + ) + + self.stopping_criteria = StoppingCriteriaList( + [MaxLengthCriteria(max_length=ctx_len)] + ) + + def _generate_next_token(self, outputs, sample=False): + logits = outputs["logits"] + + if sample: + # pre-process distribution + # input_ids = torch.Tensor(self.generated_ids) + input_ids = torch.Tensor(self.inputs["input_ids"]) + next_token_logits = torch.from_numpy(logits) + # next_token_scores = self.logits_processor(input_ids, next_token_logits) + next_token_scores = self.logits_warper(input_ids, next_token_logits) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + next_token_id = next_tokens.numpy().reshape(1, 1) + else: + # greedy search + if len(logits.shape) == 2: + logits = np.expand_dims(logits, 1) + next_token_id = logits.argmax(2) + # print("next_token_id: ", next_token_id.shape) + + # print("next_token_id", next_token_id) + + return next_token_id + + def _stopping_criteria(self, next_token_id, max_new_tokens=None): + # if self.curr_cache_index > self.ctx_len: + if self.curr_cache_index >= self.ctx_len - 1: + print("self.curr_cache_index reach limit") + return True + + if max_new_tokens: + if len(self.generated_ids) > max_new_tokens: + print( + "len(self.generated_ids) > max_new_tokens", + len(self.generated_ids) > max_new_tokens, + ) + return True + # assert type(self.tokenizer.eot_id) == List + # assert type(self.tokenizer.eos_token_id) == List + + # breakpoint() + + if next_token_id == self.tokenizer.eos_token_id: + print( + next_token_id == self.tokenizer.eos_token_id, + "next_token_id == self.tokenizer.eos_token_id", + ) + return True + + return False + + def prepare_inputs_for_inference(self, prompt): + # prepare inputs for prefill part + inputs = self.tokenizer( + prompt, + return_tensors="np", + padding="max_length", + max_length=self.prompt_len, + ) + batch_size, prompt_len = inputs["input_ids"].shape + + ctx_len = self.ctx_len + + # assert ctx_len > prompt_len, "Model cannot support prompt_len > ctx_len" + + inputs["position_ids"] = (np.cumsum(inputs["attention_mask"], 1) - 1) * inputs[ + "attention_mask" + ] + inputs["attention_mask"] = np.concatenate( + [ + inputs["attention_mask"].astype(bool), + np.zeros((batch_size, ctx_len - prompt_len), dtype=bool), + ], + 1, + ) + cache_index = np.array([0]) + inputs["cache_index"] = cache_index + + return inputs, prompt_len + + def update_inputs_for_inference(self, inputs, next_token_id): + # breakpoint() + batch_size, prompt_len = inputs["input_ids"].shape + inputs["cache_index"] += prompt_len + + inputs["input_ids"] = next_token_id + + batch_size, prompt_len = inputs["input_ids"].shape + + if "attention_mask" in inputs.keys(): + inputs["position_ids"] = inputs.pop("attention_mask").sum(1, keepdims=True) + else: + inputs["position_ids"] += 1 + + batch_size, prompt_len = inputs["input_ids"].shape + return inputs, prompt_len + + def generate(self, prompt: str, sample: bool = False, max_new_tokens: int = None): + session = self.session + # if self.write_io_dir: + # write_io_files(inputs, outputs, write_io_dir, "prefill") + + multi_turn_input_ids = [] + + if self.curr_cache_index == 0: + self.inputs, prompt_len = self.prepare_inputs_for_inference(prompt) + outputs = session.run(self.inputs) + self.curr_cache_index += prompt_len + session.skip_buffers(["attention_mask"]) + + else: + multi_turn_input_ids = self.tokenizer( + prompt, + 
return_tensors="np", + ).input_ids + self.generated_ids = [] + + while self.stop_indicator: + if len(multi_turn_input_ids) == 0: + next_token_id = self._generate_next_token(outputs, sample) + # next_token_id will be from prompt till prompt + self.generated_ids.append(next_token_id) + + if self.streamer: + self.streamer.put(next_token_id[0]) + + if self._stopping_criteria(next_token_id, max_new_tokens): + print("Stopping criteria hit") + break + elif ( + len(multi_turn_input_ids.shape) == 2 + and multi_turn_input_ids.shape[1] > 0 + ): + next_token_id, multi_turn_input_ids = ( + multi_turn_input_ids[:, 0], + multi_turn_input_ids[:, 1:], + ) + next_token_id = np.expand_dims(next_token_id, 1) + elif ( + len(multi_turn_input_ids.shape) == 2 + and multi_turn_input_ids.shape[1] == 0 + ): + multi_turn_input_ids = [] + + self.inputs, next_prompt_len = self.update_inputs_for_inference( + self.inputs, next_token_id + ) + # print(self.curr_cache_index) # for debug + outputs = session.run(self.inputs) + # next_prompt_len from next iteration onwards is 1 + self.curr_cache_index += next_prompt_len + + if self.streamer: + return self.streamer.end() + else: + return "" + + def stream(self, prompt: str, sample: bool = False, max_new_tokens: int = None): + generate_args = { + "prompt": prompt, + "sample": sample, + "max_new_tokens": max_new_tokens, + } + + t = Thread(target=self.generate, kwargs=generate_args) + t.start() + + outputs = [] + for text in self.streamer: + outputs.append(text) + yield "".join(outputs) + + print("".join(outputs)) + # return "".join(outputs) + + +def test_llm( + model_name: str, + prompt_len: int, + ctx_len: int, + qpc: str, + prompt: str, + session: QAICInferenceSession = None, + stream: bool = True, + sample: bool = False, + device_id: List[int] = [0], + write_io_dir: Optional[str] = None, +): + # python LLMGenerator.py --model-name codellama/CodeLlama-13b-Instruct-hf --prompt-len 128 --ctx-len 1024 --qpc /home/hupreti/demo/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-16c-mxfp6 --prompt "Complete the paragraph with 2048 tokens: My name is Himanshu, and" 2>&1 | tee output.log + + # print(prompt) + + # working with TextStreamer + # model_aic = LLMGenerator(qpc, model_name, device_id, prompt_len, ctx_len, + # streamer = TextStreamer) + + model_aic = LLMGenerator( + qpc, model_name, device_id, prompt_len, ctx_len, streamer=TextStreamer + ) + + generate_kwargs = {"prompt": prompt, "sample": sample, "max_new_tokens": ctx_len} + + t = Thread(target=model_aic.generate, kwargs=generate_kwargs) + t.start() + + outputs = [] + for text in model_aic.streamer: + # print(text, end=" ") + outputs.append(text) + + # yield "".join(outputs) + print("".join(outputs)) + + t.join() + + # Uncomment below to test mulit-turn + # generate_kwargs = { + # "prompt" : "Indian Cricket Team. 
But 2014",
+    #     "sample" : False,
+    #     "max_new_tokens" : 128
+    # }
+
+    # t = Thread(target=model_aic.generate, kwargs=generate_kwargs)
+    # t.start()
+
+    # t.join()
+
+    # print(generate_kwargs["prompt"])
+    # outputs = []
+    # for text in model_aic.streamer:
+    #     # print(text)
+    #     outputs.append(text)
+
+    # # yield "".join(outputs)
+    # print("".join(outputs))
+
+    return
+
+
+if __name__ == "__main__":
+    import argparse
+
+    argp = argparse.ArgumentParser()
+    argp.add_argument("--model-name", required=True, help="Model name to run")
+    argp.add_argument("--prompt-len", type=int, default=128, help="Prompt length")
+    argp.add_argument("--ctx-len", type=int, default=512, help="Context length")
+    argp.add_argument("--qpc", required=True, help="Compiled binary QPC")
+    argp.add_argument(
+        "--prompt",
+        default="My name is Sarah and I am",
+        help="Input prompt to generate for",
+    )
+    argp.add_argument(
+        "--no-stream",
+        action="store_false",
+        dest="stream",
+        help="Don't stream output text",
+    )
+    argp.add_argument(
+        "--device_id",
+        default=[0],
+        type=lambda device_ids: [int(x) for x in device_ids.split(",")],
+        help="QAIC device ids (comma-separated)",
+    )
+    argp.add_argument("--write-io-dir", help="Directory to write inputs/outputs into")
+    argp.add_argument(
+        "--sample", action="store_true", dest="sample", help="Use sampling"
+    )
+
+    args = argp.parse_args()
+    # main(**vars(args))
+    test_llm(**vars(args))
diff --git a/app/Readme.md b/app/Readme.md
new file mode 100644
index 000000000..688151515
--- /dev/null
+++ b/app/Readme.md
@@ -0,0 +1,15 @@
+# Instructions to launch the Whisper+Codellama demo
+
+1. Update `qpc.json` with the QPC paths (a sample layout is sketched at the end of this section)
+2. Create a Python 3.8 environment: `python3.8 -m venv env`
+3. Activate the environment: `source env/bin/activate`
+4. Update pip: `pip install -U pip`
+5. Install the dependencies: `pip install -r requirements.txt`
+6. Run `python app.py`
+7. Open `https://<server_name_or_ip>:7881` in a browser
+8. Accept the certificate
+9. System dependencies:
+   - the AIC SDK must be installed
+   - `sudo apt-get install ffmpeg`
+
+
diff --git a/app/app.py b/app/app.py
new file mode 100755
index 000000000..4d9743ca8
--- /dev/null
+++ b/app/app.py
@@ -0,0 +1,297 @@
+import gradio as gr
+
+from typing import *
+import os
+
+from LLMGenerator import LLMGenerator
+
+from transformers import (
+    AutoConfig,
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    TextIteratorStreamer,
+    TextStreamer,
+)
+
+from threading import Thread
+import time
+
+import json
+
+from pathlib import Path
+
+with open("qpc.json") as f:
+    codellama_data = json.load(f)["codellama"]
+
+
+# title = """
+# # Qbuzz 2023 : GenerativeAI on Cloud AI100
+
+# """
+
+title = """
+# Developer Applications on Cloud AI 100 using Transformers Library
+
+"""
+
+subtitle_left = """
+## Developer Application
+
+"""
+
+subtitle_right = """
+## Optimizing and Compiling Model using Qualcomm Transformers Library
+
+"""
+
+
+LICENSE = """

+Qualcomm Technologies, Inc. Proprietary +(c) 2023 Qualcomm Technologies, Inc. All rights reserved. +All data and information contained in or disclosed by this document are +confidential and proprietary information of Qualcomm Technologies, Inc., and +all rights therein are expressly reserved. By accepting this material, the +recipient agrees that this material and the information contained therein +are held in confidence and in trust and will not be used, copied, reproduced +in whole or in part, nor its contents revealed in any manner to others +without the express written permission of Qualcomm Technologies, Inc. +""" + +# whisper = GreedyDecoder() +list_of_models = ["mpt", "llama", "mistral", "codellama"] +qeff_flags = set() + +max_length = codellama_data["ctx_len"] +text = "" + + +def run_whisper(audio): + if audio: + return whisper.stream("english", audio, None) + + gr.Info("Record/Upload the audio now") + return "" + + +ctx_len = codellama_data["ctx_len"] +prompt_len = codellama_data["prompt_len"] +previous_current_ctx_len = 0 +last_prompt = "" +last_state_generation_ids = [] + +# codellama = () + +codellama = LLMGenerator( + qpc_path=codellama_data["qpc_path"], + model_name=codellama_data["model_name"], + device_id=codellama_data["device_id"], + prompt_len=prompt_len, + ctx_len=ctx_len, + streamer=TextIteratorStreamer, +) + + +def get_prompt( + message: str, chat_history: List[Tuple[str, str]], system_prompt: str +) -> str: + texts = [f"[INST] <>\n{system_prompt}\n<>\n\n"] + # The first user input is _not_ stripped + do_strip = False + if chat_history: + for user_input, response in chat_history: + user_input = user_input.strip() if do_strip else user_input + do_strip = True + texts.append(f"{user_input} [/INST] {response.strip()} [INST] ") + message = message.strip() if do_strip else message + texts.append(f"{message} [/INST]") + return "".join(texts) + + +def run_qeff_check(model_name, progress=gr.Progress()): + global text, qeff_flags + text = "" + + if model_name not in qeff_flags: + qeff_flags.add(model_name) + progress(0, desc="Downloading...") + # time.sleep(1) + for i in progress.tqdm(range(100), desc="Downloading..."): + time.sleep(0.0005) + text += f"$ Downloaded {model_name} from cache directory\n" + progress(0, desc="Optimizing and Compiling...") + time.sleep(0.5) + for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."): + time.sleep(0.07) + + text += f"$ Optimized {model_name}\n" + # progress(0, desc="Compiling...") + # for i in progress.tqdm(range(100), desc="Compiling..."): + # time.sleep(0.2) + # text += f"Optimized {model_name}\n" + + progress(0, desc="Generating Inference Container...") + for i in progress.tqdm(range(100), desc="Generating Inference Container..."): + pass + + text += f"$ Compiled {model_name} and generated inference container\n" + + return Path('./img/box.png') + +def summary(): + return text + + +def run_codellama(msg, chat_history, task, model): + + # DEBUG + # print(task, model) + # output = "Hi there!" 
+    # return "", chat_history + [(msg, output)]
+
+    # Start each launch from a fresh cache, and remember the previous state
+    # so reset_cache_index() can regenerate the last prompt.
+    codellama.curr_cache_index = 0
+    codellama.generated_ids = []
+    codellama.stop_indicator = True
+    global last_prompt, previous_current_ctx_len, last_state_generation_ids
+    last_prompt = msg
+    previous_current_ctx_len = codellama.curr_cache_index
+    last_state_generation_ids = codellama.generated_ids
+
+    if not check():
+        return msg, chat_history
+
+    output = ""
+    yield "", chat_history + [(msg, output)]
+
+    generate_args = {
+        "prompt": get_prompt(msg, None, "Give a brief answer."),
+        "sample": True,
+        "max_new_tokens": None,
+    }
+
+    t = Thread(target=codellama.generate, kwargs=generate_args)
+    t.start()
+
+    for each in codellama.streamer:
+        output += each
+        yield "", chat_history + [(msg, output)]
+
+    t.join()
+
+
+def stop():
+    codellama.stop_indicator = False
+    return
+
+
+def check():
+    if codellama.curr_cache_index >= codellama.ctx_len - 1:
+        gr.Warning(
+            f"Reached max token generation limit of {codellama.ctx_len}, kindly press clear!"
+        )
+        codellama.curr_cache_index = 0
+        return False
+    return True
+
+
+def reset_cache_index():
+    codellama.curr_cache_index = previous_current_ctx_len
+    codellama.generated_ids = last_state_generation_ids
+    gr.Warning("Regenerating output for last prompt")
+    return
+
+
+def run_clear():
+    global qeff_flags
+    codellama.curr_cache_index = 0
+    codellama.generated_ids = []
+    # gr.Warning("Cleared the Output")
+    qeff_flags = set()
+    # print("codellama current cache", codellama.curr_cache_index)
+    return
+
+
+def clear_img(img):
+    img.clear()
+
+
+# Combined Interface
+# with gr.Blocks(css="demo.css") as demo:
+with gr.Blocks(theme=gr.themes.Soft(), css="demo.css") as demo:
+    gr.Markdown(title)
+
+    with gr.Row():
+
+        with gr.Column(scale=7, variant='compact'):
+            gr.Markdown(subtitle_left)
+
+            dropdown1 = gr.Dropdown(["QA", "Text-Generation", "Image Generation", "MultiModal"], value="Text-Generation", label="Developer Use Case", elem_id="task_id")
+
+            with gr.Row():
+                textbox = gr.Textbox(
+                    container=False,
+                    show_label=False,
+                    placeholder="Type your prompt here...",
+                    interactive=True,
+                    lines=2,
+                )
+
+            with gr.Row():
+                chat = gr.Button("Launch on AI 100", variant="primary", size='sm')
+
+                clear = gr.Button("Reset", size='sm')
+
+                stop_btn = gr.Button("Stop", size='sm')
+            with gr.Column():
+                # with gr.Group():
+                chatbot = gr.Chatbot(
+                    label="Response",
+                    elem_id="chuanhu_chatbot",
+                )
+        with gr.Column(variant='compact', scale=3, elem_id="qeff_id"):
+            gr.Markdown(subtitle_right)
+
+            dropdown2 = gr.Dropdown(list_of_models, value=list_of_models[-1], label="Pretrained model catalogue from Qualcomm Transformers Library", elem_id="model_id")
+            img = gr.Image(show_label=False, show_download_button=False, container=True, height=260, width=480, elem_id="qpc_id")
+            # "block-size: inherit;"
+            qeff_output = gr.Textbox(
+                container=True,
+                show_label=False,
+                lines=4,
+            )
+    with gr.Row():
+        gr.Image(
+            "./img/full.png",
+            show_label=False,
+            show_download_button=False,
+            container=False,
+        )
+
+    chat.click(run_qeff_check, inputs=[dropdown2], outputs=[img]).then(summary, inputs=[], outputs=[qeff_output]).then(run_codellama, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot])
+
+    textbox.submit(run_qeff_check, inputs=[dropdown2], outputs=[img]).then(summary, inputs=[], outputs=[qeff_output]).then(run_codellama, inputs=[textbox, chatbot, dropdown1,
dropdown2], outputs=[textbox, chatbot]) + + stop_btn.click(fn=stop) + + clear.click(lambda: None, None, chatbot, queue=False).then( + lambda x: gr.update(value=""), [], [textbox] + ).then(lambda x: gr.update(value=""), [], [qeff_output]).then(fn=run_clear).then(lambda:None, None, img, queue=False) + dropdown2.change(lambda x: gr.update(value=""), [], [qeff_output]).then(lambda:None, None, img, queue=False) + + +demo.queue() +demo.launch( + server_name="0.0.0.0", + server_port=7881, + ssl_certfile="cert.pem", + ssl_keyfile="key.pem", + ssl_verify=False, + allowed_paths=[f"{os.getcwd()}"], +) +# launch() diff --git a/app/cert.pem b/app/cert.pem new file mode 100644 index 000000000..63714f36e --- /dev/null +++ b/app/cert.pem @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIUUQhsXysbsqfJ76nblXaXror6uNEwDQYJKoZIhvcNAQEL +BQAwgZQxCzAJBgNVBAYTAklOMQ4wDAYDVQQIDAVJbmRpYTESMBAGA1UEBwwJQmVu +Z2FsdXJ1MREwDwYDVQQKDAhRdWFsY29tbTEPMA0GA1UECwwGUXJhaXVtMRgwFgYD +VQQDDA9IaW1hbnNodSBVcHJldGkxIzAhBgkqhkiG9w0BCQEWFGh1cHJldGlAcXVh +bGNvbW0uY29tMB4XDTIzMTEzMDEwMzAyNloXDTI0MTEyOTEwMzAyNlowgZQxCzAJ +BgNVBAYTAklOMQ4wDAYDVQQIDAVJbmRpYTESMBAGA1UEBwwJQmVuZ2FsdXJ1MREw +DwYDVQQKDAhRdWFsY29tbTEPMA0GA1UECwwGUXJhaXVtMRgwFgYDVQQDDA9IaW1h +bnNodSBVcHJldGkxIzAhBgkqhkiG9w0BCQEWFGh1cHJldGlAcXVhbGNvbW0uY29t +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAy8Saa6I4Rulg//8bBbRP +S+YdO9X/kTO4X3Zdg+Mzl69vw+fwsLB0caOAh6P2sQZ+Vrj7cYLh01A1WNEOySvI +P1z2xxDf/8L5VcfGF7/V5K5+yGL8Jf7b2G5Fn1Z1lKHgijYluHWZvJeEW05tc8Ti +WHSTYX1YTHmJ9TtvjZES5qCDrpXbZpcOnW7qbxehRSrzaaIaKWEevFcQXVUhA5wI +JbfZs/kXu5eGxzqg95YyjHlLk2lbGQYz7hlkjgM5D4ekALCbqLWFuNP6QqsSC/bi +tS2Sim1NVRwFuWgTa5V0t53RvNZkN75m1bPlQkOW4ROlifP9zsRHQA09UaHpRqUC +VueqorrOUOkpWcZ3e0YuPWAk/4xljZI4iw75XaSwiZ/BrUXDCNnHr+KLaUuI+AOI +9io2njAp7aDx8zLIU9m7fdTBnN6qvfCsL5zoI2hfzOrz+Ir8ValruwYmWH/hV/0P +xaT+zufZTqFEQsPCgXp4z0r+fVeUYwcAUnjD65/IPXbYvs/42LEkVZZjamPdohZk +nmiOObbX+a8637ANq8FRdn6VlpXNdkq1cWGn65xXSNbfFG6xcIV7w+oH03jEyWx1 +wtT9Rr+xVlf9vTgO2Ao9UoKpImVU9wiXmRQLZeGrez8S8YVJhLN6HaKIV3S9uS7U +ug0DpsSLpd1xDhGwzLEpDRUCAwEAAaNTMFEwHQYDVR0OBBYEFJoVccLyUC8VHude +akBh7kNF9qX/MB8GA1UdIwQYMBaAFJoVccLyUC8VHudeakBh7kNF9qX/MA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAEM5sWg1djsoADKJ2JzgQFAp +jAx1yoAN90sHUiL9wZQzyttXqcZ2iCR2p1GapKUP8hhTdF6K4gEepSzQ9En37lDu +iJyufd0cqKTzGUqZCxgIRFPl9Kg5T0pMTZv4npvhTSZzuObGb1pSUnfT5jLhALP8 +WcRTEqqHJvBU8d0o7S1Px3FJZ0DWQWPy22pvdApoFc1X3wK1jecaiMuF8AZ1JWFb +KZSy9jU0sZqSrQPaep6s/vD4tHr9v0ppQiO4rAovaKw3p5odi7dihKYEZf5eWvVa +qKNtel+ix7mIIxa1g54MS8Ewi0bRcFiiehny1/GS8Pw1hp3qX7VeCe8gNvOLGyZ1 +mjJ/2ANpBZtaF3HnXH/NSW2Wp71NVLHhOYmRbqckhOfj1Jm8EUUxxsHxXN1vYfr4 +ENlOz7wAUt+r58YyQAgU/T5H1QhXrLugha2FLMClD0JPM3DL2aErHazGved7WBEm +HLISuv2HOczGKx+FWgT0o8bvk3AIN7KpVxXG1aRvkyB4B7xCix5ZXsfzqHNDK2Wd +hfNGW9TT+0pq8nrIwJyxUkfPKcn6Z5HUtXTzeTJqcpvpJb7JJg4Wmc8VO9lwHCkP +utzhBKn3bplKoDx/d22S6ASx3rq91iIDmAoxLvdCW5fzTKIOxLkgCrhnGwWDYNot +1LJQvKIc/8UTW+14aSEN +-----END CERTIFICATE----- diff --git a/app/demo.css b/app/demo.css new file mode 100644 index 000000000..1b994563b --- /dev/null +++ b/app/demo.css @@ -0,0 +1,108 @@ +h1 { + text-align: center; +} + +h2 { + text-align: center; + font : "Shantell Sans"; +} + +/* #duplicate-button { + margin: auto; + color: white; + background: #1565c0; + border-radius: 100vh; +} + +#component-0 { + max-width: 900px; + margin: auto; + padding-top: 1.5rem; +} */ + +#qeff_id{ + /* background-color: var(--block-label-background-fill); */ + background-color: #3253DC;; +} + + +#banner-image { + /* animation: animName 4s linear infinite; */ + width: 480px 
!important;
+    display: block;
+    margin-left: auto;
+    margin-right: auto;
+    background-size: contain;
+    /* height:20vh; */
+    /* background:#0091EA; */
+    /* margin:20px; */
+}
+
+
+img.svelte-1ucs3qg {
+    width: 480px;
+    height: auto;
+    object-fit: contain;
+    display: block;
+    border-radius: var(--radius-lg);
+}
+
+@keyframes animName {
+    0% {
+        rotate: y 0deg;
+    }
+    50% {
+        rotate: y 45deg;
+    }
+    75% {
+        rotate: y -45deg;
+    }
+    100% {
+        rotate: y 0deg;
+    }
+}
+
+
+#chuanhu_chatbot {
+    /* height: 40vh !important; */
+    height: 100%;
+    height: 350px !important;
+}
+
+[class = "image-container svelte-1l6wqyv"] {
+    height: inherit !important;
+}
+
+#qpc_id {
+    block-size: inherit;
+}
+
+
+[class *= "message"] {
+    /* border-radius: var(--radius-xl) !important; */
+    /* border: none; */
+    padding: var(--spacing-xl) !important;
+    font-size: var(--text-md) !important;
+    line-height: var(--line-md) !important;
+    min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
+    min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
+}
+
+[class = "message-wrap svelte-12dsd9j bubble-gap"] {
+    gap: 0px !important;
+    /* max-width: 85%;
+    border-bottom-left-radius: 0 !important; */
+}
+
+
+[class = "message user svelte-12dsd9j message-bubble-border"] {
+    text-align: right;
+    width: fit-content !important;
+    /* border-bottom-right-radius: 0 !important; */
+}
+
+
+[class = "gradio-container gradio-container-4-8-0 svelte-1kyws56 app"] {
+    max-width: 75vw !important;
+}
\ No newline at end of file
diff --git a/app/img/box.png b/app/img/box.png
new file mode 100644
index 0000000000000000000000000000000000000000..cc88444c53ba6027984a091c296b460505447f0c
GIT binary patch
literal 2135
[... base85 payload omitted (binary PNG data) ...]
literal 0
HcmV?d00001

diff --git a/app/img/full.png b/app/img/full.png
new file mode 100644
index 0000000000000000000000000000000000000000..ea8589769957f158355b2fb2f9248de192135cb0
GIT binary patch
literal 70392
[... base85 payload omitted (binary PNG data) ...]
zSp${l|6Rc>t)!kvw-Mn3zB&ku09`Uy$v}d6wM+6F zBFa9AaVWY7K03=1X|QNL2XGyKbIO$gYXw#MPJ;Ty!Zr{#Sc@Fsx&qPF<#yUj=$~ZP z8wP;c#{I7cct%id5~wzv(fJ*S2MWUNmFPY~pXZN_4eQ2~*AV=mu=B}B$ZCNu&buHz z-X!q^Oi(gFDx5IHUy6p2N$FisIQ8EiJODk&wB05r2q5C;Pk;cRG;%%g1ioV#)OWu$ z@N47LVM6qW@gt-{!XS0KZi_IDU8U%;-D*OeG!9FH^pKn=zs=q*eK?Q&9{-^fvdN+) zv3&88Gm_(mj4U7n*k-BZ6oxu{2YO%ixw$exIspAYzu4FfJw~DfPN9;1@;CpuGQ>u3 zdNuX9{b(i(iA+ZUuu{`+ql9J&rZ`~wm{AaN1IsF@O}Y!vV?obQ>(Je9jqN46PSaBh zRqH?W{L(>smWfD~*js3h)Y=k-(Dko*(h^O{-+c0^1+)z39@D>t2G>v;LeZ!vigwZ- zWPTg2bH^Y|EGU@a7rY@N22l3VkuP>nf2e>*-3N*`64op1QIKf+0+nAUJt^_E2@3qej0zR2PzZt+`HZRxd?boYunrL-N%t%~0kt z{{adD8ar5e=WCveU;t>xs&ggI3zUdb2u7YJ<M}TVGsXZ+VfRc55By{9;VP59|bJ>F`icZ>S*n(^Rv50B*t3MwhM&SeP$Wde2i22(MBHqa1zCFt9#qvrm<45Y^X>t0%x*@dTwf0`M)|a#RR6;D^R(U-B!6ebC&IK^IFvCw|o&5VvAND@M5l}sH zRs~4W6F!b!uX*c4c9ugZBB4yeDex}hfP`t~;xRfI)Q=$T!|(E-24!~8NYR+jtWPy? z2%k!D-D=mOccs=3`zwE-Vi4umhX+cSBG7StNk30v^@E2Af0bE54Kbiyr(ew~#LDn| zV>Nu<$s+|8-UFJN{)*o>FH$6klV=d@xPzaSQ^2DYKx;BvD+2T}ETi%^M-tA!q%r{X zcoL9m#DEef{f-EETjA}a6^8=q9ky3QYJoW8$BvJVRO}^wm8@yEsEu4QAV-0!ws&6N z&JtRhf3E~7Fl-mWZ1MY?h*U|YJ@RN58K4)-WvoBgBRHSC#_)6geGL3d1<2wa{Oa+r zCgZUOd^kkd=Gf({CCLDN^Y66~bAd7Bvf{MZcQuv-J6B!4&LGKue*|=)Py^S&TBN9= z=#XcI=$#Z+>N5WBIfRm1fS~I^A3#g6ZNjGM*lC)KkFwf;XgWWeCfFUT{`5n%PGSev z)e-_KM`cFLCVqJ3rdv@RqtGJ7k)brn8z6XaK`s{Ca}$bTL0hcxgjWn1OyPY z#OHbJyH9%Mfzi*9IbbM1C`AQcFMkne1DnPA@#0>;7OfYh9S_O$4@v+6ny_DPtLa@} z3_v^DH%$q?y?+}4!#Dt_^dh-?Uv^=@Jm^q*kp~}{#abW^IMYi|{#`(~?jI3_5xf+w8mCP!s)E=Z0FlAV>3|#S);xBLGZA_B&0a z@u6l#=*k(e%zA$umjXx&IvH778fdFIyu6IS5_14BcorjmnL||e!B6+DkTr!a{C)d` zf3}LZSc%cWN1lbrm;)NDvN3N>pRq0dzRTRbz$1se>ra_3~H>&lyr|5{%k?tn)>`^dhnOUT|6JJfcdr@iuMNxm+S zy_dnRZya56Qvlth-co-lDIu-B|C+7Z8-Vht$dw_U!Ii-vL_8rJ7%%_RSw?Ug zM7j=G=cM(9U{#z;NZ!ORojlMG4!RActUTU&%9v^PGppWE*%Ht@R$x3GeQFPWCj2#= z1d~n<7Yg@TzPtj-Lco++ghTW(9;3iYAC-E9$|UUSP8;#3+Wc#BL3QxYgDQA947;5Y zyvWtA_F#@wpTG@%lLXpnXLrSIFlOB+6V7e2Y*xR1Kc$1}&GwVoESPPhK^v7cy@Dsf9TweLndEA(&fRrO8WcI-LNrMF!pXB5- zc6>I8PDboIA`c9dpbzzta-8>RK9u(W8k#xls+(`a#muSrInbWT`G_3c(IuG5eT#oV zW#u+)fT7A3p1TpjLA!IL7DEd#05yPhR^>b5V%@_dzeMT)Hq{a!)Jst7T@}00nWRk@ zb~buycJ~g|aHyAh zb^*Vy9BE4JpIiLr6e-KT%S3*pp!|gw`c?2p!|1`05~+Yf9%p;dv%vVt%oSC7;H!Q5+K@t;$pe|!D-0aX3qSBp0n7_XGg z8-mrCB&>`87XJ-KkN;cbPs|v-YfvTECT}%cMq+kh8+ny=gkSvC5@+`#l0+sD5QAR= z`z~F1#K<}HDV?x>8S6#87h|<&a&f9hx|6UCNYgo9ifdtnwVjh~m($Fz_J)IZ@?E|$ zosC+zI@E~|o`5J=4X*%ztb_r1RY1;9i&XNpmlc2dans>6rrh#eM<+xFoU!q>7l>Rd z@kkj)^(+%b5iD2CGf~Xj!lTkX)d5g+-vjL6KeM0>pQJvOMF7^1k8G>=2fNvS)#wTi zo0t-)vcB-D;7@;-Q&)Q7>0}uX7`HDi#_i}E;mF?9-Fak7A&EjhyLkRJKj*`P(Bom5 zbN+yNpA_7_``@=egWJiKGFtTA^D9OwWL`&J28d!3Z@b7b5yl~H_dprODF#;gtO(!{ z{!gU)OYJJ>Vtn|0afJ*773sz!j8<27b!a#DcxyeqcTIK@mm=r zZUY8*<1e;#E`w9w%zpB`@%~S}uv(D5rECslh!RHFTeaTwd|0*2SO1c^!)A*vn^0~;;U_B<`#ddM$a(mZ6kBnP-QLaK?a;SdRBWk z!)d~$-hlAJ!hkK-=N}lSHIC`L8=;zq;B9n1vYwPc&k`qsEr6U}Jp_mO!Z#*|-3eSJ zR4fB`?r9r?x(G4F5+n}T$S5sNNE6X|E7xCsssuyC=MRUfOW3B|q&taQe zMmqLn8^8oejg&Rp^Z~slj1S>8n;KzSO2gCMM&H0Du0XLE^HNRxL`agYt1f+cK*?Hl znn8dR+grkJr`#OgzW5WQfi`QFHw z!M1gfou0HNsGN^tv~hT#mk@XLYNBE9IWLtV>HrIszu6yszQ{U|( zIZ+#42M3W^JuZj{YjFJY~T2_l+xqJ0Gt2maP-( z^4Mb{2ImVs;TyF`5HAaPWAH}5i}B_sP?6XA#y--kJ@1OUON*jup9VZMl%x51<68Y{YI3q3PsH6o} zGO#ZWiX8lv2|IKi{JoiK_r~z=e-r*ySNzKul%V*e#zsNQalE-;8iG{!A1J3=GXEAh z^<+OlxBkCON!g5=}FpIgb9-i{kT8_`m%Qi$2;O z^gha}5b90c2g;OzY|Y4b%sC96L3#^fVz6l;?TfkRfo~1EQmw%h$tZAaB^PM!0K^XP z>O6BUxg>t!N z6wDE27u0>?N7qy!^U~}6su34VHDlyRIY~mfD7V@VYIN%amd42Qoz*(~4H^*c8OTB2 zu#>N+4>0gB8Efd6Jd2iJ_5n5XOJ#=0xYa zich#5Kg6ZeXes^+Di;xQAUrLT1kwj#!n4d24l?D!Yf%V3Ion;A!tJnyP!{^No 
zx-$96x>42;B(c8zI?Eg`4r9{=;E0J~cASfrQxmcvDY{#6YhCHIcWg-`uIHKKJc(NEuP&leLDNhj3WGT6#a0AUC{#%?P^iO-Lsg zuxEoYXN49%luy7I2+}n3XL}pnbE|g!@(67JD1tU&S%`y?zAEjroy_1F$_ailyq>LF z!bb2<{)qQJR4gE}P|%|y*PrugoUb?T9&_vVJ|JT!-S`LI-a!t9~DCIPev zN<#pfFzESXG)T8N2oROOng=Am2VLOrBh=al;O#feJ=T=i4r;wOi{1iZin$N66n7Z?|H0YSK-pYyOH+m&D3eYTixvz=Yr8J_#Kf(u$>T?3Q$ ze*zHYS3%Org03_OK^$D^z~co|W|(9T%OT)$q1Y*iE`>m=@3|FHvWCqq`$?PM(D>P- z0hm7M2)$IM2p&nU!p>_r(;~>bs0$uR_yjU@z)81!0n*YzooRe}Id3QDa;Qp4&0>EJc_;X~2_4je!Z8S!<)B{FO1??PzrNPgr_f~T{Re^%;Zr8|o2$dC z)*!q8+yi&seGn>)1ngq7-Kf3_+*|UIM7$&i4WD zI}@ku%?}?)hxQp|CrEJYLkRhl#_yul@IN4R8o-VSiLAXTyCb4fCwS?OG0lrzKYR>O z?1C4QM+q^!&8gmc1NuLxX&`tOSYTe|;V~ri2;zv-2#My|uEX+b($H+$#J!w*IniyB zp;B^?ZMDz`;;sNU946`XceRN?tIYugB6d_vAQ9=DvvDk;_gj;L%=6w#Xv9YNX<5Qb zae(y(K*3SPBAZoZ)wqzC`*lD?PZSQ^PCmKc> zc#LS)_dB^pxC0E;z5cwEUB6YHcmV+J4f{nH5YUqX+6_Z_NfPMyC`?m|{9(u(A)Lmj`s`AJpAPd8%!@ zjuKjJ0rqhC0Q^sg2)qZ&O7?8~v)tXLvK{I5#usaRHXM5#EU-s$H;fi5H#%C6Ju5QY zUK&h0$8XjV3k1z0D48DMP9Kv|dbNQxsc-3kKoMFF`~scTXqEJnRW4&jz-9d*JOcy5 z(T=F0u(*zBVJ_(PHC}dWy035j?l)@y33^JFJ3Avm5%usGykthO^xV6pGhRP;z$BQ; zcf4$7j({)x?7AwoGE(7WD~_z)x!cU@>F%hLHHn*Md(-&96!NIk(h(ba?$k^BNxQ7a z7@@Xjdxj>VHSfN$D-E(};+#^1VZ8;6lpfC)3(}Zg{a_kn?07B-$a29|t2MgcifO=? zWC%HUcUWKys%mH$!?LqqQ(MQ(op>+$7x;L-mmXsII0jRz82OX)>k-xh%-oaWSL#rT zdk;W1$j2^iW!b{H94_t$w;fe1jq|RpRdhVp&He4&Saqj>}1b1mc7>Zre zuw6XCbH*>#c-!#tzM+)YLqIMGOBi>O@CdI;v*9Mn+2vqiweXbF%KLpyX$Hn-$W2>k z*CD=`3Gb8*mRK%b4YYN?oDuN8r(yC+9)NENiZoS) zBdVppJzmmny!c_@lE?Z__6Ay|o1e;QG;;Lg(kCZf44fx?9>MF&;azZ6Cq35?qSf-93jwk=7>cnqpU~Hw#{j&v;Tp2!bqT zZFdiJSc%^65O$(-1Nj&!{p;74Lf%6gwW0guxCNAlsVe$>nMkOiT&NoO<0lmQ@9+a@ z>UBjC>yOC9fz2>Q_$$M>%*&=kTMD^OiMALHSkoMSd<@$(q(0{K#Xw{RArPdp`s0>E zG}HU3`>(#i9&+C&sq!}$;yKOs$s%<%AgY!^H7$^+!o}F>=_>;gGPZN-*-yWm{rQVV zvuE=8g&v|{Uo8%fG_N4uCq}wgsaXEG;w?VFE;6w)T~QhC(y=~ed;qZMQ1QUrcbRoqOB0ANm zipp&qW(7gB*|KO;y*FcQFKnT&k*W17lbPy`Bzo>EvG=>_f%W}ty^4jc?q|qr@OH$f zTN6dsFrQr>$eR3b& ztW6KU&5D!VD#>BSpPgUMi%EA2&#@rz19+vOms^$8JaLnk+xzA0I)8iak07q=!oId^ z&{%mCkia50!g(qS(H&;CIUd_dm<&r^?Xjh}=#hky`37p&y}n?jeWa)EXfCF6k%^aO zprD|7{!%CV^Kd2%EV9ffF-+sOqGf;PS-?+2QrVxa=f-_T_X((BZp+3GVqM5VN! 
z7M+HzOBBRyQIWn|MWG(SuH)YV!DmW+UUOT)WV!p^3@Uv8y)MEcY-+ zm^Eekk1!X+(z&D@7rfH88OP~kOmvYXzn>3qA*c-8H^RyVw~8YWsnxjsorTVit@vS9 zO#|$F$pY)OC>t_vgpumB$p)S06%x|gt`!kl5rU48yi!xM?Oq`Ai8b(q;t9Gs7|i!a zpcA=W*{x&0oy+Q+xAOTkVsc802JI(X((ej{`5T(t&A3IEd-@b-g8;ci(xt~93MTIE zm99&9&1@vP`JR|-QvE+4*%G2 zdA4uvw6i=)JH|J<*Xbr$m-)lxggW}1T9(z;LSLVr(`cU+bJ#|b%99LC{9HAkkm&s4 zz<`#dliSX6To;n@RbYAF&!IgGq?lOjtxr4LSX#))iSI8$p||5%HO8@fw0(6>Bnw34 zc3(X?D|VIc!5;FlxF_lCO-b7pvkh(7;=brq$~-4H6p}6QpO6`yD>~y=3Wba1wi=r5 zOCjy_wa+<$o)-J8?9;MsHufGiW1*&Gv&EDT_DSX*r`8pIxAd#eRee=kI3IqRZKcsV zcHia7yU*0W#H239uXy4y&9_C&ZIv6p3f>7tvaPTjQ<>&0b(D$`WzBFsy z9D@61ASO3F&OIzpczS<9!|kD0N*@(Z*gf_43mw)@%R|+#X0zR@Npc@rd!@n^W^$}c z!gBjw{n-Uv?_zQ@JlM9&vp*bJvOZLuH_#kvwH#iItu+vvQE~e4xWjsk$pw*A^<$u5 z1qg(klFP3%Yz*_lswvYq)#JZz?Qb;W*R^&^-{$E=d9TBxe;dv4OFt|42A>h=0TYg|7gkWXZSXDO@`b8H@aP zTj}+6;IjoKn$o&1p4isK+eR?<=5+N8QYnq>C)%0SiNlB@1h3J$c!UaG2}Oom zN6@98{`R2go6%|XgWME2w@zWIi)o<^|6;hVjd?#Q7p9BqQdmy=4btObEnV6(S(IER zV=dK`6t9a~s$UV`Hlev@eJW!+OzH}m!QeBzq>PXs)sVEMAEbJ(=|#) zuZ;(oB$Z@umfb8?BSn{oRk!Ro265#)7X6^9kI!$vhG3#UX6b+tPd%+;nZH8`>xo!J zG9#)Jag^n=t)7oZg^JU0RwrR0d+Y3^_#2Hqwv}qk?wXBy(29xl$I9vF)TlCD*`Yob zdkR`F>=$Y($9(JKE0<&XX1Hv!JZ$H~wiH}p_CFUwR+BX9g}hnx$Xy&{xlNSWE`m;9 zOfm|#sW6dAep0jE!WC0AaXQ?bRh3F4i8||TdpUMds;Rj-=4)`y&3@l6I^Ok~=3o1$ zPB;ulFOO7h4_USkt-VeFYG8>@g`+id5NFP>pC3K0W`=eRds9nkSr6nUx|rHE)N8Bu zWif^e*p5|}Eh{-y`*JjxMr8=T(D2Rmn04})E#=qen&q4=V>0AIw7R9cSyu$FV{clV z7mBO7b=m_KDZd^ih~U{cD})fH#SOVPvZmpV*A`B%1R0I1d+Z?MpA(oAydJX*Y&3v{ zvF}xhuOU*5aA9>%)kb{UzL#)SKD<{R=2d2apFl1e9CKM~*IbyUVC7xJ%8jL-s(*Bu-Qw%IJtZ%vnvCfZfj;n<&v>-l zAw*;GT(U&9S-eZKcE&<=awj`yUvtZx)!!Psfc#jJ{D6sfo){>mCW zR4;tifu}n~?U?)v&WlM=B<}>XNxq%d;%snh2u0p#d6~i*6=8ImS3WmGUBC7I4~zC_ zVc7;1b7}if7&7&weVPZ7w6+D8ET})!v5y)m%8;6wiLuR&KviGE3^J?gBODdgvImnD zVxqI&dn%UOOX!MJ?IJRmG5C&}M6ZI|II6JbX8I>u*qM5H=_kCI9vEXpW`?_7-tOFK zm}x7qo0G;k%ZslMalB~1upP|+>3uf>eu{B<18)q6OB*5=Objwv&`Zc@9?eo_;aEokmH0H8Z5E14w4svy}dV zvFh<$nDULAJ!Oleb(dyiGEZt9djt&CY_3WLicvot}c;hM&rm+(OieTFV5@ zyvLfSi!8h~O}w}+=D)poS-p8knhAw5>RQx|3V7S6*gFqd^e}cd(bPZp!fq|IRHT)g z?X`K15W-X)8WP+1Z)mi%N6ujMoS8xSIQ2eJ4;!K5+D2;1Ks%8?#+P;LR`MLF11Yo6 z!*sQ)H_xP$U()J;BlVp67FGMG^c&Gziy5O8PRK=*=P6libl%4#MlGINcRf#UDO+rD zv8Pf6P{0b(;|<9Y&7&^uWjR2^T3A_S;_|=gg%hRU7`&CMA~C})v@DW~V9i&`42iyd~CR(oD^$cpQ2W9~;ar)ARPDBu$C zgtUoZ4YYWli~K-ZB}>{j5$<;)3*~VZQB!JjoEY(I&mw1A7uTj(@ujY~z1hXBHa!VEi&k46dVEeR3Ks{mOP;hyY4nU9)P(l{|LIbsDC9}Jd<|FPItjG)H-lLQoV zsZETCmnw*m0>_uo?Ru^>&b|d-{Vz;*XV1`cC7tWBE*aH;#H3U)CDo|&RABqiFaBTD z9;@D_i_YXaNng}4a~gW*l9_s=GHd2#hsPgdIc7mB6ibHpovi#JHICO`XyYnu3|QP( z>s;|*zgyU#?xEsPa2PAk;qrd0=gnyoN9n?60|ak;$OpH`O}Lt;u1Qi(Wy;WwsY{>^ zYO6!Q#H1)cP-U@8mBB9#Ju0|riF$X%7>FbyxOK-9GCm(Y~+JlQ{`Pw zy0Bua!EP&N*5&9&+H_o4M*7+omu|n}8gI@AMJ+JpH_E78S{Y7a8$ItC8|77YILB-@ zW%S(&=~)HDiFVKJunldE;^;@B{36_EB4~zEYe%eFpO`J3)!mD~mLxr0#B^rK-_Fy8 ziqxmVm)UKrPgB&AcGRwwr+G%anf;IO+_~v8V38n=TQC=qc0j>*p@+e4sD8*ixxG%? 
z2lmxo!UWciY;P{|se2aT+1vZ?q`xMD3#@ok3oiCZxMhQL%5gAI?v6e|Lw4d!T zo_(M8fp7NOuzBY%Ux#vRI}SBmqI5$E87*uXL=li=*GmxKb&AiH%Z(MuxasP2teLF& z%{${I9j>O^>5JNo+z|ZM-xpU_<|F-ez2m}3` z*-CP`aOAaHb%LdgTpF3r`;$GXz=Bt0H_FM8!e*ITelelo%W~Aiw_B{e@o6U{_(r;& zs&*@dECf~^swE&^YjC_&zwa>Lc#)ugszUR5ddJ=lt3|)rH|*WH0N(OceXyWv)vhqA z`Wm;St9f5}vN88HSi`mk>sW=I4B*p%oR|*a<3kR=Y!^oMB2*#2jjP0>^>eOZFq+Ek zqSb4GufxK5%l>>W&qClfQ<|lAOrz{)1XAN{BCR;5TFZKn#Z}wq8_+SUUAEYMTVXd+ zx2Zr;wph{S^2O{2HTQ3PDq9TNQ$?WFgn?#aJZn3AfA?DOhpZ}(Ph#`Fdu$=f+CvYA zKY3t*odmDqYb{2QS4-~w3E6Uemc7AZQ8eZM!AwnhNT9GB8tCnFuP|pqd}=Q>mmn_+ zb{g!pXhvrR|EU_U5;;4Yu=9!wd2LFeMhE2k@CCKYHRw$FcJXO%YZQHN!K0QV z&Fpa}&h|F<2pc4Q*z40Y$QtJgpyx^Ftys#}-?aR=eXeK)wJCZrchJKYguqgekV*~T znzt3!rR9-wGTIVF;+pHc{gF?R`{T|X|g>_*< zLb|td_u3CZT3!VslExXnq*Zne^6;tFPj#ZzP|dZ2PxEQCs%2Z9%wcd4_dIBhUyd7e zS8t1}^_JaKYYoN^ugvVe^P+$NL+Ie$Tjm;nO-1oqVVn6}?K~8LBwYM{+`7)@ThNOP zijPs)R(}yB*zsmb#izBZiJ92O(LI!t%7Jux>LT69cvUwh{`X z85lJd?Hi9H+61uVgQ{!Kzv>)F|74Vl^$Q+jJP2CXMlJhzw^Oq+v*7eJZyWA#8}wNb zZT<^Js68(n#ybF2|IT68xrg;VRLCfAShit)%eJFG?5{`EWJbt`bEicdGz5!1 z%Q8?=0C_7!biGH{=Et*^wLMgeoa^dMsbO7Cey5nzw^74LE2@&`zrHNcB6jaMtTi%- zf(iZI=CnH6&_~RN%>_2}FKIjAJc>Pe;?()nh`oJDTEdnFHS&mqve;@OsZSPx)p0;w zMVN2HVDRnTsbI|*QCpj0HYX57QTJRu;i*j6wt)=1AtZ@K%l9Gi$enu{L=+eKf2&1h zT^QImC3wEUB%XH!qjNBh-qTG{kSJYP#P3!C2g)zeUoergRA@Yz2@O3AW{Q zbxlo8>30L@fzmiwVr$?q*Oh3xn5srPTxhOZ00a&;95ztw>6(vT?nFdxx@9%a@@7_V z4CbgC=)0;HocR4GCd(h-!4C)hQI`fT`e9!VN&WKKiZpbCiGdX~D8H!Qphn8<=cvrY z9D(uFyi6Pbc0LVFz3CY(Ak!?xiE_0Z!AKeyV9d6hbYLmnR>$jRMCA`_O`X0MsfY9| zD4*&Pz3%2K|D2odyA)o!U(*2pLM&{R5!NN026Mz?cSLpNB)12Q+X23NQ~sFY_}E6n zf_+85CiT*&SQ7X;7F(S3ZyVM|J-T}cT!Hxx<>A@46oh-oo%P&)nx*%(Rx9h-&zmh6 zXp_PegXh&qEP<+1?7CbwyRdaW21#iT1_*r~c*(4{&%@h+=#b2M__R|BW3>h8g6CLS zZMW6>fG5oXyM?4ityIG?>5!XaFg+baD8A}DVhX`>E6-xG{eSIU_g_;M+cn!NZbiUmSPP^6@hm>8N=Sr@6YNC+Us zT^5i479bG%okMYD?_Y55hx1E5Iho9ydCtr;&-eR%=A5EuosSr!8h!gJF*d2TbXGIK z27!|TP;+62b?-i}!!`lzjR{#fxw`_!26dWEiFzI6H84-zz0-njS}HDhs%>OGA{k7v zFG=xPQt{zlx}SUz=n#9}#F5?>g8?_#$v3j3Qvf~{zS`>8 zKB&Xc%N_l3bL~$55068sSi?qBW2^M1TCREU3iv|<#B~gXG9MT){MR>*Lo}~SAK=%6 z(62G4UOEd3kqLb7 zy_7tzZBZFq2X-LIbgx`&2;rqM`?4Z>_Y)l4M8K-r3!|wvfqNZQI*9F%;b?TmJ?lg7)!Sa48gX8saV{N%F+?|8l#8GU8u$}_h zn#TIO+MU-~>^u9tgwZVgU^9emDKf%h?5E8x@+qRbBAVaM-*TO?1)FDSV?- z%J^ut({-Fzqp9ahnT&pKrH!Ut$&<-HrI*0*rMTOT^w#P5+Sj+L*u^|%CUb@eAnv{} zLE{8w?5xlV`V#3>>0Ikq0!?vom3k^AH!6ZZ+;;U*W0rm1^UeL_F~P9CjUPKX{k#Z0 zJCQm!k*|{&z7p6WUI7$i_cJAWxy#{{?@7px7!s-R<=50Fe1mzFTniD6Vt*TmqE@Qrn+Zz(dPUa}+i{XAtOq zcH#qFxIPV0_Ku5fTAbh0D!Dp|IbdCDU;&48ro^iHn#n|zM%Q7)v|!@4q2Yp2 zy@C2|1ZCb9ES1blH(R>`Gl(Uh|KN27|8-aVUhn3>TA1gk3s{mz8IvxegmC(<;OQ@g zjUpUE*1L;3u0oJUTdKHKM8NBsM-k^@kxuNYHj`aS`Gt~ZyR{MYLZ^VwlJbIQG zJR^~$>zT=Cvu*g9t%)T@GYL^^$3C(^l=w8bTL!kLr|X4ZAf^1@LJ!TN(Tbjz;ylB# zs={i~=E!@RGWBcb1G={p<@b$1j?U6iZ`{Gf8S8f_*DD>u?qkvvo6=NUypK6MfYX0H z;y+Ko;?foThbU{xEuEP|6<`GgNdpcYJ|f_YYh4u4N*}O4Pzpz4ke4A=mw%Z0BOD?e zKZAWNOHF`X>=79tg~+cRezD*a>ipAYt|~4sjDWnE5!Qn1DXO^a3ryvU_3Q`Uh$DJo zc@~X-W!v!#P`TvBS%q~T87a@VIypHBLHIf#v4CcPimrDy(J*0iIaWvWJh%2Oh&tBP zTLE7Z;67sin>Ic@XB%k!?J^tzAd`-ocAB94@u`MvmSVGb&3||)4K27`>6g80yhM({ zRFzt+9O`k7?2~E|0U0MKXBsKp*^c12uO?h+b&fks7KV?+R4PO}RhJ|)xVZYa5wF_( zu=&ivIWTSmSsoCeiencAH3+ti9l>`4Fi#SHW9eQl?m4pJXS-aAYqP;ab92AiG;Rx} z%1`M!_At9Kn@eC^xXS}W-DN2IU`wo`EowI-{Erj;2Bm=$!!AQj)khJ(#))0nqrrft zA-)-r_ZFxn0X_;gX|@#pfCkr_kg^pvX8bA4b$=1kFqZxXIl6Uw*2?=L_)p8qb&9vVd3;}je2)RLoH2WZ3gIZ!C0-GN~-a6jfkwh>%f&d^%EJ){4(&VF zWDxf~k-h_h31*k{-dqfhtypERbL|o8?)UaRQlpuwS=R_jH$4LKZK~PCMS3C_3-8$?x*aq(tJS^9hQgcAEnT_mklwD0;qYeH_}x zQVR^gh2scSsjRKY&x?=8gKQzz4daTv(=jk})b1=OIBnHM(#opeTiAn{Rr&H{!95BP 
zkbSt>0w8M0|G|{~)p9>8#?6UNp*280N-V}ZEBBw=Rw$0zPd#;s;Q*}`Di_3Ki0&>h z0K-YPWS~q4JBtI7*y)wTYaKq4XE%PM-4_=641jyVI``~}jb6~81HDYx84#z_0su$x zPWcOnd?5%XQ~PVJy!()9FI;Z4fy;mY>pV*oHED0KIgGT_9oWUk{cFmVWU)UminpTA zr-x@2a!#M4n7?8fMSORA&A+1~PTxm{GAzd>{e2N|h38B&jGC}`C||4r_m!$E6ZEL*V)q=$CI^&dD9&r-E(!-vUL^pJnDs&Ma*=*w|5E>Zn;dJ?wgK=;vN# zFfJ1kzC~$(9Je^}9n@Ny2MEM4r8e`_?NZ|nR^ONyI;jsfM=kvbeKvUgvw-#j|2bUa zILjky+VGx2+}*-aR>k$`DiPDO9=QRG0mb~9CDK3Vnfu9}ve4C%VF-H{AGP>Lo$6J~ zFyKOE{@B0g7j!1A?;DQ-o$&v;W|?qZNyn7t$fF7(K&mEyv7(U*gg_eD#>()>>$ z1%p7}V8-<&dzls|-?CX3w*MSAjR?5>7m{xui~%EQl2Kt2q+GMLEZNF!al|avZe!=o z-2d|MM*bFAz43OhfgZh2MOJ?UZ>Ur+4?`BZa9n-?7rE8m)2R4NU)w|H<<;PoWx*du z#64$fZ51iA(-S^7l~$2WVr9N9R(l-EfsjBQ5teO1pKBP@zXIBg#3YO{9cLJSLoeu0 zpm4Foy4W7P*`_#|ZUYZ<@c#X{k25z04Jd{2#Nux&hIyUj;Y(*FoB>I$wE0Y(8xTsk zn8z4^Iseg>hZQ$Bh-v^q&b8%|jEW7c_}gV8bCE6YJW3r1^ZM`CdgDHZ*pOe_H^ zqp+s+z?UJ_fi#V{up@r4_>m-p3GEDiuPnw+SoTZv4bl~$$r6C(d0EmK z@Cjc!Ddha)k}q;D&>zxu!Oauj5mIzDjJQ&1r6G~9R`2Rx|5@CKzF4`ZwMhihhH-%r z)U6$q-l`X1YOBaP!&Wf}Jh!+bed#Jt2tAqt9!)IDU+7*ED2WC;v(7=#o49p(2kSA; za%dX{WDH`e`-p+moFCM<12A%cmF0+jybq7 z9T}{;QXNKm5L*1@H)Y8|&HVRa{vq-a%ZARV%~kNiq_F|Vxx!A<$Ehz>U5F^`?^OAQWM(At+S`TeNLv>4>OvR9MnvRmk@VprMFWx-S1V^z=X zor(Ra<-wkliTWqiRr~W~D|pGE!w&n=%O_g2h6VGelGGn4wM-xdzE~gXtltJ}Ilk2a z$4SLz?U!rDs-iO*fTR(K{@1c`(Xdq`kXG3*7a}kXP-Hblyi}4ivDAF$w{&gupUD(? zzq%xCjxIC-C1@>RfVW#_kzmFyGhf*k)8I|Z?nTIh{}(KRh#{U^dsLlLZ+ljEev|F# M + test_llm(**vars(args)) + File "LLMGenerator.py", line 405, in test_llm + for text in model_aic.streamer: +TypeError: 'TextStreamer' object is not iterable +128 +129 +am 130 +a 131 +software 132 +133 +engineer. +134 + +135 +136 +137 +Answer: +138 + +139 +140 +141 +142 +143 +144 +\begin{blockquote} +145 + +146 +147 +My 148 +name 149 +is 150 +151 +152 +153 +Himanshu, 154 +and 155 +I 156 +am 157 +a 158 +software 159 +160 +engineer. +161 +162 +163 +164 +165 +166 +\end{blockquote} +167 + +168 +169 +170 +Comment: 171 +I 172 +think 173 +174 +175 +you're 176 +177 +right. 178 +179 +180 +I'm 181 +not 182 +sure 183 +how 184 +I 185 +missed 186 +187 +that. +188 + +189 +190 +191 +Comment: 192 +193 +194 +195 +196 +197 +198 +199 +@Joe-You-Know 200 +201 +202 +I'm 203 +not 204 +sure 205 +how 206 +I 207 +missed 208 +that 209 +210 +either. +211 + +212 +213 +214 +Comment: 215 +216 +217 +218 +219 +220 +221 +222 +@Joe-You-Know 223 +224 +225 +I'm 226 +not 227 +sure 228 +how 229 +I 230 +missed 231 +that 232 +233 +either. +234 + +235 +236 +237 +Comment: 238 +239 +240 +241 +242 +243 +244 +245 +@Joe-You-Know 246 +247 +248 +I'm 249 +not 250 +sure 251 +how 252 +I 253 +missed 254 +that 255 +256 +either. +257 + +258 +259 +260 +Comment: 261 +262 +263 +264 +265 +266 +267 +268 +@Joe-You-Know 269 +270 +271 +I'm 272 +not 273 +sure 274 +how 275 +I 276 +missed 277 +that 278 +279 +either. +280 + +281 +282 +283 +Comment: 284 +285 +286 +287 +288 +289 +290 +291 +@Joe-You-Know 292 +293 +294 +I'm 295 +not 296 +sure 297 +how 298 +I 299 +missed 300 +that 301 +302 +either. +303 + +304 +305 +306 +Comment: 307 +308 +309 +310 +311 +312 +313 +314 +@Joe-You-Know 315 +316 +317 +I'm 318 +not 319 +sure 320 +how 321 +I 322 +missed 323 +that 324 +325 +either. +326 + +327 +328 +329 +Comment: 330 +331 +332 +333 +334 +335 +336 +337 +@Joe-You-Know 338 +339 +340 +I'm 341 +not 342 +sure 343 +how 344 +I 345 +missed 346 +that 347 +348 +either. +349 + +350 +351 +352 +Comment: 353 +354 +355 +356 +357 +358 +359 +360 +@Joe-You-Know 361 +362 +363 +I'm 364 +not 365 +sure 366 +how 367 +I 368 +missed 369 +that 370 +371 +either. 
+372 + +373 +374 +375 +Comment: 376 +377 +378 +379 +380 +381 +382 +383 +@Joe-You-Know 384 +385 +386 +I'm 387 +not 388 +sure 389 +how 390 +I 391 +missed 392 +that 393 +394 +either. +395 + +396 +397 +398 +Comment: 399 +400 +401 +402 +403 +404 +405 +406 +@Joe-You-Know 407 +408 +409 +I'm 410 +not 411 +sure 412 +how 413 +I 414 +missed 415 +that 416 +417 +either. +418 + +419 +420 +421 +Comment: 422 +423 +424 +425 +426 +427 +428 +429 +@Joe-You-Know 430 +431 +432 +I'm 433 +not 434 +sure 435 +how 436 +I 437 +missed 438 +that 439 +440 +either. +441 + +442 +443 +444 +Comment: 445 +446 +447 +448 +449 +450 +451 +452 +@Joe-You-Know 453 +454 +455 +I'm 456 +not 457 +sure 458 +how 459 +I 460 +missed 461 +that 462 +463 +either. +464 + +465 +466 +467 +Comment: 468 +469 +470 +471 +472 +473 +474 +475 +@Joe-You-Know 476 +477 +478 +I'm 479 +not 480 +sure 481 +how 482 +I 483 +missed 484 +that 485 +486 +either. +487 + +488 +489 +490 +Comment: 491 +492 +493 +494 +495 +496 +497 +498 +@Joe-You-Know 499 +500 +501 +I'm 502 +not 503 +sure 504 +how 505 +I 506 +missed 507 +that 508 +509 +either. +510 + +511 +512 +513 +Comment: 514 +515 +516 +517 +518 +519 +520 +521 +@Joe-You-Know 522 +523 +524 +I'm 525 +not 526 +sure 527 +how 528 +I 529 +missed 530 +that 531 +532 +either. +533 + +534 +535 +536 +Comment: 537 +538 +539 +540 +541 +542 +543 +544 +@Joe-You-Know 545 +546 +547 +I'm 548 +not 549 +sure 550 +how 551 +I 552 +missed 553 +that 554 +555 +either. +556 + +557 +558 +559 +Comment: 560 +561 +562 +563 +564 +565 +566 +567 +@Joe-You-Know 568 +569 +570 +I'm 571 +not 572 +sure 573 +how 574 +I 575 +missed 576 +that 577 +578 +either. +579 + +580 +581 +582 +Comment: 583 +584 +585 +586 +587 +588 +589 +590 +@Joe-You-Know 591 +592 +593 +I'm 594 +not 595 +sure 596 +how 597 +I 598 +missed 599 +that 600 +601 +either. +602 + +603 +604 +605 +Comment: 606 +607 +608 +609 +610 +611 +612 +613 +@Joe-You-Know 614 +615 +616 +I'm 617 +not 618 +sure 619 +how 620 +I 621 +missed 622 +that 623 +624 +either. +625 + +626 +627 +628 +Comment: 629 +630 +631 +632 +633 +634 +635 +636 +@Joe-You-Know 637 +638 +639 +I'm 640 +not 641 +sure 642 +how 643 +I 644 +missed 645 +that 646 +647 +either. +648 + +649 +650 +651 +Comment: 652 +653 +654 +655 +656 +657 +658 +659 +@Joe-You-Know 660 +661 +662 +I'm 663 +not 664 +sure 665 +how 666 +I 667 +missed 668 +that 669 +670 +either. +671 + +672 +673 +674 +Comment: 675 +676 +677 +678 +679 +680 +681 +682 +@Joe-You-Know 683 +684 +685 +I'm 686 +not 687 +sure 688 +how 689 +I 690 +missed 691 +that 692 +693 +either. +694 + +695 +696 +697 +Comment: 698 +699 +700 +701 +702 +703 +704 +705 +@Joe-You-Know 706 +707 +708 +I'm 709 +not 710 +sure 711 +how 712 +I 713 +missed 714 +that 715 +716 +either. +717 + +718 +719 +720 +Comment: 721 +722 +723 +724 +725 +726 +727 +728 +@Joe-You-Know 729 +730 +731 +I'm 732 +not 733 +sure 734 +how 735 +I 736 +missed 737 +that 738 +739 +either. +740 + +741 +742 +743 +Comment: 744 +745 +746 +747 +748 +749 +750 +751 +@Joe-You-Know 752 +753 +754 +I'm 755 +not 756 +sure 757 +how 758 +I 759 +missed 760 +that 761 +762 +either. +763 + +764 +765 +766 +Comment: 767 +768 +769 +770 +771 +772 +773 +774 +@Joe-You-Know 775 +776 +777 +I'm 778 +not 779 +sure 780 +how 781 +I 782 +missed 783 +that 784 +785 +either. +786 + +787 +788 +789 +Comment: 790 +791 +792 +793 +794 +795 +796 +797 +@Joe-You-Know 798 +799 +800 +I'm 801 +not 802 +sure 803 +how 804 +I 805 +missed 806 +that 807 +808 +either. 
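For context on the `TypeError` preserved in output.log above: in `transformers`, `TextStreamer` only prints tokens to stdout and does not implement `__iter__`, while `TextIteratorStreamer` buffers decoded text on an internal queue and is iterable — which is what `for text in model_aic.streamer` requires, and why the generator is constructed with `streamer=TextIteratorStreamer` elsewhere in this series. A minimal, self-contained sketch of the working pattern (not part of the patches; `gpt2` is just a small stand-in model):

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# TextIteratorStreamer pushes decoded chunks onto a queue, so a consumer
# can iterate over them as they are produced.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
inputs = tokenizer("My name is", return_tensors="pt")

# generate() blocks, so it runs in a worker thread while the main thread
# drains the streamer -- the same Thread + streamer pattern app.py uses.
thread = Thread(target=model.generate,
                kwargs={**inputs, "max_new_tokens": 32, "streamer": streamer})
thread.start()
for text in streamer:  # works; a plain TextStreamer would raise TypeError here
    print(text, end="")
thread.join()
```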
From 2d2facc67bbe6e5f9f91ef3adc4015718ae1d36f Mon Sep 17 00:00:00 2001
From: Himanshu Upreti
Date: Tue, 7 May 2024 08:11:26 +0000
Subject: [PATCH 03/18] Add LLMGenerator to library

Signed-off-by: Himanshu Upreti
---
 .../generation}/LLMGenerator.py |   0
 app/output.log                  | 987 ------------------
 2 files changed, 987 deletions(-)
 rename {app => QEfficient/generation}/LLMGenerator.py (100%)
 delete mode 100644 app/output.log

diff --git a/app/LLMGenerator.py b/QEfficient/generation/LLMGenerator.py
similarity index 100%
rename from app/LLMGenerator.py
rename to QEfficient/generation/LLMGenerator.py
diff --git a/app/output.log b/app/output.log
deleted file mode 100644
index ff4c32d50..000000000
--- a/app/output.log
+++ /dev/null
@@ -1,987 +0,0 @@
-Traceback (most recent call last):
-  File "LLMGenerator.py", line 469, in <module>
-    test_llm(**vars(args))
-  File "LLMGenerator.py", line 405, in test_llm
-    for text in model_aic.streamer:
-TypeError: 'TextStreamer' object is not iterable
-[the same ~970 lines of looped generation output shown above, removed in full with this file]
-self.curr_cache_index reach limit
-Stopping criteria hit
-@Joe-
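With this move, the generator becomes importable from the library namespace instead of the app directory. A hypothetical quick check (assumes the QEfficient package is installed, e.g. via `pip install -e .`):

```python
# Not part of the patch series: verify the relocated module resolves
# from the new package path introduced by this commit.
from QEfficient.generation.LLMGenerator import LLMGenerator

print(LLMGenerator.__module__)  # expected: QEfficient.generation.LLMGenerator
```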
From be4639020086f71fc343634ae7cd536ccb5f3214 Mon Sep 17 00:00:00 2001
From: Himanshu Upreti
Date: Tue, 7 May 2024 09:23:42 +0000
Subject: [PATCH 04/18] Update Readme.md for app

Signed-off-by: Himanshu Upreti
---
 app/Readme.md | 64 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 52 insertions(+), 12 deletions(-)

diff --git a/app/Readme.md b/app/Readme.md
index 688151515..ba02834a5 100644
--- a/app/Readme.md
+++ b/app/Readme.md
@@ -1,15 +1,55 @@
-# Instructions to launch Whisper+Codellama demo
-
-1. Update `qpc.json` with qpc paths
-2. Create a python3.8 environment `python3.8 -m venv env`
-3. Activate the environment `source env/bin/activate`
-4. Update pip, `pip install -U pip`
-4. Install dependencies from requirements.txt, `pip install -r requirements`
-4. Run `python finalapp.py`
-5. Open browser https://server_name_or_ip:7881
-6. Accept the certificate
-7. System Dependencies,
-   - AIC SDK should intalled,
+
+# Developer Applications on Cloud AI 100 using Transformers Library
+
+
+### Instructions to launch the app
+1. Clone the repo `git clone https://github.com/hupreti/efficient-transformers.git`
+2. Create `app_config.json` and update the information like given below
+3. Create a python3.8 environment `python3.8 -m venv env`
+4. Activate the environment `source env/bin/activate`
+5. Update pip, `pip install -U pip`
+6. Install dependencies from requirements.txt, `pip install -r requirements`
+7. Run `python app.py`
+8. Open browser https://server_name_or_ip:7881
+9. Accept the certificate
+10. System Dependencies
+    - `sudo apt-get install ffmpeg`
+    - same as the `efficient-transformers`
+
+
+### Interaction of Developer Application and QEfficient Transformers Library
+![Workflow](./img/full.png "Workflow of DevApp and QEfficient Interaction")
+
+
+### Format of "app_config.json"
+
+Please modify the `app_config.json` like below,
+- You can add n number of entry
+- Each entry will appear as list in dropdown
+```json
+{
+    "codellama" : {
+        "binary_path" : "",
+        "model_name" : "codellama/CodeLlama-34b-Instruct-hf",
+        "prompt_len" : 128,
+        "ctx_len" : 1024,
+        "device_id" : [0],
+    },
+    "mistral" : {
+        "binary_path" : "",
+        "model_name" : "mistralai/Mistral-7B-v0.1",
+        "prompt_len" : 128,
+        "ctx_len" : 1024,
+        "device_id" : [1],
+    },
+    "mpt"
+}
+```
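Since developers are expected to hand-write `app_config.json`, a quick validation pass can catch malformed files before the app loads them — note the example above is not yet valid JSON as committed (trailing commas and a dangling `"mpt"` key; the format is reworked in patch 07). A hypothetical helper, not part of any patch; the required-key set is an assumption taken from the example:

```python
import json

# Assumed schema, based on the README example above.
REQUIRED_KEYS = {"binary_path", "model_name", "prompt_len", "ctx_len", "device_id"}

def validate_app_config(path="app_config.json"):
    with open(path) as f:
        config = json.load(f)  # raises json.JSONDecodeError on malformed JSON
    for name, entry in config.items():
        missing = REQUIRED_KEYS - set(entry)
        if missing:
            raise KeyError(f"entry '{name}' is missing keys: {sorted(missing)}")
    return config

if __name__ == "__main__":
    print(f"{len(validate_app_config())} model entries look well-formed")
```

(A related quirk carried by the next patch: the comprehension `[x for x in app_config[each].keys() for each in app_config]` evaluates `app_config[each]` before `each` is bound, so it raises `NameError` at import time; patch 07 replaces it with helper calls.)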
From c90694a3eac805f8fa5cbd2be7732b2c5364cdec Mon Sep 17 00:00:00 2001
From: Himanshu Upreti
Date: Tue, 7 May 2024 09:24:18 +0000
Subject: [PATCH 05/18] Update License

Signed-off-by: Himanshu Upreti
---
 QEfficient/generation/LLMGenerator.py |  7 ++++
 app/app.py                            | 54 +++++++++++++--------------
 app/demo.css                          |  8 ++++
 app/qpc.json                          | 10 -----
 4 files changed, 40 insertions(+), 39 deletions(-)
 delete mode 100644 app/qpc.json

diff --git a/QEfficient/generation/LLMGenerator.py b/QEfficient/generation/LLMGenerator.py
index 824cbd0c1..518c7d1bb 100644
--- a/QEfficient/generation/LLMGenerator.py
+++ b/QEfficient/generation/LLMGenerator.py
@@ -1,3 +1,10 @@
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# -----------------------------------------------------------------------------
+
 import json
 import os
diff --git a/app/app.py b/app/app.py
index 4d9743ca8..e6cca31d7 100755
--- a/app/app.py
+++ b/app/app.py
@@ -1,9 +1,20 @@
+# -----------------------------------------------------------------------------
+#
+# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# -----------------------------------------------------------------------------
+import os
+import time
+import json
 import gradio as gr
-from typing import *
-import os
+from pathlib import Path
+from threading import Thread
+from typing import List, Tuple
+
 
-from LLMGenerator import LLMGenerator
+from QEfficient.generation import LLMGenerator
 
 from transformers import (
     AutoConfig,
@@ -13,12 +24,19 @@
     TextStreamer,
 )
 
-from threading import Thread
-import time
+from utils import (
+    get_list_of_model_task,
+    get_list_of_tasks,
+    get_list_of_models
+    )
 
-import json
+app_config = open("app_config.json")
+app_config = json.load(app_config)
 
-from pathlib import Path
+list_of_tasks = list(app_config.keys())
+list_of_models = []
+
+list_of_models = [x for x in app_config[each].keys() for each in app_config]
 
 f = open("qpc.json")
 codellama_data = json.load(f)["codellama"]
@@ -45,20 +63,6 @@
 """
 
-
-LICENSE = """
-
-Qualcomm Technologies, Inc. Proprietary -(c) 2023 Qualcomm Technologies, Inc. All rights reserved. -All data and information contained in or disclosed by this document are -confidential and proprietary information of Qualcomm Technologies, Inc., and -all rights therein are expressly reserved. By accepting this material, the -recipient agrees that this material and the information contained therein -are held in confidence and in trust and will not be used, copied, reproduced -in whole or in part, nor its contents revealed in any manner to others -without the express written permission of Qualcomm Technologies, Inc. -""" - # whisper = GreedyDecoder() list_of_models = ["mpt", "llama", "mistral", "codellama"] qeff_flags = set() @@ -67,14 +71,6 @@ text = "" -def run_whisper(audio): - if audio: - return whisper.stream("english", audio, None) - - gr.Info("Record/Upload the audio now") - return "" - - ctx_len = codellama_data["ctx_len"] prompt_len = codellama_data["prompt_len"] previous_current_ctx_len = 0 diff --git a/app/demo.css b/app/demo.css index 1b994563b..581f3477b 100644 --- a/app/demo.css +++ b/app/demo.css @@ -1,3 +1,11 @@ +/* +# ----------------------------------------------------------------------------- +# +# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- +*/ h1 { text-align: center; } diff --git a/app/qpc.json b/app/qpc.json deleted file mode 100644 index 14c120ed0..000000000 --- a/app/qpc.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "codellama" : { - "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [0], - "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" - } -} \ No newline at end of file From c191b221b7c7e66f00925adc3927ffb2867b28d6 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 09:27:43 +0000 Subject: [PATCH 06/18] Removed qpc_json, add utils Signed-off-by: Himanshu Upreti --- app/app_config.json | 43 +++++++++++++++++++++++++++++++++++++++++++ app/utils.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 app/app_config.json create mode 100644 app/utils.py diff --git a/app/app_config.json b/app/app_config.json new file mode 100644 index 000000000..e044acd15 --- /dev/null +++ b/app/app_config.json @@ -0,0 +1,43 @@ +{ + "text-generation" : { + "codellama" : { + "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", + "model_name" : "codellama/CodeLlama-34b-Instruct-hf", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [0], + "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" + }, + "mpt" : { + "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", + "model_name" : "codellama/CodeLlama-34b-Instruct-hf", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [0], + "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" + }, + "llama" : { + "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", + "model_name" : "codellama/CodeLlama-34b-Instruct-hf", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [0], + "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" + }, + "mistral" : { + "qpc_path" : 
"/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", + "model_name" : "codellama/CodeLlama-34b-Instruct-hf", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [0], + "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" + } + }, + "Question Answering" : { + }, + "Image Generation" : { + }, + "Multi-Modalities" : { + } + +} \ No newline at end of file diff --git a/app/utils.py b/app/utils.py new file mode 100644 index 000000000..42d050c84 --- /dev/null +++ b/app/utils.py @@ -0,0 +1,30 @@ +# ----------------------------------------------------------------------------- +# +# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# ----------------------------------------------------------------------------- + +import json + +def get_app_config(): + f= open("app_config.json") + app_config = json.load(f) + f.close() + return app_config + +def get_list_of_tasks(app_config = None): + if app_config is None: + app_config = get_app_config() + return list(app_config.keys()) + +def get_list_of_models(app_config = None): + if app_config is None: + app_config = get_app_config() + list_of_models = [] + for task in app_config: + for model in app_config[task].keys(): + list_of_models.append(model) + +def get_list_of_model_task(app_config, task): + return list(app_config[task].keys()) \ No newline at end of file From 2d6f914ec8bc63773c8fbc2665d1768a110b04a9 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 12:56:56 +0000 Subject: [PATCH 07/18] Add app_config, removed dead code, tested app Signed-off-by: Himanshu Upreti --- QEfficient/generation/LLMGenerator.py | 4 +- app/Readme.md | 53 +++++--- app/app.py | 170 ++++++++++++++------------ app/app_config.json | 12 +- app/utils.py | 52 ++++++-- 5 files changed, 182 insertions(+), 109 deletions(-) diff --git a/QEfficient/generation/LLMGenerator.py b/QEfficient/generation/LLMGenerator.py index 518c7d1bb..216804d62 100644 --- a/QEfficient/generation/LLMGenerator.py +++ b/QEfficient/generation/LLMGenerator.py @@ -21,7 +21,6 @@ import transformers -# from aic_infer import QAICInferenceSession from QEfficient.generation.aic_infer import QAICInferenceSession @@ -413,11 +412,10 @@ def test_llm( outputs = [] for text in model_aic.streamer: - # print(text, end=" ") + print(text, end=" ") outputs.append(text) # yield "".join(outputs) - print("".join(outputs)) t.join() diff --git a/app/Readme.md b/app/Readme.md index ba02834a5..09c663b14 100644 --- a/app/Readme.md +++ b/app/Readme.md @@ -30,26 +30,49 @@ ### Format of "app_config.json" Please modify the `app_config.json` like below, -- You can add n number of entry -- Each entry will appear as list in dropdown +- You can add N number of entry +- Each entry in app_config will appear as list in dropdown of tasks +- Each entry inside the `task` will appear as list in dropdown of models ```json { - "codellama" : { - "binary_path" : "", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [0], + "text-generation" : { + "codellama" : { + "qpc_path" : "", + "model_name" : "", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [0] + }, + "mpt" : { + "qpc_path" : "", + "model_name" : "", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [1] + }, + "llama" : { + "qpc_path" : "", + "model_name" : "", + "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [2] + }, + "mistral" : { + "qpc_path" : "", + "model_name" : "", 
+ "prompt_len" : 128, + "ctx_len" : 1024, + "device_id" : [3] + } }, - "mistral" : { - "binary_path" : "", - "model_name" : "mistralai/Mistral-7B-v0.1", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [1], + "question-answering" : { }, - "mpt" + "image-generation" : { + }, + "multi-modalities" : { + } + } ``` diff --git a/app/app.py b/app/app.py index e6cca31d7..d0ab434c5 100755 --- a/app/app.py +++ b/app/app.py @@ -14,7 +14,7 @@ from typing import List, Tuple -from QEfficient.generation import LLMGenerator +from QEfficient.generation.LLMGenerator import LLMGenerator from transformers import ( AutoConfig, @@ -24,36 +24,30 @@ TextStreamer, ) -from utils import ( - get_list_of_model_task, - get_list_of_tasks, - get_list_of_models - ) - -app_config = open("app_config.json") -app_config = json.load(app_config) - -list_of_tasks = list(app_config.keys()) -list_of_models = [] - -list_of_models = [x for x in app_config[each].keys() for each in app_config] +from utils import ( + get_list_of_models_task, + get_list_of_tasks, + get_list_of_models_all, + get_data, + get_generator, + load_models_artifacts, +) -f = open("qpc.json") -codellama_data = json.load(f)["codellama"] -f.close() +list_of_tasks = get_list_of_tasks() +list_of_models = get_list_of_models_all() +load_models_artifacts() -# title = """ -# # Qbuzz 2023 : GenerativeAI on Cloud AI100 +codellama = get_generator(list_of_tasks[0], "codellama") -# """ +assert codellama is not None title = """ # Developer Applications on Cloud AI 100 using Transformers Library """ -subtitle_left= """ +subtitle_left = """ ## Developer Application """ @@ -63,30 +57,19 @@ """ -# whisper = GreedyDecoder() -list_of_models = ["mpt", "llama", "mistral", "codellama"] qeff_flags = set() - -max_length = codellama_data["ctx_len"] -text = "" +summary_text = "" -ctx_len = codellama_data["ctx_len"] -prompt_len = codellama_data["prompt_len"] previous_current_ctx_len = 0 last_prompt = "" last_state_generation_ids = [] -# codellama = () -codellama = LLMGenerator( - qpc_path=codellama_data["qpc_path"], - model_name=codellama_data["model_name"], - device_id=codellama_data["device_id"], - prompt_len=prompt_len, - ctx_len=ctx_len, - streamer=TextIteratorStreamer, -) +def update_model(task, model): + new_obj = get_generator(task, model) + if new_obj is not None: + codellama = new_obj def get_prompt( @@ -106,37 +89,38 @@ def get_prompt( def run_qeff_check(model_name, progress=gr.Progress()): - global text, qeff_flags - text = "" - + global summary_text, qeff_flags + summary_text = "" + if model_name not in qeff_flags: qeff_flags.add(model_name) progress(0, desc="Downloading...") # time.sleep(1) for i in progress.tqdm(range(100), desc="Downloading..."): time.sleep(0.0005) - text += f"$ Downloaded {model_name} from cache directory\n" + summary_text += f"$ Downloaded {model_name} from cache directory\n" progress(0, desc="Optimizing and Compiling...") time.sleep(0.5) for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."): time.sleep(0.07) - - text += f"$ Optimized {model_name}\n" + + summary_text += f"$ Optimized {model_name}\n" # progress(0, desc="Compiling...") # for i in progress.tqdm(range(100), desc="Compiling..."): # time.sleep(0.2) - # text += f"Optimized {model_name}\n" + # summary_text += f"Optimized {model_name}\n" progress(0, desc="Generating Inference Container...") for i in progress.tqdm(range(100), desc="Generating Inference Container..."): - pass - - text += f"$ Compiled {model_name} and generated inference container\n" - - return 
Path('./img/box.png') + pass + + summary_text += f"$ Compiled {model_name} and generated inference container\n" + + return Path("./img/box.png") + def summary(): - return text + return summary_text def run_codellama(msg, chat_history, task, model): @@ -145,7 +129,7 @@ def run_codellama(msg, chat_history, task, model): # print(task, model) # output = "Hi there!" # return "", chat_history + [(msg, output)] - + # print(codellama) codellama.curr_cache_index = 0 codellama.generated_ids = [] @@ -159,8 +143,6 @@ def run_codellama(msg, chat_history, task, model): previous_current_ctx_len = codellama.curr_cache_index last_state_generation_ids = codellama.generated_ids - - if not check(): return msg, chat_history @@ -182,9 +164,10 @@ def run_codellama(msg, chat_history, task, model): t.join() + def stop(): codellama.stop_indicator = False - return + return def check(): @@ -203,30 +186,34 @@ def reset_cache_index(): gr.Warning(f"Regenerating output for last prompt") return + def run_clear(): global qeff_flags codellama.curr_cache_index = 0 codellama.generated_ids = [] - # gr.Warning(f"Cleared the Output") qeff_flags = set() - # print("codellama current cache", codellama.curr_cache_index) return + def clear_img(img): img.clear() # Combined Interface -# with gr.Blocks(css="demo.css") as demo: with gr.Blocks(theme=gr.themes.Soft(), css="demo.css") as demo: gr.Markdown(title) with gr.Row(): - with gr.Column(scale=7, variant='compact'): + with gr.Column(scale=7, variant="compact"): gr.Markdown(subtitle_left) - dropdown1 = gr.Dropdown(["QA", "Text-Generation", "Image Generation", "MultiModal"], value="Text-Generation", label="Developer Use Case", elem_id="task_id") + dropdown1 = gr.Dropdown( + list_of_tasks, + value=list_of_tasks[0], + label="Developer Use Case", + elem_id="task_id", + ) with gr.Row(): textbox = gr.Textbox( @@ -238,28 +225,40 @@ def clear_img(img): ) with gr.Row(): - chat = gr.Button("Launch on AI 100", variant="primary", size='sm') + chat = gr.Button("Launch on AI 100", variant="primary", size="sm") - clear = gr.Button("Reset", size='sm') + clear = gr.Button("Reset", size="sm") - stop_btn = gr.Button("Stop", size='sm') + stop_btn = gr.Button("Stop", size="sm") with gr.Column(): # with gr.Group(): chatbot = gr.Chatbot( - label="Response", - elem_id="chuanhu_chatbot", - ) - with gr.Column(variant='compact', scale=3, elem_id="qeff_id"): + label="Response", + elem_id="chuanhu_chatbot", + ) + with gr.Column(variant="compact", scale=3, elem_id="qeff_id"): gr.Markdown(subtitle_right) - dropdown2 = gr.Dropdown(list_of_models, value=list_of_models[-1], label="Pretrained model catalogue from Qualcomm Transformers Library", elem_id="model_id") - img = gr.Image(show_label = False, show_download_button = False, container = True, height=260, width=480, elem_id="qpc_id") + dropdown2 = gr.Dropdown( + list_of_models, + value=list_of_models[-1], + label="Pretrained model catalogue from Qualcomm Transformers Library", + elem_id="model_id", + ) + img = gr.Image( + show_label=False, + show_download_button=False, + container=True, + height=260, + width=480, + elem_id="qpc_id", + ) # "block-size: inherit;" qeff_output = gr.Textbox( - container = True, - show_label = False, - lines = 4, - ) + container=True, + show_label=False, + lines=4, + ) with gr.Row(): gr.Image( "./img/full.png", @@ -268,17 +267,32 @@ def clear_img(img): container=False, ) - - chat.click(run_qeff_check, inputs=[dropdown2], outputs=[img]).then(summary, inputs=[], outputs=[qeff_output]).then(run_codellama, inputs=[textbox, chatbot, dropdown1, 
dropdown2], outputs=[textbox, chatbot]) - - textbox.submit(run_qeff_check, inputs=[dropdown2], outputs=[img]).then(summary, inputs=[], outputs=[qeff_output]).then(run_codellama, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot]) + chat.click(update_model, inputs=[dropdown1, dropdown2], outputs=[]).then( + run_qeff_check, inputs=[dropdown2], outputs=[img] + ).then(summary, inputs=[], outputs=[qeff_output]).then( + run_codellama, + inputs=[textbox, chatbot, dropdown1, dropdown2], + outputs=[textbox, chatbot], + ) + + textbox.submit(update_model, inputs=[dropdown1, dropdown2], outputs=[]).then( + run_qeff_check, inputs=[dropdown2], outputs=[img] + ).then(summary, inputs=[], outputs=[qeff_output]).then( + run_codellama, + inputs=[textbox, chatbot, dropdown1, dropdown2], + outputs=[textbox, chatbot], + ) stop_btn.click(fn=stop) clear.click(lambda: None, None, chatbot, queue=False).then( lambda x: gr.update(value=""), [], [textbox] - ).then(lambda x: gr.update(value=""), [], [qeff_output]).then(fn=run_clear).then(lambda:None, None, img, queue=False) - dropdown2.change(lambda x: gr.update(value=""), [], [qeff_output]).then(lambda:None, None, img, queue=False) + ).then(lambda x: gr.update(value=""), [], [qeff_output]).then(fn=run_clear).then( + lambda: None, None, img, queue=False + ) + dropdown2.change(lambda x: gr.update(value=""), [], [qeff_output]).then( + lambda: None, None, img, queue=False + ) demo.queue() diff --git a/app/app_config.json b/app/app_config.json index e044acd15..b538c5676 100644 --- a/app/app_config.json +++ b/app/app_config.json @@ -13,7 +13,7 @@ "model_name" : "codellama/CodeLlama-34b-Instruct-hf", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [0], + "device_id" : [1], "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" }, "llama" : { @@ -21,7 +21,7 @@ "model_name" : "codellama/CodeLlama-34b-Instruct-hf", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [0], + "device_id" : [2], "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" }, "mistral" : { @@ -29,15 +29,15 @@ "model_name" : "codellama/CodeLlama-34b-Instruct-hf", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [0], + "device_id" : [3], "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" } }, - "Question Answering" : { + "question-answering" : { }, - "Image Generation" : { + "image-generation" : { }, - "Multi-Modalities" : { + "multi-modalities" : { } } \ No newline at end of file diff --git a/app/utils.py b/app/utils.py index 42d050c84..6d315c66b 100644 --- a/app/utils.py +++ b/app/utils.py @@ -6,25 +6,63 @@ # ----------------------------------------------------------------------------- import json +from QEfficient.generation.aic_infer import QAICInferenceSession +from QEfficient.generation.LLMGenerator import LLMGenerator +from transformers import TextIteratorStreamer + +generator_hub = {} + def get_app_config(): - f= open("app_config.json") + f = open("app_config.json") app_config = json.load(f) f.close() return app_config -def get_list_of_tasks(app_config = None): + +def get_list_of_tasks(app_config=None): if app_config is None: app_config = get_app_config() return list(app_config.keys()) -def get_list_of_models(app_config = None): + +def get_list_of_models_all(app_config=None): if app_config is None: app_config = get_app_config() - list_of_models = [] + list_of_models = [] for task in app_config: for model in app_config[task].keys(): list_of_models.append(model) - -def get_list_of_model_task(app_config, task): - 
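A hypothetical smoke test for the utils API this patch introduces (assumes it is run from the `app/` directory with a valid `app_config.json` whose `qpc_path` entries point at compiled QPC binaries on a Cloud AI 100 host):

```python
from utils import load_models_artifacts, get_list_of_tasks, get_generator

load_models_artifacts()          # builds one LLMGenerator per config entry
task = get_list_of_tasks()[0]    # e.g. "text-generation"
generator = get_generator(task, "codellama")
assert generator is not None, "codellama not present in app_config.json"
```

One quirk worth noting in app.py as patched: `update_model` assigns to a local `codellama` without declaring it `global`, so the module-level generator is not actually replaced when the dropdown selection changes.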
return list(app_config[task].keys()) \ No newline at end of file + return list_of_models + + +def get_list_of_models_task(app_config, task): + return list(app_config[task].keys()) + + +def get_data(task, model): + app_config = get_app_config() + return app_config[task][model] + + +def load_models_artifacts(): + app_config = get_app_config() + for task in app_config: + generator_hub[task] = {} + for model in app_config[task].keys(): + data = app_config[task][model] + generator_hub[task][model] = LLMGenerator( + qpc_path=data["qpc_path"], + model_name=data["model_name"], + device_id=data["device_id"], + prompt_len=data["prompt_len"], + ctx_len=data["ctx_len"], + streamer=TextIteratorStreamer, + ) + + +def get_generator(task, model): + if task in generator_hub.keys(): + if model in generator_hub[task].keys(): + return generator_hub[task][model] + return None From 7da5f7287349937c53738d327203bc0d2a3754b1 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 13:01:06 +0000 Subject: [PATCH 08/18] Clean code Signed-off-by: Himanshu Upreti --- app/app.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/app/app.py b/app/app.py index d0ab434c5..753a4cfa2 100755 --- a/app/app.py +++ b/app/app.py @@ -13,9 +13,6 @@ from threading import Thread from typing import List, Tuple - -from QEfficient.generation.LLMGenerator import LLMGenerator - from transformers import ( AutoConfig, AutoModelForCausalLM, @@ -125,11 +122,6 @@ def summary(): def run_codellama(msg, chat_history, task, model): - # DEBUG - # print(task, model) - # output = "Hi there!" - # return "", chat_history + [(msg, output)] - # print(codellama) codellama.curr_cache_index = 0 codellama.generated_ids = [] From 226e66c31e8d13bcf05b09b60012589801a63344 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 13:03:34 +0000 Subject: [PATCH 09/18] Remove app_config.json Removed app_config.json, instructions to create the .json is added in Readme.md Signed-off-by: Himanshu Upreti --- app/app_config.json | 43 ------------------------------------------- 1 file changed, 43 deletions(-) delete mode 100644 app/app_config.json diff --git a/app/app_config.json b/app/app_config.json deleted file mode 100644 index b538c5676..000000000 --- a/app/app_config.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "text-generation" : { - "codellama" : { - "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [0], - "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" - }, - "mpt" : { - "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [1], - "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" - }, - "llama" : { - "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [2], - "link" : "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" - }, - "mistral" : { - "qpc_path" : "/home/hupreti/model_zoo/qpc/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-14c-mxfp6", - "model_name" : "codellama/CodeLlama-34b-Instruct-hf", - "prompt_len" : 128, - "ctx_len" : 1024, - "device_id" : [3], - "link" : 
"https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf" - } - }, - "question-answering" : { - }, - "image-generation" : { - }, - "multi-modalities" : { - } - -} \ No newline at end of file From 83f9d2bdd1bbb74eed37d490d7729815a5d08d6a Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Tue, 7 May 2024 16:32:34 +0000 Subject: [PATCH 10/18] Update certificates and readme Remove cert.pem and key.pem and update Readme.md with instructions to generate them Signed-off-by: Himanshu Upreti --- app/Readme.md | 20 ++++++++++++-------- app/cert.pem | 35 ---------------------------------- app/key.pem | 52 --------------------------------------------------- 3 files changed, 12 insertions(+), 95 deletions(-) delete mode 100644 app/cert.pem delete mode 100644 app/key.pem diff --git a/app/Readme.md b/app/Readme.md index 09c663b14..beda9ca34 100644 --- a/app/Readme.md +++ b/app/Readme.md @@ -8,18 +8,22 @@ ### Instructions to launch the app -1. Clone the repo `git clone https://github.com/hupreti/efficient-transformers.git` -2. Create `app_config.json` and update the information like given below -3. Create a python3.8 environment `python3.8 -m venv env` -4. Activate the environment `source env/bin/activate` +1. System Dependencies + - `sudo apt-get install ffmpeg openssl` + - same as the `efficient-transformers` +2. Clone the repo `git clone https://github.com/hupreti/efficient-transformers.git` +3. Change directory `cd app` + - create `app_config.json` inside directory + - update the information in app_config.json like given below section +4. Create a python3.8 environment `python3.8 -m venv env` and activate the environment using `source env/bin/activate` 5. Update pip, `pip install -U pip` -6. Install dependencies from requirements.txt, `pip install -r requirements` +6. Install dependencies + - Install python requirements : `pip install -r requirements` + - Install Efficient Transformers Library : `pip install -e ..` + - Generate key and cert files : `openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365` 7. Run `python app.py` 8. Open browser https://server_name_or_ip:7881 9. Accept the certificate -10. 
System Dependencies - - `sudo apt-get install ffmpeg` - - same as the `efficient-transformers` ### Interaction of Developer Application and QEfficient Transformers Library diff --git a/app/cert.pem b/app/cert.pem deleted file mode 100644 index 63714f36e..000000000 --- a/app/cert.pem +++ /dev/null @@ -1,35 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGCzCCA/OgAwIBAgIUUQhsXysbsqfJ76nblXaXror6uNEwDQYJKoZIhvcNAQEL -BQAwgZQxCzAJBgNVBAYTAklOMQ4wDAYDVQQIDAVJbmRpYTESMBAGA1UEBwwJQmVu -Z2FsdXJ1MREwDwYDVQQKDAhRdWFsY29tbTEPMA0GA1UECwwGUXJhaXVtMRgwFgYD -VQQDDA9IaW1hbnNodSBVcHJldGkxIzAhBgkqhkiG9w0BCQEWFGh1cHJldGlAcXVh -bGNvbW0uY29tMB4XDTIzMTEzMDEwMzAyNloXDTI0MTEyOTEwMzAyNlowgZQxCzAJ -BgNVBAYTAklOMQ4wDAYDVQQIDAVJbmRpYTESMBAGA1UEBwwJQmVuZ2FsdXJ1MREw -DwYDVQQKDAhRdWFsY29tbTEPMA0GA1UECwwGUXJhaXVtMRgwFgYDVQQDDA9IaW1h -bnNodSBVcHJldGkxIzAhBgkqhkiG9w0BCQEWFGh1cHJldGlAcXVhbGNvbW0uY29t -MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAy8Saa6I4Rulg//8bBbRP -S+YdO9X/kTO4X3Zdg+Mzl69vw+fwsLB0caOAh6P2sQZ+Vrj7cYLh01A1WNEOySvI -P1z2xxDf/8L5VcfGF7/V5K5+yGL8Jf7b2G5Fn1Z1lKHgijYluHWZvJeEW05tc8Ti -WHSTYX1YTHmJ9TtvjZES5qCDrpXbZpcOnW7qbxehRSrzaaIaKWEevFcQXVUhA5wI -JbfZs/kXu5eGxzqg95YyjHlLk2lbGQYz7hlkjgM5D4ekALCbqLWFuNP6QqsSC/bi -tS2Sim1NVRwFuWgTa5V0t53RvNZkN75m1bPlQkOW4ROlifP9zsRHQA09UaHpRqUC -VueqorrOUOkpWcZ3e0YuPWAk/4xljZI4iw75XaSwiZ/BrUXDCNnHr+KLaUuI+AOI -9io2njAp7aDx8zLIU9m7fdTBnN6qvfCsL5zoI2hfzOrz+Ir8ValruwYmWH/hV/0P -xaT+zufZTqFEQsPCgXp4z0r+fVeUYwcAUnjD65/IPXbYvs/42LEkVZZjamPdohZk -nmiOObbX+a8637ANq8FRdn6VlpXNdkq1cWGn65xXSNbfFG6xcIV7w+oH03jEyWx1 -wtT9Rr+xVlf9vTgO2Ao9UoKpImVU9wiXmRQLZeGrez8S8YVJhLN6HaKIV3S9uS7U -ug0DpsSLpd1xDhGwzLEpDRUCAwEAAaNTMFEwHQYDVR0OBBYEFJoVccLyUC8VHude -akBh7kNF9qX/MB8GA1UdIwQYMBaAFJoVccLyUC8VHudeakBh7kNF9qX/MA8GA1Ud -EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAEM5sWg1djsoADKJ2JzgQFAp -jAx1yoAN90sHUiL9wZQzyttXqcZ2iCR2p1GapKUP8hhTdF6K4gEepSzQ9En37lDu -iJyufd0cqKTzGUqZCxgIRFPl9Kg5T0pMTZv4npvhTSZzuObGb1pSUnfT5jLhALP8 -WcRTEqqHJvBU8d0o7S1Px3FJZ0DWQWPy22pvdApoFc1X3wK1jecaiMuF8AZ1JWFb -KZSy9jU0sZqSrQPaep6s/vD4tHr9v0ppQiO4rAovaKw3p5odi7dihKYEZf5eWvVa -qKNtel+ix7mIIxa1g54MS8Ewi0bRcFiiehny1/GS8Pw1hp3qX7VeCe8gNvOLGyZ1 -mjJ/2ANpBZtaF3HnXH/NSW2Wp71NVLHhOYmRbqckhOfj1Jm8EUUxxsHxXN1vYfr4 -ENlOz7wAUt+r58YyQAgU/T5H1QhXrLugha2FLMClD0JPM3DL2aErHazGved7WBEm -HLISuv2HOczGKx+FWgT0o8bvk3AIN7KpVxXG1aRvkyB4B7xCix5ZXsfzqHNDK2Wd -hfNGW9TT+0pq8nrIwJyxUkfPKcn6Z5HUtXTzeTJqcpvpJb7JJg4Wmc8VO9lwHCkP -utzhBKn3bplKoDx/d22S6ASx3rq91iIDmAoxLvdCW5fzTKIOxLkgCrhnGwWDYNot -1LJQvKIc/8UTW+14aSEN ------END CERTIFICATE----- diff --git a/app/key.pem b/app/key.pem deleted file mode 100644 index 87f864285..000000000 --- a/app/key.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDLxJprojhG6WD/ -/xsFtE9L5h071f+RM7hfdl2D4zOXr2/D5/CwsHRxo4CHo/axBn5WuPtxguHTUDVY -0Q7JK8g/XPbHEN//wvlVx8YXv9Xkrn7IYvwl/tvYbkWfVnWUoeCKNiW4dZm8l4Rb -Tm1zxOJYdJNhfVhMeYn1O2+NkRLmoIOuldtmlw6dbupvF6FFKvNpohopYR68VxBd -VSEDnAglt9mz+Re7l4bHOqD3ljKMeUuTaVsZBjPuGWSOAzkPh6QAsJuotYW40/pC -qxIL9uK1LZKKbU1VHAW5aBNrlXS3ndG81mQ3vmbVs+VCQ5bhE6WJ8/3OxEdADT1R -oelGpQJW56qius5Q6SlZxnd7Ri49YCT/jGWNkjiLDvldpLCJn8GtRcMI2cev4otp -S4j4A4j2KjaeMCntoPHzMshT2bt91MGc3qq98KwvnOgjaF/M6vP4ivxVqWu7BiZY -f+FX/Q/FpP7O59lOoURCw8KBenjPSv59V5RjBwBSeMPrn8g9dti+z/jYsSRVlmNq -Y92iFmSeaI45ttf5rzrfsA2rwVF2fpWWlc12SrVxYafrnFdI1t8UbrFwhXvD6gfT -eMTJbHXC1P1Gv7FWV/29OA7YCj1SgqkiZVT3CJeZFAtl4at7PxLxhUmEs3odoohX -dL25LtS6DQOmxIul3XEOEbDMsSkNFQIDAQABAoICABwoLq5w3BXwadb2CWXgZJS4 -V5HNIGOQIgoO8R0oaBitxWPsyPWhbHXHghs8LPqJSyvwo84rp8jrbHO5b7Gcuy8G 
-SZN4YXuWFZ4k3NqrR4MyGZLXGsClVvuxQS2jZNwFAmIW2V+D4JqO1VtImxdEPYK7 -mSH/kBQBN28Lp36IbXQw+gFF8+tZNEvM59gyC7bjGH+gRr0xDm44GarL+KPnC7kq -joqwAPeLGLSvBU8xEgsXN7pG9K+gi8x69cop59idhawSu2+0paXEpPBRc0zz8J/Y -jEdUkzsPhDr6hrR3EonM5Q6TEYYiaVNeaG+fp1myvU0yyf+oX7AaJD8DgJiidJAO -RI7ZATnvu2qnSmsOEPY5W6FXlrVuE8uFtrrn3B4nr9x8r5df+2uxFc93Gz3bkaA3 -IRhUKsykFy0yhgubnGTfFCMOoltGajwCN6Nxex4D1L/v0JdZGTLJO/u0qw8+Qa2o -5lUmcIoDTWYyQvl5beU0jp4RAt/YKxbPNXFsDBdPggyqJn2qCTfz5rufH8AAQvKk -lwzZN6rUvcDNZih0SmMNTrR/fwVzSvgZozxSbw+Y5YKQQb9CZ5KBw8MoQ2++mPp8 -Ul0DlhM1tYQq985EAoi1wX3hUyu+uojZOr5Ee++ik1U0E+ilRGaQPXmN+7TeJPjX -5iRC/4b1bLjuHgpqrkQBAoIBAQD8N7u08X7PTSL9xyvnNXATSB8IWRJoptiiI8ut -RkeYNTgQBnO3nqBk1aJBeSsyTiBUvLJnJXLchai0hH0aUpjxFLyy5clB3Sn6AsCF -ZKaDxOJKdK9AOh64ytf1PBKLthyn6Rp7aOy0Go1rc3xPEm9wLa9GeKt/4iKptlPR -WJR49ktwpTAMZnrj2yiCvBxvN4LH77aGgm/d3/88w40W002lT1jO/Oqoinnaf50S -j9eP/08KOVkfI+H+AVwdD3Mgq3osr9YV3d78mizktQYAorO5uB7APzujPVGBx/P1 -qoh2cuv2ej7lfTCJPy24bGQ5c1jDETyWKccMch0MGVK/uIPtAoIBAQDO0t7zUPpU -DiAXsoFWCd4yH/d98PmrkeC+LxuBKsQu9F49iVu0LFlQcBu0uiz2yPnFKBIU6jk3 -MtAZT6bB5bbiEFISSaajzD1efwHWW4DkZFn3sfalVXVV/GFQj3NQTnfXbmj7NDbZ -gS95hPbMNZCiK6R3TP456xsqEhkfA+smx2vKbpNe5LISdNJy5r+L9H0cRkYfBOpC -L8B5bMf7X6Xq3UHl7HKxFO9BorX0Mwgeea4pjHVcMxuNUWiLdyVj97WC2dcqpLAD -ZYvJ92k5EpJWrJ1YnfTIrjFNaa2lm/hA2iXp6oDLz1yvVYzqwWu/R2bOC9oXqQ1D -Uqm0bvsNsFjJAoIBAEZCADfXTcvRYP3wjXpKmH3GiV558XbVg2MnNYV/HlJ4ALX2 -FiYAOg3ctOYoKrBIsxjnt4qkE+r6EJS4k2DRwWjmrennuLhwM1FBkv+Vmgz6u06i -Um9BwW0YlC/sG/5vLGxZstbN06IYA2z7LpWYPRKypzCqTJvDEoTUFWUrl7Baa4ll -eoz5vyHZ2t8QJ5uTiwrG68FsdF0KGL+EebjyA58ZC1QdgACTEn6bLdYN265xIENo -1jqmdD2dg1tVAm0HV3E7ShetRoB3MipDgviKWiZR87753lMKsbnFSWAVsQ3N1Y1D -YGHtDagtwT6ucvzN9Nvh4U6zNI9y+kR0iWh+rcECggEAHtBD7zRNV2CTX3R02IC3 -3fftQ5I3YFxng245RtE8UVKPB9QDGaDR3a4WMqLaOBpRPBJwPzV2OMcVQfAgBBtZ -V+zejWLztMulCQMbHvibB2v8C280NAW7aYgkZ4A3yesm563HbkWlNlqM3Cn3Kw1/ -8ulVo4CRD8f7UBVKMNjAnBJx+MLoqeagsSc+g2fledB/I0tE/zIYRKwgnPacBNOH -QKqeOsczfzaDPuAjA7LmrsOqMv5+LbJflHpgfQbPR0mMfWY/LFdE8fybCwgVYS9n -Jgqplp5R7eQl7EamXFoUbnUR2PvbHIJUaLryN7eBegoFQwV5BLVnCHS0W/F87dks -IQKCAQB0CwSDVpIt9ANCgPxLw2hfG6DwCSoi+ufPbKGV1F4amEfBEXndC+CdjDWJ -2ImJmzW22hffsl+oQ3di7+lGtN4XroesIa3/4GfgMJaTObjor5ktCkIa+zkCfqjn -GEr6aVRjQlHHj/yjQLWm4ZI267NmjSu7ObBLhh9aklapGrgyNgWLC7yABNEWuFzP -94GJDr0rr8fylAiTTjnPIky/mL2VWtlOBofxw1hLwPixb2kFsyHEOdhFOv2RxSof -TASXMJtaLadggw/Dg0l95xLjkkYiENfzEc2YxKAzCcEC753X5UgpmRDUrkkg9oMH -xKeZiEUN03ehkyBbdF6BhZ+9BO5/ ------END PRIVATE KEY----- From f9883f4557bf2bfb040de37a16f846b1c601911c Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Wed, 8 May 2024 06:57:26 +0000 Subject: [PATCH 11/18] Update requirements Signed-off-by: Himanshu Upreti --- app/requirements.txt | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/app/requirements.txt b/app/requirements.txt index 2e1940e38..bf971a9f6 100755 --- a/app/requirements.txt +++ b/app/requirements.txt @@ -1,9 +1,2 @@ -gradio==4.10.0 -huggingface_hub==0.19.4 -librosa==0.10.1 -noisereduce==3.0.0 -numpy==1.24.4 -Requests==2.31.0 -torch==2.1.1 -transformers==4.36 -protobuf==3.20 +gradio==4.29 + From 31aabf74308c55679b33c4efd7efc598df7b9df7 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Thu, 9 May 2024 15:46:19 +0000 Subject: [PATCH 12/18] Updated readme, updated llm_generator apis Signed-off-by: Himanshu Upreti --- .gitignore | 86 ------- .../{LLMGenerator.py => llm_generator.py} | 220 ++---------------- app/Readme.md | 42 ++-- app/app.py | 120 +++++----- app/requirements.txt | 3 +- app/utils.py | 40 +++- 6 files changed, 140 insertions(+), 371 deletions(-) delete mode 
100644 .gitignore rename QEfficient/generation/{LLMGenerator.py => llm_generator.py} (60%) diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 51a71b8cd..000000000 --- a/.gitignore +++ /dev/null @@ -1,86 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ diff --git a/QEfficient/generation/LLMGenerator.py b/QEfficient/generation/llm_generator.py similarity index 60% rename from QEfficient/generation/LLMGenerator.py rename to QEfficient/generation/llm_generator.py index 216804d62..52888f751 100644 --- a/QEfficient/generation/LLMGenerator.py +++ b/QEfficient/generation/llm_generator.py @@ -7,40 +7,20 @@ import json import os - -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -from time import perf_counter -from typing import Dict, List, Optional -import sys -from threading import Thread -from typing import * import torch import numpy as np import torch.nn as nn -import transformers - - -from QEfficient.generation.aic_infer import QAICInferenceSession - -io_files = [] - +from typing import Dict, List, Optional, Union +from threading import Thread -import io +from QEfficient.generation.cloud_infer import QAICInferenceSession from transformers import ( - AutoConfig, - AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, TextStreamer, -) - - -from transformers import ( AutoTokenizer, - AutoModelForCausalLM, LogitsProcessorList, MinLengthLogitsProcessor, TopKLogitsWarper, @@ -50,51 +30,6 @@ ) -def print_to_string(*args, **kwargs): - output = io.StringIO() - print(*args, file=output, **kwargs) - contents = output.getvalue() - output.close() - return contents - - -def write_io_files( - inputs: Dict[str, np.ndarray], - outputs: Dict[str, np.ndarray], - write_io_dir: str, - write_io_subdir: str, -): - io = [] - os.makedirs(f"{write_io_dir}/{write_io_subdir}", exist_ok=True) - - for iname, iarray in inputs.items(): - iarray.tofile(f"{write_io_dir}/{write_io_subdir}/{iname}.raw") - io.append( - { - "path": f"{write_io_subdir}/{iname}.raw", - "io-direction": "in", - "dims": iarray.shape, - "elem-size": iarray.itemsize, - "map-to": iname, - } - ) - - for oname, oarray in outputs.items(): - oarray.tofile(f"{write_io_dir}/{write_io_subdir}/{oname}.raw") - io.append( - { - "path": f"{write_io_subdir}/{oname}.raw", - "io-direction": "out", - "dims": oarray.shape, - "elem-size": oarray.itemsize, - "map-to": oname, - } - ) - - io_files.append(io) - with open(f"{write_io_dir}/aic_batch_io.json", "w") as fp: - json.dump({"IO-files": io_files}, 
fp, indent=True) - class LLMGenerator: def __init__( @@ -104,7 +39,7 @@ def __init__( device_id: Optional[List[int]] = [0], prompt_len: Optional[int] = 32, ctx_len: Optional[int] = 128, - streamer: Optional["BaseStreamer"] = None, + streamer: Optional[Union[TextStreamer, TextIteratorStreamer]] = None, logits_processor: Optional = None, logits_warper: Optional = None, ): @@ -146,38 +81,28 @@ def __init__( ] ) - # self.session.skip_buffers( - # set([x for x in self.session.input_names if x.startswith("past_")]) - # ) - # self.session.skip_buffers( - # set( - # [ - # x - # for x in self.session.output_names - # if x.endswith("_RetainedState") - # ] - # ) - # ) - except Exception as err: - raise RuntimeError("Unable to load qpc on device , {err}") + raise RuntimeError(f"Unable to load qpc on device , {err}") try: - tokenizer = transformers.AutoTokenizer.from_pretrained( - model_name, padding_side="left" + hf_token = None + if os.getenv("HF_TOKEN") is not None: + hf_token = os.getenv('HF_TOKEN') + tokenizer = AutoTokenizer.from_pretrained( + model_name, padding_side="left", hf_token=hf_token ) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id self.tokenizer = tokenizer except Exception as err: - raise RuntimeError("Unable to load tokenizer, {err}") + raise RuntimeError(f"Unable to load tokenizer, {err}") if streamer: self.streamer = streamer( self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=None ) - # instatiate deault logit processor and wrapper here + # instantiate deault logit processor and wrapper here # TODO : change default values with temperature and top_p # self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() # self.logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() @@ -186,7 +111,7 @@ def __init__( self.logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor( - 15, eos_token_id=2 + 15, eos_token_id=self.tokenizer.eos_token_id ), # model.generation_config.eos_token_id ] ) @@ -208,13 +133,15 @@ def _generate_next_token(self, outputs, sample=False): if sample: # pre-process distribution - # input_ids = torch.Tensor(self.generated_ids) input_ids = torch.Tensor(self.inputs["input_ids"]) next_token_logits = torch.from_numpy(logits) + + # Qeff is maintaining 1,1,VOCAB_SIZE + if len(next_token_logits.shape) == 3: + next_token_logits = next_token_logits.squeeze(0) # next_token_scores = self.logits_processor(input_ids, next_token_logits) next_token_scores = self.logits_warper(input_ids, next_token_logits) - # sample probs = nn.functional.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) next_token_id = next_tokens.numpy().reshape(1, 1) @@ -223,9 +150,6 @@ def _generate_next_token(self, outputs, sample=False): if len(logits.shape) == 2: logits = np.expand_dims(logits, 1) next_token_id = logits.argmax(2) - # print("next_token_id: ", next_token_id.shape) - - # print("next_token_id", next_token_id) return next_token_id @@ -242,10 +166,6 @@ def _stopping_criteria(self, next_token_id, max_new_tokens=None): len(self.generated_ids) > max_new_tokens, ) return True - # assert type(self.tokenizer.eot_id) == List - # assert type(self.tokenizer.eos_token_id) == List - - # breakpoint() if next_token_id == self.tokenizer.eos_token_id: print( @@ -286,7 +206,6 @@ def prepare_inputs_for_inference(self, prompt): return inputs, prompt_len def update_inputs_for_inference(self, inputs, next_token_id): - # breakpoint() 
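The retained-state bookkeeping in this file is easiest to see in isolation. Below is a self-contained sketch of the same decode-step update on plain NumPy dicts, with no QAIC session involved; the shapes and key names follow the surrounding code and are otherwise assumptions for the demo:

```python
import numpy as np

# Sketch of LLMGenerator's per-step input update: after prefill, cache_index
# jumps by the prompt length, and every decode step feeds back a single token.
def update_inputs_for_inference(inputs, next_token_id):
    _, prompt_len = inputs["input_ids"].shape
    inputs["cache_index"] += prompt_len      # KV cache advances past the tokens just fed
    inputs["input_ids"] = next_token_id      # decode steps consume one token at a time
    if "attention_mask" in inputs:
        # first decode step: new token's position = number of real prompt tokens
        inputs["position_ids"] = inputs.pop("attention_mask").sum(1, keepdims=True)
    else:
        inputs["position_ids"] += 1          # later steps just advance by one
    return inputs, inputs["input_ids"].shape[1]

inputs = {
    "input_ids": np.ones((1, 8), dtype=np.int64),       # padded prompt, prompt_len=8
    "attention_mask": np.ones((1, 8), dtype=np.int64),
    "cache_index": np.array([0], dtype=np.int64),
}
inputs, step_len = update_inputs_for_inference(inputs, np.array([[42]]))
assert step_len == 1 and inputs["cache_index"][0] == 8
```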
batch_size, prompt_len = inputs["input_ids"].shape inputs["cache_index"] += prompt_len @@ -304,8 +223,6 @@ def update_inputs_for_inference(self, inputs, next_token_id): def generate(self, prompt: str, sample: bool = False, max_new_tokens: int = None): session = self.session - # if self.write_io_dir: - # write_io_files(inputs, outputs, write_io_dir, "prefill") multi_turn_input_ids = [] @@ -352,9 +269,7 @@ def generate(self, prompt: str, sample: bool = False, max_new_tokens: int = None self.inputs, next_prompt_len = self.update_inputs_for_inference( self.inputs, next_token_id ) - # print(self.curr_cache_index) # for debug outputs = session.run(self.inputs) - # next_prompt_len from next iteration onwards is 1 self.curr_cache_index += next_prompt_len if self.streamer: @@ -378,101 +293,6 @@ def stream(self, prompt: str, sample: bool = False, max_new_tokens: int = None): yield "".join(outputs) print("".join(outputs)) - # return "".join(outputs) - - -def test_llm( - model_name: str, - prompt_len: int, - ctx_len: int, - qpc: str, - prompt: str, - session: QAICInferenceSession = None, - stream: bool = True, - sample: bool = False, - device_id: List[int] = [0], - write_io_dir: Optional[str] = None, -): - # python LLMGenerator.py --model-name codellama/CodeLlama-13b-Instruct-hf --prompt-len 128 --ctx-len 1024 --qpc /home/hupreti/demo/CodeLlama-13b-Instruct-hf-kv-128pl-1024cl-16c-mxfp6 --prompt "Complete the paragraph with 2048 tokens: My name is Himanshu, and" 2>&1 | tee output.log - - # print(prompt) - - # working with TextStreamer - # model_aic = LLMGenerator(qpc, model_name, device_id, prompt_len, ctx_len, - # streamer = TextStreamer) - - model_aic = LLMGenerator( - qpc, model_name, device_id, prompt_len, ctx_len, streamer=TextStreamer - ) - - generate_kwargs = {"prompt": prompt, "sample": sample, "max_new_tokens": ctx_len} - - t = Thread(target=model_aic.generate, kwargs=generate_kwargs) - t.start() - - outputs = [] - for text in model_aic.streamer: - print(text, end=" ") - outputs.append(text) - - # yield "".join(outputs) - - t.join() - - # Uncomment below to test mulit-turn - # generate_kwargs = { - # "prompt" : "Indian Cricket Team. 
But 2014", - # "sample" : False, - # "max_new_tokens" : 128 - # } - - # t = Thread(target=model_aic.generate, kwargs=generate_kwargs) - # t.start() - - # t.join() - - # print(generate_kwargs["prompt"]) - # outputs = [] - # for text in model_aic.streamer: - # # print(text) - # outputs.append(text) - - # # yield "".join(outputs) - # print("".join(outputs)) - - return - - -if __name__ == "__main__": - import argparse - - argp = argparse.ArgumentParser() - argp.add_argument("--model-name", required=True, help="Model name to run") - argp.add_argument("--prompt-len", type=int, default=128, help="Prompt length") - argp.add_argument("--ctx-len", type=int, default=512, help="Context length") - argp.add_argument("--qpc", required=True, help="Compiled binary QPC") - argp.add_argument( - "--prompt", - default="My name is Sarah and I am", - help="Input prompt to generate for", - ) - argp.add_argument( - "--no-stream", - action="store_false", - dest="stream", - help="Don't stream output text", - ) - argp.add_argument( - "--device_id", - default=[0], - type=lambda device_ids: [int(x) for x in device_ids.split(",")], - help="QAIC device ids (comma-separated)", - ) - argp.add_argument("--write-io-dir", help="Directory to write inputs/outputs into") - argp.add_argument( - "--sample", action="store_true", dest="sample", help="Use sampling" - ) - - args = argp.parse_args() - # main(**vars(args)) - test_llm(**vars(args)) + + def apply_chat_template(self, chat): + return self.tokenizer.apply_chat_template(chat, tokenize=False) \ No newline at end of file diff --git a/app/Readme.md b/app/Readme.md index beda9ca34..a29b69f5a 100644 --- a/app/Readme.md +++ b/app/Readme.md @@ -11,19 +11,23 @@ 1. System Dependencies - `sudo apt-get install ffmpeg openssl` - same as the `efficient-transformers` -2. Clone the repo `git clone https://github.com/hupreti/efficient-transformers.git` +2. Clone the repo `git clone https://github.com/quic/efficient-transformers.git` 3. Change directory `cd app` - - create `app_config.json` inside directory - - update the information in app_config.json like given below section -4. Create a python3.8 environment `python3.8 -m venv env` and activate the environment using `source env/bin/activate` -5. Update pip, `pip install -U pip` -6. Install dependencies - - Install python requirements : `pip install -r requirements` - - Install Efficient Transformers Library : `pip install -e ..` - - Generate key and cert files : `openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365` -7. Run `python app.py` -8. Open browser https://server_name_or_ip:7881 -9. Accept the certificate + - create `app_config.json` inside directory + - update the information in app_config.json like given below section + - if you have hf-token for accessing model that requires login, please create `.env` file and add below line + ``` + HF_TOKEN= + ``` +4. Update pip, `pip install -U pip` +5. Install dependencies + - Install python requirements : `pip install -r requirements` + - Install Efficient Transformers Library : `pip install -e ..` + - Generate key and cert files : `openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes` + - Fill details in interactive session to generate keys +6. Run `python app.py` +7. Open browser https://server_name_or_ip:7881 +8. 
Accept the certificate ### Interaction of Developer Application and QEfficient Transformers Library @@ -35,35 +39,37 @@ Please modify the `app_config.json` like below, - You can add N number of entry -- Each entry in app_config will appear as list in dropdown of tasks -- Each entry inside the `task` will appear as list in dropdown of models +- Each entry name in app_config will appear as list in dropdown of tasks +- Each entry name inside the `task` will appear as list in dropdown of models +- `qpc_path` : can be either kept empty / path where you want your compiled binary to be after compilation +- `model_card` : required HF model card name for each dropdown entry ```json { "text-generation" : { "codellama" : { - "qpc_path" : "", + "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, "device_id" : [0] }, "mpt" : { - "qpc_path" : "", + "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, "device_id" : [1] }, "llama" : { - "qpc_path" : "", + "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, "device_id" : [2] }, "mistral" : { - "qpc_path" : "", + "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, diff --git a/app/app.py b/app/app.py index 753a4cfa2..584aadb4c 100755 --- a/app/app.py +++ b/app/app.py @@ -12,6 +12,7 @@ from pathlib import Path from threading import Thread from typing import List, Tuple +from dotenv import load_dotenv from transformers import ( AutoConfig, @@ -28,45 +29,51 @@ get_data, get_generator, load_models_artifacts, + get_app_config ) + +# Load .env file +load_dotenv() + +# Load app config +app_config = get_app_config() list_of_tasks = get_list_of_tasks() list_of_models = get_list_of_models_all() load_models_artifacts() -codellama = get_generator(list_of_tasks[0], "codellama") - -assert codellama is not None +# Global variable for book keeping +qeff_generator_model = None +qeff_flags = set() +summary_text = "" +previous_current_ctx_len = 0 +last_prompt = "" +last_state_generation_ids = [] +# main title of app title = """ # Developer Applications on Cloud AI 100 using Transformers Library - """ - +# title for left container of app subtitle_left = """ -## Developer Application - +## Developer Application """ - +# title for right container of app subtitle_right = """ ## Optimizing and Compiling Model using Qualcomm Transformers Library """ -qeff_flags = set() -summary_text = "" -previous_current_ctx_len = 0 -last_prompt = "" -last_state_generation_ids = [] - def update_model(task, model): + global qeff_generator_model new_obj = get_generator(task, model) if new_obj is not None: - codellama = new_obj + qeff_generator_model = new_obj + print("Updating qeff generator, ", qeff_generator_model.model_name) def get_prompt( @@ -84,35 +91,45 @@ def get_prompt( texts.append(f"{message} [/INST]") return "".join(texts) + -def run_qeff_check(model_name, progress=gr.Progress()): + +def run_qeff_check(task, model_name, progress=gr.Progress()): global summary_text, qeff_flags summary_text = "" + + model_info = get_data(task, model_name) if model_name not in qeff_flags: qeff_flags.add(model_name) + + # TODO : call QEfficient transform api + # TODO : take model_info as args progress(0, desc="Downloading...") - # time.sleep(1) for i in progress.tqdm(range(100), desc="Downloading..."): time.sleep(0.0005) summary_text += f"$ Downloaded {model_name} from cache directory\n" + # TODO : call QEfficient compile api + # TODO : take model info as arguments + # TODO : we can update the outputs from execute api in 
summary text + # TODO : app_config[task][model_name]['qpc_path'] = progress(0, desc="Optimizing and Compiling...") time.sleep(0.5) for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."): time.sleep(0.07) summary_text += f"$ Optimized {model_name}\n" - # progress(0, desc="Compiling...") - # for i in progress.tqdm(range(100), desc="Compiling..."): - # time.sleep(0.2) - # summary_text += f"Optimized {model_name}\n" + progress(0, desc="Generating Inference Container...") for i in progress.tqdm(range(100), desc="Generating Inference Container..."): pass summary_text += f"$ Compiled {model_name} and generated inference container\n" - + + update_model(task, model_name) + print(qeff_generator_model.model_name) + return Path("./img/box.png") @@ -120,20 +137,20 @@ def summary(): return summary_text -def run_codellama(msg, chat_history, task, model): - - codellama.curr_cache_index = 0 - codellama.generated_ids = [] +def infer_prompt(msg, chat_history, task, model): + global last_prompt, previous_current_ctx_len, last_state_generation_ids + + qeff_generator_model.curr_cache_index = 0 + qeff_generator_model.generated_ids = [] - if codellama.curr_cache_index >= codellama.ctx_len - 1: - codellama.curr_cache_index = 0 + if qeff_generator_model.curr_cache_index >= qeff_generator_model.ctx_len - 1: + qeff_generator_model.curr_cache_index = 0 - codellama.curr_cache_index = 0 - codellama.stop_indicator = True - global last_prompt, previous_current_ctx_len, last_state_generation_ids + qeff_generator_model.curr_cache_index = 0 + qeff_generator_model.stop_indicator = True last_prompt = msg - previous_current_ctx_len = codellama.curr_cache_index - last_state_generation_ids = codellama.generated_ids + previous_current_ctx_len = qeff_generator_model.curr_cache_index + last_state_generation_ids = qeff_generator_model.generated_ids if not check(): return msg, chat_history @@ -147,10 +164,10 @@ def run_codellama(msg, chat_history, task, model): "max_new_tokens": None, } - t = Thread(target=codellama.generate, kwargs=generate_args) + t = Thread(target=qeff_generator_model.generate, kwargs=generate_args) t.start() - for each in codellama.streamer: + for each in qeff_generator_model.streamer: output += each yield "", chat_history + [(msg, output)] @@ -158,31 +175,24 @@ def run_codellama(msg, chat_history, task, model): def stop(): - codellama.stop_indicator = False + qeff_generator_model.stop_indicator = False return def check(): - if codellama.curr_cache_index >= codellama.ctx_len - 1: + if qeff_generator_model.curr_cache_index >= qeff_generator_model.ctx_len - 1: gr.Warning( - f"Reached max token generation limit of {codellama.ctx_len}, Kindly press clear!" + f"Reached max token generation limit of {qeff_generator_model.ctx_len}, Kindly press clear!" 
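The worker-thread-plus-streamer handoff in this hunk is the core of the UI loop: generation runs on a background thread while the Gradio callback drains the streamer. A runnable sketch of the same pattern follows; the tokenizer and the token source are stand-ins, only the `TextIteratorStreamer` protocol matters here:

```python
import numpy as np
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer for the demo
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

def generate(prompt):
    ids = tokenizer(prompt, return_tensors="np")["input_ids"]
    streamer.put(ids)                 # prompt pass; dropped because skip_prompt=True
    for tok in ids[0]:                # stand-in for newly sampled token ids
        streamer.put(np.array([[tok]]))
    streamer.end()                    # unblocks the consuming iterator

t = Thread(target=generate, kwargs={"prompt": "hello from Cloud AI 100"})
t.start()
for text in streamer:                 # yields decoded text chunks as they arrive
    print(text, end="")
t.join()
```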
) - codellama.curr_cache_index = 0 + qeff_generator_model.curr_cache_index = 0 return False return True -def reset_cache_index(): - codellama.curr_cache_index = previous_current_ctx_len - codellama.generated_ids = last_state_generation_ids - gr.Warning(f"Regenerating output for last prompt") - return - - def run_clear(): global qeff_flags - codellama.curr_cache_index = 0 - codellama.generated_ids = [] + qeff_generator_model.curr_cache_index = 0 + qeff_generator_model.generated_ids = [] qeff_flags = set() return @@ -259,18 +269,18 @@ def clear_img(img): container=False, ) - chat.click(update_model, inputs=[dropdown1, dropdown2], outputs=[]).then( - run_qeff_check, inputs=[dropdown2], outputs=[img] + chat.click( + run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img] ).then(summary, inputs=[], outputs=[qeff_output]).then( - run_codellama, + infer_prompt, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot], ) - textbox.submit(update_model, inputs=[dropdown1, dropdown2], outputs=[]).then( - run_qeff_check, inputs=[dropdown2], outputs=[img] + textbox.submit( + run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img] ).then(summary, inputs=[], outputs=[qeff_output]).then( - run_codellama, + infer_prompt, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot], ) @@ -290,7 +300,7 @@ def clear_img(img): demo.queue() demo.launch( server_name="0.0.0.0", - server_port=7881, + server_port=8085, ssl_certfile="cert.pem", ssl_keyfile="key.pem", ssl_verify=False, diff --git a/app/requirements.txt b/app/requirements.txt index bf971a9f6..cee88ba2d 100755 --- a/app/requirements.txt +++ b/app/requirements.txt @@ -1,2 +1,3 @@ gradio==4.29 - +json5 +python-dotenv \ No newline at end of file diff --git a/app/utils.py b/app/utils.py index 6d315c66b..c6d83666c 100644 --- a/app/utils.py +++ b/app/utils.py @@ -5,14 +5,16 @@ # # ----------------------------------------------------------------------------- -import json -from QEfficient.generation.aic_infer import QAICInferenceSession -from QEfficient.generation.LLMGenerator import LLMGenerator +import json5 as json +from QEfficient.generation.cloud_infer import QAICInferenceSession +from QEfficient.generation.llm_generator import LLMGenerator + from transformers import TextIteratorStreamer generator_hub = {} + def get_app_config(): f = open("app_config.json") app_config = json.load(f) @@ -51,18 +53,34 @@ def load_models_artifacts(): generator_hub[task] = {} for model in app_config[task].keys(): data = app_config[task][model] - generator_hub[task][model] = LLMGenerator( - qpc_path=data["qpc_path"], - model_name=data["model_name"], - device_id=data["device_id"], - prompt_len=data["prompt_len"], - ctx_len=data["ctx_len"], - streamer=TextIteratorStreamer, - ) + try : + generator_hub[task][model] = LLMGenerator( + qpc_path=data["qpc_path"], + model_name=data["model_name"], + device_id=data["device_id"], + prompt_len=data["prompt_len"], + ctx_len=data["ctx_len"], + streamer=TextIteratorStreamer, + ) + except Exception as err: + print(err) + generator_hub[task][model] = None + + print(generator_hub) def get_generator(task, model): if task in generator_hub.keys(): if model in generator_hub[task].keys(): + if generator_hub[task][model] is None: + #todo + generator_hub[task][model] = LLMGenerator( + qpc_path=data["qpc_path"], + model_name=data["model_name"], + device_id=data["device_id"], + prompt_len=data["prompt_len"], + ctx_len=data["ctx_len"], + streamer=TextIteratorStreamer, + ) return generator_hub[task][model] return 
None From 66ced10d7f8def4e6714f17aab1c60b11834eff3 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Thu, 9 May 2024 16:05:52 +0000 Subject: [PATCH 13/18] Apply chat template with system prompt Signed-off-by: Himanshu Upreti --- app/app.py | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/app/app.py b/app/app.py index 584aadb4c..42a6224d5 100755 --- a/app/app.py +++ b/app/app.py @@ -13,6 +13,7 @@ from threading import Thread from typing import List, Tuple from dotenv import load_dotenv +from jinja2.exceptions import TemplateError from transformers import ( AutoConfig, @@ -66,8 +67,6 @@ """ - - def update_model(task, model): global qeff_generator_model new_obj = get_generator(task, model) @@ -76,22 +75,22 @@ def update_model(task, model): print("Updating qeff generator, ", qeff_generator_model.model_name) -def get_prompt( - message: str, chat_history: List[Tuple[str, str]], system_prompt: str -) -> str: - texts = [f"[INST] <>\n{system_prompt}\n<>\n\n"] - # The first user input is _not_ stripped - do_strip = False - if chat_history: - for user_input, response in chat_history: - user_input = user_input.strip() if do_strip else user_input - do_strip = True - texts.append(f"{user_input} [/INST] {response.strip()} [INST] ") - message = message.strip() if do_strip else message - texts.append(f"{message} [/INST]") - return "".join(texts) +def get_prompt(message : str, system_prompt:str): + prompt = message + chat = [] + if system_prompt: + chat.append({"role":"system", "content":f"{system_prompt}"}) + chat.append({"role":"user", "content":f"{message}"}) - + try : + prompt = qeff_generator_model.tokenizer.apply_chat_template(chat, tokenize=False) + except TemplateError: + prompt = qeff_generator_model.tokenizer.apply_chat_template(chat[1:], tokenize=False) + except Exception as err: + print(err) + + return prompt + def run_qeff_check(task, model_name, progress=gr.Progress()): @@ -159,7 +158,7 @@ def infer_prompt(msg, chat_history, task, model): yield "", chat_history + [(msg, output)] generate_args = { - "prompt": get_prompt(msg, None, "Give an brief answer."), + "prompt": get_prompt(msg, "Give an brief answer."), "sample": True, "max_new_tokens": None, } From 3d096f10652ccfc50d713af1977550ef47babfca Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Thu, 9 May 2024 16:35:49 +0000 Subject: [PATCH 14/18] Fixes for llama3, mistral and codellama Signed-off-by: Himanshu Upreti --- QEfficient/generation/llm_generator.py | 9 ++++++++- app/app.py | 2 +- app/utils.py | 5 ++--- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/QEfficient/generation/llm_generator.py b/QEfficient/generation/llm_generator.py index 52888f751..a32499b27 100644 --- a/QEfficient/generation/llm_generator.py +++ b/QEfficient/generation/llm_generator.py @@ -173,6 +173,13 @@ def _stopping_criteria(self, next_token_id, max_new_tokens=None): "next_token_id == self.tokenizer.eos_token_id", ) return True + + if next_token_id == self.tokenizer.convert_tokens_to_ids("<|eot_id|>"): + print( + next_token_id == self.tokenizer.eos_token_id, + "next_token_id == self.tokenizer.eos_token_id", + ) + return True return False @@ -295,4 +302,4 @@ def stream(self, prompt: str, sample: bool = False, max_new_tokens: int = None): print("".join(outputs)) def apply_chat_template(self, chat): - return self.tokenizer.apply_chat_template(chat, tokenize=False) \ No newline at end of file + return self.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) \ No 
newline at end of file diff --git a/app/app.py b/app/app.py index 42a6224d5..66ba08b1b 100755 --- a/app/app.py +++ b/app/app.py @@ -88,7 +88,7 @@ def get_prompt(message : str, system_prompt:str): prompt = qeff_generator_model.tokenizer.apply_chat_template(chat[1:], tokenize=False) except Exception as err: print(err) - + return prompt diff --git a/app/utils.py b/app/utils.py index c6d83666c..b64e26d78 100644 --- a/app/utils.py +++ b/app/utils.py @@ -13,8 +13,6 @@ generator_hub = {} - - def get_app_config(): f = open("app_config.json") app_config = json.load(f) @@ -70,10 +68,11 @@ def load_models_artifacts(): def get_generator(task, model): + app_config = get_app_config() if task in generator_hub.keys(): if model in generator_hub[task].keys(): if generator_hub[task][model] is None: - #todo + data = app_config[task][model] generator_hub[task][model] = LLMGenerator( qpc_path=data["qpc_path"], model_name=data["model_name"], From 19fe5e765b7066b62233b5a8194b7be5b85fc734 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Thu, 9 May 2024 16:42:25 +0000 Subject: [PATCH 15/18] Update certificates and clean code Signed-off-by: Himanshu Upreti --- QEfficient/generation/llm_generator.py | 15 +++----- app/Readme.md | 2 +- app/app.py | 47 +++++++++++++------------- app/demo.css | 2 +- app/utils.py | 7 ++-- 5 files changed, 34 insertions(+), 39 deletions(-) diff --git a/QEfficient/generation/llm_generator.py b/QEfficient/generation/llm_generator.py index a32499b27..18e726021 100644 --- a/QEfficient/generation/llm_generator.py +++ b/QEfficient/generation/llm_generator.py @@ -1,6 +1,6 @@ # ----------------------------------------------------------------------------- # -# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- @@ -102,17 +102,14 @@ def __init__( self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=None ) - # instantiate deault logit processor and wrapper here + # instantiate default logit processor and wrapper here # TODO : change default values with temperature and top_p - # self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() - # self.logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() - # instantiate logits processors self.logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor( 15, eos_token_id=self.tokenizer.eos_token_id - ), # model.generation_config.eos_token_id + ), ] ) @@ -136,10 +133,9 @@ def _generate_next_token(self, outputs, sample=False): input_ids = torch.Tensor(self.inputs["input_ids"]) next_token_logits = torch.from_numpy(logits) - # Qeff is maintaining 1,1,VOCAB_SIZE + # Qeff is maintaining [1,1,VOCAB_SIZE] if len(next_token_logits.shape) == 3: next_token_logits = next_token_logits.squeeze(0) - # next_token_scores = self.logits_processor(input_ids, next_token_logits) next_token_scores = self.logits_warper(input_ids, next_token_logits) probs = nn.functional.softmax(next_token_scores, dim=-1) @@ -154,7 +150,6 @@ def _generate_next_token(self, outputs, sample=False): return next_token_id def _stopping_criteria(self, next_token_id, max_new_tokens=None): - # if self.curr_cache_index > self.ctx_len: if self.curr_cache_index >= self.ctx_len - 1: print("self.curr_cache_index reach limit") return True @@ -195,8 +190,6 @@ def prepare_inputs_for_inference(self, prompt): ctx_len = self.ctx_len - # assert ctx_len > prompt_len, "Model cannot support prompt_len > ctx_len" - inputs["position_ids"] = (np.cumsum(inputs["attention_mask"], 1) - 1) * inputs[ "attention_mask" ] diff --git a/app/Readme.md b/app/Readme.md index a29b69f5a..2b36673a8 100644 --- a/app/Readme.md +++ b/app/Readme.md @@ -1,6 +1,6 @@ diff --git a/app/app.py b/app/app.py index 66ba08b1b..8b8dce192 100755 --- a/app/app.py +++ b/app/app.py @@ -1,6 +1,6 @@ # ----------------------------------------------------------------------------- # -# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- @@ -30,7 +30,7 @@ get_data, get_generator, load_models_artifacts, - get_app_config + get_app_config, ) @@ -75,33 +75,36 @@ def update_model(task, model): print("Updating qeff generator, ", qeff_generator_model.model_name) -def get_prompt(message : str, system_prompt:str): +def get_prompt(message: str, system_prompt: str): prompt = message chat = [] if system_prompt: - chat.append({"role":"system", "content":f"{system_prompt}"}) - chat.append({"role":"user", "content":f"{message}"}) + chat.append({"role": "system", "content": f"{system_prompt}"}) + chat.append({"role": "user", "content": f"{message}"}) - try : - prompt = qeff_generator_model.tokenizer.apply_chat_template(chat, tokenize=False) + try: + prompt = qeff_generator_model.tokenizer.apply_chat_template( + chat, tokenize=False + ) except TemplateError: - prompt = qeff_generator_model.tokenizer.apply_chat_template(chat[1:], tokenize=False) + prompt = qeff_generator_model.tokenizer.apply_chat_template( + chat[1:], tokenize=False + ) except Exception as err: print(err) - + return prompt - def run_qeff_check(task, model_name, progress=gr.Progress()): global summary_text, qeff_flags summary_text = "" - + model_info = get_data(task, model_name) if model_name not in qeff_flags: qeff_flags.add(model_name) - + # TODO : call QEfficient transform api # TODO : take model_info as args progress(0, desc="Downloading...") @@ -119,16 +122,15 @@ def run_qeff_check(task, model_name, progress=gr.Progress()): summary_text += f"$ Optimized {model_name}\n" - progress(0, desc="Generating Inference Container...") for i in progress.tqdm(range(100), desc="Generating Inference Container..."): pass summary_text += f"$ Compiled {model_name} and generated inference container\n" - + update_model(task, model_name) print(qeff_generator_model.model_name) - + return Path("./img/box.png") @@ -138,7 +140,7 @@ def summary(): def infer_prompt(msg, chat_history, task, model): global last_prompt, previous_current_ctx_len, last_state_generation_ids - + qeff_generator_model.curr_cache_index = 0 qeff_generator_model.generated_ids = [] @@ -268,17 +270,17 @@ def clear_img(img): container=False, ) - chat.click( - run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img] - ).then(summary, inputs=[], outputs=[qeff_output]).then( + chat.click(run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img]).then( + summary, inputs=[], outputs=[qeff_output] + ).then( infer_prompt, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot], ) - textbox.submit( - run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img] - ).then(summary, inputs=[], outputs=[qeff_output]).then( + textbox.submit(run_qeff_check, inputs=[dropdown1, dropdown2], outputs=[img]).then( + summary, inputs=[], outputs=[qeff_output] + ).then( infer_prompt, inputs=[textbox, chatbot, dropdown1, dropdown2], outputs=[textbox, chatbot], @@ -305,4 +307,3 @@ def clear_img(img): ssl_verify=False, allowed_paths=[f"{os.getcwd()}"], ) -# launch() diff --git a/app/demo.css b/app/demo.css index 581f3477b..0bed50d1c 100644 --- a/app/demo.css +++ b/app/demo.css @@ -1,7 +1,7 @@ /* # ----------------------------------------------------------------------------- # -# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
# SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- diff --git a/app/utils.py b/app/utils.py index b64e26d78..cb4e9ff64 100644 --- a/app/utils.py +++ b/app/utils.py @@ -1,6 +1,6 @@ # ----------------------------------------------------------------------------- # -# Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. +# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # ----------------------------------------------------------------------------- @@ -13,6 +13,7 @@ generator_hub = {} + def get_app_config(): f = open("app_config.json") app_config = json.load(f) @@ -51,7 +52,7 @@ def load_models_artifacts(): generator_hub[task] = {} for model in app_config[task].keys(): data = app_config[task][model] - try : + try: generator_hub[task][model] = LLMGenerator( qpc_path=data["qpc_path"], model_name=data["model_name"], @@ -63,7 +64,7 @@ def load_models_artifacts(): except Exception as err: print(err) generator_hub[task][model] = None - + print(generator_hub) From d9c6f336aac2d81b593b8ae2606dd26ca1972940 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Mon, 13 May 2024 07:38:04 +0000 Subject: [PATCH 16/18] Added qeff infer api Signed-off-by: Himanshu Upreti --- QEfficient/cloud/infer.py | 14 ++++--- QEfficient/generation/llm_generator.py | 20 ++++------ app/Readme.md | 12 ++++-- app/app.py | 51 +++++++++++--------------- app/utils.py | 11 ++++-- 5 files changed, 54 insertions(+), 54 deletions(-) diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py index 7fb91610b..75c69d484 100644 --- a/QEfficient/cloud/infer.py +++ b/QEfficient/cloud/infer.py @@ -45,7 +45,7 @@ def onnx_exists(onnx_file_path: str) -> bool: ) -def main( +def infer_api( model_name: str, num_cores: int, prompt: str, @@ -60,6 +60,7 @@ def main( device_group: List[int] = [ 0, ], + execute : bool = True ) -> None: # Make model_card_dir = os.path.join(QEFF_MODELS_DIR, str(model_name)) @@ -110,8 +111,9 @@ def main( assert ( generated_qpc_path == qpc_dir_path ), f"QPC files were generated at an unusual location, expected {qpc_dir_path}; got {generated_qpc_path}" - latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt) - return + if execute: + latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt) + return generated_qpc_path ############################################# # hf model -> export -> compile -> execute @@ -157,8 +159,10 @@ def main( logger.info(f"Compiled qpc files can be found at : {generated_qpc_path}") # Execute - latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt) + if execute: + latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt) + return generated_qpc_path if __name__ == "__main__": parser = argparse.ArgumentParser( @@ -209,4 +213,4 @@ def main( ) args = parser.parse_args() - main(**args.__dict__) + infer_api(**args.__dict__) diff --git a/QEfficient/generation/llm_generator.py b/QEfficient/generation/llm_generator.py index 18e726021..65ab3bd16 100644 --- a/QEfficient/generation/llm_generator.py +++ b/QEfficient/generation/llm_generator.py @@ -5,17 +5,14 @@ # # ----------------------------------------------------------------------------- -import json import os import torch import numpy as np import torch.nn as nn -from typing import Dict, List, Optional, Union 
+from typing import List, Optional, Union from threading import Thread -from QEfficient.generation.cloud_infer import QAICInferenceSession - from transformers import ( AutoTokenizer, TextIteratorStreamer, @@ -29,6 +26,7 @@ MaxLengthCriteria, ) +from QEfficient.generation.cloud_infer import QAICInferenceSession class LLMGenerator: @@ -40,15 +38,13 @@ def __init__( prompt_len: Optional[int] = 32, ctx_len: Optional[int] = 128, streamer: Optional[Union[TextStreamer, TextIteratorStreamer]] = None, - logits_processor: Optional = None, - logits_warper: Optional = None, ): self.session = None self.tokenizer = None self.is_first_prompt = False - self.model_name = "" + self.model_name = model_name self.qpc_path = "" - self.device_id = [0] + self.device_id = device_id self.curr_cache_index = 0 self.ctx_len = ctx_len self.retained_state = True @@ -61,9 +57,6 @@ def __init__( self.qpc_path = ( qpc_path if os.path.exists(qpc_path) else OSError(f"{qpc_path} not found !") ) - self.device_id = device_id - - self.model_name = model_name try: self.session = QAICInferenceSession( @@ -101,6 +94,8 @@ def __init__( self.streamer = streamer( self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=None ) + else: + self.streamer = TextStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=None) # instantiate default logit processor and wrapper here # TODO : change default values with temperature and top_p @@ -169,10 +164,11 @@ def _stopping_criteria(self, next_token_id, max_new_tokens=None): ) return True + # llama3 if next_token_id == self.tokenizer.convert_tokens_to_ids("<|eot_id|>"): print( next_token_id == self.tokenizer.eos_token_id, - "next_token_id == self.tokenizer.eos_token_id", + "next_token_id == <|eot_id|>", ) return True diff --git a/app/Readme.md b/app/Readme.md index 2b36673a8..2567661d1 100644 --- a/app/Readme.md +++ b/app/Readme.md @@ -52,28 +52,32 @@ Please modify the `app_config.json` like below, "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [0] + "device_id" : [0], + "num_cores" : 16 }, "mpt" : { "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [1] + "device_id" : [1], + "num_cores" : 16 }, "llama" : { "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [2] + "device_id" : [2], + "num_cores" : 16 }, "mistral" : { "qpc_path" : "", "model_name" : "", "prompt_len" : 128, "ctx_len" : 1024, - "device_id" : [3] + "device_id" : [3], + "num_cores" : 16 } }, "question-answering" : { diff --git a/app/app.py b/app/app.py index 8b8dce192..865a2adae 100755 --- a/app/app.py +++ b/app/app.py @@ -24,7 +24,6 @@ ) from utils import ( - get_list_of_models_task, get_list_of_tasks, get_list_of_models_all, get_data, @@ -33,6 +32,7 @@ get_app_config, ) +from QEfficient.cloud.infer import infer_api # Load .env file load_dotenv() @@ -51,6 +51,7 @@ previous_current_ctx_len = 0 last_prompt = "" last_state_generation_ids = [] +disable_multiturn = True # main title of app title = """ @@ -68,8 +69,8 @@ def update_model(task, model): - global qeff_generator_model - new_obj = get_generator(task, model) + global qeff_generator_model, app_config + new_obj = get_generator(task, model, app_config) if new_obj is not None: qeff_generator_model = new_obj print("Updating qeff generator, ", qeff_generator_model.model_name) @@ -97,7 +98,7 @@ def get_prompt(message: str, system_prompt: str): def run_qeff_check(task, model_name, progress=gr.Progress()): - global summary_text, qeff_flags + global 
summary_text, qeff_flags, app_config
     summary_text = ""
 
     model_info = get_data(task, model_name)
@@ -106,20 +107,25 @@ def run_qeff_check(task, model_name, progress=gr.Progress()):
     if model_name not in qeff_flags:
         qeff_flags.add(model_name)
 
         # TODO : call QEfficient transform api
-        # TODO : take model_info as args
         progress(0, desc="Downloading...")
         for i in progress.tqdm(range(100), desc="Downloading..."):
             time.sleep(0.0005)
         summary_text += f"$ Downloaded {model_name} from cache directory\n"
 
         # TODO : call QEfficient compile api
-        # TODO : take model info as arguments
-        # TODO : we can update the outputs from execute api in summary text
-        # TODO : app_config[task][model_name]['qpc_path'] = 
         progress(0, desc="Optimizing and Compiling...")
         time.sleep(0.5)
         for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."):
             time.sleep(0.07)
-
+        # calling infer api directly to get qpc_path
+        app_config[task][model_name]['qpc_path'] = infer_api(
+            model_name=model_info['model_name'],
+            num_cores=model_info['num_cores'],
+            prompt_len= model_info['prompt_len'],
+            ctx_len = model_info['ctx_len'],
+            execute = False
+        )
+
         summary_text += f"$ Optimized {model_name}\n"
 
         progress(0, desc="Generating Inference Container...")
@@ -141,21 +147,16 @@ def summary():
 def infer_prompt(msg, chat_history, task, model):
     global last_prompt, previous_current_ctx_len, last_state_generation_ids
 
-    qeff_generator_model.curr_cache_index = 0
-    qeff_generator_model.generated_ids = []
-
+    qeff_generator_model.stop_indicator = True
+
+    if disable_multiturn:
+        qeff_generator_model.curr_cache_index = 0
+        qeff_generator_model.generated_ids = []
+
+    # in multi-turn mode, reset once the ctx length is exhausted
     if qeff_generator_model.curr_cache_index >= qeff_generator_model.ctx_len - 1:
         qeff_generator_model.curr_cache_index = 0
 
-    qeff_generator_model.curr_cache_index = 0
-    qeff_generator_model.stop_indicator = True
-    last_prompt = msg
-    previous_current_ctx_len = qeff_generator_model.curr_cache_index
-    last_state_generation_ids = qeff_generator_model.generated_ids
-
-    if not check():
-        return msg, chat_history
-
     output = ""
     yield "", chat_history + [(msg, output)]
@@ -180,16 +181,6 @@ def stop():
     return
 
-def check():
-    if qeff_generator_model.curr_cache_index >= qeff_generator_model.ctx_len - 1:
-        gr.Warning(
-            f"Reached max token generation limit of {qeff_generator_model.ctx_len}, Kindly press clear!"
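To make the new control flow concrete: `run_qeff_check` now delegates compilation to `infer_api` and writes the returned QPC path back into the config. A trimmed sketch of that contract is below; `ensure_qpc` is a hypothetical helper rather than app code, and note that the next patch in this series renames `execute=False` to `skip_stats=True`:

```python
from QEfficient.cloud.infer import infer_api

# Hypothetical helper illustrating the compile-once pattern used by
# run_qeff_check: build (or locate) the QPC and cache its path in the config.
def ensure_qpc(app_config, task, model):
    info = app_config[task][model]
    if not info.get("qpc_path"):
        info["qpc_path"] = infer_api(
            model_name=info["model_name"],
            num_cores=info["num_cores"],
            prompt_len=info["prompt_len"],
            ctx_len=info["ctx_len"],
            prompt="",       # unused when only compiling
            execute=False,   # compile/locate the QPC only; skip the latency run
        )
    return info["qpc_path"]
```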
- ) - qeff_generator_model.curr_cache_index = 0 - return False - return True - - def run_clear(): global qeff_flags qeff_generator_model.curr_cache_index = 0 diff --git a/app/utils.py b/app/utils.py index cb4e9ff64..74b2ea37e 100644 --- a/app/utils.py +++ b/app/utils.py @@ -41,7 +41,10 @@ def get_list_of_models_task(app_config, task): return list(app_config[task].keys()) -def get_data(task, model): +def get_data(task, model, app_config = None): + if app_config: + return app_config[task][model] + app_config = get_app_config() return app_config[task][model] @@ -68,8 +71,10 @@ def load_models_artifacts(): print(generator_hub) -def get_generator(task, model): - app_config = get_app_config() +def get_generator(task, model, app_config = None): + if app_config is None: + app_config = get_app_config() + if task in generator_hub.keys(): if model in generator_hub[task].keys(): if generator_hub[task][model] is None: From a3f65c4020cebc95cc517a079241865cf29c01a7 Mon Sep 17 00:00:00 2001 From: Himanshu Upreti Date: Mon, 13 May 2024 10:25:00 +0000 Subject: [PATCH 17/18] modified infer api to skip model download if qpc exits Signed-off-by: Himanshu Upreti --- QEfficient/cloud/infer.py | 31 +++++++++++++++----------- QEfficient/generation/llm_generator.py | 20 ++++++----------- app/app.py | 15 ++++++++----- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py index 75c69d484..74d203ff7 100644 --- a/QEfficient/cloud/infer.py +++ b/QEfficient/cloud/infer.py @@ -60,9 +60,10 @@ def infer_api( device_group: List[int] = [ 0, ], - execute : bool = True + skip_stats : bool = False, ) -> None: # Make + breakpoint() model_card_dir = os.path.join(QEFF_MODELS_DIR, str(model_name)) os.makedirs(model_card_dir, exist_ok=True) @@ -77,21 +78,24 @@ def infer_api( onnx_dir_path = os.path.join(model_card_dir, "onnx") onnx_model_path = os.path.join(onnx_dir_path, model_name.replace("/", "_") + "_kv_clipped_fp16.onnx") + # skip model download if qpc exits and we do not need stats + if not qpc_exists(qpc_dir_path) or not skip_stats: # Get tokenizer - if hf_token is not None: - login(hf_token) - model_hf_path = hf_download( - repo_id=model_name, - cache_dir=cache_dir, - ignore_patterns=["*.txt", "*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf"], - ) - tokenizer = AutoTokenizer.from_pretrained(model_hf_path, use_cache=True, padding_side="left") + if hf_token is not None: + login(hf_token) + model_hf_path = hf_download( + repo_id=model_name, + cache_dir=cache_dir, + ignore_patterns=["*.txt", "*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf"], + ) + tokenizer = AutoTokenizer.from_pretrained(model_hf_path, use_cache=True, padding_side="left") if qpc_exists(qpc_dir_path): # execute logger.info("Pre-compiled qpc found! 
From a3f65c4020cebc95cc517a079241865cf29c01a7 Mon Sep 17 00:00:00 2001
From: Himanshu Upreti
Date: Mon, 13 May 2024 10:25:00 +0000
Subject: [PATCH 17/18] Modify infer api to skip model download if qpc exists

Signed-off-by: Himanshu Upreti
---
 QEfficient/cloud/infer.py              | 31 +++++++++++++++-----------
 QEfficient/generation/llm_generator.py | 20 ++++++-----------
 app/app.py                             | 15 ++++++++-----
 3 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py
index 75c69d484..74d203ff7 100644
--- a/QEfficient/cloud/infer.py
+++ b/QEfficient/cloud/infer.py
@@ -60,9 +60,10 @@ def infer_api(
     device_group: List[int] = [
         0,
     ],
-    execute : bool = True
+    skip_stats: bool = False,
 ) -> None:
     # Make
+    breakpoint()
     model_card_dir = os.path.join(QEFF_MODELS_DIR, str(model_name))
     os.makedirs(model_card_dir, exist_ok=True)

@@ -77,21 +78,24 @@ def infer_api(
     onnx_dir_path = os.path.join(model_card_dir, "onnx")
     onnx_model_path = os.path.join(onnx_dir_path, model_name.replace("/", "_") + "_kv_clipped_fp16.onnx")

+    # skip model download if qpc exists and we do not need stats
+    if not qpc_exists(qpc_dir_path) or not skip_stats:
     # Get tokenizer
-    if hf_token is not None:
-        login(hf_token)
-    model_hf_path = hf_download(
-        repo_id=model_name,
-        cache_dir=cache_dir,
-        ignore_patterns=["*.txt", "*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf"],
-    )
-    tokenizer = AutoTokenizer.from_pretrained(model_hf_path, use_cache=True, padding_side="left")
+        if hf_token is not None:
+            login(hf_token)
+        model_hf_path = hf_download(
+            repo_id=model_name,
+            cache_dir=cache_dir,
+            ignore_patterns=["*.txt", "*.onnx", "*.ot", "*.md", "*.tflite", "*.pdf"],
+        )
+        tokenizer = AutoTokenizer.from_pretrained(model_hf_path, use_cache=True, padding_side="left")

     if qpc_exists(qpc_dir_path):
         # execute
         logger.info("Pre-compiled qpc found! Trying to execute with given prompt")
-        latency_stats_kv(tokenizer=tokenizer, qpc=qpc_dir_path, device_id=device_group, prompt=prompt)
-        return
+        if not skip_stats:
+            latency_stats_kv(tokenizer=tokenizer, qpc=qpc_dir_path, device_id=device_group, prompt=prompt)
+        return qpc_dir_path

     if onnx_exists(onnx_model_path):
         # Compile -> execute
@@ -111,7 +115,7 @@ def infer_api(
     assert (
         generated_qpc_path == qpc_dir_path
     ), f"QPC files were generated at an unusual location, expected {qpc_dir_path}; got {generated_qpc_path}"
-    if execute:
+    if not skip_stats:
         latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt)
     return generated_qpc_path

@@ -159,7 +163,8 @@ def infer_api(
     logger.info(f"Compiled qpc files can be found at : {generated_qpc_path}")

     # Execute
-    if execute:
+    # TODO : once the api calls for generic app are there remove this
+    if not skip_stats:
         latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt)

     return generated_qpc_path
diff --git a/QEfficient/generation/llm_generator.py b/QEfficient/generation/llm_generator.py
index 65ab3bd16..068c2dadd 100644
--- a/QEfficient/generation/llm_generator.py
+++ b/QEfficient/generation/llm_generator.py
@@ -32,28 +32,27 @@ class LLMGenerator:
     def __init__(
         self,
-        qpc_path,
-        model_name,
+        qpc_path: str,
+        model_name: str,
         device_id: Optional[List[int]] = [0],
         prompt_len: Optional[int] = 32,
         ctx_len: Optional[int] = 128,
         streamer: Optional[Union[TextStreamer, TextIteratorStreamer]] = None,
+        retained_state: bool = True
     ):
         self.session = None
         self.tokenizer = None
         self.is_first_prompt = False
         self.model_name = model_name
-        self.qpc_path = ""
         self.device_id = device_id
         self.curr_cache_index = 0
         self.ctx_len = ctx_len
-        self.retained_state = True
-        self.write_io_dir = False
         self.prompt_len = prompt_len
         self.generated_ids = []
         self.inputs = None
         self.stop_indicator = True
-
+        self.retained_state = retained_state
+
         self.qpc_path = (
             qpc_path if os.path.exists(qpc_path) else OSError(f"{qpc_path} not found !")
         )
@@ -202,19 +201,14 @@ def prepare_inputs_for_inference(self, prompt):
         return inputs, prompt_len

     def update_inputs_for_inference(self, inputs, next_token_id):
-        batch_size, prompt_len = inputs["input_ids"].shape
+        _, prompt_len = inputs["input_ids"].shape
         inputs["cache_index"] += prompt_len
-
         inputs["input_ids"] = next_token_id
-
-        batch_size, prompt_len = inputs["input_ids"].shape
-
         if "attention_mask" in inputs.keys():
             inputs["position_ids"] = inputs.pop("attention_mask").sum(1, keepdims=True)
         else:
             inputs["position_ids"] += 1
-
-        batch_size, prompt_len = inputs["input_ids"].shape
+        _, prompt_len = inputs["input_ids"].shape
         return inputs, prompt_len

     def generate(self, prompt: str, sample: bool = False, max_new_tokens: int = None):
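The update_inputs_for_inference cleanup drops the unused batch_size recomputations without changing behaviour: each decode step advances cache_index by the number of tokens just consumed, feeds back only the newly sampled token, and derives the next position from the attention mask. A standalone sketch of that bookkeeping (NumPy, illustrative shapes, not the class itself):

    import numpy as np

    # State after prefill of a 3-token prompt, batch size 1 (illustrative).
    inputs = {
        "input_ids": np.array([[11, 42, 7]]),
        "cache_index": np.array([0]),        # how far the KV cache is filled
        "attention_mask": np.ones((1, 3), dtype=np.int64),
    }
    next_token_id = np.array([[99]])         # token sampled from the last logits

    _, prompt_len = inputs["input_ids"].shape
    inputs["cache_index"] += prompt_len      # past now covers the prompt tokens
    inputs["input_ids"] = next_token_id      # feed back only the new token
    # the new token's position equals the number of tokens attended so far
    inputs["position_ids"] = inputs.pop("attention_mask").sum(1, keepdims=True)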
diff --git a/app/app.py b/app/app.py
index 865a2adae..676201262 100755
--- a/app/app.py
+++ b/app/app.py
@@ -116,15 +116,20 @@ def run_qeff_check(task, model_name, progress=gr.Progress()):
         time.sleep(0.5)
         for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."):
             time.sleep(0.07)
-
+
         # calling infer api directly to get qpc_path
         app_config[task][model_name]['qpc_path'] = infer_api(
-            model_name=model_info['model_name'],
-            num_cores=model_info['num_cores'],
-            prompt_len= model_info['prompt_len'],
+            model_name = model_info['model_name'],
+            num_cores = model_info['num_cores'],
+            prompt_len = model_info['prompt_len'],
             ctx_len = model_info['ctx_len'],
-            execute = False
+            skip_stats = True,
+            prompt = "",
+            mxfp6 = True
         )
+
+        if not os.path.exists(app_config[task][model_name]['qpc_path']):
+            raise RuntimeError(f"qpc path not found for {task} {model_name}")

         summary_text += f"$ Optimized {model_name}\n"

From aaa395b1a486af7d440271e356ff279502fac9dd Mon Sep 17 00:00:00 2001
From: Himanshu Upreti
Date: Mon, 13 May 2024 10:49:10 +0000
Subject: [PATCH 18/18] Restore interface for infer cli api

Signed-off-by: Himanshu Upreti
---
 QEfficient/cloud/infer.py | 42 +++++++++++++++++++++++++++++++++------
 app/app.py                | 10 +++-------
 2 files changed, 39 insertions(+), 13 deletions(-)

diff --git a/QEfficient/cloud/infer.py b/QEfficient/cloud/infer.py
index 74d203ff7..1dc2761f6 100644
--- a/QEfficient/cloud/infer.py
+++ b/QEfficient/cloud/infer.py
@@ -48,7 +48,7 @@ def onnx_exists(onnx_file_path: str) -> bool:
 def infer_api(
     model_name: str,
     num_cores: int,
-    prompt: str,
+    prompt: str = Constants.input_str,
     aic_enable_depth_first: bool = False,
     mos: int = -1,
     cache_dir: str = Constants.CACHE_DIR,
@@ -61,9 +61,8 @@ def infer_api(
         0,
     ],
     skip_stats: bool = False,
-) -> None:
+) -> str:
     # Make
-    breakpoint()
     model_card_dir = os.path.join(QEFF_MODELS_DIR, str(model_name))
     os.makedirs(model_card_dir, exist_ok=True)

@@ -162,13 +161,44 @@ def infer_api(
     ), f"QPC files were generated at an unusual location, expected {qpc_dir_path}; got {generated_qpc_path}"
     logger.info(f"Compiled qpc files can be found at : {generated_qpc_path}")

-    # Execute
-    # TODO : once the api calls for generic app are there remove this
     if not skip_stats:
         latency_stats_kv(tokenizer=tokenizer, qpc=generated_qpc_path, device_id=device_group, prompt=prompt)

     return generated_qpc_path

+
+def main(
+    model_name: str,
+    num_cores: int,
+    prompt: str,
+    aic_enable_depth_first: bool = False,
+    mos: int = -1,
+    cache_dir: str = Constants.CACHE_DIR,
+    hf_token: str = None,
+    batch_size: int = 1,
+    prompt_len: int = 32,
+    ctx_len: int = 128,
+    mxfp6: bool = False,
+    device_group: List[int] = [
+        0,
+    ],
+) -> None:
+    _ = infer_api(
+        model_name=model_name,
+        num_cores=num_cores,
+        prompt=prompt,
+        aic_enable_depth_first=aic_enable_depth_first,
+        mos=mos,
+        cache_dir=cache_dir,
+        hf_token=hf_token,
+        batch_size=batch_size,
+        prompt_len=prompt_len,
+        ctx_len=ctx_len,
+        mxfp6=mxfp6,
+        device_group=device_group
+    )
+
+    return
+

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Inference command, the model will be downloaded from HF, optimized, compiled, executed on AIC"
     )
@@ -218,4 +248,4 @@
     )

     args = parser.parse_args()
-    infer_api(**args.__dict__)
+    main(**args.__dict__)
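After this split, infer_api is the programmatic entry point, now returning the QPC path, while main restores the original CLI behaviour on top of it. A sketch of the programmatic use (the model name and core count are illustrative; the import path follows the file layout in the diff):

    from QEfficient.cloud.infer import infer_api

    # Compile, or reuse a pre-compiled QPC, and get its path back without a latency run.
    qpc_path = infer_api(
        model_name="gpt2",   # illustrative HF model card
        num_cores=14,        # illustrative AIC core count
        prompt_len=32,
        ctx_len=128,
        skip_stats=True,     # also skips the HF download when the QPC already exists
    )
    print(f"QPC ready at {qpc_path}")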
diff --git a/app/app.py b/app/app.py
index 676201262..2a1512cb8 100755
--- a/app/app.py
+++ b/app/app.py
@@ -106,16 +106,12 @@ def run_qeff_check(task, model_name, progress=gr.Progress()):
     if model_name not in qeff_flags:
         qeff_flags.add(model_name)

-        # TODO : call QEfficient transform api
-        progress(0, desc="Downloading...")
-        for i in progress.tqdm(range(100), desc="Downloading..."):
-            time.sleep(0.0005)
         summary_text += f"$ Downloaded {model_name} from cache directory\n"

-        # TODO : call QEfficient compile api
+
         progress(0, desc="Optimizing and Compiling...")
         time.sleep(0.5)
         for i in progress.tqdm(range(100), desc="Optimizing and Compiling..."):
-            time.sleep(0.07)
+            time.sleep(0.04)

         # calling infer api directly to get qpc_path
         app_config[task][model_name]['qpc_path'] = infer_api(
             model_name = model_info['model_name'],
             num_cores = model_info['num_cores'],
             prompt_len = model_info['prompt_len'],
             ctx_len = model_info['ctx_len'],
             skip_stats = True,
             prompt = "",
-            mxfp6 = True
+            mxfp6 = model_info['mxfp6']
         )

         if not os.path.exists(app_config[task][model_name]['qpc_path']):
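For reference, the multi-turn guard that PATCH 16 added to infer_prompt reduces to the following reset policy (a standalone restatement; disable_multiturn and the generator attributes are the ones used in app.py):

    # Restatement of the infer_prompt reset logic from PATCH 16.
    def reset_generator_state(gen, disable_multiturn: bool) -> None:
        gen.stop_indicator = True      # allow the next generation to run
        if disable_multiturn:
            # single-turn mode: every prompt starts from an empty KV cache
            gen.curr_cache_index = 0
            gen.generated_ids = []
        # in multi-turn mode, reset once the context window is exhausted
        if gen.curr_cache_index >= gen.ctx_len - 1:
            gen.curr_cache_index = 0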