diff --git a/py/autoevals/moderation.py b/py/autoevals/moderation.py
index 842e6f0..4b4e542 100644
--- a/py/autoevals/moderation.py
+++ b/py/autoevals/moderation.py
@@ -35,7 +35,7 @@ def __init__(
         super().__init__(api_key=api_key, base_url=base_url, client=client)
         self.threshold = threshold

-    def _run_eval_sync(self, output, __expected=None):
+    def _run_eval_sync(self, output, expected=None, **kwargs):
         moderation_response = run_cached_request(
             client=self.client, request_type=REQUEST_TYPE, input=output, **self.extra_args
         )["results"][0]
diff --git a/py/autoevals/oai.py b/py/autoevals/oai.py
index dbd0bc6..72e9c0d 100644
--- a/py/autoevals/oai.py
+++ b/py/autoevals/oai.py
@@ -194,11 +194,11 @@ def prepare_openai(client: Optional[LLMClient] = None, is_async=False, api_key=N
         if is_async:
             complete_fn = openai_obj.ChatCompletion.acreate
             embedding_fn = openai_obj.Embedding.acreate
-            moderation_fn = openai_obj.Moderations.acreate
+            moderation_fn = openai_obj.Moderation.acreate
         else:
             complete_fn = openai_obj.ChatCompletion.create
             embedding_fn = openai_obj.Embedding.create
-            moderation_fn = openai_obj.Moderations.create
+            moderation_fn = openai_obj.Moderation.create
         client = Client(
             openai=openai,
             complete=complete_fn,
diff --git a/py/autoevals/version.py b/py/autoevals/version.py
index 7374298..ab5cdea 100644
--- a/py/autoevals/version.py
+++ b/py/autoevals/version.py
@@ -1 +1 @@
-VERSION = "0.0.115"
+VERSION = "0.0.116"
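
For context, a minimal usage sketch of the relaxed `_run_eval_sync` signature (not part of this diff; it assumes the base scorer's `eval` forwards `expected` and extra keyword arguments to `_run_eval_sync`, that `OPENAI_API_KEY` is configured, and the argument values are illustrative only):

# Hypothetical usage sketch, not part of this diff.
from autoevals.moderation import Moderation

# Flag the output if any moderation category score exceeds the threshold (optional).
moderator = Moderation(threshold=0.5)

# Eval harnesses commonly pass `expected` (and other keyword arguments) to every scorer;
# the updated signature accepts and ignores them, since moderation needs no reference answer.
result = moderator.eval(output="Some text to check for policy violations", expected=None)
print(result.score, result.metadata)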