diff --git a/speech_recognition/recognizers/google.py b/speech_recognition/recognizers/google.py
index 01afde38..17f0d12c 100644
--- a/speech_recognition/recognizers/google.py
+++ b/speech_recognition/recognizers/google.py
@@ -233,8 +233,7 @@ def recognize_legacy(
     *,
     endpoint: str = ENDPOINT,
 ):
-    """
-    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
+    """Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
 
     The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
 
diff --git a/speech_recognition/recognizers/whisper_api/groq.py b/speech_recognition/recognizers/whisper_api/groq.py
index 77d5c3a1..54631b5b 100644
--- a/speech_recognition/recognizers/whisper_api/groq.py
+++ b/speech_recognition/recognizers/whisper_api/groq.py
@@ -35,8 +35,7 @@ def recognize(
     model: GroqModel = "whisper-large-v3-turbo",
     **kwargs: Unpack[GroqOptionalParameters],
 ) -> str:
-    """
-    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Groq Whisper API.
+    """Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Groq Whisper API.
 
     This function requires login to Groq; visit https://console.groq.com/login, then generate API Key in `API Keys <https://console.groq.com/keys>`__ menu.
 
diff --git a/speech_recognition/recognizers/whisper_api/openai.py b/speech_recognition/recognizers/whisper_api/openai.py
index c474790a..c5b6fbed 100644
--- a/speech_recognition/recognizers/whisper_api/openai.py
+++ b/speech_recognition/recognizers/whisper_api/openai.py
@@ -35,8 +35,7 @@ def recognize(
     model: WhisperModel = "whisper-1",
     **kwargs: Unpack[OpenAIOptionalParameters],
 ) -> str:
-    """
-    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API.
+    """Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the OpenAI Whisper API.
 
     This function requires an OpenAI account; visit https://platform.openai.com/signup, then generate API Key in `User settings <https://platform.openai.com/account/api-keys>`__.
 
diff --git a/speech_recognition/recognizers/whisper_local/whisper.py b/speech_recognition/recognizers/whisper_local/whisper.py
index 4ae54b7e..6287245e 100644
--- a/speech_recognition/recognizers/whisper_local/whisper.py
+++ b/speech_recognition/recognizers/whisper_local/whisper.py
@@ -47,8 +47,7 @@ def recognize(
     load_options: LoadModelOptionalParameters | None = None,
     **transcribe_options: Unpack[TranscribeOptionalParameters],
 ) -> str | TranscribeOutput:
-    """
-    Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper.
+    """Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using Whisper.
 
     Pick ``model`` from output of :command:`python -c 'import whisper; print(whisper.available_models())'`.
     See also https://github.com/openai/whisper?tab=readme-ov-file#available-models-and-languages.
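The four touched functions are easier to review with a call site in mind. Below is a minimal sketch of the first hunk's ``google.recognize_legacy``, called through its module path as shown in the diff; the WAV file name and the ``key``/``language`` arguments are illustrative assumptions, not taken from this diff:

    import speech_recognition as sr
    from speech_recognition.recognizers import google

    r = sr.Recognizer()
    # Build an AudioData instance from a WAV file (path is a placeholder).
    with sr.AudioFile("sample.wav") as source:
        audio = r.record(source)

    # key=None falls back to the generic built-in key the docstring describes.
    text = google.recognize_legacy(r, audio, key=None, language="en-US")
    print(text)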
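A similar sketch for the Groq recognizer in the second hunk, using the ``model`` default visible in its signature; it assumes the ``groq`` SDK is installed and that it picks up a ``GROQ_API_KEY`` environment variable, which this diff does not itself state:

    import speech_recognition as sr
    from speech_recognition.recognizers.whisper_api import groq

    r = sr.Recognizer()
    with sr.AudioFile("sample.wav") as source:  # placeholder file name
        audio = r.record(source)

    # model matches the default shown in the signature above.
    text = groq.recognize(r, audio, model="whisper-large-v3-turbo")
    print(text)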
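The OpenAI variant in the third hunk is nearly identical; this sketch assumes the ``openai`` SDK is installed and reads ``OPENAI_API_KEY`` from the environment (again an assumption about the SDK, not something this diff states):

    import speech_recognition as sr
    from speech_recognition.recognizers.whisper_api import openai

    r = sr.Recognizer()
    with sr.AudioFile("sample.wav") as source:  # placeholder file name
        audio = r.record(source)

    # "whisper-1" is the default model in the signature above.
    text = openai.recognize(r, audio, model="whisper-1")
    print(text)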
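Finally, the local Whisper recognizer from the fourth hunk; this sketch assumes the ``openai-whisper`` package is installed, and ``model="base"`` is one of the names that ``whisper.available_models()`` prints, per the docstring above:

    import speech_recognition as sr
    from speech_recognition.recognizers.whisper_local import whisper

    r = sr.Recognizer()
    with sr.AudioFile("sample.wav") as source:  # placeholder file name
        audio = r.record(source)

    # With default options the function returns a plain str,
    # matching the ``str | TranscribeOutput`` return annotation.
    text = whisper.recognize(r, audio, model="base")
    print(text)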