Skip to content

Commit

Permalink
enable to select other llm service `Azure AI Inference (GitHub Models)` (#60)
Browse files Browse the repository at this point in the history

* refactor: introduce llm client factory method

* lint: add unused-import plugin and fix some lint errors

* feat: add the configuration to change LLM Service

* refactor: rename OpenAI client

* build: specify commonjs for @azure/core-rest-pipeline to fix error

* feat: add azure client for llm service

* feat: enable to select llm service on CLI

* docs: update settings documentation

* chore: remove todo comments

* docs: add description about service option for cli

* chore: fix lint error
  • Loading branch information
kaakaa authored Aug 25, 2024
1 parent bdcf5ed commit 2efeb2d
Show file tree
Hide file tree
Showing 14 changed files with 266 additions and 26 deletions.
6 changes: 4 additions & 2 deletions .eslintrc.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@
},
"plugins": [
"@typescript-eslint",
"import"
"import",
"unused-imports"
],
"rules": {
"@typescript-eslint/consistent-type-imports": [
Expand Down Expand Up @@ -46,7 +47,8 @@
}
],
"no-throw-literal": "error",
"semi": "off"
"semi": "off",
"unused-imports/no-unused-imports": "error"
},
"ignorePatterns": [
"out",
Expand Down
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ Options:
-i, --input <file> input yaml file path to generate slide (default: "slides.yaml")
-o, --output <file> output path to write markdown file
-l, --locale <locale> locale of generated slide
-s, --service <service> service to use ("openai" or "azure-ai-inference") (default: "openai")
-u, --apiurl <url> base url of openai api (e.g.: https://api.openai.com/v1)
-k, --apikey api key of openai (or openai-compatible) api
-m, --model <model> model of openai api
Expand Down
36 changes: 28 additions & 8 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -73,37 +73,53 @@
"configuration": {
"title": "Slidaiv",
"properties": {
"slidaiv.apiKey": {
"type": "null",
"default": null,
"markdownDescription": "Set API Key to authorize requests to OpenAI API from [here](command:slidaiv.command.setApiKey)."
"slidaiv.llmService": {
"type": "string",
"order": 1,
"default": "openai",
"enum": [
"openai",
"azure-ai-inference"
],
"description": "Select LLM service to generate Slidev contents. (default: openai)"
},
"slidaiv.baseUrl": {
"type": "string",
"order": 2,
"default": "https://api.openai.com/v1",
"description": "Specify OpenAI API Base URL (default: https://api.openai.com/v1). Enter this if you use OpenAI Compatible API."
"markdownDescription": "Specify LLM service's base URL (default: `https://api.openai.com/v1`). If you select `azure-ai-inference` in `#slidaiv.llmService#`, you cannot configure this setting, and `https://models.inference.ai.azure.com` will be used."
},
"slidaiv.apiKey": {
"type": "null",
"order": 3,
"default": null,
"markdownDescription": "API Key must be set from [here](command:slidaiv.command.setApiKey). API Key will be used to authorize requests to selected LLM Service (`#slidaiv.llmService#`)."
},
"slidaiv.model": {
"type": "string",
"default": "gpt-3.5-turbo",
"description": "Specify the model to use. (default: gpt-3.5-turbo)"
"order": 4,
"default": "gpt-4o",
"description": "Enter the LLM model name. (default: gpt-4o)"
},
"slidaiv.prompt.generate": {
"type": "string",
"order": 10,
"editPresentation": "multilineText",
"default": "",
"markdownDescription": "System Prompt for `Generate Slidev contents` command. \nYou can use variable `${locale}` in your prompt, which will be replaced with locale setting in runtime.\n\nIf empty, [the default prompt](https://github.com/kaakaa/slidaiv/blob/master/src/client/prompts.ts) will be used. (Default is empty)"
},
"slidaiv.prompt.decorate": {
"type": "string",
"order": 11,
"editPresentation": "multilineText",
"default": "",
"markdownDescription": "System Prompt for `Decorate contents (Experimental)` command.\n\nIf empty, [the default prompt](https://github.com/kaakaa/slidaiv/blob/master/src/client/prompts.ts) will be used. (Default is empty)"
},
"slidaiv.debug": {
"type": "boolean",
"order": 90,
"default": false,
"description": "Enable to output debug logs."
"description": "Enable debug log"
}
}
}
Expand Down Expand Up @@ -142,12 +158,16 @@
"esbuild": "^0.23.0",
"eslint": "^8.57.0",
"eslint-plugin-import": "^2.29.1",
"eslint-plugin-unused-imports": "^4.1.3",
"ts-loader": "^9.5.1",
"typescript": "^5.4.5",
"webpack": "^5.91.0",
"webpack-cli": "^5.1.4"
},
"dependencies": {
"@azure-rest/ai-inference": "1.0.0-beta.2",
"@azure/core-auth": "^1.7.2",
"@azure/core-sse": "^2.1.3",
"@slidev/parser": "^0.49.27",
"@slidev/types": "^0.49.27",
"cli-progress": "^3.12.0",
Expand Down
77 changes: 77 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

11 changes: 5 additions & 6 deletions src/cli/main.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,8 @@ import { MultiBar, Presets } from 'cli-progress';
import { parse } from "@slidev/parser";

import { Logger } from '@/logger';
import { loadConfig as loadSettings, SlidevHeader } from '@/cli/util';
import type { GeneratedSlide } from '@/cli/util';
import { Client } from '@/client/openai';
import type { CustomCancellationToken } from '@/client/llmClient';
import { loadConfig as loadSettings, SlidevHeader, type GeneratedSlide } from '@/cli/util';
import { LLMClientFactory, type CustomCancellationToken } from '@/client/llmClient';
import { SlidevPage } from '@/model/slidev';

Logger.init((message: string) => { console.log(message); });
Expand All @@ -20,8 +18,9 @@ program
.option('-i, --input <file>', 'input yaml file path to generate slide', 'slides.yaml')
.option('-o, --output <file>', 'output path to write markdown file')
.option('-l, --locale <locale>', 'locale of generated slide')
.option('-s, --service <service>', 'service to use ("openai" or "azure-ai-inference")', 'openai')
.option('-u, --apiurl <url>', 'base url of openai api (e.g.: https://api.openai.com/v1)')
.option('-k, --apikey', 'api key of openai (or openai-compatible) api ')
.option('-k, --apikey <apikey>', 'api key of openai (or openai-compatible) api ')
.option('-m, --model <model>', 'model of openai api')
.option('-d, --debug', 'output extra debugging', false);
const options = program.parse().opts();
Expand All @@ -37,7 +36,7 @@ class CancelHandler implements CustomCancellationToken {
// Set up
const multi = new MultiBar({}, Presets.shades_classic);
const progress = multi.create(settings.slides?.length, 0);
const client = new Client(settings.context, settings.context.locale);
const client = LLMClientFactory.create(settings.context, settings.context.locale);

multi.log("Generating slides...\n");

Expand Down
3 changes: 2 additions & 1 deletion src/cli/util.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,13 +43,14 @@ title: AI-generated slides
`;

export function loadConfig(f: string, options: OptionValues): CLISettings {
const { input, output, locale, apiurl, apikey, model, debug } = options;
const { input, output, locale, service, apiurl, apikey, model, debug } = options;
const settings = yaml.parse(f) as CLISettings;

const loc = locale ?? settings.context.locale ?? "en";

return {
context: {
service: service ?? settings.context.service ?? "openai",
apiKey: apikey ?? settings.context.apiKey ?? "dummy",
baseUrl: apiurl ?? settings.context.baseUrl ?? "https://openai.com/v1",
model: model ?? settings.context.model ?? "gpt-4o",
Expand Down
Loading

0 comments on commit 2efeb2d

Please sign in to comment.