diff --git a/__tests__/openai.test.ts b/__tests__/openai.test.ts
index d8aeade..f1d95fc 100644
--- a/__tests__/openai.test.ts
+++ b/__tests__/openai.test.ts
@@ -1,4 +1,4 @@
-import { testConnection, generatePrompt, testAssertion, writeAnalysis } from '../src/open_ai';
+import { testConnection, generatePrompt, testAssertion, writeAnalysis, openAiFeedback } from '../src/open_ai';
 import { getDiff, getSinglePR, getAssertion, getChangedFiles, getFileContent } from '../src/github';
 
 describe('OpenAI Functionality', () => {
@@ -27,13 +27,16 @@ describe('OpenAI Functionality', () => {
     const file: any = await getFileContent(changedFiles, 'hasura', 'v3-docs');
     const prompt: string = generatePrompt(diff, assertion, file);
     const response = await testAssertion(prompt);
+    console.log(response);
     expect(response).toBeTruthy();
-  }, 50000);
+  }, 100000);
 
   it('Should create a nicely formatted message using the response', async () => {
-    expect(
-      writeAnalysis(
-        `[{"satisfied": "\u2705", "scope": "diff", "feedback": "You did a great job!"}, {"satisfied": "\u2705", "scope": "wholeFile", "feedback": "Look. At. You. Go!"}]`
-      )
-    ).toContain('You did a great job!');
+    const feedback: openAiFeedback = {
+      feedback: [
+        { satisfied: '\u2705', scope: 'Diff', feedback: 'You did a great job!' },
+        { satisfied: '\u2705', scope: 'Integrated', feedback: 'Look. At. You. Go!' },
+      ],
+    };
+    expect(writeAnalysis(feedback)).toContain('You did a great job!');
   });
 });
diff --git a/bable.config.js b/babel.config.js
similarity index 100%
rename from bable.config.js
rename to babel.config.js
diff --git a/dist/index.js b/dist/index.js
index fc9dcc6..f09bcda 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -219,7 +219,7 @@ async function main() {
     const file = await (0, github_1.getFileContent)(changedFiles, org, repoName);
     const prompt = (0, open_ai_1.generatePrompt)(diff, assertion, file);
     const rawAnalysis = await (0, open_ai_1.testAssertion)(prompt);
-    const analysis = (0, open_ai_1.writeAnalysis)(rawAnalysis?.toString() ?? '');
+    const analysis = (0, open_ai_1.writeAnalysis)(rawAnalysis);
     console.log(analysis);
     core.setOutput('analysis', analysis);
     return analysis;
@@ -273,7 +273,42 @@ const openai = new openAi({
 });
 // This will generate our prompt using the diff, assertion, and whole file
 const generatePrompt = (diff, assertion, file) => {
-    const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review will be conducted through two distinct lenses, both centered around an assertion related to usability. The first lens will focus on examining the diff itself — providing targeted feedback on what the PR author actually contributed. The second lens will compare the diff to the entire set of changed files, assessing how the contribution fits within the larger context in relation to the usability assertion. For each lens, provide feedback and determine if the usability assertion is satisfied. You should speak directly to the author and refer to them in second person. Your output should be a JSON-formatted array with two objects. Each object should contain the following properties: 'satisfied' (either a ✅ or ❌ to indicate if the assertion is met), 'scope' (either 'Diff' or 'Integrated'), and 'feedback' (a string providing your targeted feedback for that lens). Here's the assertion: ${assertion}\n\nHere's the diff:\n\n${diff}\n\nHere's the original files:\n\n${file}\n\nBear in mind that some of the files may have been renamed. Remember, do not wrap the JSON in a code block.`;
+    const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review comprises two distinct perspectives, each focused on a specific aspect of usability.
+
+- **First Perspective**: Examine the PR's diff. Provide targeted feedback on the author's contribution.
+- **Second Perspective**: Assess how the diff integrates with the entire set of changed files, evaluating its contribution to the overall usability.
+
+**Usability Assertion**: ${assertion}
+
+**PR Diff**: ${diff}
+
+**Original Files**: ${file}
+
+(Note: Some files may have been renamed.)
+
+**Your Task**: Provide feedback for each perspective. Determine if the usability assertion is met in each context.
+
+**Output Format**: Your response should be a JSON object with a single 'feedback' key, whose value is an array of exactly two objects. Each object must have the following properties:
+- 'satisfied': Indicate if the assertion is met (✅ for yes, ❌ for no).
+- 'scope': 'Diff' for the first perspective, 'Integrated' for the second.
+- 'feedback': A string providing your targeted feedback.
+
+Example Output:
+{
+  "feedback": [
+    {
+      "satisfied": "✅",
+      "scope": "Diff",
+      "feedback": "Your changes in the PR are clear and enhance the readability of the documentation."
+    },
+    {
+      "satisfied": "❌",
+      "scope": "Integrated",
+      "feedback": "The changes do not align well with the overall structure and flow of the existing documentation."
+    }
+  ]
+}
+`;
     return comboPrompt;
 };
 exports.generatePrompt = generatePrompt;
@@ -302,10 +337,12 @@ const testAssertion = async (prompt) => {
         const chatCompletion = await openai.chat.completions.create({
             model: 'gpt-4-1106-preview',
             messages: conversation,
+            response_format: { type: 'json_object' },
        });
         const analysis = chatCompletion.choices[0].message.content;
         console.log(`✅ Got analysis from OpenAI`);
-        return analysis;
+        const parsedAnalysis = JSON.parse(analysis);
+        return parsedAnalysis;
     }
     catch (error) {
         console.error(error);
@@ -315,18 +352,19 @@
 exports.testAssertion = testAssertion;
 // We decided to send things back as JSON so we can manipulate the data in the response we'll be sending back to GitHub
 const writeAnalysis = (analysis) => {
-    // We've still got to double-check because ChatGPT will sometimes return a string that's not valid JSON by wrapping it in code blocks
-    const regex = /^```(json)?/gm;
-    analysis = analysis.replace(regex, '');
-    const analysisJSON = JSON.parse(analysis);
-    let message = `## DX: Assertion Testing\n\n`;
-    const feedback = analysisJSON.map((item) => {
-        // we'll create some markdown to make the feedback look nice
-        return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
-    });
-    feedback.unshift(message);
-    const feedbackString = feedback.join('');
-    return feedbackString;
+    if (analysis === null) {
+        return `Error testing the assertions. Check the logs.`;
+    }
+    else {
+        let message = `## DX: Assertion Testing\n\n`;
+        const feedback = analysis.feedback.map((item) => {
+            // we'll create some markdown to make the feedback look nice
+            return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
+        });
+        feedback.unshift(message);
+        const feedbackString = feedback.join('');
+        return feedbackString;
+    }
 };
 exports.writeAnalysis = writeAnalysis;
diff --git a/dist/open_ai/index.js b/dist/open_ai/index.js
index ef4d30b..f7990c2 100644
--- a/dist/open_ai/index.js
+++ b/dist/open_ai/index.js
@@ -37,7 +37,42 @@ const openai = new openAi({
 });
 // This will generate our prompt using the diff, assertion, and whole file
 const generatePrompt = (diff, assertion, file) => {
-    const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review will be conducted through two distinct lenses, both centered around an assertion related to usability. The first lens will focus on examining the diff itself — providing targeted feedback on what the PR author actually contributed. The second lens will compare the diff to the entire set of changed files, assessing how the contribution fits within the larger context in relation to the usability assertion. For each lens, provide feedback and determine if the usability assertion is satisfied. You should speak directly to the author and refer to them in second person. Your output should be a JSON-formatted array with two objects. Each object should contain the following properties: 'satisfied' (either a ✅ or ❌ to indicate if the assertion is met), 'scope' (either 'Diff' or 'Integrated'), and 'feedback' (a string providing your targeted feedback for that lens). Here's the assertion: ${assertion}\n\nHere's the diff:\n\n${diff}\n\nHere's the original files:\n\n${file}\n\nBear in mind that some of the files may have been renamed. Remember, do not wrap the JSON in a code block.`;
+    const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review comprises two distinct perspectives, each focused on a specific aspect of usability.
+
+- **First Perspective**: Examine the PR's diff. Provide targeted feedback on the author's contribution.
+- **Second Perspective**: Assess how the diff integrates with the entire set of changed files, evaluating its contribution to the overall usability.
+
+**Usability Assertion**: ${assertion}
+
+**PR Diff**: ${diff}
+
+**Original Files**: ${file}
+
+(Note: Some files may have been renamed.)
+
+**Your Task**: Provide feedback for each perspective. Determine if the usability assertion is met in each context.
+
+**Output Format**: Your response should be a JSON object with a single 'feedback' key, whose value is an array of exactly two objects. Each object must have the following properties:
+- 'satisfied': Indicate if the assertion is met (✅ for yes, ❌ for no).
+- 'scope': 'Diff' for the first perspective, 'Integrated' for the second.
+- 'feedback': A string providing your targeted feedback.
+
+Example Output:
+{
+  "feedback": [
+    {
+      "satisfied": "✅",
+      "scope": "Diff",
+      "feedback": "Your changes in the PR are clear and enhance the readability of the documentation."
+    },
+    {
+      "satisfied": "❌",
+      "scope": "Integrated",
+      "feedback": "The changes do not align well with the overall structure and flow of the existing documentation."
+    }
+  ]
+}
+`;
     return comboPrompt;
 };
 exports.generatePrompt = generatePrompt;
@@ -66,10 +101,12 @@ const testAssertion = async (prompt) => {
         const chatCompletion = await openai.chat.completions.create({
             model: 'gpt-4-1106-preview',
             messages: conversation,
+            response_format: { type: 'json_object' },
         });
         const analysis = chatCompletion.choices[0].message.content;
         console.log(`✅ Got analysis from OpenAI`);
-        return analysis;
+        const parsedAnalysis = JSON.parse(analysis);
+        return parsedAnalysis;
     }
     catch (error) {
         console.error(error);
@@ -79,17 +116,18 @@
 exports.testAssertion = testAssertion;
 // We decided to send things back as JSON so we can manipulate the data in the response we'll be sending back to GitHub
 const writeAnalysis = (analysis) => {
-    // We've still got to double-check because ChatGPT will sometimes return a string that's not valid JSON by wrapping it in code blocks
-    const regex = /^```(json)?/gm;
-    analysis = analysis.replace(regex, '');
-    const analysisJSON = JSON.parse(analysis);
-    let message = `## DX: Assertion Testing\n\n`;
-    const feedback = analysisJSON.map((item) => {
-        // we'll create some markdown to make the feedback look nice
-        return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
-    });
-    feedback.unshift(message);
-    const feedbackString = feedback.join('');
-    return feedbackString;
+    if (analysis === null) {
+        return `Error testing the assertions. Check the logs.`;
+    }
+    else {
+        let message = `## DX: Assertion Testing\n\n`;
+        const feedback = analysis.feedback.map((item) => {
+            // we'll create some markdown to make the feedback look nice
+            return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
+        });
+        feedback.unshift(message);
+        const feedbackString = feedback.join('');
+        return feedbackString;
+    }
 };
 exports.writeAnalysis = writeAnalysis;
diff --git a/package.json b/package.json
index b585a3b..2b0925f 100644
--- a/package.json
+++ b/package.json
@@ -7,6 +7,7 @@
     "build": "tsc && ncc build dist/index.js -o dist",
     "watch": "tsc -w & nodemon --no-deprecation dist/index.js",
     "test": "jest --watchAll --verbose --silent",
+    "test-loud": "jest --watchAll --verbose",
     "start": "node --no-deprecation dist/index.js"
   },
   "keywords": [],
diff --git a/src/index.ts b/src/index.ts
index 020d6a3..77640ff 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,7 +1,7 @@
 import dotenv from 'dotenv';
 import * as core from '@actions/core';
 import { getSinglePR, getAssertion, getDiff, getChangedFiles, getFileContent } from './github';
-import { generatePrompt, testAssertion, writeAnalysis } from './open_ai';
+import { generatePrompt, testAssertion, writeAnalysis, openAiFeedback } from './open_ai';
 
 dotenv.config();
 
@@ -26,7 +26,7 @@ async function main() {
   const file: any = await getFileContent(changedFiles, org, repoName);
   const prompt: string = generatePrompt(diff, assertion, file);
   const rawAnalysis = await testAssertion(prompt);
-  const analysis = writeAnalysis(rawAnalysis?.toString() ?? '');
+  const analysis = writeAnalysis(rawAnalysis);
   console.log(analysis);
   core.setOutput('analysis', analysis);
   return analysis;
diff --git a/src/open_ai/index.ts b/src/open_ai/index.ts
index 7167bd0..df7c52d 100644
--- a/src/open_ai/index.ts
+++ b/src/open_ai/index.ts
@@ -10,9 +10,55 @@ const openai = new openAi({
   apiKey: api_key,
 });
 
+/**
+ * We're using OpenAI's JSON mode, so we use this type to dictate how we can
+ * access properties and iterate over them in the output.
+ */
+export type openAiFeedback = {
+  feedback: [
+    { satisfied: string; scope: string; feedback: string },
+    { satisfied: string; scope: string; feedback: string }
+  ];
+};
+
 // This will generate our prompt using the diff, assertion, and whole file
 export const generatePrompt = (diff: string, assertion: string | null, file: string): string => {
-  const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review will be conducted through two distinct lenses, both centered around an assertion related to usability. The first lens will focus on examining the diff itself — providing targeted feedback on what the PR author actually contributed. The second lens will compare the diff to the entire set of changed files, assessing how the contribution fits within the larger context in relation to the usability assertion. For each lens, provide feedback and determine if the usability assertion is satisfied. You should speak directly to the author and refer to them in second person. Your output should be a JSON-formatted array with two objects. Each object should contain the following properties: 'satisfied' (either a ✅ or ❌ to indicate if the assertion is met), 'scope' (either 'Diff' or 'Integrated'), and 'feedback' (a string providing your targeted feedback for that lens). Here's the assertion: ${assertion}\n\nHere's the diff:\n\n${diff}\n\nHere's the original files:\n\n${file}\n\nBear in mind that some of the files may have been renamed. Remember, do not wrap the JSON in a code block.`;
+  const comboPrompt = `As a senior engineer, you're tasked with reviewing a documentation PR. Your review comprises two distinct perspectives, each focused on a specific aspect of usability.
+
+- **First Perspective**: Examine the PR's diff. Provide targeted feedback on the author's contribution.
+- **Second Perspective**: Assess how the diff integrates with the entire set of changed files, evaluating its contribution to the overall usability.
+
+**Usability Assertion**: ${assertion}
+
+**PR Diff**: ${diff}
+
+**Original Files**: ${file}
+
+(Note: Some files may have been renamed.)
+
+**Your Task**: Provide feedback for each perspective. Determine if the usability assertion is met in each context.
+
+**Output Format**: Your response should be a JSON object with a single 'feedback' key, whose value is an array of exactly two objects. Each object must have the following properties:
+- 'satisfied': Indicate if the assertion is met (✅ for yes, ❌ for no).
+- 'scope': 'Diff' for the first perspective, 'Integrated' for the second.
+- 'feedback': A string providing your targeted feedback.
+
+Example Output:
+{
+  "feedback": [
+    {
+      "satisfied": "✅",
+      "scope": "Diff",
+      "feedback": "Your changes in the PR are clear and enhance the readability of the documentation."
+    },
+    {
+      "satisfied": "❌",
+      "scope": "Integrated",
+      "feedback": "The changes do not align well with the overall structure and flow of the existing documentation."
+    }
+  ]
+}
+`;
   return comboPrompt;
 };
 
@@ -29,7 +75,7 @@ export const testConnection = async (): Promise<any> => {
 
 // Then, we'll create a function that takes in the diff, the author's assertion(s), and the prompt,
 // and returns the analysis from OpenAI
-export const testAssertion = async (prompt: string): Promise<string | null> => {
+export const testAssertion = async (prompt: string): Promise<openAiFeedback | null> => {
   let conversation = [
     {
       role: 'system',
@@ -41,11 +87,13 @@
     const chatCompletion = await openai.chat.completions.create({
       model: 'gpt-4-1106-preview',
       messages: conversation,
+      response_format: { type: 'json_object' },
     });
-    const analysis: any = chatCompletion.choices[0].message.content;
+    const analysis = chatCompletion.choices[0].message.content;
     console.log(`✅ Got analysis from OpenAI`);
-    return analysis;
+    const parsedAnalysis: openAiFeedback = JSON.parse(analysis);
+    return parsedAnalysis;
   } catch (error) {
     console.error(error);
     return null;
@@ -53,17 +101,17 @@
 };
 
 // We decided to send things back as JSON so we can manipulate the data in the response we'll be sending back to GitHub
-export const writeAnalysis = (analysis: string): string => {
-  // We've still got to double-check because ChatGPT will sometimes return a string that's not valid JSON by wrapping it in code blocks
-  const regex = /^```(json)?/gm;
-  analysis = analysis.replace(regex, '');
-  const analysisJSON = JSON.parse(analysis);
-  let message = `## DX: Assertion Testing\n\n`;
-  const feedback = analysisJSON.map((item: any) => {
-    // we'll create some markdown to make the feedback look nice
-    return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
-  });
-  feedback.unshift(message);
-  const feedbackString = feedback.join('');
-  return feedbackString;
+export const writeAnalysis = (analysis: openAiFeedback | null): string => {
+  if (analysis === null) {
+    return `Error testing the assertions. Check the logs.`;
+  } else {
+    let message = `## DX: Assertion Testing\n\n`;
+    const feedback = analysis.feedback.map((item: any) => {
+      // we'll create some markdown to make the feedback look nice
+      return `### ${item.satisfied} ${item.scope}\n\n${item.feedback}\n\n`;
+    });
+    feedback.unshift(message);
+    const feedbackString = feedback.join('');
+    return feedbackString;
+  }
 };
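
Usage note: after this refactor, the JSON-mode pipeline composes end to end as sketched below. This is a minimal illustration, not part of the diff; the diff, assertion, and file values are hypothetical placeholders, and it assumes the API key environment variable the module already reads is set.

```ts
import { generatePrompt, testAssertion, writeAnalysis, openAiFeedback } from './src/open_ai';

// Minimal sketch of the new flow with placeholder inputs; the real action
// pulls the diff, assertion, and file contents from GitHub via src/github.
const demo = async (): Promise<void> => {
  const prompt: string = generatePrompt(
    'diff --git ...', // hypothetical PR diff
    'The new page is easy to scan.', // hypothetical usability assertion
    '# Some doc\n...' // hypothetical original file contents
  );
  // testAssertion now returns a parsed openAiFeedback object (or null on error)...
  const analysis: openAiFeedback | null = await testAssertion(prompt);
  // ...so writeAnalysis takes the object directly; no more regex-stripping code fences.
  console.log(writeAnalysis(analysis));
};

demo();
```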