fixing final image recognition workflow #72

Merged: 3 commits merged on Mar 14, 2024
8 changes: 8 additions & 0 deletions Stronger.xcodeproj/project.pbxproj
@@ -7,6 +7,8 @@
objects = {

/* Begin PBXBuildFile section */
1E0F29852BA394ED008CC7D3 /* LLMOnboardingViewCamera.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1E0F29842BA394ED008CC7D3 /* LLMOnboardingViewCamera.swift */; };
1E0F29872BA394FD008CC7D3 /* LLMOpenAITokenOnboardingCamera.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1E0F29862BA394FD008CC7D3 /* LLMOpenAITokenOnboardingCamera.swift */; };
1E9330CB2B968B1800BC620A /* FoodClassifierApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1E9330CA2B968B1800BC620A /* FoodClassifierApp.swift */; };
1E9330CD2B968BF900BC620A /* MobileNetV2.mlmodel in Sources */ = {isa = PBXBuildFile; fileRef = 1E9330CC2B968BF900BC620A /* MobileNetV2.mlmodel */; };
1E9330CF2B968C0700BC620A /* MobileNet.mlmodel in Sources */ = {isa = PBXBuildFile; fileRef = 1E9330CE2B968C0700BC620A /* MobileNet.mlmodel */; };
@@ -142,6 +144,8 @@
/* End PBXContainerItemProxy section */

/* Begin PBXFileReference section */
1E0F29842BA394ED008CC7D3 /* LLMOnboardingViewCamera.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LLMOnboardingViewCamera.swift; sourceTree = "<group>"; };
1E0F29862BA394FD008CC7D3 /* LLMOpenAITokenOnboardingCamera.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LLMOpenAITokenOnboardingCamera.swift; sourceTree = "<group>"; };
1E9330CA2B968B1800BC620A /* FoodClassifierApp.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoodClassifierApp.swift; sourceTree = "<group>"; };
1E9330CC2B968BF900BC620A /* MobileNetV2.mlmodel */ = {isa = PBXFileReference; lastKnownFileType = file.mlmodel; path = MobileNetV2.mlmodel; sourceTree = "<group>"; };
1E9330CE2B968C0700BC620A /* MobileNet.mlmodel */ = {isa = PBXFileReference; lastKnownFileType = file.mlmodel; path = MobileNet.mlmodel; sourceTree = "<group>"; };
@@ -398,6 +402,8 @@
1E9330D12B968FA800BC620A /* ChatWindowAfterCamera.swift */,
63A3157D2B6C760C00B6B9E4 /* LLMOnboardingView.swift */,
63A315802B6C7A3F00B6B9E4 /* LLMOpenAITokenOnboarding.swift */,
1E0F29842BA394ED008CC7D3 /* LLMOnboardingViewCamera.swift */,
1E0F29862BA394FD008CC7D3 /* LLMOpenAITokenOnboardingCamera.swift */,
6325F3922B830B9C00A31314 /* ProteinRing.swift */,
63935E9A2B8C21C500ADB7D1 /* ProteinStats.swift */,
1E9330CA2B968B1800BC620A /* FoodClassifierApp.swift */,
@@ -742,6 +748,7 @@
2FE5DCB129EE6107004B9AB4 /* AccountOnboarding.swift in Sources */,
38C4563E2B96CF3B009D69AA /* WorkoutHomeButton.swift in Sources */,
2FE5DC3A29EDD7CA004B9AB4 /* Welcome.swift in Sources */,
1E0F29852BA394ED008CC7D3 /* LLMOnboardingViewCamera.swift in Sources */,
4051334D2B8C8D5400ED62BA /* WorkoutHome.swift in Sources */,
2FE5DC3829EDD7CA004B9AB4 /* InterestingModules.swift in Sources */,
40EFC2A62B87DAD2006B3B59 /* Height.swift in Sources */,
@@ -777,6 +784,7 @@
566155292AB8447C00209B80 /* Package+LicenseType.swift in Sources */,
5680DD392AB8983D004E6D4A /* PackageCell.swift in Sources */,
2F5E32BD297E05EA003432F8 /* StrongerDelegate.swift in Sources */,
1E0F29872BA394FD008CC7D3 /* LLMOpenAITokenOnboardingCamera.swift in Sources */,
405133482B8C808E00ED62BA /* WorkoutSelection.swift in Sources */,
405133572B8F332100ED62BA /* StartDate.swift in Sources */,
63935E9B2B8C21C500ADB7D1 /* ProteinStats.swift in Sources */,
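For context: adding a Swift source file to an Xcode target touches four places in project.pbxproj (a PBXBuildFile entry, a PBXFileReference entry, membership in the owning PBXGroup, and a line in the target's Sources build phase), which is why each of the two new camera onboarding files appears four times in this diff.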
@@ -1,4 +1,5 @@
{
"originHash" : "2ece80fc0af652a3895c8e8f601d5e43ffbaa2f147638cdb5f5280adf8b050d4",
"pins" : [
{
"identity" : "abseil-cpp-binary",
@@ -361,5 +362,5 @@
}
}
],
"version" : 2
"version" : 3
}
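The new originHash key and the version bump from 2 to 3 in this resolved-packages file come from the newer Package.resolved format written by recent Xcode/SwiftPM toolchains; the hash fingerprints the package dependency inputs so SwiftPM can detect a stale resolution. Committing the regenerated file keeps other checkouts from rewriting it on first build.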
47 changes: 25 additions & 22 deletions Stronger/ProteinTracker/ChatWindowAfterCamera.swift
@@ -19,31 +19,34 @@
import Vision

struct ChatWindowAfterCamera: View {
private static let llmSchema: LLMOpenAISchema = .init(
private static let llmSchemaCamera: LLMOpenAISchema = .init(
parameters: .init(
modelType: .gpt3_5Turbo,
systemPrompt: """
You are Pro-Chatbot. Your task is to confirm the food items logged by the user using a camera \
and proceed with logging their total protein intake.
You are Pro-bot. Your task is to estimate the protein content of the meal the user just \
recorded with images, and log the user's total protein intake. \

You will approach this task in a step-by-step manner.\

[STEP 1]. Process the [logged foods] list, if there are "_" in any strings or food, remove them and replace it with a space.

[STEP 2]. Your first prompt should say: "These are the foods you logged with your camera: [logged foods]. Is this correct?"

[STEP 1]. Your first prompt should say: "These are the foods you logged with your camera: [logged foods]. Is this correct?"


[STEP 2]. Now for each food item in the list, call the "GetProteinContent" \
function to get its protein content.

[STEP 3]. Ask the user if the protein content for each food item is correct. Then, ask if \
the quantity of each item is correct. If not, prompt them to alter the number of each item, and \
for that quantity, multiply that individual item's protein content by the quantity.

[STEP 3]. Add the protein content of all the food items to get the total protein intake \
for the user. Ask the user if this total protein content seems correct (show the user the \
calculation breakdown. Ask the user if they want to add more food items.

[STEP 4]. If the user adds more food items, repeat the steps to compute the protein content\
[STEP 3]. Your next prompt will say "The protein content for the food is: [logged food]: [x] grams." \
Repeat for each food. To get the protein content of each food item in the list, call the "get_protein_content" \
function. Do NOT ask the user for protein information. \
Estimate it yourself using information from the "get_protein_content" function. \

[STEP 4]. You can ask the user for the quantity of food in terms of fist size or palm size. \
Do not ask the user for quantity in terms of grams. Estimate the protein content to the best of your ability. \

[STEP 5]. Add the protein content of all the food items to get the total protein intake \
for the user. Ask the user if they want to add more food items.

[STEP 6]. If the user adds more food items, repeat the steps to compute the protein content \
for every new food item and update the total protein intake.

[STEP 5]. If the user does not add new food items, call the "LogProteinIntake" function \
[STEP 7]. If the user does not add new food items, call the "log_protein_content" function \
to log the total protein intake for the user. Once you have logged the total protein \
intake for the user, inform the user that their protein intake has been logged \
and end the conversation.
@@ -54,7 +57,7 @@
LogProteinIntake()
}

@LLMSessionProvider(schema: Self.llmSchema) var session: LLMOpenAISession
@LLMSessionProvider(schema: Self.llmSchemaCamera) var session: LLMOpenAISession
@State var showOnboarding = true

var loggedFoodItems: [String]
@@ -67,7 +70,7 @@
)
.navigationTitle("Pro-Bot")
.sheet(isPresented: $showOnboarding) {
LLMOnboardingView(showOnboarding: $showOnboarding)
LLMOnboardingViewCamera(showOnboarding: $showOnboarding)

Check warning (Codecov / codecov/patch): added line #L73 in Stronger/ProteinTracker/ChatWindowAfterCamera.swift was not covered by tests.
}
.task {
session.context.removeAll()
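Note on the function calls: the schema's function collection registers GetProteinContent() and LogProteinIntake(), while the prompt refers to "get_protein_content" and "log_protein_content"; in SpeziLLMOpenAI the model-facing name comes from each function type's static name property. Neither implementation is part of this diff, so the following is only a minimal sketch of what such a function could look like, with the parameter name and the lookup table as illustrative assumptions:

import SpeziLLMOpenAI

// Illustrative sketch only; the real GetProteinContent is not shown in this PR.
// SpeziLLMOpenAI exposes a Swift type to the model via the LLMFunction protocol,
// and `name` is the identifier the system prompt tells the model to call.
struct GetProteinContent: LLMFunction {
    static let name: String = "get_protein_content"
    static let description: String = "Estimate the protein content of a food item in grams."

    // The model fills in this argument when it decides to call the function.
    @Parameter(description: "The food item to look up, e.g. chicken breast")
    var food: String

    func execute() async throws -> String? {
        // Hypothetical lookup; a real implementation might query a nutrition
        // database or return context the model can use for its own estimate.
        let proteinPer100g: [String: Double] = [
            "chicken breast": 31.0,
            "egg": 13.0,
            "tofu": 8.0
        ]
        guard let grams = proteinPer100g[food.lowercased()] else {
            return "No table entry for \(food); estimate from general knowledge."
        }
        return "\(food) has roughly \(grams) grams of protein per 100 g."
    }
}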
4 changes: 2 additions & 2 deletions Stronger/ProteinTracker/FoodClassifierApp.swift
@@ -231,7 +231,7 @@ struct FoodClassifierApp: View {
@State private var navigateToChatAfterCamera = false

var body: some View {
NavigationStack {
// NavigationStack {
VStack {
Spacer()
imageDisplay
@@ -259,7 +259,7 @@
}
.alert(isPresented: $imageClassifier.showAlertAfterLog, content: logFoodAlert)
.navigationTitle("Food Image Classifier")
}
// }
}
private var selectOrTakePictureButton: some View {
Button("Select or Take Picture") {
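Commenting out the inner NavigationStack (and its matching closing brace below) suggests FoodClassifierApp is now always reached from a navigation stack that already exists, e.g. via the camera flow; nesting a second stack gives the pushed view its own empty path, so .navigationTitle and navigation links bind to the inner bar instead of the visible one. A minimal sketch of the assumed embedding (the parent view here is hypothetical):

import SwiftUI

// Hypothetical parent: if FoodClassifierApp is pushed like this, its body
// must not open a second NavigationStack of its own.
struct ProteinTrackerHome: View {
    var body: some View {
        NavigationStack {                       // the single, outer stack
            NavigationLink("Log food with camera") {
                FoodClassifierApp()             // inherits the outer stack, so
            }                                   // "Food Image Classifier" shows
        }                                       // in the shared navigation bar
    }
}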
29 changes: 29 additions & 0 deletions Stronger/ProteinTracker/LLMOnboardingViewCamera.swift
@@ -0,0 +1,29 @@
//
// LLMOnboardingViewCamera.swift
// Stronger
//
// Created by Kevin Zhu on 03/02/24.
//
// SPDX-FileCopyrightText: 2023 Stanford University
//
// SPDX-License-Identifier: MIT
//

import SpeziLLMOpenAI
import SpeziOnboarding
import SwiftUI

struct LLMOnboardingViewCamera: View {
@Binding var showOnboarding: Bool

var body: some View {
OnboardingStack {
LLMOpenAITokenOnboardingCamera(showOnboarding: $showOnboarding)
}
.interactiveDismissDisabled(showOnboarding)
}

Check warning (Codecov / codecov/patch): added lines #L19-L24 in Stronger/ProteinTracker/LLMOnboardingViewCamera.swift were not covered by tests.
}

#Preview {
LLMOnboardingViewCamera(showOnboarding: .constant(false))
}
26 changes: 26 additions & 0 deletions Stronger/ProteinTracker/LLMOpenAITokenOnboardingCamera.swift
@@ -0,0 +1,26 @@
//
// This source file is part of the Stanford Spezi open source project
//
// SPDX-FileCopyrightText: 2022 Stanford University and the project authors (see CONTRIBUTORS.md)
//
// SPDX-License-Identifier: MIT
//

import SpeziLLMOpenAI
import SpeziOnboarding
import SwiftUI

/// Onboarding view that gets the OpenAI token from the user.
struct LLMOpenAITokenOnboardingCamera: View {
@Binding var showOnboarding: Bool

var body: some View {
LLMOpenAIAPITokenOnboardingStep {
showOnboarding = false
}
}

Check warning (Codecov / codecov/patch): added lines #L17-L21 in Stronger/ProteinTracker/LLMOpenAITokenOnboardingCamera.swift were not covered by tests.
}

#Preview {
LLMOpenAITokenOnboardingCamera(showOnboarding: .constant(false))
}
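Both new files mirror the existing LLMOnboardingView / LLMOpenAITokenOnboarding pair; duplicating them rather than reusing the originals appears intended to let the camera chat drive its own showOnboarding binding independently of the main chat flow, since the two views differ only in which onboarding step they wrap.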
18 changes: 0 additions & 18 deletions Stronger/Resources/Localizable.xcstrings
@@ -108,9 +108,6 @@
}
}
}
},
"Commonly Saved Items" : {

},
"Complete" : {

@@ -479,12 +476,6 @@
}
}
}
},
"Is this your last set for this exercise?" : {

},
"Large portion" : {

},
"Last Week's Fitness Progress\n" : {

@@ -535,12 +526,6 @@
}
}
}
},
"New Meal" : {

},
"Next" : {

},
"No workout available" : {

@@ -893,9 +878,6 @@
},
"Workout Selection" : {

},
"You indicated: %@" : {

},
"Your current week is %lld" : {

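The deleted Localizable.xcstrings entries ("Commonly Saved Items", "Is this your last set for this exercise?", "Large portion", "New Meal", "Next", "You indicated: %@") are presumably keys whose string literals no longer appear in code after the workflow change; Xcode keeps string catalogs in sync at build time, so unreferenced keys are pruned automatically.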