Commit a646b3e
tests: rename huggingface name (#309)
nicoloboschi authored Mar 4, 2024
1 parent 0b422b9 commit a646b3e
Showing 7 changed files with 12 additions and 12 deletions.
@@ -144,7 +144,7 @@ def bedrock_cohere_embedding():
 
 
 @pytest.fixture
-def huggingface_hub_llm():
+def huggingface_hub_flant5xxl_llm():
     return HuggingFaceHub(
         repo_id="google/flan-t5-xxl",
         huggingfacehub_api_token=get_required_env("HUGGINGFACE_HUB_KEY"),
@@ -153,7 +153,7 @@ def huggingface_hub_llm():
 
 
 @pytest.fixture
-def huggingface_hub_embedding():
+def huggingface_hub_minilml6v2_embedding():
     return HuggingFaceInferenceAPIEmbeddings(
         api_key=get_required_env("HUGGINGFACE_HUB_KEY"),
         model_name="sentence-transformers/all-MiniLM-l6-v2",
@@ -190,7 +190,7 @@ def nvidia_mixtral_llm():
         ("vertex_embedding", "vertex_llm"),
         ("bedrock_titan_embedding", "bedrock_anthropic_llm"),
         ("bedrock_cohere_embedding", "bedrock_meta_llm"),
-        ("huggingface_hub_embedding", "huggingface_hub_llm"),
+        ("huggingface_hub_flant5xxl_llm", "huggingface_hub_minilml6v2_embedding"),
         ("nvidia_embedding", "nvidia_mixtral_llm"),
     ],
 )
@@ -31,7 +31,7 @@
     MetadataFilters,
     ExactMatchFilter,
 )
-from llama_index.vector_stores.astra import AstraDBVectorStore
+from llama_index.vector_stores.astra_db import AstraDBVectorStore
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.llms.openai import OpenAI
 
2 changes: 1 addition & 1 deletion ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py
@@ -37,7 +37,7 @@
     MetadataFilters,
     ExactMatchFilter,
 )
-from llama_index.vector_stores.astra import AstraDBVectorStore
+from llama_index.vector_stores.astra_db import AstraDBVectorStore
 from llama_index.llms.openai import OpenAI
 
 
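The import change above tracks the module rename in llama-index (llama_index.vector_stores.astra became llama_index.vector_stores.astra_db). A minimal sketch of constructing the store from the new path follows; the collection name, token, endpoint, and embedding dimension are illustrative assumptions, not values taken from the test file.

# Minimal sketch (placeholder values; the real test reads credentials from
# environment-variable helpers such as get_required_env).
from llama_index.vector_stores.astra_db import AstraDBVectorStore

vector_store = AstraDBVectorStore(
    token="AstraCS:...",  # Astra DB application token (placeholder)
    api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",
    collection_name="ragstack_e2e_test",  # hypothetical collection name
    embedding_dimension=1536,  # must match the embedding model in use
)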
@@ -154,15 +154,15 @@ def bedrock_cohere_embedding():
 
 
 @pytest.fixture
-def huggingface_hub_llm():
+def huggingface_hub_flant5xxl_llm():
     return "huggingface-hub", HuggingFaceInferenceAPI(
         model_name="google/flan-t5-xxl",
         token=get_required_env("HUGGINGFACE_HUB_KEY"),
     )
 
 
 @pytest.fixture
-def huggingface_hub_embedding():
+def huggingface_hub_minilml6v2_embedding():
     # There's a bug in Llama-Index HuggingFace Hub embedding
     # so we use LangChain's wrapped one for now
     return (
@@ -184,7 +184,7 @@ def huggingface_hub_embedding():
         ("vertex_embedding", "vertex_llm"),
         ("bedrock_titan_embedding", "bedrock_anthropic_llm"),
         ("bedrock_cohere_embedding", "bedrock_meta_llm"),
-        ("huggingface_hub_embedding", "huggingface_hub_llm"),
+        ("huggingface_hub_minilml6v2_embedding", "huggingface_hub_flant5xxl_llm"),
     ],
 )
 def test_rag(vector_store, embedding, llm, request):
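The parametrize entries are fixture names passed as strings, which is why the rename has to land both at the fixture definitions and in this list. Below is a minimal sketch of the usual pattern for resolving such string parameters inside the test with request.getfixturevalue; the body of test_rag is not shown in the diff, and the vector_store fixture is omitted here for brevity, so the wiring below is an assumption for illustration only.

# Sketch of the fixture-name-as-string pattern (assumed; only the signature
# of test_rag appears in the diff above).
import pytest


@pytest.fixture
def huggingface_hub_minilml6v2_embedding():
    ...  # returns the embedding object (see the fixture in the diff)


@pytest.fixture
def huggingface_hub_flant5xxl_llm():
    ...  # returns the LLM object (see the fixture in the diff)


@pytest.mark.parametrize(
    "embedding,llm",
    [("huggingface_hub_minilml6v2_embedding", "huggingface_hub_flant5xxl_llm")],
)
def test_rag(embedding, llm, request):
    # The string parameters are resolved to the actual fixture values at run
    # time, so they must match the renamed fixture functions exactly.
    embedding = request.getfixturevalue(embedding)
    llm = request.getfixturevalue(llm)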
@@ -14,7 +14,7 @@
 try:
     from llama_index.vector_stores import AstraDBVectorStore
 except ImportError:
-    from llama_index.vector_stores.astra import AstraDBVectorStore
+    from llama_index.vector_stores.astra_db import AstraDBVectorStore
 
 from e2e_tests.test_utils import get_required_env, random_string
 from e2e_tests.test_utils.vector_store_handler import (
4 changes: 2 additions & 2 deletions ragstack-e2e-tests/pyproject.langchain.toml
@@ -30,8 +30,8 @@ langchain = { git = "https://github.com/langchain-ai/langchain.git", branch = "m
 langchain_core = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/core" }
 langchain_community = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/community" }
 langchain-openai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/openai" }
-langchain-google-genai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/google-genai" }
-langchain-google-vertexai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/google-vertexai" }
+langchain-google-genai = { git = "https://github.com/langchain-ai/langchain-google.git", branch = "main", subdirectory = "libs/genai" }
+langchain-google-vertexai = { git = "https://github.com/langchain-ai/langchain-google.git", branch = "main", subdirectory = "libs/vertexai" }
 langchain-nvidia-ai-endpoints = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/nvidia-ai-endpoints" }
 llama-index = { version = "0.9.48", extras = ["langchain"] }
 llama-parse = { version = "0.1.4" }
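The Google integrations now come from the langchain-google monorepo rather than the langchain partners tree; the installable package and module names should stay the same, so test imports shouldn't need to change. A quick sanity-check sketch follows; the classes named here are common entry points of these packages and are assumptions, not necessarily the ones the e2e tests use.

# Quick import check after switching the dependency source (class names are
# illustrative; the e2e tests may use different ones).
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_google_vertexai import ChatVertexAI, VertexAIEmbeddings

print(ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings, ChatVertexAI, VertexAIEmbeddings)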
2 changes: 1 addition & 1 deletion ragstack-e2e-tests/pyproject.llamaindex.toml
@@ -26,7 +26,7 @@ beautifulsoup4 = "^4"
 
 llama-index = { git = "https://github.com/run-llama/llama_index.git", branch = "main" }
 llama-index-embeddings-langchain = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-langchain" }
-llama-index-vector-stores-astra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-astra" }
+llama-index-vector-stores-astra-db = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db" }
 llama-index-vector-stores-cassandra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-cassandra" }
 llama-index-llms-bedrock = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-bedrock" }
 llama-index-llms-azure-openai = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-azure-openai" }
