diff --git a/ragstack-e2e-tests/e2e_tests/langchain/test_compatibility_rag.py b/ragstack-e2e-tests/e2e_tests/langchain/test_compatibility_rag.py index 114769e74..8d8f73fa6 100644 --- a/ragstack-e2e-tests/e2e_tests/langchain/test_compatibility_rag.py +++ b/ragstack-e2e-tests/e2e_tests/langchain/test_compatibility_rag.py @@ -144,7 +144,7 @@ def bedrock_cohere_embedding(): @pytest.fixture -def huggingface_hub_llm(): +def huggingface_hub_flant5xxl_llm(): return HuggingFaceHub( repo_id="google/flan-t5-xxl", huggingfacehub_api_token=get_required_env("HUGGINGFACE_HUB_KEY"), @@ -153,7 +153,7 @@ def huggingface_hub_llm(): @pytest.fixture -def huggingface_hub_embedding(): +def huggingface_hub_minilml6v2_embedding(): return HuggingFaceInferenceAPIEmbeddings( api_key=get_required_env("HUGGINGFACE_HUB_KEY"), model_name="sentence-transformers/all-MiniLM-l6-v2", @@ -190,7 +190,7 @@ def nvidia_mixtral_llm(): ("vertex_embedding", "vertex_llm"), ("bedrock_titan_embedding", "bedrock_anthropic_llm"), ("bedrock_cohere_embedding", "bedrock_meta_llm"), - ("huggingface_hub_embedding", "huggingface_hub_llm"), + ("huggingface_hub_minilml6v2_embedding", "huggingface_hub_flant5xxl_llm"), ("nvidia_embedding", "nvidia_mixtral_llm"), ], ) diff --git a/ragstack-e2e-tests/e2e_tests/langchain_llamaindex/test_astra.py b/ragstack-e2e-tests/e2e_tests/langchain_llamaindex/test_astra.py index 3ec3793d0..675daaec4 100644 --- a/ragstack-e2e-tests/e2e_tests/langchain_llamaindex/test_astra.py +++ b/ragstack-e2e-tests/e2e_tests/langchain_llamaindex/test_astra.py @@ -31,7 +31,7 @@ MetadataFilters, ExactMatchFilter, ) - from llama_index.vector_stores.astra import AstraDBVectorStore + from llama_index.vector_stores.astra_db import AstraDBVectorStore from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI diff --git a/ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py b/ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py index 45da9a627..aa1c2d8e5 100644 --- 
a/ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py +++ b/ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py @@ -37,7 +37,7 @@ MetadataFilters, ExactMatchFilter, ) - from llama_index.vector_stores.astra import AstraDBVectorStore + from llama_index.vector_stores.astra_db import AstraDBVectorStore from llama_index.llms.openai import OpenAI diff --git a/ragstack-e2e-tests/e2e_tests/llama_index/test_compatibility_rag.py b/ragstack-e2e-tests/e2e_tests/llama_index/test_compatibility_rag.py index 85c1bb9ed..f41fa5632 100644 --- a/ragstack-e2e-tests/e2e_tests/llama_index/test_compatibility_rag.py +++ b/ragstack-e2e-tests/e2e_tests/llama_index/test_compatibility_rag.py @@ -154,7 +154,7 @@ def bedrock_cohere_embedding(): @pytest.fixture -def huggingface_hub_llm(): +def huggingface_hub_flant5xxl_llm(): return "huggingface-hub", HuggingFaceInferenceAPI( model_name="google/flan-t5-xxl", token=get_required_env("HUGGINGFACE_HUB_KEY"), @@ -162,7 +162,7 @@ def huggingface_hub_llm(): @pytest.fixture -def huggingface_hub_embedding(): +def huggingface_hub_minilml6v2_embedding(): # There's a bug in Llama-Index HuggingFace Hub embedding # so we use LangChain's wrapped one for now return ( @@ -184,7 +184,7 @@ def huggingface_hub_embedding(): ("vertex_embedding", "vertex_llm"), ("bedrock_titan_embedding", "bedrock_anthropic_llm"), ("bedrock_cohere_embedding", "bedrock_meta_llm"), - ("huggingface_hub_embedding", "huggingface_hub_llm"), + ("huggingface_hub_minilml6v2_embedding", "huggingface_hub_flant5xxl_llm"), ], ) def test_rag(vector_store, embedding, llm, request): diff --git a/ragstack-e2e-tests/e2e_tests/test_utils/astradb_vector_store_handler.py b/ragstack-e2e-tests/e2e_tests/test_utils/astradb_vector_store_handler.py index 45e2c005e..005cc1608 100644 --- a/ragstack-e2e-tests/e2e_tests/test_utils/astradb_vector_store_handler.py +++ b/ragstack-e2e-tests/e2e_tests/test_utils/astradb_vector_store_handler.py @@ -14,7 +14,7 @@ try: from llama_index.vector_stores import 
AstraDBVectorStore except ImportError: - from llama_index.vector_stores.astra import AstraDBVectorStore + from llama_index.vector_stores.astra_db import AstraDBVectorStore from e2e_tests.test_utils import get_required_env, random_string from e2e_tests.test_utils.vector_store_handler import ( diff --git a/ragstack-e2e-tests/pyproject.langchain.toml b/ragstack-e2e-tests/pyproject.langchain.toml index e4921e1d3..03d77de75 100644 --- a/ragstack-e2e-tests/pyproject.langchain.toml +++ b/ragstack-e2e-tests/pyproject.langchain.toml @@ -30,8 +30,8 @@ langchain = { git = "https://github.com/langchain-ai/langchain.git", branch = "m langchain_core = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/core" } langchain_community = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/community" } langchain-openai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/openai" } -langchain-google-genai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/google-genai" } -langchain-google-vertexai = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/google-vertexai" } +langchain-google-genai = { git = "https://github.com/langchain-ai/langchain-google.git", branch = "main", subdirectory = "libs/genai" } +langchain-google-vertexai = { git = "https://github.com/langchain-ai/langchain-google.git", branch = "main", subdirectory = "libs/vertexai" } langchain-nvidia-ai-endpoints = { git = "https://github.com/langchain-ai/langchain.git", branch = "master", subdirectory = "libs/partners/nvidia-ai-endpoints" } llama-index = { version = "0.9.48", extras = ["langchain"] } llama-parse = { version = "0.1.4" } diff --git a/ragstack-e2e-tests/pyproject.llamaindex.toml b/ragstack-e2e-tests/pyproject.llamaindex.toml index 49dec6971..0eaf934d3 100644 
--- a/ragstack-e2e-tests/pyproject.llamaindex.toml +++ b/ragstack-e2e-tests/pyproject.llamaindex.toml @@ -26,7 +26,7 @@ beautifulsoup4 = "^4" llama-index = { git = "https://github.com/run-llama/llama_index.git", branch = "main" } llama-index-embeddings-langchain = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-langchain" } -llama-index-vector-stores-astra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-astra" } +llama-index-vector-stores-astra-db = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-astra-db" } llama-index-vector-stores-cassandra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-cassandra" } llama-index-llms-bedrock = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-bedrock" } llama-index-llms-azure-openai = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-azure-openai" }