tests: add support for llama-index 0.10.x #290

Merged · 9 commits · Feb 15, 2024
Changes from all commits
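
Every file below gets the same treatment: the llama-index imports are wrapped in a try/except ImportError so the tests run unchanged against both the 0.9.x monolithic package and the 0.10.x split packages. A minimal sketch of the pattern, using modules that appear in the diffs below:

```python
try:
    # llama-index 0.9.x: everything is importable from the top-level package
    from llama_index import ServiceContext, StorageContext, VectorStoreIndex
    from llama_index.llms import OpenAI
    from llama_index.vector_stores import AstraDBVectorStore
except ImportError:
    # llama-index 0.10.x: core classes moved to llama_index.core, and each
    # integration ships as its own pip package under a namespaced module
    from llama_index.core import ServiceContext, StorageContext, VectorStoreIndex
    from llama_index.llms.openai import OpenAI
    from llama_index.vector_stores.astra import AstraDBVectorStore
```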
41 changes: 28 additions & 13 deletions ragstack-e2e-tests/e2e_tests/langchain_llamaindex/test_astra.py
@@ -7,19 +7,34 @@
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.astradb import AstraDB
from llama_index import (
Document,
OpenAIEmbedding,
ServiceContext,
StorageContext,
VectorStoreIndex,
)
from llama_index.llms import OpenAI
from llama_index.vector_stores import (
AstraDBVectorStore,
MetadataFilters,
ExactMatchFilter,
)

try:
# llamaindex 0.9.x
from llama_index import (
Document,
OpenAIEmbedding,
ServiceContext,
StorageContext,
VectorStoreIndex,
)
from llama_index.llms import OpenAI
from llama_index.vector_stores import (
AstraDBVectorStore,
MetadataFilters,
ExactMatchFilter,
)
except ImportError:
# llamaindex 0.10.x
from llama_index.core import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.core.schema import Document
from llama_index.core.vector_stores import (
MetadataFilters,
ExactMatchFilter,
)
from llama_index.vector_stores.astra import AstraDBVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI


from e2e_tests.test_utils import skip_test_due_to_implementation_not_supported
from e2e_tests.test_utils.astradb_vector_store_handler import (
47 changes: 32 additions & 15 deletions ragstack-e2e-tests/e2e_tests/llama_index/test_astra.py
@@ -8,21 +8,38 @@
get_required_env,
is_astra,
)
from llama_index import (
ServiceContext,
StorageContext,
VectorStoreIndex,
Document,
)
from llama_index.embeddings import BaseEmbedding
from llama_index.llms import OpenAI, LLM
from llama_index.node_parser import SimpleNodeParser
from llama_index.schema import NodeWithScore
from llama_index.vector_stores import (
AstraDBVectorStore,
MetadataFilters,
ExactMatchFilter,
)

try:
# llamaindex 0.9.x
from llama_index import (
ServiceContext,
StorageContext,
VectorStoreIndex,
Document,
)
from llama_index.embeddings import BaseEmbedding
from llama_index.llms import OpenAI, LLM
from llama_index.node_parser import SimpleNodeParser
from llama_index.schema import NodeWithScore
from llama_index.vector_stores import (
AstraDBVectorStore,
MetadataFilters,
ExactMatchFilter,
)
except ImportError:
# llamaindex 0.10.x
from llama_index.core import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.llms import LLM
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.schema import NodeWithScore, Document
from llama_index.core.vector_stores import (
MetadataFilters,
ExactMatchFilter,
)
from llama_index.vector_stores.astra import AstraDBVectorStore
from llama_index.llms.openai import OpenAI


from e2e_tests.test_utils import skip_test_due_to_implementation_not_supported
from e2e_tests.test_utils.astradb_vector_store_handler import AstraDBVectorStoreHandler
63 changes: 41 additions & 22 deletions ragstack-e2e-tests/e2e_tests/llama_index/test_compatibility_rag.py
@@ -2,28 +2,47 @@

import pytest
from langchain.embeddings import VertexAIEmbeddings, HuggingFaceInferenceAPIEmbeddings
from llama_index import (
VectorStoreIndex,
StorageContext,
ServiceContext,
Document,
)
from llama_index.embeddings import (
OpenAIEmbedding,
AzureOpenAIEmbedding,
BedrockEmbedding,
)
from llama_index.llms import (
OpenAI,
AzureOpenAI,
Vertex,
Bedrock,
HuggingFaceInferenceAPI,
ChatMessage,
Gemini,
)
from llama_index.multi_modal_llms import GeminiMultiModal
from llama_index.schema import ImageNode

try:
# llamaindex 0.9.x
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, Document
from llama_index.embeddings import (
OpenAIEmbedding,
AzureOpenAIEmbedding,
BedrockEmbedding,
)
from llama_index.llms import (
OpenAI,
AzureOpenAI,
Vertex,
Bedrock,
HuggingFaceInferenceAPI,
ChatMessage,
Gemini,
)
from llama_index.multi_modal_llms import GeminiMultiModal
from llama_index.schema import ImageNode
except ImportError:
# llamaindex 0.10.x
from llama_index.core import (
ServiceContext,
StorageContext,
VectorStoreIndex,
Document,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.schema import ImageNode
from llama_index.llms.openai import OpenAI
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.bedrock import BedrockEmbedding
from llama_index.llms.vertex import Vertex
from llama_index.llms.bedrock import Bedrock
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.llms.gemini import Gemini
from llama_index.multi_modal_llms.gemini import GeminiMultiModal


from e2e_tests.conftest import (
set_current_test_info,
18 changes: 11 additions & 7 deletions ragstack-e2e-tests/e2e_tests/llama_index/test_llama_parse.py
@@ -5,14 +5,18 @@
except ImportError:
pytest.skip("llama_parse is not supported, skipping tests", allow_module_level=True)

from llama_index import (
VectorStoreIndex,
StorageContext,
ServiceContext,
)

from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
try:
# llamaindex 0.9.x
from llama_index import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
except ImportError:
# llamaindex 0.10.x
from llama_index.core import ServiceContext, StorageContext, VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding


from e2e_tests.conftest import (
set_current_test_info,
@@ -10,7 +10,11 @@
from langchain_community.chat_message_histories import AstraDBChatMessageHistory
from langchain_community.vectorstores.astradb import AstraDB
from langchain_core.chat_history import BaseChatMessageHistory
from llama_index.vector_stores import AstraDBVectorStore

try:
from llama_index.vector_stores import AstraDBVectorStore
except ImportError:
from llama_index.vector_stores.astra import AstraDBVectorStore

from e2e_tests.test_utils import get_required_env, random_string
from e2e_tests.test_utils.vector_store_handler import (
@@ -11,11 +11,22 @@
)
from langchain_community.vectorstores.cassandra import Cassandra
from langchain_core.chat_history import BaseChatMessageHistory
from llama_index.schema import TextNode
from llama_index.vector_stores import CassandraVectorStore
from llama_index.vector_stores.types import (
VectorStoreQuery,
)

try:
# llamaindex 0.9.x
from llama_index.schema import TextNode
from llama_index.vector_stores import CassandraVectorStore
from llama_index.vector_stores.types import (
VectorStoreQuery,
)
except ImportError:
# llamaindex 0.10.x
from llama_index.core.schema import TextNode
from llama_index.vector_stores.cassandra import CassandraVectorStore
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
)


from e2e_tests.test_utils import (
random_string,
2 changes: 1 addition & 1 deletion ragstack-e2e-tests/pyproject.langchain.toml
@@ -14,7 +14,7 @@ black = "*"
ruff = "*"
google-cloud-aiplatform = "^1.36.4"
boto3 = "^1.29.6"
huggingface-hub = "^0.19.4"
huggingface-hub = "^0.20.3"
azure-storage-blob = "^12.19.0"
pillow = "^10.2.0"
testcontainers = "^3.7.1"
18 changes: 16 additions & 2 deletions ragstack-e2e-tests/pyproject.llamaindex.toml
@@ -14,7 +14,7 @@ black = "*"
ruff = "*"
google-cloud-aiplatform = "^1.36.4"
boto3 = "^1.29.6"
huggingface-hub = "^0.19.4"
huggingface-hub = "^0.20.3"
azure-storage-blob = "^12.19.0"
pillow = "^10.2.0"
testcontainers = "^3.7.1"
@@ -25,7 +25,21 @@ trulens-eval = "^0.21.0"
beautifulsoup4 = "^4"

llama-index = { git = "https://github.com/run-llama/llama_index.git", branch = "main" }
llama-parse = { version = "0.2.0" }
llama-index-embeddings-langchain = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-langchain" }
llama-index-vector-stores-astra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-astra" }
llama-index-vector-stores-cassandra = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/vector_stores/llama-index-vector-stores-cassandra" }
llama-index-llms-bedrock = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-bedrock" }
llama-index-llms-azure-openai = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-azure-openai" }
llama-index-llms-gemini = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-gemini" }
llama-index-llms-huggingface = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-huggingface" }
llama-index-llms-vertex = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/llms/llama-index-llms-vertex" }
llama-index-embeddings-bedrock = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-bedrock" }
llama-index-embeddings-azure-openai = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-azure-openai" }
llama-index-embeddings-gemini = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-gemini" }
llama-index-embeddings-huggingface = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/embeddings/llama-index-embeddings-huggingface" }
llama-index-multi-modal-llms-gemini = { git = "https://github.com/run-llama/llama_index.git", branch = "main", subdirectory = "llama-index-integrations/multi_modal_llms/llama-index-multi-modal-llms-gemini" }

llama-parse = { git = "https://github.com/run-llama/llama_parse.git", branch = "main" }

langchain = { version = "0.1.2" }
langchain-core = "0.1.15"
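
Because the 0.10.x dependencies above are pulled straight from the llama_index main branch, it can be useful to confirm which generation actually got installed before assuming which branch of the try/except imports will run. A small sketch, not part of this PR, using importlib.metadata; the helper name and the fallback to the llama-index-core distribution are assumptions:

```python
from importlib.metadata import PackageNotFoundError, version


def llama_index_is_010_or_newer() -> bool:
    """Best-effort check of the installed llama-index generation."""
    # The meta-package is still distributed as "llama-index" on 0.10.x;
    # fall back to "llama-index-core" in case only the core package is present.
    for dist in ("llama-index", "llama-index-core"):
        try:
            major, minor = (int(p) for p in version(dist).split(".")[:2])
            return (major, minor) >= (0, 10)
        except PackageNotFoundError:
            continue
    return False  # neither distribution found; assume the legacy 0.9.x layout
```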
2 changes: 1 addition & 1 deletion ragstack-e2e-tests/pyproject.ragstack-ai.toml
@@ -16,7 +16,7 @@ google-cloud-aiplatform = "^1.36.4"
langchain-google-genai = "^0.0.4"
langchain-nvidia-ai-endpoints = "^0.0.1"
boto3 = "^1.29.6"
huggingface-hub = "^0.19.4"
huggingface-hub = "^0.20.3"
azure-storage-blob = "^12.19.0"
pillow = "^10.2.0"
testcontainers = "^3.7.1"
2 changes: 1 addition & 1 deletion ragstack-e2e-tests/pyproject.toml
@@ -14,7 +14,7 @@ pytest = "*"
black = "*"
ruff = "*"
boto3 = "^1.34.21"
huggingface-hub = "^0.19.4"
huggingface-hub = "^0.20.3"
azure-storage-blob = "^12.19.0"
pillow = "^10.2.0"
testcontainers = "^3.7.1"