From 3ade7c546acc2a19ece51b8927bd2eeed0dc8394 Mon Sep 17 00:00:00 2001
From: Kirill Radzikhovskyy
Date: Sat, 19 Oct 2024 10:10:06 +1100
Subject: [PATCH] python312Packages.llama-cpp-python: init at 0.3.1

---
 .../llama-cpp-python/default.nix              | 96 +++++++++++++++++++
 pkgs/top-level/python-packages.nix            |  2 +
 2 files changed, 98 insertions(+)
 create mode 100644 pkgs/development/python-modules/llama-cpp-python/default.nix

diff --git a/pkgs/development/python-modules/llama-cpp-python/default.nix b/pkgs/development/python-modules/llama-cpp-python/default.nix
new file mode 100644
index 00000000000000..9a2f9458a8757a
--- /dev/null
+++ b/pkgs/development/python-modules/llama-cpp-python/default.nix
@@ -0,0 +1,96 @@
+{
+  lib,
+  buildPythonPackage,
+  cmake,
+  fetchFromGitHub,
+  gitUpdater,
+  ninja,
+  pathspec,
+  pyproject-metadata,
+  pytestCheckHook,
+  pythonOlder,
+  scikit-build-core,
+
+  config,
+  cudaSupport ? config.cudaSupport,
+  cudaPackages ? { },
+
+  diskcache,
+  jinja2,
+  numpy,
+  typing-extensions,
+  scipy,
+  huggingface-hub,
+}:
+
+buildPythonPackage rec {
+  pname = "llama-cpp-python";
+  version = "0.3.1";
+  pyproject = true;
+
+  disabled = pythonOlder "3.8"; # upstream pyproject.toml: requires-python = ">=3.8"
+
+  src = fetchFromGitHub {
+    owner = "abetlen";
+    repo = "llama-cpp-python";
+    rev = "refs/tags/v${version}";
+    hash = "sha256-eO1zvNJZBE5BCnbgbh00tFIRWBCWor1lIsrLXs/HFds=";
+    fetchSubmodules = true; # vendored llama.cpp lives in a submodule
+  };
+
+  dontUseCmakeConfigure = true; # scikit-build-core drives cmake itself
+  SKBUILD_CMAKE_ARGS = lib.strings.concatStringsSep ";" (
+    lib.optionals cudaSupport [
+      "-DGGML_CUDA=on"
+      "-DCUDAToolkit_ROOT=${lib.getDev cudaPackages.cuda_nvcc}"
+      "-DCMAKE_CUDA_COMPILER=${lib.getExe cudaPackages.cuda_nvcc}"
+    ]
+  );
+
+  nativeBuildInputs = [
+    cmake
+    ninja
+    pathspec
+    pyproject-metadata
+    scikit-build-core
+  ];
+
+  buildInputs = lib.optionals cudaSupport (
+    with cudaPackages;
+    [
+      cuda_cudart # cuda_runtime.h
+      cuda_cccl # cub/thrust headers
+      libcublas # cublas_v2.h
+    ]
+  );
+
+  propagatedBuildInputs = [
+    diskcache
+    jinja2
+    numpy
+    typing-extensions
+  ];
+
+  nativeCheckInputs = [
+    pytestCheckHook
+    scipy
+    huggingface-hub
+  ];
+
+  disabledTests = [
+    # tries to download model from huggingface-hub
+    "test_real_model"
+    "test_real_llama"
+  ];
+
+  pythonImportsCheck = [ "llama_cpp" ];
+
+  passthru.updateScript = gitUpdater { rev-prefix = "v"; };
+
+  meta = {
+    description = "Python bindings for llama.cpp";
+    homepage = "https://github.com/abetlen/llama-cpp-python";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [ kirillrdy ];
+  };
+}
diff --git a/pkgs/top-level/python-packages.nix b/pkgs/top-level/python-packages.nix
index bf85502d987aeb..f37b66fd781110 100644
--- a/pkgs/top-level/python-packages.nix
+++ b/pkgs/top-level/python-packages.nix
@@ -7318,6 +7318,8 @@ self: super: with self; {
   lizard = callPackage ../development/python-modules/lizard { };
 
   llama-cloud = callPackage ../development/python-modules/llama-cloud { };
 
+  llama-cpp-python = callPackage ../development/python-modules/llama-cpp-python { };
+
   llama-index = callPackage ../development/python-modules/llama-index { };
 