forked from ggerganov/llama.cpp
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'master' of https://github.com/l3utterfly/llama.cpp
- Loading branch information
Showing
199 changed files
with
25,042 additions
and
15,107 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
# Build llama-cli for Huawei Ascend NPUs using the CANN toolkit.
ARG ASCEND_VERSION=8.0.rc2.alpha003-910b-openeuler22.03-py3.8

FROM cosdt/cann:$ASCEND_VERSION AS build

WORKDIR /app

COPY . .

# Clean the yum cache in the same layer so it does not bloat the image (DL3040).
RUN yum install -y gcc g++ cmake make && \
    yum clean all && \
    rm -rf /var/cache/yum

# CANN toolkit environment required by the compiler and runtime libraries.
ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}

# Use the runtime stubs to find libascend_hal.so, because the driver hasn't been mounted at build time.
ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH

RUN echo "Building with static libs" && \
    source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \
    cmake -B build -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF && \
    cmake --build build --config Release --target llama-cli

# TODO: use image with NNRT
FROM cosdt/cann:$ASCEND_VERSION AS runtime
COPY --from=build /app/build/bin/llama-cli /llama-cli

ENV LC_ALL=C.utf8

# Runtime needs the same CANN environment as the build stage.
ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}

ENTRYPOINT ["/llama-cli"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,35 +1,37 @@ | ||
# Build llama-cli against CUDA; the span previously interleaved the pre- and
# post-merge diff lines (both CUDA 11.7.1/12.6.0, both the make and cmake
# builds) — this is the reconstructed post-merge recipe.
ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=12.6.0
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} AS build

# CUDA architecture to build for (defaults to all supported archs)
ARG CUDA_DOCKER_ARCH=default

# Combine update+install in one layer and drop the apt lists so the stale-cache
# bug (DL3009) and layer bloat are avoided.
RUN apt-get update && \
    apt-get install -y --no-install-recommends build-essential git cmake && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

COPY . .

# Use the default CUDA archs if not specified
RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
    fi && \
    cmake -B build -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
    cmake --build build --config Release --target llama-cli -j$(nproc)

FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

# libgomp1 is required at runtime by the OpenMP-enabled ggml build.
RUN apt-get update && \
    apt-get install -y --no-install-recommends libgomp1 && \
    rm -rf /var/lib/apt/lists/*

# Shared-library build: the binary needs libggml/libllama alongside it.
COPY --from=build /app/build/ggml/src/libggml.so /libggml.so
COPY --from=build /app/build/src/libllama.so /libllama.so
COPY --from=build /app/build/bin/llama-cli /llama-cli

ENTRYPOINT [ "/llama-cli" ]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,13 +1,52 @@ | ||
# flake-parts module: derive a devShell (and a "-extra" variant with optional
# script dependencies) from every package in config.packages.
# The span previously interleaved the pre-change module (plain
# `lib.concatMapAttrs` over passthru shells) with the new `lib.pipe` body;
# this is the reconstructed post-merge version.
{ inputs, ... }:

{
  perSystem =
    {
      config,
      lib,
      system,
      ...
    }:
    {
      devShells =
        let
          pkgs = import inputs.nixpkgs { inherit system; };
          stdenv = pkgs.stdenv;
          scripts = config.packages.python-scripts;
        in
        lib.pipe (config.packages) [
          (lib.concatMapAttrs (
            name: package: {
              ${name} = pkgs.mkShell {
                name = "${name}";
                inputsFrom = [ package ];
                shellHook = ''
                  echo "Entering ${name} devShell"
                '';
              };
              # python-scripts already *is* the scripts package, so its
              # "-extra" shell would be redundant; mark it null and filter below.
              "${name}-extra" =
                if (name == "python-scripts") then
                  null
                else
                  pkgs.mkShell {
                    name = "${name}-extra";
                    inputsFrom = [
                      package
                      scripts
                    ];
                    # Extra packages that *may* be used by some scripts
                    packages = [
                      pkgs.python3Packages.tiktoken
                    ];
                    shellHook = ''
                      echo "Entering ${name} devShell"
                      addToSearchPath "LD_LIBRARY_PATH" "${lib.getLib stdenv.cc.cc}/lib"
                    '';
                  };
            }
          ))
          (lib.filterAttrs (name: value: value != null))
        ];
    };
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
# Nix derivation for the gguf Python package (gguf-py), versioned in lockstep
# with the llama.cpp release it ships with.
{
  lib,
  llamaVersion,
  numpy,
  tqdm,
  sentencepiece,
  pyyaml,
  poetry-core,
  buildPythonPackage,
  pytestCheckHook,
}:

buildPythonPackage {
  pname = "gguf";
  version = llamaVersion;

  # Source lives in the sibling gguf-py directory of the repository.
  src = lib.cleanSource ../../gguf-py;

  # Poetry-based pyproject build.
  pyproject = true;
  nativeBuildInputs = [ poetry-core ];

  propagatedBuildInputs = [
    numpy
    tqdm
    sentencepiece
    pyyaml
  ];

  # Run the test suite and verify the package (and numpy) import cleanly.
  doCheck = true;
  nativeCheckInputs = [ pytestCheckHook ];
  pythonImportsCheck = [
    "numpy"
    "gguf"
  ];

  meta = {
    description = "Python package for writing binary files in the GGUF format";
    license = lib.licenses.mit;
    maintainers = [ lib.maintainers.ditsuke ];
  };
}
Oops, something went wrong.