Name:           llama.cpp
Version:        b2060
Release:        1%{?dist}
Summary:        Port of Facebook's LLaMA model in C/C++

License:        MIT
URL:            https://github.com/ggerganov/llama.cpp
Source0:        %{url}/archive/%{version}/%{name}-%{version}.tar.gz

ExcludeArch:    %{ix86}

BuildRequires:  cmake
BuildRequires:  clang-devel
BuildRequires:  libcxx-devel
BuildRequires:  openblas-devel
BuildRequires:  clblast-devel
BuildRequires:  vulkan-loader-devel
BuildRequires:  glslc glslang

%description
Port of Facebook's LLaMA model in C/C++.

%package devel
Summary:        Development package for %{name}
Requires:       %{name}%{?_isa} = %{version}-%{release}

%description devel
Files for development with %{name}.

%prep
%autosetup -p1

%build
export CC=clang
export CXX=clang++
%cmake -DCMAKE_INSTALL_LIBDIR=%{_libdir} \
    -DBUILD_SHARED_LIBS=True \
    -DLLAMA_BLAS_VENDOR=OpenBLAS \
    -DLLAMA_CLBLAST=True \
    ..
## -DLLAMA_VULKAN=True \
%cmake_build

%install
%cmake_install

%check
#%%cmake_test

%ldconfig_scriptlets

%files
%license LICENSE
%{_bindir}/baby-llama
%{_bindir}/batched
%{_bindir}/batched-bench
%{_bindir}/beam-search
%{_bindir}/benchmark
%{_bindir}/convert-llama2c-to-ggml
%{_bindir}/convert-lora-to-ggml.py
%{_bindir}/convert.py
%{_bindir}/embedding
%{_bindir}/export-lora
%{_bindir}/finetune
%{_bindir}/infill
%{_bindir}/imatrix
%{_bindir}/llama-bench
%{_bindir}/llava-cli
%{_bindir}/lookahead
%{_bindir}/lookup
%{_bindir}/main
%{_bindir}/parallel
%{_bindir}/passkey
%{_bindir}/perplexity
%{_bindir}/quantize
%{_bindir}/quantize-stats
%{_bindir}/save-load-state
%{_bindir}/server
%{_bindir}/simple
%{_bindir}/speculative
%{_bindir}/test-autorelease
%{_bindir}/test-backend-ops
%{_bindir}/test-grad0
%{_bindir}/test-grammar-parser
%{_bindir}/test-llama-grammar
%{_bindir}/test-model-load-cancel
%{_bindir}/test-quantize-fns
%{_bindir}/test-quantize-perf
%{_bindir}/test-rope
%{_bindir}/test-sampling
%{_bindir}/test-tokenizer-0-falcon
%{_bindir}/test-tokenizer-0-llama
%{_bindir}/test-tokenizer-1-bpe
%{_bindir}/test-tokenizer-1-llama
%{_bindir}/tokenize
%{_bindir}/train-text-from-scratch
%{_libdir}/libggml_shared.so
%{_libdir}/libllama.so
%{_libdir}/libllava_shared.so

%files devel
%{_libdir}/cmake/Llama/
%{_includedir}/ggml*.h
%{_includedir}/llama.h

%changelog
* Sun Feb 04 2024 Peter Robinson - b2060-1
- Update to b2060

* Thu Feb 01 2024 Peter Robinson - b2038-1
- Update to b2038

* Tue Jan 30 2024 Peter Robinson - b2008-1
- Update to b2008

* Fri Jan 12 2024 Peter Robinson - b1834-1
- Update to b1834

* Sun Jan 07 2024 Peter Robinson - b1786-1
- Update to b1786

* Wed Dec 06 2023 Peter Robinson - b1614-1
- Update to b1614

* Fri Dec 01 2023 Peter Robinson - b1593-1
- Update to b1593

* Sat Nov 25 2023 Peter Robinson - b1560-1
- Update to b1560

* Tue Nov 21 2023 Peter Robinson - b1547-1
- Initial package
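
A minimal sketch of how this spec can be exercised locally before a proper build submission, assuming the standard rpmdevtools/mock workflow; the mock chroot name, the SRPM glob, and the GGUF model path are illustrative assumptions, not part of the package.

# Fetch the Source0 tarball into ~/rpmbuild/SOURCES (spectool is part of rpmdevtools).
spectool -g -R llama.cpp.spec

# Local build of the main and devel subpackages from this spec.
rpmbuild -ba llama.cpp.spec

# Optional clean-chroot rebuild of the generated SRPM with mock.
mock -r fedora-rawhide-x86_64 ~/rpmbuild/SRPMS/llama.cpp-b2060-1.*.src.rpm

# Smoke test after installation; the model file path is a placeholder.
main -m ~/models/some-model.Q4_K_M.gguf -p "Hello" -n 32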