# Generated by go2rpm 1.14.0
# Spec for ollama: builds the ollama server binary and its llama runner,
# optionally with ROCm (AMD GPU) acceleration.
%bcond bootstrap 0
%bcond check 1
%bcond rocm 1

# Only tested on x86_64:
ExclusiveArch:  x86_64

%global debug_package %{nil}

%if %{with bootstrap}
%global __requires_exclude %{?__requires_exclude:%{__requires_exclude}|}^golang\\(.*\\)$
%endif

# https://github.com/ollama/ollama
%global goipath         github.com/ollama/ollama
%global forgeurl        https://github.com/ollama/ollama
Version:                0.4.4

%gometa -L -f

%global common_description %{expand:
Get up and running with Llama 3.2, Mistral, Gemma 2, and other large
language models.}

%global golicenses      LICENSE
# README files are renamed in %%prep (see below) so they can coexist flatly
# in the doc directory.
%global godocs          docs examples CONTRIBUTING.md README.md SECURITY.md\\\
                        app-README.md integration-README.md llama-README.md\\\
                        llama-runner-README.md macapp-README.md

Name:           ollama
Release:        %autorelease
Summary:        Get up and running AI LLMs

License:        Apache-2.0 AND MIT
URL:            %{gourl}
Source:         %{gosource}

BuildRequires:  make
BuildRequires:  fdupes
BuildRequires:  gcc-c++
%if %{with rocm}
BuildRequires:  rocm-comgr-devel
BuildRequires:  rocm-hip-devel
BuildRequires:  rocminfo
BuildRequires:  rocblas-devel
BuildRequires:  hipblas-devel
BuildRequires:  rocm-runtime-devel
BuildRequires:  rocm-compilersupport-macros
Requires:       rocblas
Requires:       hipblas
%endif

%description %{common_description}

%gopkg

%prep
%goprep -A

# Remove some .git cruft
# (find -delete avoids word-splitting problems of an unquoted backtick loop)
find . -name '.gitignore' -delete

# Rename README's
mv app/README.md app-README.md
mv integration/README.md integration-README.md
mv llama/README.md llama-README.md
mv llama/runner/README.md llama-runner-README.md
mv macapp/README.md macapp-README.md

# Makefile does its own thing.. need to add gopath
sed -i -e 's@GOARCH=$(ARCH)@GOARCH=$(ARCH) GOPATH=%{gobuilddir}:/usr/share/gocode@' llama/make/Makefile.default

# Use this to see issue with finding device-libs
# sed -i -e 's@-D_GNU_SOURCE@-D_GNU_SOURCE -print-rocm-search-dirs@' llama/make/Makefile.rocm

# Fix use of usr/bin/env python3
sed -i -e 's@env python3@python3@' examples/langchain-python-rag-privategpt/ingest.py
sed -i -e 's@env python3@python3@' examples/langchain-python-rag-privategpt/privateGPT.py

# Force rocm runner
%if %{with rocm}
sed -i -e 's@RUNNER_TARGETS := default@RUNNER_TARGETS := default rocm@' llama/Makefile
# rm HIP_PATH setting
sed -i -e '/HIP_PATH?=/d' llama/Makefile
sed -i -e '/HIP_PATH?=/d' llama/make/common-defs.make
# rm HIP_PLATFORM setting
sed -i -e '/export HIP_PLATFORM/d' llama/make/common-defs.make
# rm HIP_LIB_DIR setting
sed -i -e '/HIP_LIB_DIR :=/d' llama/Makefile
# Fix $(HIP_PATH)/lib -> /usr/lib64
sed -i -e 's@$(HIP_PATH)/lib@/usr/lib64@' llama/Makefile
sed -i -e 's@$(HIP_PATH)/lib@/usr/lib64@' llama/make/Makefile.rocm
# Tries to figure out libraries assuming libraries in a directory only gpu libs
sed -i -e 's@ROCM_TRANSITIVE_LIBS_INITIAL@#ROCM_TRANSITIVE_LIBS_INITIAL@' llama/make/Makefile.rocm
sed -i -e 's@GPU_TRANSITIVE_LIBS@#GPU_TRANSITIVE_LIBS@' llama/make/Makefile.rocm
# use clang directly
clang_path=$(hipconfig -l)/clang
sed -i -e "s@GPU_COMPILER:=\$(GPU_COMPILER_LINUX)@GPU_COMPILER:=${clang_path}@" llama/make/Makefile.rocm
sed -i -e "s@GPU_COMPILER:=\$(GPU_COMPILER_LINUX)@GPU_COMPILER:=${clang_path}@" llama/make/gpu.make
# Add some gpu's to the build list
# ollama' list
# HIP_ARCHS_COMMON := gfx900 gfx940 gfx941 gfx942 gfx1010 gfx1012 gfx1030 gfx1100 gfx1101 gfx1102
# HIP_ARCHS_LINUX := gfx906:xnack- gfx908:xnack- gfx90a:xnack+ gfx90a:xnack-
# our list
# gfx900 gfx906:xnack- gfx908:xnack- gfx90a:xnack+ gfx90a:xnack- gfx942 gfx1010 gfx1012 gfx1030 gfx1031 gfx1035 gfx1100 gfx1101 gfx1102 gfx1103 gfx1151
# need gfx942 gfx1012 gfx1031 gfx1035 gfx1103 gfx1151
sed -i -e 's@gfx1102@gfx1102 gfx942 gfx1012 gfx1031 gfx1035 gfx1103 gfx1151@' llama/make/Makefile.rocm
# we do not build for gfx940 or gfx941
sed -i -e 's@gfx900 gfx940 gfx941 gfx942@gfx900 gfx942@' llama/make/Makefile.rocm
# parallel-jobs is not supported on all clangs
# switch with the resource dir
# add --offload-compress
device_lib_path=$(${clang_path} --print-resource-dir)/amdgcn/bitcode
sed -i -e "s@-parallel-jobs=2@--rocm-device-lib-path=${device_lib_path} --offload-compress@" llama/make/Makefile.rocm
# do not make a copy of the rocblas/library
sed -i -e '/cd $(GPU_LIB_DIR)/d' llama/make/Makefile.rocm
# do not copy the system librocblas.so or libhipblas.so
sed -i -e '/$(CP) $(dir/d' llama/make/gpu.make
# do not copy the system librocblas.so.* or libhiblas.so.*
sed -i -e '/$(CP) $(GPU_LIB_DIR)/d' llama/make/gpu.make
# Install location of libggml_rocm.so is not in the system path
sed -i -e 's@TARGET_CGO_LDFLAGS = @TARGET_CGO_LDFLAGS = -Wl,-rpath=%{_libdir}/ollama/bin/dist/linux-amd64/lib/ollama @' llama/make/gpu.make
%endif

%if %{without bootstrap}
%generate_buildrequires
%go_generate_buildrequires
%endif

%if %{without bootstrap}
%build
export GO111MODULE=off
export GOPATH=$(pwd)/_build:%{gopath}
%make_build VERSION=%{version}
%gobuild -o %{gobuilddir}/bin/ollama %{goipath}
%gobuild -o %{gobuilddir}/bin/runner %{goipath}/llama/runner
%endif

%install
%gopkginstall
%if %{without bootstrap}
mkdir -p %{buildroot}%{_bindir}
mkdir -p %{buildroot}%{_libdir}/ollama/bin
# Not copying system ROCm libs here so do not need them
if [ -d %{gobuilddir}/../dist/linux-amd64-rocm ]; then
  rm -rf %{gobuilddir}/../dist/linux-amd64-rocm
fi
cp -r %{gobuilddir}/../dist %{buildroot}%{_libdir}/ollama/bin/
install -m 0755 -vp %{gobuilddir}/bin/* %{buildroot}%{_libdir}/ollama/bin/
# Relative symlink so /usr/bin/ollama resolves inside the image/chroot
pushd %{buildroot}%{_bindir}
ln -s ../%{_lib}/ollama/bin/ollama ollama
popd
%endif

#Clean up dupes:
%fdupes %{buildroot}%{_prefix}

%if %{without bootstrap}
%if %{with check}
%check
%gocheck
%endif
%endif

%if %{without bootstrap}
%files
%dir %{_libdir}/ollama
%license LICENSE
%doc CONTRIBUTING.md README.md SECURITY.md
%{_bindir}/ollama
# NOTE(review): the libggml_rocm.so and rocm runner entries below are built
# only %%{with rocm}; a --without rocm build would likely fail here -- confirm
# whether these should be wrapped in %%if %%{with rocm}.
%{_libdir}/ollama/bin/dist/linux-amd64/lib/ollama/libggml_rocm.so
%{_libdir}/ollama/bin/dist/linux-amd64/lib/ollama/runners/{cpu,cpu_avx,cpu_avx2,rocm}/ollama_llama_server
%{_libdir}/ollama/bin/{ollama,runner}
%endif

%gopkgfiles

%changelog
%autochangelog