Cannot build
Hello,
I cannot build this library on my Mac (macOS 15.1.1, M2):
bundle exec rake lib/llama_cpp/llama_cpp.bundle -- --with-opt-dir=/opt/homebrew
(in /Users/kitaitimakoto/src/github.com/yoshoku/llama_cpp.rb)
/usr/bin/make
cd tmp/arm64-darwin24/llama_cpp/3.3.6
compiling ../../../../ext/llama_cpp/llama_cpp.cpp
In file included from ../../../../ext/llama_cpp/llama_cpp.cpp:1:
In file included from ../../../../ext/llama_cpp/llama_cpp.h:11:
In file included from /Users/kitaitimakoto/.rubies/ruby-3.3.6/include/ruby-3.3.0/ruby.h:38:
In file included from /Users/kitaitimakoto/.rubies/ruby-3.3.6/include/ruby-3.3.0/ruby/ruby.h:15:
In file included from /Users/kitaitimakoto/.rubies/ruby-3.3.6/include/ruby-3.3.0/ruby/internal/config.h:22:
/Users/kitaitimakoto/.rubies/ruby-3.3.6/include/ruby-3.3.0/arm64-darwin24/ruby/config.h:156:9: warning: 'DEPRECATED' macro redefined [-Wmacro-redefined]
156 | #define DEPRECATED(x) __attribute__ ((__deprecated__)) x
| ^
/opt/homebrew/include/llama.h:28:13: note: previous definition is here
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
../../../../ext/llama_cpp/llama_cpp.cpp:107:18: error: no matching function for call to 'llama_batch_get_one'
107 | ptr->batch = llama_batch_get_one(nullptr, n_tokens, pos_zero, seq_id);
| ^~~~~~~~~~~~~~~~~~~
/opt/homebrew/include/llama.h:785:34: note: candidate function not viable: requires 2 arguments, but 4 were provided
785 | LLAMA_API struct llama_batch llama_batch_get_one(
| ^
786 | llama_token * tokens,
| ~~~~~~~~~~~~~~~~~~~~~
787 | int32_t n_tokens);
| ~~~~~~~~~~~~~~~~~~
../../../../ext/llama_cpp/llama_cpp.cpp:163:16: error: no member named 'all_pos_0' in 'llama_batch'
163 | ptr->batch.all_pos_0 = NUM2INT(all_pos_0);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:164:31: error: no member named 'all_pos_0' in 'llama_batch'
164 | return INT2NUM(ptr->batch.all_pos_0);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:169:31: error: no member named 'all_pos_0' in 'llama_batch'
169 | return INT2NUM(ptr->batch.all_pos_0);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:175:16: error: no member named 'all_pos_1' in 'llama_batch'
175 | ptr->batch.all_pos_1 = NUM2INT(all_pos_1);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:176:31: error: no member named 'all_pos_1' in 'llama_batch'
176 | return INT2NUM(ptr->batch.all_pos_1);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:181:31: error: no member named 'all_pos_1' in 'llama_batch'
181 | return INT2NUM(ptr->batch.all_pos_1);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:187:16: error: no member named 'all_seq_id' in 'llama_batch'
187 | ptr->batch.all_seq_id = NUM2INT(all_seq_id);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:188:31: error: no member named 'all_seq_id' in 'llama_batch'
188 | return INT2NUM(ptr->batch.all_seq_id);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:193:31: error: no member named 'all_seq_id' in 'llama_batch'
193 | return INT2NUM(ptr->batch.all_seq_id);
| ~~~~~~~~~~ ^
../../../../ext/llama_cpp/llama_cpp.cpp:1739:20: warning: 'llama_token_prefix' is deprecated: use llama_token_fim_pre instead [-Wdeprecated-declarations]
1739 | return INT2NUM(llama_token_prefix(ptr->model));
| ^
/opt/homebrew/include/llama.h:910:5: note: 'llama_token_prefix' has been explicitly marked deprecated here
910 | DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead");
| ^
/opt/homebrew/include/llama.h:28:56: note: expanded from macro 'DEPRECATED'
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
../../../../ext/llama_cpp/llama_cpp.cpp:1744:20: warning: 'llama_token_middle' is deprecated: use llama_token_fim_mid instead [-Wdeprecated-declarations]
1744 | return INT2NUM(llama_token_middle(ptr->model));
| ^
/opt/homebrew/include/llama.h:911:5: note: 'llama_token_middle' has been explicitly marked deprecated here
911 | DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead");
| ^
/opt/homebrew/include/llama.h:28:56: note: expanded from macro 'DEPRECATED'
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
../../../../ext/llama_cpp/llama_cpp.cpp:1749:20: warning: 'llama_token_suffix' is deprecated: use llama_token_fim_suf instead [-Wdeprecated-declarations]
1749 | return INT2NUM(llama_token_suffix(ptr->model));
| ^
/opt/homebrew/include/llama.h:912:5: note: 'llama_token_suffix' has been explicitly marked deprecated here
912 | DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead");
| ^
/opt/homebrew/include/llama.h:28:56: note: expanded from macro 'DEPRECATED'
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
../../../../ext/llama_cpp/llama_cpp.cpp:2360:18: warning: 'llama_load_session_file' is deprecated: use llama_state_load_file instead [-Wdeprecated-declarations]
2360 | bool res = llama_load_session_file(ctx_ptr->ctx, StringValueCStr(filename), session_tokens.data(), session_tokens.capacity(), &n_token_count_out);
| ^
/opt/homebrew/include/llama.h:718:15: note: 'llama_load_session_file' has been explicitly marked deprecated here
718 | LLAMA_API DEPRECATED(bool llama_load_session_file(
| ^
/opt/homebrew/include/llama.h:28:56: note: expanded from macro 'DEPRECATED'
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
../../../../ext/llama_cpp/llama_cpp.cpp:2411:16: warning: 'llama_save_session_file' is deprecated: use llama_state_save_file instead [-Wdeprecated-declarations]
2411 | bool res = llama_save_session_file(ctx_ptr->ctx, StringValueCStr(filename), session_tokens.data(), sz_session_tokens);
| ^
/opt/homebrew/include/llama.h:731:15: note: 'llama_save_session_file' has been explicitly marked deprecated here
731 | LLAMA_API DEPRECATED(bool llama_save_session_file(
| ^
/opt/homebrew/include/llama.h:28:56: note: expanded from macro 'DEPRECATED'
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
6 warnings and 10 errors generated.
make: *** [llama_cpp.o] Error 1
rake aborted!
Command failed with status (2): [/usr/bin/make]
/Users/kitaitimakoto/.gem/ruby/3.3.6/gems/rake-compiler-1.2.8/lib/rake/extensiontask.rb:195:in `block (2 levels) in define_compile_tasks'
/Users/kitaitimakoto/.gem/ruby/3.3.6/gems/fileutils-1.7.3/lib/fileutils.rb:241:in `chdir'
/Users/kitaitimakoto/.gem/ruby/3.3.6/gems/fileutils-1.7.3/lib/fileutils.rb:241:in `cd'
/Users/kitaitimakoto/.gem/ruby/3.3.6/gems/rake-compiler-1.2.8/lib/rake/extensiontask.rb:194:in `block in define_compile_tasks'
/Users/kitaitimakoto/.gem/ruby/3.3.6/gems/rake-13.2.1/exe/rake:27:in `<main>'
Tasks: TOP => lib/llama_cpp/llama_cpp.bundle => copy:llama_cpp:arm64-darwin24:3.3.6 => tmp/arm64-darwin24/llama_cpp/3.3.6/llama_cpp.bundle
(See full trace by running task with --trace)
llama.cpp's API seems to have changed.
I couldn't fix it myself because I can't write C++...
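From the error messages alone, the shape of the fix looks something like this (an untested sketch based only on the signatures the compiler printed above, not a real patch):

// The newer llama.h declares llama_batch_get_one(tokens, n_tokens) with
// just two parameters, so the extra position/sequence-id arguments go away:
ptr->batch = llama_batch_get_one(nullptr, n_tokens);
// The all_pos_0 / all_pos_1 / all_seq_id fields no longer exist on
// llama_batch, so the accessors around lines 163-193 would have to write
// per-token values into batch.pos and batch.seq_id instead (for example on
// a batch allocated with llama_batch_init).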
Hi @KitaitiMakoto. Did you manage to overcome this?
No...
I'm getting a build issue too:
Installing llama_cpp 0.17.10 with native extensions
Gem::Ext::BuildError: ERROR: Failed to build gem native extension.
current directory: /rails/vendor/bundle/ruby/3.3.0/gems/llama_cpp-0.17.10/ext/llama_cpp
/ruby/3.3.0/bin/ruby extconf.rb --with-opt-dir\=/opt/homebrew/
checking for -lstdc++... yes
checking for -lllama... yes
checking for llama.h... yes
creating Makefile
current directory: /rails/vendor/bundle/ruby/3.3.0/gems/llama_cpp-0.17.10/ext/llama_cpp
make DESTDIR\= sitearchdir\=./.gem.20250130-17945-vlg2zd sitelibdir\=./.gem.20250130-17945-vlg2zd clean
current directory: /rails/vendor/bundle/ruby/3.3.0/gems/llama_cpp-0.17.10/ext/llama_cpp
make DESTDIR\= sitearchdir\=./.gem.20250130-17945-vlg2zd sitelibdir\=./.gem.20250130-17945-vlg2zd
compiling llama_cpp.cpp
In file included from llama_cpp.cpp:1:
In file included from ./llama_cpp.h:11:
In file included from /ruby/3.3.0/include/ruby-3.3.0/ruby.h:38:
In file included from ruby/3.3.0/include/ruby-3.3.0/ruby/ruby.h:15:
In file included from /ruby/3.3.0/include/ruby-3.3.0/ruby/internal/config.h:22:
/ruby/3.3.0/include/ruby-3.3.0/arm64-darwin23/ruby/config.h:155:9: warning: 'DEPRECATED' macro redefined [-Wmacro-redefined]
155 | #define DEPRECATED(x) __attribute__ ((__deprecated__)) x
| ^
/opt/homebrew//include/llama.h:28:13: note: previous definition is here
28 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
| ^
llama_cpp.cpp:110:18: error: no matching function for call to 'llama_batch_get_one'
110 | ptr->batch = llama_batch_get_one(nullptr, n_tokens, pos_zero, seq_id);
| ^~~~~~~~~~~~~~~~~~~
/opt/homebrew//include/llama.h:832:34: note: candidate function not viable: requires 2 arguments, but 4 were provided
832 | LLAMA_API struct llama_batch llama_batch_get_one(
macOS 15.1.1, M1 chip. Installed using Homebrew.
I downloaded llama.cpp to my ~ home directory on my Linux machine:
cd
git clone https://github.com/ggml-org/llama.cpp
and built it:
cd llama.cpp
mkdir build
cd build
cmake ..
make -j10
And what next? How do I install the gem? gem install [what directory name]?
Now (v0.23.3) I can build! Thank you.
% bundle exec rake lib/llama_cpp/llama_cpp.bundle -- --with-opt-dir=/opt/homebrew
@tywy7
First, I ran llama-cli as its README describes, with a model I wanted to use:
% llama-cli --hf-repo reach-vb/gemma-1.1-2b-it-Q8_0-GGUF --model gemma-1.1-2b-it.Q8_0.gguf -p "The meaning to life and the universe is"
Then I got the model file models/gemma-1.1-7b-it.Q4_K_M.gguf and successfully loaded it with LlamaCpp:
require 'llama_cpp'

# load all available ggml backends before using the model
LlamaCpp.ggml_backend_load_all

model_params = LlamaCpp::LlamaModelParams.new
model = LlamaCpp.llama_model_load_from_file('path/to/models/gemma-1.1-7b-it.Q4_K_M.gguf', model_params)

context_params = LlamaCpp::LlamaContextParams.new
context = LlamaCpp.llama_init_from_model(model, context_params)

puts LlamaCpp.generate(context, 'Hello, World.')

# free the context and model when done
LlamaCpp.llama_free(context)
LlamaCpp.llama_model_free(model)
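And to answer your install question: you don't install the gem from the llama.cpp directory itself. A rough sketch, assuming you first install your cmake build into a prefix (the $HOME/llama.cpp/local path here is just an example, not something the gem prescribes); run this from the llama.cpp source directory:
cmake --install build --prefix $HOME/llama.cpp/local
gem install llama_cpp -- --with-opt-dir=$HOME/llama.cpp/local
--with-opt-dir only needs a directory whose include/ contains llama.h and whose lib/ contains libllama, which is the same role /opt/homebrew plays in the commands above.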
Wow, I found that llama_cpp.rb is now implemented in C (not C++): https://yoshoku.hatenablog.com/entry/2025/02/02/180000