@@ -1231,6 +1231,29 @@ static VALUE rb_llama_model_load_from_splits(VALUE self, VALUE paths, VALUE para
  return TypedData_Wrap_Struct(rb_cLlamaModel, &llama_model_wrapper_data_type, model_wrapper);
}

+/**
+ * @overload llama_model_save_to_file(model, path_model)
+ * @param [LlamaModel] model
+ * @param [String] path_model
+ * @return [NilClass]
+ */
+static VALUE rb_llama_model_save_to_file(VALUE self, VALUE model, VALUE path_model) {
+  if (!rb_obj_is_kind_of(model, rb_cLlamaModel)) {
+    rb_raise(rb_eArgError, "model must be a LlamaModel");
+    return Qnil;
+  }
+  if (!RB_TYPE_P(path_model, T_STRING)) {
+    rb_raise(rb_eArgError, "path_model must be a String");
+    return Qnil;
+  }
+  llama_model_wrapper* model_wrapper = get_llama_model_wrapper(model);
+  const char* path_model_ = StringValueCStr(path_model);
+  llama_model_save_to_file(model_wrapper->model, path_model_);
+  RB_GC_GUARD(model);
+  RB_GC_GUARD(path_model);
+  return Qnil;
+}
+
/**
 * @overload llama_init_from_model(model, params)
 * @param [LlamaModel] model
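The wrapper above validates its arguments before touching the C API: a non-`LlamaModel` first argument or a non-`String` second argument raises `ArgumentError`, and the `RB_GC_GUARD` calls keep both Ruby objects alive until the underlying `llama_model_save_to_file` call is done with the C string obtained via `StringValueCStr`. A minimal sketch of the error path from Ruby, assuming the module function registered in the hunk below:

```ruby
require 'llama_cpp'

# Passing anything other than a LlamaModel is rejected before
# the llama.cpp C function is ever called.
begin
  LlamaCpp.llama_model_save_to_file(:not_a_model, 'out.gguf')
rescue ArgumentError => e
  puts e.message # => "model must be a LlamaModel"
end
```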
@@ -4803,6 +4826,9 @@ void Init_llama_cpp(void) {
  /* llama_model_load_from_splits */
  rb_define_module_function(rb_mLlamaCpp, "llama_model_load_from_splits", rb_llama_model_load_from_splits, 2);

+  /* llama_model_save_to_file */
+  rb_define_module_function(rb_mLlamaCpp, "llama_model_save_to_file", rb_llama_model_save_to_file, 2);
+
  /* llama_model_free */
  rb_define_module_function(rb_mLlamaCpp, "llama_model_free", rb_llama_model_free, 1);
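Once registered with arity 2 as above, the new binding is callable as a module function on `LlamaCpp` and returns `nil`, per the `@return [NilClass]` doc tag. A hedged usage sketch: `llama_model_default_params` and `llama_model_load_from_file` are assumed to be bound elsewhere in the gem (they are not part of this diff), and the paths are placeholders.

```ruby
require 'llama_cpp'

# Assumed bindings, not part of this diff.
params = LlamaCpp.llama_model_default_params
model = LlamaCpp.llama_model_load_from_file('model.gguf', params)

# New in this commit: write the loaded model back out to disk.
LlamaCpp.llama_model_save_to_file(model, 'model-copy.gguf')

LlamaCpp.llama_model_free(model)
```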