langchain_llamacpp_chat_model: 7 files changed, +33 -6 lines

File 1 of 7 (the LlamaChatModel class):

@@ -8,6 +8,7 @@
 
 class LlamaChatModel(BaseChatOpenAI):
     model_name: str = "unknown"
+    llama: Llama = None
 
     def __init__(
         self,
@@ -19,3 +20,9 @@ def __init__(
             client=LLamaOpenAIClientProxy(llama=llama),
             async_client=LLamaOpenAIClientAsyncProxy(llama=llama),
         )
+        self.llama = llama
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return self.llama.model_path
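Taken together, the model now keeps a reference to the wrapped Llama instance, and _llm_type resolves to the path of the GGUF file backing it. A minimal sketch of the resulting behavior, assuming a local model file at a hypothetical path:

from llama_cpp import Llama
from langchain_llamacpp_chat_model import LlamaChatModel

# Hypothetical local GGUF file; any path accepted by Llama works here.
llama = Llama(model_path="/models/example-7b.Q4_K_M.gguf")
chat_model = LlamaChatModel(llama=llama)

# The new property returns the wrapped model's path verbatim.
assert chat_model._llm_type == "/models/example-7b.Q4_K_M.gguf"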
File 2 of 7 (tests/test_functional/models_configuration.py):

@@ -45,8 +45,8 @@ def _create_models_settings():
     return models
 
 
-def create_llama(request) -> Llama:
-    local_path = _model_local_path(request.param)
+def create_llama(params) -> Llama:
+    local_path = _model_local_path(params)
 
     return Llama(
         model_path=local_path,
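Since create_llama no longer unwraps the pytest request itself, call sites change in two ways, as the remaining files show. A short sketch, under the assumption that models_to_test holds plain configuration dicts (only the "repo_id" key is visible in this diff):

import pytest
from llama_cpp import Llama
from tests.test_functional.models_configuration import create_llama, models_to_test

# Non-parametrized callers can now pass a configuration dict directly:
default_llama: Llama = create_llama(models_to_test[0])

# Parametrized fixtures unwrap pytest's request themselves:
@pytest.fixture(params=models_to_test)
def llama(request) -> Llama:
    return create_llama(request.param)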
File 3 of 7 (the TestAInvoke module):

@@ -19,7 +19,7 @@ class TestAInvoke:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)
 
     @pytest.fixture
     def instance(self, llama):
12
12
params = models_to_test , ids = [config ["repo_id" ] for config in models_to_test ]
13
13
)
14
14
def llama (self , request ) -> Llama :
15
- return create_llama (request )
15
+ return create_llama (request . param )
16
16
17
17
@pytest .fixture
18
18
def instance (self , llama ):
Original file line number Diff line number Diff line change @@ -20,7 +20,7 @@ class TestInvoke:
20
20
params = models_to_test , ids = [config ["repo_id" ] for config in models_to_test ]
21
21
)
22
22
def llama (self , request ) -> Llama :
23
- return create_llama (request )
23
+ return create_llama (request . param )
24
24
25
25
@pytest .fixture
26
26
def instance (self , llama ):
File 6 of 7 (new file): a test module covering the _llm_type property.

+from llama_cpp import Llama
+import pytest
+from langchain_llamacpp_chat_model import LlamaChatModel
+from tests.test_functional.models_configuration import create_llama, models_to_test
+
+
+class TestInvoke:
+
+    @pytest.fixture()
+    def llama(self) -> Llama:
+
+        return create_llama(models_to_test[0])
+
+    @pytest.fixture
+    def instance(self, llama):
+        return LlamaChatModel(llama=llama)
+
+    def test_llm_type(self, instance: LlamaChatModel):
+        result = instance._llm_type
+        assert models_to_test[0]["repo_id"] in result
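The assertion presumes that the local path produced by _model_local_path (and therefore the value _llm_type returns) embeds the model's repo_id; that detail is not visible in this diff. With the file in place, the new test can be selected with pytest -k test_llm_type.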
File 7 of 7 (the TestStream module):

@@ -12,7 +12,7 @@ class TestStream:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)
 
     @pytest.fixture
     def instance(self, llama):