Commit 31bb7c3

Merge pull request #87 from neph1/update-v0.31.1

llama3 "template"

Authored by neph1 on Jun 30, 2024
2 parents 455a30c + c9e164f
Showing 6 changed files with 22 additions and 14 deletions.
2 changes: 1 addition & 1 deletion llm_config.yaml
@@ -16,7 +16,7 @@ DUNGEON_LOCATION_TEMPLATE: '{"index": (int), "name": "", "description": 25 words
 CHARACTER_TEMPLATE: '{"name":"", "description": "50 words", "appearance": "25 words", "personality": "50 words", "money":(int), "level":"", "gender":"m/f/n", "age":(int), "race":""}'
 FOLLOW_TEMPLATE: '{{"response":"yes or no", "reason":"50 words"}}'
 ITEM_TYPES: ["Weapon", "Wearable", "Health", "Money", "Trash", "Food", "Drink", "Key"]
-PRE_PROMPT: 'You are a creative game keeper for a role playing game (RPG). You craft detailed worlds and interesting characters with unique and deep personalities for the player to interact with. Do not acknowledge the task, just perform it.'
+PRE_PROMPT: 'You are a creative game keeper for a role playing game (RPG). You craft detailed worlds and interesting characters with unique and deep personalities for the player to interact with. Do not acknowledge the task or speak directly to the user, just perform it.'
 BASE_PROMPT: '<context>{context}</context>\n[USER_START]Rewrite [{input_text}] in your own words using the information found inside the <context> tags to create a background for your text. Use about {max_words} words.'
 DIALOGUE_PROMPT: '<context>{context}</context>\nThe following is a conversation between {character1} and {character2}; {character2}s sentiment towards {character1}: {sentiment}. Write a single response as {character2} in third person pov, using {character2} description and other information found inside the <context> tags. If {character2} has a quest active, they will discuss it based on its status. Respond in JSON using this template: """{dialogue_template}""". [USER_START]Continue the following conversation as {character2}: {previous_conversation}'
 COMBAT_PROMPT: '<context>{context}</context>\nThe following is a combat scene between {attackers} and {defenders} in {location}. [USER_START] Describe the following combat result in about 150 words in vivid language, using the characters weapons and their health status: 1.0 is highest, 0.0 is lowest. Combat Result: {input_text}'
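These templates are plain strings with placeholders. A minimal sketch (assumed usage, not code from this repository) of filling BASE_PROMPT before it reaches an io adapter:

```python
# Minimal sketch: fill BASE_PROMPT's placeholders with str.replace so the
# literal '<context>{context}</context>' and '[USER_START]' markers survive;
# the io adapters' set_prompt() (below) rewrites those later.
BASE_PROMPT = ('<context>{context}</context>\n'
               '[USER_START]Rewrite [{input_text}] in your own words using the '
               'information found inside the <context> tags to create a '
               'background for your text. Use about {max_words} words.')

prompt = (BASE_PROMPT
          .replace('{input_text}', 'a dusty roadside tavern')  # example input
          .replace('{max_words}', '100'))
```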
3 changes: 2 additions & 1 deletion tale/llm/LivingNpc.py
@@ -303,7 +303,8 @@ def _defer_result(self, action: str, verb: str="idle-action"):
         self.tell_action_deferred(verb)
 
     def tell_action_deferred(self, verb: str):
-        actions = '\n'.join(self.deferred_actions) + '\n'
+        actions = '\n'.join(self.deferred_actions) + '\n\n'
+        actions = actions.replace('\n\n\n', '\n\n')
         deferred_action = ParseResult(verb=verb, unparsed=actions, who_info=None)
         self.tell_others(actions)
         self.location._notify_action_all(deferred_action, actor=self)
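A quick illustration of why both new lines are needed:

```python
# If the last deferred action already ends with '\n', appending '\n\n'
# leaves a triple newline; the replace() collapses it so the block always
# ends with exactly one blank line.
deferred_actions = ['Test picks up the sword.', 'Test sits down.\n']
actions = '\n'.join(deferred_actions) + '\n\n'
assert actions.endswith('\n\n\n')
actions = actions.replace('\n\n\n', '\n\n')
assert actions == 'Test picks up the sword.\nTest sits down.\n\n'
```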
16 changes: 12 additions & 4 deletions tale/llm/io_adapters.py
@@ -13,11 +13,13 @@
 
 class AbstractIoAdapter(ABC):
 
-    def __init__(self, url: str, stream_endpoint: str, user_start_prompt: str, user_end_prompt: str):
+    def __init__(self, url: str, stream_endpoint: str, user_start_prompt: str, user_end_prompt: str, system_start_prompt: str = '', prompt_end: str = ''):
         self.url = url
         self.stream_endpoint = stream_endpoint
+        self.system_start_prompt = system_start_prompt
         self.user_start_prompt = user_start_prompt
         self.user_end_prompt = user_end_prompt
+        self.prompt_end = prompt_end
 
     @abstractmethod
     def stream_request(self, request_body: dict, io = None, wait: bool = False) -> str:
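The two new arguments carry model-specific chat markers. As a hedged example, a Llama 3 style setup might pass the following token strings (taken from the published Llama 3 chat template, not from this commit):

```python
# Hypothetical Llama 3 markers; the adapter treats them as opaque strings,
# so any chat template can be expressed the same way. Arguments are
# positional, matching the constructor calls in tale/llm/llm_io.py below.
adapter = LlamaCppAdapter(
    'http://localhost:8080',                              # url (assumed)
    '/v1/chat/completions',                               # stream_endpoint (assumed)
    '<|start_header_id|>user<|end_header_id|>\n\n',       # user_start_prompt
    '<|eot_id|>',                                         # user_end_prompt
    '<|start_header_id|>system<|end_header_id|>\n\n',     # system_start_prompt
    '<|start_header_id|>assistant<|end_header_id|>\n\n',  # prompt_end
)
```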
@@ -37,9 +39,7 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
 
 class KoboldCppAdapter(AbstractIoAdapter):
 
-
-
-    def __init__(self, url: str, stream_endpoint: str, data_endpoint: str, user_start_prompt: str, user_end_prompt: str):
+    def __init__(self, url: str, stream_endpoint: str, data_endpoint: str, user_start_prompt: str, user_end_prompt: str, system_start_prompt: str = '', prompt_end: str = ''):
         super().__init__(url, stream_endpoint, user_start_prompt, user_end_prompt)
         self.data_endpoint = data_endpoint
         self.place_context_in_memory = False
@@ -87,6 +87,8 @@ def parse_result(self, result: str) -> str:
         return json.loads(result)['results'][0]['text']
 
     def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
+        if self.system_start_prompt:
+            prompt = self.system_start_prompt + prompt
         if self.user_start_prompt:
             prompt = prompt.replace('[USER_START]', self.user_start_prompt)
         if self.user_end_prompt:
@@ -96,6 +98,8 @@ def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
             request_body['memory'] = f'<context>{context}</context>'
         else:
             prompt = prompt.replace('<context>{context}</context>', f'<context>{context}</context>')
+        if self.prompt_end:
+            prompt = prompt + self.prompt_end
         request_body['prompt'] = prompt
         return request_body
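Composition order matters here: the system marker is prefixed first, [USER_START] is substituted in place, and prompt_end lands at the very end, after the context handling. (Note that KoboldCppAdapter's __init__ above does not forward the two new arguments to super().__init__(), so they keep their empty defaults there unless that call is extended.) A standalone sketch of the flow, with assumed Llama 3 markers:

```python
# Standalone approximation of KoboldCppAdapter.set_prompt; the user_end
# handling hidden in the collapsed hunk above is included by analogy with
# the llama.cpp adapter below.
def compose(prompt: str, context: str, place_context_in_memory: bool = True) -> dict:
    system_start = '<|start_header_id|>system<|end_header_id|>\n\n'   # assumed
    user_start = '<|start_header_id|>user<|end_header_id|>\n\n'       # assumed
    prompt_end = '<|start_header_id|>assistant<|end_header_id|>\n\n'  # assumed
    body = {}
    prompt = system_start + prompt
    prompt = prompt.replace('[USER_START]', user_start)
    if place_context_in_memory:
        # KoboldCpp can carry the context in its dedicated 'memory' field
        prompt = prompt.replace('<context>{context}</context>', '')
        body['memory'] = f'<context>{context}</context>'
    else:
        prompt = prompt.replace('<context>{context}</context>',
                                f'<context>{context}</context>')
    body['prompt'] = prompt + prompt_end
    return body
```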

@@ -143,12 +147,16 @@ def parse_result(self, result: str) -> str:
             raise LlmResponseException("Error parsing result from backend")
 
     def set_prompt(self, request_body: dict, prompt: str, context: str = '') -> dict:
+        if self.system_start_prompt:
+            prompt = self.system_start_prompt + prompt
         if self.user_start_prompt:
             prompt = prompt.replace('[USER_START]', self.user_start_prompt)
         if self.user_end_prompt:
             prompt = prompt + self.user_end_prompt
         if context:
             prompt = prompt.replace('<context>{context}</context>', f'<context>{context}</context>')
             #request_body['messages'][0]['content'] = f'<context>{context}</context>'
+        if self.prompt_end:
+            prompt = prompt + self.prompt_end
         request_body['messages'][1]['content'] = prompt
         return request_body
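Unlike the KoboldCpp adapter, this one fills an OpenAI-style chat body, yet the new markers are spliced into the single user message rather than mapped to separate chat roles; the same SYSTEM_START/PROMPT_END strings therefore drive both adapters. A hedged sketch of the body the indexing above implies (message contents assumed for illustration):

```python
# set_prompt writes into messages[1], so the caller presumably prepares
# a two-message body along these lines before the request is sent.
request_body = {
    'messages': [
        {'role': 'system', 'content': ''},  # referenced by the commented-out line
        {'role': 'user', 'content': ''},    # overwritten by set_prompt
    ],
}
```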
4 changes: 2 additions & 2 deletions tale/llm/llm_io.py
@@ -18,9 +18,9 @@ def __init__(self, config: dict = None, backend_config: dict = None):
                 headers['Authorization'] = f"Bearer {backend_config['OPENAI_API_KEY']}"
             self.openai_json_format = json.loads(backend_config['OPENAI_JSON_FORMAT'])
             self.headers = headers
-            self.io_adapter = LlamaCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], config['USER_START'], config['USER_END'])
+            self.io_adapter = LlamaCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], config['USER_START'], config['USER_END'], config.get('SYSTEM_START', ''), config.get('PROMPT_END', ''))
         else:
-            self.io_adapter = KoboldCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], backend_config['DATA_ENDPOINT'], config['USER_START'], config['USER_END'])
+            self.io_adapter = KoboldCppAdapter(self.url, backend_config['STREAM_ENDPOINT'], backend_config['DATA_ENDPOINT'], config['USER_START'], config['USER_END'], config.get('SYSTEM_START', ''), config.get('PROMPT_END', ''))
             self.headers = {}
 
         self.stream = backend_config['STREAM']
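Because the new keys are read with config.get() and default to '', configs written before this change keep working: the related branches in set_prompt() are simply skipped. A hypothetical config carrying a Llama 3 template might supply:

```python
# Hypothetical config entries (shown as the dict the code reads); the
# marker values are an assumed Llama 3 chat template, not part of this commit.
config = {
    'USER_START': '<|start_header_id|>user<|end_header_id|>\n\n',
    'USER_END': '<|eot_id|>',
    'SYSTEM_START': '<|start_header_id|>system<|end_header_id|>\n\n',
    'PROMPT_END': '<|start_header_id|>assistant<|end_header_id|>\n\n',
}
```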
3 changes: 1 addition & 2 deletions tale/player.py
@@ -94,8 +94,7 @@ def tell(self, message: str, *, end: bool=False, format: bool=True, evoke: bool=
         msgs = msg.split('\n\n')
         if len(msgs) > 1:
             for msg in msgs:
-                self._output.print(msg, end=end, format=format)
-                self._output.p()
+                self._output.print(msg, end=True, format=format)
         else:
             self._output.print(msg, end=end, format=format)
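With deferred-action blobs now ending in '\n\n' (see LivingNpc.py above), multi-paragraph messages split cleanly, and each piece is printed as its own completed block (end=True) instead of relying on a trailing p() call. Roughly:

```python
# Illustration of the split; note that str.split leaves a trailing ''
# element when the message ends with the separator.
msg = 'Test picks up the sword.\n\nTest sits down.\n\n'
for paragraph in msg.split('\n\n'):
    print(repr(paragraph))  # 'Test picks up the sword.', 'Test sits down.', ''
```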

8 changes: 4 additions & 4 deletions tests/test_llm_ext.py
@@ -175,7 +175,7 @@ def test_do_say(self):
         self.npc.do_say(what_happened='something', actor=self.npc2)
         assert(self.npc.sentiments['actor'] == 'kind')
         assert(len(self.npc._observed_events) == 2)
-        assert ["test : Hello there, how can I assist you today?\n"] == self.msg_trace_npc.messages
+        assert ["test : Hello there, how can I assist you today?\n\n"] == self.msg_trace_npc.messages
 
     @responses.activate
     def test_idle_action(self):
@@ -186,8 +186,8 @@ def test_idle_action(self):
         self.llm_util._character.io_util.response = []
         action = self.npc.idle_action()
         assert(action == 'sits down on a chair')
-        assert(llm_cache.get_events(self.npc2._observed_events) == 'test : sits down on a chair\n')
-        assert ["test : sits down on a chair\n"] == self.msg_trace_npc.messages
+        assert(llm_cache.get_events(self.npc2._observed_events) == 'test : sits down on a chair\n\n')
+        assert ["test : sits down on a chair\n\n"] == self.msg_trace_npc.messages
 
     @responses.activate
     def test_do_react(self):
@@ -256,7 +256,7 @@ def test_give_action(self):
         self.npc.autonomous_action()
         assert self.npc.search_item('test item', include_location=False) == None
         assert(self.npc2.search_item('test item', include_location=False))
-        assert ["test : Test gives test item to test\n"] == self.msg_trace_npc.messages
+        assert ["test : Test gives test item to test\n\n"] == self.msg_trace_npc.messages
 
     @responses.activate
     def test_move_action(self):
