
Commit 5565a30

FEAT+FIX: Greeting and minor fixes (#90)
* FEAT: Greeting feature (like in text-generation webui)
* FIX: !ai and !chat prefixes removed from the user's message
* Remove unnecessary sentence
* Update textgen_webui.py
* FIX: Greeting function keeps appending itself to the conversation history.
* FEAT: greeting for openai

Co-authored-by: dborodin836 <dborodin836@gmail.com>
1 parent 3227b0b commit 5565a30

File tree

4 files changed: +22 -3 lines changed

config.ini
config.py
modules/api/openai.py
modules/commands/textgen_webui.py

config.ini (+6)

@@ -99,6 +99,12 @@ ENABLE_SOFT_LIMIT_FOR_CUSTOM_MODEL=True
 ENABLE_CUSTOM_MODEL=False
 CUSTOM_MODEL_COMMAND=!ai
 CUSTOM_MODEL_CHAT_COMMAND=!chat
+; Adds the first message on behalf of AI,
+; does the same thing as the "greeting" in text generation webui.
+; Should help with the way you want the AI to talk.
+; Example:
+; GREETING=Hello fellow human. *Bzzzt!* *Steam pump sounds* Don't mind it, it's my circuits working on burning money, so I can work a bit longer. So, tell me, do you want to buy hats? These precious... magnificent... works of art? I'm sure you can't withstand their beauty! I give you hats, you give me money, I live longer and give you more hats. What do you think?
+GREETING=

 ; 127.0.0.1:5000 or your-uri-here.trycloudflare.com
 CUSTOM_MODEL_HOST=127.0.0.1:5000
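A small sketch of the INI semantics of the new key: lines starting with ";" are comments, and leaving GREETING= empty parses to an empty string (i.e. no greeting). The [GENERAL] section name below is a placeholder for this sketch, not taken from the repository.

import configparser

# Hypothetical snippet mirroring the new config.ini lines.
sample = """
[GENERAL]
; GREETING=Hello fellow human.
GREETING=
"""

parser = configparser.ConfigParser()
parser.read_string(sample)

# The commented example is ignored; the empty GREETING= yields "".
assert parser["GENERAL"]["GREETING"] == ""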

config.py (+1)

@@ -74,6 +74,7 @@ class Config(BaseModel):
     CUSTOM_MODEL_HOST: str
     CUSTOM_MODEL_COMMAND: str
     CUSTOM_MODEL_CHAT_COMMAND: str
+    GREETING: str

     CONFIRMABLE_QUEUE: bool
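A minimal sketch of how the new field behaves on a pydantic BaseModel; the values below are placeholders, not repository data. An empty GREETING= line in config.ini simply becomes an empty string on the model.

from pydantic import BaseModel


class Config(BaseModel):
    CUSTOM_MODEL_CHAT_COMMAND: str
    GREETING: str  # required field; "" when the config.ini line is left empty


# Placeholder values for illustration only.
cfg = Config(CUSTOM_MODEL_CHAT_COMMAND="!chat", GREETING="")
assert cfg.GREETING == ""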

modules/api/openai.py (+8 -2)

@@ -6,7 +6,7 @@
 from config import config
 from modules.logs import get_logger, log_gui_general_message, log_gui_model_message
 from modules.servers.tf2 import send_say_command_to_tf2
-from modules.typing import MessageHistory
+from modules.typing import MessageHistory, Message
 from modules.utils.text import add_prompts_by_flags, remove_hashtags

 main_logger = get_logger("main")
@@ -64,6 +64,9 @@ def handle_cgpt_request(
         gui_logger.error(f"Request '{user_prompt}' violates OPENAI TOS. Skipping...")
         return conversation_history

+    if not conversation_history:
+        conversation_history.append(Message(role="assistant", content=config.GREETING))
+
     conversation_history.append({"role": "user", "content": message})

     response = get_response(conversation_history, username, model)
@@ -94,7 +97,10 @@ def handle_gpt_request(
         )
         return

-    response = get_response([{"role": "user", "content": message}], username, model)
+    response = get_response([
+        Message(role="assistant", content=config.GREETING),
+        Message(role="user", content=message)
+    ], username, model)

     if response:
         main_logger.info(
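The change above seeds the history with the greeting as the first assistant message only when the history is still empty, which is what stops the greeting from re-appending itself on every request (the fix named in the commit message). A self-contained sketch of the pattern, assuming plain role/content dicts rather than the project's Message type:

from typing import Dict, List

ChatMessage = Dict[str, str]


def seed_history(history: List[ChatMessage], greeting: str) -> List[ChatMessage]:
    # Only an empty history receives the greeting, so calling this once per
    # request cannot duplicate the assistant seed message.
    if not history:
        history.append({"role": "assistant", "content": greeting})
    return history


history: List[ChatMessage] = []
seed_history(history, "Hello fellow human.")
seed_history(history, "Hello fellow human.")  # no-op: history is no longer empty
history.append({"role": "user", "content": "hi there"})
assert [m["role"] for m in history] == ["assistant", "user"]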

modules/commands/textgen_webui.py (+7 -1)

@@ -11,7 +11,7 @@
 def handle_custom_model(logline: LogLine, shared_dict: dict):
     main_logger.info(
         f"'{config.CUSTOM_MODEL_COMMAND}' command from user '{logline.username}'. "
-        f"Message: '{logline.prompt.removeprefix(config.GPT_COMMAND).strip()}'"
+        f"Message: '{logline.prompt.removeprefix(config.CUSTOM_MODEL_COMMAND).strip()}'"
     )
     log_gui_model_message(
         "CUSTOM",
@@ -23,8 +23,11 @@ def handle_custom_model(logline: LogLine, shared_dict: dict):
         logline.prompt, enable_soft_limit=config.ENABLE_SOFT_LIMIT_FOR_CUSTOM_MODEL
     )

+    message = message.removeprefix(config.CUSTOM_MODEL_COMMAND).strip()
+
     response = get_custom_model_response(
         [
+            {"role": "assistant", "content": config.GREETING},
             {"role": "user", "content": message},
         ]
     )
@@ -44,6 +47,9 @@ def handle_custom_chat(logline: LogLine, shared_dict: dict):
     )

     message = add_prompts_by_flags(logline.prompt)
+    message = message.removeprefix(config.CUSTOM_MODEL_CHAT_COMMAND).strip()
+    if not conversation_history:
+        conversation_history.append({"role": "assistant", "content": config.GREETING})
     conversation_history.append({"role": "user", "content": message})
     response = get_custom_model_response(conversation_history)
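The prefix fix relies on str.removeprefix (Python 3.9+), which strips only an exact leading match, so the !ai and !chat command prefixes are removed from the user's text before it reaches the model. The example strings below are illustrative, not repository data.

# Command prefixes are stripped; anything else is left untouched.
assert "!chat how are you?".removeprefix("!chat").strip() == "how are you?"
assert "!ai sell me a hat".removeprefix("!ai").strip() == "sell me a hat"
assert "hello !ai".removeprefix("!ai") == "hello !ai"  # no leading match, no change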
