From 9536b24a631166fdc2fb09b41689cf1b971e5b20 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Mon, 24 Feb 2025 22:51:41 +0000 Subject: [PATCH] Check if terminal is compatible with emojis before using them Just in case it isn't. Signed-off-by: Eric Curtin --- ramalama/model.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ramalama/model.py b/ramalama/model.py index 1463268e..d4590d29 100644 --- a/ramalama/model.py +++ b/ramalama/model.py @@ -13,6 +13,7 @@ get_gpu, run_cmd, ) +from ramalama.console import EMOJI from ramalama.gguf_parser import GGUFInfoParser from ramalama.kube import Kube from ramalama.model_inspect import GGUFModelInfo, ModelInfoBase @@ -213,8 +214,7 @@ def setup_container(self, args): conman_args.extend(container_labels) # if args.subcommand is run add LLAMA_PROMPT_PREFIX to the container - if hasattr(args, "subcommand") and args.subcommand == "run": - # if podman + if EMOJI and hasattr(args, "subcommand") and args.subcommand == "run": if os.path.basename(args.engine) == "podman": conman_args += ["--env", "LLAMA_PROMPT_PREFIX=🦭 > "] elif os.path.basename(args.engine) == "docker": @@ -392,7 +392,7 @@ def build_exec_args_run(self, args, model_path, prompt): exec_model_path = model_path if not args.container else MNT_FILE # override prompt if not set to the local call - if "LLAMA_PROMPT_PREFIX" not in os.environ: + if EMOJI and "LLAMA_PROMPT_PREFIX" not in os.environ: os.environ["LLAMA_PROMPT_PREFIX"] = "🦙 > " exec_args = ["llama-run", "-c", f"{args.context}", "--temp", f"{args.temp}"]