Updated Groq language model with vision and audio support. Renamed some scripts to match the node names. The old Groq API is removed, sorry.
MNeMoNiCuZ committed Sep 14, 2024
1 parent 058f603 commit a4e717c
Showing 17 changed files with 892 additions and 47 deletions.
19 changes: 14 additions & 5 deletions __init__.py
@@ -1,13 +1,22 @@
from .nodes.fetch_and_save_image import FetchAndSaveImage
from .nodes.download_image_from_url import DownloadImageFromURL
from .nodes.generate_negative_prompt import GenerateNegativePrompt
from .nodes.groq_api_completion import GroqAPICompletion
from .nodes.save_text_file import SaveTextFile
from .nodes.get_file_path import GetFilePath
from .nodes.groq_api_llm import GroqAPILLM
from .nodes.groq_api_vlm import GroqAPIVLM
from .nodes.groq_api_alm_transcribe import GroqAPIALMTranscribe
#from .nodes.groq_api_alm_translate import GroqAPIALMTranslate


NODE_CLASS_MAPPINGS = {
"📁 Get File Path": GetFilePath,
"💾 Save Text File With Path": SaveTextFile,
"🖼️ Download Image from URL": FetchAndSaveImage,
"✨ Groq LLM API": GroqAPICompletion,
"🖼️ Download Image from URL": DownloadImageFromURL,
"✨💬 Groq LLM API": GroqAPILLM,
"✨📷 Groq VLM API": GroqAPIVLM,
"✨📝 Groq ALM API - Transcribe": GroqAPIALMTranscribe,
#"✨🌐 Groq ALM API - Translate [EN only]": GroqAPIALMTranslate,
"⛔ Generate Negative Prompt": GenerateNegativePrompt,
}

print("\033[34mMNeMiC Nodes: \033[92mLoaded\033[0m")
print("\033[34mMNeMiC Nodes: \033[92mLoaded\033[0m")
18 changes: 13 additions & 5 deletions nodes/__init__.py
@@ -1,11 +1,19 @@
from .fetch_and_save_image import FetchAndSaveImage
from .download_image_from_url import DownloadImageFromURL
from .generate_negative_prompt import GenerateNegativePrompt
from .groq_api_completion import GroqAPICompletion
from .save_text_file import SaveTextFile
from .get_file_path import GetFilePath
from .groq_api_llm import GroqAPILLM
from .groq_api_vlm import GroqAPIVLM
from .groq_api_alm_transcribe import GroqAPIALMTranscribe
#from .groq_api_alm_translate import GroqAPIALMTranslate

__all__ = [
"FetchAndSaveImage",
"GenerateNegativePrompt",
"GroqAPICompletion",
"DownloadImageFromURL",
"SaveTextFile",
"GetFilePath",
"GroqAPILLM",
"GroqAPIVLM",
"GroqAPIALMTranscribe",
#"GroqAPIALMTranslate",
"GenerateNegativePrompt",
]
@@ -8,26 +8,28 @@
def pil2tensor(image):
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

class FetchAndSaveImage:
class DownloadImageFromURL:
OUTPUT_NODE = True
RETURN_TYPES = ("IMAGE", "INT", "INT") # Image, Width, Height
RETURN_NAMES = ("image", "width", "height")
FUNCTION = "FetchAndSaveImage"
OUTPUT_TOOLTIPS = ("The downloaded image", "The width of the image", "The height of the image")
FUNCTION = "DownloadImageFromURL"
CATEGORY = "⚡ MNeMiC Nodes"
DESCRIPTION = "Downloads an image from a URL."

@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"image_url": ("STRING", {"multiline": False, "default": ""}),
"image_url": ("STRING", {"multiline": False, "default": "", "tooltip": "URL of the image to download."}),
},
"optional": {
"save_file_name_override": ("STRING", {"default": "", "multiline": False}),
"save_path": ("STRING", {"default": "", "multiline": False})
"save_file_name_override": ("STRING", {"default": "", "multiline": False, "tooltip": "Optional override for the name of the saved image file."}),
"save_path": ("STRING", {"default": "", "multiline": False, "tooltip": "Optional path to save the image. Defaults to the current directory."})
}
}

def FetchAndSaveImage(self, image_url, save_path='', save_file_name_override=''):
def DownloadImageFromURL(self, image_url, save_path='', save_file_name_override=''):
if not image_url:
print("Error: No image URL provided.")
return None, None, None
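
For reference, a minimal standalone sketch of the download-and-convert flow this node wraps, using the same pil2tensor normalization; it assumes the requests and Pillow packages, and the URL is a placeholder:

import io

import numpy as np
import requests
import torch
from PIL import Image

def pil2tensor(image):
    # HxWxC uint8 -> 1xHxWxC float32 in [0, 1], matching the node's helper
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

response = requests.get("https://example.com/sample.png", timeout=30)  # placeholder URL
response.raise_for_status()
image = Image.open(io.BytesIO(response.content)).convert("RGB")
tensor = pil2tensor(image)
print(tensor.shape, image.width, image.height)
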
16 changes: 9 additions & 7 deletions nodes/generate_negative_prompt.py
@@ -11,21 +11,23 @@ def __init__(self):
def INPUT_TYPES(cls):
return {
"required": {
"input_prompt": ("STRING", {"forceInput": True}),
"max_length": ("INT", {"default": 100, "min": 1, "max": 1024, "step": 1}),
"num_beams": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
"temperature": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 2.0, "step": 0.1}),
"top_k": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}),
"top_p": ("FLOAT", {"default": 0.92, "min": 0.0, "max": 1.0, "step": 0.01}),
"blocked_words": ("STRING", {"default": "Blocked words, one per line, remove unwanted embeddings or words", "multiline": True}),
"input_prompt": ("STRING", {"forceInput": True, "tooltip": "The positive prompt you want to generate a negative prompt for."}),
"max_length": ("INT", {"default": 100, "min": 1, "max": 1024, "step": 1, "tooltip": "Maximum token length of the generated output."}),
"num_beams": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1, "tooltip": "Number of beams for beam search. Higher values improve accuracy."}),
"temperature": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 2.0, "step": 0.1, "tooltip": "Sampling temperature. Lower values make the output more deterministic."}),
"top_k": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1, "tooltip": "Limits how many of the most likely words are considered for each choice.\n\nFor example, top_k=50 means the model picks from the top 50 most likely words.\n\nA lower value narrows the choices, making the output more predictable, while a higher value adds diversity."}),
"top_p": ("FLOAT", {"default": 0.92, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Limits the pool of words the model can choose from based on their combined probability.\n\nSet it closer to 1 to allow more variety in output. Lowering this (e.g., 0.9) will restrict the output to the most likely words, making responses more focused."}),
"blocked_words": ("STRING", {"default": "Blocked words, one per line, remove unwanted embeddings or words", "multiline": True, "tooltip": "Words to exclude from the output."}),
}
}

OUTPUT_NODE = True
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("negative_prompt",)
OUTPUT_TOOLTIPS = ("The generated negative prompt",)
FUNCTION = "generate_negative_prompt"
CATEGORY = "⚡ MNeMiC Nodes"
DESCRIPTION = "EXPERIMENTAL: Generates a negative prompt matching the input.\n\nThe model is quite weak and random though, so it doesn't work well. It mostly just generates random negative prompts trained on CivitAI negative prompts.\n\nNSFW words may appear."

def generate_negative_prompt(self, input_prompt, max_length, num_beams, temperature, top_k, top_p, blocked_words):
current_directory = os.path.dirname(os.path.realpath(__file__))
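
The new tooltips describe standard text-generation sampling controls. A minimal sketch of how these inputs typically map onto a Hugging Face transformers generate() call; "gpt2" stands in for the node's actual negative-prompt model:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")        # placeholder model
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("masterpiece, portrait of a knight, studio lighting", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    max_length=100,    # max_length input
    num_beams=1,       # num_beams input
    do_sample=True,
    temperature=1.0,   # temperature input
    top_k=50,          # top_k input
    top_p=0.92,        # top_p input
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
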
78 changes: 78 additions & 0 deletions nodes/get_file_path.py
@@ -0,0 +1,78 @@
import os
from pathlib import Path
from aiohttp import web
import folder_paths

class GetFilePath:
OUTPUT_NODE = True
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING")
RETURN_NAMES = ("full_file_path", "file_path_only", "file_name_only", "file_extension_only")
OUTPUT_TOOLTIPS = ("The full path to the file", "The path to the file", "The name of the file", "The extension of the file")
FUNCTION = "get_file_path"
CATEGORY = "⚡ MNeMiC Nodes"
DESCRIPTION = "Gets a file path and returns components of the file path."
DOCUMENTATION = "This is documentation"

@classmethod
def INPUT_TYPES(cls):
input_dir = folder_paths.get_input_directory()
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
return {
"required": {
"file": (sorted(files), {"file_upload": True, "tooltip": "Place your files in the 'input'-folder inside ComfyUI.\n\nBrowsing functionality is not yet supported. Please send help!"}),
}
}

def get_file_path(self, file):
try:
# Handle file upload within the node logic
uploaded_file_path = self.upload_file(file)

# Resolve the full file path using folder_paths
full_file_path = Path(uploaded_file_path)

# Check if the file exists
if not full_file_path.exists():
print(f"Error: File does not exist: {full_file_path}")
return None, None, None, None

# Extract file components
file_path_only = str(full_file_path.parent)
file_name_only = full_file_path.stem # File name without the extension
file_extension_only = full_file_path.suffix # File extension

# Return all as strings
return (
str(full_file_path), # Full file path
file_path_only, # Path only
file_name_only, # File name without extension
file_extension_only, # File extension
)

except Exception as e:
# Handle any unexpected errors
print(f"Error: Failed to process file path. Details: {str(e)}")
return None, None, None, None

def upload_file(self, file):
try:
# Define where to save uploaded files (e.g., input directory)
input_dir = folder_paths.get_input_directory()
file_path = os.path.join(input_dir, file)

# Check if file already exists in the directory
if os.path.exists(file_path):
print(f"File {file} already exists in {input_dir}. Skipping upload.")
return file_path

# Mimic the upload logic
with open(file_path, "wb") as f:
# Here, you would write the file content to disk
f.write(file) # Assuming `file` contains the file data

print(f"File uploaded successfully: {file_path}")
return file_path

except Exception as e:
print(f"Error uploading file: {str(e)}")
return None
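
The component split returned by get_file_path is plain pathlib; a minimal standalone sketch with a hypothetical file name:

from pathlib import Path

# Hypothetical file placed in ComfyUI's input folder.
full_file_path = Path("input") / "example_image.png"
print(str(full_file_path))          # full_file_path
print(str(full_file_path.parent))   # file_path_only
print(full_file_path.stem)          # file_name_only      -> "example_image"
print(full_file_path.suffix)        # file_extension_only -> ".png"
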
10 changes: 10 additions & 0 deletions nodes/groq/DefaultPrompts_ALM_Transcribe.json
@@ -0,0 +1,10 @@
[
{
"name": "Transcribe the song lyrics",
"content": ""
},
{
"name": "Transcribe meeting notes accurately",
"content": "Write [INAUDIBLE] when unclear."
}
]
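
These presets supply the prompt string for speech-to-text. A minimal sketch of the kind of Groq transcription request they could feed, assuming the official groq Python client and its OpenAI-style audio endpoint; the model name and audio file are placeholders:

import os
from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])

with open("meeting.wav", "rb") as audio_file:  # placeholder audio file
    transcription = client.audio.transcriptions.create(
        file=("meeting.wav", audio_file.read()),
        model="whisper-large-v3",                  # placeholder model name
        prompt="Write [INAUDIBLE] when unclear.",  # the preset "content" goes here
    )
print(transcription.text)
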
6 changes: 6 additions & 0 deletions nodes/groq/DefaultPrompts_ALM_Translate.json
@@ -0,0 +1,6 @@
[
{
"name": "Translate the audio file using the style and guidance of [user_input]",
"content": ""
}
]
6 changes: 6 additions & 0 deletions nodes/groq/DefaultPrompts_VLM.json
@@ -0,0 +1,6 @@
[
{
"name": "Describe the attached image following the [user_input] instruction",
"content": "You are a vision-language model. Analyze the attached image and respond to the user request based on their query. If the query is empty, describe the image in a clear and descriptive manner."
}
]
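
The VLM preset pairs a describe-the-image instruction with the user's query. A minimal sketch of a vision request it could drive, assuming the groq client's OpenAI-style chat endpoint with base64 image content; the model name and image path are placeholders:

import base64
import os
from groq import Groq

client = Groq(api_key=os.environ["GROQ_API_KEY"])

with open("photo.jpg", "rb") as f:  # placeholder image file
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

completion = client.chat.completions.create(
    model="llava-v1.5-7b-4096-preview",  # placeholder vision model name
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe the attached image."},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
            ],
        },
    ],
)
print(completion.choices[0].message.content)
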
6 changes: 6 additions & 0 deletions nodes/groq/UserPrompts_ALM_Transcribe.json
@@ -0,0 +1,6 @@
[
{
"name": "Add your own presets in UserPrompts.json",
"content": ""
}
]
6 changes: 6 additions & 0 deletions nodes/groq/UserPrompts_ALM_Translate.json
@@ -0,0 +1,6 @@
[
{
"name": "Add your own presets in UserPrompts.json",
"content": ""
}
]
6 changes: 6 additions & 0 deletions nodes/groq/UserPrompts_VLM.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
[
{
"name": "Add your own presets in UserPrompts.json",
"content": ""
}
]
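
All of the prompt preset files above share the same shape: a JSON list of objects with "name" and "content" keys. A minimal sketch for loading and listing one of them; the path is relative to the repository root:

import json

with open("nodes/groq/UserPrompts_VLM.json", "r", encoding="utf-8") as f:
    presets = json.load(f)

for preset in presets:
    print(f"{preset['name']!r}: {preset['content']!r}")
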