adding backend and agents
Hiteshydv001 committed Feb 15, 2025
1 parent 02efc12 commit 76d0d24
Showing 17 changed files with 493 additions and 67 deletions.
31 changes: 31 additions & 0 deletions agents/__init__.py
@@ -0,0 +1,31 @@
from .summarize_tool import SummarizeTool
from .write_post_tool import WritePostTool
from .sanitize_data_tool import SanitizeDataTool
from .summarize_validator_agent import SummarizeValidatorAgent
from .write_post_validator_agent import WritePostValidatorAgent
from .sanitize_data_validator_agent import SanitizeDataValidatorAgent
from .refiner_agent import RefinerAgent
from .validator_agent import ValidatorAgent
from .generate_comment_agent import GenerateCommentAgent
from .sentiment_analysis_agent import SentimentAnalysisAgent

class AgentManager:
    def __init__(self, max_retries=2, verbose=True):
        self.agents = {
            "summarize": SummarizeTool(max_retries=max_retries, verbose=verbose),
            "write_post": WritePostTool(max_retries=max_retries, verbose=verbose),
            "sanitize_data": SanitizeDataTool(max_retries=max_retries, verbose=verbose),
            "summarize_validator": SummarizeValidatorAgent(max_retries=max_retries, verbose=verbose),
            "write_post_validator": WritePostValidatorAgent(max_retries=max_retries, verbose=verbose),
            "sanitize_data_validator": SanitizeDataValidatorAgent(max_retries=max_retries, verbose=verbose),
            "refiner": RefinerAgent(max_retries=max_retries, verbose=verbose),
            "validator": ValidatorAgent(max_retries=max_retries, verbose=verbose),
            "generate_comment": GenerateCommentAgent(max_retries=max_retries, verbose=verbose),
            "sentiment_analysis": SentimentAnalysisAgent(max_retries=max_retries, verbose=verbose),
        }

    def get_agent(self, agent_name):
        agent = self.agents.get(agent_name)
        if not agent:
            raise ValueError(f"Agent '{agent_name}' not found.")
        return agent
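
For orientation, here is a minimal usage sketch of the registry above. It only uses names defined in this file; the example post text is made up, and running it requires the Gemini key that agent_base.py reads from Streamlit secrets.

```python
# Minimal usage sketch (assumes GEMINI_API_KEY is available to agents/agent_base.py).
from agents import AgentManager

manager = AgentManager(max_retries=2, verbose=True)

# Look up an agent by its registry key and run it.
summarizer = manager.get_agent("summarize")
summary = summarizer.execute("Thrilled to share that our team shipped the new analytics dashboard this week...")
print(summary)

# Unknown keys raise ValueError, so callers fail fast on typos:
# manager.get_agent("summarise")  # ValueError: Agent 'summarise' not found.
```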
74 changes: 74 additions & 0 deletions agents/agent_base.py
@@ -0,0 +1,74 @@
import google.generativeai as genai
from abc import ABC, abstractmethod
import streamlit as st
import time

# Configure Gemini API using Streamlit secrets
GEMINI_API_KEY = st.secrets.get("GEMINI_API_KEY")

if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
else:
    raise ValueError("GEMINI_API_KEY is missing! Make sure to set it in Streamlit secrets.")

class AgentBase(ABC):
    def __init__(self, name, max_retries=3, verbose=True):
        self.name = name
        self.max_retries = max_retries
        self.verbose = verbose

    @abstractmethod
    def execute(self, *args, **kwargs):
        pass

    def call_gemini(self, prompt, model="gemini-pro"):
        """Calls Gemini AI with retries, backoff, and caching to avoid rate limits."""
        if not GEMINI_API_KEY:
            raise ValueError(f"[{self.name}] GEMINI_API_KEY is missing. Check Streamlit secrets.")

        # Check cache (avoid redundant API calls)
        cache_key = f"gemini_cache_{hash(prompt)}"
        if cache_key in st.session_state:
            if self.verbose:
                print(f"[{self.name}] Returning cached response.")
            return st.session_state[cache_key]

        retries = 0
        while retries < self.max_retries:
            try:
                if self.verbose:
                    print(f"[{self.name}] Sending prompt to Gemini ({model}): {prompt}")

                # Create a model instance
                gemini_model = genai.GenerativeModel(model)
                response = gemini_model.generate_content(prompt)

                if response and hasattr(response, "text"):
                    reply = response.text.strip()
                else:
                    reply = "No response generated."

                # Cache response
                st.session_state[cache_key] = reply

                if self.verbose:
                    print(f"[{self.name}] Received response: {reply}")
                return reply

            except Exception as e:
                if "429" in str(e) or "Resource has been exhausted" in str(e):
                    wait_time = 2 ** retries  # Exponential backoff: 2s, 4s, 8s...
                    print(f"[{self.name}] Rate limit reached. Retrying in {wait_time} seconds...")
                    time.sleep(wait_time)  # Wait before retrying
                else:
                    print(f"[{self.name}] Unexpected error: {e}")
                    break  # Stop retrying for non-rate-limit errors

            retries += 1

        # If `gemini-pro` fails, try `gemini-lite` as a fallback
        if model == "gemini-pro":
            print(f"[{self.name}] Switching to 'gemini-lite' due to failures.")
            return self.call_gemini(prompt, model="gemini-lite")

        raise Exception(f"[{self.name}] Failed to get response from Gemini after {self.max_retries} retries.")
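
Every concrete agent below follows the same pattern: subclass AgentBase, set a name, and implement execute() as a thin prompt wrapper around call_gemini(), which supplies the caching, exponential backoff, and gemini-lite fallback. As a sketch, a hypothetical new agent (not part of this commit) would look like:

```python
from agents.agent_base import AgentBase

class TranslateAgent(AgentBase):  # hypothetical example, not included in this commit
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="TranslateAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, text, target_language="French"):
        """Builds a prompt and delegates to call_gemini(), inheriting caching and retries."""
        prompt = (
            f"Translate the following text into {target_language}:\n\n"
            f"{text}\n\nTranslation:"
        )
        return self.call_gemini(prompt, model="gemini-pro")
```

To make such an agent reachable through AgentManager, it would also need an import and a registry entry in agents/__init__.py.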
15 changes: 15 additions & 0 deletions agents/generate_comment_agent.py
@@ -0,0 +1,15 @@
from .agent_base import AgentBase

class GenerateCommentAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="GenerateCommentAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, post_content):
        """Generates a relevant LinkedIn comment for the given post."""
        prompt = (
            "You are an expert at generating engaging LinkedIn comments. Based on the given post content, "
            "write a professional, insightful, and engaging comment:\n\n"
            f"Post: {post_content}\n\nGenerated Comment:"
        )
        comment = self.call_gemini(prompt, model="gemini-pro")
        return comment
17 changes: 17 additions & 0 deletions agents/refiner_agent.py
@@ -0,0 +1,17 @@
from .agent_base import AgentBase

class RefinerAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="RefinerAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, draft):
        """Refines a LinkedIn post for clarity, engagement, and professional impact."""
        prompt = (
            "You are an expert social media editor who enhances LinkedIn posts for clarity, engagement, "
            "and professional impact.\n\n"
            "Please refine the following LinkedIn post draft to make it more engaging, concise, and impactful:\n\n"
            f"{draft}\n\nRefined LinkedIn Post:"
        )

        refined_post = self.call_gemini(prompt, model="gemini-pro")
        return refined_post
16 changes: 16 additions & 0 deletions agents/sanitize_data_tool.py
@@ -0,0 +1,16 @@
from .agent_base import AgentBase

class SanitizeDataTool(AgentBase):
    def __init__(self, max_retries=3, verbose=True):
        super().__init__(name="SanitizeDataTool", max_retries=max_retries, verbose=verbose)

    def execute(self, data):
        """Sanitizes data by removing sensitive information."""
        prompt = (
            "You are an AI assistant that sanitizes data by removing sensitive information.\n\n"
            "Remove all sensitive information from the following data:\n\n"
            f"{data}\n\nSanitized Data:"
        )

        sanitized_data = self.call_gemini(prompt, model="gemini-pro")
        return sanitized_data
19 changes: 19 additions & 0 deletions agents/sanitize_data_validator_agent.py
@@ -0,0 +1,19 @@
from .agent_base import AgentBase

class SanitizeDataValidatorAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="SanitizeDataValidatorAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, original_data, sanitized_data):
        """Validates that sensitive information has been removed from data."""
        prompt = (
            "You are an AI assistant that validates the sanitization of data by checking for the removal of sensitive information.\n\n"
            "Given the original data and the sanitized data, verify that all sensitive information has been removed.\n"
            "List any remaining sensitive information in the sanitized data and rate the sanitization process on a scale of 1 to 5, where 5 indicates complete sanitization.\n\n"
            f"Original Data:\n{original_data}\n\n"
            f"Sanitized Data:\n{sanitized_data}\n\n"
            "Validation:"
        )

        validation = self.call_gemini(prompt, model="gemini-pro")
        return validation
15 changes: 15 additions & 0 deletions agents/sentiment_analysis_agent.py
@@ -0,0 +1,15 @@
from .agent_base import AgentBase

class SentimentAnalysisAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="SentimentAnalysisAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, text):
        """Analyzes sentiment of the given text."""
        prompt = (
            "You are an expert in sentiment analysis. Determine the sentiment of the following text "
            "and provide a concise summary:\n\n"
            f"Text: {text}\n\nSentiment:"
        )
        sentiment = self.call_gemini(prompt, model="gemini-pro")
        return sentiment
16 changes: 16 additions & 0 deletions agents/summarize_tool.py
@@ -0,0 +1,16 @@
from .agent_base import AgentBase

class SummarizeTool(AgentBase):
    def __init__(self, max_retries=3, verbose=True):
        super().__init__(name="SummarizeTool", max_retries=max_retries, verbose=verbose)

    def execute(self, text):
        """Summarizes any LinkedIn post concisely for better insights."""
        prompt = (
            "You are an AI assistant specializing in summarizing LinkedIn posts for quick insights.\n\n"
            "Please generate a concise and insightful summary of the following LinkedIn post:\n\n"
            f"{text}\n\nSummary:"
        )

        summary = self.call_gemini(prompt, model="gemini-pro")
        return summary
19 changes: 19 additions & 0 deletions agents/summarize_validator_agent.py
@@ -0,0 +1,19 @@
from .agent_base import AgentBase

class SummarizeValidatorAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="SummarizeValidatorAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, original_text, summary):
        """Validates the accuracy and quality of a LinkedIn post summary."""
        prompt = (
            "You are an AI assistant that evaluates the quality of LinkedIn post summaries.\n\n"
            "Given the original LinkedIn post and its summary, determine whether the summary accurately and concisely captures the key insights.\n"
            "Provide a brief analysis and rate the summary on a scale of 1 to 5, where 5 indicates excellent quality.\n\n"
            f"Original Post:\n{original_text}\n\n"
            f"Summary:\n{summary}\n\n"
            "Evaluation:"
        )

        validation = self.call_gemini(prompt, model="gemini-pro")
        return validation
20 changes: 20 additions & 0 deletions agents/validator_agent.py
@@ -0,0 +1,20 @@
from .agent_base import AgentBase

class ValidatorAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="ValidatorAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, topic, article):
        """Validates the quality and relevance of a LinkedIn post."""
        prompt = (
            "You are an AI assistant that evaluates LinkedIn posts for clarity, engagement, and relevance.\n\n"
            "Given the topic and the LinkedIn post below, assess whether the post effectively covers the topic, maintains engagement, and aligns with professional standards.\n"
            "Provide a brief analysis and rate the post on a scale of 1 to 5, where 5 indicates excellent quality.\n\n"
            f"Topic: {topic}\n\n"
            f"Post:\n{article}\n\n"
            "Evaluation:"
        )

        # Call Gemini to validate the post
        validation = self.call_gemini(prompt, model="gemini-pro")
        return validation
18 changes: 18 additions & 0 deletions agents/write_post_tool.py
@@ -0,0 +1,18 @@
from .agent_base import AgentBase

class WritePostTool(AgentBase):
    def __init__(self, max_retries=3, verbose=True):
        super().__init__(name="WritePostTool", max_retries=max_retries, verbose=verbose)

    def execute(self, topic, outline=None):
        """Generates an engaging LinkedIn post based on the given topic and outline."""
        prompt = f"You are an expert LinkedIn content writer.\n\nWrite a compelling and engaging LinkedIn post on the following topic:\nTopic: {topic}\n\n"

        if outline:
            prompt += f"Outline:\n{outline}\n\n"

        prompt += "Post:\n"

        # Call Gemini to generate the LinkedIn post
        post = self.call_gemini(prompt, model="gemini-pro")
        return post
20 changes: 20 additions & 0 deletions agents/write_post_validator_agent.py
@@ -0,0 +1,20 @@
from .agent_base import AgentBase

class WritePostValidatorAgent(AgentBase):
    def __init__(self, max_retries=2, verbose=True):
        super().__init__(name="WritePostValidatorAgent", max_retries=max_retries, verbose=verbose)

    def execute(self, topic, article):
        """Validates a generated LinkedIn post for its quality, structure, and relevance to the topic."""
        prompt = (
            "You are an AI assistant that validates LinkedIn posts.\n\n"
            "Given the topic and the post, assess whether the post comprehensively covers the topic, follows a logical structure, and maintains professional standards.\n"
            "Provide a brief analysis and rate the post on a scale of 1 to 5, where 5 indicates excellent quality.\n\n"
            f"Topic: {topic}\n\n"
            f"Post:\n{article}\n\n"
            "Validation:"
        )

        # Call Gemini to validate the post
        validation = self.call_gemini(prompt, model="gemini-pro")
        return validation
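
Taken together, the tools and their validators are meant to be chained: a generation agent produces text and a matching validator scores it. The orchestration code is not part of this diff, so the wiring below is an assumed sketch using only the agents registered in AgentManager.

```python
from agents import AgentManager

manager = AgentManager(verbose=False)
topic = "Lessons from migrating our monolith to microservices"  # illustrative topic

# Draft -> refine -> validate, all through the AgentManager registry.
draft = manager.get_agent("write_post").execute(topic)
refined = manager.get_agent("refiner").execute(draft)
report = manager.get_agent("validator").execute(topic, refined)

# Validators return free-form text containing a 1-5 rating, so the caller
# displays or parses the report rather than branching on a structured score.
print(report)
```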
24 changes: 19 additions & 5 deletions frontend/src/app/generate_comments/page.tsx
@@ -7,6 +7,8 @@ import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/com
import { Textarea } from "@/components/ui/textarea";
import { MessageCircle } from "lucide-react";

const API_BASE_URL = "http://127.0.0.1:5000"; // ✅ Change this if needed

export default function GenerateComments() {
const [post, setPost] = useState("");
const [comments, setComments] = useState([]);
@@ -24,11 +26,18 @@ export default function GenerateComments() {
setComments([]);

try {
const response = await axios.post("http://127.0.0.1:5000/generate-comments", { post });
setComments(response.data.comments);
} catch (error) {
console.error("Error:", error);
setError("Failed to generate comments. Please try again.");
const response = await axios.post(`${API_BASE_URL}/generate_comments`, { post });

if (response.data.error) {
setError(response.data.error); // ✅ Show API error messages
} else if (!response.data.comments || response.data.comments.length === 0) {
setError("No comments were generated. Try again with a different post.");
} else {
setComments(response.data.comments);
}
} catch (err) {
console.error("API Error:", err);
setError("Failed to generate comments. Please check if the backend is running.");
} finally {
setLoading(false);
}
@@ -37,6 +46,7 @@
return (
<div className="container mx-auto px-4 py-12">
<div className="max-w-4xl mx-auto">
{/* Header Section */}
<div className="flex items-center gap-4 mb-8">
<MessageCircle className="h-8 w-8 text-primary" />
<div>
@@ -45,6 +55,7 @@ </div>
</div>
</div>

{/* Input Section */}
<Card>
<CardHeader>
<CardTitle>Input LinkedIn Post</CardTitle>
@@ -60,14 +71,17 @@ </CardContent>
</CardContent>
</Card>

{/* Generate Button */}
<div className="flex justify-center mt-6">
<Button size="lg" onClick={handleGenerateComments} disabled={loading} className="w-full">
{loading ? "Generating..." : "Generate Comments"}
</Button>
</div>

{/* Error Message */}
{error && <p className="text-red-500 text-sm mt-2 text-center">{error}</p>}

{/* Generated Comments Section */}
{comments.length > 0 && (
<Card className="mt-6">
<CardHeader>
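
The updated page posts to `${API_BASE_URL}/generate_comments` and expects either a `comments` array or an `error` field in the JSON response. The backend route itself is not shown in this excerpt, so the Flask sketch below is an assumption about its shape (Flask, flask-cors, and the single-comment wrapping are guesses made to match the frontend contract); note also that agent_base.py reads its key from Streamlit secrets, so a standalone API process would need equivalent configuration.

```python
# Hypothetical backend sketch; the actual backend files are not part of this excerpt.
from flask import Flask, jsonify, request
from flask_cors import CORS  # assumed, so the Next.js dev server can call port 5000

from agents import AgentManager

app = Flask(__name__)
CORS(app)
manager = AgentManager(verbose=False)

@app.route("/generate_comments", methods=["POST"])
def generate_comments():
    post = (request.get_json(silent=True) or {}).get("post", "").strip()
    if not post:
        return jsonify({"error": "Post content is required."}), 400
    try:
        comment = manager.get_agent("generate_comment").execute(post)
        # The page expects a list, so wrap the single generated comment.
        return jsonify({"comments": [comment]})
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
```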
2 changes: 1 addition & 1 deletion frontend/src/app/sentiment_analysis/page.tsx
@@ -26,7 +26,7 @@ export default function SentimentAnalysis() {
setConfidence("");

try {
const response = await axios.post("http://127.0.0.1:5000/analyze-sentiment", { text });
const response = await axios.post("http://127.0.0.1:5000/analyze_sentiment", { text });
setSentiment(response.data.sentiment);
setConfidence(response.data.confidence);
} catch (error) {
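
Likewise, the sentiment page now calls `/analyze_sentiment` and reads `sentiment` and `confidence` from the response. Continuing the assumed sketch above, a matching route could look like the following; since SentimentAnalysisAgent returns free-form text rather than a score, the confidence value here is a placeholder unless the prompt is changed to emit one.

```python
@app.route("/analyze_sentiment", methods=["POST"])
def analyze_sentiment():
    text = (request.get_json(silent=True) or {}).get("text", "").strip()
    if not text:
        return jsonify({"error": "Text is required."}), 400
    result = manager.get_agent("sentiment_analysis").execute(text)
    # SentimentAnalysisAgent returns prose, not a numeric score, so "confidence"
    # is a placeholder value to satisfy the frontend's expected shape.
    return jsonify({"sentiment": result, "confidence": "N/A"})
```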