Llm core (#64)
* refactor: rename isHarmfulContentFile to isHarmfulContent and remove unused function

* feat: add off-topic detection and moderation warning to chat response
TakalaWang authored Dec 24, 2024
1 parent 9fbb9f5 commit 14198a8
Showing 4 changed files with 106 additions and 44 deletions.
4 changes: 4 additions & 0 deletions src/lib/schema/conversation.ts
@@ -16,6 +16,10 @@ export const ConversationSchema = z.object({
audio: z.string().nullable() // to find the raw file
})
),
+    warning: z.object({
+        moderation: z.boolean().default(false),
+        offTopic: z.number().default(0)
+    }),
subtaskCompleted: z.array(z.boolean().default(false)),
summary: z.string().nullable(),
keyPoints: z.array(z.string()).nullable()
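The new `warning` object gives every conversation a sticky moderation flag and a running off-topic counter. A minimal sketch of how the zod defaults behave, assuming only this fragment of `ConversationSchema` (the standalone `WarningSchema` name is hypothetical):

```ts
import { z } from 'zod';

// Hypothetical stand-in for the new fragment of ConversationSchema.
const WarningSchema = z.object({
  moderation: z.boolean().default(false),
  offTopic: z.number().default(0)
});

// Missing keys are filled in by the defaults.
console.log(WarningSchema.parse({})); // { moderation: false, offTopic: 0 }

// Existing values pass through untouched.
console.log(WarningSchema.parse({ moderation: true, offTopic: 2 }));
// { moderation: true, offTopic: 2 }
```

Note that the defaults only fill in missing keys inside the object; a stored conversation that lacks the `warning` key entirely would still fail to parse unless the object itself is given a `.default({})`.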
130 changes: 87 additions & 43 deletions src/lib/server/llm.ts
@@ -10,6 +10,7 @@ import {
CONCEPT_SUMMARY_PROMPT,
DOCS_CONTEXT_SYSTEM_PROMPT,
GROUP_OPINION_SUMMARY_PROMPT,
+    OFF_TOPIC_DETECTION_PROMPT,
SUBTASKS_COMPLETED_PROMPT
} from './prompt';

@@ -18,14 +19,63 @@ const openai = new OpenAI({
baseURL: env.OPENAI_BASE_URL
});

-async function isHarmfulContent(content: string): Promise<boolean> {
+async function isHarmfulContent(
+    content: string
+): Promise<{ success: boolean; harmful: boolean; error?: string }> {
console.log('Checking content for harmful content:', { contentLength: content.length });
-    const moderation = await openai.moderations.create({
-        model: 'omni-moderation-latest',
-        input: content
-    });
-    console.log('Moderation result:', moderation.results[0]);
-    return moderation.results[0].flagged;
+    try {
+        const moderation = await openai.moderations.create({
+            model: 'omni-moderation-latest',
+            input: content
+        });
+        console.log('Moderation result:', moderation.results[0]);
+        return {
+            success: true,
+            harmful: moderation.results[0].flagged,
+            error: ''
+        };
+    } catch (error) {
+        console.error('Error in isHarmfulContent:', error);
+        return {
+            success: false,
+            harmful: false,
+            error: 'Failed to detect harmful content'
+        };
+    }
}

+async function isOffTopic(
+    history: LLMChatMessage[],
+    prompt: string
+): Promise<{ success: boolean; off_topic: boolean; error?: string }> {
+    console.log('Checking if off topic:', { historyLength: history.length });
+    try {
+        const llm_message = history.length > 1 ? history[history.length - 2].content : prompt;
+        const student_message = history[history.length - 1].content;
+        const system_prompt = OFF_TOPIC_DETECTION_PROMPT.replace('{llmMessage}', llm_message).replace(
+            '{studentMessage}',
+            student_message
+        );
+
+        const response = await requestZodLLM(system_prompt, z.object({ result: z.boolean() }));
+
+        if (!response.success) {
+            throw new Error('Failed to detect off topic response');
+        }
+
+        return {
+            success: true,
+            off_topic: response.message.result,
+            error: ''
+        };
+    } catch (error) {
+        console.error('Error in isOffTopic:', error);
+        return {
+            success: false,
+            off_topic: false,
+            error: 'Failed to detect off topic'
+        };
+    }
+}
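Both checks now resolve to a result object instead of throwing, so a caller can tell "the check failed" apart from "the content was flagged". A sketch of how a caller might consume that shape (`isHarmfulContent`, `isOffTopic`, and `LLMChatMessage` are from this file; the `guardLastMessage` helper is hypothetical):

```ts
// Hypothetical caller illustrating the { success, ..., error? } pattern.
async function guardLastMessage(history: LLMChatMessage[], system_prompt: string) {
  const [moderation, off_topic] = await Promise.all([
    isHarmfulContent(history[history.length - 1].content),
    isOffTopic(history, system_prompt)
  ]);

  if (!moderation.success || !off_topic.success) {
    // The check itself failed (network error, bad LLM output, ...);
    // surface that instead of silently treating the message as safe.
    throw new Error(moderation.error || off_topic.error);
  }

  return { harmful: moderation.harmful, offTopic: off_topic.off_topic };
}
```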

export async function checkFileContent(
@@ -35,7 +85,7 @@ export async function checkFileContent(
try {
const content = await fs.readFile(filePath, 'utf-8');
console.log('File content read successfully:', { contentLength: content.length });
-        if (await isHarmfulContentFile(content)) {
+        if (await isHarmfulContent(content)) {
console.warn('Harmful content detected in file');
return {
success: false,
@@ -57,15 +107,6 @@
}
}

-export async function isHarmfulContentFile(message: string) {
-    const moderation = await openai.moderations.create({
-        model: 'omni-moderation-latest',
-        input: message
-    });
-
-    return moderation.results[0].flagged;
-}

export async function requestChatLLM(
system_prompt: string,
history: LLMChatMessage[],
@@ -137,23 +178,20 @@ export async function chatWithLLMByDocs(
subtasks: string[],
resources: Resource[],
temperature = 0.7
-): Promise<{ success: boolean; message: string; subtask_completed: boolean[]; error?: string }> {
+): Promise<{
+    success: boolean;
+    message: string;
+    subtask_completed: boolean[];
+    warning: { off_topic: boolean; moderation: boolean };
+    error?: string;
+}> {
console.log('Starting chatWithLLMByDocs:', {
historyLength: history.length,
task,
subtasksCount: subtasks.length,
resourcesCount: resources.length
});
try {
-        const last_message_content = history[history.length - 1]?.content;
-        if (last_message_content && (await isHarmfulContent(last_message_content))) {
-            return {
-                success: false,
-                message: '',
-                subtask_completed: [],
-                error: 'Harmful content detected in the last message'
-            };
-        }
const formatted_docs = resources
.map((doc, index) => {
const title = doc.name || `Document ${index + 1}`;
@@ -165,33 +203,39 @@
.replace('{subtasks}', subtasks.join('\n'))
.replace('{resources}', formatted_docs);

-        const subtask_completed = await checkSubtaskCompleted(history, subtasks);
-        console.log('Formatted system prompt:', {
-            promptLength: system_prompt.length,
-            subtaskCompletedCount: subtask_completed.completed.length
-        });
-
-        const response = await requestChatLLM(system_prompt, history, temperature);
-        console.log('Chat response received:', {
-            success: response.success,
-            messageLength: response.message.length
-        });
-
-        if (!response.success) {
-            throw new Error('Failed to parse response');
+        const [response, subtask_completed, moderation, off_topic] = await Promise.all([
+            requestChatLLM(system_prompt, history, temperature),
+            checkSubtaskCompleted(history, subtasks),
+            isHarmfulContent(history[history.length - 1].content),
+            isOffTopic(history, system_prompt)
+        ]);
+
+        if (
+            !response.success ||
+            !subtask_completed.success ||
+            !moderation.success ||
+            !off_topic.success
+        ) {
+            throw new Error('Failed to get response');
}

return {
success: true,
message: response.message,
-            subtask_completed: subtask_completed.completed
+            subtask_completed: subtask_completed.completed,
+            warning: {
+                moderation: moderation.harmful,
+                off_topic: off_topic.off_topic
+            }
};
} catch (error) {
console.error('Error in chatWithLLMByDocs:', error);
return {
success: false,
message: '',
subtask_completed: [],
-            error: 'Failed to chat with LLM'
+            warning: { moderation: false, off_topic: false },
+            error: 'Failed to chat with LLM by docs'
};
}
}
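With this rewrite, the chat completion, subtask check, moderation check, and off-topic check run concurrently via `Promise.all`, and any partial failure is folded into the single `throw`. A sketch of a call site (message roles, task text, and resource fields are placeholder assumptions, not the project's actual data):

```ts
const result = await chatWithLLMByDocs(
  [{ role: 'user', content: 'Why do plants need sunlight?' }], // history (shape assumed)
  'Explain photosynthesis', // task
  ['Define photosynthesis', 'Name its inputs and outputs'], // subtasks
  [{ name: 'Textbook ch. 3', content: '...' }] // resources (fields assumed)
);

if (result.success) {
  console.log(result.message);
  if (result.warning.moderation || result.warning.off_topic) {
    // Let the route decide how to warn the student.
  }
}
```

Since each helper catches internally and reports failure through `success: false`, the `Promise.all` itself should rarely reject; the combined `if` is what converts any failed check into the caught error.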
10 changes: 10 additions & 0 deletions src/lib/server/prompt.ts
@@ -36,6 +36,16 @@ export const CHAT_SUMMARY_PROMPT = `
學生的關鍵字:
`;

+export const OFF_TOPIC_DETECTION_PROMPT = `
+請偵測學生的訊息是否聊天內容偏離主題,並回傳一個 Boolean 值,如果偏離主題則回傳 true,否則回傳 false。
+LLM訊息:
+{llmMessage}
+學生訊息:
+{studentMessage}
+結果:
+`;
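Roughly translated, the prompt reads: "Detect whether the student's message has drifted off topic, and return a Boolean value: true if it is off topic, false otherwise," followed by the LLM message, the student message, and a result slot. A sketch of how `isOffTopic` fills the placeholders (the example messages are made up):

```ts
const filled = OFF_TOPIC_DETECTION_PROMPT
  .replace('{llmMessage}', 'Photosynthesis turns light energy into chemical energy.')
  .replace('{studentMessage}', 'What games did you play last weekend?');
// requestZodLLM(filled, z.object({ result: z.boolean() })) is then expected
// to yield { result: true }, since the student message is off topic.
```

Each placeholder appears exactly once, so the single-occurrence behavior of `String.prototype.replace` with a string pattern is sufficient here.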

export const CONCEPT_SUMMARY_PROMPT = `
以下是學生們個別的觀點與想法,每位學生的想法與觀點用{separator}分隔,請你總結學生們的觀點,並歸納出學生的正反意見,以及學生對於這個概念的理解。
@@ -28,7 +28,7 @@ export const POST: RequestHandler = async ({ request, params, locals }) => {

const conversation_ref = await getConversationRef(id, group_number, conv_id);
console.log('Retrieved conversation reference');
-    const { userId, task, subtasks, resources, history, subtaskCompleted } =
+    const { userId, task, subtasks, resources, history, warning, subtaskCompleted } =
await getConversationData(conversation_ref);
console.log('Retrieved conversation data', { userId, task, subtasksCount: subtasks.length });

@@ -83,6 +83,10 @@ export const POST: RequestHandler = async ({ request, params, locals }) => {
content: response.message
}
],
+            warning: {
+                moderation: warning.moderation || response.warning.moderation,
+                offTopic: response.warning.off_topic ? warning.offTopic + 1 : 0
+            },
subtaskCompleted: subtaskCompleted.map(
(completed, index) => completed || response.subtask_completed[index]
)
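The route then merges each turn's result into the stored warning: `moderation` is sticky once flagged, while `offTopic` counts consecutive off-topic turns and resets to zero as soon as a turn is back on topic. The merge rule in isolation, as a small sketch (the type and function names are hypothetical):

```ts
type Warning = { moderation: boolean; offTopic: number };

function mergeWarning(
  stored: Warning,
  turn: { moderation: boolean; off_topic: boolean }
): Warning {
  return {
    moderation: stored.moderation || turn.moderation, // sticky once true
    offTopic: turn.off_topic ? stored.offTopic + 1 : 0 // consecutive counter
  };
}

// Three off-topic turns followed by one on-topic turn:
// offTopic goes 1 → 2 → 3 → 0.
```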
