From 1a279d08a19730dc028a3ad0feeb490e72a34299 Mon Sep 17 00:00:00 2001
From: Dane Schneider
Date: Mon, 15 Dec 2025 17:46:27 -0800
Subject: [PATCH] Add LLM with user-controlled prompts

---
 llm_vuln.ts | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 llm_vuln.ts

diff --git a/llm_vuln.ts b/llm_vuln.ts
new file mode 100644
index 0000000..4f2db9c
--- /dev/null
+++ b/llm_vuln.ts
@@ -0,0 +1,23 @@
+// LLM integration with user input directly in prompt
+async function askAI(userInput: string) {
+  const response = await openai.chat.completions.create({
+    model: "gpt-4",
+    messages: [
+      { role: "system", content: "You are a helpful assistant." },
+      { role: "user", content: userInput }
+    ]
+  });
+  return response.choices[0].message.content;
+}
+
+// Dangerous: user controls system prompt
+async function customAssistant(systemPrompt: string, question: string) {
+  const response = await openai.chat.completions.create({
+    model: "gpt-4",
+    messages: [
+      { role: "system", content: systemPrompt },
+      { role: "user", content: question }
+    ]
+  });
+  return response.choices[0].message.content;
+}
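
For context on the patch above: llm_vuln.ts calls an `openai` client it never imports or constructs, and `customAssistant` is the deliberately unsafe pattern, since the caller fully controls the system prompt. The sketch below is not part of the commit; it assumes the official `openai` npm package and an `OPENAI_API_KEY` environment variable, and shows the setup the file would need plus a constrained counterpart that keeps the system prompt server-defined and sends untrusted text only as the user message.

```typescript
// Minimal setup sketch (not part of the patch); assumes the official `openai`
// npm package (v4+) and an OPENAI_API_KEY environment variable.
import OpenAI from "openai";

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Hypothetical fixed prompt: kept server-side instead of accepted from callers.
const SYSTEM_PROMPT = "You are a helpful assistant.";

// Constrained counterpart to customAssistant: untrusted user text goes only
// into the user message, never into the system prompt.
async function askConstrained(question: string): Promise<string | null> {
  const response = await openai.chat.completions.create({
    model: "gpt-4",
    messages: [
      { role: "system", content: SYSTEM_PROMPT },
      { role: "user", content: question },
    ],
  });
  return response.choices[0].message.content;
}
```

Usage would mirror the patched functions, e.g. `await askConstrained("Summarize these release notes: ...")`; the difference is only that the system role is no longer caller-supplied.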