Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions javascript/ql/lib/semmle/javascript/Concepts.qll
Original file line number Diff line number Diff line change
Expand Up @@ -226,3 +226,28 @@ module Cryptography {

class CryptographicAlgorithm = SC::CryptographicAlgorithm;
}

/**
 * A data-flow node that prompts an AI model.
 *
 * Extend this class to refine existing API models. If you want to model new APIs,
 * extend `AIPrompt::Range` instead.
 */
class AIPrompt extends DataFlow::Node instanceof AIPrompt::Range {
  /** Gets an input that is used as an AI prompt. */
  DataFlow::Node getAPrompt() { result = super.getAPrompt() }
}

/** Provides a class for modeling new AI prompting mechanisms. */
module AIPrompt {
  /**
   * A data-flow node that prompts an AI model.
   *
   * Extend this class to model new APIs. If you want to refine existing API models,
   * extend `AIPrompt` instead.
   */
  abstract class Range extends DataFlow::Node {
    /** Gets an input that is used as an AI prompt. */
    abstract DataFlow::Node getAPrompt();
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
<!DOCTYPE qhelp PUBLIC
"-//Semmle//qhelp//EN"
"qhelp.dtd">
<qhelp>

<overview>
<p>If a prompt is built from user-provided input, an attacker can craft input that bypasses the
agent's original purpose, leading to sensitive data leaks or unintended operations.</p>
</overview>

<recommendation>
<p>Sanitize user input, and avoid including user-provided input in developer- or system-level prompts.</p>
</recommendation>

<example>
<p>In the following examples, the cases marked GOOD show secure prompt construction, whereas the case marked BAD may be susceptible to prompt injection.</p>
<sample src="examples/example.py" />
</example>

<references>
<li>OpenAI: <a href="https://openai.github.io/openai-guardrails-python">Guardrails</a>.</li>
</references>

</qhelp>
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/**
 * @name Prompt injection
 * @description Building an AI model prompt from user-controlled sources allows an
 *              attacker to alter the model's behavior or exfiltrate sensitive data.
 * @kind path-problem
 * @problem.severity error
 * @security-severity 5.0
 * @precision high
 * @id js/prompt-injection
 * @tags security
 *       experimental
 *       external/cwe/cwe-1427
 */

import javascript
import experimental.semmle.javascript.security.PromptInjection.PromptInjectionQuery
import PromptInjectionFlow::PathGraph

from PromptInjectionFlow::PathNode source, PromptInjectionFlow::PathNode sink
where PromptInjectionFlow::flowPath(source, sink)
select sink.getNode(), source, sink, "This prompt construction depends on a $@.", source.getNode(),
  "user-provided value"
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from pathlib import Path

from flask import Flask, request
from agents import Agent
from guardrails import GuardrailAgent

# Flask application serving the example route; the original example used `app`
# without ever defining it, and used `Path` without importing it.
app = Flask(__name__)

@app.route("/parameter-route")
def get_input():
    # Untrusted, user-controlled value taken from the query string.
    # (Renamed from `input`, which shadowed the Python builtin.)
    user_input = request.args.get("input")

    good_agent = GuardrailAgent(  # GOOD: Agent created with guardrails automatically configured.
        config=Path("guardrails_config.json"),
        name="Assistant",
        instructions="This prompt is customized for " + user_input)

    bad_agent = Agent(
        name="Assistant",
        instructions="This prompt is customized for " + user_input  # BAD: user input in agent instruction.
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
/**
* Provides classes modeling security-relevant aspects of the `@anthropic-ai/sdk` package.
* See https://github.com/anthropics/anthropic-sdk-typescript
*/

private import javascript

module Anthropic {
  /** Gets a reference to the `Anthropic` client instance. */
  API::Node classRef() {
    // Default export: import Anthropic from '@anthropic-ai/sdk'; new Anthropic()
    result = API::moduleImport("@anthropic-ai/sdk").getInstance()
  }

  /** Gets a reference to a sink for prompt content in the Anthropic SDK. */
  API::Node getContentNode() {
    // client.messages.create({ ... }) and client.beta.messages.create({ ... })
    exists(API::Node base, API::Node createParams |
      (base = classRef() or base = classRef().getMember("beta")) and
      createParams = base.getMember("messages").getMember("create").getParameter(0)
    |
      // system: "string"
      result = createParams.getMember("system")
      or
      // system: [{ type: "text", text: "..." }]
      result = createParams.getMember("system").getArrayElement().getMember("text")
      or
      // messages: [{ role: "assistant", content: "..." }]
      // Injecting content into what the model said from external sources is very likely an injection.
      exists(API::Node msg |
        msg = createParams.getMember("messages").getArrayElement() and
        msg.getMember("role").asSink().mayHaveStringValue("assistant")
      |
        result = msg.getMember("content")
      )
    )
    or
    // client.beta.agents.create({ system: "..." })
    // client.beta.agents.update(agentId, { system: "..." })
    exists(API::Node agents |
      agents = classRef().getMember("beta").getMember("agents")
    |
      result = agents.getMember("create").getParameter(0).getMember("system")
      or
      result = agents.getMember("update").getParameter(1).getMember("system")
    )
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
/**
* Provides classes modeling security-relevant aspects of the `@google/genai` package.
* See https://github.com/googleapis/js-genai
*/

private import javascript

module GoogleGenAI {
  /** Gets a reference to the `GoogleGenAI` client instance. */
  API::Node clientRef() {
    // import { GoogleGenAI } from '@google/genai'; const ai = new GoogleGenAI(...)
    result = API::moduleImport("@google/genai").getMember("GoogleGenAI").getInstance()
  }

  /** Gets a reference to a sink for prompt content in the Google GenAI SDK. */
  API::Node getContentNode() {
    // ai.models.generateContent({ contents, config })
    // ai.models.generateContentStream({ contents, config })
    exists(API::Node params |
      params =
        clientRef()
            .getMember("models")
            .getMember(["generateContent", "generateContentStream"])
            .getParameter(0)
    |
      // config.systemInstruction
      result = params.getMember("config").getMember("systemInstruction")
      or
      // contents: [{ role: "model", parts: [{ text: "..." }] }]
      // Gemini uses "model" role instead of "assistant"
      exists(API::Node msg |
        msg = params.getMember("contents").getArrayElement() and
        msg.getMember("role").asSink().mayHaveStringValue("model")
      |
        result = msg.getMember("parts").getArrayElement().getMember("text")
      )
    )
    or
    // ai.models.generateImages({ prompt, config })
    // ai.models.editImage({ prompt, referenceImages, config })
    result =
      clientRef()
          .getMember("models")
          .getMember(["generateImages", "editImage"])
          .getParameter(0)
          .getMember("prompt")
    or
    // ai.chats.create({ config: { systemInstruction: ... } })
    // chat.sendMessage({ config: { systemInstruction: ... } })
    // ai.live.connect({ config: { systemInstruction: ... } })
    exists(API::Node callee |
      callee = clientRef().getMember("chats").getMember("create") or
      callee = clientRef().getMember("chats").getMember("create").getReturn().getMember("sendMessage") or
      callee = clientRef().getMember("live").getMember("connect")
    |
      result = callee.getParameter(0).getMember("config").getMember("systemInstruction")
    )
  }
}
Loading
Loading