Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions flo_ai/flo_ai/arium/arium.py
Original file line number Diff line number Diff line change
Expand Up @@ -647,8 +647,11 @@ def _serialize_node_output(self, result: Any) -> Optional[str]:
if isinstance(result, str):
return result
if isinstance(result, list):
parts = [self._serialize_node_output(item) for item in result]
return '\n'.join(p for p in parts if p) or None
# agent.run() returns conversation_history (all messages); take only
# the last item, which is the agent's own reply for this node.
if not result:
return None
return self._serialize_node_output(result[-1])
if hasattr(result, 'content'):
return self._serialize_node_output(result.content)
if hasattr(result, 'text'):
Expand Down
2 changes: 1 addition & 1 deletion flo_ai/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "flo_ai"
version = "1.1.5"
version = "1.1.6"
description = "An easy way to create structured AI agents"
authors = [{ name = "rootflo", email = "engineering.tools@rootflo.ai" }]
requires-python = ">=3.10,<4.0"
Expand Down
2 changes: 1 addition & 1 deletion flo_ai/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

setuptools.setup(
name='flo-ai',
version='1.1.3',
version='1.1.6',
author='Rootflo',
description='Create composable AI agents',
long_description=long_description,
Expand Down
1 change: 1 addition & 0 deletions wavefront/client/src/components/InferencePopup.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -243,6 +243,7 @@ const InferencePopup: React.FC<InferencePopupProps> = ({ onClose, renderModal =
const imageMessage = {
image_base64: imageBase64Content,
mime_type: uploadedImage.mimeType,
file_name: uploadedImage.file.name,
};
messageInputs.push(imageMessage);
}
Expand Down
1 change: 1 addition & 0 deletions wavefront/client/src/pages/apps/[appId]/agents/[id].tsx
Original file line number Diff line number Diff line change
Expand Up @@ -416,6 +416,7 @@ const AgentDetail: React.FC = () => {
const imageMessage = {
image_base64: image.base64Content,
mime_type: image.mimeType,
file_name: image.file.name,
};
setChatHistory((prev) => [...prev, { role: 'user', content: imageMessage }]);
conversationInputs.push({
Expand Down
2 changes: 2 additions & 0 deletions wavefront/client/src/pages/apps/[appId]/workflows/[id].tsx
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,7 @@ const WorkflowDetail: React.FC = () => {
const imageMessage = {
image_base64: image.base64Content,
mime_type: image.mimeType,
file_name: image.file.name,
};
messageInputs.push({ role: 'user', content: imageMessage });
setChatHistory((prev) => [...prev, { role: 'user', content: imageMessage }]);
Expand All @@ -379,6 +380,7 @@ const WorkflowDetail: React.FC = () => {
const imageMessage = {
image_base64: imageBase64Content,
mime_type: uploadedImage.mimeType,
file_name: uploadedImage.file?.name,
};
messageInputs.push({ role: 'user', content: imageMessage });
setChatHistory((prev) => [...prev, { role: 'user', content: imageMessage }]);
Expand Down
1 change: 1 addition & 0 deletions wavefront/client/src/types/chat-message.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
export interface ImageContent {
image_base64: string;
mime_type?: string;
file_name?: string;
}

export interface DocumentContent {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,17 @@ def process_inference_inputs(
elif input_item.get('role') == 'user':
input_content = input_item.get('content', {})
if is_image_message(input_content):
# Inject filename as a text message before the image so
# agents can reference the original file name in their output.
file_name = input_content.get('file_name')
if file_name:
resolved_inputs.append(
UserMessage(
content=TextMessageContent(
text=f'The original filename of this image is: {file_name}'
)
)
)
Comment on lines +46 to +56
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Minor: sanitize user-controlled file_name before embedding it in the prompt.

file_name flows from the browser File.name with no server-side sanitization, so an attacker (or a user with a crafty filename) can inject newlines/instructions into the prompt, e.g. photo.jpg\n\nIgnore previous instructions and .... This widens the existing prompt-injection surface that the document-filename injection (lines 89-99) already has. At a minimum, strip control characters/newlines and quote the value; ideally, bound the length too.

🛡️ Suggested hardening
                 if is_image_message(input_content):
                     # Inject filename as a text message before the image so
                     # agents can reference the original file name in their output.
-                    file_name = input_content.get('file_name')
+                    file_name = _sanitize_file_name(input_content.get('file_name'))
                     if file_name:
                         resolved_inputs.append(
                             UserMessage(
                                 content=TextMessageContent(
-                                    text=f'The original filename of this image is: {file_name}'
+                                    text=f'The original filename of this image is: "{file_name}"'
                                 )
                             )
                         )

And a small helper (apply similarly on the document path to fix the pre-existing equivalent issue):

def _sanitize_file_name(name: Any, max_len: int = 256) -> str | None:
    if not isinstance(name, str) or not name:
        return None
    # Drop control chars/newlines; keep it on a single line.
    cleaned = ''.join(ch for ch in name if ch.isprintable() and ch not in '\r\n')
    return cleaned[:max_len] or None
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@wavefront/server/modules/agents_module/agents_module/utils/input_processing_utils.py`
around lines 46-56, sanitize the user-controlled file_name before embedding it
into the UserMessage: implement a helper like _sanitize_file_name(name,
max_len=256) that returns None for non-strings/empty values, strips control
chars and newlines (keep only printable chars and truncate to max_len), then use
its result instead of raw file_name when creating the TextMessageContent in the
block that appends to resolved_inputs; apply the same helper when handling the
document path elsewhere to close the similar injection vector.

# Extract image_bytes and mime_type from image_base64
try:
data_url_pattern = r'^data:(image/[a-zA-Z0-9.+-]+);base64,(.+)$'
Expand Down
2 changes: 1 addition & 1 deletion wavefront/server/modules/agents_module/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ dependencies = [
"flo-utils",
"tools-module",
"api-services-module",
"flo-ai==1.1.5",
"flo-ai==1.1.6",
]

[tool.uv.sources]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ dependencies = [
"pandas~=2.2.3",
"ollama~=0.4.8",
"textract~=1.6.5",
"flo-ai==1.1.5",
"flo-ai==1.1.6",
"google-cloud-pubsub~=2.30.0",
"boto3<=1.38.40",
"pyyaml>=6.0.3,<7",
Expand Down
2 changes: 1 addition & 1 deletion wavefront/server/modules/tools_module/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ name = "tools_module"
version = "0.1.0"
description = "Tools module for Flo AI agent system"
dependencies = [
"flo-ai==1.1.5",
"flo-ai==1.1.6",
"flo_cloud",

"datasource",
Expand Down
12 changes: 6 additions & 6 deletions wavefront/server/uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading