Base

Agent

Bases: Node

Base class for an AI Agent that interacts with a Language Model and tools.
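
A minimal usage sketch (the import paths and the OpenAI LLM node below are assumptions; any BaseLLM subclass and any list of tool nodes can be substituted):

from dynamiq.nodes.agents import Agent   # assumed re-export of this module
from dynamiq.nodes.llms import OpenAI    # assumed provider node

# Hypothetical setup: the base Agent performs a single LLM call per run.
llm = OpenAI(model="gpt-4o-mini", temperature=0)
agent = Agent(
    name="Support Agent",
    llm=llm,
    tools=[],  # tool nodes and MCPServer instances may be listed here
    role="You are a concise support assistant.",
)

result = agent.run(input_data={"input": "Summarize the refund policy."})
print(result.output["content"])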

Source code in dynamiq/nodes/agents/base.py
class Agent(Node):
    """Base class for an AI Agent that interacts with a Language Model and tools."""

    AGENT_PROMPT_TEMPLATE: ClassVar[str] = AGENT_PROMPT_TEMPLATE

    llm: BaseLLM = Field(..., description="LLM used by the agent.")
    group: NodeGroup = NodeGroup.AGENTS
    error_handling: ErrorHandling = Field(default_factory=lambda: ErrorHandling(timeout_seconds=3600))
    tools: list[Node] = []
    files: list[io.BytesIO | bytes] | None = None
    images: list[str | bytes | io.BytesIO] | None = None
    name: str = "Agent"
    max_loops: int = 1
    tool_output_max_length: int = TOOL_MAX_TOKENS
    tool_output_truncate_enabled: bool = True
    memory: Memory | None = Field(None, description="Memory node for the agent.")
    memory_limit: int = Field(100, description="Maximum number of messages to retrieve from memory")
    memory_retrieval_strategy: MemoryRetrievalStrategy | None = MemoryRetrievalStrategy.ALL
    verbose: bool = Field(False, description="Whether to print verbose logs.")
    file_store: FileStoreConfig = Field(
        default_factory=lambda: FileStoreConfig(enabled=False, backend=InMemoryFileStore()),
        description="Configuration for file storage used by the agent.",
    )
    file_attachment_preview_bytes: int = Field(
        default=512,
        description="Maximum number of bytes/characters from each uploaded file to surface as an inline preview.",
    )

    input_message: Message | VisionMessage | None = None
    role: str | None = Field(
        default=None,
        description="""Agent basic instructions.
            Can be used to provide additional context or instructions to the agent.
            Accepts Jinja templates to provide additional parameters.""",
    )
    description: str | None = Field(default=None, description="Short human-readable description of the agent.")
    _prompt_blocks: dict[str, str] = PrivateAttr(default_factory=dict)
    _prompt_variables: dict[str, Any] = PrivateAttr(default_factory=dict)
    _mcp_servers: list[MCPServer] = PrivateAttr(default_factory=list)
    _mcp_server_tool_ids: list[str] = PrivateAttr(default_factory=list)
    _tool_cache: dict[ToolCacheEntry, Any] = PrivateAttr(default_factory=dict)
    _history_offset: int = PrivateAttr(
        default=2,  # Offset to the first message (default: 2 — system and initial user messages).
    )

    model_config = ConfigDict(arbitrary_types_allowed=True)
    input_schema: ClassVar[type[AgentInputSchema]] = AgentInputSchema
    _json_schema_fields: ClassVar[list[str]] = ["role", "description"]

    @classmethod
    def _generate_json_schema(
        cls, llms: dict[type[BaseLLM], list[str]] = {}, tools: list[type[Node]] = [], **kwargs
    ) -> dict[str, Any]:
        """
        Generates full json schema for Agent with provided llms and tools.
        This schema is designed for compatibility with the WorkflowYamlParser,
        containing enough partial information to instantiate an Agent.
        Parameters name to be included in the schema are either defined in the _json_schema_fields class variable or
        passed via the fields parameter.

        It generates a schema using the provided LLMs and tools.

        Args:
            llms (dict[type[BaseLLM], list[str]]): Available llm providers and models.
            tools (list[type[Node]]): List of tools.

        Returns:
            dict[str, Any]: Generated json schema.
        """
        schema = super()._generate_json_schema(**kwargs)
        schema["properties"]["llm"] = {
            "anyOf": [
                {
                    "type": "object",
                    **llm._generate_json_schema(models=models, fields=["model", "temperature", "max_tokens"]),
                }
                for llm, models in llms.items()
            ],
            "additionalProperties": False,
        }

        schema["properties"]["tools"] = {
            "type": "array",
            "items": {"anyOf": [{"type": "object", **tool._generate_json_schema()} for tool in tools]},
        }

        schema["required"] += ["tools", "llm"]
        return schema

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._intermediate_steps: dict[int, dict] = {}
        self._run_depends: list[dict] = []
        self._prompt = Prompt(messages=[])

        expanded_tools = []
        for tool in self.tools:
            if isinstance(tool, MCPServer):
                self._mcp_servers.append(tool)
                subtools = tool.get_mcp_tools()
                expanded_tools.extend(subtools)
                self._mcp_server_tool_ids.extend([subtool.id for subtool in subtools])
            else:
                expanded_tools.append(tool)

        self.tools = expanded_tools

        if self.file_store_backend:
            if self.file_store.agent_file_write_enabled:
                self.tools.append(FileWriteTool(file_store=self.file_store_backend))

            self.tools.append(FileReadTool(file_store=self.file_store_backend, llm=self.llm))
            self.tools.append(FileSearchTool(file_store=self.file_store_backend))
            self.tools.append(FileListTool(file_store=self.file_store_backend))

        self._init_prompt_blocks()

    @model_validator(mode="after")
    def validate_input_fields(self):
        if self.input_message:
            self.input_message.role = MessageRole.USER

        return self

    def get_context_for_input_schema(self) -> dict:
        """Provides context for input schema that is required for proper validation."""
        role_for_validation = self.role or ""
        if role_for_validation and (
            "{% raw %}" not in role_for_validation and "{% endraw %}" not in role_for_validation
        ):
            role_for_validation = f"{{% raw %}}{role_for_validation}{{% endraw %}}"
        return {"input_message": self.input_message, "role": role_for_validation}

    @property
    def to_dict_exclude_params(self):
        return super().to_dict_exclude_params | {
            "llm": True,
            "tools": True,
            "memory": True,
            "files": True,
            "images": True,
            "file_store": True,
        }

    def to_dict(self, **kwargs) -> dict:
        """Converts the instance to a dictionary."""
        data = super().to_dict(**kwargs)
        data["llm"] = self.llm.to_dict(**kwargs)

        data["tools"] = [tool.to_dict(**kwargs) for tool in self.tools if tool.id not in self._mcp_server_tool_ids]
        data["tools"] = data["tools"] + [mcp_server.to_dict(**kwargs) for mcp_server in self._mcp_servers]

        data["memory"] = self.memory.to_dict(**kwargs) if self.memory else None
        if self.files:
            data["files"] = [{"name": getattr(f, "name", f"file_{i}")} for i, f in enumerate(self.files)]
        if self.images:
            data["images"] = [{"name": getattr(f, "name", f"image_{i}")} for i, f in enumerate(self.images)]

        data["file_store"] = self.file_store.to_dict(**kwargs) if self.file_store else None

        return data

    def init_components(self, connection_manager: ConnectionManager | None = None):
        """
        Initialize components for the manager and agents.

        Args:
            connection_manager (ConnectionManager, optional): The connection manager. Defaults to ConnectionManager.
        """
        connection_manager = connection_manager or ConnectionManager()
        super().init_components(connection_manager)
        if self.llm.is_postponed_component_init:
            self.llm.init_components(connection_manager)

        for tool in self.tools:
            if tool.is_postponed_component_init:
                tool.init_components(connection_manager)
            tool.is_optimized_for_agents = True

    def sanitize_tool_name(self, s: str):
        """Sanitize tool name to follow [^a-zA-Z0-9_-]."""
        s = s.replace(" ", "-")
        sanitized = re.sub(r"[^a-zA-Z0-9_-]", "", s)
        return sanitized

    def _init_prompt_blocks(self):
        """Initializes default prompt blocks and variables."""
        self._prompt_blocks = {
            "date": "{{ date }}",
            "tools": "{{ tool_description }}",
            "instructions": "",
            "context": "{{ context }}",
        }
        self._prompt_variables = {
            "tool_description": self.tool_description,
            "date": datetime.now().strftime("%d %B %Y"),
        }

    def set_block(self, block_name: str, content: str):
        """Adds or updates a prompt block."""
        self._prompt_blocks[block_name] = content

    def set_prompt_variable(self, variable_name: str, value: Any):
        """Sets or updates a prompt variable."""
        self._prompt_variables[variable_name] = value

    def _prepare_metadata(self, input_data: dict) -> dict:
        """
        Prepare metadata from input data.

        Args:
            input_data (dict): Input data containing user information

        Returns:
            dict: Processed metadata
        """
        EXCLUDED_KEYS = {"user_id", "session_id", "input", "metadata", "files", "images", "tool_params"}
        custom_metadata = input_data.get("metadata", {}).copy()
        custom_metadata.update({k: v for k, v in input_data.items() if k not in EXCLUDED_KEYS})

        if "files" in custom_metadata:
            del custom_metadata["files"]
        if "images" in custom_metadata:
            del custom_metadata["images"]
        if "tool_params" in custom_metadata:
            del custom_metadata["tool_params"]

        user_id = input_data.get("user_id")
        session_id = input_data.get("session_id")

        if user_id:
            custom_metadata["user_id"] = user_id
        if session_id:
            custom_metadata["session_id"] = session_id

        return custom_metadata

    def execute(
        self,
        input_data: AgentInputSchema,
        input_message: Message | VisionMessage | None = None,
        config: RunnableConfig | None = None,
        **kwargs,
    ) -> dict[str, Any]:
        """
        Executes the agent with the given input data.
        """
        log_data = dict(input_data).copy()

        if log_data.get("images"):
            log_data["images"] = [f"image_{i}" for i in range(len(log_data["images"]))]

        if log_data.get("files"):
            log_data["files"] = [f"file_{i}" for i in range(len(log_data["files"]))]

        logger.info(f"Agent {self.name} - {self.id}: started with input {log_data}")
        self.reset_run_state()
        config = ensure_config(config)
        self.run_on_node_execute_run(config.callbacks, **kwargs)

        custom_metadata = self._prepare_metadata(dict(input_data))

        input_message = input_message or self.input_message or Message(role=MessageRole.USER, content=input_data.input)
        input_message = input_message.format_message(**dict(input_data))

        use_memory = self.memory and (dict(input_data).get("user_id") or dict(input_data).get("session_id"))

        if use_memory:
            history_messages = self._retrieve_memory(dict(input_data))
            if len(history_messages) > 0:
                history_messages.insert(
                    0,
                    Message(
                        role=MessageRole.SYSTEM,
                        content="Below is the previous conversation history. "
                        "Use this context to inform your response.",
                    ),
                )
            if isinstance(input_message, Message):
                memory_content = input_message.content
            else:
                text_parts = [
                    content.text for content in input_message.content if isinstance(content, VisionMessageTextContent)
                ]
                memory_content = " ".join(text_parts) if text_parts else "Image input"
            self.memory.add(role=MessageRole.USER, content=memory_content, metadata=custom_metadata)
        else:
            history_messages = None

        if self.role:
            # Only auto-wrap the entire role in a raw block if the user did not
            # provide explicit raw/endraw markers. This allows roles to mix
            # literal sections (via raw) with Jinja variables like {{ input }}
            # without creating nested raw blocks.
            if ("{% raw %}" in self.role) or ("{% endraw %}" in self.role):
                self._prompt_blocks["role"] = self.role
            else:
                self._prompt_blocks["role"] = f"{{% raw %}}{self.role}{{% endraw %}}"

        files = input_data.files
        uploaded_file_names: set[str] = set()
        if files:
            if not self.file_store_backend:
                self.file_store = FileStoreConfig(enabled=True, backend=InMemoryFileStore())
                self.tools.append(FileReadTool(file_store=self.file_store.backend, llm=self.llm))
                self.tools.append(FileSearchTool(file_store=self.file_store.backend))
                self.tools.append(FileListTool(file_store=self.file_store.backend))
                self._init_prompt_blocks()
            normalized_files = self._ensure_named_files(files)
            uploaded_file_names = {
                getattr(f, "name", None)
                for f in normalized_files
                if hasattr(f, "name") and getattr(f, "name") is not None
            }
            input_message = self._inject_attached_files_into_message(input_message, normalized_files)

        if input_data.tool_params:
            kwargs["tool_params"] = input_data.tool_params

        self._prompt_variables.update(dict(input_data))
        kwargs = kwargs | {"parent_run_id": kwargs.get("run_id")}
        kwargs.pop("run_depends", None)

        result = self._run_agent(input_message, history_messages, config=config, **kwargs)

        if use_memory:
            self.memory.add(role=MessageRole.ASSISTANT, content=result, metadata=custom_metadata)

        execution_result = {
            "content": result,
        }

        if self.file_store_backend and not self.file_store_backend.is_empty():
            stored_files = self.file_store_backend.list_files_bytes()
            filtered_files = self._filter_generated_files(stored_files, uploaded_file_names)
            if filtered_files:
                execution_result["files"] = filtered_files
                logger.info(
                    f"Agent {self.name} - {self.id}: returning {len(filtered_files)} generated file(s) in FileStore"
                )

        logger.info(f"Node {self.name} - {self.id}: finished with RESULT:\n{str(result)[:200]}...")

        return execution_result

    def retrieve_conversation_history(
        self,
        user_query: str | None = None,
        user_id: str | None = None,
        session_id: str | None = None,
        limit: int | None = None,
        strategy: MemoryRetrievalStrategy = MemoryRetrievalStrategy.ALL,
    ) -> list[Message]:
        """
        Retrieves conversation history for the agent using the specified strategy.

        Args:
            user_query: Current user input to find relevant context (for RELEVANT/HYBRID strategies)
            user_id: Optional user identifier
            session_id: Optional session identifier
            limit: Maximum number of messages to return (defaults to memory_limit)
            strategy: Which retrieval strategy to use (ALL, RELEVANT, or HYBRID)

        Returns:
            List of messages forming a valid conversation context
        """
        if not self.memory or not (user_id or session_id):
            return []

        filters = {}
        if user_id:
            filters["user_id"] = user_id
        if session_id:
            filters["session_id"] = session_id

        limit = limit or self.memory_limit

        if strategy == MemoryRetrievalStrategy.RELEVANT and not user_query:
            logger.warning("RELEVANT strategy selected but no user_query provided - falling back to ALL")
            strategy = MemoryRetrievalStrategy.ALL

        conversation = self.memory.get_agent_conversation(
            query=user_query,
            limit=limit,
            filters=filters,
            strategy=strategy,
        )
        return conversation

    def _retrieve_memory(self, input_data: dict) -> list[Message]:
        """
        Retrieves memory messages when user_id and/or session_id are provided.
        """
        user_id = input_data.get("user_id")
        session_id = input_data.get("session_id")

        user_query = input_data.get("input", "")
        history_messages = self.retrieve_conversation_history(
            user_query=user_query,
            user_id=user_id,
            session_id=session_id,
            strategy=self.memory_retrieval_strategy,
        )
        logger.info("Agent %s - %s: retrieved %d messages from memory", self.name, self.id, len(history_messages))
        return history_messages

    def _run_llm(
        self, messages: list[Message | VisionMessage], config: RunnableConfig | None = None, **kwargs
    ) -> RunnableResult:
        """Runs the LLM with a given prompt and handles streaming or full responses.

        Args:
            messages (list[Message | VisionMessage]): Input messages for llm.
            config (Optional[RunnableConfig]): Configuration for the runnable.
            kwargs: Additional keyword arguments.

        Returns:
            RunnableResult: Generated response.
        """
        llm_result = self.llm.run(
            input_data={},
            config=config,
            prompt=Prompt(messages=messages),
            run_depends=deepcopy(self._run_depends),
            **kwargs,
        )
        self._run_depends = [NodeDependency(node=self.llm).to_dict(for_tracing=True)]
        if llm_result.status != RunnableStatus.SUCCESS:
            error_message = f"LLM '{self.llm.name}' failed: {llm_result.error.message}"
            raise ValueError(error_message)

        return llm_result

    def stream_content(
        self,
        content: str | dict,
        source: str,
        step: str,
        config: RunnableConfig | None = None,
        **kwargs,
    ) -> str | dict:
        """
        Streams data.

        Args:
            content (str | dict): Data that will be streamed.
            source (str): Source of the content.
            step (str): Description of the step.
            config (Optional[RunnableConfig]): Configuration for the runnable.
            **kwargs: Additional keyword arguments.

        Returns:
            str | dict: Streamed data.
        """
        if not isinstance(source, str):
            raise ValueError(
                f"stream_content source parameter must be a string, got {type(source).__name__}: {source}. "
                f"This likely indicates incorrect parameter passing from the calling code."
            )

        return self.stream_response(content=content, source=source, step=step, config=config, **kwargs)

    def stream_response(
        self, content: str | dict, source: str, step: str, config: RunnableConfig | None = None, **kwargs
    ):
        if not isinstance(source, str):
            raise ValueError(
                f"stream_response source parameter must be a string, got {type(source).__name__}: {source}. "
                f"This likely indicates a parameter ordering issue in the calling code."
            )

        response_for_stream = StreamChunk(
            choices=[StreamChunkChoice(delta=StreamChunkChoiceDelta(content=content, source=source, step=step))]
        )

        self.run_on_node_execute_stream(
            callbacks=config.callbacks,
            chunk=response_for_stream.model_dump(),
            **kwargs,
        )
        return content

    def _run_agent(
        self,
        input_message: Message | VisionMessage,
        history_messages: list[Message] | None = None,
        config: RunnableConfig | None = None,
        **kwargs,
    ) -> str:
        """Runs the agent with the generated prompt and handles exceptions."""
        formatted_prompt = self.generate_prompt()
        system_message = Message(role=MessageRole.SYSTEM, content=formatted_prompt)
        if history_messages:
            self._prompt.messages = [system_message, *history_messages, input_message]
        else:
            self._prompt.messages = [system_message, input_message]

        llm_result = self._run_llm(self._prompt.messages, config=config, **kwargs).output["content"]
        self._prompt.messages.append(Message(role=MessageRole.ASSISTANT, content=llm_result))

        if self.streaming.enabled:
            return self.stream_content(
                content=llm_result,
                source=self.name,
                step="answer",
                config=config,
                **kwargs,
            )
        return llm_result

    def _get_tool(self, action: str) -> Node:
        """Retrieves the tool corresponding to the given action."""
        tool = self.tool_by_names.get(self.sanitize_tool_name(action))
        if not tool:
            raise AgentUnknownToolException(
                f"Unknown tool: {action}."
                "Use only available tools and provide only the tool's name in the action field. "
                "Do not include any additional reasoning. "
                "Please correct the action field or state that you cannot answer the question."
            )
        return tool

    def _apply_parameters(self, merged_input: dict, params: dict, source: str, debug_info: list = None):
        """Apply parameters from the specified source to the merged input."""
        if debug_info is None:
            debug_info = []
        for key, value in params.items():
            if key in merged_input and isinstance(value, dict) and isinstance(merged_input[key], dict):
                merged_nested = merged_input[key].copy()
                merged_input[key] = deep_merge(value, merged_nested)
                debug_info.append(f"  - From {source}: Merged nested {key}")
            else:
                merged_input[key] = value
                debug_info.append(f"  - From {source}: Set {key}={value}")

    def _regenerate_node_ids(self, obj: Any) -> Any:
        """Recursively assign new IDs to cloned nodes and nested models."""
        if isinstance(obj, BaseModel):
            if hasattr(obj, "id"):
                setattr(obj, "id", str(uuid4()))

            for field_name in getattr(obj, "model_fields", {}):
                value = getattr(obj, field_name)
                if isinstance(value, list):
                    setattr(obj, field_name, [self._regenerate_node_ids(item) for item in value])
                elif isinstance(value, dict):
                    setattr(obj, field_name, {k: self._regenerate_node_ids(v) for k, v in value.items()})
                else:
                    setattr(obj, field_name, self._regenerate_node_ids(value))
            return obj
        if isinstance(obj, list):
            return [self._regenerate_node_ids(item) for item in obj]
        if isinstance(obj, dict):
            return {k: self._regenerate_node_ids(v) for k, v in obj.items()}
        return obj

    def _clone_tool_for_execution(self, tool: Node, config: RunnableConfig | None) -> tuple[Node, RunnableConfig]:
        """Clone tool and align config overrides so each execution is isolated."""
        base_config = ensure_config(config)
        try:
            tool_copy = self._regenerate_node_ids(tool.clone())
        except Exception as e:
            logger.warning(f"Agent {self.name} - {self.id}: failed to clone tool {tool.name}: {e}")
            return tool, base_config

        local_config = base_config
        try:
            local_config = base_config.model_copy(deep=False)
            original_override = base_config.nodes_override.get(tool.id)
            if original_override:
                local_config.nodes_override[tool_copy.id] = original_override
        except Exception as e:
            logger.warning(
                f"Agent {self.name} - {self.id}: failed to prepare config override for cloned tool {tool.name}: {e}"
            )
            local_config = base_config

        return tool_copy, local_config

    def _run_tool(
        self,
        tool: Node,
        tool_input: dict,
        config,
        update_run_depends: bool = True,
        collect_dependency: bool = False,
        delegate_final: bool = False,
        **kwargs,
    ) -> Any:
        """Runs a specific tool with the given input."""
        merged_input = tool_input.copy() if isinstance(tool_input, dict) else {"input": tool_input}

        if isinstance(tool, ContextManagerTool):
            merged_input["history"] = self._prompt.messages[self._history_offset :]

        raw_tool_params = kwargs.get("tool_params", ToolParams())
        tool_params = (
            ToolParams.model_validate(raw_tool_params) if isinstance(raw_tool_params, dict) else raw_tool_params
        )

        if self.file_store_backend and tool.is_files_allowed:
            for field_name, field in tool.input_schema.model_fields.items():
                if field.json_schema_extra and field.json_schema_extra.get("map_from_storage", False):
                    if field_name in merged_input:
                        merged_input[field_name] = FileMappedInput(
                            input=merged_input[field_name], files=self.file_store_backend.list_files_bytes()
                        )
                    else:
                        merged_input[field_name] = self.file_store_backend.list_files_bytes()
            if isinstance(tool, Python):
                merged_input["files"] = self.file_store_backend.list_files_bytes()

        if tool_params:
            debug_info = []
            if self.verbose:
                debug_info.append(f"Tool parameter merging for {tool.name} (ID: {tool.id}):")
                debug_info.append(f"Starting with input: {merged_input}")

            # 1. Apply global parameters (lowest priority)
            global_params = tool_params.global_params
            if global_params:
                self._apply_parameters(merged_input, global_params, "global", debug_info)

            # 2. Apply parameters by tool name (medium priority)
            name_params_any = tool_params.by_name_params.get(tool.name) or tool_params.by_name_params.get(
                self.sanitize_tool_name(tool.name)
            )
            if name_params_any:
                if isinstance(name_params_any, ToolParams):
                    if self.verbose:
                        debug_info.append(
                            f"  - From name:{tool.name}: encountered nested ToolParams (ignored for non-agent tool)"
                        )
                elif isinstance(name_params_any, dict):
                    self._apply_parameters(merged_input, name_params_any, f"name:{tool.name}", debug_info)

            # 3. Apply parameters by tool ID (highest priority)
            id_params_any = tool_params.by_id_params.get(tool.id)
            if id_params_any:
                if isinstance(id_params_any, ToolParams):
                    if self.verbose:
                        debug_info.append(
                            f"  - From id:{tool.id}: encountered nested ToolParams (ignored for non-agent tool)"
                        )
                elif isinstance(id_params_any, dict):
                    self._apply_parameters(merged_input, id_params_any, f"id:{tool.id}", debug_info)

            if self.verbose and debug_info:
                logger.debug("\n".join(debug_info))

        child_kwargs = kwargs | {"recoverable_error": True}
        is_child_agent = isinstance(tool, Agent)

        if is_child_agent and tool_params:
            nested_any = (
                tool_params.by_id_params.get(getattr(tool, "id", ""))
                or tool_params.by_name_params.get(getattr(tool, "name", ""))
                or tool_params.by_name_params.get(self.sanitize_tool_name(getattr(tool, "name", "")))
            )
            if nested_any:
                if isinstance(nested_any, ToolParams):
                    nested_tp = nested_any
                elif isinstance(nested_any, dict):
                    nested_tp = ToolParams.model_validate(nested_any)
                else:
                    nested_tp = None
                if nested_tp:
                    child_kwargs = child_kwargs | {"tool_params": nested_tp}

        effective_delegate_final = delegate_final and is_child_agent
        if is_child_agent and isinstance(merged_input, dict) and "delegate_final" in merged_input:
            effective_delegate_final = effective_delegate_final or bool(merged_input.pop("delegate_final"))

        tool_to_run = tool
        tool_config = ensure_config(config)
        if getattr(self, "parallel_tool_calls_enabled", False):
            tool_to_run, tool_config = self._clone_tool_for_execution(tool, tool_config)

        tool_result = tool_to_run.run(
            input_data=merged_input,
            config=tool_config,
            run_depends=deepcopy(self._run_depends),
            **child_kwargs,
        )
        dependency_node = tool_to_run if tool_to_run is not tool else tool
        dependency_dict = NodeDependency(node=dependency_node).to_dict(for_tracing=True)
        if update_run_depends:
            self._run_depends = [dependency_dict]
        if tool_result.status != RunnableStatus.SUCCESS:
            error_message = f"Tool '{tool.name}' failed: {tool_result.error.to_dict()}"
            if tool_result.error.recoverable:
                raise ToolExecutionException(error_message)
            else:
                raise ValueError(error_message)
        tool_result_output_content = tool_result.output.get("content")

        self._handle_tool_generated_files(tool, tool_result)

        tool_result_content_processed = process_tool_output_for_agent(
            content=tool_result_output_content,
            max_tokens=self.tool_output_max_length,
            truncate=self.tool_output_truncate_enabled and not effective_delegate_final,
        )

        self._tool_cache[ToolCacheEntry(action=tool.name, action_input=tool_input)] = tool_result_content_processed

        output_files = tool_result.output.get("files", [])
        if collect_dependency:
            return tool_result_content_processed, output_files, dependency_dict

        return tool_result_content_processed, output_files

    def _ensure_named_files(self, files: list[io.BytesIO | bytes]) -> list[io.BytesIO | bytes]:
        """Ensure all uploaded files have name and description attributes and store them in file_store if available."""
        named = []
        for i, f in enumerate(files):
            if isinstance(f, bytes):
                bio = io.BytesIO(f)
                bio.name = f"file_{i}.bin"
                bio.description = "User-provided file"

                if self.file_store_backend:
                    try:
                        self.file_store_backend.store(
                            file_path=bio.name,
                            content=f,
                            content_type="application/octet-stream",
                            metadata={"description": bio.description, "source": "user_upload"},
                            overwrite=True,
                        )
                    except Exception as e:
                        logger.warning(f"Failed to store file {bio.name} in file_store: {e}")

                named.append(bio)
            elif isinstance(f, io.BytesIO):
                if not hasattr(f, "name"):
                    f.name = f"file_{i}"
                if not hasattr(f, "description"):
                    f.description = "User-provided file"

                if self.file_store_backend:
                    try:
                        content = f.read()
                        f.seek(0)

                        self.file_store_backend.store(
                            file_path=f.name,
                            content=content,
                            content_type="application/octet-stream",
                            metadata={"description": f.description, "source": "user_upload"},
                            overwrite=True,
                        )
                    except Exception as e:
                        logger.warning(f"Failed to store file {f.name} in file_store: {e}")

                named.append(f)
            else:
                named.append(f)
        return named

    def _handle_tool_generated_files(self, tool: Node, tool_result: RunnableResult) -> None:
        """
        Handle files generated by tools and store them in the file store.

        Args:
            tool: The tool that generated the files
            tool_result: The result from the tool execution
        """
        if not self.file_store_backend:
            return

        if isinstance(tool_result.output, dict) and "files" in tool_result.output:
            tool_files = tool_result.output.get("files", [])
            if tool_files:
                stored_files = []
                for file in tool_files:
                    if isinstance(file, io.BytesIO):
                        file_name = getattr(file, "name", f"file_{id(file)}.bin")
                        file_description = getattr(file, "description", "Tool-generated file")
                        content_type = getattr(file, "content_type", "application/octet-stream")

                        content = file.read()
                        file.seek(0)

                        self.file_store_backend.store(
                            file_path=file_name,
                            content=content,
                            content_type=content_type,
                            metadata={"description": file_description, "source": "tool_generated"},
                            overwrite=True,
                        )
                        stored_files.append(file_name)
                    elif isinstance(file, bytes):
                        file_name = f"file_{id(file)}.bin"
                        file_description = f"Tool-{tool.name}-generated file"
                        content_type = "application/octet-stream"
                        self.file_store_backend.store(
                            file_path=file_name,
                            content=file,
                            content_type=content_type,
                            metadata={"description": file_description, "source": "tool_generated"},
                            overwrite=True,
                        )
                        stored_files.append(file_name)
                    else:
                        logger.warning(f"Unsupported file type from tool '{tool.name}': {type(file)}")

                logger.info(f"Tool '{tool.name}' generated {len(stored_files)} file(s): {stored_files}")

    INTERNAL_CACHE_SUFFIXES: ClassVar[tuple[str, ...]] = (EXTRACTED_TEXT_SUFFIX,)

    @classmethod
    def _filter_generated_files(cls, files: list[io.BytesIO], uploaded_names: set[str]) -> list[io.BytesIO]:
        if not files:
            return []

        filtered: list[io.BytesIO] = []
        for file in files:
            name = getattr(file, "name", None)
            if not name:
                filtered.append(file)
                continue
            if name in uploaded_names:
                continue
            if cls._is_internal_cache_file(name, uploaded_names):
                continue
            filtered.append(file)
        return filtered

    @classmethod
    def _is_internal_cache_file(cls, name: str, uploaded_names: set[str]) -> bool:
        for suffix in cls.INTERNAL_CACHE_SUFFIXES:
            if not name.endswith(suffix):
                continue
            base_name = name[: -len(suffix)]
            if not base_name:
                return True
            if (not uploaded_names) or (base_name in uploaded_names):
                return True
        return False

    def _inject_attached_files_into_message(
        self, input_message: Message | VisionMessage, files: list[io.BytesIO]
    ) -> Message | VisionMessage:
        if not files:
            return input_message

        if not isinstance(input_message, Message):
            return input_message

        file_lines = []

        for f in files:
            name = getattr(f, "name", None) or "unnamed_file"
            description = getattr(f, "description", "") or ""
            description = description.strip()
            if description:
                file_lines.append(f"- {name}: {description}")
            else:
                file_lines.append(f"- {name}")

        if not file_lines:
            return input_message

        file_section = "\n".join(["\nAttached files available to you:"] + file_lines) + "\n"
        preview_section = self._build_file_previews_section(files)
        if preview_section:
            file_section = f"{file_section}{preview_section}"

        if isinstance(input_message.content, str):
            input_message.content = f"{input_message.content.rstrip()}{file_section}"
        else:
            input_message.content = input_message.content + file_section

        return input_message

    def _build_file_previews_section(self, files: list[io.BytesIO]) -> str:
        """Build a short, truncated preview section for uploaded files."""
        if not files or self.file_attachment_preview_bytes <= 0:
            return ""

        previews: list[str] = []
        max_bytes = max(1, self.file_attachment_preview_bytes)
        for file_obj in files:
            preview = self._extract_file_preview(file_obj, max_bytes)
            if preview:
                previews.append(preview)

        if not previews:
            return ""

        return "\n".join(["File previews (truncated, may be incomplete):", *previews]) + "\n"

    @staticmethod
    def _extract_file_preview(file_obj: io.BytesIO, max_bytes: int) -> str:
        """Extract a textual/hex preview from a BytesIO without consuming it."""
        if not hasattr(file_obj, "read"):
            return ""

        seekable = hasattr(file_obj, "seek")
        position = 0
        if seekable:
            try:
                position = file_obj.tell()
            except Exception:
                seekable = False

        try:
            if seekable:
                file_obj.seek(0)
            snippet = file_obj.read(max_bytes)
        except Exception:
            return ""
        finally:
            if seekable:
                try:
                    file_obj.seek(position)
                except Exception as exc:
                    logger.debug(
                        "Failed to restore file pointer for preview on %s: %s", getattr(file_obj, "name", ""), exc
                    )

        if not snippet:
            return ""

        try:
            preview_text = snippet.decode("utf-8")
            descriptor = "text"
        except UnicodeDecodeError:
            preview_text = snippet.hex()
            descriptor = "hex"

        suffix = "..." if len(snippet) >= max_bytes else ""
        name = getattr(file_obj, "name", "uploaded_file")
        return f"- {name} ({descriptor} preview): {preview_text}{suffix}"

    @property
    def file_store_backend(self) -> FileStore | None:
        """Get the file store backend from the configuration if enabled."""
        return self.file_store.backend if self.file_store.enabled else None

    @property
    def tool_description(self) -> str:
        """Returns a description of the tools available to the agent."""
        return (
            "\n".join(
                [
                    f"{tool.name}:\n <{tool.name}_description>\n{tool.description.strip()}\n<\\{tool.name}_description>"
                    for tool in self.tools
                ]
            )
            if self.tools
            else ""
        )

    @property
    def tool_names(self) -> str:
        """Returns a comma-separated list of tool names available to the agent."""
        return ",".join([self.sanitize_tool_name(tool.name) for tool in self.tools])

    @property
    def tool_by_names(self) -> dict[str, Node]:
        """Returns a dictionary mapping tool names to their corresponding Node objects."""
        return {self.sanitize_tool_name(tool.name): tool for tool in self.tools}

    def reset_run_state(self):
        """Resets the agent's run state."""
        self._intermediate_steps = {}
        self._run_depends = []
        self._tool_cache: dict[ToolCacheEntry, Any] = {}

    def generate_prompt(self, block_names: list[str] | None = None, **kwargs) -> str:
        """Generates the prompt using specified blocks and variables."""
        temp_variables = self._prompt_variables.copy()
        temp_variables.update(kwargs)

        formatted_prompt_blocks = {}
        for block, content in self._prompt_blocks.items():
            if block_names is None or block in block_names:
                formatted_content = Template(content).render(**temp_variables)
                if formatted_content:
                    formatted_prompt_blocks[block] = formatted_content

        prompt = Template(self.AGENT_PROMPT_TEMPLATE).render(formatted_prompt_blocks).strip()
        prompt = self._clean_prompt(prompt)
        return textwrap.dedent(prompt)

    def _clean_prompt(self, prompt_text):
        cleaned = re.sub(r"\n{3,}", "\n\n", prompt_text)
        return cleaned.strip()

    def get_clone_attr_initializers(self) -> dict[str, Callable[[Node], Any]]:
        base = super().get_clone_attr_initializers()
        from dynamiq.prompts import Prompt

        base.update(
            {
                "_prompt": (lambda _self: Prompt(messages=[]) if Prompt else None),
            }
        )
        return base

file_store_backend: FileStore | None property

Get the file store backend from the configuration if enabled.

tool_by_names: dict[str, Node] property

Returns a dictionary mapping tool names to their corresponding Node objects.

tool_description: str property

Returns a description of the tools available to the agent.

tool_names: str property

Returns a comma-separated list of tool names available to the agent.
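
For illustration, given hypothetical tool names, the derived properties behave like this (sanitize_tool_name replaces spaces with hyphens and strips other disallowed characters):

# Suppose agent.tools holds nodes named "Web Search" and "Python (sandbox)".
agent.tool_names        # "Web-Search,Python-sandbox"
agent.tool_by_names     # {"Web-Search": <Node>, "Python-sandbox": <Node>}
agent.tool_description  # newline-joined <name>_description blocks used in the prompt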

execute(input_data, input_message=None, config=None, **kwargs)

Executes the agent with the given input data.
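
In practice execute is reached through the node's run entry point. A sketch (the input keys follow AgentInputSchema as referenced by the source: input, plus optional user_id, session_id, metadata, files, images, and tool_params):

result = agent.run(
    input_data={
        "input": "What changed in the last release?",
        "user_id": "user-42",    # with session_id, enables memory when configured
        "session_id": "sess-1",
    },
)
content = result.output["content"]
generated = result.output.get("files", [])  # present only when a FileStore holds new files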


generate_prompt(block_names=None, **kwargs)

Generates the prompt using specified blocks and variables.
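
A sketch of customizing blocks and variables before generation (the "instructions" content and the "language" variable below are illustrative; "date", "tools", "instructions", and "context" are the defaults set by _init_prompt_blocks):

agent.set_block("instructions", "Always answer in {{ language }}.")
agent.set_prompt_variable("language", "English")

full_prompt = agent.generate_prompt()                  # render every block
partial = agent.generate_prompt(block_names=["role", "instructions"])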


get_context_for_input_schema()

Provides context for input schema that is required for proper validation.
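
For example, a role without explicit raw markers is wrapped so that Jinja validation treats it literally (assuming input_message was not set):

agent.role = "Respond as {{ persona }}"
agent.get_context_for_input_schema()
# {"input_message": None, "role": "{% raw %}Respond as {{ persona }}{% endraw %}"}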


init_components(connection_manager=None)

Initialize components for the agent, its LLM, and its tools.

Parameters:

    connection_manager (ConnectionManager, default None): The connection manager. Defaults to a new ConnectionManager.
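
A sketch (passing a manager is optional; the import path is an assumption):

from dynamiq.connections.managers import ConnectionManager

agent.init_components(connection_manager=ConnectionManager())
# Also initializes the LLM and any tools with postponed component init.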

reset_run_state()

Resets the agent's run state.


retrieve_conversation_history(user_query=None, user_id=None, session_id=None, limit=None, strategy=MemoryRetrievalStrategy.ALL)

Retrieves conversation history for the agent using the specified strategy.

Parameters:

    user_query (str, default None): Current user input to find relevant context (for RELEVANT/HYBRID strategies).
    user_id (str, default None): Optional user identifier.
    session_id (str, default None): Optional session identifier.
    limit (int, default None): Maximum number of messages to return (defaults to memory_limit).
    strategy (MemoryRetrievalStrategy, default ALL): Which retrieval strategy to use (ALL, RELEVANT, or HYBRID).

Returns:

    list[Message]: List of messages forming a valid conversation context.
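
A sketch of a direct call (requires a configured Memory plus at least one identifier; MemoryRetrievalStrategy is the enum referenced throughout this module):

history = agent.retrieve_conversation_history(
    user_query="billing issue",
    user_id="user-42",
    limit=20,
    strategy=MemoryRetrievalStrategy.RELEVANT,
)
# Returns [] when no memory is configured or neither user_id nor session_id is given.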

Source code in dynamiq/nodes/agents/base.py
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
def retrieve_conversation_history(
    self,
    user_query: str = None,
    user_id: str = None,
    session_id: str = None,
    limit: int = None,
    strategy: MemoryRetrievalStrategy = MemoryRetrievalStrategy.ALL,
) -> list[Message]:
    """
    Retrieves conversation history for the agent using the specified strategy.

    Args:
        user_query: Current user input to find relevant context (for RELEVANT/HYBRID strategies)
        user_id: Optional user identifier
        session_id: Optional session identifier
        limit: Maximum number of messages to return (defaults to memory_limit)
        strategy: Which retrieval strategy to use (ALL, RELEVANT, or HYBRID)

    Returns:
        List of messages forming a valid conversation context
    """
    if not self.memory or not (user_id or session_id):
        return []

    filters = {}
    if user_id:
        filters["user_id"] = user_id
    if session_id:
        filters["session_id"] = session_id

    limit = limit or self.memory_limit

    if strategy == MemoryRetrievalStrategy.RELEVANT and not user_query:
        logger.warning("RELEVANT strategy selected but no user_query provided - falling back to ALL")
        strategy = MemoryRetrievalStrategy.ALL

    conversation = self.memory.get_agent_conversation(
        query=user_query,
        limit=limit,
        filters=filters,
        strategy=strategy,
    )
    return conversation
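
A hedged usage sketch (identifiers are hypothetical; the agent must have memory configured, and at least one of user_id/session_id must be supplied or an empty list is returned):

    history = agent.retrieve_conversation_history(
        user_query="What did we decide about deployment?",
        user_id="user-42",
        session_id="session-7",
        strategy=MemoryRetrievalStrategy.HYBRID,
    )
    # Note: requesting RELEVANT without a user_query logs a warning and
    # silently falls back to the ALL strategy.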

sanitize_tool_name(s)

Sanitizes a tool name by replacing spaces with hyphens and stripping any character matching [^a-zA-Z0-9_-].

Source code in dynamiq/nodes/agents/base.py, lines 537-541
def sanitize_tool_name(self, s: str):
    """Sanitize a tool name: replace spaces with hyphens and strip characters matching [^a-zA-Z0-9_-]."""
    s = s.replace(" ", "-")  # spaces become hyphens
    sanitized = re.sub(r"[^a-zA-Z0-9_-]", "", s)  # drop any remaining disallowed characters
    return sanitized
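
For example (spaces become hyphens, everything else outside the allowed set is dropped):

    agent.sanitize_tool_name("Web Search (v2)!")  # -> "Web-Search-v2"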

set_block(block_name, content)

Adds or updates a prompt block.

Source code in dynamiq/nodes/agents/base.py, lines 556-558
def set_block(self, block_name: str, content: str):
    """Adds or updates a prompt block."""
    self._prompt_blocks[block_name] = content

set_prompt_variable(variable_name, value)

Sets or updates a prompt variable.

Source code in dynamiq/nodes/agents/base.py, lines 560-562
def set_prompt_variable(self, variable_name: str, value: Any):
    """Sets or updates a prompt variable."""
    self._prompt_variables[variable_name] = value
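
A small sketch combining both setters (the block name and variable are hypothetical); blocks are later rendered with Jinja against the stored prompt variables, as the action methods below show:

    agent.set_block("instructions", "Always answer in {{ language }}.")
    agent.set_prompt_variable("language", "French")
    # The prompt-building step renders the block with language="French".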

stream_content(content, source, step, config=None, **kwargs)

Streams data to the node's streaming output, tagged with its source and step.

Parameters:

    content (str | dict, required): Data that will be streamed.
    source (str, required): Source of the content.
    step (str, required): Description of the step.
    config (Optional[RunnableConfig], default None): Configuration for the runnable.
    **kwargs: Additional keyword arguments.

Returns:

    str | dict: Streamed data.

Source code in dynamiq/nodes/agents/base.py, lines 795-822
def stream_content(
    self,
    content: str | dict,
    source: str,
    step: str,
    config: RunnableConfig | None = None,
    **kwargs,
) -> str | dict:
    """
    Streams data.

    Args:
        content (str | dict): Data that will be streamed.
        source (str): Source of the content.
        step (str): Description of the step.
        config (Optional[RunnableConfig]): Configuration for the runnable.
        **kwargs: Additional keyword arguments.

    Returns:
        str | dict: Streamed data.
    """
    if not isinstance(source, str):
        raise ValueError(
            f"stream_content source parameter must be a string, got {type(source).__name__}: {source}. "
            f"This likely indicates incorrect parameter passing from the calling code."
        )

    return self.stream_response(content=content, source=source, step=step, config=config, **kwargs)
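
A minimal sketch of a streaming call (content and step values are illustrative; config is assumed to be a RunnableConfig already in scope). A non-string source raises ValueError before any chunk is emitted:

    agent.stream_content(
        content={"answer": "partial result"},
        source=agent.name,
        step="answer",
        config=config,
    )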

to_dict(**kwargs)

Converts the instance to a dictionary.

Source code in dynamiq/nodes/agents/base.py, lines 502-518
def to_dict(self, **kwargs) -> dict:
    """Converts the instance to a dictionary."""
    data = super().to_dict(**kwargs)
    data["llm"] = self.llm.to_dict(**kwargs)

    data["tools"] = [tool.to_dict(**kwargs) for tool in self.tools if tool.id not in self._mcp_server_tool_ids]
    data["tools"] = data["tools"] + [mcp_server.to_dict(**kwargs) for mcp_server in self._mcp_servers]

    data["memory"] = self.memory.to_dict(**kwargs) if self.memory else None
    if self.files:
        data["files"] = [{"name": getattr(f, "name", f"file_{i}")} for i, f in enumerate(self.files)]
    if self.images:
        data["images"] = [{"name": getattr(f, "name", f"image_{i}")} for i, f in enumerate(self.images)]

    data["file_store"] = self.file_store.to_dict(**kwargs) if self.file_store else None

    return data
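
Usage is a plain serialization call; nested components are serialized recursively:

    snapshot = agent.to_dict()
    # snapshot["llm"], snapshot["tools"], snapshot["memory"], and
    # snapshot["file_store"] hold the serialized sub-components; MCP server
    # tools are folded back into "tools", and files/images are reduced to
    # lightweight name stubs.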

AgentManager

Bases: Agent

Manager class that extends the Agent class to include specific actions.

Source code in dynamiq/nodes/agents/base.py, lines 1386-1491
class AgentManager(Agent):
    """Manager class that extends the Agent class to include specific actions."""

    _actions: dict[str, Callable] = PrivateAttr(default_factory=dict)
    name: str = "Agent Manager"
    input_schema: ClassVar[type[AgentManagerInputSchema]] = AgentManagerInputSchema

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._init_actions()

    def to_dict(self, **kwargs) -> dict:
        """Converts the instance to a dictionary."""
        data = super().to_dict(**kwargs)
        data["_actions"] = {
            k: getattr(action, "__name__", str(action))
            for k, action in self._actions.items()
        }
        return data

    def _init_actions(self):
        """Initializes the default actions for the manager."""
        self._actions = {
            "plan": self._plan,
            "assign": self._assign,
            "final": self._final,
            "handle_input": self._handle_input,
        }

    def add_action(self, name: str, action: Callable):
        """Adds a custom action to the manager."""
        self._actions[name] = action

    def get_context_for_input_schema(self) -> dict:
        """Provides context for input schema that is required for proper validation."""
        return {"actions": list(self._actions.keys())}

    def execute(
        self, input_data: AgentManagerInputSchema, config: RunnableConfig | None = None, **kwargs
    ) -> dict[str, Any]:
        """Executes the manager agent with the given input data and action."""
        log_data = dict(input_data).copy()

        if log_data.get("images"):
            log_data["images"] = [f"image_{i}" for i in range(len(log_data["images"]))]

        if log_data.get("files"):
            log_data["files"] = [f"file_{i}" for i in range(len(log_data["files"]))]

        logger.info(f"Agent {self.name} - {self.id}: started with input {log_data}")
        self.reset_run_state()
        config = config or RunnableConfig()
        self.run_on_node_execute_run(config.callbacks, **kwargs)

        action = input_data.action

        self._prompt_variables.update(dict(input_data))

        kwargs = kwargs | {"parent_run_id": kwargs.get("run_id")}
        kwargs.pop("run_depends", None)
        _result_llm = self._actions[action](config=config, **kwargs)
        result = {"action": action, "result": _result_llm}

        execution_result = {
            "content": result,
        }
        logger.info(f"Agent {self.name} - {self.id}: finished with RESULT:\n{str(result)[:200]}...")

        return execution_result

    def _plan(self, config: RunnableConfig, **kwargs) -> str:
        """Executes the 'plan' action."""
        prompt = Template(self._prompt_blocks.get("plan")).render(**(self._prompt_variables | kwargs))
        llm_result = self._run_llm([Message(role=MessageRole.USER, content=prompt)], config, **kwargs).output["content"]

        return llm_result

    def _assign(self, config: RunnableConfig, **kwargs) -> str:
        """Executes the 'assign' action."""
        prompt = Template(self._prompt_blocks.get("assign")).render(**(self._prompt_variables | kwargs))
        llm_result = self._run_llm([Message(role=MessageRole.USER, content=prompt)], config, **kwargs).output["content"]

        return llm_result

    def _final(self, config: RunnableConfig, **kwargs) -> str:
        """Executes the 'final' action."""
        prompt = Template(self._prompt_blocks.get("final")).render(**(self._prompt_variables | kwargs))
        llm_result = self._run_llm([Message(role=MessageRole.USER, content=prompt)], config, **kwargs).output["content"]
        if self.streaming.enabled:
            return self.stream_content(
                content=llm_result,
                step="manager_final_output",
                source=self.name,
                config=config,
                **kwargs,
            )
        return llm_result

    def _handle_input(self, config: RunnableConfig, **kwargs) -> str:
        """
        Executes the single 'handle_input' action to either respond or plan
        based on user request complexity.
        """
        prompt = Template(self._prompt_blocks.get("handle_input")).render(**(self._prompt_variables | kwargs))
        llm_result = self._run_llm([Message(role=MessageRole.USER, content=prompt)], config, **kwargs).output["content"]
        return llm_result
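
A hedged sketch of registering a custom action; the callable's signature mirrors the built-in actions, while the AgentManager constructor arguments are assumptions:

    def review(config, **kwargs):
        # Custom actions receive the RunnableConfig plus passthrough kwargs,
        # just like the built-in _plan/_assign/_final/_handle_input.
        return "review complete"

    manager = AgentManager(llm=llm)  # constructor arguments assumed
    manager.add_action("review", review)
    # "review" is now a valid value for input_data.action, since
    # get_context_for_input_schema() exposes all registered action names.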

add_action(name, action)

Adds a custom action to the manager.

Source code in dynamiq/nodes/agents/base.py, lines 1415-1417
def add_action(self, name: str, action: Callable):
    """Adds a custom action to the manager."""
    self._actions[name] = action

execute(input_data, config=None, **kwargs)

Executes the manager agent with the given input data and action.

Source code in dynamiq/nodes/agents/base.py, lines 1423-1454
def execute(
    self, input_data: AgentManagerInputSchema, config: RunnableConfig | None = None, **kwargs
) -> dict[str, Any]:
    """Executes the manager agent with the given input data and action."""
    log_data = dict(input_data).copy()

    if log_data.get("images"):
        log_data["images"] = [f"image_{i}" for i in range(len(log_data["images"]))]

    if log_data.get("files"):
        log_data["files"] = [f"file_{i}" for i in range(len(log_data["files"]))]

    logger.info(f"Agent {self.name} - {self.id}: started with input {log_data}")
    self.reset_run_state()
    config = config or RunnableConfig()
    self.run_on_node_execute_run(config.callbacks, **kwargs)

    action = input_data.action

    self._prompt_variables.update(dict(input_data))

    kwargs = kwargs | {"parent_run_id": kwargs.get("run_id")}
    kwargs.pop("run_depends", None)
    _result_llm = self._actions[action](config=config, **kwargs)
    result = {"action": action, "result": _result_llm}

    execution_result = {
        "content": result,
    }
    logger.info(f"Agent {self.name} - {self.id}: finished with RESULT:\n{str(result)[:200]}...")

    return execution_result

get_context_for_input_schema()

Provides the context for the input schema that is required for proper validation.

Source code in dynamiq/nodes/agents/base.py, lines 1419-1421
def get_context_for_input_schema(self) -> dict:
    """Provides context for input schema that is required for proper validation."""
    return {"actions": list(self._actions.keys())}

to_dict(**kwargs)

Converts the instance to a dictionary.

Source code in dynamiq/nodes/agents/base.py, lines 1397-1404
def to_dict(self, **kwargs) -> dict:
    """Converts the instance to a dictionary."""
    data = super().to_dict(**kwargs)
    data["_actions"] = {
        k: getattr(action, "__name__", str(action))
        for k, action in self._actions.items()
    }
    return data

AgentStatus

Bases: str, Enum

Represents the status of an agent's execution.

Source code in dynamiq/nodes/agents/base.py, lines 272-276
class AgentStatus(str, Enum):
    """Represents the status of an agent's execution."""

    SUCCESS = "success"
    FAIL = "fail"

StreamChunk

Bases: BaseModel

Model for streaming chunks with choices containing delta updates.

Source code in dynamiq/nodes/agents/base.py, lines 266-269
class StreamChunk(BaseModel):
    """Model for streaming chunks with choices containing delta updates."""

    choices: list[StreamChunkChoice]

StreamChunkChoice

Bases: BaseModel

Stream chunk choice model.

Source code in dynamiq/nodes/agents/base.py, lines 260-263
class StreamChunkChoice(BaseModel):
    """Stream chunk choice model."""

    delta: StreamChunkChoiceDelta

StreamChunkChoiceDelta

Bases: BaseModel

Delta model for content chunks.

Source code in dynamiq/nodes/agents/base.py, lines 206-257
class StreamChunkChoiceDelta(BaseModel):
    """Delta model for content chunks."""
    content: str | dict
    source: str
    step: str

    @field_validator('source')
    @classmethod
    def validate_source(cls, v):
        """Ensure source is always a string."""
        if not isinstance(v, str):
            raise ValueError(f"source must be a string, got {type(v).__name__}: {v}")
        return v

    def _recursive_serialize(self, obj, key_path: str = "", index: int = None):
        """Recursively serialize an object, converting any BytesIO objects to FileInfo objects."""
        if isinstance(obj, io.BytesIO):
            return convert_bytesio_to_file_info(obj, key_path, index).model_dump()

        elif isinstance(obj, dict):
            result = {}
            for k, v in obj.items():
                new_key_path = f"{key_path}.{k}" if key_path else k
                result[k] = self._recursive_serialize(v, new_key_path)
            return result

        elif isinstance(obj, list):
            result = []

            for i, item in enumerate(obj):
                new_key_path = f"{key_path}[{i}]" if key_path else f"item_{i}"
                result.append(self._recursive_serialize(item, new_key_path, i))
            return result

        else:
            return obj

    @model_serializer
    def serialize_content(self):
        """Serialize content dict, converting any BytesIO objects to base64 strings while preserving key structure."""
        if self.content is None or not isinstance(self.content, dict):
            return {"content": self.content, "source": self.source, "step": self.step}

        serialized_content = self._recursive_serialize(self.content)

        result = {
            "content": serialized_content,
            "source": self.source,
            "step": self.step,
        }

        return result

serialize_content()

Serialize content dict, converting any BytesIO objects to FileInfo objects while preserving key structure.

Source code in dynamiq/nodes/agents/base.py, lines 243-257
@model_serializer
def serialize_content(self):
    """Serialize content dict, converting any BytesIO objects to base64 strings while preserving key structure."""
    if self.content is None or not isinstance(self.content, dict):
        return {"content": self.content, "source": self.source, "step": self.step}

    serialized_content = self._recursive_serialize(self.content)

    result = {
        "content": serialized_content,
        "source": self.source,
        "step": self.step,
    }

    return result

validate_source(v) classmethod

Ensure source is always a string.

Source code in dynamiq/nodes/agents/base.py, lines 212-218
@field_validator('source')
@classmethod
def validate_source(cls, v):
    """Ensure source is always a string."""
    if not isinstance(v, str):
        raise ValueError(f"source must be a string, got {type(v).__name__}: {v}")
    return v
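
To see the validator and serializer together, a short sketch (io and the BytesIO-to-FileInfo conversion helper belong to the module, per the source above):

    delta = StreamChunkChoiceDelta(
        content={"text": "partial answer", "raw": io.BytesIO(b"...")},
        source="Agent Manager",  # a non-string here would fail validate_source
        step="answer",
    )
    chunk = StreamChunk(choices=[StreamChunkChoice(delta=delta)])
    payload = chunk.model_dump()
    # payload["choices"][0]["delta"]["content"]["raw"] is now a FileInfo dict
    # rather than a raw BytesIO object; the key structure is preserved.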