Skip to content

Fine-Grained Configuration / X-Config

Advanced configuration for session behavior, prompt settings, and response formatting.

FineGrainedConfigHeader pydantic-model

Structured configuration header (X-Config).

Groups overrides into FSM, prompt, response format, and per-LLM generation config sections.

Example
config = FineGrainedConfigHeader.dual_llm(
    max_n_turns=10,
    disable_rllm=False,
    include_program=True,
)
config = FineGrainedConfigHeader.single_llm(max_n_turns=50)
Show JSON schema:
{
  "$defs": {
    "FsmOverrides": {
      "additionalProperties": false,
      "description": "Overrideable FSM fields (shared + dual-llm-only).\n\nSingle-LLM configs silently ignore dual-llm-only fields.",
      "properties": {
        "min_num_tools_for_filtering": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.",
          "title": "Min Num Tools For Filtering"
        },
        "clear_session_meta": {
          "anyOf": [
            {
              "enum": [
                "never",
                "every_attempt",
                "every_turn"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.",
          "title": "Clear Session Meta"
        },
        "max_n_turns": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Maximum number of turns allowed in the session. If None, unlimited turns are allowed.",
          "title": "Max N Turns"
        },
        "history_mismatch_policy": {
          "anyOf": [
            {
              "enum": [
                "reject",
                "restart_turn",
                "continue"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.",
          "title": "History Mismatch Policy"
        },
        "clear_history_every_n_attempts": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Single-step mode only. Clear all failed step history every N attempts to save tokens.",
          "title": "Clear History Every N Attempts"
        },
        "disable_rllm": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to skip the response LLM (RLLM) review step.",
          "title": "Disable Rllm"
        },
        "enable_multistep_planning": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.",
          "title": "Enable Multistep Planning"
        },
        "enabled_internal_tools": {
          "anyOf": [
            {
              "items": {
                "enum": [
                  "parse_with_ai",
                  "verify_hypothesis",
                  "set_policy",
                  "complete_turn"
                ],
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "List of internal tool IDs available to planning LLM.",
          "title": "Enabled Internal Tools"
        },
        "prune_failed_steps": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Multi-step mode only. Remove failed steps from history after turn completes.",
          "title": "Prune Failed Steps"
        },
        "force_to_cache": {
          "anyOf": [
            {
              "items": {
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.",
          "title": "Force To Cache"
        },
        "max_pllm_steps": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Maximum number of steps allowed per turn.",
          "title": "Max Pllm Steps"
        },
        "max_pllm_failed_steps": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Maximum number of failed steps allowed per turn.",
          "title": "Max Pllm Failed Steps"
        },
        "max_tool_calls_per_step": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.",
          "title": "Max Tool Calls Per Step"
        },
        "reduced_grammar_for_rllm_review": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.",
          "title": "Reduced Grammar For Rllm Review"
        },
        "retry_on_policy_violation": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "When True, allow planning LLM to retry after policy violation.",
          "title": "Retry On Policy Violation"
        },
        "wrap_tool_result": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to wrap tool results in Ok/Err types.",
          "title": "Wrap Tool Result"
        },
        "detect_tool_errors": {
          "anyOf": [
            {
              "enum": [
                "none",
                "regex",
                "llm"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.",
          "title": "Detect Tool Errors"
        },
        "detect_tool_error_regex_pattern": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.",
          "title": "Detect Tool Error Regex Pattern"
        },
        "detect_tool_error_max_result_length": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.",
          "title": "Detect Tool Error Max Result Length"
        },
        "strict_tool_result_parsing": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.",
          "title": "Strict Tool Result Parsing"
        },
        "tool_result_transform": {
          "anyOf": [
            {
              "enum": [
                "none",
                "codex"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.",
          "title": "Tool Result Transform"
        }
      },
      "title": "FsmOverrides",
      "type": "object"
    },
    "GenerationConfigOverrides": {
      "additionalProperties": false,
      "description": "Overrideable generation-config fields (all optional).\n\nOnly fields explicitly set are merged into the target LLM's generation\nconfig; unset fields keep their existing values.\n\nAttributes:\n    frequency_penalty: Penalize tokens based on existing frequency (-2.0 to 2.0).\n    logit_bias: Modify likelihood of specified tokens.\n    logprobs: Whether to return log probabilities of output tokens.\n    max_completion_tokens: Upper bound for generated tokens.\n    presence_penalty: Penalize tokens based on presence in text so far (-2.0 to 2.0).\n    seed: Seed for deterministic sampling.\n    temperature: Sampling temperature (0.0 to 2.0).\n    top_p: Nucleus sampling threshold.\n    reasoning: Reasoning/extended thinking configuration.",
      "properties": {
        "frequency_penalty": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Frequency penalty (-2.0 to 2.0).",
          "title": "Frequency Penalty"
        },
        "logit_bias": {
          "anyOf": [
            {
              "additionalProperties": {
                "type": "integer"
              },
              "type": "object"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Token likelihood modifications.",
          "title": "Logit Bias"
        },
        "logprobs": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to return log probabilities.",
          "title": "Logprobs"
        },
        "max_completion_tokens": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Upper bound for generated tokens.",
          "title": "Max Completion Tokens"
        },
        "presence_penalty": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Presence penalty (-2.0 to 2.0).",
          "title": "Presence Penalty"
        },
        "seed": {
          "anyOf": [
            {
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Seed for deterministic sampling.",
          "title": "Seed"
        },
        "temperature": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Sampling temperature (0.0 to 2.0).",
          "title": "Temperature"
        },
        "top_p": {
          "anyOf": [
            {
              "type": "number"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Nucleus sampling threshold.",
          "title": "Top P"
        },
        "reasoning": {
          "anyOf": [
            {
              "$ref": "#/$defs/ReasoningConfigOverride"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Reasoning/thinking configuration."
        }
      },
      "title": "GenerationConfigOverrides",
      "type": "object"
    },
    "LlmOverrides": {
      "additionalProperties": false,
      "description": "Per-LLM generation config overrides.\n\nEach LLM slot can have its generation parameters overridden independently\nvia the ``llm`` section of the ``X-Config`` header. If an LLM's override\nis ``None``, that LLM's generation config remains unchanged.\n\nExample:\n    ```python\n    from sequrity.control import FineGrainedConfigHeader, LlmOverrides, GenerationConfigOverrides, ReasoningConfigOverride\n\n    config = FineGrainedConfigHeader(\n        llm=LlmOverrides(\n            pllm=GenerationConfigOverrides(temperature=0.5, reasoning=ReasoningConfigOverride(enabled=True, effort=\"high\")),\n            rllm=GenerationConfigOverrides(max_completion_tokens=512),\n        ),\n    )\n    ```",
      "properties": {
        "pllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Planning LLM generation config."
        },
        "rllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Review LLM generation config."
        },
        "grllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Grammar-constrained LLM generation config."
        },
        "qllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Query LLM generation config."
        },
        "tllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tool filtering LLM generation config."
        },
        "tagllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tag LLM generation config."
        },
        "policy_llm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Policy LLM generation config."
        },
        "tool_result_error_detector_llm": {
          "anyOf": [
            {
              "$ref": "#/$defs/GenerationConfigOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Tool result error detector LLM generation config."
        }
      },
      "title": "LlmOverrides",
      "type": "object"
    },
    "LlmPromptOverrides": {
      "additionalProperties": false,
      "description": "Base overrides for other LLM types (GRLLM, QLLM, etc.).",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        }
      },
      "title": "LlmPromptOverrides",
      "type": "object"
    },
    "PllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable PLLM prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal', 'code').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "clarify_ambiguous_queries": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether planning LLM is allowed to ask for clarification on ambiguous queries.",
          "title": "Clarify Ambiguous Queries"
        },
        "context_var_visibility": {
          "anyOf": [
            {
              "enum": [
                "none",
                "basic-notext",
                "basic-executable",
                "all-executable",
                "all"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The visibility level of context variables in the PLLM prompts. 'none': do not show any; 'basic-notext': show basic types but not text; 'basic-executable': show basic types and executable memory variables; 'all-executable': show all executable memory variables; 'all': show all.",
          "title": "Context Var Visibility"
        },
        "query_inline_roles": {
          "anyOf": [
            {
              "items": {
                "enum": [
                  "assistant",
                  "tool",
                  "developer",
                  "system"
                ],
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "List of roles whose messages will be inlined into the user query.",
          "title": "Query Inline Roles"
        },
        "query_role_name_overrides": {
          "anyOf": [
            {
              "additionalProperties": {
                "type": "string"
              },
              "type": "object"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Overrides for message role names in the inlined user query. For example, {'assistant': 'developer'} will change the role of assistant messages to developer.",
          "title": "Query Role Name Overrides"
        },
        "query_include_tool_calls": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include upstream tool calls in inlined query.",
          "title": "Query Include Tool Calls"
        },
        "query_include_tool_args": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include arguments of upstream tool calls.",
          "title": "Query Include Tool Args"
        },
        "query_include_tool_results": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include results of upstream tool calls.",
          "title": "Query Include Tool Results"
        },
        "debug_info_level": {
          "anyOf": [
            {
              "enum": [
                "minimal",
                "normal",
                "extra"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Level of detail for debug/execution information in planning LLM prompt.",
          "title": "Debug Info Level"
        }
      },
      "title": "PllmPromptOverrides",
      "type": "object"
    },
    "PromptOverrides": {
      "additionalProperties": false,
      "description": "Prompt overrides for all LLMs.",
      "properties": {
        "pllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/PllmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the planning LLM prompt."
        },
        "rllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/RllmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the review LLM prompt."
        },
        "grllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/LlmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the GRLLM prompt."
        },
        "qllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/LlmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the QLLM prompt."
        },
        "tllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/TllmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the tool-formulating LLM prompt."
        },
        "tagllm": {
          "anyOf": [
            {
              "$ref": "#/$defs/LlmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the tag LLM prompt."
        },
        "policy_llm": {
          "anyOf": [
            {
              "$ref": "#/$defs/LlmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the policy LLM prompt."
        },
        "error_detector_llm": {
          "anyOf": [
            {
              "$ref": "#/$defs/LlmPromptOverrides"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Configuration for the error detector LLM prompt."
        }
      },
      "title": "PromptOverrides",
      "type": "object"
    },
    "ReasoningConfigOverride": {
      "additionalProperties": false,
      "description": "Reasoning configuration for generation config overrides.\n\nAttributes:\n    enabled: Whether reasoning/extended thinking is enabled.\n    effort: Reasoning effort level (e.g., \"low\", \"medium\", \"high\").\n    budget: Maximum number of reasoning tokens allowed.\n    adaptive: When true, the model automatically decides whether to use extended thinking.",
      "properties": {
        "enabled": {
          "default": true,
          "description": "Whether reasoning is enabled.",
          "title": "Enabled",
          "type": "boolean"
        },
        "effort": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Reasoning effort level (e.g., 'low', 'medium', 'high').",
          "title": "Effort"
        },
        "budget": {
          "anyOf": [
            {
              "minimum": 1,
              "type": "integer"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Maximum number of reasoning tokens allowed.",
          "title": "Budget"
        },
        "adaptive": {
          "default": false,
          "description": "When true, the model automatically decides whether to use extended thinking.",
          "title": "Adaptive",
          "type": "boolean"
        }
      },
      "title": "ReasoningConfigOverride",
      "type": "object"
    },
    "ResponseFormatOverrides": {
      "additionalProperties": false,
      "description": "Overrideable response-format fields (dual-llm only).",
      "properties": {
        "strip_response_content": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "When True, returns only essential result value as plain text, stripping all metadata.",
          "title": "Strip Response Content"
        },
        "stream_thoughts": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to stream the model's thinking process in the response.",
          "title": "Stream Thoughts"
        },
        "include_program": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include the generated program in the response.",
          "title": "Include Program"
        },
        "include_policy_check_history": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include policy check results even when there are no violations.",
          "title": "Include Policy Check History"
        },
        "include_namespace_snapshot": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include snapshot of all variables after program execution.",
          "title": "Include Namespace Snapshot"
        }
      },
      "title": "ResponseFormatOverrides",
      "type": "object"
    },
    "RllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable RLLM prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "debug_info_level": {
          "anyOf": [
            {
              "enum": [
                "minimal",
                "normal",
                "extra"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Level of detail for debug/execution information in RLLM prompt.",
          "title": "Debug Info Level"
        }
      },
      "title": "RllmPromptOverrides",
      "type": "object"
    },
    "TllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable TLLM (tool-formulating LLM) prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "add_tool_description": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include tool descriptions in tool-filtering prompt.",
          "title": "Add Tool Description"
        },
        "add_tool_input_schema": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include tool input JSON schemas in tool-filtering prompt.",
          "title": "Add Tool Input Schema"
        }
      },
      "title": "TllmPromptOverrides",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Structured configuration header (``X-Config``).\n\nGroups overrides into FSM, prompt, response format, and per-LLM\ngeneration config sections.\n\nExample:\n    ```python\n    config = FineGrainedConfigHeader.dual_llm(\n        max_n_turns=10,\n        disable_rllm=False,\n        include_program=True,\n    )\n    config = FineGrainedConfigHeader.single_llm(max_n_turns=50)\n    ```",
  "properties": {
    "fsm": {
      "anyOf": [
        {
          "$ref": "#/$defs/FsmOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "FSM configuration overrides."
    },
    "prompt": {
      "anyOf": [
        {
          "$ref": "#/$defs/PromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Prompt configuration overrides for all LLMs."
    },
    "response_format": {
      "anyOf": [
        {
          "$ref": "#/$defs/ResponseFormatOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Response format configuration for dual-LLM sessions."
    },
    "llm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Per-LLM generation config overrides (temperature, reasoning, etc.)."
    }
  },
  "title": "FineGrainedConfigHeader",
  "type": "object"
}

Config:

  • extra: forbid

Fields:

single_llm classmethod

single_llm(
    min_num_tools_for_filtering: int | None = 10,
    clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
    max_n_turns: int | None = 50,
) -> FineGrainedConfigHeader

Create a Single LLM fine-grained configuration.

Source code in src/sequrity/control/types/headers.py
@classmethod
def single_llm(
    cls,
    min_num_tools_for_filtering: int | None = 10,
    clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
    max_n_turns: int | None = 50,
) -> FineGrainedConfigHeader:
    """Create a Single LLM fine-grained configuration.

    Args:
        min_num_tools_for_filtering: Minimum number of registered tools
            before the tool-filtering LLM step is enabled; ``None`` disables it.
        clear_session_meta: When to clear session meta information.
        max_n_turns: Turn limit for the session; ``None`` means unlimited.

    Returns:
        A header whose ``fsm`` section carries the given overrides.
    """
    fsm_overrides = FsmOverrides(
        min_num_tools_for_filtering=min_num_tools_for_filtering,
        clear_session_meta=clear_session_meta,
        max_n_turns=max_n_turns,
    )
    return cls(fsm=fsm_overrides)

dual_llm classmethod

dual_llm(
    min_num_tools_for_filtering: int | None = 10,
    clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
    max_n_turns: int | None = 5,
    clear_history_every_n_attempts: int | None = None,
    disable_rllm: bool | None = True,
    enable_multistep_planning: bool | None = None,
    enabled_internal_tools: list[InternalSqrtToolIdType] | None = None,
    prune_failed_steps: bool | None = None,
    force_to_cache: list[str] | None = None,
    max_pllm_steps: int | None = None,
    max_pllm_failed_steps: int | None = None,
    max_tool_calls_per_step: int | None = None,
    reduced_grammar_for_rllm_review: bool | None = None,
    retry_on_policy_violation: bool | None = None,
    pllm_debug_info_level: DebugInfoLevel | None = None,
    strip_response_content: bool | None = None,
    stream_thoughts: bool | None = None,
    include_program: bool | None = None,
    include_policy_check_history: bool | None = None,
    include_namespace_snapshot: bool | None = None,
) -> FineGrainedConfigHeader

Create a Dual LLM fine-grained configuration.

Source code in src/sequrity/control/types/headers.py
@classmethod
def dual_llm(
    cls,
    # FSM shared
    min_num_tools_for_filtering: int | None = 10,
    clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
    max_n_turns: int | None = 5,
    # FSM dual-llm only
    clear_history_every_n_attempts: int | None = None,
    disable_rllm: bool | None = True,
    enable_multistep_planning: bool | None = None,
    enabled_internal_tools: list[InternalSqrtToolIdType] | None = None,
    prune_failed_steps: bool | None = None,
    force_to_cache: list[str] | None = None,
    max_pllm_steps: int | None = None,
    max_pllm_failed_steps: int | None = None,
    max_tool_calls_per_step: int | None = None,
    reduced_grammar_for_rllm_review: bool | None = None,
    retry_on_policy_violation: bool | None = None,
    # Prompt
    pllm_debug_info_level: DebugInfoLevel | None = None,
    # Response format
    strip_response_content: bool | None = None,
    stream_thoughts: bool | None = None,
    include_program: bool | None = None,
    include_policy_check_history: bool | None = None,
    include_namespace_snapshot: bool | None = None,
) -> FineGrainedConfigHeader:
    """Create a Dual LLM fine-grained configuration.

    The FSM arguments are always forwarded into an ``FsmOverrides``
    section. A ``prompt`` section is emitted only when
    ``pllm_debug_info_level`` is set, and a ``response_format`` section
    only when at least one of its five flags is not ``None``.
    """
    fsm_overrides = FsmOverrides(
        min_num_tools_for_filtering=min_num_tools_for_filtering,
        clear_session_meta=clear_session_meta,
        max_n_turns=max_n_turns,
        clear_history_every_n_attempts=clear_history_every_n_attempts,
        disable_rllm=disable_rllm,
        enable_multistep_planning=enable_multistep_planning,
        enabled_internal_tools=enabled_internal_tools,
        prune_failed_steps=prune_failed_steps,
        force_to_cache=force_to_cache,
        max_pllm_steps=max_pllm_steps,
        max_pllm_failed_steps=max_pllm_failed_steps,
        max_tool_calls_per_step=max_tool_calls_per_step,
        reduced_grammar_for_rllm_review=reduced_grammar_for_rllm_review,
        retry_on_policy_violation=retry_on_policy_violation,
    )

    # Prompt section is only present when a debug level was requested.
    prompt_overrides = (
        PromptOverrides(pllm=PllmPromptOverrides(debug_info_level=pllm_debug_info_level))
        if pllm_debug_info_level is not None
        else None
    )

    # Collect the response-format flags; emit the section only when at
    # least one flag was explicitly provided.
    response_kwargs = {
        "strip_response_content": strip_response_content,
        "stream_thoughts": stream_thoughts,
        "include_program": include_program,
        "include_policy_check_history": include_policy_check_history,
        "include_namespace_snapshot": include_namespace_snapshot,
    }
    response_overrides = None
    if any(value is not None for value in response_kwargs.values()):
        response_overrides = ResponseFormatOverrides(**response_kwargs)

    return cls(
        fsm=fsm_overrides,
        prompt=prompt_overrides,
        response_format=response_overrides,
    )

dump_for_headers

dump_for_headers(
    mode: Literal["json_str"] = ..., *, overrides: dict[str, Any] | None = ...
) -> str
dump_for_headers(
    mode: Literal["json"], *, overrides: dict[str, Any] | None = ...
) -> dict
dump_for_headers(
    mode: Literal["json", "json_str"] = "json_str",
    *,
    overrides: dict[str, Any] | None = None,
) -> dict | str

Serialize for use as the X-Config HTTP header value.

Parameters:

  • mode

    (Literal['json', 'json_str'], default: 'json_str' ) –

    Output format — "json" for a dict, "json_str" for a JSON string.

  • overrides

    (dict[str, Any] | None, default: None ) –

    Optional dict to deep-merge into the serialized output. Nested dicts are merged recursively; non-dict values replace existing ones. This lets you inject fields the Pydantic model doesn't define while keeping extra="forbid" validation at construction time.

Example
config = FineGrainedConfigHeader.dual_llm(max_n_turns=5)
header_json = config.dump_for_headers(overrides={
    "fsm": {
        "max_n_turns": 20,              # override existing
        "custom_beta_flag": True,       # add custom field that is not defined on the model
    },
    "prompt": {
        "pllm": {"debug_info_level": "extra"},  # nested override
    },
    "experimental_section": {"key": "value"},   # new top-level key
})
Source code in src/sequrity/control/types/headers.py
def dump_for_headers(
    self, mode: Literal["json", "json_str"] = "json_str", *, overrides: dict[str, Any] | None = None
) -> dict | str:
    """Serialize for use as the ``X-Config`` HTTP header value.

    Args:
        mode: Output format — ``"json"`` yields a dict, ``"json_str"``
            yields a JSON string.
        overrides: Optional dict that is deep-merged into the serialized
            output. Nested dicts merge recursively; non-dict values
            replace existing ones. This allows injecting fields the
            Pydantic model doesn't define while keeping
            ``extra="forbid"`` validation at construction time.

    Raises:
        ValueError: If ``mode`` is not one of the supported values.

    Example:
        ```python
        config = FineGrainedConfigHeader.dual_llm(max_n_turns=5)
        header_json = config.dump_for_headers(overrides={
            "fsm": {
                "max_n_turns": 20,              # override existing
                "custom_beta_flag": True,       # add custom field that is not defined on the model
            },
            "prompt": {
                "pllm": {"debug_info_level": "extra"},  # nested override
            },
            "experimental_section": {"key": "value"},   # new top-level key
        })
        ```
    """
    # None-valued fields are dropped so only explicit overrides travel
    # over the wire.
    payload = self.model_dump(mode="json", exclude_none=True)
    if overrides:
        # _deep_merge mutates `payload` in place with the caller's extras.
        _deep_merge(payload, overrides)
    if mode == "json_str":
        return json.dumps(payload)
    if mode == "json":
        return payload
    raise ValueError(f"Unsupported mode for dump_for_headers: {mode}")

FsmOverrides pydantic-model

Overrideable FSM fields (shared + dual-llm-only).

Single-LLM configs silently ignore dual-llm-only fields.

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Overrideable FSM fields (shared + dual-llm-only).\n\nSingle-LLM configs silently ignore dual-llm-only fields.",
  "properties": {
    "min_num_tools_for_filtering": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.",
      "title": "Min Num Tools For Filtering"
    },
    "clear_session_meta": {
      "anyOf": [
        {
          "enum": [
            "never",
            "every_attempt",
            "every_turn"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.",
      "title": "Clear Session Meta"
    },
    "max_n_turns": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Maximum number of turns allowed in the session. If None, unlimited turns are allowed.",
      "title": "Max N Turns"
    },
    "history_mismatch_policy": {
      "anyOf": [
        {
          "enum": [
            "reject",
            "restart_turn",
            "continue"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.",
      "title": "History Mismatch Policy"
    },
    "clear_history_every_n_attempts": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Single-step mode only. Clear all failed step history every N attempts to save tokens.",
      "title": "Clear History Every N Attempts"
    },
    "disable_rllm": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to skip the response LLM (RLLM) review step.",
      "title": "Disable Rllm"
    },
    "enable_multistep_planning": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.",
      "title": "Enable Multistep Planning"
    },
    "enabled_internal_tools": {
      "anyOf": [
        {
          "items": {
            "enum": [
              "parse_with_ai",
              "verify_hypothesis",
              "set_policy",
              "complete_turn"
            ],
            "type": "string"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "List of internal tool IDs available to planning LLM.",
      "title": "Enabled Internal Tools"
    },
    "prune_failed_steps": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Multi-step mode only. Remove failed steps from history after turn completes.",
      "title": "Prune Failed Steps"
    },
    "force_to_cache": {
      "anyOf": [
        {
          "items": {
            "type": "string"
          },
          "type": "array"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.",
      "title": "Force To Cache"
    },
    "max_pllm_steps": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Maximum number of steps allowed per turn.",
      "title": "Max Pllm Steps"
    },
    "max_pllm_failed_steps": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Maximum number of failed steps allowed per turn.",
      "title": "Max Pllm Failed Steps"
    },
    "max_tool_calls_per_step": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.",
      "title": "Max Tool Calls Per Step"
    },
    "reduced_grammar_for_rllm_review": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.",
      "title": "Reduced Grammar For Rllm Review"
    },
    "retry_on_policy_violation": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "When True, allow planning LLM to retry after policy violation.",
      "title": "Retry On Policy Violation"
    },
    "wrap_tool_result": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to wrap tool results in Ok/Err types.",
      "title": "Wrap Tool Result"
    },
    "detect_tool_errors": {
      "anyOf": [
        {
          "enum": [
            "none",
            "regex",
            "llm"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.",
      "title": "Detect Tool Errors"
    },
    "detect_tool_error_regex_pattern": {
      "anyOf": [
        {
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.",
      "title": "Detect Tool Error Regex Pattern"
    },
    "detect_tool_error_max_result_length": {
      "anyOf": [
        {
          "type": "integer"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.",
      "title": "Detect Tool Error Max Result Length"
    },
    "strict_tool_result_parsing": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.",
      "title": "Strict Tool Result Parsing"
    },
    "tool_result_transform": {
      "anyOf": [
        {
          "enum": [
            "none",
            "codex"
          ],
          "type": "string"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.",
      "title": "Tool Result Transform"
    }
  },
  "title": "FsmOverrides",
  "type": "object"
}

Config:

  • extra: forbid

Fields:

min_num_tools_for_filtering pydantic-field

min_num_tools_for_filtering: int | None = None

Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.

clear_session_meta pydantic-field

clear_session_meta: Literal['never', 'every_attempt', 'every_turn'] | None = None

When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.

max_n_turns pydantic-field

max_n_turns: int | None = None

Maximum number of turns allowed in the session. If None, unlimited turns are allowed.

history_mismatch_policy pydantic-field

history_mismatch_policy: Literal['reject', 'restart_turn', 'continue'] | None = None

Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.

clear_history_every_n_attempts pydantic-field

clear_history_every_n_attempts: int | None = None

Single-step mode only. Clear all failed step history every N attempts to save tokens.

disable_rllm pydantic-field

disable_rllm: bool | None = None

Whether to skip the response LLM (RLLM) review step.

enable_multistep_planning pydantic-field

enable_multistep_planning: bool | None = None

When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.

enabled_internal_tools pydantic-field

enabled_internal_tools: list[InternalSqrtToolIdType] | None = None

List of internal tool IDs available to planning LLM.

prune_failed_steps pydantic-field

prune_failed_steps: bool | None = None

Multi-step mode only. Remove failed steps from history after turn completes.

force_to_cache pydantic-field

force_to_cache: list[str] | None = None

List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.

max_pllm_steps pydantic-field

max_pllm_steps: int | None = None

Maximum number of steps allowed per turn.

max_pllm_failed_steps pydantic-field

max_pllm_failed_steps: int | None = None

Maximum number of failed steps allowed per turn.

max_tool_calls_per_step pydantic-field

max_tool_calls_per_step: int | None = None

Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.

reduced_grammar_for_rllm_review pydantic-field

reduced_grammar_for_rllm_review: bool | None = None

Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.

retry_on_policy_violation pydantic-field

retry_on_policy_violation: bool | None = None

When True, allow planning LLM to retry after policy violation.

wrap_tool_result pydantic-field

wrap_tool_result: bool | None = None

Whether to wrap tool results in Ok/Err types.

detect_tool_errors pydantic-field

detect_tool_errors: Literal['none', 'regex', 'llm'] | None = None

Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.

detect_tool_error_regex_pattern pydantic-field

detect_tool_error_regex_pattern: str | None = None

The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.

detect_tool_error_max_result_length pydantic-field

detect_tool_error_max_result_length: int | None = None

The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.

strict_tool_result_parsing pydantic-field

strict_tool_result_parsing: bool | None = None

If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.

tool_result_transform pydantic-field

tool_result_transform: Literal['none', 'codex'] | None = None

Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.

PromptOverrides pydantic-model

Prompt overrides for all LLMs.

Show JSON schema:
{
  "$defs": {
    "LlmPromptOverrides": {
      "additionalProperties": false,
      "description": "Base overrides for other LLM types (GRLLM, QLLM, etc.).",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        }
      },
      "title": "LlmPromptOverrides",
      "type": "object"
    },
    "PllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable PLLM prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal', 'code').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "clarify_ambiguous_queries": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether planning LLM is allowed to ask for clarification on ambiguous queries.",
          "title": "Clarify Ambiguous Queries"
        },
        "context_var_visibility": {
          "anyOf": [
            {
              "enum": [
                "none",
                "basic-notext",
                "basic-executable",
                "all-executable",
                "all"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "The visibility level of context variables in the PLLM prompts. 'none': do not show any; 'basic-notext': show basic types but not text; 'basic-executable': show basic types and executable memory variables; 'all-executable': show all executable memory variables; 'all': show all.",
          "title": "Context Var Visibility"
        },
        "query_inline_roles": {
          "anyOf": [
            {
              "items": {
                "enum": [
                  "assistant",
                  "tool",
                  "developer",
                  "system"
                ],
                "type": "string"
              },
              "type": "array"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "List of roles whose messages will be inlined into the user query.",
          "title": "Query Inline Roles"
        },
        "query_role_name_overrides": {
          "anyOf": [
            {
              "additionalProperties": {
                "type": "string"
              },
              "type": "object"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Overrides for message role names in the inlined user query. For example, {'assistant': 'developer'} will change the role of assistant messages to developer.",
          "title": "Query Role Name Overrides"
        },
        "query_include_tool_calls": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include upstream tool calls in inlined query.",
          "title": "Query Include Tool Calls"
        },
        "query_include_tool_args": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include arguments of upstream tool calls.",
          "title": "Query Include Tool Args"
        },
        "query_include_tool_results": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include results of upstream tool calls.",
          "title": "Query Include Tool Results"
        },
        "debug_info_level": {
          "anyOf": [
            {
              "enum": [
                "minimal",
                "normal",
                "extra"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Level of detail for debug/execution information in planning LLM prompt.",
          "title": "Debug Info Level"
        }
      },
      "title": "PllmPromptOverrides",
      "type": "object"
    },
    "RllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable RLLM prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "debug_info_level": {
          "anyOf": [
            {
              "enum": [
                "minimal",
                "normal",
                "extra"
              ],
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Level of detail for debug/execution information in RLLM prompt.",
          "title": "Debug Info Level"
        }
      },
      "title": "RllmPromptOverrides",
      "type": "object"
    },
    "TllmPromptOverrides": {
      "additionalProperties": false,
      "description": "Overrideable TLLM (tool-formulating LLM) prompt fields.",
      "properties": {
        "flavor": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template variant to use (e.g., 'universal').",
          "title": "Flavor"
        },
        "version": {
          "anyOf": [
            {
              "type": "string"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Prompt template version. Combined with flavor to load template.",
          "title": "Version"
        },
        "add_tool_description": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include tool descriptions in tool-filtering prompt.",
          "title": "Add Tool Description"
        },
        "add_tool_input_schema": {
          "anyOf": [
            {
              "type": "boolean"
            },
            {
              "type": "null"
            }
          ],
          "default": null,
          "description": "Whether to include tool input JSON schemas in tool-filtering prompt.",
          "title": "Add Tool Input Schema"
        }
      },
      "title": "TllmPromptOverrides",
      "type": "object"
    }
  },
  "additionalProperties": false,
  "description": "Prompt overrides for all LLMs.",
  "properties": {
    "pllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/PllmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the planning LLM prompt."
    },
    "rllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/RllmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the review LLM prompt."
    },
    "grllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the GRLLM prompt."
    },
    "qllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the QLLM prompt."
    },
    "tllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/TllmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the tool-formulating LLM prompt."
    },
    "tagllm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the tag LLM prompt."
    },
    "policy_llm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the policy LLM prompt."
    },
    "error_detector_llm": {
      "anyOf": [
        {
          "$ref": "#/$defs/LlmPromptOverrides"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Configuration for the error detector LLM prompt."
    }
  },
  "title": "PromptOverrides",
  "type": "object"
}

Config:

  • extra: forbid

Fields:

  • pllm (PllmPromptOverrides | None)
  • rllm (RllmPromptOverrides | None)
  • grllm (LlmPromptOverrides | None)
  • qllm (LlmPromptOverrides | None)
  • tllm (TllmPromptOverrides | None)
  • tagllm (LlmPromptOverrides | None)
  • policy_llm (LlmPromptOverrides | None)
  • error_detector_llm (LlmPromptOverrides | None)

pllm pydantic-field

pllm: PllmPromptOverrides | None = None

Configuration for the planning LLM prompt.

rllm pydantic-field

rllm: RllmPromptOverrides | None = None

Configuration for the review LLM prompt.

grllm pydantic-field

grllm: LlmPromptOverrides | None = None

Configuration for the GRLLM prompt.

qllm pydantic-field

qllm: LlmPromptOverrides | None = None

Configuration for the QLLM prompt.

tllm pydantic-field

tllm: TllmPromptOverrides | None = None

Configuration for the tool-formulating LLM prompt.

tagllm pydantic-field

tagllm: LlmPromptOverrides | None = None

Configuration for the tag LLM prompt.

policy_llm pydantic-field

policy_llm: LlmPromptOverrides | None = None

Configuration for the policy LLM prompt.

error_detector_llm pydantic-field

error_detector_llm: LlmPromptOverrides | None = None

Configuration for the error detector LLM prompt.

ResponseFormatOverrides pydantic-model

Overrideable response-format fields (dual-llm only).

Show JSON schema:
{
  "additionalProperties": false,
  "description": "Overrideable response-format fields (dual-llm only).",
  "properties": {
    "strip_response_content": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "When True, returns only essential result value as plain text, stripping all metadata.",
      "title": "Strip Response Content"
    },
    "stream_thoughts": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to stream the model's thinking process in the response.",
      "title": "Stream Thoughts"
    },
    "include_program": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to include the generated program in the response.",
      "title": "Include Program"
    },
    "include_policy_check_history": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to include policy check results even when there are no violations.",
      "title": "Include Policy Check History"
    },
    "include_namespace_snapshot": {
      "anyOf": [
        {
          "type": "boolean"
        },
        {
          "type": "null"
        }
      ],
      "default": null,
      "description": "Whether to include snapshot of all variables after program execution.",
      "title": "Include Namespace Snapshot"
    }
  },
  "title": "ResponseFormatOverrides",
  "type": "object"
}

Config:

  • extra: forbid

Fields:

  • strip_response_content (bool | None)
  • stream_thoughts (bool | None)
  • include_program (bool | None)
  • include_policy_check_history (bool | None)
  • include_namespace_snapshot (bool | None)

strip_response_content pydantic-field

strip_response_content: bool | None = None

When True, returns only essential result value as plain text, stripping all metadata.

stream_thoughts pydantic-field

stream_thoughts: bool | None = None

Whether to stream the model's thinking process in the response.

include_program pydantic-field

include_program: bool | None = None

Whether to include the generated program in the response.

include_policy_check_history pydantic-field

include_policy_check_history: bool | None = None

Whether to include policy check results even when there are no violations.

include_namespace_snapshot pydantic-field

include_namespace_snapshot: bool | None = None

Whether to include snapshot of all variables after program execution.