Fine-Grained Configuration / X-Config
Advanced configuration for session behavior, prompt settings, and response formatting.
FineGrainedConfigHeader
pydantic-model
Structured configuration header (X-Config).
Groups overrides into FSM, prompt, response format, and per-LLM generation config sections.
Example
Show JSON schema:
{
"$defs": {
"FsmOverrides": {
"additionalProperties": false,
"description": "Overrideable FSM fields (shared + dual-llm-only).\n\nSingle-LLM configs silently ignore dual-llm-only fields.",
"properties": {
"min_num_tools_for_filtering": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.",
"title": "Min Num Tools For Filtering"
},
"clear_session_meta": {
"anyOf": [
{
"enum": [
"never",
"every_attempt",
"every_turn"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.",
"title": "Clear Session Meta"
},
"max_n_turns": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of turns allowed in the session. If None, unlimited turns are allowed.",
"title": "Max N Turns"
},
"history_mismatch_policy": {
"anyOf": [
{
"enum": [
"reject",
"restart_turn",
"continue"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.",
"title": "History Mismatch Policy"
},
"clear_history_every_n_attempts": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Single-step mode only. Clear all failed step history every N attempts to save tokens.",
"title": "Clear History Every N Attempts"
},
"disable_rllm": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to skip the response LLM (RLLM) review step.",
"title": "Disable Rllm"
},
"enable_multistep_planning": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.",
"title": "Enable Multistep Planning"
},
"enabled_internal_tools": {
"anyOf": [
{
"items": {
"enum": [
"parse_with_ai",
"verify_hypothesis",
"set_policy",
"complete_turn"
],
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of internal tool IDs available to planning LLM.",
"title": "Enabled Internal Tools"
},
"prune_failed_steps": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Multi-step mode only. Remove failed steps from history after turn completes.",
"title": "Prune Failed Steps"
},
"force_to_cache": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.",
"title": "Force To Cache"
},
"max_pllm_steps": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of steps allowed per turn.",
"title": "Max Pllm Steps"
},
"max_pllm_failed_steps": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of failed steps allowed per turn.",
"title": "Max Pllm Failed Steps"
},
"max_tool_calls_per_step": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.",
"title": "Max Tool Calls Per Step"
},
"reduced_grammar_for_rllm_review": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.",
"title": "Reduced Grammar For Rllm Review"
},
"retry_on_policy_violation": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When True, allow planning LLM to retry after policy violation.",
"title": "Retry On Policy Violation"
},
"wrap_tool_result": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to wrap tool results in Ok/Err types.",
"title": "Wrap Tool Result"
},
"detect_tool_errors": {
"anyOf": [
{
"enum": [
"none",
"regex",
"llm"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.",
"title": "Detect Tool Errors"
},
"detect_tool_error_regex_pattern": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.",
"title": "Detect Tool Error Regex Pattern"
},
"detect_tool_error_max_result_length": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.",
"title": "Detect Tool Error Max Result Length"
},
"strict_tool_result_parsing": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.",
"title": "Strict Tool Result Parsing"
},
"tool_result_transform": {
"anyOf": [
{
"enum": [
"none",
"codex"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.",
"title": "Tool Result Transform"
}
},
"title": "FsmOverrides",
"type": "object"
},
"GenerationConfigOverrides": {
"additionalProperties": false,
"description": "Overrideable generation-config fields (all optional).\n\nOnly fields explicitly set are merged into the target LLM's generation\nconfig; unset fields keep their existing values.\n\nAttributes:\n frequency_penalty: Penalize tokens based on existing frequency (-2.0 to 2.0).\n logit_bias: Modify likelihood of specified tokens.\n logprobs: Whether to return log probabilities of output tokens.\n max_completion_tokens: Upper bound for generated tokens.\n presence_penalty: Penalize tokens based on presence in text so far (-2.0 to 2.0).\n seed: Seed for deterministic sampling.\n temperature: Sampling temperature (0.0 to 2.0).\n top_p: Nucleus sampling threshold.\n reasoning: Reasoning/extended thinking configuration.",
"properties": {
"frequency_penalty": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"description": "Frequency penalty (-2.0 to 2.0).",
"title": "Frequency Penalty"
},
"logit_bias": {
"anyOf": [
{
"additionalProperties": {
"type": "integer"
},
"type": "object"
},
{
"type": "null"
}
],
"default": null,
"description": "Token likelihood modifications.",
"title": "Logit Bias"
},
"logprobs": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to return log probabilities.",
"title": "Logprobs"
},
"max_completion_tokens": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Upper bound for generated tokens.",
"title": "Max Completion Tokens"
},
"presence_penalty": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"description": "Presence penalty (-2.0 to 2.0).",
"title": "Presence Penalty"
},
"seed": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Seed for deterministic sampling.",
"title": "Seed"
},
"temperature": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"description": "Sampling temperature (0.0 to 2.0).",
"title": "Temperature"
},
"top_p": {
"anyOf": [
{
"type": "number"
},
{
"type": "null"
}
],
"default": null,
"description": "Nucleus sampling threshold.",
"title": "Top P"
},
"reasoning": {
"anyOf": [
{
"$ref": "#/$defs/ReasoningConfigOverride"
},
{
"type": "null"
}
],
"default": null,
"description": "Reasoning/thinking configuration."
}
},
"title": "GenerationConfigOverrides",
"type": "object"
},
"LlmOverrides": {
"additionalProperties": false,
"description": "Per-LLM generation config overrides.\n\nEach LLM slot can have its generation parameters overridden independently\nvia the ``llm`` section of the ``X-Config`` header. If an LLM's override\nis ``None``, that LLM's generation config remains unchanged.\n\nExample:\n ```python\n from sequrity.control import FineGrainedConfigHeader, LlmOverrides, GenerationConfigOverrides\n\n config = FineGrainedConfigHeader(\n llm=LlmOverrides(\n pllm=GenerationConfigOverrides(temperature=0.5, reasoning=ReasoningConfigOverride(enabled=True, effort=\"high\")),\n rllm=GenerationConfigOverrides(max_completion_tokens=512),\n ),\n )\n ```",
"properties": {
"pllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Planning LLM generation config."
},
"rllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Review LLM generation config."
},
"grllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Grammar-constrained LLM generation config."
},
"qllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Query LLM generation config."
},
"tllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Tool filtering LLM generation config."
},
"tagllm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Tag LLM generation config."
},
"policy_llm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Policy LLM generation config."
},
"tool_result_error_detector_llm": {
"anyOf": [
{
"$ref": "#/$defs/GenerationConfigOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Tool result error detector LLM generation config."
}
},
"title": "LlmOverrides",
"type": "object"
},
"LlmPromptOverrides": {
"additionalProperties": false,
"description": "Base overrides for other LLM types (GRLLM, QLLM, etc.).",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
}
},
"title": "LlmPromptOverrides",
"type": "object"
},
"PllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable PLLM prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal', 'code').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"clarify_ambiguous_queries": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether planning LLM is allowed to ask for clarification on ambiguous queries.",
"title": "Clarify Ambiguous Queries"
},
"context_var_visibility": {
"anyOf": [
{
"enum": [
"none",
"basic-notext",
"basic-executable",
"all-executable",
"all"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "The visibility level of context variables in the PLLM prompts. 'none': do not show any; 'basic-notext': show basic types but not text; 'basic-executable': show basic types and executable memory variables; 'all-executable': show all executable memory variables; 'all': show all.",
"title": "Context Var Visibility"
},
"query_inline_roles": {
"anyOf": [
{
"items": {
"enum": [
"assistant",
"tool",
"developer",
"system"
],
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of roles whose messages will be inlined into the user query.",
"title": "Query Inline Roles"
},
"query_role_name_overrides": {
"anyOf": [
{
"additionalProperties": {
"type": "string"
},
"type": "object"
},
{
"type": "null"
}
],
"default": null,
"description": "Overrides for message role names in the inlined user query. For example, {'assistant': 'developer'} will change the role of assistant messages to developer.",
"title": "Query Role Name Overrides"
},
"query_include_tool_calls": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include upstream tool calls in inlined query.",
"title": "Query Include Tool Calls"
},
"query_include_tool_args": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include arguments of upstream tool calls.",
"title": "Query Include Tool Args"
},
"query_include_tool_results": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include results of upstream tool calls.",
"title": "Query Include Tool Results"
},
"debug_info_level": {
"anyOf": [
{
"enum": [
"minimal",
"normal",
"extra"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Level of detail for debug/execution information in planning LLM prompt.",
"title": "Debug Info Level"
}
},
"title": "PllmPromptOverrides",
"type": "object"
},
"PromptOverrides": {
"additionalProperties": false,
"description": "Prompt overrides for all LLMs.",
"properties": {
"pllm": {
"anyOf": [
{
"$ref": "#/$defs/PllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the planning LLM prompt."
},
"rllm": {
"anyOf": [
{
"$ref": "#/$defs/RllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the review LLM prompt."
},
"grllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the GRLLM prompt."
},
"qllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the QLLM prompt."
},
"tllm": {
"anyOf": [
{
"$ref": "#/$defs/TllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the tool-formulating LLM prompt."
},
"tagllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the tag LLM prompt."
},
"policy_llm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the policy LLM prompt."
},
"error_detector_llm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the error detector LLM prompt."
}
},
"title": "PromptOverrides",
"type": "object"
},
"ReasoningConfigOverride": {
"additionalProperties": false,
"description": "Reasoning configuration for generation config overrides.\n\nAttributes:\n enabled: Whether reasoning/extended thinking is enabled.\n effort: Reasoning effort level (e.g., \"low\", \"medium\", \"high\").\n budget: Maximum number of reasoning tokens allowed.\n adaptive: When true, the model automatically decides whether to use extended thinking.",
"properties": {
"enabled": {
"default": true,
"description": "Whether reasoning is enabled.",
"title": "Enabled",
"type": "boolean"
},
"effort": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Reasoning effort level (e.g., 'low', 'medium', 'high').",
"title": "Effort"
},
"budget": {
"anyOf": [
{
"minimum": 1,
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of reasoning tokens allowed.",
"title": "Budget"
},
"adaptive": {
"default": false,
"description": "When true, the model automatically decides whether to use extended thinking.",
"title": "Adaptive",
"type": "boolean"
}
},
"title": "ReasoningConfigOverride",
"type": "object"
},
"ResponseFormatOverrides": {
"additionalProperties": false,
"description": "Overrideable response-format fields (dual-llm only).",
"properties": {
"strip_response_content": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When True, returns only essential result value as plain text, stripping all metadata.",
"title": "Strip Response Content"
},
"stream_thoughts": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to stream the model's thinking process in the response.",
"title": "Stream Thoughts"
},
"include_program": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include the generated program in the response.",
"title": "Include Program"
},
"include_policy_check_history": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include policy check results even when there are no violations.",
"title": "Include Policy Check History"
},
"include_namespace_snapshot": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include snapshot of all variables after program execution.",
"title": "Include Namespace Snapshot"
}
},
"title": "ResponseFormatOverrides",
"type": "object"
},
"RllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable RLLM prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"debug_info_level": {
"anyOf": [
{
"enum": [
"minimal",
"normal",
"extra"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Level of detail for debug/execution information in RLLM prompt.",
"title": "Debug Info Level"
}
},
"title": "RllmPromptOverrides",
"type": "object"
},
"TllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable TLLM (tool-formulating LLM) prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"add_tool_description": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include tool descriptions in tool-filtering prompt.",
"title": "Add Tool Description"
},
"add_tool_input_schema": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include tool input JSON schemas in tool-filtering prompt.",
"title": "Add Tool Input Schema"
}
},
"title": "TllmPromptOverrides",
"type": "object"
}
},
"additionalProperties": false,
"description": "Structured configuration header (``X-Config``).\n\nGroups overrides into FSM, prompt, response format, and per-LLM\ngeneration config sections.\n\nExample:\n ```python\n config = FineGrainedConfigHeader.dual_llm(\n max_n_turns=10,\n disable_rllm=False,\n include_program=True,\n )\n config = FineGrainedConfigHeader.single_llm(max_n_turns=50)\n ```",
"properties": {
"fsm": {
"anyOf": [
{
"$ref": "#/$defs/FsmOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "FSM configuration overrides."
},
"prompt": {
"anyOf": [
{
"$ref": "#/$defs/PromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt configuration overrides for all LLMs."
},
"response_format": {
"anyOf": [
{
"$ref": "#/$defs/ResponseFormatOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Response format configuration for dual-LLM sessions."
},
"llm": {
"anyOf": [
{
"$ref": "#/$defs/LlmOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Per-LLM generation config overrides (temperature, reasoning, etc.)."
}
},
"title": "FineGrainedConfigHeader",
"type": "object"
}
Config:
extra: forbid
Fields:
- fsm (FsmOverrides | None)
- prompt (PromptOverrides | None)
- response_format (ResponseFormatOverrides | None)
- llm (LlmOverrides | None)
single_llm
classmethod
single_llm(
min_num_tools_for_filtering: int | None = 10,
clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
max_n_turns: int | None = 50,
) -> FineGrainedConfigHeader
Create a Single LLM fine-grained configuration.
Source code in src/sequrity/control/types/headers.py
dual_llm
classmethod
dual_llm(
min_num_tools_for_filtering: int | None = 10,
clear_session_meta: Literal["never", "every_attempt", "every_turn"] = "never",
max_n_turns: int | None = 5,
clear_history_every_n_attempts: int | None = None,
disable_rllm: bool | None = True,
enable_multistep_planning: bool | None = None,
enabled_internal_tools: list[InternalSqrtToolIdType] | None = None,
prune_failed_steps: bool | None = None,
force_to_cache: list[str] | None = None,
max_pllm_steps: int | None = None,
max_pllm_failed_steps: int | None = None,
max_tool_calls_per_step: int | None = None,
reduced_grammar_for_rllm_review: bool | None = None,
retry_on_policy_violation: bool | None = None,
pllm_debug_info_level: DebugInfoLevel | None = None,
strip_response_content: bool | None = None,
stream_thoughts: bool | None = None,
include_program: bool | None = None,
include_policy_check_history: bool | None = None,
include_namespace_snapshot: bool | None = None,
) -> FineGrainedConfigHeader
Create a Dual LLM fine-grained configuration.
Source code in src/sequrity/control/types/headers.py
(source line numbers omitted; see src/sequrity/control/types/headers.py)
dump_for_headers
dump_for_headers(
mode: Literal["json", "json_str"] = "json_str",
*,
overrides: dict[str, Any] | None = None,
) -> dict | str
Serialize for use as the X-Config HTTP header value.
Parameters:
- mode (Literal['json', 'json_str'], default: 'json_str') – Output format: "json" for a dict, "json_str" for a JSON string.
- overrides (dict[str, Any] | None, default: None) – Optional dict to deep-merge into the serialized output. Nested dicts are merged recursively; non-dict values replace existing ones. This lets you inject fields the Pydantic model doesn't define while keeping extra="forbid" validation at construction time.
Example
config = FineGrainedConfigHeader.dual_llm(max_n_turns=5)
header_json = config.dump_for_headers(overrides={
"fsm": {
"max_n_turns": 20, # override existing
"custom_beta_flag": True, # add custom field that is not defined on the model
},
"prompt": {
"pllm": {"debug_info_level": "extra"}, # nested override
},
"experimental_section": {"key": "value"}, # new top-level key
})
Source code in src/sequrity/control/types/headers.py
FsmOverrides
pydantic-model
Overrideable FSM fields (shared + dual-llm-only).
Single-LLM configs silently ignore dual-llm-only fields.
Show JSON schema:
{
"additionalProperties": false,
"description": "Overrideable FSM fields (shared + dual-llm-only).\n\nSingle-LLM configs silently ignore dual-llm-only fields.",
"properties": {
"min_num_tools_for_filtering": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.",
"title": "Min Num Tools For Filtering"
},
"clear_session_meta": {
"anyOf": [
{
"enum": [
"never",
"every_attempt",
"every_turn"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.",
"title": "Clear Session Meta"
},
"max_n_turns": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of turns allowed in the session. If None, unlimited turns are allowed.",
"title": "Max N Turns"
},
"history_mismatch_policy": {
"anyOf": [
{
"enum": [
"reject",
"restart_turn",
"continue"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.",
"title": "History Mismatch Policy"
},
"clear_history_every_n_attempts": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Single-step mode only. Clear all failed step history every N attempts to save tokens.",
"title": "Clear History Every N Attempts"
},
"disable_rllm": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to skip the response LLM (RLLM) review step.",
"title": "Disable Rllm"
},
"enable_multistep_planning": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.",
"title": "Enable Multistep Planning"
},
"enabled_internal_tools": {
"anyOf": [
{
"items": {
"enum": [
"parse_with_ai",
"verify_hypothesis",
"set_policy",
"complete_turn"
],
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of internal tool IDs available to planning LLM.",
"title": "Enabled Internal Tools"
},
"prune_failed_steps": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Multi-step mode only. Remove failed steps from history after turn completes.",
"title": "Prune Failed Steps"
},
"force_to_cache": {
"anyOf": [
{
"items": {
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.",
"title": "Force To Cache"
},
"max_pllm_steps": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of steps allowed per turn.",
"title": "Max Pllm Steps"
},
"max_pllm_failed_steps": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of failed steps allowed per turn.",
"title": "Max Pllm Failed Steps"
},
"max_tool_calls_per_step": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.",
"title": "Max Tool Calls Per Step"
},
"reduced_grammar_for_rllm_review": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.",
"title": "Reduced Grammar For Rllm Review"
},
"retry_on_policy_violation": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When True, allow planning LLM to retry after policy violation.",
"title": "Retry On Policy Violation"
},
"wrap_tool_result": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to wrap tool results in Ok/Err types.",
"title": "Wrap Tool Result"
},
"detect_tool_errors": {
"anyOf": [
{
"enum": [
"none",
"regex",
"llm"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.",
"title": "Detect Tool Errors"
},
"detect_tool_error_regex_pattern": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.",
"title": "Detect Tool Error Regex Pattern"
},
"detect_tool_error_max_result_length": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"description": "The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.",
"title": "Detect Tool Error Max Result Length"
},
"strict_tool_result_parsing": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.",
"title": "Strict Tool Result Parsing"
},
"tool_result_transform": {
"anyOf": [
{
"enum": [
"none",
"codex"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.",
"title": "Tool Result Transform"
}
},
"title": "FsmOverrides",
"type": "object"
}
Config:
extra: forbid
Fields:
- min_num_tools_for_filtering (int | None)
- clear_session_meta (Literal['never', 'every_attempt', 'every_turn'] | None)
- max_n_turns (int | None)
- history_mismatch_policy (Literal['reject', 'restart_turn', 'continue'] | None)
- clear_history_every_n_attempts (int | None)
- disable_rllm (bool | None)
- enable_multistep_planning (bool | None)
- enabled_internal_tools (list[InternalSqrtToolIdType] | None)
- prune_failed_steps (bool | None)
- force_to_cache (list[str] | None)
- max_pllm_steps (int | None)
- max_pllm_failed_steps (int | None)
- max_tool_calls_per_step (int | None)
- reduced_grammar_for_rllm_review (bool | None)
- retry_on_policy_violation (bool | None)
- wrap_tool_result (bool | None)
- detect_tool_errors (Literal['none', 'regex', 'llm'] | None)
- detect_tool_error_regex_pattern (str | None)
- detect_tool_error_max_result_length (int | None)
- strict_tool_result_parsing (bool | None)
- tool_result_transform (Literal['none', 'codex'] | None)
min_num_tools_for_filtering
pydantic-field
min_num_tools_for_filtering: int | None = None
Minimum number of registered tools to enable tool-filtering LLM step. Set to None to disable.
clear_session_meta
pydantic-field
clear_session_meta: Literal['never', 'every_attempt', 'every_turn'] | None = None
When to clear session meta information. 'never': never clear; 'every_attempt': clear at the beginning of each PLLM attempt; 'every_turn': clear at the beginning of each turn.
max_n_turns
pydantic-field
max_n_turns: int | None = None
Maximum number of turns allowed in the session. If None, unlimited turns are allowed.
history_mismatch_policy
pydantic-field
history_mismatch_policy: Literal['reject', 'restart_turn', 'continue'] | None = None
Controls behaviour when incoming messages diverge from stored history in stateless mode. 'reject' rejects the request with an error, 'restart_turn' truncates stored history to the last consistent point and restarts the turn, 'continue' silently accepts the caller's version and continues.
clear_history_every_n_attempts
pydantic-field
clear_history_every_n_attempts: int | None = None
Single-step mode only. Clear all failed step history every N attempts to save tokens.
disable_rllm
pydantic-field
disable_rllm: bool | None = None
Whether to skip the response LLM (RLLM) review step.
enable_multistep_planning
pydantic-field
enable_multistep_planning: bool | None = None
When False (single-step), each attempt solves independently. When True (multi-step), each step builds on previous.
enabled_internal_tools
pydantic-field
enabled_internal_tools: list[InternalSqrtToolIdType] | None = None
List of internal tool IDs available to planning LLM.
prune_failed_steps
pydantic-field
prune_failed_steps: bool | None = None
Multi-step mode only. Remove failed steps from history after turn completes.
force_to_cache
pydantic-field
force_to_cache: list[str] | None = None
List of tool ID regex patterns to always cache their results regardless of the cache_tool_result setting.
max_pllm_steps
pydantic-field
max_pllm_steps: int | None = None
Maximum number of steps allowed per turn.
max_pllm_failed_steps
pydantic-field
max_pllm_failed_steps: int | None = None
Maximum number of failed steps allowed per turn.
max_tool_calls_per_step
pydantic-field
max_tool_calls_per_step: int | None = None
Maximum number of tool calls allowed per PLLM attempt. If None, no limit is enforced.
reduced_grammar_for_rllm_review
pydantic-field
reduced_grammar_for_rllm_review: bool | None = None
Whether to paraphrase RLLM output via reduced grammar before feeding back to planning LLM.
retry_on_policy_violation
pydantic-field
retry_on_policy_violation: bool | None = None
When True, allow planning LLM to retry after policy violation.
wrap_tool_result
pydantic-field
wrap_tool_result: bool | None = None
Whether to wrap tool results in Ok/Err types.
detect_tool_errors
pydantic-field
detect_tool_errors: Literal['none', 'regex', 'llm'] | None = None
Whether and how to detect errors in tool results. 'none': do not detect; 'regex': use regex patterns; 'llm': use an LLM to analyze tool results.
detect_tool_error_regex_pattern
pydantic-field
detect_tool_error_regex_pattern: str | None = None
The regex pattern to use for detecting error messages in tool results when detect_tool_errors is set to 'regex'.
detect_tool_error_max_result_length
pydantic-field
detect_tool_error_max_result_length: int | None = None
The maximum length of tool result to consider for error detection. Longer results will be truncated. If None, no limit is enforced.
strict_tool_result_parsing
pydantic-field
strict_tool_result_parsing: bool | None = None
If True, only parse external tool results as JSON when the tool declares an output_schema. When False, always attempt json.loads on tool results.
tool_result_transform
pydantic-field
tool_result_transform: Literal['none', 'codex'] | None = None
Transform applied to tool result values before error detection. 'none': no transform; 'codex': strip Codex CLI metadata prefix and extract exit code + output.
PromptOverrides
pydantic-model
Prompt overrides for all LLMs.
Show JSON schema:
{
"$defs": {
"LlmPromptOverrides": {
"additionalProperties": false,
"description": "Base overrides for other LLM types (GRLLM, QLLM, etc.).",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
}
},
"title": "LlmPromptOverrides",
"type": "object"
},
"PllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable PLLM prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal', 'code').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"clarify_ambiguous_queries": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether planning LLM is allowed to ask for clarification on ambiguous queries.",
"title": "Clarify Ambiguous Queries"
},
"context_var_visibility": {
"anyOf": [
{
"enum": [
"none",
"basic-notext",
"basic-executable",
"all-executable",
"all"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "The visibility level of context variables in the PLLM prompts. 'none': do not show any; 'basic-notext': show basic types but not text; 'basic-executable': show basic types and executable memory variables; 'all-executable': show all executable memory variables; 'all': show all.",
"title": "Context Var Visibility"
},
"query_inline_roles": {
"anyOf": [
{
"items": {
"enum": [
"assistant",
"tool",
"developer",
"system"
],
"type": "string"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"description": "List of roles whose messages will be inlined into the user query.",
"title": "Query Inline Roles"
},
"query_role_name_overrides": {
"anyOf": [
{
"additionalProperties": {
"type": "string"
},
"type": "object"
},
{
"type": "null"
}
],
"default": null,
"description": "Overrides for message role names in the inlined user query. For example, {'assistant': 'developer'} will change the role of assistant messages to developer.",
"title": "Query Role Name Overrides"
},
"query_include_tool_calls": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include upstream tool calls in inlined query.",
"title": "Query Include Tool Calls"
},
"query_include_tool_args": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include arguments of upstream tool calls.",
"title": "Query Include Tool Args"
},
"query_include_tool_results": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include results of upstream tool calls.",
"title": "Query Include Tool Results"
},
"debug_info_level": {
"anyOf": [
{
"enum": [
"minimal",
"normal",
"extra"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Level of detail for debug/execution information in planning LLM prompt.",
"title": "Debug Info Level"
}
},
"title": "PllmPromptOverrides",
"type": "object"
},
"RllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable RLLM prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"debug_info_level": {
"anyOf": [
{
"enum": [
"minimal",
"normal",
"extra"
],
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Level of detail for debug/execution information in RLLM prompt.",
"title": "Debug Info Level"
}
},
"title": "RllmPromptOverrides",
"type": "object"
},
"TllmPromptOverrides": {
"additionalProperties": false,
"description": "Overrideable TLLM (tool-formulating LLM) prompt fields.",
"properties": {
"flavor": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template variant to use (e.g., 'universal').",
"title": "Flavor"
},
"version": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Prompt template version. Combined with flavor to load template.",
"title": "Version"
},
"add_tool_description": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include tool descriptions in tool-filtering prompt.",
"title": "Add Tool Description"
},
"add_tool_input_schema": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include tool input JSON schemas in tool-filtering prompt.",
"title": "Add Tool Input Schema"
}
},
"title": "TllmPromptOverrides",
"type": "object"
}
},
"additionalProperties": false,
"description": "Prompt overrides for all LLMs.",
"properties": {
"pllm": {
"anyOf": [
{
"$ref": "#/$defs/PllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the planning LLM prompt."
},
"rllm": {
"anyOf": [
{
"$ref": "#/$defs/RllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the review LLM prompt."
},
"grllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the GRLLM prompt."
},
"qllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the QLLM prompt."
},
"tllm": {
"anyOf": [
{
"$ref": "#/$defs/TllmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the tool-formulating LLM prompt."
},
"tagllm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the tag LLM prompt."
},
"policy_llm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the policy LLM prompt."
},
"error_detector_llm": {
"anyOf": [
{
"$ref": "#/$defs/LlmPromptOverrides"
},
{
"type": "null"
}
],
"default": null,
"description": "Configuration for the error detector LLM prompt."
}
},
"title": "PromptOverrides",
"type": "object"
}
Config:
extra:forbid
Fields:
-
pllm(PllmPromptOverrides | None) -
rllm(RllmPromptOverrides | None) -
grllm(LlmPromptOverrides | None) -
qllm(LlmPromptOverrides | None) -
tllm(TllmPromptOverrides | None) -
tagllm(LlmPromptOverrides | None) -
policy_llm(LlmPromptOverrides | None) -
error_detector_llm(LlmPromptOverrides | None)
pllm
pydantic-field
Configuration for the planning LLM prompt.
rllm
pydantic-field
Configuration for the review LLM prompt.
grllm
pydantic-field
Configuration for the GRLLM prompt.
qllm
pydantic-field
Configuration for the QLLM prompt.
tllm
pydantic-field
Configuration for the tool-formulating LLM prompt.
tagllm
pydantic-field
Configuration for the tag LLM prompt.
policy_llm
pydantic-field
Configuration for the policy LLM prompt.
error_detector_llm
pydantic-field
Configuration for the error detector LLM prompt.
ResponseFormatOverrides
pydantic-model
Overrideable response-format fields (dual-llm only).
Show JSON schema:
{
"additionalProperties": false,
"description": "Overrideable response-format fields (dual-llm only).",
"properties": {
"strip_response_content": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "When True, returns only essential result value as plain text, stripping all metadata.",
"title": "Strip Response Content"
},
"stream_thoughts": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to stream the model's thinking process in the response.",
"title": "Stream Thoughts"
},
"include_program": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include the generated program in the response.",
"title": "Include Program"
},
"include_policy_check_history": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include policy check results even when there are no violations.",
"title": "Include Policy Check History"
},
"include_namespace_snapshot": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "null"
}
],
"default": null,
"description": "Whether to include snapshot of all variables after program execution.",
"title": "Include Namespace Snapshot"
}
},
"title": "ResponseFormatOverrides",
"type": "object"
}
Config:
extra:forbid
Fields:
-
strip_response_content(bool | None) -
stream_thoughts(bool | None) -
include_program(bool | None) -
include_policy_check_history(bool | None) -
include_namespace_snapshot(bool | None)
strip_response_content
pydantic-field
strip_response_content: bool | None = None
When True, returns only essential result value as plain text, stripping all metadata.
stream_thoughts
pydantic-field
stream_thoughts: bool | None = None
Whether to stream the model's thinking process in the response.
include_program
pydantic-field
include_program: bool | None = None
Whether to include the generated program in the response.
include_policy_check_history
pydantic-field
include_policy_check_history: bool | None = None
Whether to include policy check results even when there are no violations.
include_namespace_snapshot
pydantic-field
include_namespace_snapshot: bool | None = None
Whether to include snapshot of all variables after program execution.