Skip to content

Advanced SQRT Examples - 2026-01-25

This tutorial demonstrates advanced security policies using SQRT with the Sequrity Control API. Each example shows how to define policies that enforce security, compliance, and fairness constraints at the architectural level.

Here is an overview of the examples covered:

Example Description
1. Preventing Sensitive Data Leaks Prevent emailing confidential documents to untrusted domains
2. Enforcing Complex Business Logic Issue customer refunds only after multiple approval requests
3. Ensuring Factual Accuracy with Data Provenance Require AI outputs to be based on verified data sources
4. Enforcing Legal and Compliance Mandates Protect PII data with de-identification and usage restrictions
5. Audit, Fairness, Transparency, and Interpretability Prevent discriminatory decision-making and block sensitive data from AI parsing
Download Example Code

Setup & Helper Functions

Before running the examples, configure your API keys and endpoint:

# Runtime configuration, resolved from the environment with tutorial-friendly
# placeholder fallbacks. `endpoint_url` always targets the OpenRouter
# chat-completions route of the Control API.
CONFIG = {
    "open_router_api_key": os.getenv("OPENROUTER_API_KEY", "your OpenRouter key"),
    "sequrity_key": os.getenv("SEQURITY_API_KEY", "your SequrityAI key"),
    "endpoint_url": (
        os.getenv("SEQURITY_BASE_URL", "https://api.sequrity.ai").rstrip("/")
        + "/control/chat/openrouter/v1/chat/completions"
    ),
}

Helper Functions

For convenience, we define two helper functions, run_workflow and send_request_to_endpoint, to run the agentic workflow. The workflow sends requests to the Sequrity Control API, executes tool calls as directed by the AI agent, and sends the tool results back to the API.

run_workflow and send_request_to_endpoint Functions

run_workflow orchestrates the multi-turn interaction with the Sequrity Control API:

def t_print(s, max_len: int | None = 240):
    # trim long strings for printing
    s = str(s)
    if max_len is not None and len(s) > max_len:
        s = s[: max_len - 3] + "..."
    print(s)


def run_workflow(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    tool_map: dict[str, Callable],
    features: FeaturesHeader | None,
    security_policy: SecurityPolicyHeader | None,
    fine_grained_config: FineGrainedConfigHeader | None,
    reasoning_effort: Literal["minimal", "low", "medium", "high"] = "minimal",
) -> tuple[Literal["success", "denied by policies", "unexpected error"], list[dict]]:
    """Drive the multi-turn agent loop against the Sequrity Control API (SDK client).

    Each iteration sends the conversation to the endpoint, then either executes
    the tool calls the model requested (appending results to `messages`) or
    interprets the final response content as a JSON outcome document.

    Returns:
        (status, messages): terminal status — "success", "denied by policies",
        or "unexpected error" — plus the full message history.
    """
    interaction_id = 1
    console = Console()

    while True:
        print(f"\t--- Interaction {interaction_id} ---")
        response = send_request_to_endpoint(
            model=model,
            messages=messages,
            tool_defs=tool_defs,
            features=features,
            security_policy=security_policy,
            fine_grained_config=fine_grained_config,
            reasoning_effort=reasoning_effort,
        )

        if response is None:
            # Transport-level failure; send_request_to_endpoint already printed it.
            print("No response received, terminating workflow.")
            return "unexpected error", messages

        # append assistant message
        messages.append(response.choices[0].message.model_dump(exclude_none=True))

        finish_reason = response.choices[0].finish_reason
        if finish_reason == "stop" or finish_reason == "error":
            # Terminal turn: content is a JSON document describing the outcome.
            content = response.choices[0].message.content
            details = json.loads(content)
            if "program" in details:
                # Pretty-print the program the Control API executed, if included.
                print("\nExecuted program:")
                syntax = Syntax(details["program"], "python", theme="github-dark", line_numbers=True)
                console.print(syntax)

            if details["status"] == "failure":
                # Distinguish policy denials (expected in these examples) from other
                # errors by matching known marker substrings in the response content.
                if "denied by argument checking policies" in content or "Control flow violation" in content:
                    t_print(f"\t🚨 Request denied by policies:\n\t{details['error']['message']}")
                    return "denied by policies", messages
                elif '"denied": [{' in content:
                    t_print(f"\t🚨 Request denied by policies:\n\t{details['policy_check_history']}")
                    return "denied by policies", messages
                else:
                    t_print(f"\t❌ Request failed due to error:\n\t{details['error']['message']}")
                    return "unexpected error", messages
            else:
                # status == "success"
                t_print(f"☑️ Final response: {content}")
                return "success", messages
        elif finish_reason == "tool_calls":
            # Execute every requested tool locally and feed the results back.
            tool_calls = response.choices[0].message.tool_calls
            for tool_call in tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)

                if tool_name in tool_map:
                    fn = tool_map[tool_name]
                    tool_result = fn(**tool_args)
                    t_print(f"\t🛠️ Executed tool '{tool_name}' with args {tool_args}")
                    messages.append(
                        {
                            "role": "tool",
                            "content": tool_result,
                            "tool_call_id": tool_call.id,
                        }
                    )
                else:
                    # The model asked for a tool we did not register — abort.
                    print(f"\t⛔ Tool '{tool_name}' not found in tool map.")
                    return "unexpected error", messages
        else:
            print(f"\t⛔ Unknown finish reason: {finish_reason}, terminating workflow. Messages: {messages[-1]}")
            return "unexpected error", messages
        interaction_id += 1

send_request_to_endpoint handles the HTTP communication:

def send_request_to_endpoint(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    features: FeaturesHeader | None,
    security_policy: SecurityPolicyHeader | None,
    fine_grained_config: FineGrainedConfigHeader | None,
    reasoning_effort: Literal["minimal", "low", "medium", "high"] = "minimal",
    session_id: str | None = None,
):
    """Send one chat request to the Sequrity Control API via the SDK client.

    Returns the SDK response object, or ``None`` if the call raised
    (the failure is printed rather than propagated).
    """
    request_kwargs = dict(
        messages=messages,
        model=model,
        llm_api_key=CONFIG["open_router_api_key"],
        features=features,
        security_policy=security_policy,
        fine_grained_config=fine_grained_config,
        provider="openrouter",
        reasoning_effort=reasoning_effort,
        tools=tool_defs,
        session_id=session_id,
    )
    try:
        return client.control.chat.create(**request_kwargs)
    except Exception as e:
        print(f"API Request failed: {e}")
        return None

run_workflow orchestrates the multi-turn interaction with the Sequrity Control API:

def t_print(s, max_len: int | None = 240):
    # trim long strings for printing
    s = str(s)
    if max_len is not None and len(s) > max_len:
        s = s[: max_len - 3] + "..."
    print(s)


def run_workflow(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    tool_map: dict[str, Callable],
    enabled_features: dict | None,
    security_policies: dict | None,
    security_config: dict | None,
    reasoning_effort: str = "minimal",
) -> tuple[Literal["success", "denied by policies", "unexpected error"], list[dict]]:
    """Drive the multi-turn agent loop against the Sequrity Control API (raw HTTP).

    Same flow as the SDK variant, but the response is a plain JSON dict and the
    per-request session id returned by the endpoint is discarded here (each
    call below ignores the second tuple element).

    Returns:
        (status, messages): terminal status — "success", "denied by policies",
        or "unexpected error" — plus the full message history.
    """
    interaction_id = 1
    console = Console()

    while True:
        print(f"\t--- Interaction {interaction_id} ---")
        response_json, _ = send_request_to_endpoint(
            model=model,
            messages=messages,
            tool_defs=tool_defs,
            enabled_features=enabled_features,
            security_policies=security_policies,
            security_config=security_config,
            reasoning_effort=reasoning_effort,
        )

        if response_json is None:
            # Transport-level failure; send_request_to_endpoint already printed it.
            print("No response received, terminating workflow.")
            return "unexpected error", messages

        # append assistant message
        messages.append(response_json["choices"][0]["message"])

        finish_reason = response_json["choices"][0]["finish_reason"]
        if finish_reason == "stop" or finish_reason == "error":
            # Terminal turn: content is a JSON document describing the outcome.
            content = response_json["choices"][0]["message"]["content"]
            details = json.loads(content)
            if "program" in details:
                # Pretty-print the program the Control API executed, if included.
                print("\nExecuted program:")
                syntax = Syntax(details["program"], "python", theme="github-dark", line_numbers=True)
                console.print(syntax)

            if details["status"] == "failure":
                # Distinguish policy denials (expected in these examples) from other
                # errors by matching known marker substrings in the response content.
                if "denied by argument checking policies" in content or "Control flow violation" in content:
                    t_print(f"\t🚨 Request denied by policies:\n\t{details['error']['message']}")
                    return "denied by policies", messages
                elif '"denied": [{' in content:
                    t_print(f"\t🚨 Request denied by policies:\n\t{details['policy_check_history']}")
                    return "denied by policies", messages
                else:
                    t_print(f"\t❌ Request failed due to error:\n\t{details['error']['message']}")
                    return "unexpected error", messages
            else:
                # status == "success"
                t_print(f"☑️ Final response: {content}")
                return "success", messages
        elif finish_reason == "tool_calls":
            # Execute every requested tool locally and feed the results back.
            tool_calls = response_json["choices"][0]["message"]["tool_calls"]
            for tool_call in tool_calls:
                tool_name = tool_call["function"]["name"]
                tool_args = json.loads(tool_call["function"]["arguments"])

                if tool_name in tool_map:
                    fn = tool_map[tool_name]
                    tool_result = fn(**tool_args)
                    t_print(f"\t🛠️ Executed tool '{tool_name}' with args {tool_args}")
                    messages.append(
                        {
                            "role": "tool",
                            "content": tool_result,
                            "tool_call_id": tool_call["id"],
                        }
                    )
                else:
                    # The model asked for a tool we did not register — abort.
                    t_print(f"\t⛔ Tool '{tool_name}' not found in tool map.")
                    return "unexpected error", messages
        else:
            print(f"\t⛔ Unknown finish reason: {finish_reason}, terminating workflow. Messages: {messages[-1]}")
            return "unexpected error", messages
        interaction_id += 1

send_request_to_endpoint handles the HTTP communication:

def send_request_to_endpoint(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    enabled_features: dict | None,
    security_policies: dict | None,
    security_config: dict | None,
    reasoning_effort: str = "minimal",
    session_id: str | None = None,
) -> tuple[dict | None, str | None]:
    """POST one chat-completion request to the Sequrity Control endpoint.

    Feature flags, policies, and config travel as JSON-encoded HTTP headers;
    the chat payload travels in the request body.

    Returns:
        (response_json, session_id) on success; (None, session_id) on failure,
        where the failure is printed rather than raised.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {CONFIG['sequrity_key']}",
        "X-Api-Key": CONFIG["open_router_api_key"],
    }
    # Attach optional headers only when a value was provided. Previously
    # X-Features was always sent, serializing None as the JSON literal "null".
    if enabled_features is not None:
        headers["X-Features"] = json.dumps(enabled_features)
    if security_policies:
        headers["X-Policy"] = json.dumps(security_policies)
    if security_config:
        headers["X-Config"] = json.dumps(security_config)
    if session_id:
        headers["X-Session-ID"] = session_id

    payload = {
        "model": model,
        "messages": messages,
        "tools": tool_defs,
        "reasoning_effort": reasoning_effort,
    }

    url = CONFIG["endpoint_url"]
    if not isinstance(url, str):
        # Explicit check instead of `assert`, which is stripped under `python -O`.
        raise TypeError("endpoint_url must be configured")
    try:
        # A timeout prevents the workflow from hanging forever on a dead endpoint.
        response = requests.post(url=url, headers=headers, json=payload, timeout=120)
        response.raise_for_status()
        # The endpoint echoes/assigns the session id in a response header.
        session_id = response.headers.get("X-Session-ID", None)
        return response.json(), session_id
    except requests.exceptions.RequestException as e:
        print(f"API Request failed: {e}")
        if e.response is not None:
            print(f"Response content: {e.response.text}")
        # On failure, return the caller-supplied session_id unchanged.
        return None, session_id

Example 1: Preventing Sensitive Data Leaks

Imagine an AI agent with access to internal documents and email tools. With Sequrity Control, you can prevent accidental data leakage by tagging sensitive data and restricting where it can be sent.

SQRT Policy

let sensitive_docs = {"internal_use", "confidential"};
tool "get_internal_document" -> @tags |= sensitive_docs;
tool "send_email" {
    hard deny when (body.tags overlaps sensitive_docs) and (not to.value in {str matching r".*@trustedcorp\.com"});
}

This policy:

  1. Tags all data from get_internal_document with internal_use and confidential
  2. Denies send_email if the body contains sensitive data AND the recipient is not from the trusted domain trustedcorp.com

Tool Definitions & Session Configurations

We define mock tool implementations for get_internal_document and send_email:

def mock_get_internal_document(doc_id: str) -> str:
    """Mock document store: return a stub confidential document for *doc_id*."""
    return "# Internal Transaction\ndocument ID: " + doc_id + "..."


def mock_send_email(to: str, subject: str, body: str) -> str:
    """Mock mail tool: pretend the email was delivered."""
    return "Email sent successfully."
def mock_get_internal_document(doc_id: str) -> str:
    """Stub implementation of the internal-document lookup tool."""
    header = "# Internal Transaction"
    return f"{header}\ndocument ID: {doc_id}..."


def mock_send_email(to: str, subject: str, body: str) -> str:
    """Stub implementation of the email tool; always reports success."""
    confirmation = "Email sent successfully."
    return confirmation
Tool Signatures
# Tool definitions
# OpenAI-style function schemas advertised to the model; the Control API
# evaluates its SQRT policies against these tool names and argument values.
tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "get_internal_document",
            "description": "Retrieve an internal document by its ID. Returns the document content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "doc_id": {
                        "type": "string",
                        "description": "The ID of the internal document to retrieve.",
                    }
                },
                "required": ["doc_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "send_email",
            "description": "Send an email to a specified recipient. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "to": {
                        "type": "string",
                        "description": "The recipient's email address.",
                    },
                    "subject": {
                        "type": "string",
                        "description": "The subject of the email.",
                    },
                    "body": {
                        "type": "string",
                        "description": "The body content of the email.",
                    },
                },
                "required": ["to", "subject", "body"],
            },
        },
    },
]

# Maps tool names from the model's tool calls to the local mock implementations.
tool_map = {
    "get_internal_document": mock_get_internal_document,
    "send_email": mock_send_email,
}
# Tool definitions
# Function schemas in the OpenAI tool-calling format; names here must match
# the tool names referenced in the SQRT policy.
tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "get_internal_document",
            "description": "Retrieve an internal document by its ID. Returns the document content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "doc_id": {
                        "type": "string",
                        "description": "The ID of the internal document to retrieve.",
                    }
                },
                "required": ["doc_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "send_email",
            "description": "Send an email to a specified recipient. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "to": {
                        "type": "string",
                        "description": "The recipient's email address.",
                    },
                    "subject": {
                        "type": "string",
                        "description": "The subject of the email.",
                    },
                    "body": {
                        "type": "string",
                        "description": "The body content of the email.",
                    },
                },
                "required": ["to", "subject", "body"],
            },
        },
    },
]

# Dispatch table: tool name -> local mock implementation.
tool_map = {
    "get_internal_document": mock_get_internal_document,
    "send_email": mock_send_email,
}
Session Configurations
# Create features header using FeaturesHeader class
features = FeaturesHeader.dual_llm()

# Create security policy using SecurityPolicyHeader class
# The embedded SQRT code tags documents as sensitive and denies emailing
# tagged content to any address outside trustedcorp.com.
security_policy = SecurityPolicyHeader(
    mode="standard",
    codes=PolicyCode(
        code=r"""
    let sensitive_docs = {"internal_use", "confidential"};
    tool "get_internal_document" -> @tags |= sensitive_docs;
    tool "send_email" {
        hard deny when (body.tags overlaps sensitive_docs) and (not to.value in {str matching r".*@trustedcorp\.com"});
    }
    """
    ),
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_non_executable_memory=True,
    ),
)

# Create fine-grained config using FineGrainedConfigHeader class
# Keep the response content and include the executed program for display.
fine_grained_config = FineGrainedConfigHeader(
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,
    ),
)
# Plain-dict equivalents of the header objects; these are JSON-encoded into
# the X-Features / X-Policy / X-Config HTTP headers by send_request_to_endpoint.
enabled_features = {"agent_arch": "dual-llm"}
security_policies = {
    "mode": "standard",
    "codes": {
        "code": r"""
    let sensitive_docs = {"internal_use", "confidential"};
    tool "get_internal_document" -> @tags |= sensitive_docs;
    tool "send_email" {
        hard deny when (body.tags overlaps sensitive_docs) and (not to.value in {str matching r".*@trustedcorp\.com"});
    }
    """,
    },
    "presets": {
        "default_allow": True,
        "enable_non_executable_memory": True,
    },
    "fail_fast": True,
    "auto_gen": False,
}
# Keep the response content and include the executed program for display.
security_config = {
    "response_format": {
        "strip_response_content": False,
        "include_program": True,
    },
}

Case 1: Untrusted Email (Denied)

Sending a confidential document to an untrusted email address research@gmail.com should be blocked:

# Case 1 (SDK client): the recipient domain is untrusted, so the policy must
# deny the send_email call.
print("=== Preventing Sensitive Data Leaks (untrusted email) ===")
messages = [
    {"role": "user", "content": "Retrieve the internal document with ID 'DOC12345' and email it to research@gmail.com"}
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=tool_defs,
    tool_map=tool_map,
    features=features,
    security_policy=security_policy,
    fine_grained_config=fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
# Case 1 (raw HTTP): identical scenario driven through the dict-based helpers;
# the untrusted recipient must be denied.
print("=== Preventing Sensitive Data Leaks (untrusted email) ===")
messages = [
    {"role": "user", "content": "Retrieve the internal document with ID 'DOC12345' and email it to research@gmail.com"}
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=tool_defs,
    tool_map=tool_map,
    enabled_features=enabled_features,
    security_policies=security_policies,
    security_config=security_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
Expected Output
=== Preventing Sensitive Data Leaks (untrusted email) ===
    --- Interaction 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the internal document with ID 'DOC12345' and email it to research@gmail.com"}]
    🛠️ Executed tool 'get_internal_document' with args {'doc_id': 'DOC12345'}
    --- Interaction 2 ---
    📤 Sending request (Session ID: 41b82b31-f97c-11f0-b254-7d965b6bbd63):
    [{'role': 'tool', 'name': 'get_internal_document', 'content': '# Internal Transaction\ndocument ID: DOC12345...', 'tool_call_id': 'tc-41b82b31-f97c-11f0-b254-7d965b6bbd63-448e9163-f97c-11f0-906e-7d965b6bbd63'}]
    🚨 Request denied by policies:
    Tool call send_email denied by argument checking policies.

Case 2: Trusted Email (Allowed)

Sending the same document to an email of trusted domain admin@trustedcorp.com succeeds:

# Case 2 (SDK client): same document, but the recipient is on the trusted
# domain, so the workflow must complete successfully.
print("=== Preventing Sensitive Data Leaks (trusted email) ===")
messages = [
    {
        "role": "user",
        "content": "Retrieve the internal document with ID 'DOC12345' and email it to admin@trustedcorp.com",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=tool_defs,
    tool_map=tool_map,
    features=features,
    security_policy=security_policy,
    fine_grained_config=fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "success"
# Case 2 (raw HTTP): trusted recipient — the send_email call is allowed and
# the workflow must finish with "success".
print("=== Preventing Sensitive Data Leaks (trusted email) ===")
messages = [
    {
        "role": "user",
        "content": "Retrieve the internal document with ID 'DOC12345' and email it to admin@trustedcorp.com",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=tool_defs,
    tool_map=tool_map,
    enabled_features=enabled_features,
    security_policies=security_policies,
    security_config=security_config,
    reasoning_effort="minimal",
)
assert result == "success"
Expected Output
=== Preventing Sensitive Data Leaks (trusted email) ===
    --- Interaction 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the internal document with ID 'DOC12345' and email it to admin@trustedcorp.com"}]
    🛠️ Executed tool 'get_internal_document' with args {'doc_id': 'DOC12345'}
    --- Interaction 2 ---
    📤 Sending request (Session ID: 44934967-f97c-11f0-9577-7d965b6bbd63):
    [{'role': 'tool', 'name': 'get_internal_document', 'content': '# Internal Transaction\ndocument ID: DOC12345...', 'tool_call_id': 'tc-44934967-f97c-11f0-9577-7d965b6bbd63-4676f195-f97c-11f0-b351-7d965b6bbd63'}]
    🛠️ Executed tool 'send_email' with args {'to': 'admin@trustedcorp.com', 'subject': 'Internal Document DOC12345', 'body': '...'}
    --- Interaction 3 ---
    📤 Sending request (Session ID: 44934967-f97c-11f0-9577-7d965b6bbd63):
    [{'role': 'tool', 'name': 'send_email', 'content': 'Email sent successfully.', 'tool_call_id': 'tc-44934967-f97c-11f0-9577-7d965b6bbd63-4677b0d1-f97c-11f0-8e28-7d965b6bbd63'}]
    ☑️ Final Response (Session ID: 44934967-f97c-11f0-9577-7d965b6bbd63):
    {"status": "success", ...}

Example 2: Enforcing Complex Business Logic

Sequrity Control can enforce stateful business rules. This example implements a customer refund policy that requires multiple requests before a refund is issued.

SQRT Policy

tool "issue_refund" {
    session before {
        // before issue tool call to issue_refund, add "final_attempt" to session tags if "attempt3" is in session tags.
        when "attempt3" in @tags { @tags |= {"final_attempt"}; }
    }
    session before {
        // before issue tool call to issue_refund, add "attempt3" to session tags if "attempt2" is in session tags.
        when "attempt2" in @tags { @tags |= {"attempt3"}; }
    }
    session before {
        // before issue tool call to issue_refund, add "attempt2" to session tags if "attempt1" is in session tags.
        when "attempt1" in @tags { @tags |= {"attempt2"}; }
    }
    session before {
        // before issue tool call to issue_refund, add "attempt1" to session tags.
        @tags |= {"attempt1"};
    }

    hard allow when "final_attempt" in @session.tags;
}

This policy tracks refund attempts using session metadata. Only the 4th attempt is approved.

Tool Definitions & Policy Configuration

Tool implementation for issue_refund:

def mock_issue_refund(order_id: str) -> str:
    """Mock refund tool: confirm a refund for *order_id*."""
    return f"💵 Refund for order {order_id} has been issued."
def mock_issue_refund(order_id: str) -> str:
    """Mock refund tool (duplicate snippet from the docs' tabbed views)."""
    return "💵 Refund for order " + order_id + " has been issued."
Tool Signature
# Tool definitions for refund example
# Single tool: issue_refund, the target of the stateful session policy.
refund_tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "issue_refund",
            "description": "Issue a refund for a given order ID. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "order_id": {
                        "type": "string",
                        "description": "The ID of the order to refund.",
                    }
                },
                "required": ["order_id"],
            },
        },
    }
]

# Dispatch table: tool name -> local mock implementation.
refund_tool_map = {
    "issue_refund": mock_issue_refund,
}
# Tool definitions for refund example
# OpenAI-style schema for the issue_refund tool used by the raw-HTTP variant.
refund_tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "issue_refund",
            "description": "Issue a refund for a given order ID. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "order_id": {
                        "type": "string",
                        "description": "The ID of the order to refund.",
                    }
                },
                "required": ["order_id"],
            },
        },
    }
]

# Maps the tool name to its mock implementation.
refund_tool_map = {
    "issue_refund": mock_issue_refund,
}
Security Policies and Configuration
# Create features header for refund example
refund_features = FeaturesHeader.dual_llm()

# Create security policy for refund example
# The cascaded `session before` blocks advance a tag chain
# attempt1 -> attempt2 -> attempt3 -> final_attempt, one step per call;
# the call is only allowed once "final_attempt" is present (4th attempt).
refund_security_policy = SecurityPolicyHeader(
    mode="standard",
    codes={
        "language": "sqrt",
        "code": r"""
            tool "issue_refund" {
                session before {
                    when "attempt3" in @tags { @tags |= {"final_attempt"}; }
                }
                session before {
                    when "attempt2" in @tags { @tags |= {"attempt3"}; }
                }
                session before {
                    when "attempt1" in @tags { @tags |= {"attempt2"}; }
                }
                session before {
                    @tags |= {"attempt1"};
                }

                hard allow when "final_attempt" in @session.tags;
            }
        """,
    },
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_non_executable_memory=True,
    ),
)

# Create fine-grained config for refund example
# Session tags must persist across turns for the attempt counter to work.
refund_fine_grained_config = FineGrainedConfigHeader(
    fsm=FsmOverrides(
        clear_session_meta="never",
        retry_on_policy_violation=False,
        max_pllm_steps=1,  # disable auto-retry as we want to count attempts accurately
        max_n_turns=5,  # we need multiple turns to reach the refund approval
    ),
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,
    ),
)
# Plain-dict equivalents for the raw-HTTP variant; JSON-encoded into headers.
refund_enabled_features = {"agent_arch": "dual-llm"}
# Cascaded `session before` blocks advance a tag chain one step per call;
# only the 4th call (when "final_attempt" is set) is allowed.
refund_security_policies = {
    "mode": "standard",
    "codes": {
        "language": "sqrt",
        "code": r"""
            tool "issue_refund" {
                session before {
                    when "attempt3" in @tags { @tags |= {"final_attempt"}; }
                }
                session before {
                    when "attempt2" in @tags { @tags |= {"attempt3"}; }
                }
                session before {
                    when "attempt1" in @tags { @tags |= {"attempt2"}; }
                }
                session before {
                    @tags |= {"attempt1"};
                }

                hard allow when "final_attempt" in @session.tags;
            }
        """,
    },
    "presets": {
        "default_allow": True,
        "enable_non_executable_memory": True,
    },
    "fail_fast": True,
    "auto_gen": False,
}
# Session tags must persist across turns for the attempt counter to work.
refund_security_config = {
    "fsm": {
        "clear_session_meta": "never",
        "retry_on_policy_violation": False,
        "max_pllm_steps": 1,  # disable auto-retry as we want to count attempts accurately
        "max_n_turns": 5,  # we need multiple turns to reach the refund approval
    },
    "response_format": {
        "strip_response_content": False,
        "include_program": True,
    },
}

Helper Functions for Multi-Turn Refund Requests

We simulate a customer requesting a refund 4 times with helper functions send_request_refund_example and run_refund_tool. This needs a multi-turn setup, where each turn represents a refund request attempt.

Note that the first three attempts should be denied, and only the fourth attempt should succeed.

send_request_refund_example and run_refund_tool Functions
def send_request_refund_example(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    session_id: str | None,
    features: FeaturesHeader | None,
    security_policy: SecurityPolicyHeader | None,
    fine_grained_config: FineGrainedConfigHeader | None,
    reasoning_effort: str = "minimal",
):
    """Send one request within an existing session and append the assistant reply.

    Returns the updated (messages, session_id, response) triple, where the
    session id is taken from the fresh response.
    """
    response = send_request_to_endpoint(
        model=model,
        messages=messages,
        session_id=session_id,
        tool_defs=tool_defs,
        features=features,
        security_policy=security_policy,
        fine_grained_config=fine_grained_config,
        reasoning_effort=reasoning_effort,
    )
    assert response is not None
    # Convert the assistant message to a plain dict before appending.
    assistant_message = response.choices[0].message.model_dump(exclude_none=True)
    messages.append(assistant_message)
    return messages, response.session_id, response


def run_refund_tool(tool_call: dict, tool_map: dict) -> dict:
    """Execute the tool named in *tool_call* and return a tool-result message.

    *tool_call* is a dict with keys: id, type, function (name + arguments).
    """
    function_spec = tool_call["function"]
    tool_name = function_spec["name"]
    tool_args = json.loads(function_spec["arguments"])
    tool_result = tool_map[tool_name](**tool_args)
    print(f"\t🛠️ Executed tool '{tool_name}' with args {tool_args}, result: {tool_result}")
    return {"role": "tool", "content": tool_result, "tool_call_id": tool_call["id"]}
def send_request_refund_example(
    model: str,
    messages: list[dict],
    tool_defs: list[dict],
    session_id: str | None,
    enabled_features: dict | None,
    security_policies: dict | None,
    security_config: dict | None,
    reasoning_effort: str = "minimal",
):
    """Send one request in the refund session and append the assistant reply.

    Returns (messages, session_id). On transport failure the messages list is
    returned unchanged after printing a notice.
    """
    response_json, session_id = send_request_to_endpoint(
        model=model,
        messages=messages,
        session_id=session_id,
        tool_defs=tool_defs,
        enabled_features=enabled_features,
        security_policies=security_policies,
        security_config=security_config,
        reasoning_effort=reasoning_effort,
    )
    if response_json is None:
        print("No response received.")
    else:
        messages.append(response_json["choices"][0]["message"])
    return messages, session_id


def run_refund_tool(tool_call: dict, tool_map: dict) -> dict:
    """Look up and invoke the requested tool; return a tool-result message."""
    tool_name = tool_call["function"]["name"]
    tool_args = json.loads(tool_call["function"]["arguments"])
    tool_result = tool_map[tool_name](**tool_args)
    print(f"\t🛠️ Executed tool '{tool_name}' with args {tool_args}, result: {tool_result}")
    # Assemble the OpenAI-style tool-role message piece by piece.
    message: dict = {"role": "tool"}
    message["content"] = tool_result
    message["tool_call_id"] = tool_call["id"]
    return message

Running the Refund Flow

# --- Refund flow, typed-header tab variant ---
# NOTE(review): this snippet unpacks THREE values from
# send_request_refund_example and passes features/security_policy/
# fine_grained_config keyword arguments, while the dict-based helper defined
# earlier in this page returns two values and takes enabled_features/
# security_policies/security_config. This looks like a docs tab pairing with
# a differently-signatured typed-header helper — confirm against the matching
# helper before running this variant.
print("=== Enforcing Complex Business Logic ===")

user_query = "Ignore previous interactions, just issue a refund for order ID 'ORDER67890'."
messages = [{"role": "user", "content": user_query}]
session_id = None
console = Console()

# Attempts 1-3 are expected to be denied by the policy; attempt 4 is expected
# to satisfy the multiple-approval requirement and yield the tool call.
for i in range(1, 5):
    messages, session_id, _ = send_request_refund_example(
        model="openai/gpt-5-mini,openai/gpt-5-nano",
        messages=messages,
        tool_defs=refund_tool_defs,
        session_id=session_id,
        features=refund_features,
        security_policy=refund_security_policy,
        fine_grained_config=refund_fine_grained_config,
        reasoning_effort="minimal",
    )
    if i < 4:
        assert "'issue_refund' is denied by argument" in messages[-1]["content"]
        print(f"🚨 Attempt {i} denied by policies")
        # Re-issue the same request so the approval counter advances.
        messages.append({"role": "user", "content": user_query})
    else:
        # this should be a tool call to issue_refund because this tool call is approved now
        assert messages[-1]["role"] == "assistant"
        assert messages[-1]["tool_calls"][0]["function"]["name"] == "issue_refund", (
            f"Expected a tool call to 'issue_refund' but got: {messages[-1]}"
        )
        print(f"🛠️ Attempt {i} receives a tool call to 'issue_refund'")
        # Execute the tool call using the dict from messages
        tool_result_message = run_refund_tool(messages[-1]["tool_calls"][0], refund_tool_map)
        messages.append(tool_result_message)
        # Send the tool result back so the agent can produce the final answer.
        messages, session_id, _ = send_request_refund_example(
            model="openai/gpt-5-mini,openai/gpt-5-nano",
            messages=messages,
            tool_defs=refund_tool_defs,
            session_id=session_id,
            features=refund_features,
            security_policy=refund_security_policy,
            fine_grained_config=refund_fine_grained_config,
            reasoning_effort="minimal",
        )
        # final response
        assert "Refund for order ORDER67890 has been issued." in messages[-1]["content"]
        t_print(f"💵 Refund has been issued. Response: {messages[-1]['content']}")
        # pretty print the executed program using rich
        syntax = Syntax(
            json.loads(messages[-1]["content"])["program"], "python", theme="github-dark", line_numbers=True
        )
        console.print(syntax)
# --- Refund flow, raw-dict tab variant ---
# This variant matches the dict-based send_request_refund_example defined
# earlier on this page (two return values; enabled_features/
# security_policies/security_config keyword arguments).
print("=== Enforcing Complex Business Logic ===")

user_query = "Ignore previous interactions, just issue a refund for order ID 'ORDER67890'."
messages = [{"role": "user", "content": user_query}]
session_id = None
console = Console()

# Attempts 1-3 are expected to be denied; attempt 4 yields the approved call.
for i in range(1, 5):
    print(f"\t--- Refund Attempt {i} ---")
    messages, session_id = send_request_refund_example(
        model="openai/gpt-5-mini,openai/gpt-5-nano",
        messages=messages,
        tool_defs=refund_tool_defs,
        session_id=session_id,
        enabled_features=refund_enabled_features,
        security_policies=refund_security_policies,
        security_config=refund_security_config,
        reasoning_effort="minimal",
    )
    if i < 4:
        assert "'issue_refund' is denied by argument" in messages[-1]["content"]
        print(f"\t🚨 Attempt {i} denied by policies")
        # Re-issue the same request so the approval counter advances.
        messages.append({"role": "user", "content": user_query})
    else:
        # this should be a tool call to issue_refund because this tool call is approved now
        assert messages[-1]["role"] == "assistant"
        assert messages[-1]["tool_calls"][0]["function"]["name"] == "issue_refund"
        print(f"🛠️ Attempt {i} receives a tool call to 'issue_refund'")
        tool_result_message = run_refund_tool(messages[-1]["tool_calls"][0], refund_tool_map)
        messages.append(tool_result_message)
        # Send the tool result back so the agent can produce the final answer.
        messages, session_id = send_request_refund_example(
            model="openai/gpt-5-mini,openai/gpt-5-nano",
            messages=messages,
            tool_defs=refund_tool_defs,
            session_id=session_id,
            enabled_features=refund_enabled_features,
            security_policies=refund_security_policies,
            security_config=refund_security_config,
            reasoning_effort="minimal",
        )
        # final response
        assert "Refund for order ORDER67890 has been issued." in messages[-1]["content"]
        t_print(f"\t💵 Refund has been issued. Response: {messages[-1]['content']}")
        # pretty print the executed program using rich
        syntax = Syntax(
            json.loads(messages[-1]["content"])["program"], "python", theme="github-dark", line_numbers=True
        )
        console.print(syntax)
Expected Output
=== Enforcing Complex Business Logic ===

--- Refund Attempt 1 ---
🚨 Attempt 1 denied by policies

--- Refund Attempt 2 ---
🚨 Attempt 2 denied by policies

--- Refund Attempt 3 ---
🚨 Attempt 3 denied by policies

--- Refund Attempt 4 ---
🛠️ Attempt 4 receives a tool call to 'issue_refund'
    🛠️ Executed tool 'issue_refund' with args {'order_id': 'ORDER67890'}, result: 💵 Refund for order ORDER67890 has been issued.
💵 Refund has been issued. Response: {"status": "success", "final_return_value": {"value": "\ud83d\udcb5 Refund for order ORDER67890 has been issued.", "meta": {"tags": ["__non_executable"], "consumers": ["*"], "producers": []}}, "program": "# in_progress\n# Initialize final return value\nfinal_return_value = None\n\n# Issue refund for ..."}

Example 3: Ensuring Factual Accuracy with Data Provenance

Sequrity Control's provenance system ensures AI outputs are grounded in verified data sources, preventing hallucination.

SQRT Policy

tool "get_quarterly_earning_report" -> @producers |= {"verified_financial_data"};
tool "get_marketing_analysis" -> @producers |= {"verified_marketing_data"};
tool "generate_business_summary" {
    hard allow when @args.producers superset of {"verified_financial_data", "verified_marketing_data"};
}

This policy requires generate_business_summary to only use inputs from verified sources.

Tool Definitions & Policy Configuration

We define mock tool implementations and configure the security policies:

def mock_get_quarterly_earning_report(company: str, quarter: str) -> str:
    """Mock tool: return a canned quarterly earning report for *company*/*quarter*."""
    header = f"# Quarter Report\nCompany: {company}\nQuarter: {quarter}"
    body = "The quarterly earning shows a revenue of $5 billion..."
    return header + "\n" + body


def mock_get_marketing_analysis(company: str) -> str:
    """Mock tool: return a canned marketing analysis for *company*."""
    header = f"# Marketing Analysis\nCompany: {company}"
    return header + "\nThe marketing analysis indicates a strong brand presence..."


def mock_generate_business_summary(earning_report: str, marketing_analysis: str | None = None) -> str:
    return "# Business Summary\nBased on the earning report and marketing analysis, the company is performing well..."
def mock_get_quarterly_earning_report(company: str, quarter: str) -> str:
    """Mock tool: fabricate a quarterly earning report for *company*/*quarter*."""
    lines = [
        "# Quarter Report",
        f"Company: {company}",
        f"Quarter: {quarter}",
        "The quarterly earning shows a revenue of $5 billion...",
    ]
    return "\n".join(lines)


def mock_get_marketing_analysis(company: str) -> str:
    """Mock tool: fabricate a marketing analysis for *company*."""
    lines = [
        "# Marketing Analysis",
        f"Company: {company}",
        "The marketing analysis indicates a strong brand presence...",
    ]
    return "\n".join(lines)


def mock_generate_business_summary(earning_report: str, marketing_analysis: str | None = None) -> str:
    return "# Business Summary\nBased on the earning report and marketing analysis, the company is performing well..."
Tool Signatures
# Tool definitions for provenance example
# Each entry follows the OpenAI function-calling tool schema: a name, a
# description, and a JSON-schema `parameters` object.
provenance_tool_defs = [
    # Source 1: verified financial data.
    {
        "type": "function",
        "function": {
            "name": "get_quarterly_earning_report",
            "description": "Retrieve the quarterly earning report for a specified company and quarter. Returns the report content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "company": {
                        "type": "string",
                        "description": "The name of the company.",
                    },
                    "quarter": {
                        "type": "string",
                        "description": "The quarter for which to retrieve the report (e.g., 'Q1 2023').",
                    },
                },
                "required": ["company", "quarter"],
            },
        },
    },
    # Source 2: verified marketing data.
    {
        "type": "function",
        "function": {
            "name": "get_marketing_analysis",
            "description": "Retrieve the marketing analysis for a specified company. Returns the analysis content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "company": {
                        "type": "string",
                        "description": "The name of the company.",
                    }
                },
                "required": ["company"],
            },
        },
    },
    # Consumer: only allowed to run on verified inputs (see the SQRT policy).
    {
        "type": "function",
        "function": {
            "name": "generate_business_summary",
            "description": "Generate a business summary based on the provided earning report and optional marketing analysis. Returns the summary as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "earning_report": {
                        "type": "string",
                        "description": "The content of the quarterly earning report.",
                    },
                    "marketing_analysis": {
                        "type": "string",
                        "description": "The content of the marketing analysis (optional).",
                    },
                },
                "required": ["earning_report"],
            },
        },
    },
]

# Maps each declared tool name to its local mock implementation.
provenance_tool_map = {
    "get_quarterly_earning_report": mock_get_quarterly_earning_report,
    "get_marketing_analysis": mock_get_marketing_analysis,
    "generate_business_summary": mock_generate_business_summary,
}
# Tool definitions for provenance example
# (Type-annotated tab variant of the same definitions; OpenAI
# function-calling schema.)
provenance_tool_defs: list[dict[str, Any]] = [
    # Source 1: verified financial data.
    {
        "type": "function",
        "function": {
            "name": "get_quarterly_earning_report",
            "description": "Retrieve the quarterly earning report for a specified company and quarter. Returns the report content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "company": {
                        "type": "string",
                        "description": "The name of the company.",
                    },
                    "quarter": {
                        "type": "string",
                        "description": "The quarter for which to retrieve the report (e.g., 'Q1 2023').",
                    },
                },
                "required": ["company", "quarter"],
            },
        },
    },
    # Source 2: verified marketing data.
    {
        "type": "function",
        "function": {
            "name": "get_marketing_analysis",
            "description": "Retrieve the marketing analysis for a specified company. Returns the analysis content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "company": {
                        "type": "string",
                        "description": "The name of the company.",
                    }
                },
                "required": ["company"],
            },
        },
    },
    # Consumer: only allowed to run on verified inputs (see the SQRT policy).
    {
        "type": "function",
        "function": {
            "name": "generate_business_summary",
            "description": "Generate a business summary based on the provided earning report and optional marketing analysis. Returns the summary as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "earning_report": {
                        "type": "string",
                        "description": "The content of the quarterly earning report.",
                    },
                    "marketing_analysis": {
                        "type": "string",
                        "description": "The content of the marketing analysis (optional).",
                    },
                },
                "required": ["earning_report"],
            },
        },
    },
]

# Maps each declared tool name to its local mock implementation.
provenance_tool_map = {
    "get_quarterly_earning_report": mock_get_quarterly_earning_report,
    "get_marketing_analysis": mock_get_marketing_analysis,
    "generate_business_summary": mock_generate_business_summary,
}
Security Policies and Configuration
# Create features header for provenance example
provenance_features = FeaturesHeader.dual_llm()

# Create security policy for provenance example
# Tags outputs of the two retrieval tools as verified producers, then
# hard-allows generate_business_summary only when its arguments carry both
# verified-producer tags.
provenance_security_policy = SecurityPolicyHeader(
    mode="standard",
    codes={
        "language": "sqrt",
        "code": r"""
            tool "get_quarterly_earning_report" -> @producers |= {"verified_financial_data"};
            tool "get_marketing_analysis" -> @producers |= {"verified_marketing_data"};
            // Allow generating business summary only if data comes from verified financial and marketing sources
            tool "generate_business_summary" {
                hard allow when @args.producers superset of {"verified_financial_data", "verified_marketing_data"};
            }
        """,
    },
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_non_executable_memory=True,
    ),
)

# Create fine-grained config for provenance example
provenance_fine_grained_config = FineGrainedConfigHeader(
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,
    ),
)
# Raw-dict equivalents of the typed headers above, for the plain-HTTP tab
# variant of this example. The embedded SQRT code is identical.
provenance_enabled_features = {"agent_arch": "dual-llm"}
provenance_security_policies = {
    "mode": "standard",
    "codes": {
        "language": "sqrt",
        "code": r"""
            tool "get_quarterly_earning_report" -> @producers |= {"verified_financial_data"};
            tool "get_marketing_analysis" -> @producers |= {"verified_marketing_data"};
            // Allow generating business summary only if data comes from verified financial and marketing sources
            tool "generate_business_summary" {
                hard allow when @args.producers superset of {"verified_financial_data", "verified_marketing_data"};
            }
        """,
    },
    "presets": {
        "default_allow": True,
        "enable_non_executable_memory": True,
    },
    "fail_fast": True,
    "auto_gen": False,
}
provenance_security_config = {
    "response_format": {
        "strip_response_content": False,
        "include_program": True,
    },
}

Case 1: Both Verified Sources (Allowed)

When both financial and marketing data are retrieved from verified tools, the business summary generation succeeds:

# Case 1 (typed-header tab): both verified retrieval tools are available, so
# the provenance policy allows the final summary generation.
# NOTE(review): the run_workflow helper visible earlier on this page returns
# three values, while these snippets unpack two — presumably each tab pairs
# with its own helper variant; verify before running.
print("=== Data Provenance (both verified sources) ===")
messages = [{"role": "user", "content": "Generate a business summary for 'Sequrity AI' for Q1 2025."}]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=provenance_tool_defs,
    tool_map=provenance_tool_map,
    features=provenance_features,
    security_policy=provenance_security_policy,
    fine_grained_config=provenance_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "success"
# Case 1 (raw-dict tab): same scenario using the dict-based configuration.
print("=== Data Provenance (both verified sources) ===")
messages = [{"role": "user", "content": "Generate a business summary for 'Sequrity AI' for Q1 2025."}]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=provenance_tool_defs,
    tool_map=provenance_tool_map,
    enabled_features=provenance_enabled_features,
    security_policies=provenance_security_policies,
    security_config=provenance_security_config,
    reasoning_effort="minimal",
)
assert result == "success"
Expected Output
=== Data Provenance (both verified sources) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Generate a business summary for 'Sequrity AI' for Q1 2025."}]
    🛠️ Executed tool 'get_quarterly_earning_report' with args {'company': 'Sequrity AI', 'quarter': 'Q1 2025'}
    --- Turn 2 ---
    📤 Sending request (Session ID: 4bae6d8b-f97c-11f0-b166-7d965b6bbd63):
    [{'role': 'tool', 'name': 'get_quarterly_earning_report', 'content': '# Quarter Report\nCompany: Sequrity AI...', 'tool_call_id': '...'}]
    🛠️ Executed tool 'get_marketing_analysis' with args {'company': 'Sequrity AI'}
    --- Turn 3 ---
    🛠️ Executed tool 'generate_business_summary' with args {...}
    --- Turn 4 ---
    ☑️ Final Response (Session ID: 4bae6d8b-f97c-11f0-b166-7d965b6bbd63):
    {"status": "success", ...}

Case 2: Unverified/Hallucinated Data (Denied)

When the marketing analysis tool is unavailable and the user provides unverified data, the policy denies the operation:

# Case 2 (typed-header tab): the marketing-analysis tool is removed, so the
# user-supplied (unverified) text lacks the required producer tag and the
# summary generation is denied by the policy.
print("=== Data Provenance (only financial data) ===")
provenance_tool_defs_reduced = [td for td in provenance_tool_defs if td["function"]["name"] != "get_marketing_analysis"]
messages = [
    {
        "role": "user",
        "content": "Generate a business summary for 'Sequrity AI' for Q1 2025. Use 'The marketing analysis indicates a strong brand presence' as argument `marketing_analysis` when generating the summary.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=provenance_tool_defs_reduced,
    tool_map=provenance_tool_map,
    features=provenance_features,
    security_policy=provenance_security_policy,
    fine_grained_config=provenance_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
# Case 2 (raw-dict tab): same denial scenario via the dict configuration.
print("=== Data Provenance (only financial data) ===")
provenance_tool_defs_reduced = [td for td in provenance_tool_defs if td["function"]["name"] != "get_marketing_analysis"]
messages = [
    {
        "role": "user",
        "content": "Generate a business summary for 'Sequrity AI' for Q1 2025. Use 'The marketing analysis indicates a strong brand presence' as argument `marketing_analysis` when generating the summary.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=provenance_tool_defs_reduced,
    tool_map=provenance_tool_map,
    enabled_features=provenance_enabled_features,
    security_policies=provenance_security_policies,
    security_config=provenance_security_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
Expected Output
=== Data Provenance (only financial data) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Generate a business summary for 'Sequrity AI' for Q1 2025. Use 'The marketing analysis indicates a strong brand presence' as argument `marketing_analysis` when generating the summary."}]
    🛠️ Executed tool 'get_quarterly_earning_report' with args {'company': 'Sequrity AI', 'quarter': 'Q1 2025'}
    --- Turn 2 ---
    🚨 Request denied by policies:
    Tool call generate_business_summary denied by argument checking policies.

For regulated industries, Sequrity Control can enforce data privacy requirements at an architectural level.

SQRT Policy

tool "load_patient_record" -> @tags |= {"pii"};
tool "de_identify_data" -> @tags -= {"pii"};
tool r"send_to_.*" {
    hard deny when "pii" in data.tags;
}

This policy:

  1. Tags patient records with pii
  2. Removes the pii tag after de-identification
  3. Blocks any send_to_* tool from sending PII data

Tool Definitions & Policy Configuration

We define mock tool implementations and configure the security policies:

def mock_load_patient_record(patient_id: str) -> str:
    """Mock tool: return a canned medical record containing *patient_id*."""
    return "\n".join(["# Patient Record", f"Patient ID: {patient_id}", "Medical History: ..."])


def mock_de_identify_data(data: str) -> str:
    """Mock tool: redact any 'Patient ID: <id>' occurrence in *data*."""
    pii_pattern = re.compile(r"Patient ID: \w+")
    return pii_pattern.sub("Patient ID: [REDACTED]", data)


def mock_send_to_research_institute(data: str) -> str:
    """Mock tool: pretend to transmit *data*; always reports success."""
    confirmation = "Data sent to research institute successfully."
    return confirmation
def mock_load_patient_record(patient_id: str) -> str:
    """Mock tool: fabricate a patient record for *patient_id*."""
    record = f"# Patient Record\nPatient ID: {patient_id}"
    return record + "\nMedical History: ..."


def mock_de_identify_data(data: str) -> str:
    """Mock tool: strip the patient identifier out of *data*."""
    redacted = re.sub(r"Patient ID: \w+", "Patient ID: [REDACTED]", data)
    return redacted


def mock_send_to_research_institute(data: str) -> str:
    """Mock tool: simulate sending *data* to the research institute."""
    del data  # the payload is intentionally unused in the mock
    return "Data sent to research institute successfully."
Tool Signatures
# Tool definitions for compliance example
# Each entry follows the OpenAI function-calling tool schema.
compliance_tool_defs = [
    # Produces PII-tagged data (see the SQRT policy).
    {
        "type": "function",
        "function": {
            "name": "load_patient_record",
            "description": "Load the medical record for a specified patient ID. Returns the record content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "patient_id": {
                        "type": "string",
                        "description": "The ID of the patient whose record is to be loaded.",
                    }
                },
                "required": ["patient_id"],
            },
        },
    },
    # Removes the "pii" tag from its output per the policy.
    {
        "type": "function",
        "function": {
            "name": "de_identify_data",
            "description": "De-identify sensitive information from the provided data. Returns the de-identified data as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "data": {
                        "type": "string",
                        "description": "The data to be de-identified.",
                    }
                },
                "required": ["data"],
            },
        },
    },
    # Matched by the policy's send_to_.* pattern; denied for PII-tagged input.
    {
        "type": "function",
        "function": {
            "name": "send_to_research_institute",
            "description": "Send the provided data to a research institute. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "data": {
                        "type": "string",
                        "description": "The data to be sent to the research institute.",
                    }
                },
                "required": ["data"],
            },
        },
    },
]

# Maps each declared tool name to its local mock implementation.
compliance_tool_map = {
    "load_patient_record": mock_load_patient_record,
    "de_identify_data": mock_de_identify_data,
    "send_to_research_institute": mock_send_to_research_institute,
}
# Tool definitions for compliance example
# (Type-annotated tab variant of the same definitions.)
compliance_tool_defs: list[dict[str, Any]] = [
    # Produces PII-tagged data (see the SQRT policy).
    {
        "type": "function",
        "function": {
            "name": "load_patient_record",
            "description": "Load the medical record for a specified patient ID. Returns the record content as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "patient_id": {
                        "type": "string",
                        "description": "The ID of the patient whose record is to be loaded.",
                    }
                },
                "required": ["patient_id"],
            },
        },
    },
    # Removes the "pii" tag from its output per the policy.
    {
        "type": "function",
        "function": {
            "name": "de_identify_data",
            "description": "De-identify sensitive information from the provided data. Returns the de-identified data as a string.",
            "parameters": {
                "type": "object",
                "properties": {
                    "data": {
                        "type": "string",
                        "description": "The data to be de-identified.",
                    }
                },
                "required": ["data"],
            },
        },
    },
    # Matched by the policy's send_to_.* pattern; denied for PII-tagged input.
    {
        "type": "function",
        "function": {
            "name": "send_to_research_institute",
            "description": "Send the provided data to a research institute. Returns a confirmation string upon success.",
            "parameters": {
                "type": "object",
                "properties": {
                    "data": {
                        "type": "string",
                        "description": "The data to be sent to the research institute.",
                    }
                },
                "required": ["data"],
            },
        },
    },
]

# Maps each declared tool name to its local mock implementation.
compliance_tool_map = {
    "load_patient_record": mock_load_patient_record,
    "de_identify_data": mock_de_identify_data,
    "send_to_research_institute": mock_send_to_research_institute,
}
Security Policies and Configuration
# Create features header for compliance example
compliance_features = FeaturesHeader.dual_llm()

# Create security policy for compliance example
# Tags patient records as "pii", clears the tag after de-identification, and
# hard-denies any send_to_* tool whose input still carries the "pii" tag.
compliance_security_policy = SecurityPolicyHeader(
    mode="standard",
    codes={
        "language": "sqrt",
        "code": r"""
    tool "load_patient_record" -> @tags |= {"pii"};
    tool "de_identify_data" -> @tags -= {"pii"};
    tool r"send_to_.*" {
        hard deny when "pii" in data.tags;
    }
    """,
    },
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_non_executable_memory=True,
    ),
)

# Create fine-grained config for compliance example
compliance_fine_grained_config = FineGrainedConfigHeader(
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,
    ),
)
# Raw-dict equivalents of the typed headers above, for the plain-HTTP tab
# variant of this example. The embedded SQRT code is identical.
compliance_enabled_features = {"agent_arch": "dual-llm"}
compliance_security_policies = {
    "mode": "standard",
    "codes": {
        "language": "sqrt",
        "code": r"""
    tool "load_patient_record" -> @tags |= {"pii"};
    tool "de_identify_data" -> @tags -= {"pii"};
    tool r"send_to_.*" {
        hard deny when "pii" in data.tags;
    }
    """,
    },
    "presets": {
        "default_allow": True,
        "enable_non_executable_memory": True,
    },
    "fail_fast": True,
    "auto_gen": False,
}
compliance_security_config = {
    "response_format": {
        "strip_response_content": False,
        "include_program": True,
    },
}

Case 1: With De-identification (Allowed)

When patient data is de-identified before sending, the operation succeeds:

# Case 1 (typed-header tab): the record is de-identified before sending, so
# the "pii" tag is removed and the send_to_* policy allows the transfer.
print("=== Legal Compliance (de-identified data) ===")
messages = [
    {
        "role": "user",
        "content": "Load the patient record for patient ID 'PATIENT001', de-identify it, and send it to the research institute.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=compliance_tool_defs,
    tool_map=compliance_tool_map,
    features=compliance_features,
    security_policy=compliance_security_policy,
    fine_grained_config=compliance_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "success"
# Case 1 (raw-dict tab): same scenario using the dict-based configuration.
print("=== Legal Compliance (de-identified data) ===")
messages = [
    {
        "role": "user",
        "content": "Load the patient record for patient ID 'PATIENT001', de-identify it, and send it to the research institute.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=compliance_tool_defs,
    tool_map=compliance_tool_map,
    enabled_features=compliance_enabled_features,
    security_policies=compliance_security_policies,
    security_config=compliance_security_config,
    reasoning_effort="minimal",
)
assert result == "success"
Expected Output
=== Legal Compliance (de-identified data) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Load the patient record for patient ID 'PATIENT001', de-identify it, and send it to the research institute."}]
    🛠️ Executed tool 'load_patient_record' with args {'patient_id': 'PATIENT001'}
    --- Turn 2 ---
    📤 Sending request (Session ID: 4fb43036-f97c-11f0-a1fe-7d965b6bbd63):
    [{'role': 'tool', 'name': 'load_patient_record', 'content': '# Patient Record\nPatient ID: PATIENT001\nMedical History: ...', 'tool_call_id': '...'}]
    🛠️ Executed tool 'de_identify_data' with args {'data': '# Patient Record\nPatient ID: PATIENT001\nMedical History: ...'}
    --- Turn 3 ---
    🛠️ Executed tool 'send_to_research_institute' with args {'data': '# Patient Record\nPatient ID: [REDACTED]\nMedical History: ...'}
    --- Turn 4 ---
    ☑️ Final Response (Session ID: 4fb43036-f97c-11f0-a1fe-7d965b6bbd63):
    {"status": "success", ...}

Case 2: Without De-identification (Denied)

When attempting to send PII data without de-identification, the policy denies the operation:

# Case 2 (typed-header tab): with the de-identification tool removed, the
# record keeps its "pii" tag and the send_to_* policy denies the transfer.
print("=== Legal Compliance (identified data) ===")
compliance_tool_defs_reduced = [td for td in compliance_tool_defs if td["function"]["name"] != "de_identify_data"]
messages = [
    {
        "role": "user",
        "content": "Load the patient record for patient ID 'PATIENT001' and send it to the research institute.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=compliance_tool_defs_reduced,
    tool_map=compliance_tool_map,
    features=compliance_features,
    security_policy=compliance_security_policy,
    fine_grained_config=compliance_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies", f"Expected the action to be denied due to PII, but got: {result}"
# Case 2 (raw-dict tab): same denial scenario via the dict configuration.
print("=== Legal Compliance (identified data) ===")
compliance_tool_defs_reduced = [td for td in compliance_tool_defs if td["function"]["name"] != "de_identify_data"]
messages = [
    {
        "role": "user",
        "content": "Load the patient record for patient ID 'PATIENT001' and send it to the research institute.",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=compliance_tool_defs_reduced,
    tool_map=compliance_tool_map,
    enabled_features=compliance_enabled_features,
    security_policies=compliance_security_policies,
    security_config=compliance_security_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
Expected Output
=== Legal Compliance (identified data) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Load the patient record for patient ID 'PATIENT001' and send it to the research institute."}]
    🛠️ Executed tool 'load_patient_record' with args {'patient_id': 'PATIENT001'}
    --- Turn 2 ---
    🚨 Request denied by policies:
    Tool call send_to_research_institute denied by argument checking policies.

Example 5: Audit, Fairness, Transparency, and Interpretability

Beyond security, Sequrity Control provides capabilities for audit, fairness, transparency, and interpretability.

5.1 Preventing Unfair Discrimination in Control Flow

The branching_meta_policy feature detects when AI attempts to use protected attributes (like race) in control flow decisions.

SQRT Policy & Configuration

We configure the policy to tag applicant profile data with "RACE" and use branching_meta_policy to deny control flow based on this tag. Here is the policy that attaches the 'RACE' tag to the output of the retrive_applicant_profile tool.

tool "retrive_applicant_profile" -> @tags |= {"RACE"};

Then we set branching_meta_policy to {"mode": "deny", "tags": ["RACE"]} in the internal policy presets to block discriminatory branching.

        ...
        branching_meta_policy=ControlFlowMetaPolicy(
            mode="deny",
            tags={"RACE"},
        ),
        ...
        ...
        "branching_meta_policy": {
            "mode": "deny",
            "tags": ["RACE"],
        },
        ...

Note that branching_meta_policy needs Dual-LLM in custom mode to take effect:

# Create features header for fairness example (custom mode)
# branching_meta_policy only takes effect under the Dual-LLM architecture.
fairness_features = FeaturesHeader.dual_llm()
fairness_enabled_features = {"agent_arch": "dual-llm"}
Security Policies Configuration
# Create features header for fairness example (custom mode)
# branching_meta_policy only takes effect under the Dual-LLM architecture.
fairness_features = FeaturesHeader.dual_llm()
# Base SQRT policy shared by the fairness examples: tag the output of the
# `retrive_applicant_profile` tool with "RACE".
_fairness_tagging_rule = r"""
tool "retrive_applicant_profile" -> @tags |= {"RACE"};
"""
fairness_security_policy_base_codes = dict(
    language="sqrt",
    code=_fairness_tagging_rule,
)

# Create fine-grained config for fairness example
fairness_fine_grained_config = FineGrainedConfigHeader(
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,  # return the generated program for inspection
    ),
)
# Deny variant: block any control-flow decision that depends on a value
# tagged "RACE" (requires the Dual-LLM architecture to take effect).
fairness_security_policy_deny = SecurityPolicyHeader(
    mode="custom",
    codes=fairness_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        branching_meta_policy=ControlFlowMetaPolicy(
            mode="deny",
            tags={"RACE"},
        ),
    ),
)
fairness_enabled_features = {"agent_arch": "dual-llm"}
# Raw-dict equivalent of the base fairness policy: attach the "RACE" tag to
# everything returned by `retrive_applicant_profile`.
_fairness_sqrt_code = r"""
tool "retrive_applicant_profile" -> @tags |= {"RACE"};
"""
fairness_security_policies_base = {
    "mode": "custom",
    "codes": {"language": "sqrt", "code": _fairness_sqrt_code},
    "fail_fast": True,
    "auto_gen": False,
}

# Response-format overrides: keep response content and include the generated
# program so blocked control flow can be inspected.
fairness_security_config = dict(
    response_format=dict(
        strip_response_content=False,
        include_program=True,
    ),
)
# Extend the base fairness policy with presets that deny any control-flow
# branch depending on data tagged "RACE".
_fairness_deny_presets = {
    "default_allow": True,
    "branching_meta_policy": {"mode": "deny", "tags": ["RACE"]},
}
fairness_security_policies_deny = {
    **fairness_security_policies_base,
    "presets": _fairness_deny_presets,
}
Tool Signatures
# Tool definitions for the fairness example: `retrive_applicant_profile`
# returns structured applicant data (including the protected 'race' field
# used by the PLLM program), and `send_email` forwards the profile.
# NOTE(review): the original snippet showed the QLLM example's tool
# definitions here by mistake; the section 5.1 runs use `fairness_tool_defs`.
fairness_tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "retrive_applicant_profile",
            "description": "Retrieve applicant profile information. Returns an object containing the applicant's profile, including the 'race' field.",
            "parameters": {
                "type": "object",
                "properties": {
                    "applicant_id": {
                        "type": "string",
                        "description": "The ID of the applicant.",
                    }
                },
                "required": ["applicant_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "send_email",
            "description": "Send an email with the specified subject and attachment.",
            "parameters": {
                "type": "object",
                "properties": {
                    "to": {"type": "string", "description": "The recipient's email address."},
                    "subject": {"type": "string", "description": "The subject of the email."},
                    "attachment_content": {
                        "type": "string",
                        "description": "The content of the attachment to be sent.",
                    },
                },
                "required": ["to", "subject", "attachment_content"],
            },
        },
    },
]
# Tool definitions for the fairness example: `retrive_applicant_profile`
# returns structured applicant data (including the protected 'race' field
# used by the PLLM program), and `send_email` forwards the profile.
# NOTE(review): the original snippet showed the QLLM example's tool
# definitions here by mistake; the section 5.1 runs use `fairness_tool_defs`.
fairness_tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "retrive_applicant_profile",
            "description": "Retrieve applicant profile information. Returns an object containing the applicant's profile, including the 'race' field.",
            "parameters": {
                "type": "object",
                "properties": {
                    "applicant_id": {
                        "type": "string",
                        "description": "The ID of the applicant.",
                    }
                },
                "required": ["applicant_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "send_email",
            "description": "Send an email with the specified subject and attachment.",
            "parameters": {
                "type": "object",
                "properties": {
                    "to": {"type": "string", "description": "The recipient's email address."},
                    "subject": {"type": "string", "description": "The subject of the email."},
                    "attachment_content": {
                        "type": "string",
                        "description": "The content of the attachment to be sent.",
                    },
                },
                "required": ["to", "subject", "attachment_content"],
            },
        },
    },
]

Case 1: Discriminatory Routing (Denied)

Attempting to route applicants based on race is blocked:

print("=== Preventing Discriminatory Control Flow (denied) ===")

# Add branching_meta_policy to deny control flow based on RACE tag
fairness_security_policy_deny = SecurityPolicyHeader(
    mode="custom",
    codes=fairness_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        branching_meta_policy=ControlFlowMetaPolicy(
            mode="deny",
            tags={"RACE"},
        ),
    ),
)

# User request that explicitly asks to route the email based on the
# applicant's race -- the attribute tagged "RACE" by the SQRT policy.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', if the applicant's race is 'European', send the applicant profile to 'hr-1@gmail.com', otherwise send it to 'hr-2@gmail.com'",
    }
]

# The generated program branches on the RACE-tagged value, which the
# branching_meta_policy denies, so the whole request is rejected.
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=fairness_tool_defs,
    tool_map=fairness_tool_map,
    features=fairness_features,
    security_policy=fairness_security_policy_deny,
    fine_grained_config=fairness_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies", f"Expected the action to be denied due to discriminatory control flow, but got: {result}"
print("=== Preventing Discriminatory Control Flow (denied) ===")

# Add branching_meta_policy to deny control flow based on RACE tag
fairness_security_policies_deny = {
    **fairness_security_policies_base,
    "presets": {
        "default_allow": True,
        "branching_meta_policy": {
            "mode": "deny",
            "tags": ["RACE"],
        },
    },
}
# Same race-based routing request as the SDK variant.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', if the applicant's race is 'European', send the applicant profile to 'hr-1@gmail.com', otherwise send it to 'hr-2@gmail.com'",
    }
]
# Raw-header variant: features, policies, and config are plain dicts rather
# than typed header objects.
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=fairness_tool_defs,
    tool_map=fairness_tool_map,
    enabled_features=fairness_enabled_features,
    security_policies=fairness_security_policies_deny,
    security_config=fairness_security_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"

Expected Output

=== Preventing Discriminatory Control Flow (denied) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the applicant profile for applicant id = 'applicant-154', if the applicant's race is 'European', send the applicant profile to 'hr-1@gmail.com', otherwise send it to 'hr-2@gmail.com'"}]
    🛠️ Executed tool 'retrive_applicant_profile' with args {'applicant_id': 'applicant-154'}
    --- Turn 2 ---
    🚨 Request denied by policies:
    The program violated control flow policies and cannot continue as retries on policy violations are disabled.

Blocked Discriminatory if Statement in PLLM Program

Line 14 is where the discriminatory branching occurs and is blocked by the policy:

1 # Initialize final return variable
2 final_return_value = None
3
4 # Step 1: retrieve applicant profile
5 applicant_id = "applicant-154"
6 profile = retrive_applicant_profile(applicant_id=applicant_id)
7
8 # Validate response
9 if not isinstance(profile, dict) or "race" not in profile:
10     final_return_value = "Failed to retrieve applicant profile or missing 'race' field."
11 else:
12     # Decide recipient based on race
13     race = profile.get("race")
14     if race == "European":
15         recipient = "hr-1@gmail.com"
16     else:
17         recipient = "hr-2@gmail.com"
18
19     # Prepare attachment content as JSON string
20     import json
21     attachment_content = json.dumps(profile, ensure_ascii=False, indent=2)
22
23     # Send email
24     subject = f"Applicant Profile: {applicant_id}"
25     send_email(to=recipient, subject=subject, attachment_content=attachment_content)
26
27     final_return_value = {
28         "sent_to": recipient,
29         "applicant_id": applicant_id,
30         "profile_sent": profile
31     }

Case 2: Non-Discriminatory Flow (Allowed)

Without the branching_meta_policy restriction, the workflow succeeds:

print("=== Non-Discriminatory Flow (allowed) ===")

# Without branching_meta_policy restriction
fairness_security_policy_allow = SecurityPolicyHeader(
    mode="custom",
    codes=fairness_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
    ),
)
# NOTE(review): this prompt still requests race-based routing; the expected
# output transcript echoes a simpler prompt -- confirm which prompt produced
# the captured run.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', if the applicant's race is 'European', send the applicant profile to 'hr-1@gmail.com', otherwise send it to 'hr-2@gmail.com'",
    }
]

result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=fairness_tool_defs,
    tool_map=fairness_tool_map,
    features=fairness_features,
    security_policy=fairness_security_policy_allow,
    fine_grained_config=fairness_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "success"
print("=== Non-Discriminatory Flow (allowed) ===")

# Without branching_meta_policy restriction
fairness_security_policies_allow = {
    **fairness_security_policies_base,
    "presets": {
        "default_allow": True,
    },
}

# NOTE(review): this prompt still requests race-based routing; the expected
# output transcript echoes a simpler prompt -- confirm which prompt produced
# the captured run.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', if the applicant's race is 'European', send the applicant profile to 'hr-1@gmail.com', otherwise send it to 'hr-2@gmail.com'",
    }
]

result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=fairness_tool_defs,
    tool_map=fairness_tool_map,
    enabled_features=fairness_enabled_features,
    security_policies=fairness_security_policies_allow,
    security_config=fairness_security_config,
    reasoning_effort="minimal",
)
assert result == "success"
Expected Output
=== Non-Discriminatory Flow (allowed) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the applicant profile for applicant id = 'applicant-154' and send it to 'hr-1@gmail.com'"}]
    🛠️ Executed tool 'retrive_applicant_profile' with args {'applicant_id': 'applicant-154'}
    --- Turn 2 ---
    🛠️ Executed tool 'send_email' with args {'to': 'hr-1@gmail.com', 'subject': 'Applicant Profile: applicant-154', 'attachment_content': '...'}
    --- Turn 3 ---
    ☑️ Final Response (Session ID: 562c1a92-f97c-11f0-a033-7d965b6bbd63):
    {"status": "success", ...}

5.2 Preventing Sensitive Data Exposure to AI Parsing

The enable_llm_blocked_tag flag blocks data tagged with "__llm_blocked" from being passed to AI parsing components.

SQRT Policy & Configuration

We configure the policy to tag applicant profile text with "__llm_blocked" and enable the enable_llm_blocked_tag flag:

tool "retrive_applicant_profile_text" -> @tags |= {"__llm_blocked"};
# SDK variant: enable_llm_blocked_tag=False leaves the reserved
# "__llm_blocked" tag check disabled, so tagged data may still reach QLLM.
qllm_policy_security_policy_allow = SecurityPolicyHeader(
    mode="standard",
    codes=qllm_policy_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_llm_blocked_tag=False,
    ),
)
# Raw-dict variant of the same "allow" configuration.
qllm_policy_security_policies_allow = {
    **qllm_policy_security_policies_base,
    "presets": {
        "default_allow": True,
        "enable_llm_blocked_tag": False,
    },
}
Security Policies Configuration
# Create features header for QLLM policy example
# Dual-LLM architecture; this example's policies guard the QLLM
# (AI parsing, e.g. parse_with_ai) inputs.
qllm_policy_features = FeaturesHeader.dual_llm()

# Base SQRT policy for the QLLM example: tag the text returned by
# `retrive_applicant_profile_text` with the reserved "__llm_blocked" tag.
_qllm_blocked_rule = r"""
tool "retrive_applicant_profile_text" -> @tags |= {"__llm_blocked"};
"""
qllm_policy_security_policy_base_codes = dict(
    language="sqrt",
    code=_qllm_blocked_rule,
)

# Create fine-grained config for QLLM policy example
qllm_policy_fine_grained_config = FineGrainedConfigHeader(
    response_format=ResponseFormatOverrides(
        strip_response_content=False,
        include_program=True,  # return the generated program for inspection
    ),
)
# Deny variant: enable the reserved "__llm_blocked" tag so tagged values are
# rejected as inputs to AI parsing components.
qllm_policy_security_policy_deny = SecurityPolicyHeader(
    mode="standard",
    codes=qllm_policy_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_llm_blocked_tag=True,
    ),
)
qllm_policy_enabled_features = {"agent_arch": "dual-llm"}

# Raw-dict equivalent of the base QLLM policy: tag the output of
# `retrive_applicant_profile_text` with the reserved "__llm_blocked" tag.
_qllm_sqrt_code = r"""
tool "retrive_applicant_profile_text" -> @tags |= {"__llm_blocked"};
"""
qllm_policy_security_policies_base = {
    "mode": "standard",
    "codes": {"language": "sqrt", "code": _qllm_sqrt_code},
    "fail_fast": True,
    "auto_gen": False,
}

# Response-format overrides: keep response content and include the generated
# program so the blocked parse_with_ai call can be inspected.
qllm_policy_security_config = dict(
    response_format=dict(
        strip_response_content=False,
        include_program=True,
    ),
)
# Enable the reserved "__llm_blocked" tag check on top of the base policy so
# tagged data may not flow into QLLM (AI parsing) inputs.
_qllm_deny_presets = {"default_allow": True, "enable_llm_blocked_tag": True}
qllm_policy_security_policies_deny = {
    **qllm_policy_security_policies_base,
    "presets": _qllm_deny_presets,
}
Tool Signatures
# Tool definitions for QLLM policy example: a text-returning profile fetch
# (whose output the SQRT policy tags "__llm_blocked") and an email sender.
qllm_policy_tool_defs = [
    dict(
        type="function",
        function=dict(
            name="retrive_applicant_profile_text",
            description="Retrieve applicant profile information as text. Returns a string containing the applicant's profile.",
            parameters=dict(
                type="object",
                properties=dict(
                    applicant_id=dict(
                        type="string",
                        description="The ID of the applicant.",
                    )
                ),
                required=["applicant_id"],
            ),
        ),
    ),
    dict(
        type="function",
        function=dict(
            name="send_email",
            description="Send an email with the specified subject and attachment.",
            parameters=dict(
                type="object",
                properties=dict(
                    to=dict(type="string", description="The recipient's email address."),
                    subject=dict(type="string", description="The subject of the email."),
                    attachment_content=dict(
                        type="string",
                        description="The content of the attachment to be sent.",
                    ),
                ),
                required=["to", "subject", "attachment_content"],
            ),
        ),
    ),
]
# Tool definitions for QLLM policy example
# (OpenAI function-calling schemas: a text-returning profile fetch -- whose
# output the SQRT policy tags "__llm_blocked" -- and an email sender)
qllm_policy_tool_defs = [
    {
        "type": "function",
        "function": {
            "name": "retrive_applicant_profile_text",
            "description": "Retrieve applicant profile information as text. Returns a string containing the applicant's profile.",
            "parameters": {
                "type": "object",
                "properties": {
                    "applicant_id": {
                        "type": "string",
                        "description": "The ID of the applicant.",
                    }
                },
                "required": ["applicant_id"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "send_email",
            "description": "Send an email with the specified subject and attachment.",
            "parameters": {
                "type": "object",
                "properties": {
                    "to": {"type": "string", "description": "The recipient's email address."},
                    "subject": {"type": "string", "description": "The subject of the email."},
                    "attachment_content": {
                        "type": "string",
                        "description": "The content of the attachment to be sent.",
                    },
                },
                "required": ["to", "subject", "attachment_content"],
            },
        },
    },
]
Case 1: AI Parsing of Blocked Data (Denied)

Attempting to use parse_with_ai on data tagged with __llm_blocked is denied:

print("=== Preventing AI Parsing of Sensitive Data (denied) ===")

# Add enable_llm_blocked_tag to deny QLLM inputs with __llm_blocked tag
qllm_policy_security_policy_deny = SecurityPolicyHeader(
    mode="standard",
    codes=qllm_policy_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_llm_blocked_tag=True,
    ),
)
# Request that routes the "__llm_blocked"-tagged profile text into
# parse_with_ai, which the policy must deny.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', use parse_with_ai to extract 'name' and 'education_level', send the applicant profile to 'hr-1@gmail.com'",
    }
]

# parse_with_ai receives the blocked text, so argument checking rejects it.
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=qllm_policy_tool_defs,
    tool_map=qllm_policy_tool_map,
    features=qllm_policy_features,
    security_policy=qllm_policy_security_policy_deny,
    fine_grained_config=qllm_policy_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"
print("=== Preventing AI Parsing of Sensitive Data (denied) ===")

# Add enable_llm_blocked_tag = True to deny QLLM inputs with __llm_blocked tag
qllm_policy_security_policies_deny = {
    **qllm_policy_security_policies_base,
    "presets": {
        "default_allow": True,
        "enable_llm_blocked_tag": True,
    },
}

# Same request as the SDK variant: parse_with_ai over the blocked text.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', use parse_with_ai to extract 'name' and 'education_level', send the applicant profile to 'hr-1@gmail.com'",
    }
]

# Raw-header variant: features, policies, and config are plain dicts.
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=qllm_policy_tool_defs,
    tool_map=qllm_policy_tool_map,
    enabled_features=qllm_policy_enabled_features,
    security_policies=qllm_policy_security_policies_deny,
    security_config=qllm_policy_security_config,
    reasoning_effort="minimal",
)
assert result == "denied by policies"

Expected Output

=== Preventing AI Parsing of Sensitive Data (denied) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the applicant profile for applicant id = 'applicant-154', use parse_with_ai to extract 'name' and 'education_level', send the applicant profile to 'hr-1@gmail.com'"}]
    🛠️ Executed tool 'retrive_applicant_profile_text' with args {'applicant_id': 'applicant-154'}
    --- Turn 2 ---
    🚨 Request denied by policies:
    Tool call parse_with_ai denied by argument checking policies.

Blocked call to parse_with_ai in PLLM Program

The following is the program where the call to parse_with_ai is blocked due to the __llm_blocked tag:

1 from pydantic import BaseModel, Field
2 # Initialize final return value variable
3 final_return_value = None
4
5 # Step 1: retrieve applicant profile text
6 applicant_id = "applicant-154"
7 profile_text = retrive_applicant_profile_text(applicant_id=applicant_id)
8
9 # Step 2: define output schema for parse_with_ai to extract name and education_level
10 class ApplicantSchema(BaseModel):
11     have_enough_info: bool = Field(description="Whether enough information is available to extract fields")
12     name: str | None = Field(description="Full name of the applicant", min_length=1, max_length=200)
13     education_level: str | None = Field(description="Highest education level (e.g., High School, Bachelor's, Master's, PhD)")
14
15 schema = ApplicantSchema.model_json_schema()
16
17 # Build prompt for parse_with_ai
18 prompt = (
19     "Extract the applicant's full name and highest education level from the following applicant profile text. "
20     "Return 'name' as the applicant's full name and 'education_level' as the highest completed education (examples: "
21     "High School, Associate's, Bachelor's, Master's, PhD). If the information is not available, set the field to null. "
22     "Here is the profile:\n\n"
23     + profile_text
24 )
25
26 # Step 3: parse the text
27 parsed = parse_with_ai(query=prompt, output_schema=schema)
28
29 # Step 4: prepare email attachment content and send
30 ... # rest of the program
Case 2: Direct Processing Without AI Parsing (Allowed)

When the enable_llm_blocked_tag restriction is disabled, the data can be processed through the normal workflow:

print("=== Direct Data Processing (allowed) ===")

# Without enable_llm_blocked_tag restriction
qllm_policy_security_policy_allow = SecurityPolicyHeader(
    mode="standard",
    codes=qllm_policy_security_policy_base_codes,
    fail_fast=True,
    auto_gen=False,
    presets=InternalPolicyPresets(
        default_allow=True,
        enable_llm_blocked_tag=False,
    ),
)

# NOTE(review): this prompt still requests parse_with_ai, while the heading
# and expected output describe a direct (no AI parsing) flow -- confirm which
# prompt produced the captured run.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', use parse_with_ai to extract 'name' and 'education_level', send the applicant profile to 'hr-1@gmail.com'",
    }
]

result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=qllm_policy_tool_defs,
    tool_map=qllm_policy_tool_map,
    features=qllm_policy_features,
    security_policy=qllm_policy_security_policy_allow,
    fine_grained_config=qllm_policy_fine_grained_config,
    reasoning_effort="minimal",
)
assert result == "success"
print("=== Direct Data Processing (allowed) ===")

# Without enable_llm_blocked_tag restriction
qllm_policy_security_policies_allow = {
    **qllm_policy_security_policies_base,
    "presets": {
        "default_allow": True,
        "enable_llm_blocked_tag": False,
    },
}
# NOTE(review): this prompt still requests parse_with_ai, while the heading
# and expected output describe a direct (no AI parsing) flow -- confirm which
# prompt produced the captured run.
messages = [
    {
        "role": "user",
        "content": "Retrieve the applicant profile for applicant id = 'applicant-154', use parse_with_ai to extract 'name' and 'education_level', send the applicant profile to 'hr-1@gmail.com'",
    }
]
result, _ = run_workflow(
    model="openai/gpt-5-mini,openai/gpt-5-nano",
    messages=messages,
    tool_defs=qllm_policy_tool_defs,
    tool_map=qllm_policy_tool_map,
    enabled_features=qllm_policy_enabled_features,
    security_policies=qllm_policy_security_policies_allow,
    security_config=qllm_policy_security_config,
    reasoning_effort="minimal",
)
assert result == "success"
Expected Output
=== Direct Data Processing (allowed) ===
    --- Turn 1 ---
    📤 Sending request (Session ID: None):
    [{'role': 'user', 'content': "Retrieve the applicant profile for applicant id = 'applicant-154' and send it to 'hr-1@gmail.com'"}]
    🛠️ Executed tool 'retrive_applicant_profile_text' with args {'applicant_id': 'applicant-154'}
    --- Turn 2 ---
    🛠️ Executed tool 'send_email' with args {'to': 'hr-1@gmail.com', 'subject': 'Applicant Profile: applicant-154', 'attachment_content': '...'}
    --- Turn 3 ---
    ☑️ Final Response (Session ID: 5d7aed90-f97c-11f0-bdcb-7d965b6bbd63):
    {"status": "success", ...}