# Code Repository Analysis
Generated on 2025-04-04 17:15:31.331912

## Repository Summary

- **Extension analyzed**: `.py`
- **Number of files**: 142
- **Root folder**: `.`
- **Total lines of code**: 22908

## Project Structure

```
└── ./
    ├── docs/
    │   └── create_doc_boilerplate.py
    ├── examples/
    │   ├── 01_introduction/
    │   │   ├── 01_simple_example.py
    │   │   ├── 02_typed_output.py
    │   │   ├── 03_tool_and_code_agent.py
    │   │   ├── 04_descriptions.py
    │   │   ├── 05_typed_output2.py
    │   │   ├── 06_simple_hand_off.py
    │   │   └── 07_all_base_concepts.py
    │   ├── 02_concepts/
    │   │   ├── api/
    │   │   │   ├── api_client.py
    │   │   │   └── api_server.py
    │   │   ├── context/
    │   │   │   └── context.py
    │   │   ├── evaluator/
    │   │   │   ├── multi_hops_memory.py
    │   │   │   └── multi_hops_zep.py
    │   │   ├── modules/
    │   │   │   └── use_modules.py
    │   │   ├── router/
    │   │   │   └── router_example.py
    │   │   └── tools/
    │   │       └── azure_tools_example.py
    │   ├── 03_apps/
    │   │   ├── dynamic_apps/
    │   │   │   └── main.py
    │   │   ├── gemini_dev/
    │   │   │   └── main.py
    │   │   ├── roguelike/
    │   │   │   └── game.py
    │   │   └── story_gen/
    │   │       ├── cursor.py
    │   │       └── main.py
    │   ├── 05_documentation/
    │   │   └── getting-started/
    │   │       ├── first_agent_01.py
    │   │       ├── first_agent_02.py
    │   │       └── first_agent_03.py
    │   └── playground/
    │       ├── 02_cook_book/
    │       │   ├── repo_analyzer/
    │       │   │   ├── repo_analyzer.py
    │       │   │   └── repo_analyzer_llm.py
    │       │   ├── save_and_load/
    │       │   │   ├── load_01.py
    │       │   │   ├── load_02.py
    │       │   │   ├── load_03.py
    │       │   │   ├── load_04.py
    │       │   │   ├── save_01.py
    │       │   │   ├── save_02.py
    │       │   │   └── save_03.py
    │       │   ├── flock_without_llms.py
    │       │   ├── long_research_no_handoff.py
    │       │   ├── painting_by_numbers.py
    │       │   ├── project_manager.py
    │       │   └── self_improvement_with_memory.py
    │       ├── hier/
    │       │   ├── her_vis.py
    │       │   └── hier_mem.py
    │       ├── misc/
    │       │   ├── memory.py
    │       │   ├── self_learner.py
    │       │   └── self_learner2.py
    │       └── website/
    │           └── app.py
    ├── scripts/
    │   └── code_collector.py
    ├── src/
    │   └── flock/
    │       ├── cli/
    │       │   ├── constants.py
    │       │   ├── create_agent.py
    │       │   ├── create_flock.py
    │       │   ├── execute_flock.py
    │       │   ├── load_agent.py
    │       │   ├── load_examples.py
    │       │   ├── load_flock.py
    │       │   ├── load_release_notes.py
    │       │   ├── loaded_flock_cli.py
    │       │   ├── manage_agents.py
    │       │   ├── registry_management.py
    │       │   ├── settings.py
    │       │   ├── view_results.py
    │       │   └── yaml_editor.py
    │       ├── core/
    │       │   ├── api/
    │       │   │   ├── ui/
    │       │   │   │   ├── __init__.py
    │       │   │   │   ├── routes.py
    │       │   │   │   └── utils.py
    │       │   │   ├── __init__.py
    │       │   │   ├── endpoints.py
    │       │   │   ├── main.py
    │       │   │   ├── models.py
    │       │   │   └── run_store.py
    │       │   ├── context/
    │       │   │   ├── context.py
    │       │   │   ├── context_manager.py
    │       │   │   └── context_vars.py
    │       │   ├── execution/
    │       │   │   ├── local_executor.py
    │       │   │   └── temporal_executor.py
    │       │   ├── interpreter/
    │       │   │   └── python_interpreter.py
    │       │   ├── logging/
    │       │   │   ├── formatters/
    │       │   │   │   ├── enum_builder.py
    │       │   │   │   ├── theme_builder.py
    │       │   │   │   ├── themed_formatter.py
    │       │   │   │   └── themes.py
    │       │   │   ├── span_middleware/
    │       │   │   │   └── baggage_span_processor.py
    │       │   │   ├── telemetry_exporter/
    │       │   │   │   ├── base_exporter.py
    │       │   │   │   ├── file_exporter.py
    │       │   │   │   └── sqlite_exporter.py
    │       │   │   ├── __init__.py
    │       │   │   ├── logging.py
    │       │   │   ├── telemetry.py
    │       │   │   └── trace_and_logged.py
    │       │   ├── mixin/
    │       │   │   ├── dspy_integration.py
    │       │   │   └── prompt_parser.py
    │       │   ├── serialization/
    │       │   │   ├── __init__.py
    │       │   │   ├── callable_registry.py
    │       │   │   ├── json_encoder.py
    │       │   │   ├── secure_serializer.py
    │       │   │   ├── serializable.py
    │       │   │   └── serialization_utils.py
    │       │   ├── tools/
    │       │   │   ├── dev_tools/
    │       │   │   │   └── github.py
    │       │   │   ├── azure_tools.py
    │       │   │   ├── basic_tools.py
    │       │   │   ├── llm_tools.py
    │       │   │   └── markdown_tools.py
    │       │   ├── util/
    │       │   │   ├── cli_helper.py
    │       │   │   ├── hydrator.py
    │       │   │   └── input_resolver.py
    │       │   ├── __init__.py
    │       │   ├── flock.py
    │       │   ├── flock_agent.py
    │       │   ├── flock_evaluator.py
    │       │   ├── flock_factory.py
    │       │   ├── flock_module.py
    │       │   ├── flock_registry.py
    │       │   └── flock_router.py
    │       ├── evaluators/
    │       │   ├── declarative/
    │       │   │   └── declarative_evaluator.py
    │       │   ├── memory/
    │       │   │   ├── azure_search_evaluator.py
    │       │   │   └── memory_evaluator.py
    │       │   ├── natural_language/
    │       │   │   └── natural_language_evaluator.py
    │       │   └── zep/
    │       │       └── zep_evaluator.py
    │       ├── modules/
    │       │   ├── azure-search/
    │       │   │   └── azure_search_module.py
    │       │   ├── callback/
    │       │   │   └── callback_module.py
    │       │   ├── memory/
    │       │   │   ├── memory_module.py
    │       │   │   ├── memory_parser.py
    │       │   │   └── memory_storage.py
    │       │   ├── output/
    │       │   │   └── output_module.py
    │       │   ├── performance/
    │       │   │   └── metrics_module.py
    │       │   └── zep/
    │       │       └── zep_module.py
    │       ├── platform/
    │       │   ├── docker_tools.py
    │       │   └── jaeger_install.py
    │       ├── routers/
    │       │   ├── agent/
    │       │   │   ├── __init__.py
    │       │   │   ├── agent_router.py
    │       │   │   └── handoff_agent.py
    │       │   ├── default/
    │       │   │   ├── __init__.py
    │       │   │   └── default_router.py
    │       │   ├── llm/
    │       │   │   ├── __init__.py
    │       │   │   └── llm_router.py
    │       │   └── __init__.py
    │       ├── workflow/
    │       │   ├── __init__.py
    │       │   ├── activities.py
    │       │   ├── agent_activities.py
    │       │   ├── temporal_setup.py
    │       │   └── workflow.py
    │       ├── __init__.py
    │       └── config.py
    └── tests/
        ├── serialization/
        │   ├── __init__.py
        │   └── test_yaml_serialization.py
        └── __init__.py
```

## Key Files

These files appear to be central to the codebase based on dependencies and naming conventions:

### src/flock/core/flock.py

- **Lines**: 602
- **Last modified**: 2025-04-04 17:11:47
- **Used by**: 104 files

**Description**: High-level orchestrator for creating and executing agents.

**Classes**:
- `Flock`: 12 methods

**Content**:
```py
# src/flock/core/flock.py
"""High-level orchestrator for creating and executing agents."""

from __future__ import annotations  # Ensure forward references work

import asyncio
import os
import uuid
from pathlib import Path
from typing import TYPE_CHECKING, Any, TypeVar

from box import Box
from opentelemetry import trace
from opentelemetry.baggage import get_baggage, set_baggage

# Pydantic and OpenTelemetry
from pydantic import BaseModel, Field  # Using Pydantic directly now

# Flock core components & utilities
from flock.config import TELEMETRY
from flock.core.context.context import FlockContext
from flock.core.context.context_manager import initialize_context
from flock.core.execution.local_executor import run_local_workflow
from flock.core.execution.temporal_executor import run_temporal_workflow
from flock.core.logging.logging import LOGGERS, get_logger, get_module_loggers

# Import FlockAgent using TYPE_CHECKING to avoid circular import at runtime
if TYPE_CHECKING:
    from flock.core.flock_agent import FlockAgent
else:
    # Provide a forward reference string or Any for runtime if FlockAgent is used in hints here
    FlockAgent = "FlockAgent"  # Forward reference string for Pydantic/runtime

# Registry and Serialization
from flock.core.flock_registry import (
    get_registry,  # Use the unified registry
)
from flock.core.serialization.serializable import (
    Serializable,  # Import Serializable base
)

# NOTE: Flock.to_dict/from_dict primarily orchestrates agent serialization.
# It doesn't usually need serialize_item/deserialize_item directly,
# relying on FlockAgent's implementation instead.
# from flock.core.serialization.serialization_utils import serialize_item, deserialize_item
# CLI Helper (if still used directly, otherwise can be removed)
from flock.core.util.cli_helper import init_console

# Cloudpickle for fallback/direct serialization if needed
try:
    import cloudpickle

    PICKLE_AVAILABLE = True
except ImportError:
    PICKLE_AVAILABLE = False


logger = get_logger("flock")
TELEMETRY.setup_tracing()  # Setup OpenTelemetry
tracer = trace.get_tracer(__name__)
FlockRegistry = get_registry()  # Get the registry instance

# Define TypeVar for generic methods like from_dict
T = TypeVar("T", bound="Flock")


# Inherit from Serializable for YAML/JSON/etc. methods
# Use BaseModel directly for Pydantic features
class Flock(BaseModel, Serializable):
    """High-level orchestrator for creating and executing agent systems.

    Flock manages agent definitions, context, and execution flow, supporting
    both local debugging and robust distributed execution via Temporal.
    It is serializable to various formats like YAML and JSON.
    """

    name: str | None = Field(
        default_factory=lambda: f"flock_{uuid.uuid4().hex[:8]}",
        description="A unique identifier for this Flock instance.",
    )
    model: str | None = Field(
        default="openai/gpt-4o",
        description="Default model identifier to be used for agents if not specified otherwise.",
    )
    description: str | None = Field(
        default=None,
        description="A brief description of the purpose of this Flock configuration.",
    )
    enable_temporal: bool = Field(
        default=False,
        description="If True, execute workflows via Temporal; otherwise, run locally.",
    )
    # --- Runtime Attributes (Excluded from Serialization) ---
    # These are plain annotations, not Pydantic Fields; the attributes are
    # created in __init__ and therefore never appear in model_dump output.
    _agents: dict[str, FlockAgent]  # name -> agent instance managed by this Flock
    _start_agent_name: str | None  # fallback start agent used by run_async when none is given
    _start_input: dict  # fallback input used by run_async when input is None

    # Pydantic v2 model config
    model_config = {
        "arbitrary_types_allowed": True,
        "ignored_types": (
            type(FlockRegistry),
        ),  # Prevent validation issues with registry
        # No need to exclude fields here, handled in to_dict
    }

    def __init__(
        self,
        name: str | None = None,
        model: str | None = "openai/gpt-4o",
        description: str | None = None,
        show_flock_banner: bool = True,
        enable_temporal: bool = False,
        enable_logging: bool
        | list[str] = False,  # Keep logging control at init
        agents: list[FlockAgent] | None = None,  # Allow passing agents at init
        **kwargs,  # Allow extra fields during init if needed, Pydantic handles it
    ):
        """Initialize the Flock orchestrator.

        Args:
            name: Unique identifier; the Pydantic field default generates one when None.
            model: Default model identifier applied to agents that have none.
            description: Human-readable purpose of this configuration.
            show_flock_banner: If True, initialize the console banner.
            enable_temporal: If True, workflows run via Temporal instead of locally.
            enable_logging: True/False to toggle all loggers, or a list of
                logger names to enable selectively.
            agents: Agents to register immediately; non-FlockAgent items are
                skipped with a warning.
            **kwargs: Extra fields forwarded to the Pydantic BaseModel.
        """
        # Initialize Pydantic fields
        super().__init__(
            name=name,
            model=model,
            description=description,
            enable_temporal=enable_temporal,
            **kwargs,  # Pass extra kwargs to Pydantic BaseModel
        )

        # Initialize runtime attributes AFTER super().__init__()
        # (they are not Pydantic fields, so BaseModel does not create them)
        self._agents = {}
        self._start_agent_name = None
        self._start_input = {}

        # Set up logging
        self._configure_logging(enable_logging)

        # Register passed agents
        if agents:
            # Ensure FlockAgent type is available for isinstance check
            # This import might need to be deferred or handled carefully if it causes issues
            from flock.core.flock_agent import FlockAgent as ConcreteFlockAgent

            for agent in agents:
                if isinstance(agent, ConcreteFlockAgent):
                    self.add_agent(agent)
                else:
                    logger.warning(
                        f"Item provided in 'agents' list is not a FlockAgent: {type(agent)}"
                    )

        # Initialize console if needed
        if show_flock_banner:
            init_console()

        # Set Temporal debug environment variable
        self._set_temporal_debug_flag()

        # Ensure session ID exists in baggage
        self._ensure_session_id()

        logger.info(
            "Flock instance initialized",
            model=self.model,
            enable_temporal=self.enable_temporal,
        )

    # --- Keep _configure_logging, _set_temporal_debug_flag, _ensure_session_id ---
    # ... (implementation as before) ...
    def _configure_logging(self, enable_logging: bool | list[str]):
        """Configure logging levels based on the enable_logging flag.

        A bool toggles every known logger; a list enables only the named
        loggers (an empty list disables all). Any other type disables all.
        """
        enabled_loggers: list[str] = []
        is_enabled_globally = False
        if isinstance(enable_logging, list):
            # A non-empty list also flips the global switch on.
            enabled_loggers = enable_logging
            is_enabled_globally = bool(enable_logging)
        elif isinstance(enable_logging, bool):
            is_enabled_globally = enable_logging

        # Core loggers: enabled when globally on or explicitly listed.
        for log_name in LOGGERS:
            get_logger(log_name).enable_logging = (
                is_enabled_globally or log_name in enabled_loggers
            )

        # Already-created module loggers get the same treatment.
        for mod_log in get_module_loggers():
            mod_log.enable_logging = (
                is_enabled_globally or mod_log.name in enabled_loggers
            )

    def _set_temporal_debug_flag(self):
        """Set or remove LOCAL_DEBUG env var based on enable_temporal."""
        if self.enable_temporal:
            # Temporal mode: drop the local-debug marker if present.
            if "LOCAL_DEBUG" in os.environ:
                os.environ.pop("LOCAL_DEBUG")
                logger.debug(
                    "Removed LOCAL_DEBUG environment variable for Temporal execution."
                )
        elif "LOCAL_DEBUG" not in os.environ:
            # Local mode: mark the environment, but never clobber an
            # existing value.
            os.environ["LOCAL_DEBUG"] = "1"
            logger.debug(
                "Set LOCAL_DEBUG environment variable for local execution."
            )

    def _ensure_session_id(self):
        """Ensure a session_id exists in the OpenTelemetry baggage."""
        if get_baggage("session_id"):
            return  # Already present; nothing to do.
        new_id = str(uuid.uuid4())
        set_baggage("session_id", new_id)
        logger.debug(f"Generated new session_id: {new_id}")

    # --- Keep add_agent, agents property, run, run_async ---
    # ... (implementation as before, ensuring FlockAgent type hint is handled) ...
    def add_agent(self, agent: FlockAgent) -> FlockAgent:
        """Adds an agent instance to this Flock configuration."""
        # Deferred import avoids a circular dependency at module load time.
        from flock.core.flock_agent import FlockAgent as _FlockAgent

        # Guard clauses: reject anything that is not a properly named agent.
        if not isinstance(agent, _FlockAgent):
            raise TypeError("Provided object is not a FlockAgent instance.")
        if not agent.name:
            raise ValueError("Agent must have a name.")

        if agent.name in self._agents:
            logger.warning(
                f"Agent '{agent.name}' already exists in this Flock instance. Overwriting."
            )
        self._agents[agent.name] = agent
        # Mirror the agent into the global registry as well.
        FlockRegistry.register_agent(agent)

        # Fall back to the Flock-level default model when the agent has none.
        if agent.model is None and self.model:
            agent.set_model(self.model)
            logger.debug(
                f"Agent '{agent.name}' using Flock default model: {self.model}"
            )
        elif agent.model is None:
            logger.warning(
                f"Agent '{agent.name}' has no model and Flock default model is not set."
            )

        logger.info(f"Agent '{agent.name}' added to Flock.")
        return agent

    @property
    def agents(self) -> dict[str, FlockAgent]:
        """Returns the dictionary of agents managed by this Flock instance.

        Note: this is the live internal mapping, not a copy — mutating the
        returned dict mutates the Flock's agent table directly.
        """
        return self._agents

    def run(
        self,
        start_agent: FlockAgent | str | None = None,
        input: dict | None = None,  # None default avoids a shared mutable {}
        context: FlockContext
        | None = None,  # Allow passing initial context state
        run_id: str = "",
        box_result: bool = True,  # Box the result for attribute-style access
        agents: list[FlockAgent] | None = None,  # Allow adding agents via run
    ) -> Box:
        """Entry point for running an agent system synchronously.

        Thin synchronous wrapper that drives `run_async` to completion.

        Args:
            start_agent: Agent instance or name to start with; may be omitted
                when exactly one agent is registered.
            input: Initial input dict for the start agent; defaults to an
                empty dict. (The previous `input: dict = {}` default was a
                mutable default argument shared across calls.)
            context: Optional pre-populated FlockContext.
            run_id: Identifier for this run; auto-generated when empty.
            box_result: If True, wrap the result dict in a `Box`.
            agents: Additional agents to register before running.

        Returns:
            The workflow result (a `Box` when box_result is True; on failure
            run_async returns an error dict instead).
        """
        return asyncio.run(
            self.run_async(
                start_agent=start_agent,
                # Normalize to {} so the effective behavior matches the old
                # `input: dict = {}` default exactly.
                input={} if input is None else input,
                context=context,
                run_id=run_id,
                box_result=box_result,
                agents=agents,
            )
        )

    async def run_async(
        self,
        start_agent: FlockAgent | str | None = None,
        input: dict | None = None,
        context: FlockContext | None = None,
        run_id: str = "",
        box_result: bool = True,  # Box the result for attribute-style access by default
        agents: list[FlockAgent] | None = None,  # Allow adding agents via run
    ) -> Box:
        """Entry point for running an agent system asynchronously.

        Args:
            start_agent: Agent instance or name to start with. If omitted,
                falls back to the pre-configured `_start_agent_name`, or to
                the sole registered agent when exactly one exists.
            input: Initial input dict; falls back to `_start_input` when None.
            context: Optional pre-populated FlockContext; a fresh one is
                created when None.
            run_id: Identifier for this run; auto-generated when empty.
            box_result: If True, wrap the result dict in a `Box`.
            agents: Additional agents to register before running.

        Returns:
            The workflow result. NOTE: despite the `Box` annotation, a failed
            run returns an error dict {"error": ..., "details": ...} rather
            than raising.

        Raises:
            ValueError: If no start agent can be determined or resolved.
        """
        # This import needs to be here or handled carefully due to potential cycles
        from flock.core.flock_agent import FlockAgent as ConcreteFlockAgent

        with tracer.start_as_current_span("flock.run_async") as span:
            # Add passed agents first
            if agents:
                for agent_obj in agents:
                    if isinstance(agent_obj, ConcreteFlockAgent):
                        self.add_agent(
                            agent_obj
                        )  # Adds to self._agents and registry
                    else:
                        logger.warning(
                            f"Item in 'agents' list is not a FlockAgent: {type(agent_obj)}"
                        )

            # Determine starting agent name
            start_agent_name: str | None = None
            if isinstance(start_agent, ConcreteFlockAgent):
                start_agent_name = start_agent.name
                if start_agent_name not in self._agents:
                    self.add_agent(
                        start_agent
                    )  # Add if instance was passed but not added
            elif isinstance(start_agent, str):
                start_agent_name = start_agent
            else:
                start_agent_name = (
                    self._start_agent_name
                )  # Use pre-configured if any

            # Default to first agent if only one exists and none specified
            if not start_agent_name and len(self._agents) == 1:
                start_agent_name = list(self._agents.keys())[0]
            elif not start_agent_name:
                raise ValueError(
                    "No start_agent specified and multiple agents exist or none are added."
                )

            # Get starting input
            run_input = input if input is not None else self._start_input

            # Log and trace start info
            span.set_attribute("start_agent", start_agent_name)
            span.set_attribute("input", str(run_input))
            span.set_attribute("run_id", run_id)
            span.set_attribute("enable_temporal", self.enable_temporal)
            logger.info(
                f"Initiating Flock run. Start Agent: '{start_agent_name}'. Temporal: {self.enable_temporal}."
            )

            try:
                # Resolve start agent instance from internal dict
                resolved_start_agent = self._agents.get(start_agent_name)
                if not resolved_start_agent:
                    # Maybe it's only in the global registry? (Less common)
                    resolved_start_agent = FlockRegistry.get_agent(
                        start_agent_name
                    )
                    if not resolved_start_agent:
                        raise ValueError(
                            f"Start agent '{start_agent_name}' not found in Flock instance or registry."
                        )
                    else:
                        # If found globally, add it to this instance for consistency during run
                        self.add_agent(resolved_start_agent)

                # Create or use provided context
                run_context = context if context else FlockContext()
                if not run_id:
                    run_id = f"flockrun_{uuid.uuid4().hex[:8]}"
                set_baggage("run_id", run_id)  # Ensure run_id is in baggage

                # Initialize context with the effective model: Flock default,
                # else the start agent's model, else a sentinel string.
                initialize_context(
                    run_context,
                    start_agent_name,
                    run_input,
                    run_id,
                    not self.enable_temporal,
                    self.model
                    or resolved_start_agent.model
                    or "default-model-missing",  # Pass effective model
                )

                # Execute workflow
                logger.info(
                    "Starting agent execution",
                    agent=start_agent_name,
                    enable_temporal=self.enable_temporal,
                )

                if not self.enable_temporal:
                    result = await run_local_workflow(
                        run_context, box_result=False
                    )  # Get raw dict
                else:
                    result = await run_temporal_workflow(
                        run_context, box_result=False
                    )  # Get raw dict

                span.set_attribute("result.type", str(type(result)))
                # Avoid overly large results in trace attributes
                result_str = str(result)
                if len(result_str) > 1000:
                    result_str = result_str[:1000] + "... (truncated)"
                span.set_attribute("result.preview", result_str)

                # Optionally box result before returning
                if box_result:
                    try:
                        from box import Box

                        logger.debug("Boxing final result.")
                        return Box(result)
                    except ImportError:
                        logger.warning(
                            "Box library not installed, returning raw dict. Install with 'pip install python-box'"
                        )
                        return result
                else:
                    return result

            except Exception as e:
                logger.error(f"Flock run failed: {e}", exc_info=True)
                span.record_exception(e)
                span.set_status(trace.Status(trace.StatusCode.ERROR, str(e)))
                # Depending on desired behavior, either raise or return an error dict
                # raise # Option 1: Let the exception propagate
                return {
                    "error": str(e),
                    "details": "Flock run failed.",
                }  # Option 2: Return error dict

    # --- ADDED Serialization Methods ---

    def to_dict(self) -> dict[str, Any]:
        """Convert Flock instance to dictionary representation.

        Pydantic dumps the declared fields; agents are serialized one by one
        via their own `to_dict`, and any agent that fails to serialize is
        logged and omitted from the output.
        """
        logger.debug("Serializing Flock instance to dict.")
        # Runtime-only attributes (_agents, _start_*) are not Pydantic
        # fields, so model_dump never includes them.
        data = self.model_dump(mode="json", exclude_none=True)

        serialized_agents: dict[str, Any] = {}
        for agent_name, agent in self._agents.items():
            try:
                # Each agent owns its serialization logic.
                serialized_agents[agent_name] = agent.to_dict()
            except Exception as e:
                logger.error(
                    f"Failed to serialize agent '{agent_name}' within Flock: {e}"
                )
        data["agents"] = serialized_agents
        return data

    @classmethod
    def from_dict(cls: type[T], data: dict[str, Any]) -> T:
        """Create Flock instance from dictionary representation.

        Args:
            data: Mapping produced by `to_dict` (or compatible). Treated as
                read-only: neither the mapping nor its nested agent dicts are
                mutated (previously `data.pop(...)` and `setdefault` altered
                the caller's dict in place).

        Returns:
            A new Flock instance with all successfully deserialized agents
            added; agents that fail to deserialize are logged and skipped.

        Raises:
            ValueError: If the base Flock fields fail Pydantic validation.
        """
        logger.debug(
            f"Deserializing Flock from dict. Provided keys: {list(data.keys())}"
        )

        # Ensure FlockAgent is importable for type checking later
        try:
            from flock.core.flock_agent import FlockAgent as ConcreteFlockAgent
        except ImportError:
            logger.error(
                "Cannot import FlockAgent, deserialization may fail for agents."
            )
            ConcreteFlockAgent = Any  # Fallback

        # Read agent data without mutating the caller's dict. The "agents"
        # key is not a Pydantic model field, so the filter below drops it
        # from init_data automatically.
        agents_data = data.get("agents", {})

        # Create Flock instance using Pydantic constructor for basic fields
        try:
            # Pass only fields defined in Flock's Pydantic model
            init_data = {k: v for k, v in data.items() if k in cls.model_fields}
            flock_instance = cls(**init_data)
        except Exception as e:
            logger.error(
                f"Pydantic validation/init failed for Flock: {e}", exc_info=True
            )
            raise ValueError(
                f"Failed to initialize Flock from dict: {e}"
            ) from e

        # Deserialize and add agents AFTER Flock instance exists
        for name, agent_data in agents_data.items():
            try:
                # Carry the dict key in as the agent name without mutating
                # the caller's nested dict; an existing "name" key wins.
                agent_payload = {"name": name, **agent_data}
                # Use FlockAgent's from_dict method
                agent_instance = ConcreteFlockAgent.from_dict(agent_payload)
                flock_instance.add_agent(
                    agent_instance
                )  # Adds to _agents and registers
            except Exception as e:
                logger.error(
                    f"Failed to deserialize or add agent '{name}' during Flock deserialization: {e}",
                    exc_info=True,
                )
                # Skip the failing agent; the remaining agents still load.

        logger.info("Successfully deserialized Flock instance.")
        return flock_instance

    # --- API Start Method ---
    def start_api(
        self,
        host: str = "127.0.0.1",
        port: int = 8344,
        server_name: str = "Flock API",
        create_ui: bool = False,
    ) -> None:
        """Start a REST API server for this Flock instance."""
        # Deferred import keeps fastapi/uvicorn an optional dependency.
        try:
            from flock.core.api import FlockAPI
        except ImportError:
            logger.error(
                "API components not found. Cannot start API. "
                "Ensure 'fastapi' and 'uvicorn' are installed."
            )
            return

        ui_state = "with UI" if create_ui else "without UI"
        logger.info(
            f"Preparing to start API server on {host}:{port} {ui_state}"
        )
        # Hand this Flock instance to the API wrapper and serve.
        FlockAPI(self).start(
            host=host, port=port, server_name=server_name, create_ui=create_ui
        )

    # --- CLI Start Method ---
    def start_cli(
        self,
        server_name: str = "Flock CLI",
        show_results: bool = False,
        edit_mode: bool = False,
    ) -> None:
        """Start a CLI interface for this Flock instance.

        Loads the CLI with the current Flock instance pre-loaded, so users
        can execute, edit, or manage agents from the existing configuration.

        Args:
            server_name: Optional name for the CLI interface
            show_results: Whether to initially show results of previous runs
            edit_mode: Whether to open directly in edit mode
        """
        # Deferred import avoids circular imports with the CLI package.
        try:
            from flock.cli.loaded_flock_cli import start_loaded_flock_cli
        except ImportError:
            logger.error(
                "CLI components not found. Cannot start CLI. "
                "Ensure the CLI modules are properly installed."
            )
            return

        agent_count = len(self._agents)
        logger.info(
            f"Starting CLI interface with loaded Flock instance ({agent_count} agents)"
        )

        # Hand the current Flock instance over to the CLI.
        start_loaded_flock_cli(
            flock=self,
            server_name=server_name,
            show_results=show_results,
            edit_mode=edit_mode,
        )

    # --- Static Method Loaders (Keep for convenience) ---
    @staticmethod
    def load_from_file(file_path: str) -> Flock:
        """Load a Flock instance from various file formats (detects type).

        The format is chosen from the file extension: .yaml/.yml, .json,
        .msgpack, or .pkl (requires cloudpickle).
        """
        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"Flock file not found: {file_path}")

        suffix = path.suffix
        # Dispatch on extension, one guard clause per supported format.
        if suffix in (".yaml", ".yml"):
            return Flock.from_yaml_file(path)
        if suffix == ".json":
            return Flock.from_json(path.read_text())
        if suffix == ".msgpack":
            return Flock.from_msgpack_file(path)
        if suffix == ".pkl":
            if not PICKLE_AVAILABLE:
                raise RuntimeError(
                    "Cannot load Pickle file: cloudpickle not installed."
                )
            return Flock.from_pickle_file(path)
        raise ValueError(
            f"Unsupported file extension: {suffix}. Use .yaml, .json, .msgpack, or .pkl."
        )
```

### examples/03_apps/story_gen/main.py

- **Lines**: 157
- **Last modified**: 2025-04-02 17:29:19
- **Used by**: 2 files

**Classes**:
- `Scene`: 0 methods
- `Character`: 0 methods
- `Chapter`: 0 methods
- `Prompt`: 0 methods
- `Issue`: 0 methods
- `ComicBookSeries`: 0 methods
- `PageLayout`: 0 methods
- `IssueLayout`: 0 methods
- `Story`: 0 methods
- `StoryBible`: 0 methods

**Content**:
```py
from typing import Optional
from pydantic import BaseModel, Field
from flock.core import FlockFactory, Flock, flock_registry
from flock.core.flock_registry import flock_type
from flock.routers.default.default_router import DefaultRouter, DefaultRouterConfig

# Module-level handle to the shared flock registry singleton.
FlockRegistry = flock_registry.get_registry()

class Scene(BaseModel):
    """A single scene within a chapter: its setting, dramatic goal, conflict, and beats."""

    title: str
    setting: str = Field(..., description="Setting of the scene")
    goal: str = Field(..., description="Goal of the scene")
    conflict: str = Field(..., description="Conflict of the scene")
    outcome: str = Field(..., description="Outcome of the scene")
    characters_involved: list[str] = Field(..., description="Name of characters/entities involved in the scene")
    story_beats: list[str] = Field(..., description="Story beats of the scene")


class Character(BaseModel):
    """A story character or entity, including an image-generation prompt for visual consistency."""

    name: str = Field(..., description="Name of the character")
    role: str = Field(..., description="Role of the character")
    age: str = Field(..., description="Age of the character")
    appearance: str = Field(..., description="Appearance of the character")
    image_prompt: str = Field(..., description="Very detailed image prompt for image generation to represent the character")
    personality_traits: list[str] = Field(..., description="Personality traits of the character")
    backstory: str = Field(..., description="Backstory of the character")
    motivations: str = Field(..., description="Motivations of the character")
    weaknesses: str = Field(..., description="Weaknesses of the character")
    character_arc: str = Field(..., description="How the character evolves throughout the story")
    
class Chapter(BaseModel):
    """A numbered chapter of the story, composed of Scene objects."""

    title: str = Field(..., description="Title of the chapter")
    chapter_number: int = Field(..., description="Chapter number of the chapter")
    purpose: str = Field(..., description="Purpose of the chapter")
    summary: str = Field(..., description="Key events or chapter summary")
    scenes: list[Scene] = Field(..., description="Scenes of the chapter")
    


########################################################

class Prompt(BaseModel):
    """A titled, detailed image-generation prompt (e.g. for concept art)."""

    prompt: str = Field(..., description="Detailed Prompt for image generation")
    title: str = Field(..., description="Title of the prompt")
    
# Define the whole comic book series as a whole
    
class Issue(BaseModel):
    """A single comic-book issue: which story scenes it visualizes and its page/panel counts."""

    title: str = Field(..., description="Title of the issue")
    issue_number: int = Field(..., description="Issue number of the issue")
    issue_description: str = Field(..., description="Description/Summary of the issue")
    issue_scenes: dict[int,str] = Field(..., description="Scenes of the story the issue visualizes. Key is the page number and value is the scene title as defined in the story chapters.")
    issue_cover_image_prompt: str = Field(..., description="Cover image prompt for the issue")
    number_of_pages: int = Field(..., description="Number of pages in the issue")
    number_of_panels: int = Field(..., description="Number of panels in the issue")
    linked_concept_art_prompts: list[str] = Field(..., description="Concept art prompts that are linked to the issue. The prompts are linked to the issue by the title of the prompt.")
    
class ComicBookSeries(BaseModel):
    """The comic-book series as a whole: its issues plus series-wide concept-art prompts."""

    title: str = Field(..., description="Title of the comic book series")
    issues: list[Issue] = Field(..., description="Issues of the comic book series")
    concept_art_prompts: list[Prompt] = Field(..., description="Concept art prompts for the comic book series. Includes character concept art, setting concept art, etc. Everything that needs consistency across the series.")
    
    
########################################################
    
class PageLayout(BaseModel):
    """The panel layout of one page of one issue, tied back to a story scene by title."""

    issue_number: int = Field(..., description="Issue number of the page layout")
    page_number: int = Field(..., description="Page number of the page layout")
    amount_of_panels: int = Field(..., description="Amount of panels on the page")
    layout_description: str = Field(..., description="Description of the panel layout of the page")
    page_prompt: str = Field(..., description="Prompt for the page")
    story_scene_title: str = Field(..., description="Title of the story scene that is depicted in the page")
    
class IssueLayout(Issue):
    """An Issue extended with concrete per-page layouts."""

    page_layouts: list[PageLayout] = Field(..., description="Page layouts for the issue")
    
    
@flock_type
class Story(BaseModel):
    """Top-level story artifact produced by the story agent.

    Registered with the flock registry via @flock_type so it can be used
    as a typed agent output.
    """

    title: str
    status: str = Field(default="Idea", description="Idea, Drafting, Revising, Completed")
    genre: list[str] = Field(..., description="Genre(s) of the story")
    tone: str = Field(..., description="Tone of the story")
    themes: list[str] = Field(..., description="Themes of the story")
    central_conflict: str = Field(..., description="Central conflict of the story")
    brief_summary: str = Field(..., description="Brief summary of the story")
    long_summary: str = Field(..., description="Long-form summary of the story.")
    characters: list[Character] = Field(..., description="Important characters and/or entities of the story")
    chapters: list[Chapter] = Field(..., description="All chapters of the story. At least one chapter per act.")
    
@flock_type
class StoryBible(BaseModel):
    """Companion reference for a Story: timeline, worldbuilding, and consistency rules.

    Registered with the flock registry via @flock_type so it can be used
    as a typed agent output.
    """

    timeline: dict[str, str]  = Field(..., description="Timeline of the story")
    worldbuilding_notes: dict[str, str]  = Field(..., description="Worldbuilding notes of the story")
    consistency_rules: list[str]  = Field(..., description="Consistency rules of the story")
    writing_reference: Optional[str] = Field(default=None, description="Writing reference and/or style guidelines")

# Active model; alternatives kept inline for quick switching.
MODEL = "gemini/gemini-2.5-pro-exp-03-25"  # "groq/qwen-qwq-32b"  # "openai/gpt-4o"
flock = Flock(model=MODEL)


# Agent that turns a one-line idea into a full Story plus StoryBible,
# writing the result to file as well.
story_agent = FlockFactory.create_default_agent(
    name="story_agent",
    description="An agent that is a master storyteller",
    input="story_idea: str",
    output="story: Story, story_bible: StoryBible",
    max_tokens=60000,
    write_to_file=True,
)


flock.add_agent(story_agent)
# BUGFIX: the two adjacent string literals were concatenated implicitly with
# no separator, producing "...peculiar story.Absurdist Fiction..." in the
# prompt. A single space is inserted at the join point.
result = flock.run(
    start_agent=story_agent,
    input={
        "story_idea": (
            "In a world right at the cusp between LLMs and AGI some guy is "
            "experiencing the most peculiar story. "
            "Absurdist Fiction, Satire, Technological Comedy. "
            "As much content as possible. At least 10.000 words"
        )
    },
)



# story_agent = FlockFactory.create_default_agent(name="story_agent",
#                                               description="An agent that is a master storyteller",
#                                               input="story_idea: str",
#                                               output="story: Story, story_bible: StoryBible",
#                                               max_tokens=60000,
#                                               write_to_file=True)

# comic_book_series_agent = FlockFactory.create_default_agent(name="comic_book_series_agent",
#                                               description="An agent that is a master comic book writer." 
#                                               "Generates a comic book series based on a story and a story bible.",
#                                               input="story: Story, story_bible: StoryBible",
#                                               output="comic_book_series: ComicBookSeries",
#                                               max_tokens=60000,
#                                               write_to_file=True)

# comic_book_issue_agent = FlockFactory.create_default_agent(name="comic_book_issue_agent",
#                                               description="An agent that is a master comic book writer." 
#                                               "Generates details for each issue of the comic book series.",
#                                               input="comic_book_series: ComicBookSeries",
#                                               output="comic_book_pages: list[PageLayout]",
#                                               max_tokens=60000,
#                                               write_to_file=True)


# story_agent.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=comic_book_series_agent.name))
# comic_book_series_agent.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=comic_book_issue_agent.name))

# flock.add_agent(comic_book_series_agent)	
# flock.add_agent(comic_book_issue_agent)
# flock.start_api(server_name="Storyteller Agent", create_ui=True)

# result = flock.run(start_agent=story_agent, input={'story_idea': 'A story about a young woman who discovers she has the ability to time travel.'}) 
# story_overview = result.story
# story_bible = result.story_bible



# flock.add_agent(comic_book_series_agent)
# result = flock.run(start_agent=comic_book_series_agent, input={'story': story_overview, 'story_bible': story_bible}) 
# comic_book_series = result.comic_book_series
```

### src\flock\core\api\main.py

- **Lines**: 237
- **Last modified**: 2025-04-02 17:29:19

**Description**: Main Flock API server class and setup.

**Classes**:
- `FlockAPI`: 6 methods

**Content**:
```py
# src/flock/core/api/main.py
"""Main Flock API server class and setup."""

from typing import Any

import uvicorn
from fastapi import FastAPI
from fastapi.responses import RedirectResponse

# Flock core imports
from flock.core.flock import Flock
from flock.core.logging.logging import get_logger

from .endpoints import create_api_router

# Import components from the api package
from .run_store import RunStore
from .ui.routes import FASTHTML_AVAILABLE, create_ui_app
from .ui.utils import format_result_to_html, parse_input_spec  # Import UI utils

# Module-level logger scoped to the API entry point.
logger = get_logger("api.main")


class FlockAPI:
    """Coordinates the Flock API server, including endpoints and UI.

    Wraps a Flock instance in a FastAPI application, includes the REST
    router, tracks run results/status in a RunStore, and can optionally
    mount a FastHTML UI under /ui.
    """

    def __init__(self, flock: Flock):
        self.flock = flock
        self.app = FastAPI(title="Flock API")
        self.run_store = RunStore()  # Create the run store instance
        self._setup_routes()

    def _setup_routes(self):
        """Includes API routers."""
        # Create and include the API router, passing self so endpoint
        # handlers can reach the flock instance, the run store, and the
        # helper methods below.
        api_router = create_api_router(self)
        self.app.include_router(api_router)

        # Root redirect (if UI is enabled later) will be added in start()

    # --- Core Execution Helper Methods ---
    # These remain here as they need access to self.flock and self.run_store

    async def _run_agent(
        self, run_id: str, agent_name: str, inputs: dict[str, Any]
    ):
        """Executes a single agent run (internal helper).

        Args:
            run_id: Key under which the result/status is stored in the run store.
            agent_name: Agent to execute; must exist in self.flock.agents.
            inputs: Raw input values (possibly form strings) for the agent.

        Raises:
            ValueError: If the agent name is unknown.
            Exception: Any run failure is recorded as "failed" in the run
                store and re-raised for the endpoint handler.
        """
        try:
            if agent_name not in self.flock.agents:
                raise ValueError(f"Agent '{agent_name}' not found")
            agent = self.flock.agents[agent_name]
            # Type conversion (remains important)
            typed_inputs = self._type_convert_inputs(agent_name, inputs)

            logger.debug(
                f"Executing single agent '{agent_name}' (run_id: {run_id})",
                inputs=typed_inputs,
            )
            result = await agent.run_async(typed_inputs)
            logger.info(
                f"Single agent '{agent_name}' completed (run_id: {run_id})"
            )

            # Use RunStore to update
            self.run_store.update_run_result(run_id, result)

        except Exception as e:
            logger.error(
                f"Error in single agent run {run_id} ('{agent_name}'): {e!s}",
                exc_info=True,
            )
            # Update store status
            self.run_store.update_run_status(run_id, "failed", str(e))
            raise  # Re-raise for the endpoint handler

    async def _run_flock(
        self, run_id: str, agent_name: str, inputs: dict[str, Any]
    ):
        """Executes a flock workflow run (internal helper).

        Same contract as _run_agent, but executes the whole workflow
        starting at agent_name via self.flock.run_async.
        """
        try:
            if agent_name not in self.flock.agents:
                raise ValueError(f"Starting agent '{agent_name}' not found")

            # Type conversion
            typed_inputs = self._type_convert_inputs(agent_name, inputs)

            logger.debug(
                f"Executing flock workflow starting with '{agent_name}' (run_id: {run_id})",
                inputs=typed_inputs,
            )
            result = await self.flock.run_async(
                start_agent=agent_name, input=typed_inputs
            )
            # Result is potentially a Box object

            # Use RunStore to update
            self.run_store.update_run_result(run_id, result)

            # Log using the local result variable
            final_agent_name = (
                result.get("agent_name", "N/A") if result is not None else "N/A"
            )
            logger.info(
                f"Flock workflow completed (run_id: {run_id})",
                final_agent=final_agent_name,
            )

        except Exception as e:
            logger.error(
                f"Error in flock run {run_id} (started with '{agent_name}'): {e!s}",
                exc_info=True,
            )
            # Update store status
            self.run_store.update_run_status(run_id, "failed", str(e))
            raise  # Re-raise for the endpoint handler

    # --- UI Helper Methods (kept here as they are called by endpoints via self) ---

    def _parse_input_spec(self, input_spec: str) -> list[dict[str, str]]:
        """Parses an agent input string into a list of field definitions."""
        # Use the implementation moved to ui.utils
        return parse_input_spec(input_spec)

    def _format_result_to_html(self, data: Any) -> str:
        """Recursively formats a Python object into an HTML string."""
        # Use the implementation moved to ui.utils
        return format_result_to_html(data)

    @staticmethod
    def _coerce_numeric(agent_name: str, key: str, value: Any, caster: type) -> Any:
        """Cast value with caster (int or float); on failure, warn and return the raw value."""
        try:
            return caster(value)
        except (ValueError, TypeError):
            logger.warning(
                f"Could not convert input '{key}' value '{value}' to {caster.__name__} for agent '{agent_name}'"
            )
            return value

    def _type_convert_inputs(
        self, agent_name: str, inputs: dict[str, Any]
    ) -> dict[str, Any]:
        """Converts input values (esp. from forms) to expected Python types.

        Uses the agent's declared input spec to pick the target type per
        field. Fields without a known type, and numeric values that fail
        conversion, pass through unchanged (the latter with a warning).
        """
        typed_inputs: dict[str, Any] = {}
        agent_def = self.flock.agents.get(agent_name)
        if not agent_def or not agent_def.input:
            return inputs  # Return original if no spec

        parsed_fields = self._parse_input_spec(agent_def.input)
        field_types = {f["name"]: f["type"] for f in parsed_fields}

        for k, v in inputs.items():
            target_type = field_types.get(k)
            if target_type and target_type.startswith("bool"):
                # HTML form checkboxes arrive as strings like "on"/"true"/"1".
                typed_inputs[k] = (
                    str(v).lower() in ["true", "on", "1", "yes"]
                    if isinstance(v, str)
                    else bool(v)
                )
            elif target_type and target_type.startswith("int"):
                typed_inputs[k] = self._coerce_numeric(agent_name, k, v, int)
            elif target_type and target_type.startswith("float"):
                typed_inputs[k] = self._coerce_numeric(agent_name, k, v, float)
            # TODO: Add list/dict parsing (e.g., json.loads) if needed
            else:
                typed_inputs[k] = v  # Assume string or already correct type
        return typed_inputs

    # --- Server Start/Stop ---

    def start(
        self,
        host: str = "0.0.0.0",
        port: int = 8344,
        server_name: str = "Flock API",
        create_ui: bool = False,
    ):
        """Start the API server, optionally creating and mounting a FastHTML UI.

        Args:
            host: Interface to bind; "0.0.0.0" exposes the server on all interfaces.
            port: TCP port passed to uvicorn.
            server_name: Display name forwarded to the UI app.
            create_ui: If True and FastHTML is available, mount the UI at /ui
                and redirect / to /ui/. Failures fall back to API-only mode.
        """
        if create_ui:
            if not FASTHTML_AVAILABLE:
                logger.error(
                    "FastHTML not installed. Cannot create UI. Running API only."
                )
            else:
                logger.info("Attempting to create and mount FastHTML UI at /ui")
                try:
                    # Pass self (FlockAPI instance) to the UI creation function
                    # It needs access to self.flock and self._parse_input_spec
                    fh_app = create_ui_app(
                        self,
                        api_host=host,
                        api_port=port,
                        server_name=server_name,
                    )
                    self.app.mount("/ui", fh_app, name="ui")
                    logger.info("FastHTML UI mounted successfully.")

                    # Add root redirect only if UI was successfully mounted
                    @self.app.get(
                        "/",
                        include_in_schema=False,
                        response_class=RedirectResponse,
                    )
                    async def root_redirect():
                        logger.debug("Redirecting / to /ui/")
                        return "/ui/"

                except ImportError as e:
                    logger.error(
                        f"Could not create UI due to import error: {e}. Running API only."
                    )
                except Exception as e:
                    logger.error(
                        f"An error occurred setting up the UI: {e}. Running API only.",
                        exc_info=True,
                    )

        logger.info(f"Starting API server on http://{host}:{port}")
        # Only advertise the UI URL if the /ui mount actually exists.
        if (
            create_ui
            and FASTHTML_AVAILABLE
            and any(
                m.path == "/ui" for m in self.app.routes if hasattr(m, "path")
            )
        ):
            logger.info(f"UI available at http://{host}:{port}/ui/")

        uvicorn.run(self.app, host=host, port=port)

    async def stop(self):
        """Stop the API server."""
        logger.info("Stopping API server (cleanup if necessary)")
        pass  # Add cleanup logic if needed


# --- End of file ---
```

### src\flock\core\util\cli_helper.py

- **Lines**: 85
- **Last modified**: 2025-02-26 07:10:53

**Functions**:
- `display_hummingbird()`
- `init_console(clear_screen)`
- `display_banner_no_version()`

**Content**:
```py
from importlib.metadata import PackageNotFoundError, version

from rich.console import Console
from rich.syntax import Text

# Resolve the installed package version; fall back to a hard-coded default
# when the package metadata is not installed (e.g. running from source).
try:
    __version__ = version("flock-core")
except PackageNotFoundError:
    __version__ = "0.2.0"

# Shared Rich console used by the display helpers in this module.
console = Console()


def display_hummingbird():
    """Display the hummingbird."""
    print("""
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;24;23;22m▀\033[0m\033[38;2;0;0;0;48;2;47;44;40m▀\033[0m\033[38;2;0;0;0;48;2;30;28;27m▀\033[0m\033[38;2;0;0;0;48;2;1;1;1m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;14;14;13m▀\033[0m\033[38;2;2;2;2;48;2;173;161;143m▀\033[0m\033[38;2;97;92;83;48;2;243;226;198m▀\033[0m\033[38;2;204;190;168;48;2;245;226;197m▀\033[0m\033[38;2;243;225;197;48;2;245;225;195m▀\033[0m\033[38;2;243;226;198;48;2;181;168;147m▀\033[0m\033[38;2;243;226;199;48;2;193;179;158m▀\033[0m\033[38;2;213;198;176;48;2;245;226;198m▀\033[0m\033[38;2;110;102;89;48;2;245;226;197m▀\033[0m\033[38;2;7;7;6;48;2;217;202;178m▀\033[0m\033[38;2;0;0;0;48;2;119;111;99m▀\033[0m\033[38;2;0;0;0;48;2;56;54;50m▀\033[0m\033[38;2;0;0;0;48;2;22;22;20m▀\033[0m\033[38;2;0;0;0;48;2;6;6;6m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;11;11;10;48;2;141;131;119m▀\033[0m\033[38;2;192;179;159;48;2;243;225;197m▀\033[0m\033[38;2;243;225;197;48;2;245;226;197m▀\033[0m\033[38;2;245;226;197;48;2;245;226;195m▀\033[0m\033[38;2;246;226;197;48;2;245;226;197m▀\033[0m\033[38;2;222;205;179;48;2;239;220;193m▀\033[0m\033[38;2;2;2;2;48;2;64;59;52m▀\033[0m\033[38;2;17;17;15;48;2;91;85;77m▀\033[0m\033[38;2;243;226;199;48;2;243;226;198m▀\033[0m\033[38;2;245;226;198;48;2;238;221;193m▀\033[0m\033[38;2;245;226;197;48;2;170;158;139m▀\033[0m\033[38;2;243;225;197;48;2;99;92;81m▀\033[0m\033[38;2;243;224;197;48;2;50;47;43m▀\033[0m\033[38;2;242;224;198;48;2;14;13;12m▀\033[0m\033[38;2;227;213;191;48;2;0;0;0m▀\033[0m\033[38;2;204;192;172;48;2;0;0;0m▀\033[0m\033[38;2;187;175;158;48;2;0;0;0m▀\033[0m\033[38;2;168;159;143;48;2;0;0;0m▀\033[0m\033[38;2;147;140;128;48;2;0;0;0m▀\033[0m\033[38;2;128;120;111;48;2;0;0;0m▀\033[0m\033[38;2;99;93;85;48;2;1;1;1m▀\033[0m\033[38;2;58;55;51;48;2;9;9;8m▀\033[0m\033[38;2;6;6;6;48;2;21;21;21m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;32;31;29;48;2;121;113;102m▀\033[0m\033[38;2;240;223;195;48;2;243;226;195m▀\033[0m\033[38;2;245;226;197;48;2;245;226;195m▀\033[0m\033[38;2;245;226;197;48;2;245;226;197m▀\033[0m\033[38;2;246;226;197;48;2;246;226;197m▀\033[0m\033[38;2;245;225;195;48;2;245;226;197m▀\033[0m\033[38;2;245;226;195;48;2;245;226;195m▀\033[0m\033[38;2;243;225;198;48;2;239;221;195m▀\033[0m\033[38;2;240;223;199;48;2;71;65;60m▀\033[0m\033[38;2;136;127;111;48;2;0;0;0m▀\033[0m\033[38;2;21;20;19;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;186;173;153;48;2;220;204;179m▀\033[0m\033[38;2;243;225;197;48;2;245;226;198m▀\033[0m\033[38;2;245;226;197;48;2;245;225;197m▀\033[0m\033[38;2;245;226;197;48;2;245;225;195m▀\033[0m\033[38;2;245;226;197;48;2;245;225;195m▀\033[0m\033[38;2;245;226;197;48;2;245;225;197m▀\033[0m\033[38;2;245;225;195;48;2;227;211;187m▀\033[0m\033[38;2;112;104;93;48;2;6;6;5m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;219;204;179;48;2;188;175;156m▀\033[0m\033[38;2;245;226;197;48;2;245;226;198m▀\033[0m\033[38;2;245;226;195;48;2;245;226;197m▀\033[0m\033[38;2;245;226;195;48;2;245;225;195m▀\033[0m\033[38;2;245;225;195;48;2;245;225;195m▀\033[0m\033[38;2;245;225;195;48;2;245;225;195m▀\033[0m\033[38;2;238;220;192;48;2;245;226;195m▀\033[0m\033[38;2;149;137;121;48;2;243;225;197m▀\033[0m\033[38;2;16;14;13;48;2;197;183;161m▀\033[0m\033[38;2;0;0;0;48;2;15;14;13m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;20;19;17m▀\033[0m\033[38;2;0;0;0;48;2;146;136;121m▀\033[0m\033[38;2;16;15;14;48;2;235;219;194m▀\033[0m\033[38;2;161;149;134;48;2;243;226;198m▀\033[0m\033[38;2;245;226;198;48;2;243;226;197m▀\033[0m\033[38;2;245;226;195;48;2;245;226;197m▀\033[0m\033[38;2;245;226;195;48;2;245;225;195m▀\033[0m\033[38;2;245;226;195;48;2;245;225;197m▀\033[0m\033[38;2;245;225;195;48;2;243;225;197m▀\033[0m\033[38;2;243;225;198;48;2;192;178;158m▀\033[0m\033[38;2;235;218;190;48;2;152;141;125m▀\033[0m\033[38;2;245;226;197;48;2;245;226;197m▀\033[0m\033[38;2;163;151;134;48;2;243;225;198m▀\033[0m\033[38;2;0;0;0;48;2;46;43;39m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;40;38;35m▀\033[0m\033[38;2;56;52;48;48;2;232;216;193m▀\033[0m\033[38;2;220;204;182;48;2;207;193;170m▀\033[0m\033[38;2;243;227;199;48;2;72;67;60m▀\033[0m\033[38;2;211;195;175;48;2;1;1;1m▀\033[0m\033[38;2;151;140;125;48;2;0;0;0m▀\033[0m\033[38;2;130;120;108;48;2;0;0;0m▀\033[0m\033[38;2;138;128;113;48;2;0;0;0m▀\033[0m\033[38;2;163;151;133;48;2;0;0;0m▀\033[0m\033[38;2;170;158;140;48;2;0;0;0m▀\033[0m\033[38;2;121;113;101;48;2;0;0;0m▀\033[0m\033[38;2;16;15;14;48;2;39;36;32m▀\033[0m\033[38;2;155;145;129;48;2;231;215;191m▀\033[0m\033[38;2;245;226;197;48;2;245;226;198m▀\033[0m\033[38;2;245;226;197;48;2;246;226;198m▀\033[0m\033[38;2;118;109;97;48;2;151;140;124m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;1;1;1;48;2;75;71;65m▀\033[0m\033[38;2;183;170;153;48;2;211;197;177m▀\033[0m\033[38;2;182;170;151;48;2;18;16;15m▀\033[0m\033[38;2;12;12;11;48;2;112;104;95m▀\033[0m\033[38;2;12;11;10;48;2;227;211;187m▀\033[0m\033[38;2;86;81;73;48;2;234;217;193m▀\033[0m\033[38;2;127;119;107;48;2;192;179;160m▀\033[0m\033[38;2;138;129;116;48;2;174;162;144m▀\033[0m\033[38;2;126;118;105;48;2;172;160;142m▀\033[0m\033[38;2;105;97;88;48;2;170;158;140m▀\033[0m\033[38;2;97;92;83;48;2;159;147;131m▀\033[0m\033[38;2;141;131;119;48;2;121;113;101m▀\033[0m\033[38;2;227;211;191;48;2;47;44;40m▀\033[0m\033[38;2;194;181;161;48;2;139;130;116m▀\033[0m\033[38;2;245;226;198;48;2;245;226;198m▀\033[0m\033[38;2;245;226;197;48;2;245;226;197m▀\033[0m\033[38;2;149;138;123;48;2;113;105;94m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;163;154;139;48;2;219;205;182m▀\033[0m\033[38;2;100;94;85;48;2;102;96;87m▀\033[0m\033[38;2;165;156;141;48;2;210;195;175m▀\033[0m\033[38;2;223;207;184;48;2;29;28;24m▀\033[0m\033[38;2;97;92;81;48;2;52;48;44m▀\033[0m\033[38;2;9;9;8;48;2;110;103;93m▀\033[0m\033[38;2;0;0;0;48;2;140;131;118m▀\033[0m\033[38;2;0;0;0;48;2;142;131;119m▀\033[0m\033[38;2;0;0;0;48;2;121;113;102m▀\033[0m\033[38;2;0;0;0;48;2;109;102;93m▀\033[0m\033[38;2;0;0;0;48;2;128;120;107m▀\033[0m\033[38;2;1;1;0;48;2;195;181;162m▀\033[0m\033[38;2;91;86;79;48;2;243;225;198m▀\033[0m\033[38;2;241;222;195;48;2;245;225;197m▀\033[0m\033[38;2;245;226;197;48;2;245;226;198m▀\033[0m\033[38;2;243;225;197;48;2;190;176;157m▀\033[0m\033[38;2;47;44;40;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;9;9;8;48;2;21;20;19m▀\033[0m\033[38;2;197;184;163;48;2;192;179;162m▀\033[0m\033[38;2;190;177;159;48;2;236;220;195m▀\033[0m\033[38;2;101;95;84;48;2;235;217;193m▀\033[0m\033[38;2;197;184;163;48;2;135;125;111m▀\033[0m\033[38;2;243;226;199;48;2;40;39;36m▀\033[0m\033[38;2;231;215;190;48;2;2;1;1m▀\033[0m\033[38;2;194;179;160;48;2;0;0;0m▀\033[0m\033[38;2;179;166;147;48;2;10;9;9m▀\033[0m\033[38;2;179;166;147;48;2;64;60;55m▀\033[0m\033[38;2;182;170;152;48;2;169;158;143m▀\033[0m\033[38;2;240;224;195;48;2;245;226;199m▀\033[0m\033[38;2;245;226;197;48;2;245;226;198m▀\033[0m\033[38;2;245;225;197;48;2;245;226;197m▀\033[0m\033[38;2;245;225;197;48;2;243;225;199m▀\033[0m\033[38;2;243;224;195;48;2;117;110;97m▀\033[0m\033[38;2;56;52;48;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;15;14;13;48;2;6;6;5m▀\033[0m\033[38;2;235;220;195;48;2;242;225;201m▀\033[0m\033[38;2;243;225;199;48;2;243;225;199m▀\033[0m\033[38;2;80;75;67;48;2;226;210;187m▀\033[0m\033[38;2;65;60;56;48;2;243;226;199m▀\033[0m\033[38;2;126;118;105;48;2;195;182;162m▀\033[0m\033[38;2;173;163;146;48;2;136;127;113m▀\033[0m\033[38;2;210;195;177;48;2;89;84;77m▀\033[0m\033[38;2;238;222;198;48;2;52;50;47m▀\033[0m\033[38;2;234;219;195;48;2;92;87;80m▀\033[0m\033[38;2;186;174;157;48;2;209;195;172m▀\033[0m\033[38;2;236;218;192;48;2;243;226;198m▀\033[0m\033[38;2;245;226;198;48;2;221;205;181m▀\033[0m\033[38;2;243;225;198;48;2;81;76;68m▀\033[0m\033[38;2;131;124;111;48;2;0;0;0m▀\033[0m\033[38;2;1;1;1;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;2;2;2;48;2;0;0;0m▀\033[0m\033[38;2;238;222;197;48;2;233;218;194m▀\033[0m\033[38;2;243;225;198;48;2;169;157;141m▀\033[0m\033[38;2;177;163;147;48;2;13;12;11m▀\033[0m\033[38;2;42;39;36;48;2;116;108;99m▀\033[0m\033[38;2;4;4;4;48;2;219;204;182m▀\033[0m\033[38;2;68;64;58;48;2;243;226;199m▀\033[0m\033[38;2;153;142;128;48;2;225;210;186m▀\033[0m\033[38;2;224;209;187;48;2;147;137;121m▀\033[0m\033[38;2;243;225;199;48;2;69;65;59m▀\033[0m\033[38;2;222;206;182;48;2;6;6;5m▀\033[0m\033[38;2;118;110;99;48;2;0;0;0m▀\033[0m\033[38;2;14;13;12;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;230;216;193;48;2;192;181;163m▀\033[0m\033[38;2;59;55;52;48;2;189;176;157m▀\033[0m\033[38;2;202;188;168;48;2;242;224;198m▀\033[0m\033[38;2;245;227;200;48;2;133;123;110m▀\033[0m\033[38;2;216;199;176;48;2;10;9;8m▀\033[0m\033[38;2;95;89;79;48;2;0;0;0m▀\033[0m\033[38;2;6;5;5;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;222;207;186;48;2;225;210;189m▀\033[0m\033[38;2;240;222;195;48;2;105;100;89m▀\033[0m\033[38;2;81;75;67;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;161;149;136;48;2;40;36;36m▀\033[0m\033[38;2;1;1;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m
\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m\033[38;2;0;0;0;48;2;0;0;0m▀\033[0m

""")


def init_console(clear_screen: bool = True):
    """Optionally clear the terminal, then display the Flock banner.

    Args:
        clear_screen: When True, clear the console before printing the
            banner (useful when redrawing menu screens in a loop).
    """
    banner_text = Text(
        f"""
🦆    🐓     🐤     🐧
╭━━━━━━━━━━━━━━━━━━━━━━━━╮
│ ▒█▀▀▀ █░░ █▀▀█ █▀▀ █░█ │
│ ▒█▀▀▀ █░░ █░░█ █░░ █▀▄ │
│ ▒█░░░ ▀▀▀ ▀▀▀▀ ▀▀▀ ▀░▀ │
╰━━━━━━━━━v{__version__}━━━━━━━━╯
🦆     🐤    🐧     🐓
""",
        justify="center",
        style="bold orange3",
    )
    if clear_screen:
        console.clear()
    console.print(banner_text)
    # Plain string: the original f-prefix had no placeholders (ruff F541).
    console.print(
        "[italic]'Hummingbird'[/] milestone - [bold]white duck GmbH[/] - [cyan]https://whiteduck.de[/]\n"
    )


def display_banner_no_version():
    """Display the Flock banner without the version string.

    Unlike `init_console`, this never clears the screen and omits the
    version number from the banner frame.
    """
    # Plain strings: the original f-prefixes had no placeholders (ruff F541).
    banner_text = Text(
        """
🦆    🐓     🐤     🐧
╭━━━━━━━━━━━━━━━━━━━━━━━━╮
│ ▒█▀▀▀ █░░ █▀▀█ █▀▀ █░█ │
│ ▒█▀▀▀ █░░ █░░█ █░░ █▀▄ │
│ ▒█░░░ ▀▀▀ ▀▀▀▀ ▀▀▀ ▀░▀ │
╰━━━━━━━━━━━━━━━━━━━━━━━━╯
🦆     🐤    🐧     🐓
""",
        justify="center",
        style="bold orange3",
    )
    console.print(banner_text)
    console.print("[bold]white duck GmbH[/] - [cyan]https://whiteduck.de[/]\n")
```

### src\flock\cli\loaded_flock_cli.py

- **Lines**: 219
- **Last modified**: 2025-04-03 23:51:18

**Description**: CLI interface for working with a loaded Flock instance.

This module provides a CLI interface for a Flock instance that has already been loaded,
allowing users to execute, edit, or manage agents from the existing configuration.

**Functions**:
- `start_loaded_flock_cli(flock, server_name, show_results, edit_mode)`
- `_start_web_server(flock, create_ui)`

**Content**:
```py
"""CLI interface for working with a loaded Flock instance.

This module provides a CLI interface for a Flock instance that has already been loaded,
allowing users to execute, edit, or manage agents from the existing configuration.
"""

import questionary
from rich.console import Console
from rich.panel import Panel

from flock.cli.constants import (
    CLI_REGISTRY_MANAGEMENT,
    CLI_SETTINGS,
)
from flock.core.flock import Flock
from flock.core.util.cli_helper import init_console

# Optional CLI feature modules: each screen may be absent in a given
# install, so probe for it and record an availability flag that the
# main menu below uses to enable/disable the corresponding option.
try:
    from flock.cli.yaml_editor import yaml_editor

    yaml_editor_available = True
except ImportError:
    yaml_editor_available = False

# Agent management screen (optional).
try:
    from flock.cli.manage_agents import manage_agents

    manage_agents_available = True
except ImportError:
    manage_agents_available = False

# Flock execution screen (optional).
try:
    from flock.cli.execute_flock import execute_flock

    execute_flock_available = True
except ImportError:
    execute_flock_available = False

# Past-run results viewer (optional).
try:
    from flock.cli.view_results import view_results

    view_results_available = True
except ImportError:
    view_results_available = False

# Module-level rich console shared by all functions in this file.
console = Console()


def start_loaded_flock_cli(
    flock: Flock,
    server_name: str = "Flock CLI",
    show_results: bool = False,
    edit_mode: bool = False,
) -> None:
    """Start a CLI interface with a loaded Flock instance.

    Runs an interactive menu loop until the user chooses Exit (or
    cancels the prompt with Ctrl+C / ESC).

    Args:
        flock: The loaded Flock instance
        server_name: Optional name for the CLI interface
        show_results: Whether to initially show results of previous runs
        edit_mode: Whether to open directly in edit mode
    """
    if not flock:
        console.print("[bold red]Error: No Flock instance provided.[/]")
        return

    agent_names = list(flock._agents.keys())

    # Directly go to specific modes if requested
    if edit_mode and yaml_editor_available:
        yaml_editor(flock)
        return

    if show_results and view_results_available:
        view_results(flock)
        return

    # Main CLI loop
    while True:
        # Redraw the banner/header on every iteration
        init_console()

        # Display header with Flock information
        console.print(Panel(f"[bold green]{server_name}[/]"), justify="center")
        console.print(
            f"Flock loaded with [bold cyan]{len(agent_names)}[/] agents: {', '.join(agent_names)}"
        )
        console.line()

        # Main menu choices
        choices = [
            questionary.Separator(line=" "),
            "Execute Flock",
            "Start Web Server",
            "Start Web Server with UI",
            "Manage Agents",
            "View Results of Past Runs",
        ]

        # Add YAML Editor option if available
        if yaml_editor_available:
            choices.append("Edit YAML Configurations")

        # Add remaining options
        choices.extend([questionary.Separator(), CLI_REGISTRY_MANAGEMENT])
        choices.extend(
            [
                questionary.Separator(),
                CLI_SETTINGS,
                questionary.Separator(),
                "Exit",
            ]
        )

        # Display menu and get choice
        choice = questionary.select(
            "What would you like to do?",
            choices=choices,
        ).ask()

        # questionary returns None when the prompt is cancelled
        # (Ctrl+C / ESC); treat that like "Exit" instead of crashing
        # on choice.startswith(...) below.
        if choice is None or choice == "Exit":
            break

        # Handle menu selection
        if choice == "Execute Flock":
            if execute_flock_available:
                execute_flock(flock)
            else:
                console.print(
                    "[yellow]Execute Flock functionality not yet implemented.[/]"
                )
                input("\nPress Enter to continue...")

        elif choice == "Start Web Server":
            _start_web_server(flock, create_ui=False)

        elif choice == "Start Web Server with UI":
            _start_web_server(flock, create_ui=True)

        elif choice == "Manage Agents":
            if manage_agents_available:
                manage_agents(flock)
            else:
                console.print(
                    "[yellow]Manage Agents functionality not yet implemented.[/]"
                )
                input("\nPress Enter to continue...")

        elif choice == "View Results of Past Runs":
            if view_results_available:
                view_results(flock)
            else:
                console.print(
                    "[yellow]View Results functionality not yet implemented.[/]"
                )
                input("\nPress Enter to continue...")

        elif choice == CLI_REGISTRY_MANAGEMENT:
            from flock.cli.registry_management import manage_registry

            manage_registry()

        elif choice == "Edit YAML Configurations" and yaml_editor_available:
            yaml_editor(flock)

        elif choice == CLI_SETTINGS:
            from flock.cli.settings import settings_editor

            settings_editor()

        # Pause after each action; web-server actions block on their own.
        if not choice.startswith("Start Web Server"):
            input("\nPress Enter to continue...")


def _start_web_server(flock: Flock, create_ui: bool = False) -> None:
    """Prompt for host/port/server-name and launch the Flock API server.

    Args:
        flock: The loaded Flock instance
        create_ui: Whether to create a UI for the web server
    """
    # Defaults used when the user gives no (or invalid) input.
    host = "127.0.0.1"
    port = 8344
    server_name = "Flock API"

    console.print("\n[bold]Web Server Configuration[/]")

    answer = questionary.text("Host (default: 127.0.0.1):", default=host).ask()
    if answer:
        host = answer

    answer = questionary.text("Port (default: 8344):", default=str(port)).ask()
    # Keep the default port unless the reply is purely numeric.
    if answer and answer.isdigit():
        port = int(answer)

    answer = questionary.text(
        "Server name (default: Flock API):", default=server_name
    ).ask()
    if answer:
        server_name = answer

    mode = "with UI" if create_ui else "without UI"
    console.print(f"\nStarting web server on {host}:{port} {mode}...")

    # Delegate to the Flock instance's own API entry point.
    flock.start_api(
        host=host, port=port, server_name=server_name, create_ui=create_ui
    )
```

### src\flock\core\api\models.py

- **Lines**: 34
- **Last modified**: 2025-04-02 17:29:19
- **Used by**: 3 files

**Description**: Pydantic models for the Flock API.

**Classes**:
- `FlockAPIRequest`: 0 methods
- `FlockAPIResponse`: 0 methods

**Content**:
```py
# src/flock/core/api/models.py
"""Pydantic models for the Flock API."""

from datetime import datetime
from typing import Any

from pydantic import BaseModel, Field


class FlockAPIRequest(BaseModel):
    """Request model for running an agent via JSON API."""

    # Name of the registered agent that should start the run.
    agent_name: str = Field(..., description="Name of the agent to run")
    # Free-form input payload forwarded to the agent; empty dict by default.
    inputs: dict[str, Any] = Field(
        default_factory=dict, description="Input data for the agent"
    )
    # True schedules the run as a background task instead of awaiting it.
    async_run: bool = Field(
        default=False, description="Whether to run asynchronously"
    )


class FlockAPIResponse(BaseModel):
    """Response model for API run requests."""

    # Unique identifier assigned when the run record is created.
    run_id: str = Field(..., description="Unique ID for this run")
    # Lifecycle state; the API uses "starting", "running", "completed", "failed".
    status: str = Field(..., description="Status of the run")
    # Populated only once the run has completed successfully.
    result: dict[str, Any] | None = Field(
        None, description="Run result if completed"
    )
    started_at: datetime = Field(..., description="When the run started")
    # Set when the run reaches a terminal state ("completed" or "failed").
    completed_at: datetime | None = Field(
        None, description="When the run completed"
    )
    error: str | None = Field(None, description="Error message if failed")
```

### src\flock\core\api\endpoints.py

- **Lines**: 222
- **Last modified**: 2025-04-02 17:29:19
- **Used by**: 1 files

**Description**: FastAPI endpoints for the Flock API.

**Functions**:
- `create_api_router(flock_api)`

**Content**:
```py
# src/flock/core/api/endpoints.py
"""FastAPI endpoints for the Flock API."""

import html  # For escaping
import uuid
from typing import TYPE_CHECKING  # Added Any for type hinting clarity

from fastapi import (
    APIRouter,
    BackgroundTasks,
    HTTPException,
    Request as FastAPIRequest,
)

# Import HTMLResponse for the UI form endpoint
from fastapi.responses import HTMLResponse

from flock.core.logging.logging import get_logger

# Import models and UI utils
from .models import FlockAPIRequest, FlockAPIResponse

# Import UI utils - assuming they are now in ui/utils.py

# Use TYPE_CHECKING to avoid circular imports for type hints
if TYPE_CHECKING:
    from flock.core.flock import Flock

    from .main import FlockAPI
    from .run_store import RunStore

logger = get_logger("api.endpoints")


# Factory function to create the router with dependencies
def create_api_router(flock_api: "FlockAPI") -> APIRouter:
    """Creates the APIRouter and defines endpoints, injecting dependencies.

    Args:
        flock_api: The owning FlockAPI instance. Its run store, Flock
            instance, and helper methods (_run_flock, _parse_input_spec,
            _format_result_to_html) are captured in the endpoint closures.

    Returns:
        An APIRouter exposing the JSON API endpoints and the UI form
        endpoint.
    """
    router = APIRouter()
    # Get dependencies from the main FlockAPI instance passed in
    run_store: RunStore = flock_api.run_store
    flock_instance: Flock = flock_api.flock

    # --- API Endpoints ---
    @router.post("/run/flock", response_model=FlockAPIResponse, tags=["API"])
    async def run_flock_json(
        request: FlockAPIRequest, background_tasks: BackgroundTasks
    ):
        """Run a flock workflow starting with the specified agent (expects JSON)."""
        run_id = None
        try:
            run_id = str(uuid.uuid4())
            run_store.create_run(run_id)  # Use RunStore
            response = run_store.get_run(
                run_id
            )  # Get initial response from store

            processed_inputs = request.inputs if request.inputs else {}
            logger.info(
                f"API request: run flock '{request.agent_name}' (run_id: {run_id})",
                inputs=processed_inputs,
            )

            if request.async_run:
                logger.debug(
                    f"Running flock '{request.agent_name}' asynchronously (run_id: {run_id})"
                )
                # Call the helper method on the passed FlockAPI instance
                background_tasks.add_task(
                    flock_api._run_flock,
                    run_id,
                    request.agent_name,
                    processed_inputs,
                )
                run_store.update_run_status(run_id, "running")
                # NOTE: 'response' is the same object stored in the run
                # store (get_run returns it directly), so this mutation
                # keeps the returned payload in sync with the store.
                response.status = "running"
            else:
                logger.debug(
                    f"Running flock '{request.agent_name}' synchronously (run_id: {run_id})"
                )
                # Call the helper method on the passed FlockAPI instance
                await flock_api._run_flock(
                    run_id, request.agent_name, processed_inputs
                )
                response = run_store.get_run(
                    run_id
                )  # Fetch updated status/result

            return response
        except ValueError as ve:
            logger.error(f"Value error starting run: {ve}")
            if run_id:
                run_store.update_run_status(run_id, "failed", str(ve))
            # Chain the cause so tracebacks show the original error (B904).
            raise HTTPException(status_code=400, detail=str(ve)) from ve
        except Exception as e:
            # Deliberately vague detail: don't leak internals to clients.
            error_msg = f"Internal server error: {type(e).__name__}"
            logger.error(f"Error starting run: {e!s}", exc_info=True)
            if run_id:
                run_store.update_run_status(run_id, "failed", error_msg)
            raise HTTPException(status_code=500, detail=error_msg) from e

    @router.get("/run/{run_id}", response_model=FlockAPIResponse, tags=["API"])
    async def get_run_status(run_id: str):
        """Get the status of a specific run."""
        logger.debug(f"API request: get status for run_id: {run_id}")
        run_data = run_store.get_run(run_id)
        if not run_data:
            logger.warning(f"Run ID not found: {run_id}")
            raise HTTPException(status_code=404, detail="Run not found")
        return run_data

    @router.get("/agents", tags=["API"])
    async def list_agents():
        """List all available agents."""
        logger.debug("API request: list agents")
        # Access flock instance via factory closure
        agents_list = [
            {"name": agent.name, "description": agent.description or agent.name}
            for agent in flock_instance.agents.values()
        ]
        return {"agents": agents_list}

    # --- UI Form Endpoint ---
    @router.post("/ui/run-agent-form", response_class=HTMLResponse, tags=["UI"])
    async def run_flock_form(fastapi_req: FastAPIRequest):
        """Endpoint to handle form submissions from the UI.

        Parses 'inputs.*' form fields, runs the agent synchronously, and
        returns an HTML fragment targeting the '#result-content' div.
        Errors are returned as HTML fragments (not raised), since the
        response is rendered directly in the page.
        """
        run_id = None
        try:
            form_data = await fastapi_req.form()
            agent_name = form_data.get("agent_name")
            if not agent_name:
                logger.warning("UI form submission missing agent_name")
                return HTMLResponse(
                    '<div id="result-content" class="error-message">Error: Agent name not provided.</div>',
                    status_code=400,
                )

            logger.info(f"UI Form submission for agent: {agent_name}")
            form_inputs = {}
            # Access flock instance via factory closure
            agent_def = flock_instance.agents.get(agent_name)
            # Use helper from flock_api instance for parsing
            defined_input_fields = (
                flock_api._parse_input_spec(agent_def.input or "")
                if agent_def
                else []
            )

            # Collect form fields namespaced as "inputs.<name>".
            for key, value in form_data.items():
                if key.startswith("inputs."):
                    form_inputs[key[len("inputs.") :]] = value
            # Unchecked checkboxes are absent from form data, so map
            # presence/absence explicitly to True/False.
            for field in defined_input_fields:  # Handle checkboxes
                if (
                    field["html_type"] == "checkbox"
                    and field["name"] not in form_inputs
                ):
                    form_inputs[field["name"]] = False
                elif (
                    field["html_type"] == "checkbox"
                    and field["name"] in form_inputs
                ):
                    form_inputs[field["name"]] = True

            logger.debug(f"Parsed form inputs for UI run: {form_inputs}")
            run_id = str(uuid.uuid4())
            run_store.create_run(run_id)  # Use RunStore
            logger.debug(
                f"Running flock '{agent_name}' synchronously from UI (run_id: {run_id})"
            )

            # Call helper method on flock_api instance
            await flock_api._run_flock(run_id, agent_name, form_inputs)

            final_status = run_store.get_run(run_id)
            if final_status and final_status.status == "completed":
                # Use helper from flock_api instance for formatting
                formatted_html = flock_api._format_result_to_html(
                    final_status.result
                )
                logger.info(f"UI run completed successfully (run_id: {run_id})")
                return HTMLResponse(
                    f"<div id='result-content'>{formatted_html}</div>"
                )  # Wrap in target div
            elif final_status and final_status.status == "failed":
                logger.error(
                    f"UI run failed (run_id: {run_id}): {final_status.error}"
                )
                error_msg = html.escape(final_status.error or "Unknown error")
                return HTMLResponse(
                    f"<div id='result-content' class='error-message'>Run Failed: {error_msg}</div>",
                    status_code=500,
                )
            else:
                status_str = (
                    final_status.status if final_status else "Not Found"
                )
                logger.warning(
                    f"UI run {run_id} ended in unexpected state: {status_str}"
                )
                return HTMLResponse(
                    f"<div id='result-content' class='error-message'>Run ended unexpectedly. Status: {status_str}</div>",
                    status_code=500,
                )

        except ValueError as ve:
            logger.error(f"Value error processing UI form run: {ve}")
            if run_id:
                run_store.update_run_status(run_id, "failed", str(ve))
            return HTMLResponse(
                f"<div id='result-content' class='error-message'>Error: {html.escape(str(ve))}</div>",
                status_code=400,
            )
        except Exception as e:
            error_msg = f"Internal server error: {type(e).__name__}"
            logger.error(f"Error processing UI form run: {e!s}", exc_info=True)
            if run_id:
                run_store.update_run_status(run_id, "failed", error_msg)
            return HTMLResponse(
                f"<div id='result-content' class='error-message'>{html.escape(error_msg)}</div>",
                status_code=500,
            )

    return router
```

### src\flock\core\api\run_store.py

- **Lines**: 72
- **Last modified**: 2025-04-02 17:29:19
- **Used by**: 2 files

**Description**: Manages the state of active and completed Flock runs.

**Classes**:
- `RunStore`: 5 methods

**Content**:
```py
# src/flock/core/api/run_store.py
"""Manages the state of active and completed Flock runs."""

import threading
from datetime import datetime

from flock.core.logging.logging import get_logger

from .models import FlockAPIResponse  # Import from the models file

logger = get_logger("api.run_store")


class RunStore:
    """Stores and manages the state of Flock runs.

    All access to the internal run map is guarded by a single lock so the
    store can be shared between request handlers and background tasks.
    """

    def __init__(self):
        # run_id -> FlockAPIResponse record.
        self._runs: dict[str, FlockAPIResponse] = {}
        self._lock = threading.Lock()  # Basic lock for thread safety

    def create_run(self, run_id: str) -> FlockAPIResponse:
        """Creates a new run record with 'starting' status.

        An existing record with the same id is overwritten (with a warning).
        """
        with self._lock:
            if run_id in self._runs:
                logger.warning(f"Run ID {run_id} already exists. Overwriting.")
            response = FlockAPIResponse(
                run_id=run_id, status="starting", started_at=datetime.now()
            )
            self._runs[run_id] = response
            logger.debug(f"Created run record for run_id: {run_id}")
            return response

    def get_run(self, run_id: str) -> FlockAPIResponse | None:
        """Gets the status of a run, or None when the id is unknown."""
        with self._lock:
            return self._runs.get(run_id)

    def update_run_status(
        self, run_id: str, status: str, error: str | None = None
    ):
        """Updates the status and potentially error of a run.

        Terminal states ("completed"/"failed") also stamp completed_at.
        Unknown run ids are logged and ignored.
        """
        with self._lock:
            run = self._runs.get(run_id)
            if run is None:
                logger.warning(
                    f"Attempted to update status for non-existent run_id: {run_id}"
                )
                return
            run.status = status
            if error:
                run.error = error
            if status in ["completed", "failed"]:
                run.completed_at = datetime.now()
            logger.debug(f"Updated status for run_id {run_id} to {status}")

    def update_run_result(self, run_id: str, result: dict):
        """Updates the result of a completed run and marks it completed.

        Unknown run ids are logged and ignored.
        """
        with self._lock:
            run = self._runs.get(run_id)
            if run is None:
                logger.warning(
                    f"Attempted to update result for non-existent run_id: {run_id}"
                )
                return
            # Ensure result is serializable (e.g., convert Box).
            # BUG FIX: previously this called dict(result), which is only a
            # shallow conversion; call the object's own to_dict() so nested
            # Box values are converted recursively as well.
            run.result = (
                result.to_dict() if hasattr(result, "to_dict") else result
            )
            run.status = "completed"
            run.completed_at = datetime.now()
            logger.debug(f"Updated result for completed run_id: {run_id}")

    # Add methods for cleanup, persistence, etc. later
```

### src\flock\core\__init__.py

- **Lines**: 19
- **Last modified**: 2025-04-02 17:29:19

**Description**: This module contains the core classes of the flock package.

**Content**:
```py
"""This module contains the core classes of the flock package."""

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent
from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
from flock.core.flock_factory import FlockFactory
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.flock_registry import FlockRegistry

__all__ = [
    "Flock",
    "FlockAgent",
    "FlockEvaluator",
    "FlockEvaluatorConfig",
    "FlockFactory",
    "FlockModule",
    "FlockModuleConfig",
    "FlockRegistry",
]
```

### src\flock\core\serialization\__init__.py

- **Lines**: 13
- **Last modified**: 2025-04-04 17:11:47

**Description**: Serialization utilities for Flock objects.

**Content**:
```py
"""Serialization utilities for Flock objects."""

from flock.core.serialization.callable_registry import CallableRegistry
from flock.core.serialization.json_encoder import FlockJSONEncoder
from flock.core.serialization.secure_serializer import SecureSerializer
from flock.core.serialization.serializable import Serializable

__all__ = [
    "CallableRegistry",
    "FlockJSONEncoder",
    "SecureSerializer",
    "Serializable",
]
```

### src\flock\core\api\__init__.py

- **Lines**: 11
- **Last modified**: 2025-04-02 17:29:19

**Description**: Flock API Server components.

**Content**:
```py
# src/flock/core/api/__init__.py
"""Flock API Server components."""

from .main import FlockAPI
from .models import FlockAPIRequest, FlockAPIResponse

__all__ = [
    "FlockAPI",
    "FlockAPIRequest",
    "FlockAPIResponse",
]
```

### src\flock\core\logging\__init__.py

- **Lines**: 2
- **Last modified**: 2025-02-18 03:20:40

**Description**: Flock logging system with Rich integration and structured logging support.

**Content**:
```py
"""Flock logging system with Rich integration and structured logging support."""

```

### src\flock\cli\settings.py

- **Lines**: 857
- **Last modified**: 2025-03-29 13:53:59

**Description**: Settings editor for the Flock CLI.

This module provides functionality to view, edit, add, and delete
environment variables in the .env file.

**Functions**:
- `settings_editor()`
- `view_env_variables(page, page_size)`
- `change_vars_per_page()`
- `get_vars_per_page_setting(env_vars)`
- `set_vars_per_page_setting(page_size)`
- `toggle_show_secrets()`
- `get_show_secrets_setting(env_vars)`
- `set_show_secrets_setting(show_secrets)`
- `edit_env_variable()`
- `add_env_variable()`
- `delete_env_variable()`
- `manage_profiles()`
- `switch_profile()`
- `create_profile()`
- `rename_profile()`
- `delete_profile()`
- `is_sensitive(key)`
- `mask_sensitive_value(value)`
- `get_current_profile()`
- `get_available_profiles()`
- `backup_env_file()`
- `load_env_file()`
- `save_env_file(env_vars)`

**Content**:
```py
"""Settings editor for the Flock CLI.

This module provides functionality to view, edit, add, and delete
environment variables in the .env file.
"""

import os
import shutil
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import math

import questionary
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.text import Text

from flock.core.util.cli_helper import init_console

# Constants
ENV_FILE = ".env"
ENV_TEMPLATE_FILE = ".env_template"
ENV_PROFILE_PREFIX = ".env_"
DEFAULT_PROFILE_COMMENT = "# Profile: {profile_name}"
SHOW_SECRETS_KEY = "SHOW_SECRETS"
VARS_PER_PAGE_KEY = "VARS_PER_PAGE"
DEFAULT_VARS_PER_PAGE = 20

console = Console()


def settings_editor():
    """Top-level loop of the environment settings editor.

    Renders the menu, dispatches the chosen action to its handler, and
    repeats until the user selects "Back to main menu".
    """
    handlers = {
        "View all environment variables": view_env_variables,
        "Edit an environment variable": edit_env_variable,
        "Add a new environment variable": add_env_variable,
        "Delete an environment variable": delete_env_variable,
        "Manage environment profiles": manage_profiles,
        "Toggle show secrets": toggle_show_secrets,
        "Change variables per page": change_vars_per_page,
    }

    while True:
        init_console()
        console.print(Panel("[bold green]Environment Settings Editor[/]"), justify="center")

        # Show which profile (if any) the current .env belongs to.
        active_profile = get_current_profile()
        if active_profile:
            console.print(f"Current Profile: [bold cyan]{active_profile}[/]")
        else:
            console.print("No profile detected")

        console.line()

        choice = questionary.select(
            "What would you like to do?",
            choices=[
                questionary.Separator(line=" "),
                "View all environment variables",
                "Edit an environment variable",
                "Add a new environment variable",
                "Delete an environment variable",
                questionary.Separator(),
                "Manage environment profiles",
                questionary.Separator(),
                "Toggle show secrets",
                "Change variables per page",
                questionary.Separator(),
                "Back to main menu",
            ],
        ).ask()

        if choice == "Back to main menu":
            break

        action = handlers.get(choice)
        if action is not None:
            action()

        # Pause so the user can read the action's output before redrawing.
        input("\nPress Enter to continue...")


def view_env_variables(page: int = 1, page_size: Optional[int] = None):
    """View all environment variables with pagination.

    Renders one page of the .env contents as a table (masking sensitive
    values unless the show-secrets setting is on), then handles the
    navigation prompt by recursing into itself with new page arguments.

    Args:
        page: Page number to display
        page_size: Number of variables per page (if None, use the setting in .env)
    """
    env_vars = load_env_file()
    
    # If page_size is not specified, get it from settings
    if page_size is None:
        page_size = get_vars_per_page_setting(env_vars)
    
    # Calculate pagination
    total_vars = len(env_vars)
    total_pages = math.ceil(total_vars / page_size) if total_vars > 0 else 1
    
    # Validate page number (clamp into [1, total_pages])
    page = min(max(1, page), total_pages)
    
    start_idx = (page - 1) * page_size
    end_idx = min(start_idx + page_size, total_vars)
    
    # Get current page variables
    current_page_vars = list(env_vars.items())[start_idx:end_idx]
    
    # Check if secrets should be shown
    show_secrets = get_show_secrets_setting(env_vars)
    
    # Create table
    table = Table(title=f"Environment Variables (Page {page}/{total_pages}, {page_size} per page)")
    table.add_column("Name", style="cyan")
    table.add_column("Value", style="green")
    
    # Show secrets status
    secrets_status = "[green]ON[/]" if show_secrets else "[red]OFF[/]"
    init_console()
    console.print(f"Show Secrets: {secrets_status}")
    
    for key, value in current_page_vars:
        # Skip lines that are comments or empty.
        # NOTE(review): pagination counts these skipped entries, so a page
        # may display fewer than page_size rows.
        if key.startswith('#') or not key:
            continue
            
        # Mask sensitive values if show_secrets is False
        if is_sensitive(key) and not show_secrets:
            masked_value = mask_sensitive_value(value)
            table.add_row(key, masked_value)
        else:
            table.add_row(key, value)
    
    console.print(table)
    
    # Pagination controls with more intuitive shortcuts; only show the
    # options that are applicable to the current page/state.
    console.print("\nNavigation: ", end="")
    if page > 1:
        console.print("[bold]Previous (p)[/] | ", end="")
    if page < total_pages:
        console.print("[bold]Next (n)[/] | ", end="")
    if show_secrets:
        console.print("[bold]Hide secrets (h)[/] | ", end="")
    else:
        console.print("[bold]Show secrets (s)[/] | ", end="")
    console.print("[bold]Change variables per page (v)[/] | ", end="")
    console.print("[bold]Back (b)[/]")
    
    # Handle navigation: each action re-renders by recursing, then breaks
    # out of this frame's loop. Unrecognised input simply re-prompts.
    while True:
        key = input("Enter option: ").lower()
        if key == 'p' and page > 1:
            view_env_variables(page - 1, page_size)
            break
        elif key == 'n' and page < total_pages:
            view_env_variables(page + 1, page_size)
            break
        elif key == 's' and not show_secrets:
            # Confirm before showing secrets
            confirm = questionary.confirm("Are you sure you want to show sensitive values?").ask()
            if confirm:
                set_show_secrets_setting(True)
                view_env_variables(page, page_size)
            break
        elif key == 'h' and show_secrets:
            set_show_secrets_setting(False)
            view_env_variables(page, page_size)
            break
        elif key == 'v':
            new_page_size = change_vars_per_page()
            if new_page_size:
                view_env_variables(1, new_page_size)  # Reset to first page with new page size
            break
        elif key == 'b':
            break


def change_vars_per_page():
    """Prompt for a new variables-per-page value and persist it.

    Returns:
        The new page size or None if cancelled
    """
    env_vars = load_env_file()
    current_setting = get_vars_per_page_setting(env_vars)

    console.print(f"Current variables per page: [cyan]{current_setting}[/]")

    # Predefined options plus a custom entry and a way out.
    choice = questionary.select(
        "Select number of variables per page:",
        choices=["10", "20", "30", "50", "Custom", "Cancel"],
    ).ask()

    if choice == "Cancel":
        return None

    if choice != "Custom":
        new_size = int(choice)
    else:
        # Keep asking until a valid integer in [5, 100] is given, or the
        # user submits a blank answer to cancel.
        while True:
            answer = questionary.text(
                "Enter custom page size (5-100):",
                default=str(current_setting)
            ).ask()

            if not answer:
                return None

            try:
                new_size = int(answer)
            except ValueError:
                console.print("[yellow]Please enter a valid number.[/]")
                continue

            if 5 <= new_size <= 100:
                break
            console.print("[yellow]Page size must be between 5 and 100.[/]")

    # Save the setting
    set_vars_per_page_setting(new_size)
    console.print(f"[green]Variables per page set to {new_size}.[/]")

    return new_size


def get_vars_per_page_setting(env_vars: Optional[Dict[str, str]] = None) -> int:
    """Get the current variables per page setting.

    Args:
        env_vars: Optional dictionary of environment variables; loaded
            from the .env file when not provided.

    Returns:
        Number of variables per page. Falls back to DEFAULT_VARS_PER_PAGE
        when the value is unset, malformed, or outside [5, 100].
    """
    # FIX: the annotation used to be `Dict[str, str] = None`, which is
    # incorrect for a None default; it is now Optional.
    if env_vars is None:
        env_vars = load_env_file()

    raw_value = env_vars.get(VARS_PER_PAGE_KEY)
    if raw_value is not None:
        try:
            page_size = int(raw_value)
        except ValueError:
            pass  # Malformed value: fall through to the default.
        else:
            # Only honour values within reasonable bounds.
            if 5 <= page_size <= 100:
                return page_size

    return DEFAULT_VARS_PER_PAGE


def set_vars_per_page_setting(page_size: int):
    """Persist the variables-per-page setting to the .env file.

    Args:
        page_size: Number of variables to display per page
    """
    settings = load_env_file()
    settings[VARS_PER_PAGE_KEY] = str(page_size)
    save_env_file(settings)


def toggle_show_secrets():
    """Flip the show-secrets setting, asking the user to confirm first."""
    env_vars = load_env_file()
    currently_shown = get_show_secrets_setting(env_vars)

    if currently_shown:
        console.print("Currently showing sensitive values. Do you want to hide them?")
        if questionary.confirm("Hide sensitive values?").ask():
            set_show_secrets_setting(False)
            console.print("[green]Sensitive values will now be masked.[/]")
    else:
        # Turning secrets on deserves an explicit warning.
        console.print("[yellow]Warning:[/] Showing sensitive values can expose sensitive information.")
        if questionary.confirm("Are you sure you want to show sensitive values?").ask():
            set_show_secrets_setting(True)
            console.print("[green]Sensitive values will now be shown.[/]")


def get_show_secrets_setting(env_vars: Optional[Dict[str, str]] = None) -> bool:
    """Get the current show secrets setting.

    Args:
        env_vars: Optional dictionary of environment variables; loaded
            from the .env file when not provided.

    Returns:
        True if secrets should be shown, False otherwise (the default).
    """
    # FIX: the annotation used to be `Dict[str, str] = None`, which is
    # incorrect for a None default; it is now Optional.
    if env_vars is None:
        env_vars = load_env_file()

    raw_value = env_vars.get(SHOW_SECRETS_KEY)
    if raw_value is not None:
        # Any value other than (case-insensitive) "true" counts as off.
        return raw_value.lower() == 'true'

    return False


def set_show_secrets_setting(show_secrets: bool):
    """Persist the show-secrets flag to the .env file.

    Args:
        show_secrets: Whether to show secrets
    """
    settings = load_env_file()
    settings[SHOW_SECRETS_KEY] = str(show_secrets)
    save_env_file(settings)


def edit_env_variable():
    """Edit an environment variable.

    Prompts the user to pick a variable, warns when it looks sensitive,
    shows the current (possibly masked) value, and saves the new value
    back to the .env file. No change is made when the user cancels,
    re-enters the same value, or declines to store an empty value.
    """
    # Get list of variables
    env_vars = load_env_file()
    
    if not env_vars:
        console.print("[yellow]No environment variables found to edit.[/]")
        return
    
    # Filter out comments and blank-line placeholders from the choices
    variables = [k for k in env_vars.keys() if not k.startswith('#') and k]
    
    # Display variables with selection
    init_console()
    console.print("Select a variable to edit:")
    
    # Let user select a variable to edit
    var_name = questionary.select(
        "Select a variable to edit:",
        choices=variables + ["Cancel"],
    ).ask()
    
    if var_name == "Cancel":
        return
    
    current_value = env_vars[var_name]
    is_sensitive_var = is_sensitive(var_name)
    
    # Extra confirmation before touching values that look like secrets
    if is_sensitive_var:
        console.print(f"[yellow]Warning:[/] You are editing a sensitive value: {var_name}")
        confirm = questionary.confirm("Are you sure you want to continue?").ask()
        if not confirm:
            return
    
    # Show current value (masked if sensitive and show_secrets is False)
    show_secrets = get_show_secrets_setting(env_vars)
    if is_sensitive_var and not show_secrets:
        console.print(f"Current value: {mask_sensitive_value(current_value)}")
    else:
        console.print(f"Current value: {current_value}")
    
    # Get new value with hint
    console.print("[italic]Enter new value (or leave empty to cancel)[/]")
    new_value = questionary.text("Enter new value:", default=current_value).ask()
    
    # None means the prompt itself was aborted (e.g. Ctrl+C)
    if new_value is None:
        console.print("[yellow]Edit cancelled.[/]")
        return
    
    if new_value == "":
        # Confirm if user wants to set an empty value or cancel
        confirm = questionary.confirm("Do you want to set an empty value? Select No to cancel.", default=False).ask()
        if not confirm:
            console.print("[yellow]Edit cancelled.[/]")
            return
    
    if new_value == current_value:
        console.print("[yellow]No changes made.[/]")
        return
    
    # Update the value and write the whole file back
    env_vars[var_name] = new_value
    save_env_file(env_vars)
    console.print(f"[green]Updated {var_name} successfully.[/]")


def add_env_variable():
    """Add a new environment variable to the .env file."""
    env_vars = load_env_file()

    console.print("[italic]Enter variable name (or leave empty to go back)[/]")

    # Loop until we have a usable, non-duplicate name (or the user backs out).
    while True:
        var_name = questionary.text("Enter variable name:").ask()

        if not var_name:
            # Blank input: offer a way back to the settings menu.
            if questionary.confirm("Do you want to go back to the settings menu?", default=True).ask():
                return
            console.print("[italic]Please enter a variable name (or leave empty to go back)[/]")
            continue

        if var_name in env_vars and not var_name.startswith('#'):
            console.print(f"[yellow]Variable {var_name} already exists. Please use edit instead.[/]")
            continue

        break

    # Get variable value
    var_value = questionary.text("Enter variable value:").ask()

    # Store and persist
    env_vars[var_name] = var_value
    save_env_file(env_vars)
    console.print(f"[green]Added {var_name} successfully.[/]")


def delete_env_variable():
    """Delete an environment variable from the .env file."""
    env_vars = load_env_file()

    if not env_vars:
        console.print("[yellow]No environment variables found to delete.[/]")
        return

    # Offer only real variables, not comment lines or blank placeholders.
    variables = [name for name in env_vars.keys() if not name.startswith('#') and name]

    init_console()
    console.print("Select a variable to delete:")

    var_name = questionary.select(
        "Select a variable to delete:",
        choices=variables + ["Cancel"],
    ).ask()

    if var_name == "Cancel":
        return

    # Deletion is destructive, so confirm first.
    if not questionary.confirm(f"Are you sure you want to delete {var_name}?").ask():
        console.print("[yellow]Deletion cancelled.[/]")
        return

    del env_vars[var_name]
    save_env_file(env_vars)
    console.print(f"[green]Deleted {var_name} successfully.[/]")


def manage_profiles():
    """Show the profile overview and dispatch a profile-management action."""
    init_console()
    console.print(Panel("[bold green]Environment Profile Management[/]"), justify="center")

    # Gather current state up front.
    current_profile = get_current_profile()
    available_profiles = get_available_profiles()

    if current_profile:
        console.print(f"Current Profile: [bold cyan]{current_profile}[/]")

    if not available_profiles:
        console.print("[yellow]No profiles found.[/]")
    else:
        console.print("Available Profiles:")
        for profile in available_profiles:
            if profile == current_profile:
                console.print(f"  [bold cyan]{profile} (active)[/]")
            else:
                console.print(f"  {profile}")

    console.line()

    choice = questionary.select(
        "What would you like to do?",
        choices=[
            questionary.Separator(line=" "),
            "Switch to a different profile",
            "Create a new profile",
            "Rename a profile",
            "Delete a profile",
            "Back to settings menu",
        ],
    ).ask()

    # Map the menu label directly to its handler; anything else
    # (including "Back to settings menu" or an aborted prompt) falls through.
    handlers = {
        "Switch to a different profile": switch_profile,
        "Create a new profile": create_profile,
        "Rename a profile": rename_profile,
        "Delete a profile": delete_profile,
    }
    action = handlers.get(choice)
    if action is not None:
        action()


def switch_profile():
    """Switch to a different environment profile.

    Backs up the current .env file (to .env.bak) and then overwrites
    .env with the contents of the selected profile file.
    """
    available_profiles = get_available_profiles()
    current_profile = get_current_profile()
    
    if not available_profiles:
        console.print("[yellow]No profiles available to switch to.[/]")
        return
    
    # Remove current profile from the list to avoid switching to the same profile
    selectable_profiles = [p for p in available_profiles if p != current_profile]
    
    if not selectable_profiles:
        console.print("[yellow]No other profiles available to switch to.[/]")
        return
    
    target_profile = questionary.select(
        "Select a profile to switch to:",
        choices=selectable_profiles + ["Cancel"],
    ).ask()
    
    if target_profile == "Cancel":
        return
    
    # Confirm switch
    confirm = questionary.confirm(f"Are you sure you want to switch to the {target_profile} profile?").ask()
    if not confirm:
        return
    
    # Backup current .env file before it gets overwritten below
    backup_env_file()
    
    # Copy selected profile to .env (the existence check guards against the
    # profile file disappearing between listing and selection)
    source_file = f"{ENV_PROFILE_PREFIX}{target_profile}"
    if os.path.exists(source_file):
        shutil.copy2(source_file, ENV_FILE)
        console.print(f"[green]Switched to {target_profile} profile successfully.[/]")
    else:
        console.print(f"[red]Error: Could not find profile file {source_file}.[/]")


def create_profile():
    """Create a new environment profile file (.env_<name>).

    The new profile is seeded from either the current .env file or the
    .env_template file (offered only when it exists), and a
    "# Profile: <name>" header is prepended when missing.
    """
    profile_name = questionary.text("Enter new profile name:").ask()
    
    if not profile_name:
        console.print("[yellow]Profile name cannot be empty.[/]")
        return
    
    # Check if profile already exists
    target_file = f"{ENV_PROFILE_PREFIX}{profile_name}"
    if os.path.exists(target_file):
        console.print(f"[yellow]Profile {profile_name} already exists.[/]")
        return
    
    # Determine source file - current .env, plus the template when present.
    # BUG FIX: the template entry used to be hard-coded into the list AND
    # conditionally appended, so ".env_template" was offered even when the
    # file was missing and appeared twice when it existed.
    source_choices = ["Current environment (.env)"]
    if os.path.exists(ENV_TEMPLATE_FILE):
        source_choices.append(ENV_TEMPLATE_FILE)
    
    source_choice = questionary.select(
        "Create profile based on:",
        choices=source_choices + ["Cancel"],
    ).ask()
    
    if source_choice == "Cancel":
        return
    
    source_file = ENV_FILE if source_choice == "Current environment (.env)" else ENV_TEMPLATE_FILE
    
    # Safety net in case the chosen source vanished since the menu was built.
    if not os.path.exists(source_file):
        console.print(f"[red]Error: Source file {source_file} not found.[/]")
        return
    
    # Create new profile file
    try:
        # Copy source file
        shutil.copy2(source_file, target_file)
        
        # Add profile header if it doesn't exist
        with open(target_file, 'r') as file:
            content = file.read()
        
        if not content.startswith("# Profile:"):
            with open(target_file, 'w') as file:
                profile_header = DEFAULT_PROFILE_COMMENT.format(profile_name=profile_name)
                file.write(f"{profile_header}\n{content}")
        
        console.print(f"[green]Created {profile_name} profile successfully.[/]")
    except Exception as e:
        console.print(f"[red]Error creating profile: {str(e)}[/]")


def rename_profile():
    """Rename an existing profile.

    Rewrites the profile file under the new name (updating its
    "# Profile:" header), removes the old file, and — when the renamed
    profile is the active one — updates the header of .env as well.
    """
    available_profiles = get_available_profiles()
    current_profile = get_current_profile()
    
    if not available_profiles:
        console.print("[yellow]No profiles available to rename.[/]")
        return
    
    # Let user select a profile to rename
    profile_to_rename = questionary.select(
        "Select a profile to rename:",
        choices=available_profiles + ["Cancel"],
    ).ask()
    
    if profile_to_rename == "Cancel":
        return
    
    # Get new name
    new_name = questionary.text("Enter new profile name:").ask()
    
    if not new_name:
        console.print("[yellow]New profile name cannot be empty.[/]")
        return
    
    if new_name in available_profiles:
        console.print(f"[yellow]Profile {new_name} already exists.[/]")
        return
    
    # Rename profile file
    source_file = f"{ENV_PROFILE_PREFIX}{profile_to_rename}"
    target_file = f"{ENV_PROFILE_PREFIX}{new_name}"
    
    try:
        # Read content of the source file
        with open(source_file, 'r') as file:
            content = file.readlines()
        
        # Update profile header if it exists
        if content and content[0].startswith("# Profile:"):
            content[0] = DEFAULT_PROFILE_COMMENT.format(profile_name=new_name) + "\n"
        
        # Write to new file (copy-then-delete rather than os.rename so the
        # header can be rewritten in the same pass)
        with open(target_file, 'w') as file:
            file.writelines(content)
        
        # Remove old file
        os.remove(source_file)
        
        # If this was the current profile, update the .env header as well
        if profile_to_rename == current_profile:
            with open(ENV_FILE, 'r') as file:
                content = file.readlines()
            
            if content and content[0].startswith("# Profile:"):
                content[0] = DEFAULT_PROFILE_COMMENT.format(profile_name=new_name) + "\n"
            
            with open(ENV_FILE, 'w') as file:
                file.writelines(content)
        
        console.print(f"[green]Renamed {profile_to_rename} to {new_name} successfully.[/]")
    except Exception as e:
        console.print(f"[red]Error renaming profile: {str(e)}[/]")


def delete_profile():
    """Delete an existing profile.

    Removes the profile's .env_<name> file after confirmation. Deleting
    the currently active profile leaves .env intact but strips its
    "# Profile:" header so it is no longer marked as that profile.
    """
    available_profiles = get_available_profiles()
    current_profile = get_current_profile()
    
    if not available_profiles:
        console.print("[yellow]No profiles available to delete.[/]")
        return
    
    # Let user select a profile to delete
    profile_to_delete = questionary.select(
        "Select a profile to delete:",
        choices=available_profiles + ["Cancel"],
    ).ask()
    
    if profile_to_delete == "Cancel":
        return
    
    # Confirm deletion (irreversible)
    confirm = questionary.confirm(
        f"Are you sure you want to delete the {profile_to_delete} profile? This cannot be undone."
    ).ask()
    
    if not confirm:
        return
    
    # Delete profile file
    profile_file = f"{ENV_PROFILE_PREFIX}{profile_to_delete}"
    
    try:
        os.remove(profile_file)
        
        # Warn if deleting the current profile
        if profile_to_delete == current_profile:
            console.print(
                f"[yellow]Warning: You deleted the currently active profile. "
                f"The .env file still contains those settings but is no longer marked as a profile.[/]"
            )
            
            # Remove profile header from .env so it no longer claims the
            # deleted profile's identity
            with open(ENV_FILE, 'r') as file:
                content = file.readlines()
            
            if content and content[0].startswith("# Profile:"):
                content = content[1:]
                with open(ENV_FILE, 'w') as file:
                    file.writelines(content)
        
        console.print(f"[green]Deleted {profile_to_delete} profile successfully.[/]")
    except Exception as e:
        console.print(f"[red]Error deleting profile: {str(e)}[/]")


def is_sensitive(key: str) -> bool:
    """Return True when *key* looks like it names a secret.

    A variable is treated as sensitive when its lower-cased name
    contains any of a fixed set of substrings (key, token, secret,
    password, api, pat).

    Args:
        key: The variable name

    Returns:
        True if sensitive, False otherwise
    """
    lowered = key.lower()
    for pattern in ('key', 'token', 'secret', 'password', 'api', 'pat'):
        if pattern in lowered:
            return True
    return False


def mask_sensitive_value(value: str) -> str:
    """Return a masked rendering of a sensitive value.

    Empty values pass through unchanged; values of four characters or
    fewer become four bullets; longer values keep their first and last
    two characters with bullets in between.

    Args:
        value: The sensitive value

    Returns:
        Masked value
    """
    if not value:
        return value

    if len(value) <= 4:
        return "••••"

    # Keep two characters at each end; hide everything in between.
    middle = "•" * (len(value) - 4)
    return f"{value[:2]}{middle}{value[-2:]}"


def get_current_profile() -> Optional[str]:
    """Return the active profile name, or None when no profile is set.

    The active profile is recorded as a "# Profile: <name>" comment on
    the first line of the .env file.

    Returns:
        Profile name or None if no profile is active
    """
    if not os.path.exists(ENV_FILE):
        return None

    try:
        with open(ENV_FILE, 'r') as file:
            header = file.readline().strip()
    except Exception:
        # Unreadable file: behave as if no profile is active.
        return None

    if header.startswith("# Profile:"):
        return header.replace("# Profile:", "").strip()

    return None


def get_available_profiles() -> List[str]:
    """Get a list of available profiles.

    Scans the working directory for files named
    ``{ENV_PROFILE_PREFIX}<name>`` and returns the ``<name>`` parts.

    Returns:
        List of profile names
    """
    profiles = []
    
    for file in os.listdir():
        # BUG FIX: ".env_template" also matches the ".env_" prefix, so the
        # template used to be listed as a profile named "template" and could
        # be switched to / renamed / deleted as one. Exclude it explicitly.
        if file == ENV_TEMPLATE_FILE:
            continue
        if file.startswith(ENV_PROFILE_PREFIX):
            profile_name = file[len(ENV_PROFILE_PREFIX):]
            profiles.append(profile_name)
    
    return profiles


def backup_env_file():
    """Copy the current .env file to .env.bak (no-op when .env is missing)."""
    if os.path.exists(ENV_FILE):
        shutil.copy2(ENV_FILE, f"{ENV_FILE}.bak")


def load_env_file() -> Dict[str, str]:
    """Load the .env file into a dictionary.

    Comment lines are kept as keys (with an empty value) so the file
    layout can be reproduced by save_env_file; a blank line is stored
    under the empty key "".

    NOTE(review): because a dict is used, duplicate comment lines and
    multiple blank lines collapse into a single entry, so round-tripping
    through save_env_file does not preserve them exactly.

    Returns:
        Dictionary of environment variables
    """
    env_vars: Dict[str, str] = {}
    
    if not os.path.exists(ENV_FILE):
        console.print(f"[yellow]Warning: {ENV_FILE} file not found.[/]")
        return env_vars
    
    try:
        with open(ENV_FILE, 'r') as file:
            lines = file.readlines()
            
        # Process each line
        for line in lines:
            line = line.strip()
            
            # Blank lines are recorded under the "" key as placeholders
            if not line:
                env_vars[""] = ""
                continue
            
            # Handle comments (the whole comment line becomes the key)
            if line.startswith('#'):
                env_vars[line] = ""
                continue
            
            # Handle regular variables; split only on the first '=' so
            # values may themselves contain '='
            if '=' in line:
                key, value = line.split('=', 1)
                env_vars[key] = value
            else:
                # Handle lines without equals sign
                env_vars[line] = ""
                
    except Exception as e:
        console.print(f"[red]Error loading .env file: {str(e)}[/]")
    
    return env_vars


def save_env_file(env_vars: Dict[str, str]):
    """Write the environment-variable mapping back to the .env file.

    Args:
        env_vars: Dictionary of environment variables, using the same
            encoding produced by load_env_file() (comment lines stored
            whole as keys, "" key for a blank line).
    """
    # Snapshot the current file first so a failed write can be recovered.
    backup_env_file()

    try:
        with open(ENV_FILE, 'w') as file:
            for key, value in env_vars.items():
                if key.startswith('#'):
                    # Comment entries carry the whole line in the key.
                    line = key
                elif not key:
                    # Empty key encodes a blank line.
                    line = ""
                else:
                    line = f"{key}={value}"
                file.write(line + "\n")

        console.print("[green]Settings saved successfully.[/]")
    except Exception as e:
        console.print(f"[red]Error saving .env file: {str(e)}[/]")
```

### src\flock\core\api\ui\__init__.py

- **Lines**: 0
- **Last modified**: 2025-04-02 17:29:19

**Content**:
```py

```

### src\flock\core\tools\azure_tools.py

- **Lines**: 496
- **Last modified**: 2025-03-03 16:02:36

**Functions**:
- `_get_default_endpoint()`
- `_get_default_api_key()`
- `_get_default_index_name()`
- `azure_search_initialize_clients(endpoint, api_key, index_name)`
- `azure_search_create_index(index_name, fields, vector_search, endpoint, api_key)`
- `azure_search_upload_documents(documents, index_name, endpoint, api_key)`
- `azure_search_query(search_text, filter, select, top, vector, vector_field, vector_k, index_name, endpoint, api_key)`
- `azure_search_get_document(key, select, index_name, endpoint, api_key)`
- `azure_search_delete_documents(keys, key_field_name, index_name, endpoint, api_key)`
- `azure_search_list_indexes(endpoint, api_key)`
- `azure_search_get_index_statistics(index_name, endpoint, api_key)`
- `azure_search_create_vector_index(fields, vector_dimensions, index_name, algorithm_kind, endpoint, api_key)`

**Content**:
```py
import os
from typing import Any

from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.indexes.models import (
    ExhaustiveKnnAlgorithmConfiguration,
    HnswAlgorithmConfiguration,
    SearchableField,
    SearchField,
    SearchFieldDataType,
    SearchIndex,
    SimpleField,
    VectorSearch,
    VectorSearchProfile,
)
from azure.search.documents.models import VectorizedQuery

from flock.core.logging.trace_and_logged import traced_and_logged


def _get_default_endpoint() -> str:
    """Get the default Azure Search endpoint from environment variables."""
    endpoint = os.environ.get("AZURE_SEARCH_ENDPOINT")
    if not endpoint:
        raise ValueError(
            "AZURE_SEARCH_ENDPOINT environment variable is not set"
        )
    return endpoint


def _get_default_api_key() -> str:
    """Get the default Azure Search API key from environment variables."""
    api_key = os.environ.get("AZURE_SEARCH_API_KEY")
    if not api_key:
        raise ValueError("AZURE_SEARCH_API_KEY environment variable is not set")
    return api_key


def _get_default_index_name() -> str:
    """Get the default Azure Search index name from environment variables."""
    index_name = os.environ.get("AZURE_SEARCH_INDEX_NAME")
    if not index_name:
        raise ValueError(
            "AZURE_SEARCH_INDEX_NAME environment variable is not set"
        )
    return index_name


@traced_and_logged
def azure_search_initialize_clients(
    endpoint: str | None = None,
    api_key: str | None = None,
    index_name: str | None = None,
) -> dict[str, Any]:
    """Build the Azure AI Search client objects.

    Args:
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)
        index_name: Optional index name for SearchClient initialization (defaults to AZURE_SEARCH_INDEX_NAME env var if not None)

    Returns:
        Dict with an "index_client" entry, plus a "search_client" entry
        when an index name was resolved.
    """
    # Fall back to the environment for any credential not passed in.
    resolved_endpoint = endpoint or _get_default_endpoint()
    credential = AzureKeyCredential(api_key or _get_default_api_key())

    clients: dict[str, Any] = {
        "index_client": SearchIndexClient(
            endpoint=resolved_endpoint, credential=credential
        )
    }

    # Resolve the index name from the environment only when it is set,
    # so a missing env var does not raise here.
    if index_name is None and os.environ.get("AZURE_SEARCH_INDEX_NAME"):
        index_name = _get_default_index_name()

    if index_name:
        clients["search_client"] = SearchClient(
            endpoint=resolved_endpoint,
            index_name=index_name,
            credential=credential,
        )

    return clients


@traced_and_logged
def azure_search_create_index(
    index_name: str | None = None,
    fields: list[SearchField] | None = None,
    vector_search: VectorSearch | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Create (or update) a search index in Azure AI Search.

    Args:
        index_name: Name of the search index to create (defaults to AZURE_SEARCH_INDEX_NAME env var)
        fields: List of field definitions for the index (required)
        vector_search: Optional vector search configuration
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        Dictionary containing information about the created index

    Raises:
        ValueError: If fields is None or a required env-var default is missing.
    """
    # FIX: the annotation was `list[SearchField] = None`, which mislabels
    # the None default; the parameter is Optional (but must be supplied).
    # Use environment variables as defaults if not provided.
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    if fields is None:
        raise ValueError("Fields must be provided for index creation")

    clients = azure_search_initialize_clients(endpoint, api_key)
    index_client = clients["index_client"]

    # create_or_update_index is idempotent: an existing index with this
    # name is updated rather than causing an error.
    index = SearchIndex(
        name=index_name, fields=fields, vector_search=vector_search
    )
    result = index_client.create_or_update_index(index)

    return {
        "index_name": result.name,
        "fields": [field.name for field in result.fields],
        "created": True,
    }


@traced_and_logged
def azure_search_upload_documents(
    documents: list[dict[str, Any]],
    index_name: str | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Upload a batch of documents to an Azure AI Search index.

    Args:
        documents: Documents to upload, each as a plain dictionary
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        Dict with "succeeded", "failed" and "total" counts.
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    search_client = azure_search_initialize_clients(
        endpoint, api_key, index_name
    )["search_client"]

    outcomes = search_client.upload_documents(documents=documents)

    # Tally the per-document success flags reported by the service.
    ok = sum(1 for outcome in outcomes if outcome.succeeded)
    total = len(outcomes)

    return {"succeeded": ok, "failed": total - ok, "total": total}


@traced_and_logged
def azure_search_query(
    search_text: str | None = None,
    filter: str | None = None,
    select: list[str] | None = None,
    top: int | None = 50,
    vector: list[float] | None = None,
    vector_field: str | None = None,
    vector_k: int | None = 10,
    index_name: str | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> list[dict[str, Any]]:
    """Run a keyword and/or vector query against an Azure AI Search index.

    Args:
        search_text: Optional keyword-search text
        filter: Optional OData filter expression
        select: Optional list of fields to include in results
        top: Maximum number of results to return
        vector: Optional query vector for vector search
        vector_field: Field holding the document vectors
        vector_k: Number of nearest neighbors for the vector clause
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        List of matching documents as dictionaries.
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    search_client = azure_search_initialize_clients(
        endpoint, api_key, index_name
    )["search_client"]

    # Build the vector clause only when both the vector and its target
    # field were supplied.
    vector_queries = None
    if vector and vector_field:
        vector_queries = [
            VectorizedQuery(vector=vector, k=vector_k, fields=[vector_field])
        ]

    hits = search_client.search(
        search_text=search_text,
        filter=filter,
        select=select,
        top=top,
        vector_queries=vector_queries,
    )

    return [dict(hit) for hit in hits]


@traced_and_logged
def azure_search_get_document(
    key: str,
    select: list[str] | None = None,
    index_name: str | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Fetch a single document from an Azure AI Search index by its key.

    Args:
        key: The unique key of the document to retrieve
        select: Optional list of fields to return
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        The retrieved document as a dictionary.
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    search_client = azure_search_initialize_clients(
        endpoint, api_key, index_name
    )["search_client"]

    document = search_client.get_document(key=key, selected_fields=select)
    return dict(document)


@traced_and_logged
def azure_search_delete_documents(
    keys: list[str],
    key_field_name: str = "id",
    index_name: str | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Delete documents from an Azure AI Search index by their keys.

    Args:
        keys: List of document keys to delete
        key_field_name: Name of the key field (defaults to "id")
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        Dict with "succeeded", "failed" and "total" counts.
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    search_client = azure_search_initialize_clients(
        endpoint, api_key, index_name
    )["search_client"]

    # The delete API only needs each document's key field populated.
    payload = [{key_field_name: document_key} for document_key in keys]
    outcomes = search_client.delete_documents(documents=payload)

    ok = sum(1 for outcome in outcomes if outcome.succeeded)
    total = len(outcomes)

    return {"succeeded": ok, "failed": total - ok, "total": total}


@traced_and_logged
def azure_search_list_indexes(
    endpoint: str | None = None, api_key: str | None = None
) -> list[dict[str, Any]]:
    """List all indexes in the Azure AI Search service.

    Args:
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        One summary dict per index with "name", "fields" (field names)
        and "field_count".
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()

    index_client = azure_search_initialize_clients(endpoint, api_key)[
        "index_client"
    ]

    summaries = []
    for index in index_client.list_indexes():
        field_names = [field.name for field in index.fields]
        summaries.append(
            {
                "name": index.name,
                "fields": field_names,
                "field_count": len(field_names),
            }
        )

    return summaries


@traced_and_logged
def azure_search_get_index_statistics(
    index_name: str | None = None,
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Return basic statistics (document count) for a search index.

    Args:
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        Dict with a single "document_count" entry.
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    search_client = azure_search_initialize_clients(
        endpoint, api_key, index_name
    )["search_client"]

    return {"document_count": search_client.get_document_count()}


@traced_and_logged
def azure_search_create_vector_index(
    fields: list[dict[str, Any]],
    vector_dimensions: int,
    index_name: str | None = None,
    algorithm_kind: str = "hnsw",
    endpoint: str | None = None,
    api_key: str | None = None,
) -> dict[str, Any]:
    """Create a vector search index in Azure AI Search.

    Args:
        fields: List of field configurations (dicts with "name", "type"
            and optional searchable/filterable/sortable/key/vector flags)
        vector_dimensions: Dimensions of the vector field(s)
        index_name: Name of the search index (defaults to AZURE_SEARCH_INDEX_NAME env var)
        algorithm_kind: Vector search algorithm ("hnsw" or "exhaustive")
        endpoint: The Azure AI Search service endpoint URL (defaults to AZURE_SEARCH_ENDPOINT env var)
        api_key: The Azure AI Search API key (defaults to AZURE_SEARCH_API_KEY env var)

    Returns:
        Dictionary with the index creation result; on failure, a dict
        with "error" and "created": False (no exception is raised).
    """
    # Use environment variables as defaults if not provided
    endpoint = endpoint or _get_default_endpoint()
    api_key = api_key or _get_default_api_key()
    index_name = index_name or _get_default_index_name()

    clients = azure_search_initialize_clients(endpoint, api_key)
    index_client = clients["index_client"]

    index_fields = []
    vector_fields = []

    for field_config in fields:
        field_name = field_config["name"]
        field_type = field_config["type"]
        field_searchable = field_config.get("searchable", False)
        field_filterable = field_config.get("filterable", False)
        field_sortable = field_config.get("sortable", False)
        field_key = field_config.get("key", False)
        field_vector = field_config.get("vector", False)

        if field_vector:
            # BUG FIX: fields flagged "vector" were previously built as
            # plain Simple/Searchable fields, so the created index had no
            # vector columns and `vector_dimensions` was silently ignored.
            # A vector field must be a searchable Collection(Single) with
            # explicit dimensions and a vector-search profile reference.
            field = SearchField(
                name=field_name,
                type=SearchFieldDataType.Collection(
                    SearchFieldDataType.Single
                ),
                searchable=True,
                vector_search_dimensions=vector_dimensions,
                vector_search_profile_name="vector-profile",
            )
            vector_fields.append(field_name)
        elif field_searchable and field_type == "string":
            field = SearchableField(
                name=field_name,
                type=SearchFieldDataType.String,
                key=field_key,
                filterable=field_filterable,
                sortable=field_sortable,
            )
        else:
            # Map the config's simple type names onto SDK data types;
            # unknown names fall through as None (same as the original
            # if/elif chain with no match).
            type_map = {
                "string": SearchFieldDataType.String,
                "int": SearchFieldDataType.Int32,
                "double": SearchFieldDataType.Double,
                "boolean": SearchFieldDataType.Boolean,
                "collection": SearchFieldDataType.Collection(
                    SearchFieldDataType.String
                ),
            }
            field = SimpleField(
                name=field_name,
                type=type_map.get(field_type),
                key=field_key,
                filterable=field_filterable,
                sortable=field_sortable,
            )

        index_fields.append(field)

    # Set up vector search configuration
    if algorithm_kind.lower() == "hnsw":
        algorithm_config = HnswAlgorithmConfiguration(
            name="hnsw-config",
            parameters={"m": 4, "efConstruction": 400, "efSearch": 500},
        )
    else:
        algorithm_config = ExhaustiveKnnAlgorithmConfiguration(
            name="exhaustive-config"
        )

    # The profile name here must match the vector fields'
    # vector_search_profile_name above.
    vector_search = VectorSearch(
        algorithms=[algorithm_config],
        profiles=[
            VectorSearchProfile(
                name="vector-profile",
                algorithm_configuration_name=algorithm_config.name,
            )
        ],
    )

    # Create the search index
    index = SearchIndex(
        name=index_name, fields=index_fields, vector_search=vector_search
    )

    try:
        result = index_client.create_or_update_index(index)
        return {
            "index_name": result.name,
            "vector_fields": vector_fields,
            "vector_dimensions": vector_dimensions,
            "algorithm": algorithm_kind,
            "created": True,
        }
    except Exception as e:
        # Best-effort contract: report the failure in the result dict
        # instead of raising, matching the original behavior.
        return {"error": str(e), "created": False}
```

### src\flock\core\tools\basic_tools.py

- **Lines**: 317
- **Last modified**: 2025-03-03 16:02:36

**Description**: This module contains basic agentic tools for performing various tasks.

**Functions**:
- `web_search_tavily(query)`
- `web_search_duckduckgo(keywords, search_type)`
- `web_search_bing(keywords)`
- `extract_links_from_markdown(markdown, url)`
- `get_web_content_as_markdown(url)`
- `get_anything_as_markdown(url_or_file_path)`
- `evaluate_math(expression)`
- `code_eval(python_code)`
- `get_current_time()`
- `count_words(text)`
- `extract_urls(text)`
- `extract_numbers(text)`
- `json_parse_safe(text)`
- `save_to_file(content, filename)`
- `read_from_file(filename)`
- `json_search(json_file_path, search_query, case_sensitive)`
- `_search_in_list(items, search_query, case_sensitive)`
- `_contains_text(obj, search_query, case_sensitive)`

**Content**:
```py
"""This module contains basic agentic tools for performing various tasks."""

import importlib
import json
import os
import re
from typing import Any, Literal

from flock.core.interpreter.python_interpreter import PythonInterpreter
from flock.core.logging.trace_and_logged import traced_and_logged


@traced_and_logged
def web_search_tavily(query: str):
    """Run a Tavily web search for *query* (requires the optional tavily package)."""
    if importlib.util.find_spec("tavily") is None:
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
        )

    from tavily import TavilyClient

    # API key is read from the environment; include_answer asks Tavily
    # for a synthesized answer alongside the raw results.
    client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
    return client.search(query, include_answer=True)  # type: ignore


@traced_and_logged
def web_search_duckduckgo(
    keywords: str, search_type: Literal["news", "web"] = "web"
):
    """Search DuckDuckGo for *keywords*, via the news or web-text vertical."""
    from duckduckgo_search import DDGS

    ddgs = DDGS()
    # "news" selects the news vertical; any other value falls back to
    # the plain web-text search, matching the Literal default.
    search_fn = ddgs.news if search_type == "news" else ddgs.text
    return search_fn(keywords)


@traced_and_logged
def web_search_bing(keywords: str):
    """Query the Bing Web Search v7 API and return its 'webPages' section."""
    import httpx

    # Raises KeyError when the subscription key is not configured.
    subscription_key = os.environ["BING_SEARCH_V7_SUBSCRIPTION_KEY"]
    endpoint = "https://api.bing.microsoft.com/v7.0/search"

    response = httpx.get(
        endpoint,
        headers={"Ocp-Apim-Subscription-Key": subscription_key},
        params={"q": keywords, "mkt": "en-US"},
    )
    response.raise_for_status()

    return response.json()["webPages"]


def extract_links_from_markdown(markdown: str, url: str) -> list:
    """Extract link targets from markdown, resolving relative ones against *url*.

    Args:
        markdown: Markdown text to scan for ``[text](target)`` links.
        url: Base URL prepended to relative link targets.

    Returns:
        List of link target strings (absolute targets returned unchanged).
    """
    # Regular expression to find all markdown links
    link_pattern = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
    links = link_pattern.findall(markdown)
    # BUG FIX: absolute targets used to be blindly prefixed with the base
    # URL, producing invalid addresses like "https://ahttps://b".
    return [
        target if target.startswith(("http://", "https://")) else url + target
        for _text, target in links
    ]


@traced_and_logged
def get_web_content_as_markdown(url: str):
    """Fetch *url* over HTTP and convert the response body to markdown."""
    deps_available = (
        importlib.util.find_spec("httpx") is not None
        and importlib.util.find_spec("markdownify") is not None
    )
    if not deps_available:
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[tools]'."
        )

    import httpx
    from markdownify import markdownify as md

    response = httpx.get(url)
    response.raise_for_status()
    return md(response.text)


@traced_and_logged
def get_anything_as_markdown(url_or_file_path: str):
    """Convert a URL or local file to markdown using the docling converter."""
    if importlib.util.find_spec("docling") is None:
        raise ImportError(
            "Optional tool dependencies not installed. Install with 'pip install flock-core[all-tools]'."
        )

    from docling.document_converter import DocumentConverter

    conversion = DocumentConverter().convert(url_or_file_path)
    return conversion.document.export_to_markdown()


@traced_and_logged
def evaluate_math(expression: str) -> float:
    """Evaluate a math expression inside the sandboxed Python interpreter."""
    # Modules the sandbox is allowed to import while evaluating.
    allowed_modules = [
        "os",
        "math",
        "random",
        "datetime",
        "time",
        "string",
        "collections",
        "itertools",
        "functools",
        "typing",
        "enum",
        "json",
        "ast",
    ]
    interpreter = PythonInterpreter({}, allowed_modules, verbose=True)
    return interpreter.execute(expression)


@traced_and_logged
def code_eval(python_code: str) -> str:
    """Execute arbitrary Python code inside the sandboxed interpreter."""
    # Same import allowlist as evaluate_math; the sandbox restricts what
    # the executed code may import.
    allowed_modules = [
        "os",
        "math",
        "random",
        "datetime",
        "time",
        "string",
        "collections",
        "itertools",
        "functools",
        "typing",
        "enum",
        "json",
        "ast",
    ]
    interpreter = PythonInterpreter({}, allowed_modules, verbose=True)
    return interpreter.execute(python_code)


@traced_and_logged
def get_current_time() -> str:
    """Return the current local time as an ISO-8601 string."""
    from datetime import datetime

    return datetime.now().isoformat()


@traced_and_logged
def count_words(text: str) -> int:
    """Count the whitespace-separated words in *text*."""
    return len(text.split())


@traced_and_logged
def extract_urls(text: str) -> list[str]:
    """Return every http(s) URL found in *text*."""
    import re

    pattern = re.compile(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")
    return pattern.findall(text)


@traced_and_logged
def extract_numbers(text: str) -> list[float]:
    """Extract every integer or decimal literal from *text* as floats."""
    import re

    matches = re.findall(r"-?\d*\.?\d+", text)
    return [float(match) for match in matches]


@traced_and_logged
def json_parse_safe(text: str) -> dict:
    """Parse *text* as JSON, returning {} instead of raising on failure."""
    try:
        return json.loads(text)
    except Exception:
        # Best-effort contract: callers always get a dict-shaped value.
        return {}


@traced_and_logged
def save_to_file(content: str, filename: str):
    """Write *content* to *filename*, overwriting any existing contents."""
    with open(filename, "w") as output:
        output.write(content)


@traced_and_logged
def read_from_file(filename: str) -> str:
    """Return the full contents of *filename*, decoded as UTF-8."""
    with open(filename, encoding="utf-8") as source:
        return source.read()


@traced_and_logged
def json_search(
    json_file_path: str, search_query: str, case_sensitive: bool = False
) -> list:
    """Search a JSON file for objects containing the specified search query.

    When the root is an array, each matching element is returned. When
    the root is an object, list-valued entries are searched element-wise;
    if any non-list value matches, the whole root object is returned as
    the single result.

    Args:
        json_file_path (str): Path to the JSON file to search
        search_query (str): Text to search for within the JSON objects
        case_sensitive (bool, optional): Whether to perform a case-sensitive search. Defaults to False.

    Returns:
        list: List of JSON objects (as dicts) that contain the search query.
        On any failure, a single-element list with an {"error": ...} dict.

    Example:
        >>> matching_tickets = json_search("tickets.json", "error 404")
        >>> print(
        ...     f"Found {len(matching_tickets)} tickets mentioning 'error 404'"
        ... )
    """
    try:
        # Read the JSON file
        file_content = read_from_file(json_file_path)

        # Parse the JSON content (json_parse_safe returns {} on bad JSON)
        json_data = json_parse_safe(file_content)

        # Convert search query to lowercase if case-insensitive search;
        # _contains_text then lowercases only the candidate strings.
        if not case_sensitive:
            search_query = search_query.lower()

        results = []

        # Determine if the JSON root is an object or array
        if isinstance(json_data, dict):
            # Handle case where root is a dictionary object
            for key, value in json_data.items():
                if isinstance(value, list):
                    # If this key contains a list of objects, search within them
                    matching_items = _search_in_list(
                        value, search_query, case_sensitive
                    )
                    results.extend(matching_items)
                elif _contains_text(value, search_query, case_sensitive):
                    # A non-list value matched: the entire root object is
                    # treated as the match and scanning stops.
                    results.append(json_data)
                    break
        elif isinstance(json_data, list):
            # Handle case where root is an array
            matching_items = _search_in_list(
                json_data, search_query, case_sensitive
            )
            results.extend(matching_items)

        return results

    except Exception as e:
        return [{"error": f"Error searching JSON file: {e!s}"}]


def _search_in_list(
    items: list, search_query: str, case_sensitive: bool
) -> list:
    """Return the items whose contents include *search_query*."""
    return [
        entry
        for entry in items
        if _contains_text(entry, search_query, case_sensitive)
    ]


def _contains_text(obj: Any, search_query: str, case_sensitive: bool) -> bool:
    """Recursively check if an object contains the search query in any of its string values."""
    if isinstance(obj, str):
        # For string values, check if they contain the search query
        if case_sensitive:
            return search_query in obj
        else:
            return search_query in obj.lower()
    elif isinstance(obj, dict):
        # For dictionaries, check each value
        for value in obj.values():
            if _contains_text(value, search_query, case_sensitive):
                return True
    elif isinstance(obj, list):
        # For lists, check each item
        for item in obj:
            if _contains_text(item, search_query, case_sensitive):
                return True
    # For other types (numbers, booleans, None), return False
    return False
```

### src\flock\core\tools\llm_tools.py

- **Lines**: 788
- **Last modified**: 2025-02-28 09:57:06

**Functions**:
- `split_by_sentences(text)`
- `split_by_characters(text, chunk_size, overlap)`
- `split_by_tokens(text, tokenizer, max_tokens, overlap_tokens)`
- `split_by_separator(text, separator)`
- `recursive_text_splitter(text, chunk_size, separators, keep_separator)`
- `chunk_text_for_embedding(text, file_name, chunk_size, overlap)`
- `split_code_by_functions(code)`
- `count_tokens(text, model)`
- `count_tokens_estimate(text, model)`
- `truncate_to_token_limit(text, max_tokens, model)`
- `extract_keywords(text, top_n)`
- `clean_text(text, remove_urls, remove_html, normalize_whitespace)`
- `format_chat_history(messages, format_type, system_prefix, user_prefix, assistant_prefix)`
- `extract_json_from_text(text)`
- `calculate_text_hash(text, algorithm)`
- `format_table_from_dicts(data)`
- `detect_language(text)`
- `tiktoken_split(text, model, chunk_size, overlap)`

**Content**:
```py
import hashlib
import json
import re
from collections.abc import Callable
from typing import Any

import nltk

from flock.core.logging.trace_and_logged import traced_and_logged

# Ensure NLTK data is downloaded.
# nltk.data.find raises LookupError when a resource is missing, so each
# probe downloads its resource on first use only.
try:
    nltk.data.find("tokenizers/punkt")  # Punkt sentence-tokenizer models
except LookupError:
    nltk.download("punkt")

try:
    nltk.data.find("corpora/stopwords")  # stopword lists
except LookupError:
    nltk.download("stopwords")


@traced_and_logged
def split_by_sentences(text: str) -> list[str]:
    """Split *text* into sentences with NLTK's sentence tokenizer."""
    sentences = nltk.sent_tokenize(text)
    return sentences


@traced_and_logged
def split_by_characters(
    text: str, chunk_size: int = 4000, overlap: int = 200
) -> list[str]:
    """Split text into character chunks of at most chunk_size with overlap.

    Chunk boundaries are nudged backwards (up to 100 characters) so they
    land just after a whitespace/punctuation character when possible.

    Args:
        text: The text to split.
        chunk_size: Maximum characters per chunk (must be positive).
        overlap: Characters shared between consecutive chunks (must be
            smaller than chunk_size).

    Returns:
        List of chunk strings; empty list for empty input.

    Raises:
        ValueError: If chunk_size <= 0 or overlap >= chunk_size.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")

    if overlap >= chunk_size:
        raise ValueError("overlap must be smaller than chunk_size")

    if not text:
        return []

    break_chars = [" ", "\n", ".", ",", "!", "?", ";", ":", "-"]
    chunks = []
    start = 0
    text_length = len(text)

    while start < text_length:
        end = min(start + chunk_size, text_length)

        # If the cut lands mid-word, back up (at most 100 chars) to just
        # after the nearest break character.
        if end < text_length and text[end] not in break_chars:
            for i in range(end, max(start, end - 100), -1):
                if text[i] in break_chars:
                    end = i + 1  # Include the break character
                    break

        chunks.append(text[start:end])

        if end >= text_length:
            break
        # BUG FIX: guarantee forward progress. When the break-point search
        # pulls `end` back close to `start`, the original `end - overlap`
        # could move `start` backwards (even below zero), producing garbage
        # chunks and a potential infinite loop. Clamp to at least start + 1.
        start = max(start + 1, end - overlap)

    return chunks


@traced_and_logged
def split_by_tokens(
    text: str,
    tokenizer: Callable[[str], list[str]],
    max_tokens: int = 1024,
    overlap_tokens: int = 100,
) -> list[str]:
    """Split text into chunks of at most max_tokens tokens with overlap.

    Args:
        text: Text to split.
        tokenizer: Callable turning text into a list of token strings.
        max_tokens: Maximum tokens per chunk (must be positive).
        overlap_tokens: Tokens shared between consecutive chunks (must be
            smaller than max_tokens).

    Returns:
        List of chunks, each the ""-join of its token slice.
        NOTE(review): joining with "" assumes the tokenizer keeps any
        whitespace inside the tokens — verify against callers.

    Raises:
        ValueError: If max_tokens <= 0 or overlap_tokens >= max_tokens.
    """
    # BUG FIX: a non-positive step (overlap_tokens >= max_tokens) made the
    # original while-loop spin forever; reject the degenerate configuration.
    if max_tokens <= 0:
        raise ValueError("max_tokens must be positive")
    if overlap_tokens >= max_tokens:
        raise ValueError("overlap_tokens must be smaller than max_tokens")

    tokens = tokenizer(text)
    step = max_tokens - overlap_tokens

    return [
        "".join(tokens[i : i + max_tokens])
        for i in range(0, len(tokens), step)
    ]


@traced_and_logged
def split_by_separator(text: str, separator: str = "\n\n") -> list[str]:
    """Split ``text`` on ``separator`` and drop whitespace-only pieces."""
    if not text:
        return []
    return [piece for piece in text.split(separator) if piece.strip()]


@traced_and_logged
def recursive_text_splitter(
    text: str,
    chunk_size: int = 4000,
    separators: list[str] = ["\n\n", "\n", ". ", ", ", " ", ""],
    keep_separator: bool = True,
) -> list[str]:
    if not text:
        return []

    if len(text) <= chunk_size:
        return [text]

    if not separators:
        return [
            text[:chunk_size],
            *recursive_text_splitter(text[chunk_size:], chunk_size, separators),
        ]

    separator = separators[0]
    new_separators = separators[1:]

    if separator == "":
        # If we're at the character level, just split by characters
        return split_by_characters(text, chunk_size=chunk_size, overlap=0)

    splits = text.split(separator)
    separator_len = len(separator) if keep_separator else 0

    # Add separator back to the chunks if needed
    if keep_separator and separator:
        splits = [f"{split}{separator}" for split in splits[:-1]] + [splits[-1]]

    # Process each split
    result = []
    current_chunk = []
    current_length = 0

    for split in splits:
        split_len = len(split)

        if split_len > chunk_size:
            # If current split is too large, handle current chunk and recursively split this large piece
            if current_chunk:
                result.append("".join(current_chunk))
                current_chunk = []
                current_length = 0

            # Recursively split this large piece
            smaller_chunks = recursive_text_splitter(
                split, chunk_size, new_separators, keep_separator
            )
            result.extend(smaller_chunks)
        elif current_length + split_len <= chunk_size:
            # If we can fit this split in the current chunk, add it
            current_chunk.append(split)
            current_length += split_len
        else:
            # If we can't fit this split, complete the current chunk and start a new one
            result.append("".join(current_chunk))
            current_chunk = [split]
            current_length = split_len

    # Don't forget the last chunk
    if current_chunk:
        result.append("".join(current_chunk))

    return result


@traced_and_logged
def chunk_text_for_embedding(
    text: str, file_name: str, chunk_size: int = 1000, overlap: int = 100
) -> list[dict[str, Any]]:
    """Chunk ``text`` by characters and attach embedding-ready metadata.

    Each entry carries a stable ``chunk_id`` (``<file_name>_<index>``),
    the chunk text, the source file name, and the total chunk count.
    """
    pieces = split_by_characters(text, chunk_size=chunk_size, overlap=overlap)
    total = len(pieces)
    return [
        {
            "chunk_id": f"{file_name}_{index}",
            "text": piece,
            "file": file_name,
            "total_chunks": total,
        }
        for index, piece in enumerate(pieces)
    ]


@traced_and_logged
def split_code_by_functions(code: str) -> list[dict[str, Any]]:
    """Split Python source into per-function segments.

    Returns dicts with ``name``, ``content`` and ``type`` keys. Code before
    the first ``def`` becomes an "Imports/Setup" entry; source with no
    functions at all is returned whole under the name "Main". Only
    top-level, single-line ``def`` headers are recognized.
    """
    if not code:
        return []

    # Basic pattern for Python functions
    header_re = re.compile(
        r"(^|\n)def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\((.*?)\)(?:\s*->.*?)?:"
    )
    headers = list(header_re.finditer(code))

    if not headers:
        return [{"name": "Main", "content": code, "type": "code"}]

    # Each function's segment runs from its header to the next header
    # (or to the end of the source for the last one).
    boundaries = [m.start() for m in headers] + [len(code)]
    segments = [
        {
            "name": match.group(2),
            "content": code[begin:finish].strip(),
            "type": "function",
        }
        for match, begin, finish in zip(headers, boundaries, boundaries[1:])
    ]

    # Anything before the first function header is preamble.
    preamble = code[: boundaries[0]].strip()
    if preamble:
        segments.insert(
            0,
            {"name": "Imports/Setup", "content": preamble, "type": "code"},
        )

    return segments


@traced_and_logged
def count_tokens(text: str, model: str = "gpt-3.5-turbo") -> int:
    """Count tokens using tiktoken.

    Prefers the model-specific encoder; falls back to a named encoding
    (p50k_base for legacy text-davinci models, cl100k_base otherwise).
    Without tiktoken installed, defers to the character-based estimator.
    """
    if not text:
        return 0

    try:
        import tiktoken
    except ImportError:
        # Fallback to character-based estimation if tiktoken is not installed
        return count_tokens_estimate(text, model)

    # Every model family handled here maps to cl100k_base except the
    # legacy text-davinci models (LLaMA/Mistral get cl100k_base as the
    # best available approximation).
    fallback_name = (
        "p50k_base" if model.startswith("text-davinci") else "cl100k_base"
    )

    # Try to get the specific encoder for the model if available
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding(fallback_name)

    return len(encoding.encode(text))


@traced_and_logged
def count_tokens_estimate(text: str, model: str = "gpt-3.5-turbo") -> int:
    """Estimate token count for different models.

    Uses rough chars-per-token ratios per model family: ~4 for OpenAI
    models (and unknown models), ~3.5 for Anthropic/Claude, ~3.7 for
    LLaMA-based models.

    Fix: the claude/llama branches used float floor division
    (``len(text) // 3.5 + 1``) and returned a float despite the declared
    ``-> int``; the result is now coerced to int (same value).
    """
    if not text:
        return 0

    # Chars-per-token heuristic by model family.
    if model.startswith(("gpt-3", "gpt-4")):
        chars_per_token = 4.0
    elif model.startswith("claude"):
        chars_per_token = 3.5
    elif "llama" in model.lower():
        chars_per_token = 3.7
    else:
        # Default estimation
        chars_per_token = 4.0

    return int(len(text) // chars_per_token) + 1


@traced_and_logged
def truncate_to_token_limit(
    text: str, max_tokens: int = 4000, model: str = "gpt-3.5-turbo"
) -> str:
    """Trim ``text`` so it fits within ``max_tokens`` tokens for ``model``.

    With tiktoken installed, truncation is exact at the token level.
    Otherwise a chars-per-token estimate is used, preferring to cut at a
    sentence or paragraph boundary near the estimated character limit.
    Returns the original text unchanged if it already fits.
    """
    if not text:
        return ""

    # Try to use tiktoken for accurate truncation
    try:
        import tiktoken

        # Get appropriate encoding
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # Fall back to cl100k_base (used by most newer models)
            encoding = tiktoken.get_encoding("cl100k_base")

        # Encode the text to tokens
        tokens = encoding.encode(text)

        # If we're already under the limit, return the original text
        if len(tokens) <= max_tokens:
            return text

        # Truncate tokens and decode back to text
        truncated_tokens = tokens[:max_tokens]
        return encoding.decode(truncated_tokens)

    except ImportError:
        # Fallback to the character-based method if tiktoken is not available
        estimated_tokens = count_tokens_estimate(text, model)

        if estimated_tokens <= max_tokens:
            return text

        # Calculate approximate character limit
        # Ratios mirror count_tokens_estimate's per-family heuristics.
        char_per_token = 4  # Default for most models
        if model.startswith("claude"):
            char_per_token = 3.5
        elif "llama" in model.lower():
            char_per_token = 3.7

        char_limit = int(max_tokens * char_per_token)

        # Try to find a good breaking point
        if char_limit < len(text):
            # Look for sentence or paragraph break near the limit
            # (scan backwards up to 100 characters).
            for i in range(char_limit - 1, max(0, char_limit - 100), -1):
                if i < len(text) and text[i] in [".", "!", "?", "\n"]:
                    return text[: i + 1]

        # Fallback to hard truncation
        return text[:char_limit]


@traced_and_logged
def extract_keywords(text: str, top_n: int = 10) -> list[str]:
    """Return the ``top_n`` most frequent non-stopword words in ``text``.

    Words are lowercased runs of 3+ ASCII letters; English stopwords are
    removed (NLTK's list when available, a built-in fallback otherwise).
    Ties keep first-occurrence order.

    Fix: the stopword lookup used a bare ``except:`` that swallowed every
    exception (including KeyboardInterrupt); it now catches only the two
    failure modes it handles — nltk missing (ImportError) or its corpus
    data not downloaded (LookupError).
    """
    if not text:
        return []

    from collections import Counter

    # Get stopwords
    try:
        from nltk.corpus import stopwords

        stop_words = set(stopwords.words("english"))
    except (ImportError, LookupError):
        # Fallback basic stopwords if NLTK (or its data) isn't available.
        # Same word list as before, stored compactly.
        stop_words = set(
            "i me my myself we our ours ourselves you you're you've you'll "
            "you'd your yours yourself yourselves he him his himself she "
            "she's her hers herself it it's its itself they them their "
            "theirs themselves what which who whom this that that'll these "
            "those am is are was were be been being have has had having do "
            "does did doing a an the and but if or because as until while "
            "of at by for with about against between into through during "
            "before after above below to from up down in out on off over "
            "under again further then once".split()
        )

    # Tokenize: lowercased words of 3+ letters; punctuation is dropped.
    words = re.findall(r"\b[a-zA-Z]{3,}\b", text.lower())

    # Count non-stopword frequencies; most_common preserves first-seen
    # order among ties, matching the previous stable sort.
    counts = Counter(word for word in words if word not in stop_words)
    return [word for word, _ in counts.most_common(top_n)]


@traced_and_logged
def clean_text(
    text: str,
    remove_urls: bool = True,
    remove_html: bool = True,
    normalize_whitespace: bool = True,
) -> str:
    """Clean ``text`` by stripping URLs, HTML tags, and excess whitespace.

    Each cleanup pass is individually switchable; passes run in the order
    URLs -> HTML -> whitespace.
    """
    if not text:
        return ""

    # (enabled, pattern, replacement) pipeline, applied in order.
    passes = [
        (remove_urls, r"https?://\S+|www\.\S+", ""),
        (remove_html, r"<.*?>", ""),
        (normalize_whitespace, r"\s+", " "),
    ]

    cleaned = text
    for enabled, pattern, replacement in passes:
        if enabled:
            cleaned = re.sub(pattern, replacement, cleaned)

    if normalize_whitespace:
        cleaned = cleaned.strip()

    return cleaned


@traced_and_logged
def format_chat_history(
    messages: list[dict[str, str]],
    format_type: str = "text",
    system_prefix: str = "System: ",
    user_prefix: str = "User: ",
    assistant_prefix: str = "Assistant: ",
) -> str:
    """Render a chat transcript as plain text or markdown.

    Known roles (system/user/assistant) use the configurable prefixes;
    any other role falls back to ``Role: ``. Messages are joined with
    blank lines. An empty ``messages`` list yields "".

    Raises:
        ValueError: For a ``format_type`` other than "text" / "markdown"
            (only when ``messages`` is non-empty, matching the original).
    """
    if not messages:
        return ""

    if format_type not in ("text", "markdown"):
        raise ValueError(f"Unsupported format type: {format_type}")

    known_prefixes = {
        "system": system_prefix,
        "user": user_prefix,
        "assistant": assistant_prefix,
    }

    rendered = []
    for message in messages:
        role = message.get("role", "").lower()
        content = message.get("content", "")
        prefix = known_prefixes.get(role, f"{role.capitalize()}: ")

        if format_type == "text":
            rendered.append(f"{prefix}{content}")
        else:  # markdown: bold the prefix (sans trailing space)
            rendered.append(f"**{prefix.strip()}** {content}")

    return "\n\n".join(rendered)


@traced_and_logged
def extract_json_from_text(text: str) -> dict[str, Any] | None:
    if not text:
        return None

    # Find JSON-like patterns between curly braces
    json_pattern = re.compile(r"({[\s\S]*?})")
    json_matches = json_pattern.findall(text)

    # Try to parse each match
    for json_str in json_matches:
        try:
            return json.loads(json_str)
        except json.JSONDecodeError:
            continue

    # Try to find JSON with markdown code blocks
    code_block_pattern = re.compile(r"```(?:json)?\s*([\s\S]*?)\s*```")
    code_blocks = code_block_pattern.findall(text)

    for block in code_blocks:
        # Clean up any trailing ``` that might have been captured
        block = block.replace("```", "")
        try:
            return json.loads(block)
        except json.JSONDecodeError:
            continue

    # No valid JSON found
    return None


@traced_and_logged
def calculate_text_hash(text: str, algorithm: str = "sha256") -> str:
    """Return the hex digest of ``text`` (UTF-8) under ``algorithm``.

    Supported algorithms: md5, sha1, sha256. Empty text yields "".

    Raises:
        ValueError: For any other algorithm name.
    """
    if not text:
        return ""

    if algorithm not in ("md5", "sha1", "sha256"):
        raise ValueError(f"Unsupported hash algorithm: {algorithm}")

    # hashlib.new dispatches to the same constructors as the named
    # functions (hashlib.md5 / sha1 / sha256).
    return hashlib.new(algorithm, text.encode()).hexdigest()


@traced_and_logged
def format_table_from_dicts(data: list[dict[str, Any]]) -> str:
    """Render a list of dicts as an ASCII table.

    Columns are the union of all keys, sorted for deterministic output;
    missing cells render empty. Returns "" for empty input.
    """
    if not data:
        return ""

    # Union of keys across all rows, sorted for a stable column order.
    columns = sorted({key for row in data for key in row})

    # Column width = widest of the header and any cell value.
    widths = {}
    for col in columns:
        cell_lengths = [len(str(row[col])) for row in data if col in row]
        widths[col] = max([len(col), *cell_lengths])

    header = " | ".join(f"{col:{widths[col]}}" for col in columns)
    rule = "-+-".join("-" * widths[col] for col in columns)
    body = [
        " | ".join(f"{row.get(col, '')!s:{widths[col]}}" for col in columns)
        for row in data
    ]

    return "\n".join([header, rule, *body])


@traced_and_logged
def detect_language(text: str) -> str:
    """Simple language detection.

    Uses langdetect when installed. Otherwise falls back to a crude
    heuristic: count language-specific marker characters and pick the
    language with the highest count, defaulting to "en" when no marker
    appears. Inputs shorter than 10 stripped chars return "unknown".
    """
    if not text or len(text.strip()) < 10:
        return "unknown"

    try:
        # Try to use langdetect if available
        from langdetect import detect
    except ImportError:
        # Fallback: score each language by occurrences of its marker
        # characters. Very simplistic; only covers a few languages.
        lowered = text.lower()
        marker_chars = {
            "es": "áéíóúüñ",   # Spanish
            "de": "äöß",       # German
            "fr": "çàèù",      # French
            "no": "åø",        # Nordic
            "ru": "йыъэ",      # Russian/Cyrillic
            "zh": "的是在",     # Chinese
            "ja": "のはで",     # Japanese
            "ko": "한국어",     # Korean
        }
        scores = {
            lang: sum(lowered.count(ch) for ch in chars)
            for lang, chars in marker_chars.items()
        }

        # A clear signal from special characters wins; ties resolve to
        # the first language in the table above.
        if max(scores.values()) > 0:
            return max(scores, key=scores.get)

        # Otherwise assume English (very simplistic)
        return "en"

    return detect(text)


@traced_and_logged
def tiktoken_split(
    text: str,
    model: str = "gpt-3.5-turbo",
    chunk_size: int = 1000,
    overlap: int = 50,
) -> list[str]:
    """Split text based on tiktoken tokens with proper overlap handling.

    Consecutive chunks share ``overlap`` tokens of context. Falls back to
    character-based splitting (~4 chars per token) when tiktoken is not
    installed.

    NOTE(review): assumes overlap < chunk_size — an equal or larger
    overlap would stop the loop from advancing; confirm callers respect
    this.
    """
    if not text:
        return []

    try:
        import tiktoken

        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # Unknown model name: cl100k_base covers most newer models.
            encoding = tiktoken.get_encoding("cl100k_base")

        # Encode the text to tokens
        tokens = encoding.encode(text)
        total_tokens = len(tokens)

        # Check if we need to split at all
        if total_tokens <= chunk_size:
            return [text]

        # Create chunks with overlap
        chunks = []
        start_idx = 0

        while start_idx < total_tokens:
            # Define the end of this chunk
            end_idx = min(start_idx + chunk_size, total_tokens)

            # Decode this chunk of tokens back to text
            chunk_tokens = tokens[start_idx:end_idx]
            chunk_text = encoding.decode(chunk_tokens)
            chunks.append(chunk_text)

            # Move to the next chunk, accounting for overlap
            start_idx += chunk_size - overlap

            # Avoid tiny final chunks
            # If the previous chunk already reached the end of the token
            # stream (remaining tokens <= overlap), the leftover tokens
            # were emitted in that chunk's tail — stop rather than emit a
            # duplicate fragment.
            if start_idx < total_tokens and start_idx + overlap >= total_tokens:
                break

        return chunks
    except ImportError:
        # Fallback to character-based chunking if tiktoken is not available
        # (the x4 factor converts token budgets to approximate characters).
        return split_by_characters(
            text, chunk_size=chunk_size * 4, overlap=overlap * 4
        )
```

### src\flock\__init__.py

- **Lines**: 95
- **Last modified**: 2025-04-03 03:45:54

**Description**: Flock package initialization.

**Functions**:
- `main()`

**Content**:
```py
"""Flock package initialization."""

from rich.panel import Panel

from flock.cli.constants import (
    CLI_EXIT,
    CLI_NOTES,
    CLI_REGISTRY_MANAGEMENT,
    CLI_THEME_BUILDER,
)
from flock.cli.load_release_notes import load_release_notes
from flock.cli.settings import settings_editor
from flock.core.logging.formatters.theme_builder import theme_builder


def main():
    """Interactive CLI entry point.

    Loops a questionary menu of Flock management actions (create/load a
    Flock, registry management, theme builder, settings, release notes)
    until the user chooses Exit.
    """
    # Heavy/interactive dependencies are imported lazily so that simply
    # importing the package stays cheap.
    import questionary
    from rich.console import Console

    from flock.cli.constants import (
        CLI_CREATE_AGENT,
        CLI_CREATE_FLOCK,
        CLI_LOAD_AGENT,
        CLI_LOAD_EXAMPLE,
        CLI_LOAD_FLOCK,
        CLI_SETTINGS,
        CLI_START_WEB_SERVER,
    )
    from flock.cli.load_flock import load_flock
    from flock.core.util.cli_helper import init_console

    console = Console()
    # Menu loop: redraw banner, ask for an action, dispatch, repeat.
    while True:
        init_console()

        # console.print("Flock Management Console\n", style="bold green")
        console.print(
            Panel("[bold green]Flock Management Console[/]"), justify="center"
        )
        console.line()

        result = questionary.select(
            "What do you want to do?",
            choices=[
                questionary.Separator(line=" "),
                # CLI_CREATE_AGENT,
                CLI_CREATE_FLOCK,
                # CLI_LOAD_AGENT,
                CLI_LOAD_FLOCK,
                # CLI_LOAD_EXAMPLE,
                questionary.Separator(),
                CLI_REGISTRY_MANAGEMENT,
                questionary.Separator(),
                CLI_THEME_BUILDER,
                CLI_SETTINGS,
                questionary.Separator(),
                CLI_NOTES,
                CLI_EXIT,
            ],
        ).ask()

        if result == CLI_LOAD_FLOCK:
            load_flock()
        elif result == CLI_CREATE_FLOCK:
            # This will be implemented in a separate create_flock.py
            from flock.cli.create_flock import create_flock

            create_flock()
        elif result == CLI_THEME_BUILDER:
            theme_builder()
        elif result == CLI_REGISTRY_MANAGEMENT:
            # Import registry management when needed
            from flock.cli.registry_management import manage_registry

            manage_registry()
        elif result == CLI_SETTINGS:
            settings_editor()
        elif result == CLI_START_WEB_SERVER:
            # Simple web server without a loaded Flock - could create a new one
            console.print(
                "[yellow]Web server without loaded Flock not yet implemented.[/]"
            )
            input("\nPress Enter to continue...")
        elif result == CLI_NOTES:
            load_release_notes()
        elif result == CLI_EXIT:
            break
        # Pause so the user can read any output before the screen clears.
        input("\nPress Enter to continue...\n\n")

        console.clear()


# Allow launching the management console directly as a script.
if __name__ == "__main__":
    main()
```

### examples\03_apps\dynamic_apps\main.py

- **Lines**: 53
- **Last modified**: 2025-04-02 17:29:19

**Classes**:
- `DynamicHTMLApp`: 0 methods

**Content**:
```py
import json
from typing import Optional
from pydantic import BaseModel, Field
from flock.core import FlockFactory, Flock


class DynamicHTMLApp(BaseModel):
    """Typed output schema the app-generating agent must fill.

    Carries the complete HTML/CSS/JS sources plus the file names to save
    them under; ``description`` records the design rationale.
    """

    name: str = Field(..., description="Name of the app")
    requirements: list[str] = Field(..., description="User specified requirements for the app")
    description: str = Field(..., description="High level description of the data and functionality of the app, as well as design decisions")
    html_content: str = Field(..., description="HTML content of the app")
    css_content: str = Field(..., description="CSS content of the app")
    js_content: str = Field(..., description="JS content of the app")
    html_file: str = Field(..., description="HTML file name")
    css_file: str = Field(..., description="CSS file name")
    js_file: str = Field(..., description="JS file name")
    
from pathlib import Path

# Model alternatives: "groq/qwen-qwq-32b", "openai/gpt-4o"
MODEL = "gemini/gemini-2.5-pro-exp-03-25"
flock = Flock(model=MODEL)

# Agent that turns a JSON payload plus free-form requirements into a
# complete static HTML/CSS/JS app (structured as DynamicHTMLApp).
app_agent = FlockFactory.create_default_agent(name="app_agent",
                                              description="An agent that generates a static html app based on the requirements and the input_data. "
                                              "The input_data is the content of a json file that contains the data to be displayed in the app."
                                              "The final app should load the input_data from json files in a folder called 'data' in the same directory as the app."
                                              "The app should present the content of the input file as if designed by a professional UX designer and dedicated to the data in the input file."
                                              "For example, if the input data is a story, the app should present the story as if it is a dedicated story app."
                                              "If for example the input data is a list of products, the app should present the products as if it is a dedicated product app.",
                                              input="requirements: str, input_data: str",
                                              output="app: DynamicHTMLApp",
                                              max_tokens=60000)

flock.add_agent(app_agent)

requirements = "elegant, professional, color-coded, modern, dark mode"
input_file = "output/comic_book_series_agent_output_20250330_044131.json"
output_dir = "output/apps/"

# Load the input data as a string.
# Fix: explicit encoding — the original used the platform locale default.
input_data = Path(input_file).read_text(encoding="utf-8")

result = flock.run(start_agent=app_agent, input={'requirements': requirements, 'input_data': input_data})
app = result.app

# Save html, css, js to files.
# Fix: create the output directory first — the original crashed with
# FileNotFoundError when output/apps/ did not exist.
out_dir = Path(output_dir)
out_dir.mkdir(parents=True, exist_ok=True)
(out_dir / app.html_file).write_text(app.html_content, encoding="utf-8")
(out_dir / app.css_file).write_text(app.css_content, encoding="utf-8")
(out_dir / app.js_file).write_text(app.js_content, encoding="utf-8")

print(f"App saved to {output_dir + app.html_file}")
```

### examples\03_apps\gemini_dev\main.py

- **Lines**: 46
- **Last modified**: 2025-04-02 17:29:19

**Content**:
```py
from typing import Optional
from pydantic import BaseModel, Field
from flock.core import FlockFactory, Flock
from flock.routers.default.default_router import DefaultRouter, DefaultRouterConfig



MODEL = "gemini/gemini-2.5-pro-exp-03-25" #"groq/qwen-qwq-32b"    #"openai/gpt-4o" # 
flock = Flock(model=MODEL)

# read .project/code.txt
# Full source dump of this project, handed to the agent as context.
with open(".project/code.txt", "r", encoding="utf-8") as f:
    code = f.read()

# read .project/llms-ctx.txt
# FastHTML reference material, handed alongside the code.
with open(".project/llms-ctx.txt", "r", encoding="utf-8") as f:
    llms_ctx = f.read()
    
inputs = {
    "Code for the my project, the agent framework flock": code,
    "FastHTML Documentation": llms_ctx
}

prompt = """
I'm getting these errors:
INFO:     127.0.0.1:63465 - "GET /agents HTTP/1.1" 200 OK
INFO:     127.0.0.1:63463 - "GET /ui/ HTTP/1.1" 200 OK
INFO:     127.0.0.1:63464 - "GET /ui/get-agent-details-for-run?agent_name_selector=story_agent HTTP/1.1" 404 Not Found
INFO:     127.0.0.1:63464 - "GET /ui/agent-details-content HTTP/1.1" 404 Not Found
INFO:     127.0.0.1:63464 - "GET /ui/run-agent-content HTTP/1.1" 404 Not Found
INFO:     127.0.0.1:63464 - "GET /ui/get-agent-details-for-run?agent_name_selector=comic_book_issue_agent HTTP/1.1" 404 Not Found

Also you can't see anything in the UI because everything is just white
"""

# The agent's output is a dict of filepath -> file content.
# NOTE(review): write_to_file=True presumably makes the framework persist
# those files to disk — confirm against FlockFactory's documentation.
dev_agent = FlockFactory.create_default_agent(name="dev_agent",
                                              description="An agent that is a master developer",
                                              input="inputs: dict[str, str], prompt: str",
                                              output="output_files: dict[str, str] | key is the filepath and value is the content of the file",
                                              max_tokens=60000,
                                              write_to_file=True)


flock.add_agent(dev_agent)
flock.run(start_agent="dev_agent", input={"inputs": inputs, "prompt": prompt})
```

## Design Patterns

The following design patterns appear to be used in this codebase:

### Singleton Pattern

- `scripts\code_collector.py`
- `src\flock\core\flock_registry.py`

### Factory Pattern

- `examples\playground\02_cook_book\repo_analyzer\repo_analyzer.py`
- `examples\playground\hier\her_vis.py`
- `scripts\code_collector.py`
- `src\flock\cli\settings.py`
- `src\flock\core\api\endpoints.py`
- `src\flock\core\api\run_store.py`
- `src\flock\core\api\ui\routes.py`
- `src\flock\core\flock_factory.py`
- `src\flock\core\logging\formatters\theme_builder.py`
- `src\flock\core\logging\formatters\themed_formatter.py`
- `src\flock\core\mixin\dspy_integration.py`
- `src\flock\core\tools\dev_tools\github.py`
- `src\flock\workflow\temporal_setup.py`

### Observer Pattern

- `scripts\code_collector.py`

### Decorator Pattern

- `scripts\code_collector.py`
- `src\flock\core\logging\trace_and_logged.py`

### MVC Components

**Models**:
- `src\flock\core\api\models.py`

**Views**:
- `src\flock\cli\view_results.py`

## All Files

### docs\create_doc_boilerplate.py

- **Lines**: 131
- **Last modified**: 2025-02-19 03:47:02

```py
import os
from pathlib import Path

import yaml

# The mkdocs navigation structure
NAV_STRUCTURE = """
nav:
  - Home: index.md
  
  - Getting Started:
    - Quick Start: getting-started/quickstart.md
    - Installation: getting-started/installation.md
    - Basic Concepts: getting-started/concepts.md
    - Configuration: getting-started/configuration.md
    
  - Core Concepts:
    - Agents: core-concepts/agents.md
    - Type System: core-concepts/type-system.md
    - Workflows: core-concepts/workflows.md
    - Declarative Programming: core-concepts/declarative.md
    - Error Handling: core-concepts/error-handling.md
    
  - Features:
    - Agent Definition: features/agent-definition.md
    - Type Safety: features/type-safety.md
    - Pydantic Integration: features/pydantic.md
    - Agent Chaining: features/agent-chaining.md
    - Lifecycle Hooks: features/lifecycle-hooks.md
    
  - Integrations:
    - Temporal: integrations/temporal.md
    - DSPy: integrations/dspy.md
    - LiteLLM: integrations/litellm.md
    - Tavily: integrations/tavily.md
    
  - Advanced Usage:
    - Custom Agents: advanced/custom-agents.md
    - Complex Workflows: advanced/complex-workflows.md
    - Testing: advanced/testing.md
    - Performance Optimization: advanced/performance.md
    
  - Deployment:
    - Production Setup: deployment/production-setup.md
    - Monitoring: deployment/monitoring.md
    - Scalability: deployment/scalability.md
    - Security: deployment/security.md
    
  - Tutorials:
    - Basic Blog Generator: tutorials/blog-generator.md
    - Multi-Agent Systems: tutorials/multi-agent.md
    - Custom Tool Integration: tutorials/custom-tools.md
    - Error Recovery: tutorials/error-recovery.md
    
  - API Reference:
    - FlockAgent: api/flockagent.md
    - Flock Core: api/flock-core.md
    - Types: api/types.md
    - Utilities: api/utilities.md
    
  - Contributing:
    - Development Setup: contributing/development.md
    - Code Style: contributing/code-style.md
    - Testing Guide: contributing/testing.md
    - Documentation Guide: contributing/documentation.md
    
  - Architecture:
    - Overview: architecture/overview.md
    - Components: architecture/components.md
    - Design Decisions: architecture/design-decisions.md
    
  - Examples:
    - Hello Flock: examples/hello-flock.md
    - Type System Usage: examples/type-system.md
    - Pydantic Models: examples/pydantic.md
    - Chain Gang: examples/chain-gang.md
"""


def create_markdown_file(file_path: Path, title: str) -> None:
    """Write a stub markdown page: an H1 title plus a placeholder body."""
    placeholder = f"# {title}\n\nDocumentation in progress...\n"
    file_path.write_text(placeholder)


def extract_paths_from_nav(
    nav_dict: dict, paths: list, current_path: str = ""
) -> None:
    """Recursively collect every markdown path from a mkdocs nav tree.

    Appends found path strings to ``paths`` in document order.
    ``current_path`` is kept for interface compatibility but unused.
    """
    for entry in nav_dict:
        if not isinstance(entry, dict):
            continue
        for value in entry.values():
            if isinstance(value, str):
                paths.append(value)
            elif isinstance(value, list):
                extract_paths_from_nav(value, paths, current_path)


def main():
    """Generate placeholder markdown files for every page in NAV_STRUCTURE.

    Parses the module-level NAV_STRUCTURE YAML, collects all page paths,
    and creates a stub markdown file (with a title heading) for each one
    under the ``docs/`` directory, creating directories as needed.
    """
    # Parse the YAML structure
    nav_data = yaml.safe_load(NAV_STRUCTURE)

    # Extract all markdown file paths
    markdown_paths = []
    extract_paths_from_nav(nav_data["nav"], markdown_paths)

    # Create docs directory if it doesn't exist
    docs_dir = Path("docs")
    docs_dir.mkdir(exist_ok=True)

    # Create all necessary directories and markdown files
    for md_path in markdown_paths:
        # Convert path to Path object relative to docs directory
        full_path = docs_dir / md_path

        # Create parent directories if they don't exist
        full_path.parent.mkdir(parents=True, exist_ok=True)

        # Generate a human-readable title from the filename.
        # Path.stem is the pathlib idiom for os.path.splitext(name)[0].
        title = full_path.stem.replace("-", " ").title()

        # Create the markdown file
        create_markdown_file(full_path, title)
        print(f"Created: {full_path}")


# Run the generator only when executed as a script (not on import).
if __name__ == "__main__":
    main()
```

### examples\01_introduction\01_simple_example.py

- **Lines**: 49
- **Last modified**: 2025-04-04 16:30:16

```py
import os
from flock.core import Flock, FlockFactory 
# NOTE(review): `os` is imported but not used in this example.


# --------------------------------
# Define the model
# --------------------------------
# Flock uses litellm to talk to LLMs
# Please consult the litellm documentation for valid IDs:
# https://docs.litellm.ai/docs/providers
MODEL = "openai/gpt-4o"


# --------------------------------
# Create the flock and context
# --------------------------------
# The flock is the place where all the agents are at home
flock = Flock(name="Example 01", model=MODEL, enable_logging=False)

# --------------------------------
# Create an agent
# --------------------------------
# The Flock doesn't believe in prompts (see the docs for more info)
# The Flock just declares what agents get in and what agents produce
# bloggy takes in a blog_idea and outputs a funny_blog_title 
# and blog_headers
bloggy = FlockFactory.create_default_agent(
    name="bloggy",
    input="blog_idea",
    output="funny_blog_title, blog_headers"
)
flock.add_agent(bloggy)


# --------------------------------
# Run the flock
# --------------------------------
# Tell the flock who the starting agent is and what input to give it
flock.run(
    start_agent=bloggy, 
    input={"blog_idea": "A blog about robot kittens"}
)
#flock.to_yaml_file("bloggy.flock.yaml")

# --------------------------------
# Start the CLI with the loaded Flock
# --------------------------------
# NOTE(review): the call below is active (not commented out), so the CLI
# starts right after the run completes — comment it out if you only want
# the single run above.
flock.start_cli()
```

### examples\01_introduction\02_typed_output.py

- **Lines**: 56
- **Last modified**: 2025-04-04 17:08:02

```py
from pprint import pprint

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.core.tools import basic_tools


# --------------------------------
# Create the flock
# --------------------------------
flock = Flock()


# --------------------------------
# Create an agent
# --------------------------------
# Some additions to example 01
# - you can define the output types of the agent with standard python type hints
# - you can define the tools the agent can use
# - you can define if the agent should use the cache 
#   results will get cached and if true and if the input is the same as before, the agent will return the cached result
#   this is useful for expensive operations like web scraping and for debugging
# Some people need some swag in their output
# Flock supports rendering the output as a table and you can choose a theme (out of like 300 or so)
agent = FlockFactory.create_default_agent(
    name="my_agent",
    input="url",
    output="title, headings: list[str]," 
            "entities_and_metadata: list[dict[str, str]]," 
            "type:Literal['news', 'blog', 'opinion piece', 'tweet']",
    tools=[basic_tools.get_web_content_as_markdown],
    enable_rich_tables=True,
    output_theme=OutputTheme.aardvark_blue,
)
flock.add_agent(agent)


# --------------------------------
# Run the agent
# --------------------------------
# ATTENTION: Big table incoming
# It's worth it tho!
# NOTE(review): this fetches a live CNN page via the web tool — the run
# needs network access.
result = flock.run(
    start_agent=agent,
    input={"url": "https://lite.cnn.com/travel/alexander-the-great-macedon-persian-empire-darius/index.html"},
)

# --------------------------------
# The result type
# --------------------------------
# Btw, the result is a real python object with the types you defined
# so this works:
pprint(result.title)
```

### examples\01_introduction\03_tool_and_code_agent.py

- **Lines**: 40
- **Last modified**: 2025-03-17 23:21:38

```py
from flock.core import Flock, FlockFactory

from flock.core.logging.formatters.themes import OutputTheme
from flock.core.tools import basic_tools


# --------------------------------
# Create the flock
# --------------------------------
flock = Flock()


# --------------------------------
# Tools
# --------------------------------
# Let's talk about tools
# A FlockAgent has a tools argument that takes in ANY callable
# like the ones in flock.core.tools.basic_tools
# or your own custom tools
# NOTE(review): code_eval presumably executes code the LLM produces —
# only enable it with models and inputs you trust.
agent = FlockFactory.create_default_agent(
    name="my_celebrity_age_agent",
    input="a_person",
    output="persons_age_in_days",
    tools=[basic_tools.web_search_duckduckgo, basic_tools.code_eval],
    enable_rich_tables=True,
    output_theme=OutputTheme.adventuretime,
    # use_cache=True: repeated runs with identical input return the cached result.
    use_cache=True,
)
flock.add_agent(agent)

# --------------------------------
# Run the agent
# --------------------------------
# Let's calculate Johnny Depp's age in days
flock.run(
    start_agent=agent,
    input={"a_person": "Johnny Depp"},
)
```

### examples\01_introduction\04_descriptions.py

- **Lines**: 28
- **Last modified**: 2025-04-04 16:30:16

```py
from flock.core import Flock, FlockFactory
 
flock = Flock()

# --------------------------------
# Add descriptions
# --------------------------------
# If you NEED your agent to handle edge cases, you can add descriptions to your agents
# The descriptions property on the FlockAgent class allows you to add a description to your agent,
# while with "|" you can specify descriptions of the input and output fields of the agent.

a_cat_naming_agent = FlockFactory.create_default_agent(
    name="cat_naming_agent",
    input="animal | the animal to create a cute name for",
    output="""
        cute_name: list[str] | a list of 5 cute names IN ALL CAPS, 
        error_message | an error message if the input is not a cat
    """,
)
flock.add_agent(a_cat_naming_agent)


# "My new kitten" exercises the happy path; a non-cat input should instead
# populate error_message per the output description above.
flock.run(
    start_agent=a_cat_naming_agent, 
    input={"animal": "My new kitten"}
)
```

### examples\01_introduction\05_typed_output2.py

- **Lines**: 57
- **Last modified**: 2025-04-04 16:30:16

```py
from dataclasses import dataclass
from pprint import pprint
from typing import Literal

from flock.core import Flock, FlockFactory


# --------------------------------
# Define the data model for a random person
# --------------------------------
@dataclass
class RandomPerson:
    """Structured record the agent fills in for each generated person."""
    name: str
    age: int
    gender: Literal["female", "male"]
    job: str
    favorite_movie: str  
    short_bio: str

# And 'hide' it in a alias
RandomUserList = list[RandomPerson]


   
flock = Flock()

# --------------------------------
# Define the Random User List Agent
# --------------------------------
# This agent ("people_agent") is responsible for generating a list of random users.
# It requires the input "amount_of_people" and produces an output "random_user_list" 
# which is a RandomUserList object.
# Internally all dataclass, pydantic basemodels and alias are supported
people_agent = FlockFactory.create_default_agent(
    name="people_agent",
    input="amount_of_people",
    output="random_user_list: RandomUserList",
)
flock.add_agent(people_agent)

# --------------------------------
# Run the agent to generate random users
# --------------------------------
# We execute the flock synchronously (flock.run blocks until the agent
# finishes), passing in the desired amount of people.
# The result is a namespace containing the generated random user list.
result =  flock.run(
    start_agent=people_agent,
    input={"amount_of_people": "10"},
)

# --------------------------------
# Process and display the result
# --------------------------------
# Here we print the number of users generated to verify our agent's output.
pprint(len(result.random_user_list))
```

### examples\01_introduction\06_simple_hand_off.py

- **Lines**: 40
- **Last modified**: 2025-04-02 23:12:31

```py
from flock.core import Flock, FlockFactory
from flock.routers.agent.agent_router import AgentRouter, AgentRouterConfig
from flock.routers.default.default_router import DefaultRouter, DefaultRouterConfig
from flock.routers.llm.llm_router import LLMRouter, LLMRouterConfig
# NOTE(review): AgentRouter is imported but unused; LLMRouter is only used
# by the commented-out alternative below.



flock = Flock(enable_logging=True)

# First agent: turns a free-form query into a fun project idea.
idea_agent = FlockFactory.create_default_agent(
    name="idea_agent",
    input="query",
    output="a_fun_software_project_idea",
    enable_rich_tables=True,
    wait_for_input=True,
)

# Second agent: consumes the idea and produces a full project plan.
project_plan_agent = FlockFactory.create_default_agent(
    name="project_plan_agent",
    input="a_fun_software_project_idea",
    output="catchy_project_name, project_pitch, techstack, project_implementation_plan",
    enable_rich_tables=True,
    wait_for_input=True,
)

# Default router = handoff to specific agent
idea_agent.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=project_plan_agent.name))

# LLM router = handoff to agent based on LLM's decision
#idea_agent.handoff_router = LLMRouter(config=LLMRouterConfig(with_output=True))
flock.add_agent(idea_agent)
flock.add_agent(project_plan_agent)
# NOTE(review): start_cli() is called before flock.run() — if the CLI session
# blocks, the run below only executes after it ends. Confirm intended order.
flock.start_cli()
flock.run(
    input={"query": "fun software project idea about ducks"},
    start_agent=idea_agent,
    agents=[idea_agent,project_plan_agent]
)
```

### examples\01_introduction\07_all_base_concepts.py

- **Lines**: 76
- **Last modified**: 2025-04-02 17:29:19

```py
import os
from flock.core import Flock, FlockFactory
from flock.core.flock_registry import flock_type 
from pydantic import BaseModel, Field
from typing import Optional, Literal
# NOTE(review): `os`, `Field` and `Optional` are imported but unused here.

class Scene(BaseModel):
    """A single scene within a chapter."""
    title: str
    setting: str
    goal: str
    conflict: str
    outcome: str
    characters_involved: list[str]
    story_beats: list[str]


class Character(BaseModel):
    """A character sheet, including an image prompt for illustration."""
    name: str
    role: str
    age: str
    appearance: str
    image_prompt: str
    personality_traits: list[str]
    backstory: str
    motivations: str
    weaknesses: str
    character_arc: str
    
class Chapter(BaseModel):
    """A chapter composed of scenes."""
    title: str
    chapter_number: int
    purpose: str
    summary: str
    scenes: list[Scene]


# @flock_type registers the model so agents can use it in output specs.
@flock_type 
class Story(BaseModel):
    """Top-level story object produced by the agent."""
    title: str
    status: Literal["Idea", "Drafting", "Revising", "Completed"]
    genre: list[str]
    tone: str
    themes: list[str]
    central_conflict: str
    brief_summary: str
    long_summary: str
    characters: list[Character]
    chapters: list[Chapter]
    
@flock_type 
class StoryBible(BaseModel):
    """Companion reference material keeping the story consistent."""
    timeline: dict[str, str] 
    worldbuilding_notes: dict[str, str] 
    consistency_rules: list[str] 
    style_guide: str


MODEL = "gemini/gemini-2.5-pro-exp-03-25" 
flock = Flock(model=MODEL)


# One agent turns a one-line idea into both registered output types.
story_agent = FlockFactory.create_default_agent(
    name="story_agent",
    input="story_idea",
    output="story: Story, story_bible: StoryBible",
    # Large output budget: Story + StoryBible are deeply nested models.
    max_tokens=60000,
)
flock.add_agent(story_agent)


# NOTE(review): start_api() is called before flock.run() — if the API server
# blocks, the run below never executes. Confirm intended order.
flock.start_api(server_name="Example #07", create_ui=True)
flock.run(
    start_agent=story_agent, 
    input={"story_idea": "A story about a young woman who discovers she has the ability to time travel."}
)
```

### examples\02_concepts\api\api_client.py

- **Lines**: 19
- **Last modified**: 2025-02-28 23:13:59

```py
import httpx

from flock.core.flock_api import FlockAPIRequest
from rich.console import Console
from rich.pretty import Pretty


console = Console()

# Build a typed request payload targeting the "bloggy" agent.
payload = FlockAPIRequest(agent_name="bloggy", inputs={"blog_idea": "A blog about cats"})

# NOTE(review): assumes the companion api_server example is already listening
# on 127.0.0.1:8344 — start it first or this request will fail to connect.
response = httpx.post("http://127.0.0.1:8344/run/flock",content=payload.model_dump_json())
response.raise_for_status()

# Pretty-print the JSON response to the terminal.
pretty = Pretty(response.json())
console.print(pretty)
```

### examples\02_concepts\api\api_server.py

- **Lines**: 19
- **Last modified**: 2025-04-02 17:29:19

```py
from flock.core import Flock,FlockFactory



MODEL = "openai/gpt-4o"

flock = Flock(model=MODEL,enable_logging=True)

# Declarative agent: blog idea in, funny title + headers out.
bloggy = FlockFactory.create_default_agent(
    name="bloggy",
    input="blog_idea",
    output="funny_blog_title, blog_headers",
)
flock.add_agent(bloggy)

# Swagger: http://127.0.0.1:8344/docs
# Redoc: http://127.0.0.1:8344/redoc
# POST: http://127.0.0.1:8344/run
# Serve the flock over HTTP; create_ui=True also exposes a web UI.
flock.start_api(create_ui=True)
```

### examples\02_concepts\context\context.py

- **Lines**: 57
- **Last modified**: 2025-03-03 12:28:42

```py

from flock.core import Flock, FlockFactory
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.routers.default.default_router import DefaultRouter, DefaultRouterConfig

class ContextModule(FlockModule):
    """Example module that writes a fixed value into the shared flock context.

    ``terminate`` runs at the end of the owning agent's lifecycle and
    overwrites ``flock_agent1.a_random_name`` in the context, so the
    downstream agent reads "John Doe" instead of the generated value.
    """

    def terminate(self, agent, inputs, result, context=None):
        # context defaults to None, so guard before dereferencing —
        # the original called set_variable unconditionally, which would
        # raise AttributeError when no context is supplied.
        if context is not None:
            context.set_variable("flock_agent1.a_random_name", "John Doe")
     

flock = Flock()



# Flock has an advanced context system that allows you to store and retrieve data
# across agents. The context is a dictionary that can be accessed and modified by 
# all agents in the flock via modules
# Agents will write their inputs and outputs to the context prefixed with "agentname."



# Agent 1 generates a random name; it takes no input.
flock_agent_1 = FlockFactory.create_default_agent(
    name="flock_agent1",
    input="",
    output="a_random_name: str",
)


# Agent 2 uppercases the name. Its ContextModule (defined above) overwrites
# "flock_agent1.a_random_name" in the context with "John Doe" on terminate,
# so agent 3 sees the overwritten value, not the generated one.
flock_agent_2 = FlockFactory.create_default_agent(
    name="flock_agent2",
    input="a_random_name",
    output="name_in_caps: str",
)
flock_agent_2.add_module(ContextModule(name="context_module", config= FlockModuleConfig()))

# Agent3 will reverse John Doe to eoD nhoJ
# Note the input: it reads the context key "flock_agent1.a_random_name"
# directly, and print_context=True dumps the context for inspection.
flock_agent_3 = FlockFactory.create_default_agent(
    name="flock_agent3",
    input="flock_agent1.a_random_name",
    output="name_reversed: str",
    wait_for_input=True,
    print_context=True,
)


# NOTE(review): agent 3 hands off back to agent 1, so the chain loops
# 1 -> 2 -> 3 -> 1 -> ... — presumably wait_for_input on agent 3 is the
# intended break point; confirm this doesn't run unattended.
flock_agent_1.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=flock_agent_2.name))
flock_agent_2.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=flock_agent_3.name))
flock_agent_3.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=flock_agent_1.name))
flock.add_agent(flock_agent_1)
flock.add_agent(flock_agent_2)
flock.add_agent(flock_agent_3)

flock.run(
    input={},
    start_agent=flock_agent_1,
    agents=[flock_agent_1, flock_agent_2, flock_agent_3]
)
```

### examples\02_concepts\evaluator\multi_hops_memory.py

- **Lines**: 81
- **Last modified**: 2025-04-02 15:32:47

```py

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.evaluators.memory.memory_evaluator import MemoryEvaluator, MemoryEvaluatorConfig
from flock.modules.memory.memory_module import MemoryModule, MemoryModuleConfig



def write_to_kg():
  """Seed the memory store with facts, one agent run per fact."""
  write_to_kg_agent = FlockFactory.create_default_agent(model="openai/gpt-4o",name="write_to_kg_agent", 
                                            input="data", 
                                            output_theme=OutputTheme.aardvark_blue)
  

  # Replace the default evaluator: input "data" adds facts to memory.
  write_to_kg_agent.evaluator = MemoryEvaluator(name="mem_eval", 
                                config=MemoryEvaluatorConfig(splitting_mode="characters", 
                                                          number_of_concepts_to_extract=3))

  write_to_kg_agent.run(inputs={"data": "Andre is 38 years old and author of the agent framework 'flock'"})
  write_to_kg_agent.run(inputs={"data": "Andre works for white duck"})
  write_to_kg_agent.run(inputs={"data": "Andre has two cats"})
  write_to_kg_agent.run(inputs={"data": "White Duck is a cloud consulting company"})
  write_to_kg_agent.run(inputs={"data": "Flock is an agent framework designed for scalable multi-agent systems"})
  write_to_kg_agent.run(inputs={"data": "One of Andre's cats is named Luna"})
  write_to_kg_agent.run(inputs={"data": "The other cat is named Lucy"})
  write_to_kg_agent.run(inputs={"data": "Andre lives in Germany"})
  write_to_kg_agent.run(inputs={"data": "Germany is in Europe"})

def read_from_kg():
  """Query the previously seeded memory with a multi-hop question."""
  read_from_kg_agent = FlockFactory.create_default_agent(model="openai/gpt-4o",name="read_from_kg_agent", 
                                            input="query", 
                                            output_theme=OutputTheme.aardvark_blue)

  # replace the default evaluator with the memory evaluator
  read_from_kg_agent.evaluator = MemoryEvaluator(name="mem_eval", config=MemoryEvaluatorConfig())


  # #### **2-Hop Question:**
  # **Question:** What kind of company does the employer of the author of 'flock' belong to?  
  # **Reasoning:**  
  # 1. "Andre is the author of the agent framework 'flock'."  
  # 2. "Andre works for White Duck."  
  # 3. "White Duck is a cloud consulting company."  
  # 4. Therefore, the employer of the author of 'flock' is a cloud consulting company.

  read_from_kg_agent.run(inputs={"query": "What kind of company does the employer of the author of 'flock' belong to?"})

def read_from_kg_and_evaluate():
  """Answer multi-hop questions via the MemoryModule in read-only mode."""
  read_from_kg_and_evaluate_agent = FlockFactory.create_default_agent(model="openai/gpt-4o",name="read_from_kg_and_evaluate_agent", 
                                            input="query", 
                                            output="answer",
                                            output_theme=OutputTheme.aardvark_blue,
                                            enable_rich_tables=True)
  
  # Module variant (not evaluator): read-only so nothing new is stored.
  read_from_kg_and_evaluate_agent.add_module(MemoryModule(name="mem_eval", config=MemoryModuleConfig(splitting_mode="characters", enable_read_only_mode=True)))



  read_from_kg_and_evaluate_agent.run(inputs={"query": "What kind of company does the employer of the author of 'flock' belong to?"})

  # #### **3-Hop Question:**
  # **Question:** In which continent does the creator of the agent framework 'flock' live?  
  # **Reasoning:**  
  # 1. "Andre is the author of the agent framework 'flock'."  
  # 2. "Andre lives in Germany."  
  # 3. "Germany is in Europe."  
  # 4. Therefore, the creator of 'flock' lives in Europe.
  read_from_kg_and_evaluate_agent.run(inputs={"query": "In which continent does the creator of the agent framework 'flock' live? "})



# Toggle the scenarios below; run write_to_kg() once first to seed memory.
if __name__ == "__main__":
  #write_to_kg()
  read_from_kg()
  #read_from_kg_and_evaluate()
  pass
```

### examples\02_concepts\evaluator\multi_hops_zep.py

- **Lines**: 85
- **Last modified**: 2025-02-28 23:13:59

```py

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.evaluators.zep.zep_evaluator import ZepEvaluator, ZepEvaluatorConfig
from flock.modules.zep.zep_module import ZepModule, ZepModuleConfig


def write_to_kg():
  """Seed the Zep knowledge graph with facts, one agent run per fact."""
  write_to_kg_agent = FlockFactory.create_default_agent(name="write_to_kg_agent", 
                                            input="data", 
                                            output_theme=OutputTheme.aardvark_blue)
  
  # --------------------------------
  # Evaluators
  # --------------------------------
  # Evaluators are the components that evaluate the agent's inputs and outputs.
  # In this case, we're using the ZepEvaluator to interact with Zep.
  # This replaces the default declarative evaluator
  # The ZepEvaluator uses the ZepModule to interact with Zep.
  # input with "data" will add the data to the memory
  # input with "query" will search the memory for the query and return the facts
  write_to_kg_agent.evaluator = ZepEvaluator(name="zep", config=ZepEvaluatorConfig())

  write_to_kg_agent.run(inputs={"data": "Andre is 38 years old and author of the agent framework 'flock'"})
  write_to_kg_agent.run(inputs={"data": "Andre works for white duck"})
  write_to_kg_agent.run(inputs={"data": "Andre has two cats"})
  write_to_kg_agent.run(inputs={"data": "White Duck is a cloud consulting company"})
  write_to_kg_agent.run(inputs={"data": "Flock is an agent framework designed for scalable multi-agent systems"})
  write_to_kg_agent.run(inputs={"data": "One of Andre's cats is named Luna"})
  write_to_kg_agent.run(inputs={"data": "The other cat is named Lucy"})
  write_to_kg_agent.run(inputs={"data": "Andre lives in Germany"})
  write_to_kg_agent.run(inputs={"data": "Germany is in Europe"})

def read_from_kg():
  """Query the Zep knowledge graph with a multi-hop question."""
  read_from_kg_agent = FlockFactory.create_default_agent(name="read_from_kg_agent", 
                                            input="query", 
                                            output_theme=OutputTheme.aardvark_blue)

  # replace the default evaluator with ZepEvaluator
  read_from_kg_agent.evaluator = ZepEvaluator(name="zep", config=ZepEvaluatorConfig())


  # #### **2-Hop Question:**
  # **Question:** What kind of company does the employer of the author of 'flock' belong to?  
  # **Reasoning:**  
  # 1. "Andre is the author of the agent framework 'flock'."  
  # 2. "Andre works for White Duck."  
  # 3. "White Duck is a cloud consulting company."  
  # 4. Therefore, the employer of the author of 'flock' is a cloud consulting company.

  read_from_kg_agent.run(inputs={"query": "What kind of company does the employer of the author of 'flock' belong to?"})

def read_from_kg_and_evaluate():
  """Answer multi-hop questions using the ZepModule alongside the default evaluator."""
  read_from_kg_and_evaluate_agent = FlockFactory.create_default_agent(name="read_from_kg_and_evaluate_agent", 
                                            input="query", 
                                            output="short_answer",
                                            output_theme=OutputTheme.aardvark_blue)
  
  # Module variant: Zep augments the agent instead of replacing its evaluator.
  read_from_kg_and_evaluate_agent.add_module(ZepModule(name="zep", config=ZepModuleConfig()))



  read_from_kg_and_evaluate_agent.run(inputs={"query": "What kind of company does the employer of the author of 'flock' belong to?"})

  # #### **3-Hop Question:**
  # **Question:** In which continent does the creator of the agent framework 'flock' live?  
  # **Reasoning:**  
  # 1. "Andre is the author of the agent framework 'flock'."  
  # 2. "Andre lives in Germany."  
  # 3. "Germany is in Europe."  
  # 4. Therefore, the creator of 'flock' lives in Europe.
  read_from_kg_and_evaluate_agent.run(inputs={"query": "In which continent does the creator of the agent framework 'flock' live? "})



# Toggle the scenarios below; run write_to_kg() once first to seed the graph.
if __name__ == "__main__":
  #write_to_kg()
  read_from_kg()
  #read_from_kg_and_evaluate()
  pass
```

### examples\02_concepts\modules\use_modules.py

- **Lines**: 67
- **Last modified**: 2025-04-02 17:29:19

```py
"""
Title: Advanced Flock Agent with Caching, Type Hints, and Tool Integration

In this example, we'll show you how to build a more advanced Flock system that:
  - Uses a custom output formatter (RichTables) for a polished, swaggy display.
  - Defines output types using standard Python type hints (including lists and Literals) for structured results.
  - Integrates external tools (like a web content scraper) so that agents can perform more complex operations.
  - Leverages caching so that if an agent is called with the same input, the cached result is returned—this is particularly
    useful for expensive operations such as web scraping or during debugging.

The agent in this example takes a URL as input and outputs:
  - A title,
  - A list of headings,
  - A list of dictionaries mapping entities to metadata, and
  - A type (limited to one of 'news', 'blog', 'opinion piece', or 'tweet').

After executing the agent, you can work with the result as a real Python object that respects the defined types.

Let's dive in!
"""


from pprint import pprint

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.core.tools import basic_tools
from flock.modules.memory.memory_module import MemoryModule, MemoryModuleConfig
from flock.modules.zep.zep_module import ZepModule, ZepModuleConfig


flock = Flock(enable_logging=True)

agent = FlockFactory.create_default_agent(
    name="my_agent",
    input="url",
    output="title, headings: list[str]," 
            "entities_and_metadata: list[dict[str, str]]," 
            "type:Literal['news', 'blog', 'opinion piece', 'tweet']",
    tools=[basic_tools.get_web_content_as_markdown],
    enable_rich_tables=True,
    output_theme=OutputTheme.aardvark_blue,
)

# --------------------------------
# Add a module to the agent
# --------------------------------
# Modules are modules (heh) that can be added to an agent to extend its capabilities.
# Modules run at certain points in the agent's lifecycle and can manipulate the inputs and outputs and the agent itself.
# In this case, we're adding the Zep module to the agent, 
# which allows it to use Zep to store and retrieve information in Knowledge Graphs.
# Currently there are two graph based modules: Zep and Memory.
# Memory is more lightweight and easier to use, but Zep offers more features and is more powerful.

# zep = ZepModule(name="zep",config=ZepModuleConfig())
# agent.add_module(zep)

mem = MemoryModule(name="mem_split",config=MemoryModuleConfig(splitting_mode="characters", number_of_concepts_to_extract=5))
agent.add_module(mem)


flock.add_agent(agent)
result = flock.run(
    start_agent=agent,
    input={"url": "https://lite.cnn.com/travel/alexander-the-great-macedon-persian-empire-darius/index.html"},
)

```

### examples\02_concepts\router\router_example.py

- **Lines**: 98
- **Last modified**: 2025-02-28 23:13:59

```py
"""Example demonstrating how to use routers with agents.

This example shows how to use different routers with agents for auto-handoff.
"""

import asyncio
from typing import Dict, Any

from flock.core import Flock, FlockAgent


async def main():
    """Run the example."""
    # Create a Flock instance
    flock = Flock(model="openai/gpt-4o")

    # Create agents
    research_agent = FlockAgent(
        name="research_agent",
        description="Researches a topic and provides detailed findings",
        input="topic: str | The topic to research",
        output="findings: str | Detailed research findings",
    )
    # Create and attach an AgentRouter to the research agent
    from flock.routers.agent.agent_router import AgentRouter, AgentRouterConfig
    
    agent_router = AgentRouter(
        registry=None,  # Will be set by the framework
        config=AgentRouterConfig(
            confidence_threshold=0.6,  # Higher threshold for more confident decisions
        )
    )
    research_agent.handoff_router = agent_router

    summary_agent = FlockAgent(
        name="summary_agent",
        description="Creates a concise summary of research findings",
        input="findings: str | The research findings to summarize",
        output="summary: str | A concise summary of the findings",
    )
    # Create and attach an LLMRouter to the summary agent
    from flock.routers.llm.llm_router import LLMRouter, LLMRouterConfig
    
    llm_router = LLMRouter(
        config=LLMRouterConfig(
            temperature=0.1,  # Lower temperature for more deterministic decisions
            confidence_threshold=0.7,  # Higher threshold for more confident decisions
        )
    )
    summary_agent.handoff_router = llm_router

    blog_agent = FlockAgent(
        name="blog_agent",
        description="Creates a well-structured blog post based on research",
        input="findings: str | The research findings to use for the blog",
        output="blog_post: str | A complete blog post",
    )

    presentation_agent = FlockAgent(
        name="presentation_agent",
        description="Creates a presentation outline based on research",
        input="findings: str | The research findings to use for the presentation",
        output="presentation: str | A presentation outline",
    )

    # Add agents to the flock
    flock.add_agent(research_agent)
    flock.add_agent(summary_agent)
    flock.add_agent(blog_agent)
    flock.add_agent(presentation_agent)

    # Run the workflow starting with the research agent
    print("Running workflow with agent-based router...")
    result = await flock.run(
        start_agent=research_agent,
        input={"topic": "Artificial Intelligence"},
    )

    # Print the result
    print("\nWorkflow Result:")
    print_result(result)


def print_result(result: Dict[str, Any]):
    """Pretty-print a workflow result dict, truncating long string values."""
    divider = "-" * 80
    print(divider)
    for key in result:
        value = result[key]
        print(f"{key}:")
        # Abbreviate long strings so the console output stays readable.
        is_long_text = isinstance(value, str) and len(value) > 100
        print(f"  {value[:100]}..." if is_long_text else f"  {value}")
    print(divider)


# Entry point: drive the async workflow with asyncio when run as a script.
if __name__ == "__main__":
    asyncio.run(main())
```

### examples\02_concepts\tools\azure_tools_example.py

- **Lines**: 221
- **Last modified**: 2025-03-03 16:02:36

```py
"""
Azure AI Search Tools Example with Flock Agent Framework

This example demonstrates how to use Azure AI Search tools within the Flock agent framework.
It creates agents that can:
1. Create a search index
2. Upload documents to the index
3. Query the index with both text and filter criteria
4. Process and summarize search results

Requirements:
- Set the following environment variables:
  - AZURE_SEARCH_ENDPOINT: Your Azure AI Search service endpoint URL
  - AZURE_SEARCH_API_KEY: Your Azure AI Search API key
"""

import os
import time
import uuid
import json
from dotenv import load_dotenv

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.core.tools.azure_tools import (
    azure_search_initialize_clients,
    azure_search_create_index,
    azure_search_upload_documents,
    azure_search_query,
    azure_search_get_document,
    azure_search_delete_documents,
    azure_search_list_indexes,
    azure_search_get_index_statistics
)
from flock.core.tools import basic_tools

# Load environment variables from .env file
load_dotenv()

# Make sure Azure Search credentials are set
if not os.environ.get("AZURE_SEARCH_ENDPOINT") or not os.environ.get("AZURE_SEARCH_API_KEY"):
    raise ValueError(
        "Please set AZURE_SEARCH_ENDPOINT and AZURE_SEARCH_API_KEY environment variables"
    )

# Create a unique index name for this example
timestamp = int(time.time())
random_id = str(uuid.uuid4())[:8]
INDEX_NAME = f"flock-example-{timestamp}-{random_id}"
os.environ["AZURE_SEARCH_INDEX_NAME"] = INDEX_NAME

# --------------------------------
# Create the flock
# --------------------------------
flock = Flock()

# --------------------------------
# Define our agents
# --------------------------------

# 1. Index Creator Agent
# This agent creates a search index with appropriate schema
index_creator = FlockFactory.create_default_agent(
    name="index_creator",
    input="index_requirements",
    output="created_index_details",
    tools=[
        azure_search_create_index,
        basic_tools.code_eval,
        basic_tools.json_parse_safe
    ],
    enable_rich_tables=True,
    output_theme=OutputTheme.adventuretime,
)

# 2. Document Uploader Agent
# This agent uploads sample documents to the search index
document_uploader = FlockFactory.create_default_agent(
    name="document_uploader",
    input="created_index_details, documents_to_upload",
    output="upload_results",
    tools=[
        azure_search_upload_documents,
        basic_tools.code_eval,
        basic_tools.json_parse_safe
    ],
    enable_rich_tables=True,
    output_theme=OutputTheme.adventuretime,
)

# 3. Search Agent
# This agent searches the index and processes results
search_agent = FlockFactory.create_default_agent(
    name="search_agent",
    input="upload_results, search_query, filter_criteria",
    output="search_results: dict, relevant_documents: list, summary: str",
    tools=[
        azure_search_query,
        azure_search_get_document,
        basic_tools.code_eval,
        basic_tools.json_parse_safe
    ],
    enable_rich_tables=True,
    output_theme=OutputTheme.adventuretime,
)

# 4. Cleanup Agent (optional)
# This agent gets statistics and can delete documents if needed
cleanup_agent = FlockFactory.create_default_agent(
    name="cleanup_agent",
    input="search_results",
    output="index_statistics: dict, cleanup_recommendation: str",
    tools=[
        azure_search_get_index_statistics,
        azure_search_list_indexes,
        azure_search_delete_documents,
        basic_tools.code_eval
    ],
    enable_rich_tables=True,
    output_theme=OutputTheme.adventuretime,
)

# --------------------------------
# Configure agent flow
# --------------------------------
# Set up the agent sequence:
# index_creator -> document_uploader -> search_agent -> cleanup_agent
from flock.routers.default.default_router import DefaultRouter, DefaultRouterConfig

# Each DefaultRouter hands control to the next agent (referenced by name)
# once the current agent finishes, forming a linear pipeline.
index_creator.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=document_uploader.name))
document_uploader.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=search_agent.name))
search_agent.handoff_router = DefaultRouter(config=DefaultRouterConfig(hand_off=cleanup_agent.name))

# Add all agents to the flock
flock.add_agent(index_creator)
flock.add_agent(document_uploader)
flock.add_agent(search_agent)
flock.add_agent(cleanup_agent)

# --------------------------------
# Define sample data
# --------------------------------
# Define our sample documents.
# Each document matches the schema described in index_requirements below
# (id, title, content, category, rating).
sample_documents = [
    {
        "id": "doc1",
        "title": "Azure AI Search Overview",
        "content": "Azure AI Search is a cloud search service with built-in AI capabilities.",
        "category": "cloud",
        "rating": 5
    },
    {
        "id": "doc2",
        "title": "Python Development Best Practices",
        "content": "Write clean, maintainable Python code by following established best practices.",
        "category": "development",
        "rating": 4
    },
    {
        "id": "doc3",
        "title": "Azure and Python Integration",
        "content": "Learn how to use Azure services with Python applications effectively.",
        "category": "cloud",
        "rating": 5
    },
    {
        "id": "doc4",
        "title": "Search Engine Optimization",
        "content": "Improve your website's visibility in search engine results pages.",
        "category": "marketing",
        "rating": 3
    },
    {
        "id": "doc5",
        "title": "Machine Learning on Azure",
        "content": "Train and deploy machine learning models using Azure ML.",
        "category": "ai",
        "rating": 5
    }
]

# Define index requirements.
# This plain-language spec is the input of the index_creator agent.
index_requirements = """
Create a search index with the following fields:
- id: string, key field
- title: string, searchable, sortable
- content: string, searchable
- category: string, filterable
- rating: integer, filterable, sortable

The index name should be dynamically set from the AZURE_SEARCH_INDEX_NAME environment variable.
"""

# --------------------------------
# Run the flock
# --------------------------------
# Start the agent workflow. Documents are passed as a JSON string so the
# agents can parse them with json_parse_safe.
flock.run(
    start_agent=index_creator,
    input={
        "index_requirements": index_requirements,
        "documents_to_upload": json.dumps(sample_documents),
        "search_query": "Azure cloud capabilities",
        "filter_criteria": "rating eq 5"
    }
)

# --------------------------------
# Cleanup
# --------------------------------
# Delete the test index after running the example.
# Best-effort: a failure here is only reported as a warning so the example
# still finishes cleanly.
# NOTE(review): INDEX_NAME is presumably defined earlier in this file from
# the AZURE_SEARCH_INDEX_NAME environment variable — confirm.
print(f"\nCleaning up: Deleting index '{INDEX_NAME}'...")
try:
    clients = azure_search_initialize_clients()
    index_client = clients["index_client"]
    index_client.delete_index(INDEX_NAME)
    print(f"Index '{INDEX_NAME}' deleted successfully.")
except Exception as e:
    print(f"Warning: Failed to delete test index '{INDEX_NAME}': {e}")

print("\nExample completed!") 
```

### examples\03_apps\roguelike\game.py

- **Lines**: 505
- **Last modified**: 2025-02-28 23:13:59

```py
import random
import asyncio
from enum import Enum, auto
from typing import List, Optional
from dataclasses import dataclass, field
from itertools import cycle

from flock.core import Flock, FlockFactory
from flock.core.logging.formatters.themes import OutputTheme

# Import rich components
from rich.console import Console, Group
from rich.panel import Panel
from rich.table import Table
from rich.prompt import Prompt
from pydantic import BaseModel

# Basic game constants.
# NOTE(review): MAP_WIDTH/MAP_HEIGHT describe DEFAULT_MAP's dimensions, but
# the code below derives sizes from the map rows themselves — confirm whether
# these constants are used elsewhere in the file.
MAP_WIDTH = 20
MAP_HEIGHT = 10
DEFAULT_MODEL = "openai/gpt-4o"  # Can be changed based on availability

# Define the map with # as walls, . as floor, P as player starting position
DEFAULT_MAP = [
    "####################",
    "#P...#.............#",
    "#....#.............#",
    "#....#.............#",
    "#....#.............#",
    "#....#.............#",
    "#....#.............#",
    "#....#.............#",
    "#..................#",
    "####################"
]

# Kinds of things that can occupy a map tile. The functional Enum API
# auto-numbers members 1..3, matching the original auto() values.
EntityType = Enum("EntityType", ["PLAYER", "NPC", "WALL"])

# The four cardinal movement directions. Functional Enum API: members are
# auto-numbered 1..4, identical to the original auto() values.
Direction = Enum("Direction", ["UP", "DOWN", "LEFT", "RIGHT"])

# The actions an entity (player or NPC) can take on its turn. Functional
# Enum API: members are auto-numbered 1..4, identical to the original.
Action = Enum("Action", ["MOVE", "ATTACK", "TALK", "WAIT"])
    


    
class Map(BaseModel):
    """Pydantic model describing a game map: dimensions plus one string per row."""
    width: int   # number of columns
    height: int  # number of rows
    data: List[str]  # one string per row; '#' wall, '.' floor
    


@dataclass
class Entity:
    """A creature on the map: the player or an NPC, with combat and chat state."""
    name: str
    type: EntityType
    x: int  # column on the map
    y: int  # row on the map
    char: str  # single character drawn on the map
    health: int = 10
    max_health: int = 10
    attack_power: int = 2  # flat damage dealt per attack
    personality: str = ""  # free-text persona fed to the NPC's LLM agent
    chat_history: List[str] = field(default_factory=list)
    is_alive: bool = True
    color: str = "white"   # New attribute for the entity's color

    def __str__(self):
        # Rich-markup summary: colored name, glyph, position and HP.
        return f"[{self.color}]{self.name}[/{self.color}] ({self.char}) at ({self.x}, {self.y}) HP: {self.health}/{self.max_health}"

def format_entity(entity: Entity) -> str:
    """Return the entity's name wrapped in rich color markup: [color]name[/color]."""
    template = "[{c}]{n}[/{c}]"
    return template.format(c=entity.color, n=entity.name)

class Scene(BaseModel):
    """Snapshot of the game world as a pydantic model.

    NOTE(review): `entities` holds dataclass `Entity` instances inside a
    pydantic model — confirm pydantic's dataclass handling accepts this.
    """
    name: str
    description: str
    entities: List[Entity]
    map_data: Map
    turn: int
    player_index: int  # index of the player within `entities`

@dataclass
class GameState:
    """Mutable game container: map rows, entities, turn counter and event log."""
    map_data: List[str]
    entities: List[Entity] = field(default_factory=list)
    turn: int = 0
    player_index: int = 0
    game_log: List[str] = field(default_factory=list)
    max_log_length: int = 10

    def add_to_log(self, message: str):
        """Append a message, keeping only the newest max_log_length entries."""
        self.game_log.append(message)
        overflow = len(self.game_log) - self.max_log_length
        if overflow > 0:
            # Trim the oldest entries in place so existing references to the
            # list object stay valid.
            del self.game_log[:overflow]

    @property
    def player(self) -> Entity:
        """The player's Entity (entities[player_index])."""
        return self.entities[self.player_index]

class RoguelikeGame:
    """Top-level game object: owns the map, the entities, one Flock LLM agent
    per NPC, and the rich-based rendering."""

    def __init__(self, map_data=None, model=DEFAULT_MODEL):
        # Copy the map rows: initialize_map() rewrites the player's start row
        # in place, and without the copy that mutation would corrupt the
        # shared module-level DEFAULT_MAP (a second RoguelikeGame() would no
        # longer find a 'P' start marker).
        self.state = GameState(list(map_data or DEFAULT_MAP))
        self.flock = Flock(model=model)
        self.console = Console()
        # Cycle through a list of colors for NPCs.
        self.npc_color_cycle = cycle([
            "red", "green", "blue", "magenta", "cyan", "yellow",
            "bright_green", "bright_blue", "bright_magenta", "bright_cyan"
        ])
        self.initialize_map()
        self.setup_agents()

    def initialize_map(self):
        """Parse the map and create entities."""
        player = None
        for y, row in enumerate(self.state.map_data):
            for x, cell in enumerate(row):
                if cell == 'P':
                    player = Entity(
                        name="Player",
                        type=EntityType.PLAYER,
                        x=x,
                        y=y,
                        char="@",
                        color="bright_white"  # Set player color
                    )
                    # Replace the player starting position with floor.
                    self.state.map_data[y] = row[:x] + '.' + row[x+1:]
                    break
            if player:
                break

        if player:
            self.state.entities.append(player)
            self.state.player_index = 0

        # Add NPCs with unique colors.
        self.add_npc("Edgar, Friendly Guard", 5, 3, "G", personality="Helpful and protective guard who patrols the area")
        self.add_npc("Malon, Suspicious Merchant", 10, 5, "M", personality="Greedy merchant who is always looking for a good deal")
        self.add_npc("Oshram, Angry Orc", 15, 7, "O", personality="Aggressive orc warrior who hates humans, especially merchants")

    def add_npc(self, name, x, y, char, personality=""):
        """Add an NPC with a unique color and return it."""
        npc = Entity(
            name=name,
            type=EntityType.NPC,
            x=x,
            y=y,
            char=char,
            personality=personality,
            color=next(self.npc_color_cycle)
        )
        self.state.entities.append(npc)
        return npc

    def setup_agents(self):
        """Create one Flock agent per NPC and remember its name on the entity."""
        for entity in self.state.entities:
            if entity.type == EntityType.NPC:
                agent = FlockFactory.create_default_agent(
                    name=f"agent_{entity.name.lower().replace(' ', '_')}",
                    description=f"You are {entity.name}, a character in a roguelike game. Never break character.",
                    input="""
                        myself: Entity | Your own entity information,
                        nearby_entities: list | List of nearby entities including the player,
                        map_view: list | The portion of the map that this entity can see,
                        game_log: list | Recent game events
                    """,
                    output="""
                        action: Literal["move", "attack", "talk", "wait"] | The action to take. talk has a range of 2 tiles,
                        direction: Literal["up", "down", "left", "right"] | Direction for movement if action is move,
                        target: str | Target entity name if action is attack or talk,
                        message: str | Message to say if action is talk,
                        reasoning: str | Short explanation of why this action was chosen
                    """,
                    temperature=0.7,
                    no_output=True
                )
                self.flock.add_agent(agent)
                # Dynamic attribute (not a dataclass field) linking NPC -> agent.
                entity.agent_name = agent.name

    def get_cell(self, x, y) -> str:
        """Return the map character at (x, y); out-of-bounds counts as a wall."""
        if 0 <= y < len(self.state.map_data) and 0 <= x < len(self.state.map_data[y]):
            return self.state.map_data[y][x]
        return '#'

    def is_walkable(self, x, y) -> bool:
        """True if (x, y) is floor and not occupied by a living entity."""
        if self.get_cell(x, y) == '#':
            return False
        for entity in self.state.entities:
            if entity.is_alive and entity.x == x and entity.y == y:
                return False
        return True

    def get_entity_at(self, x, y) -> Optional[Entity]:
        """Return the living entity at (x, y), or None."""
        for entity in self.state.entities:
            if entity.is_alive and entity.x == x and entity.y == y:
                return entity
        return None

    def get_nearby_entities(self, entity: Entity, distance: int = 5) -> List[Entity]:
        """Living entities within a Chebyshev distance (square box) of `entity`."""
        nearby = []
        for other in self.state.entities:
            if other != entity and other.is_alive:
                dx = abs(entity.x - other.x)
                dy = abs(entity.y - other.y)
                if dx <= distance and dy <= distance:
                    nearby.append(other)
        return nearby

    def get_map_view(self, entity: Entity, vision_range: int = 5) -> List[str]:
        """Get a portion of the map centered on the entity (plain view for LLM input)."""
        map_view = []
        for y in range(entity.y - vision_range, entity.y + vision_range + 1):
            row = ""
            for x in range(entity.x - vision_range, entity.x + vision_range + 1):
                entity_here = self.get_entity_at(x, y)
                if entity_here:
                    row += entity_here.char
                else:
                    row += self.get_cell(x, y)
            map_view.append(row)
        return map_view

    def move_entity(self, entity: Entity, direction: Direction) -> bool:
        """Move one tile in `direction` if the destination is walkable; return success."""
        new_x, new_y = entity.x, entity.y
        if direction == Direction.UP:
            new_y -= 1
        elif direction == Direction.DOWN:
            new_y += 1
        elif direction == Direction.LEFT:
            new_x -= 1
        elif direction == Direction.RIGHT:
            new_x += 1
        if self.is_walkable(new_x, new_y):
            entity.x, entity.y = new_x, new_y
            return True
        return False

    def entity_attack(self, attacker: Entity, defender: Entity) -> bool:
        """Melee attack (1-tile range). Returns True if the attack landed."""
        dx = abs(attacker.x - defender.x)
        dy = abs(attacker.y - defender.y)
        if dx <= 1 and dy <= 1:
            damage = attacker.attack_power
            defender.health -= damage
            self.state.add_to_log(f"{format_entity(attacker)} attacks {format_entity(defender)} for {damage} damage!")
            if defender.health <= 0:
                defender.health = 0
                defender.is_alive = False
                self.state.add_to_log(f"{format_entity(defender)} is defeated!")
            return True
        else:
            self.state.add_to_log(f"{format_entity(attacker)} can't reach {format_entity(defender)}!")
            return False

    def entity_talk(self, speaker: Entity, listener: Entity, message: str) -> bool:
        """Say `message` to `listener` (2-tile range); logged and appended to the
        listener's chat history. Returns True if within range."""
        dx = abs(speaker.x - listener.x)
        dy = abs(speaker.y - listener.y)
        if dx <= 2 and dy <= 2:
            self.state.add_to_log(f"{format_entity(speaker)} to {format_entity(listener)}: {message}")
            listener.chat_history.append(f"{format_entity(speaker)}: {message}")
            return True
        else:
            self.state.add_to_log(f"{format_entity(speaker)} is too far to talk to {format_entity(listener)}!")
            return False

    async def process_player_action(self, action: Action, **kwargs) -> None:
        """Apply one player action (MOVE/ATTACK/TALK/WAIT) taken from user input.

        Expected kwargs: direction (MOVE), target (ATTACK/TALK), message (TALK).
        """
        player = self.state.player
        if action == Action.MOVE:
            direction = kwargs.get('direction')
            success = self.move_entity(player, direction)
            if success:
                self.state.add_to_log(f"{format_entity(player)} moved {direction.name.lower()}")
            else:
                self.state.add_to_log(f"{format_entity(player)} couldn't move that way")
        elif action == Action.ATTACK:
            target = kwargs.get('target')
            if target:
                self.entity_attack(player, target)
            else:
                self.state.add_to_log("No target to attack")
        elif action == Action.TALK:
            target = kwargs.get('target')
            message = kwargs.get('message', "Hello there!")
            if target:
                self.entity_talk(player, target, message)
            else:
                self.state.add_to_log("No one to talk to")
        elif action == Action.WAIT:
            self.state.add_to_log(f"{format_entity(player)} waits...")

    async def process_npc_turn(self, entity: Entity) -> None:
        """Ask the NPC's LLM agent for an action and apply it to the game state.

        Any agent error is caught and logged so one bad turn cannot crash the game.
        """
        if not entity.is_alive:
            return
        if not hasattr(entity, 'agent_name'):
            return

        agent = self.flock.registry.get_agent(entity.agent_name)
        if not agent:
            self.state.add_to_log(f"Error: No agent found for {format_entity(entity)}")
            return

        nearby_entities = self.get_nearby_entities(entity)
        map_view = self.get_map_view(entity)
        # Summarize neighbors as plain dicts for the agent input.
        nearby_info = []
        for other in nearby_entities:
            nearby_info.append({
                "name": other.name,
                "char": other.char,
                "position": (other.x, other.y),
                "health": other.health,
                "type": "Player" if other.type == EntityType.PLAYER else "NPC"
            })

        input_data = {
            "myself": entity,
            "nearby_entities": nearby_info,
            "map_view": map_view,
            "game_log": self.state.game_log
        }
        try:
            result = await agent.run_async(input_data)
            action_str = result.get("action", "wait").lower()
            direction_str = result.get("direction", "").lower()
            target_name = result.get("target", "")
            message = result.get("message", "")
            reasoning = result.get("reasoning", "")
            #self.state.add_to_log(f"{format_entity(entity)} thinks: {reasoning}")

            # Resolve the target by case-insensitive name among living entities.
            target = None
            if target_name:
                for other in self.state.entities:
                    if other.name.lower() == target_name.lower() and other.is_alive:
                        target = other
                        break

            if action_str == "move":
                direction = None
                if direction_str == "up":
                    direction = Direction.UP
                elif direction_str == "down":
                    direction = Direction.DOWN
                elif direction_str == "left":
                    direction = Direction.LEFT
                elif direction_str == "right":
                    direction = Direction.RIGHT

                if direction:
                    success = self.move_entity(entity, direction)
                    if success:
                        self.state.add_to_log(f"{format_entity(entity)} moved {direction_str}")
                else:
                    self.state.add_to_log(f"{format_entity(entity)} tries to move in an invalid direction")

            elif action_str == "attack":
                if target:
                    self.entity_attack(entity, target)
                else:
                    self.state.add_to_log(f"{format_entity(entity)} tries to attack but has no target")
            elif action_str == "talk":
                if target and message:
                    self.entity_talk(entity, target, message)
                else:
                    self.state.add_to_log(f"{format_entity(entity)} tries to talk but has no target or message")
            elif action_str == "wait":
                self.state.add_to_log(f"{format_entity(entity)} waits...")
            else:
                self.state.add_to_log(f"{format_entity(entity)} does something unexpected")
        except Exception as e:
            self.state.add_to_log(f"Error processing {format_entity(entity)}'s turn: {str(e)}")

    async def process_game_turn(self) -> None:
        """Run one NPC turn for every NPC, then advance the turn counter."""
        for entity in self.state.entities:
            if entity.type == EntityType.NPC:
                await self.process_npc_turn(entity)
        self.state.turn += 1

    def render(self):
        """Render the game state using rich panels and tables with colored entities."""
        # Create a copy of the map and place entities with colored characters.
        render_map = self.state.map_data.copy()
        # Paint entities from the highest x to the lowest: the inserted color
        # markup makes a row string longer than one character per map cell, so
        # painting left-to-right would shift the raw x index of every entity
        # painted later on the same row and corrupt the output. Right-to-left
        # keeps the prefix (and thus each remaining x index) intact.
        alive_entities = [e for e in self.state.entities if e.is_alive]
        for entity in sorted(alive_entities, key=lambda e: e.x, reverse=True):
            if 0 <= entity.y < len(render_map) and 0 <= entity.x < len(render_map[entity.y]):
                row = render_map[entity.y]
                colored_char = f"[{entity.color}]{entity.char}[/{entity.color}]"
                render_map[entity.y] = row[:entity.x] + colored_char + row[entity.x+1:]

        header = Panel(f"[bold green]Turn: {self.state.turn}[/bold green]", title="Game Status")
        map_str = "\n".join(render_map)
        map_panel = Panel(map_str, title="Map", style="blue")

        # Build the entities table with colored names and characters.
        entity_table = Table(title="Entities", header_style="bold magenta")
        entity_table.add_column("Name", justify="left")
        entity_table.add_column("Char", justify="center")
        entity_table.add_column("Position", justify="center")
        entity_table.add_column("HP", justify="center")
        for entity in self.state.entities:
            if entity.is_alive:
                entity_table.add_row(
                    f"[{entity.color}]{entity.name}[/{entity.color}]",
                    f"[{entity.color}]{entity.char}[/{entity.color}]",
                    f"({entity.x}, {entity.y})",
                    f"{entity.health}/{entity.max_health}"
                )

        log_text = "\n".join(self.state.game_log) if self.state.game_log else "No logs yet."
        log_panel = Panel(log_text, title="Game Log", style="yellow")

        return Group(header, map_panel, entity_table, log_panel)

# Simple text-based interface using rich for beautiful output.
async def main():
    """Interactive game loop: read a key command, apply the player's action,
    then let every NPC take an LLM-driven turn and re-render the screen."""
    game = RoguelikeGame()
    player = game.state.player
    running = True
    game.console.print("[bold underline green]Welcome to the LLM-Powered Roguelike![/bold underline green]")
    game.console.print("Controls: [bold]w/a/s/d[/bold] to move, [bold]t[/bold] to talk, [bold]f[/bold] to attack, [bold]q[/bold] to quit")
    
    while running:
        game.console.clear()
        game.console.print("="*40)
        game.console.print(game.render())
        game.console.print("="*40)
        
        action = Prompt.ask("\nEnter action (w/a/s/d=move, t=talk, f=attack, q=quit)").lower()
        
        if action == 'q':
            running = False
            continue
        
        if action in ('w', 'a', 's', 'd'):
            # Map WASD keys onto the Direction enum.
            direction = None
            if action == 'w':
                direction = Direction.UP
            elif action == 's':
                direction = Direction.DOWN
            elif action == 'a':
                direction = Direction.LEFT
            elif action == 'd':
                direction = Direction.RIGHT
            await game.process_player_action(Action.MOVE, direction=direction)
        
        elif action == 't':
            # Talking has a 2-tile range; list candidates and prompt for one.
            nearby = game.get_nearby_entities(player, distance=2)
            if not nearby:
                game.console.print("[red]No one nearby to talk to.[/red]")
                await asyncio.sleep(1)
                continue  # NOTE: also skips the NPC turn for this iteration
            game.console.print("Nearby entities:")
            for i, entity in enumerate(nearby):
                game.console.print(f"{i+1}. [{entity.color}]{entity.name}[/{entity.color}]")
            try:
                target_idx = int(Prompt.ask("Who do you want to talk to? (number)")) - 1
                if 0 <= target_idx < len(nearby):
                    message = Prompt.ask("What do you want to say?")
                    await game.process_player_action(Action.TALK, target=nearby[target_idx], message=message)
            except ValueError:
                game.console.print("[red]Invalid input[/red]")
        
        elif action == 'f':
            # Attacking is melee-only (1-tile range).
            nearby = game.get_nearby_entities(player, distance=1)
            if not nearby:
                game.console.print("[red]No one nearby to attack.[/red]")
                await asyncio.sleep(1)
                continue  # NOTE: also skips the NPC turn for this iteration
            game.console.print("Nearby entities:")
            for i, entity in enumerate(nearby):
                game.console.print(f"{i+1}. [{entity.color}]{entity.name}[/{entity.color}]")
            try:
                target_idx = int(Prompt.ask("Who do you want to attack? (number)")) - 1
                if 0 <= target_idx < len(nearby):
                    await game.process_player_action(Action.ATTACK, target=nearby[target_idx])
            except ValueError:
                game.console.print("[red]Invalid input[/red]")
        
        else:
            # Any unrecognized key makes the player wait a turn.
            await game.process_player_action(Action.WAIT)
        
        await game.process_game_turn()
        
        # End conditions: player dead, or no NPCs left alive.
        if not player.is_alive:
            game.console.print("\n[bold red]Game Over - You were defeated![/bold red]")
            running = False
        
        npcs_alive = any(e.is_alive and e.type == EntityType.NPC for e in game.state.entities)
        if not npcs_alive:
            game.console.print("\n[bold green]Victory! All enemies defeated![/bold green]")
            running = False

if __name__ == "__main__":
    asyncio.run(main())
```

### examples\03_apps\story_gen\cursor.py

- **Lines**: 65
- **Last modified**: 2025-03-29 13:53:59

```py
from typing import Optional
from pydantic import BaseModel, Field
from flock.core import FlockFactory, Flock


class Story(BaseModel):
    """Top-level story outline produced by the brainstorm agent."""
    title: str
    status: str = Field(default="Idea", description="Idea, Drafting, Revising, Completed")
    genre: str
    tone: str
    themes: list[str]
    central_conflict: str
    brief_summary: str
    # Forward references: Character and Chapter are defined below this class.
    characters: list["Character"] = []
    chapters: list["Chapter"] = []
    
    
class Character(BaseModel):
    """A single character participating in the story."""
    name: str
    role: str  # Protagonist, Antagonist, Supporting
    # Optional[int]: the default is None, so the annotation must allow None —
    # the original bare `int` with default=None contradicted its own type and
    # fails validation when the field is explicitly set to None.
    age: Optional[int] = Field(default=None, description="Age of the character")
    appearance: Optional[str] = None
    personality_traits: list[str] = []
    backstory: Optional[str] = None
    motivations: Optional[str] = None
    weaknesses: Optional[str] = None
    character_arc: Optional[str] = None
    
class Chapter(BaseModel):
    """One chapter of a Story, broken down into scenes."""
    title: str
    chapter_number: int
    purpose: Optional[str] = None
    summary: Optional[str] = Field(default=None, description="Key events or chapter summary")
    # Forward reference: Scene is defined below this class.
    scenes: list["Scene"] = []
    
    

class Scene(BaseModel):
    """A single scene inside a Chapter: setting, dramatic goal and beats."""
    title: str
    setting: Optional[str] = None
    goal: Optional[str] = None
    conflict: Optional[str] = None
    outcome: Optional[str] = None
    characters_involved: list[Character] = []
    story_beats: list[str] = []
    
    
class StoryBible(BaseModel):
    """Reference material that keeps a long story internally consistent."""
    timeline: dict[str, str]  # Date/Event mapping
    worldbuilding_notes: dict[str, str]  # Topic/Description
    consistency_rules: list[str]  # List of rules
    writing_reference: Optional[str] = None
    
MODEL = "groq/qwen-qwq-32b"
flock = Flock(model=MODEL)

# Brainstorming agent: turns a story idea into three full Story outlines.
# (Fixed typo in the output description: "differentstory" -> "different story";
# this text is part of the signature shown to the LLM.)
brainstorm_agent = FlockFactory.create_default_agent(
    name="brainstorm_agent",
    description="A flock of agents that brainstorms about the story",
    input="story_idea: str",
    output="story_outlines: list[Story] | Three different story outlines",
    max_tokens=4096 * 8,
)

flock.add_agent(brainstorm_agent)

# NOTE(review): no input dict is provided here — presumably Flock prompts
# interactively for "story_idea"; confirm against the framework's run() API.
flock.run(start_agent=brainstorm_agent)

```

### examples\05_documentation\getting-started\first_agent_01.py

- **Lines**: 26
- **Last modified**: 2025-02-28 23:13:59

```py
from flock.core import Flock, FlockFactory

# Spin up the orchestrator that will host our agent.
flock = Flock(
    model="openai/gpt-4",  # Pick your favorite model
)

# Declare a minimal agent: give it a topic, get back a title and headers.
blog_agent = FlockFactory.create_default_agent(
    name="bloggy",
    input="topic",
    output="catchy_title, blog_headers"
)

# Register the agent with the orchestrator.
flock.add_agent(blog_agent)

# Kick off a run with a concrete topic.
result = flock.run(
    start_agent=blog_agent,
    input={"topic": "Why robots make great pets"}
)

# Show the generated outline.
print("✨ Title:", result.catchy_title)
print("\n📝 Headers:", result.blog_headers)
```

### examples\05_documentation\getting-started\first_agent_02.py

- **Lines**: 32
- **Last modified**: 2025-02-28 23:13:59

```py
from flock.core import Flock, FlockAgent
from flock.core.tools import basic_tools

# Get your flock ready for action!
flock = Flock(
    model="openai/gpt-4",  # Pick your favorite model
)

# Meet your new AI friend
bloggy = FlockAgent(
            name="bloggy",
            input="topic",
            description="Bloggy creates fun blog outlines to any given topic",
            output="""
                catchy_title: str | In all caps, 
                blog_headers: list[str] | Catchy sub-headers
            """
        )
# Add your friend to the flock
flock.add_agent(bloggy)

# Let's see what they can do!
result = flock.run(
    start_agent=bloggy,
    input={"topic": "Why robots make great pets"}
)


# Check out their work
print("✨ Title:", result.funny_blog_title)
print("\n📝 Headers:", result.blog_headers)
print("\n📝 Analysis:", result.analysis_results)
```

### examples\05_documentation\getting-started\first_agent_03.py

- **Lines**: 31
- **Last modified**: 2025-02-28 23:13:59

```py
from flock.core import Flock, FlockAgent
from flock.core.tools import basic_tools

# Orchestrator with logging switched on so every step is traceable.
flock = Flock(
    model="openai/gpt-4",  # Pick your favorite model  
    enable_logging=True
)


# An agent with typed, described outputs and two tools: web search for
# research and code evaluation for the numeric analysis.
bloggy = FlockAgent(
    name="bloggy",
    description="Bloggy creates fun blog outlines and analysis to any given topic",
    input="blog_idea: str|The topic to blog about",
    tools=[basic_tools.web_search_duckduckgo, basic_tools.code_eval],
    output=(
        "funny_blog_title: str|A catchy title for the blog, "
        "blog_headers: list[str]|List of section headers for the blog, "
        "analysis_results: dict[str,int] | Result of all analysis done as key-value pairs"
    ),
)
flock.add_agent(bloggy)

# Run the workflow with a concrete blog idea.
result = flock.run(
    start_agent=bloggy,
    input={"blog_idea": "A blog about cats, with an analysis how old the oldest cat became in days"}
)

# Check out their work
print("✨ Title:", result.funny_blog_title)
print("\n📝 Headers:", result.blog_headers)
print("\n📝 Analysis:", result.analysis_results)

### examples\playground\02_cook_book\flock_without_llms.py

- **Lines**: 79
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Tutorial Example: Creating a agent chain, without agents

also called state machine

In this example, we create a simple two-agent chain:
  1. DoublerAgent: Receives a number ("value") and outputs its double ("doubled").
  2. AdderAgent: Takes the "doubled" value from the previous agent and adds 5 to produce "result".

The special thing about this example is that we don't use any external tools or LLMs.
Instead, we create a simple chain of agents that pass data between each other.

"""

import asyncio
from typing import Any

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent

# Define a simple agent that doubles the input value.
class DoublerAgent(FlockAgent):
    """Deterministic agent: doubles the incoming "value". No LLM involved."""

    async def evaluate(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Return {"doubled": value * 2}; a missing "value" defaults to 0.

        Note: annotations use typing.Any — the original wrote the builtin
        ``any`` function, which is a callable, not a type.
        """
        value = inputs.get("value", 0)
        return {"doubled": value * 2}

# Define another agent that adds 5 to the doubled value.
class AdderAgent(FlockAgent):
    """Deterministic agent: adds 5 to the "doubled" value from the previous agent."""

    async def evaluate(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Return {"result": doubled + 5}; a missing "doubled" defaults to 0.

        Note: annotations use typing.Any — the original wrote the builtin
        ``any`` function, which is a callable, not a type.
        """
        doubled = inputs.get("doubled", 0)
        return {"result": doubled + 5}

async def main():
    """Wire the two deterministic agents into a chain and run it once."""
    # --------------------------------
    # Create the flock
    # --------------------------------
    # Create a Flock instance with default settings; no Temporal or LLM
    # calls are needed for this simple demo.
    flock = Flock()

    # --------------------------------
    # Create the agents
    # --------------------------------
    # Define the doubler agent:
    doubler = DoublerAgent(
        name="doubler_agent",
        input="value: int | The number to double",
        output="doubled: int | The doubled value",
    )

    # Define the adder agent:
    adder = AdderAgent(
        name="adder_agent",
        input="doubled: int | The doubled value from the previous agent",
        output="result: int | The final result after adding 5",
    )

    # --------------------------------
    # Set up hand-off
    # --------------------------------
    # Link the agents so that the output of doubler is passed to adder automatically.
    doubler.hand_off = adder

    # Register both agents with the flock.
    flock.add_agent(doubler)
    flock.add_agent(adder)

    # --------------------------------
    # Run the agent chain
    # --------------------------------
    # Start the workflow with the doubler agent and provide the initial input.
    result = await flock.run_async(start_agent=doubler, input={"value": 10})
    
    # Print the final result. Expected output: result should be (10*2)+5 = 25.
    print(result)

if __name__ == "__main__":
    asyncio.run(main())
```

### examples\playground\02_cook_book\long_research_no_handoff.py

- **Lines**: 71
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Title: Building huge documents without a hand off

In this example, we'll outline a thorough overview of a topic and then draft the content for each section.

We do this without using an explicit handoff between the outline and draft agents, but by using flock itself to manage the flow.

This way you can build create workflows that need a transformation of data from one agent to another without the need for a handoff.

This example implements https://dspy.ai/#__tabbed_2_6 to also highlight the ability to build dspy pipelines with flock.
"""


import asyncio

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent

from flock.core.tools import basic_tools


async def main():
    """Outline a topic, then draft every section without an explicit hand-off.

    The outline agent produces the section structure on its own; flock is
    then invoked once per section so the data can be reshaped between the
    two agents in plain Python.
    """
    flock = Flock(local_debug=True, enable_logging=True)

    outline_agent = FlockAgent(
        name="outline_agent",
        description="Outline a thorough overview of a topic.",
        input="topic",
        output="title,sections: list[str],section_subheadings: dict[str, list[str]]|mapping from section headings to subheadings",
        tools=[basic_tools.web_search_tavily, basic_tools.get_web_content_as_markdown],
    )

    draft_agent = FlockAgent(
        name="draft_agent",
        input="flock.topic,flock.section_heading,flock.section_subheadings: list[str]",
        output="content|markdown-formatted section",
        tools=[basic_tools.web_search_tavily, basic_tools.get_web_content_as_markdown],
    )

    flock.add_agent(outline_agent)
    flock.add_agent(draft_agent)

    # No hand-off here: run the outline agent by itself first.
    result = await flock.run_async(start_agent=outline_agent)

    drafted_sections = []
    # Reformat the outline output ourselves, then draft each section in turn.
    for heading, subheadings in result.section_subheadings.items():
        section_md = f"## {heading}"
        subheading_md = [f"### {subheading}" for subheading in subheadings]
        section_result = await flock.run_async(
            start_agent=draft_agent,
            input={
                "topic": result.topic,
                "section_heading": section_md,
                "section_subheadings": subheading_md,
            },
        )
        drafted_sections.append(section_result.content)
        # Rewrite the whole document after every section so progress is
        # saved even if a later draft run fails.
        with open("output.md", "w") as f:
            f.write("\n\n".join(drafted_sections))





if __name__ == "__main__":
    asyncio.run(main())
```

### examples\playground\02_cook_book\painting_by_numbers.py

- **Lines**: 86
- **Last modified**: 2025-02-28 23:13:59

```py
import numpy as np
from flock.core import Flock, FlockAgent
from pydantic import BaseModel, Field
import matplotlib.pyplot as plt

from flock.core.flock_agent import FlockAgentConfig, FlockAgentOutputConfig

from dspy import Image

from flock.core.logging.formatters.themes import OutputTheme

# Class for parts of the final image
class ImagePart(BaseModel):
    """One drawable piece of the picture: a named polyline plus its color."""

    # Human-readable label for this part of the drawing.
    image_part: str = Field(description="Part of the image to draw")
    # Vertices of the polyline; drawing connects them in order on a 10x10 canvas.
    list_of_coordinates: list[tuple[float,float]] = Field(default_factory=list, description="List of coordinates to connect to create a part of the image. X<10 - Y<10 - coordinates are floats - use this accuracy for better results")
    # Any matplotlib color spec; defaults to blue ("b").
    matplotlib_color: str = Field(default="b", description="Color of the line in the plot")

# global variables
# Model identifier; not referenced anywhere visible in this script —
# presumably a leftover default. TODO confirm.
MODEL = "openai/gpt-4"
# Rendered image from the previous iteration (set by draw_image).
image : Image = None
# Parts that produced that image (set by draw_image, fed back by load_prev_image).
image_parts: list[ImagePart] = None
# Counts draw_image invocations; used to name the saved plot files.
counter = 0

# draws the image by iterating over the list of image parts and connecting the coordinates
async def draw_image(agent,input,output):
    global image_parts
    global image
    global counter
    counter += 1

    image_parts = output["list_of_all_image_parts"]

    plt.figure(figsize=(10, 10))
    
    for image_part in image_parts:
        coordinates = np.array(image_part.list_of_coordinates)  # Convert list to numpy array
        if len(coordinates) > 1:
            plt.plot(coordinates[:, 0], coordinates[:, 1], marker='x', linestyle='-', markersize=5, color=image_part.matplotlib_color)
    
    plt.axis('equal')  # Keep aspect ratio
    plt.grid(True)
    
    save_path = f"plot_{counter}.png"
    plt.savefig(save_path, dpi=300)

    image = Image.from_file(save_path)
    
    plt.show()

# if there is a previous image, load it and give it to the agent
async def load_prev_image(agent: FlockAgent,inputs):
    global image
    global image_parts
    if image is not None:
        agent.description = "Draws an image by connecting the coordinates of the image parts. Improves the image by adding new parts to the previous image and/or changing them."
        agent.input = "subject_to_draw: str, prev_image: dspy.Image | result of rendered image parts, prev_image_parts: list[ImagePart] | previously generated image parts"
        inputs["prev_image"] = image
        inputs["prev_image_parts"] = image_parts



# Generate the plot

flock = Flock(local_debug=True)

# Override the evaluation strategy for this agent.
config = FlockAgentConfig(agent_type_override="ChainOfThought")

agent = FlockAgent(name="the_painter", 
                    input="subject_to_draw: str", 
                    description="Draws an image by connecting the coordinates of the image parts. 0/0 is bottom left corner - 10/10 is top right corner",
                    output="list_of_all_image_parts: list[ImagePart] | list of all image parts to draw by connecting the coordinates",
                    config=config, 
                    terminate_callback=draw_image,
                    initialize_callback=load_prev_image,
                    output_config=FlockAgentOutputConfig(
                        render_table=True,
                        theme=OutputTheme.abernathy
                    ))

# The agent hands off to itself: each run renders the parts (draw_image),
# and the next run receives the previous render via load_prev_image.
agent.hand_off = agent

result = flock.run(start_agent=agent, agents=[agent])




```

### examples\playground\02_cook_book\project_manager.py

- **Lines**: 112
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Tutorial Example: Multi-Agent Chain for Software Project Scaffolding

In this example, we build a chain of Flock agents that collaborate to scaffold a software project.
The workflow is as follows:

  1. **idea_agent:**  
     Takes a simple query and returns a fun software project idea.

  2. **project_plan_agent:**  
     Uses the software project idea to generate additional project details such as:
       - A catchy project name
       - A project pitch
       - A recommended tech stack
       - A project implementation plan

  3. **readme_agent:**  
     Consumes the outputs of the project_plan_agent to produce a readme file.

  4. **issue_agent:**  
     Uses the readme and additional project details to create GitHub issues and files.

Each agent is declared using a simple input/output signature, and the chain is established via the `hand_off` property.
Flock manages the registration and execution of agents. In this example, we run the workflow in local debug mode.

Let's see how it all comes together!
"""



import asyncio
from dataclasses import dataclass


from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent
from flock.core.logging.formatters.base_formatter import FormatterOptions
from flock.core.logging.formatters.rich_formatters import RichTables
from flock.core.tools import basic_tools
from flock.core.tools.dev_tools import github


@dataclass
class Features:
    """One software feature: a title, a description, and its acceptance criteria."""

    title: str
    description: str
    acceptance_criteria: str


async def main():
    """Scaffold a software project via a chain of agents.

    Chain wired below: idea_agent -> project_plan_agent -> readme_agent
    -> feature_agent.  NOTE(review): issue_agent is registered but no
    hand_off points to it, so it is never reached by this chain —
    confirm whether that is intentional.
    """

    flock = Flock(local_debug=True, output_formatter=FormatterOptions(formatter=RichTables, wait_for_input=False, settings={}),enable_logging=True)
    
    # Turns a free-form query into a single software project idea.
    idea_agent = FlockAgent(
        name="idea_agent",
        input="query",
        output="software_project_idea",
        tools=[basic_tools.web_search_tavily],
        use_cache=True,
    )

    # Expands the idea into a name, pitch, tech stack and implementation plan.
    project_plan_agent = FlockAgent(
        name="project_plan_agent",
        input="software_project_idea",
        output="catchy_project_name, project_pitch, techstack, project_implementation_plan",
        tools=[basic_tools.web_search_tavily],
        use_cache=True,
    )

    # Produces the README from the project plan details.
    readme_agent = FlockAgent(
        name="readme_agent",
        input="catchy_project_name, project_pitch, techstack, project_implementation_plan",
        output="readme",
        tools=[github.upload_readme],
        use_cache=True,
    )   

    # Derives a feature list from the README and the plan.
    feature_agent = FlockAgent(
        name="feature_agent",
        input="readme, catchy_project_name, project_pitch, techstack, project_implementation_plan",
        output="features : list[Features]",
        tools=[github.create_user_stories_as_github_issue, github.create_files],
        use_cache=True,
    )   

    # Creates GitHub issues and files for a feature (not chained — see note above).
    issue_agent = FlockAgent(
        name="issue_agent",
        input="current_feature, readme, techstack, project_implementation_plan, all_feature_titles",
        output="user_stories_on_github, files_on_github",
        tools=[github.create_user_stories_as_github_issue, github.create_files],
        use_cache=True,
    )   

    idea_agent.hand_off = project_plan_agent
    project_plan_agent.hand_off = readme_agent
    readme_agent.hand_off = feature_agent


    flock.add_agent(idea_agent)
    flock.add_agent(project_plan_agent)
    flock.add_agent(readme_agent)
    flock.add_agent(feature_agent)
    flock.add_agent(issue_agent)

    # NOTE(review): annotated as Features here, but the other examples in
    # this repo access run results via attribute lookup on a result object —
    # confirm what run_async actually returns.
    features : Features = await flock.run_async(
        start_agent=idea_agent,
    )



if __name__ == "__main__":
    asyncio.run(main())
```

### examples\playground\02_cook_book\repo_analyzer\repo_analyzer.py

- **Lines**: 721
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Repository Analyzer

This example demonstrates how to use Flock to create a system that analyzes a repository
and generates a comprehensive knowledge database about it.
"""

import os
import sys
from pathlib import Path
from typing import List, Dict, Any

from flock.core import Flock, FlockAgent
from flock.core.tools import basic_tools

# Define custom evaluators for the agents

class RepoStructureEvaluator:
    """Custom evaluator for the repository structure analyzer agent."""

    async def evaluate(self, agent, inputs, tools):
        """Inspect the repository and collect its structure and key files.

        Args:
            agent: The agent instance (unused).
            inputs: Must contain ``repo_path``.
            tools: The available tools (unused).

        Returns:
            Dict with ``repo_name``, ``key_files``, ``file_structure``
            and ``readme_content``.
        """
        repo_path = inputs["repo_path"]

        # Derive the repository name from the last path component.
        repo_name = os.path.basename(os.path.abspath(repo_path))

        structure = get_repo_structure(repo_path)

        # Pull in the README (if any) so key-file detection can use it.
        readme_text = ""
        readme_path = os.path.join(repo_path, "README.md")
        if os.path.exists(readme_path):
            with open(readme_path, "r", encoding="utf-8", errors="ignore") as f:
                readme_text = f.read()

        return {
            "repo_name": repo_name,
            "key_files": identify_key_files(structure, readme_text),
            "file_structure": structure,
            "readme_content": readme_text,
        }

# Define the agents

# 1. Repository Structure Analyzer
# This agent analyzes the repository structure and identifies key files
repo_structure_analyzer = FlockAgent(
    name="repo_structure_analyzer",
    input="repo_path: str | Path to the repository to analyze",
    output="""
        repo_name: str | Name of the repository,
        key_files: list[str] | List of key files to analyze in detail,
        file_structure: dict | Dictionary representing the repository structure,
        readme_content: str | Content of the README file if it exists
    """,
    tools=[basic_tools.read_from_file],
)

# Set custom evaluator: the agent's evaluate attribute is replaced with the
# bound method of the custom evaluator class defined above.
repo_structure_analyzer.evaluate = RepoStructureEvaluator().evaluate

class FileContentEvaluator:
    """Custom evaluator for the file content analyzer agent."""

    async def evaluate(self, agent, inputs, tools):
        """
        Analyze the content of key files to understand their purpose and functionality.

        Args:
            agent: The agent instance (unused)
            inputs: The input parameters (requires "repo_path" and "key_files")
            tools: The available tools (unused)

        Returns:
            Dictionary with "file_analyses", "core_components" and "key_concepts"
        """
        repo_path = inputs["repo_path"]
        key_files = inputs["key_files"]

        file_analyses = {}
        core_components = []
        key_concepts = []

        # Analyze each key file
        for file_path in key_files:
            full_path = os.path.join(repo_path, file_path)
            if not os.path.exists(full_path):
                continue

            try:
                with open(full_path, "r", encoding="utf-8", errors="ignore") as f:
                    content = f.read()

                # Basic analysis of the file
                analysis = self._analyze_file(file_path, content)
                file_analyses[file_path] = analysis

                # Classes and modules become core components
                if analysis["type"] == "class" or analysis["type"] == "module":
                    component = {
                        "name": analysis["name"],
                        "description": analysis["summary"],
                        "detailed_description": analysis["description"],
                        "file_path": file_path,
                        "features": analysis["features"]
                    }
                    core_components.append(component)

                # Collect key concepts, de-duplicated by name
                for concept in analysis["concepts"]:
                    key_concept = {
                        "name": concept["name"],
                        "description": concept["description"],
                        "detailed_description": concept["detailed_description"]
                    }

                    # Check if the concept already exists
                    if not any(c["name"] == key_concept["name"] for c in key_concepts):
                        key_concepts.append(key_concept)
            except Exception as e:
                # Record the failure instead of aborting the whole run
                file_analyses[file_path] = {
                    "error": str(e),
                    "type": "unknown",
                    "name": os.path.basename(file_path),
                    "summary": "Error analyzing file",
                    "description": f"Error analyzing file: {str(e)}",
                    "features": [],
                    "concepts": []
                }

        return {
            "file_analyses": file_analyses,
            "core_components": core_components,
            "key_concepts": key_concepts
        }

    def _analyze_file(self, file_path: str, content: str) -> Dict[str, Any]:
        """
        Analyze a file to understand its purpose and functionality.

        Args:
            file_path: Path to the file
            content: Content of the file

        Returns:
            Dictionary with the file analysis results
        """
        # Imported once here for every branch below. Previously `import re`
        # only happened inside the "class" and markdown branches, so the
        # `elif "__init__.py"` branch raised NameError at re.search for any
        # package __init__.py that contained no class.
        import re

        # Determine the file type
        file_type = "unknown"
        name = os.path.basename(file_path)
        summary = ""
        description = ""
        features = []
        concepts = []

        # Extract the file extension
        _, ext = os.path.splitext(file_path)

        if ext == ".py":
            file_type = "python"

            # Check if it's a class definition
            if "class " in content:
                file_type = "class"

                # Extract class name
                class_match = re.search(r"class\s+(\w+)", content)
                if class_match:
                    name = class_match.group(1)

                # Extract docstring
                docstring_match = re.search(r'class\s+\w+.*?:\s*?"""(.*?)"""', content, re.DOTALL)
                if docstring_match:
                    docstring = docstring_match.group(1).strip()
                    lines = docstring.split("\n")
                    if lines:
                        summary = lines[0].strip()
                        description = "\n".join(lines[1:]).strip()

                # Extract methods as features. Note the condition keeps
                # public methods AND dunder methods (the second clause
                # re-admits names starting with "__").
                method_matches = re.finditer(r"def\s+(\w+)\s*\(", content)
                for match in method_matches:
                    method_name = match.group(1)
                    if not method_name.startswith("_") or method_name.startswith("__"):
                        features.append(method_name)

            # Check if it's a module
            elif "__init__.py" in file_path:
                file_type = "module"

                # Module name is the containing package directory
                name = os.path.basename(os.path.dirname(file_path))

                # Extract docstring
                docstring_match = re.search(r'"""(.*?)"""', content, re.DOTALL)
                if docstring_match:
                    docstring = docstring_match.group(1).strip()
                    lines = docstring.split("\n")
                    if lines:
                        summary = lines[0].strip()
                        description = "\n".join(lines[1:]).strip()

                # Extract public functions as features
                function_matches = re.finditer(r"def\s+(\w+)\s*\(", content)
                for match in function_matches:
                    function_name = match.group(1)
                    if not function_name.startswith("_"):
                        features.append(function_name)

                # Extract classes as features
                class_matches = re.finditer(r"class\s+(\w+)", content)
                for match in class_matches:
                    class_name = match.group(1)
                    features.append(class_name)

        elif ext == ".md":
            file_type = "markdown"

            # Extract title from the first "# " heading
            title_match = re.search(r"#\s+(.*)", content)
            if title_match:
                name = title_match.group(1).strip()

            # Extract summary: first non-heading, non-empty line
            lines = content.split("\n")
            for line in lines:
                if line.strip() and not line.startswith("#"):
                    summary = line.strip()
                    break

            # The full markdown content serves as the description
            description = content

            # Extract each "## " section as a concept
            concept_matches = re.finditer(r"##\s+(.*)", content)
            for match in concept_matches:
                concept_name = match.group(1).strip()
                concept_start = match.end()

                # Find the end of the concept (next "## " heading or EOF)
                concept_end = len(content)
                next_match = re.search(r"##\s+", content[concept_start:])
                if next_match:
                    concept_end = concept_start + next_match.start()

                concept_content = content[concept_start:concept_end].strip()

                # Extract the first paragraph as the description
                concept_description = ""
                concept_lines = concept_content.split("\n")
                for line in concept_lines:
                    if line.strip():
                        concept_description = line.strip()
                        break

                concepts.append({
                    "name": concept_name,
                    "description": concept_description,
                    "detailed_description": concept_content
                })

        # If we couldn't extract a summary, use the first non-empty line
        if not summary:
            lines = content.split("\n")
            for line in lines:
                if line.strip():
                    summary = line.strip()
                    break

        # If we couldn't extract a description, use the first few lines
        if not description:
            lines = content.split("\n")
            description = "\n".join(lines[:10])

        # If we couldn't extract any features, use the file name
        if not features:
            features.append(name)

        # If we couldn't extract any concepts, create a default one
        if not concepts:
            concepts.append({
                "name": name,
                "description": summary,
                "detailed_description": description
            })

        return {
            "type": file_type,
            "name": name,
            "summary": summary,
            "description": description,
            "features": features,
            "concepts": concepts
        }

# 2. File Content Analyzer
# This agent analyzes the content of key files to understand their purpose and functionality
file_content_analyzer = FlockAgent(
    name="file_content_analyzer",
    input="""
        repo_path: str | Path to the repository,
        key_files: list[str] | List of key files to analyze
    """,
    output="""
        file_analyses: dict | Dictionary mapping file paths to their analysis,
        core_components: list[dict] | List of core components identified in the codebase,
        key_concepts: list[dict] | List of key concepts identified in the codebase
    """,
    tools=[basic_tools.read_from_file],
)

# Set custom evaluator: the agent's evaluate attribute is replaced with the
# bound method of the custom evaluator class defined above.
file_content_analyzer.evaluate = FileContentEvaluator().evaluate

class DocumentationGeneratorEvaluator:
    """Custom evaluator for the documentation generator agent."""
    
    async def evaluate(self, agent, inputs, tools):
        """
        Generate comprehensive documentation based on the repository analysis.
        
        Args:
            agent: The agent instance (unused)
            inputs: The input parameters
            tools: The available tools (unused)
            
        Returns:
            Dictionary with the documentation files
        """
        # NOTE(review): repo_path, file_structure, readme_content and
        # file_analyses are read but never used below — the reads only
        # enforce that the keys are present in the inputs.
        repo_path = inputs["repo_path"]
        repo_name = inputs["repo_name"]
        file_structure = inputs["file_structure"]
        readme_content = inputs["readme_content"]
        file_analyses = inputs["file_analyses"]
        core_components = inputs["core_components"]
        key_concepts = inputs["key_concepts"]
        
        # Create the documentation structure
        documentation_files = create_documentation_structure(repo_name, core_components, key_concepts)
        
        return {
            "documentation_files": documentation_files
        }

# 3. Documentation Generator
# This agent generates comprehensive documentation based on the repository analysis
documentation_generator = FlockAgent(
    name="documentation_generator",
    input="""
        repo_path: str | Path to the repository,
        repo_name: str | Name of the repository,
        file_structure: dict | Dictionary representing the repository structure,
        readme_content: str | Content of the README file if it exists,
        file_analyses: dict | Dictionary mapping file paths to their analysis,
        core_components: list[dict] | List of core components identified in the codebase,
        key_concepts: list[dict] | List of key concepts identified in the codebase
    """,
    output="""
        documentation_files: dict | Dictionary mapping file paths to their content
    """,
    tools=[basic_tools.save_to_file],
)

# Set custom evaluator: the agent's evaluate attribute is replaced with the
# bound method of the custom evaluator class defined above.
documentation_generator.evaluate = DocumentationGeneratorEvaluator().evaluate

# Set up the agent chain: structure analysis -> content analysis -> docs
repo_structure_analyzer.hand_off = file_content_analyzer
file_content_analyzer.hand_off = documentation_generator

# Alternative way to set up the agent chain (as shown in examples/02_cook_book/long_research_no_handoff.py)
# This would be used if we wanted to do custom processing between agent runs
# For example:
"""
# Instead of using hand_off, we could do:
result = flock.run(
    start_agent=repo_structure_analyzer,
    input={"repo_path": repo_path}
)

# Then process the result and run the next agent
file_analysis_result = flock.run(
    start_agent=file_content_analyzer,
    input={
        "repo_path": repo_path,
        "key_files": result["key_files"]
    }
)

# Then process the result and run the next agent
documentation_result = flock.run(
    start_agent=documentation_generator,
    input={
        "repo_path": repo_path,
        "repo_name": result["repo_name"],
        "file_structure": result["file_structure"],
        "readme_content": result["readme_content"],
        "file_analyses": file_analysis_result["file_analyses"],
        "core_components": file_analysis_result["core_components"],
        "key_concepts": file_analysis_result["key_concepts"]
    }
)
"""

# Helper functions for the agents

def get_repo_structure(repo_path: str) -> Dict[str, Any]:
    """Walk *repo_path* and map each directory (relative path) to its files.

    Hidden entries and common virtual-environment / cache directories are
    skipped; directories that contain no files are omitted from the result.

    Args:
        repo_path: Path to the repository.

    Returns:
        Dict mapping a relative directory path ('' for the root) to the
        list of file names directly inside it.
    """
    structure: Dict[str, Any] = {}

    skip_dirs = ('venv', 'env', '__pycache__')
    for root, dirs, files in os.walk(repo_path):
        # Prune hidden and environment directories in-place so os.walk
        # never descends into them.
        dirs[:] = [d for d in dirs if not d.startswith('.') and d not in skip_dirs]
        visible_files = [f for f in files if not f.startswith('.')]

        rel_path = os.path.relpath(root, repo_path)
        if rel_path == '.':
            rel_path = ''

        if visible_files:
            structure[rel_path] = visible_files

    return structure

def identify_key_files(repo_structure: Dict[str, Any], readme_content: str = "") -> List[str]:
    """
    Identify key files in the repository based on the structure and README content.

    The selection rules are unchanged, but each file is now appended at most
    once: previously a file such as ``setup.py`` or ``__init__.py`` matched
    several overlapping rules and appeared multiple times in the result,
    causing it to be analyzed repeatedly downstream.

    Args:
        repo_structure: Dictionary representing the repository structure
            (relative directory path -> list of file names)
        readme_content: Content of the README file if it exists (currently
            not used by the heuristics below)

    Returns:
        List of up to 20 key file paths to analyze in detail
    """
    key_files = []

    config_files = {'setup.py', 'pyproject.toml', 'requirements.txt'}

    for path, files in repo_structure.items():
        for file in files:
            file_path = os.path.join(path, file) if path else file

            # Python sources (including package __init__.py), excluding tests
            if file.endswith('.py') and not file.startswith('test_'):
                key_files.append(file_path)
            # Project configuration files
            elif file in config_files:
                key_files.append(file_path)
            # Documentation files other than the README
            elif file.endswith('.md') and file != 'README.md':
                key_files.append(file_path)

    # Limit to a reasonable number of files
    return key_files[:20]  # Limit to 20 key files

def create_documentation_structure(repo_name: str, core_components: List[Dict], key_concepts: List[Dict]) -> Dict[str, str]:
    """
    Create a documentation structure based on the repository analysis.
    
    Args:
        repo_name: Name of the repository
        core_components: List of core components identified in the codebase
        key_concepts: List of key concepts identified in the codebase
        
    Returns:
        Dictionary mapping file paths to their content
    """
    docs = {}

    # Join generated lines with "\n" instead of os.linesep: these documents
    # are written in text mode later (see main), where "\n" is translated to
    # the platform line ending — os.linesep would yield "\r\r\n" on Windows.
    nl = "\n"
    
    # Create README.md
    docs["README.md"] = f"""# {repo_name} Documentation

This folder contains comprehensive documentation about the {repo_name} framework.

## Purpose

This documentation serves as a knowledge base for understanding the {repo_name} framework, its architecture, components, features, and usage patterns. It is designed to provide a complete overview that can be quickly consumed to gain a deep understanding of the framework.

This is a living document that should be continuously updated. Whenever new information about the framework is discovered that is not yet included in this documentation, it should be added to the appropriate files. This ensures that the documentation remains comprehensive and up-to-date.

The `tasks` subfolder contains a log of all activities performed related to this documentation, which helps track what has been done and what still needs to be done.

## Contents

- [index.md](index.md) - Table of contents and overview of the documentation
- [overview.md](overview.md) - High-level overview of the framework
- [core-components.md](core-components.md) - Detailed information about the core components
- [architecture.md](architecture.md) - Information about the architecture and design decisions
- [features.md](features.md) - Key features
- [examples.md](examples.md) - Example usage patterns
- [file_lookup.md](file_lookup.md) - Links between key concepts and code files
- [tasks/](tasks/) - Log of all activities performed related to this documentation

## How to Use This Documentation

Start with the [index.md](index.md) file, which provides a table of contents and overview of the documentation. From there, you can navigate to specific topics of interest.

For a quick understanding, read the [overview.md](overview.md) file, which provides a high-level overview of the framework.

For more detailed information about specific aspects, refer to the corresponding documentation files.
"""
    
    # Create index.md
    docs["index.md"] = f"""# {repo_name} Framework Documentation

This documentation provides a comprehensive overview of the {repo_name} framework.

## Table of Contents

1. [Overview](overview.md)
   - Key Concepts
   - Core Components
   - Architecture

2. [Core Components](core-components.md)
   {nl.join([f"   - {component['name']}" for component in core_components])}

3. [Architecture](architecture.md)
   - High-Level Architecture
   - Component Relationships
   - Design Decisions

4. [Features](features.md)
   {nl.join([f"   - {concept['name']}" for concept in key_concepts])}

5. [Examples](examples.md)
   - Basic Example
   - Advanced Examples

6. [File Lookup](file_lookup.md)
   - Core Components
   - Key Files
   - Examples
"""
    
    # Create overview.md
    docs["overview.md"] = f"""# {repo_name} Framework Overview

This document provides a high-level overview of the {repo_name} framework.

## Key Concepts

{nl.join([f"- **{concept['name']}**: {concept['description']}" for concept in key_concepts])}

## Core Components

{nl.join([f"- **{component['name']}**: {component['description']}" for component in core_components])}

## Architecture

The {repo_name} framework is designed with a modular architecture that separates concerns and allows for flexibility and extensibility.
"""
    
    # Create core-components.md
    docs["core-components.md"] = f"""# {repo_name} Core Components

This document provides detailed information about the core components of the {repo_name} framework.

{nl.join([f"## {component['name']}{nl}{nl}{component['detailed_description']}{nl}" for component in core_components])}
"""
    
    # Create file_lookup.md
    docs["file_lookup.md"] = f"""# {repo_name} Framework Code File Lookup

This document provides links between key concepts in the {repo_name} framework and the corresponding code files where they are implemented.

## Core Components

{nl.join([f"### {component['name']}{nl}{nl}- **Implementation**: {component['file_path']}{nl}- **Key Features**:{nl}{nl.join(['  - ' + feature for feature in component['features']])}{nl}" for component in core_components])}
"""
    
    # Create tasks folder and task_log.md
    docs["tasks/task_log.md"] = f"""# Task Log

This file logs all tasks performed related to the {repo_name} framework documentation.

## Initial Documentation Creation

1. Created the documentation folder as a knowledge base for {repo_name} framework information.
2. Analyzed the {repo_name} framework by examining key source files.
3. Created comprehensive documentation files:
   - overview.md - High-level overview of the framework
   - core-components.md - Detailed information about the core components
   - architecture.md - Information about the architecture and design decisions
   - features.md - Key features
   - examples.md - Example usage patterns
   - file_lookup.md - Links between key concepts and code files
   - index.md - Table of contents and overview of the documentation
   - README.md - Introduction to the documentation
4. Created a tasks subfolder to protocol all activities.

### Future Tasks

1. Continue to update documentation as new information is discovered.
2. Add more detailed information about specific components as needed.
3. Keep the file_lookup.md updated with new files and components.
4. Add more examples and use cases as they are discovered.
"""
    
    # Create empty files for other documentation
    docs["architecture.md"] = f"""# {repo_name} Architecture

This document provides an overview of the {repo_name} framework's architecture and design decisions.

## High-Level Architecture

The {repo_name} framework is designed with a modular architecture that separates concerns and allows for flexibility and extensibility.

## Component Relationships

The main components of the {repo_name} framework and their relationships.

## Design Decisions

Key design decisions that shaped the {repo_name} framework.
"""
    
    docs["features.md"] = f"""# {repo_name} Key Features

This document outlines the key features of the {repo_name} framework.

{nl.join([f"## {concept['name']}{nl}{nl}{concept['detailed_description']}{nl}" for concept in key_concepts])}
"""
    
    docs["examples.md"] = f"""# {repo_name} Examples

This document provides examples of how to use the {repo_name} framework for various use cases.

## Basic Example

A simple example of using the {repo_name} framework.

## Advanced Examples

More complex examples of using the {repo_name} framework.
"""
    
    return docs

def main():
    """Run the repository analyzer pipeline from the command line.

    Usage: python repo_analyzer.py <repo_path> [output_path]

    Builds the three-agent Flock pipeline, runs it against the given
    repository, and writes every generated documentation file beneath
    ``output_path`` (default: <repo_path>/docs/generated).
    """
    if len(sys.argv) < 2:
        print("Usage: python repo_analyzer.py <repo_path> [output_path]")
        sys.exit(1)

    repo_path = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else os.path.join(repo_path, "docs", "generated")

    # Create the Flock orchestrator and register the agent chain.
    flock = Flock(model="openai/gpt-4o")
    flock.add_agent(repo_structure_analyzer)
    flock.add_agent(file_content_analyzer)
    flock.add_agent(documentation_generator)

    # Run the pipeline; hand_off chaining carries results between the agents.
    result = flock.run(
        start_agent=repo_structure_analyzer,
        input={"repo_path": repo_path}
    )

    # Ensure the output directory (and the tasks/ log subfolder) exists.
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(os.path.join(output_path, "tasks"), exist_ok=True)

    # Save the documentation files.
    for file_path, content in result["documentation_files"].items():
        full_path = os.path.join(output_path, file_path)

        # Create intermediate directories for nested doc paths (e.g. tasks/).
        os.makedirs(os.path.dirname(full_path), exist_ok=True)

        # Fix: write UTF-8 explicitly; the platform default encoding (e.g.
        # cp1252 on Windows) can raise UnicodeEncodeError on non-ASCII
        # characters in LLM-generated documentation.
        with open(full_path, "w", encoding="utf-8") as f:
            f.write(content)

    print(f"Documentation generated successfully in {output_path}")

if __name__ == "__main__":
    main()
```

### examples\playground\02_cook_book\repo_analyzer\repo_analyzer_llm.py

- **Lines**: 285
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Repository Analyzer with LLM Support

This example demonstrates how to use Flock to create a system that analyzes a repository
and generates a comprehensive knowledge database about it, using LLMs for the analysis.
"""

import os
import sys
import json
from pathlib import Path
from typing import List, Dict, Any

from flock.core import Flock, FlockAgent
from flock.core.tools import basic_tools
import litellm

# Define the agents

# Custom tool to get repository structure
def get_repo_files_tool(repo_path: str) -> Dict[str, List[str]]:
    """
    Get a flat list of all files in the repository.

    Args:
        repo_path: Path to the repository

    Returns:
        Dictionary mapping directory paths (relative to the repo root) to
        lists of file names found in each directory
    """
    # Thin wrapper so the traversal logic in get_repo_structure (defined
    # further down in this file) can be registered as an agent tool.
    return get_repo_structure(repo_path)

# Custom tool to read file content
def read_file_tool(repo_path: str, file_path: str) -> str:
    """
    Read the content of a file in the repository.

    Args:
        repo_path: Path to the repository
        file_path: Path to the file relative to the repository root

    Returns:
        Content of the file, or an "Error reading file: ..." string if it
        cannot be read
    """
    target = Path(repo_path) / file_path
    try:
        # Undecodable bytes are dropped rather than raising, since the result
        # is fed to an LLM and best-effort text beats a hard failure.
        return target.read_text(encoding="utf-8", errors="ignore")
    except Exception as e:
        # Surface the problem as a string so the calling agent can react.
        return f"Error reading file: {str(e)}"

# 1. Repository Structure Analyzer
# This agent analyzes the repository structure and identifies key files to analyze.
# Declarative agent: the `input`/`output` strings define its data contract and
# the description acts as its instructions; no imperative logic lives here.
repo_structure_analyzer = FlockAgent(
    name="repo_structure_analyzer",
    model="openai/gpt-4o",
    description="""
    You are a repository structure analyzer. Your task is to analyze the structure of a repository
    and identify key files that should be analyzed in detail to understand the codebase.
    
    You will be given a list of files in the repository. You should identify the most important
    files that would help understand the codebase, such as:
    - Main module files (__init__.py)
    - Core implementation files
    - Configuration files
    - Documentation files
    
    Focus on files that are likely to contain important information about the architecture,
    components, and functionality of the codebase.
    """,
    # Single input: the path of the repository to inspect.
    input="repo_path: str | Path to the repository to analyze",
    # These outputs are consumed downstream by file_content_analyzer and
    # documentation_generator via the hand_off chain set up below.
    output="""
        repo_name: str | Name of the repository,
        key_files: list[str] | List of key files to analyze in detail,
        file_structure: dict | Dictionary representing the repository structure,
        readme_content: str | Content of the README file if it exists
    """,
    # Tools the agent may call while reasoning: directory listing + file reads.
    tools=[get_repo_files_tool, read_file_tool],
)

# 2. File Content Analyzer
# This agent analyzes the content of key files to understand their purpose and functionality.
file_content_analyzer = FlockAgent(
    name="file_content_analyzer",
    model="openai/gpt-4o",
    description="""
    You are a file content analyzer. Your task is to analyze the content of key files in a repository
    to understand their purpose and functionality.
    
    For each file, you should:
    1. Identify the purpose of the file
    2. Extract core components (classes, functions, etc.)
    3. Identify key concepts and their relationships
    
    Your analysis should be detailed and focus on understanding the architecture and design of the codebase.
    
    For each core component you identify, provide:
    - Name: The name of the component
    - Description: A brief description of what the component does
    - Detailed Description: A more detailed explanation of the component
    - File Path: The path to the file where the component is defined
    - Features: A list of key features or methods of the component
    
    For each key concept you identify, provide:
    - Name: The name of the concept
    - Description: A brief description of the concept
    - Detailed Description: A more detailed explanation of the concept
    """,
    # Inputs match the outputs produced by repo_structure_analyzer (hand_off).
    input="""
        repo_path: str | Path to the repository,
        key_files: list[str] | List of key files to analyze
    """,
    # Outputs feed documentation_generator, the last agent in the chain.
    output="""
        file_analyses: dict | Dictionary mapping file paths to their analysis,
        core_components: list[dict] | List of core components identified in the codebase,
        key_concepts: list[dict] | List of key concepts identified in the codebase
    """,
    tools=[read_file_tool],
)

# 3. Documentation Generator
# This agent generates comprehensive documentation based on the repository analysis.
documentation_generator = FlockAgent(
    name="documentation_generator",
    model="openai/gpt-4o",
    description="""
    You are a documentation generator. Your task is to generate comprehensive documentation
    for a codebase based on the analysis of its structure and content.
    
    You should create a set of documentation files that provide a complete overview of the codebase,
    including:
    - Overview of the codebase
    - Core components and their relationships
    - Key concepts and features
    - Architecture and design decisions
    - Examples of usage
    
    The documentation should be organized in a way that makes it easy to navigate and understand.
    
    Create the following documentation files:
    1. README.md - Introduction to the documentation
    2. index.md - Table of contents and overview of the documentation
    3. overview.md - High-level overview of the framework
    4. core-components.md - Detailed information about the core components
    5. architecture.md - Information about the architecture and design decisions
    6. features.md - Key features of the framework
    7. examples.md - Example usage patterns
    8. file_lookup.md - Links between key concepts and code files
    9. tasks/task_log.md - Log of all activities performed related to this documentation
    
    The documentation should be comprehensive, well-organized, and easy to navigate.
    """,
    # Aggregated inputs: structure data from agent 1 plus analyses from agent 2.
    input="""
        repo_path: str | Path to the repository,
        repo_name: str | Name of the repository,
        file_structure: dict | Dictionary representing the repository structure,
        readme_content: str | Content of the README file if it exists,
        file_analyses: dict | Dictionary mapping file paths to their analysis,
        core_components: list[dict] | List of core components identified in the codebase,
        key_concepts: list[dict] | List of key concepts identified in the codebase
    """,
    # main() writes this mapping to disk under the chosen output directory.
    output="""
        documentation_files: dict | Dictionary mapping file paths to their content
    """,
    tools=[read_file_tool, basic_tools.save_to_file],
)

# Set up the agent chain.
# hand_off routes each agent's output into the next agent's input, forming the
# pipeline: structure analysis -> content analysis -> documentation generation.
repo_structure_analyzer.hand_off = file_content_analyzer
file_content_analyzer.hand_off = documentation_generator

# Alternative way to set up the agent chain (as shown in examples/02_cook_book/long_research_no_handoff.py)
# This would be used if we wanted to do custom processing between agent runs
# For example:
"""
# Instead of using hand_off, we could do:
result = flock.run(
    start_agent=repo_structure_analyzer,
    input={"repo_path": repo_path}
)

# Then process the result and run the next agent
file_analysis_result = flock.run(
    start_agent=file_content_analyzer,
    input={
        "repo_path": repo_path,
        "key_files": result["key_files"]
    }
)

# Then process the result and run the next agent
documentation_result = flock.run(
    start_agent=documentation_generator,
    input={
        "repo_path": repo_path,
        "repo_name": result["repo_name"],
        "file_structure": result["file_structure"],
        "readme_content": result["readme_content"],
        "file_analyses": file_analysis_result["file_analyses"],
        "core_components": file_analysis_result["core_components"],
        "key_concepts": file_analysis_result["key_concepts"]
    }
)
"""

# Helper functions for the agents

def get_repo_structure(repo_path: str) -> Dict[str, Any]:
    """
    Recursively get the structure of a repository.

    Walks the tree once, pruning hidden entries and common noise directories
    (virtual environments, bytecode caches), and records the remaining files
    keyed by their directory path relative to the repository root.

    Args:
        repo_path: Path to the repository

    Returns:
        Dictionary mapping relative directory paths ('' for the root) to the
        list of non-hidden files they contain
    """
    skip_dirs = {"venv", "env", "__pycache__"}
    structure: Dict[str, Any] = {}

    for current_dir, subdirs, filenames in os.walk(repo_path):
        # Prune in place so os.walk never descends into hidden or skipped dirs.
        subdirs[:] = [
            d for d in subdirs if not d.startswith(".") and d not in skip_dirs
        ]
        visible_files = [f for f in filenames if not f.startswith(".")]

        # Key entries by path relative to the repo root ('' for the root).
        rel_path = os.path.relpath(current_dir, repo_path)
        if rel_path == ".":
            rel_path = ""

        # Directories without (visible) files are omitted entirely.
        if visible_files:
            structure[rel_path] = visible_files

    return structure

def main():
    """Run the LLM-backed repository analyzer from the command line.

    Usage: python repo_analyzer_llm.py <repo_path> [output_path]

    Builds the three-agent Flock pipeline, runs it against the given
    repository, and writes every generated documentation file beneath
    ``output_path`` (default: <repo_path>/docs/generated).
    """
    if len(sys.argv) < 2:
        print("Usage: python repo_analyzer_llm.py <repo_path> [output_path]")
        sys.exit(1)

    repo_path = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else os.path.join(repo_path, "docs", "generated")

    # Create the Flock orchestrator and register the agent chain.
    flock = Flock(model="openai/gpt-4o")
    flock.add_agent(repo_structure_analyzer)
    flock.add_agent(file_content_analyzer)
    flock.add_agent(documentation_generator)

    # Run the pipeline; hand_off chaining carries results between the agents.
    result = flock.run(
        start_agent=repo_structure_analyzer,
        input={"repo_path": repo_path}
    )

    # Ensure the output directory (and the tasks/ log subfolder) exists.
    os.makedirs(output_path, exist_ok=True)
    os.makedirs(os.path.join(output_path, "tasks"), exist_ok=True)

    # Save the documentation files.
    for file_path, content in result["documentation_files"].items():
        full_path = os.path.join(output_path, file_path)

        # Create intermediate directories for nested doc paths (e.g. tasks/).
        os.makedirs(os.path.dirname(full_path), exist_ok=True)

        # Fix: write UTF-8 explicitly; the platform default encoding (e.g.
        # cp1252 on Windows) can raise UnicodeEncodeError on non-ASCII
        # characters in LLM-generated documentation.
        with open(full_path, "w", encoding="utf-8") as f:
            f.write(content)

    print(f"Documentation generated successfully in {output_path}")

if __name__ == "__main__":
    main()
```

### examples\playground\02_cook_book\save_and_load\load_01.py

- **Lines**: 14
- **Last modified**: 2025-02-28 23:13:59

```py


from flock.core import FlockAgent
from flock.core.logging.formatters.pprint_formatter import PrettyPrintFormatter


# Rehydrate the agent serialized by save_01.py; the JSON carries the
# declarative spec (name, input, output), so no class definition is needed.
loaded_bloggy = FlockAgent.load_from_file("examples/data/bloggy.json")

# Run the restored agent directly and pretty-print the structured result.
result = loaded_bloggy.run(inputs={"blog_idea": "Idea for a blog post."})
PrettyPrintFormatter().display_data(result)




```

### examples\playground\02_cook_book\save_and_load\load_02.py

- **Lines**: 27
- **Last modified**: 2025-02-28 23:13:59

```py
from typing import Any, Dict
from flock.core import FlockAgent
from flock.core.logging.formatters.pprint_formatter import PrettyPrintFormatter

# Load agents with custom logic - everything passed to the constructor
# (including callbacks, see save_02.py) is restored on load.
custom_bloggy = FlockAgent.load_from_file("examples/data/custom_bloggy.json")

result = custom_bloggy.run(inputs={"blog_idea": "Idea for a blog post."})
PrettyPrintFormatter().display_data(result)

# Inherited FlockAgents have to load from their own class
# so that the custom logic is loaded as well.
# This won't work: loading a subclass's JSON through the base class loses the
# overridden evaluate(), so the run below is expected to fail.
inherited_agent = FlockAgent.load_from_file("examples/data/inherited_bloggy.json")

result = inherited_agent.run(inputs={"blog_idea": "This will fail"})
PrettyPrintFormatter().display_data(result)

# This will work: load through the subclass so its evaluate() override applies.
class MyInheritedAgent(FlockAgent):
    # Custom logic lives in code, so it is present after load_from_file.
    async def evaluate(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        return {"result": "Hello, world!"}

inherited_agent = MyInheritedAgent.load_from_file("examples/data/inherited_bloggy.json")

result = inherited_agent.run(inputs={"blog_idea": "Idea for a blog post."})
PrettyPrintFormatter().display_data(result)
```

### examples\playground\02_cook_book\save_and_load\load_03.py

- **Lines**: 10
- **Last modified**: 2025-02-28 23:13:59

```py


from flock.core import FlockAgent
from flock.core.logging.formatters.pprint_formatter import PrettyPrintFormatter

# chatty is a rather complex agent - no problem for Flock to load it.
# NOTE(review): the variable is named "loaded_bloggy" but the file loaded is
# chatty.json; presumably copy-pasted from load_01.py - confirm intent.
loaded_bloggy = FlockAgent.load_from_file("examples/data/chatty.json")

# chatty takes no declared inputs, so it is run with an empty inputs dict.
result = loaded_bloggy.run(inputs={})
PrettyPrintFormatter().display_data(result)
```

### examples\playground\02_cook_book\save_and_load\load_04.py

- **Lines**: 9
- **Last modified**: 2025-02-28 23:13:59

```py


from flock.core.flock import Flock


# Restore a whole Flock (orchestrator plus all registered agents) from JSON,
# as written by save_03.py.
flock = Flock.load_from_file("flock.json")
# flock.agents maps agent names to the rehydrated FlockAgent instances.
agent = flock.agents

# Kick off the workflow at the restored "bloggy" agent.
flock.run(start_agent=agent["bloggy"], input={"blog_idea": "A blog about cats"})
```

### examples\playground\02_cook_book\save_and_load\save_01.py

- **Lines**: 13
- **Last modified**: 2025-02-28 23:13:59

```py


from typing import Any, Dict, Type
from flock.core import FlockAgent
from flock.core.logging.formatters.pprint_formatter import PrettyPrintFormatter


# Save and load agents

# A purely declarative agent: only the name/input/output spec needs to be
# serialized, so the JSON round-trips without any code (see load_01.py).
bloggy = FlockAgent(name="bloggy", input="blog_idea", output="funny_blog_title, blog_headers") 
bloggy.save_to_file("examples/data/bloggy.json")


```

### examples\playground\02_cook_book\save_and_load\save_02.py

- **Lines**: 30
- **Last modified**: 2025-02-28 23:13:59

```py


from typing import Any, Dict, Type
from flock.core import FlockAgent
from flock.core.logging.formatters.pprint_formatter import PrettyPrintFormatter


# Save and load agents with custom logic

# Callback passed to the agent constructor as evaluate_callback below.
async def evaluate(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
    return {"result": "Hello, world!"}

# Lifecycle hook passed as terminate_callback below.
async def on_terminate(agent,input,output):
    print(f"Agent {agent.name} has been terminated.")

# Callbacks are passed through the constructor, so (per load_02.py) they are
# restored when the agent is loaded back from JSON.
custom_bloggy = FlockAgent(name="custom_bloggy", 
                              input="blog_idea", 
                              output="result", 
                              terminate_callback=on_terminate,
                              evaluate_callback=evaluate)
custom_bloggy.save_to_file("examples/data/custom_bloggy.json")


# A subclass override is NOT serialized as code: it must be loaded back via
# MyInheritedAgent.load_from_file, not FlockAgent.load_from_file (load_02.py).
class MyInheritedAgent(FlockAgent):
    async def evaluate(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        return {"result": "Hello, world!"}

inherited_agent = MyInheritedAgent(name="inherited_bloggy")
inherited_agent.save_to_file("examples/data/inherited_bloggy.json")

```

### examples\playground\02_cook_book\save_and_load\save_03.py

- **Lines**: 40
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Title: Building Your First Flock Agent

In this example, we'll walk you through creating and running a simple Flock system with a single agent.
Flock enables you to build LLM-powered agents by simply declaring what data each agent receives and what it
produces—no more tedious prompt engineering!

What you'll learn:
  - How to set up the Flock model (using litellm; check out https://docs.litellm.ai/docs/providers for valid model IDs).
  - How to create a Flock instance that serves as the central orchestrator and context holder.
  - How to define a simple agent (named "bloggy") by declaring its input and output.
  - How to add the agent to your Flock.
  - How to run the agent workflow asynchronously in local debug mode (without needing Temporal).

The "bloggy" agent in this example is designed to take a blog idea as input and generate a funny blog title
along with a list of blog headers as output.

Let's get started!
"""

import asyncio

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent


# litellm-style model identifier used by every agent in this flock.
MODEL = "openai/gpt-4o"

# local_debug=True runs the workflow in-process (no Temporal server needed).
flock = Flock(model=MODEL, local_debug=True)


# Declarative agent: the input/output field names are the entire spec.
bloggy = FlockAgent(
    name="bloggy", 
    input="blog_idea", 
    output="funny_blog_title, blog_headers"
)
flock.add_agent(bloggy)

# Persist the entire flock (including bloggy) for load_04.py to restore.
flock.save_to_file("examples/data/flock.json")

```

### examples\playground\02_cook_book\self_improvement_with_memory.py

- **Lines**: 110
- **Last modified**: 2025-02-28 23:13:59

```py
"""
Title: Reasoning assistant with self managed memory
"""

from datetime import datetime
import warnings

from flock.core.flock_router import HandOffRequest
from flock.core.tools import basic_tools
warnings.simplefilter("error", UserWarning)
import asyncio
from dataclasses import dataclass, field

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent, FlockAgentConfig, FlockAgentMemoryConfig  # fixed typo: was "FlockAgentMemoryConfigt", but main() instantiates FlockAgentMemoryConfig()

from rich.prompt import Prompt
from rich.panel import Panel
from rich.console import Console


@dataclass
class Chat:
    """Callback helper that drives an interactive chat loop around the agent."""

    # Per-turn conversation state; entries appended in after_response are
    # {"user": ..., "assistant": ...} dicts.
    chat_history: list[dict[str, str]] = field(default_factory=list)
    user_query: str = ""
    answer_to_query: str = ""
    memory: str = ""

    # Triggers before the agent runs: prompt the user and inject the query.
    async def before_response(self, agent, inputs):
        console = Console()

        # Use a Rich-styled prompt to get user input
        self.user_query = Prompt.ask("[bold cyan]User[/bold cyan]")
        inputs["user_query"] = self.user_query

    # Triggers after the agent responds to the user query
    async def after_response(self, agent:FlockAgent, inputs, outputs):
        # Update answer and history based on the agent's outputs
        console = Console()
        self.answer_to_query = outputs["answer_to_query"]
        self.chat_history.append({"user": self.user_query, "assistant": self.answer_to_query})

        # Persist and render the agent's self-managed memory graph every turn.
        agent.save_memory_graph("chat_memory_graph.json")
        agent.export_memory_graph("chat_memory_graph.png")

        # Display the assistant's reasoning (if available) in a styled panel
        reasoning = outputs.get("reasoning", "")
        if reasoning:
            reasoning_panel = Panel(
                reasoning,
                title="[bold blue]Assistant Reasoning[/bold blue]",
                border_style="blue",
            )
            console.print(reasoning_panel)

        # Display the assistant's answer in a styled panel
        answer_panel = Panel(
            self.answer_to_query,
            title="[bold green]Assistant Answer[/bold green]",
            border_style="green",
        )
        console.print(answer_panel)

    # Triggers at handoff to the next agent: loop back to the same agent
    # until the user types "goodbye" (case-insensitive), which ends the run.
    def hand_off(self, context, result):
        if self.user_query.lower() == "goodbye":
            return None
        return HandOffRequest(next_agent="chatty")


MODEL = "openai/gpt-4o"

async def main():
    """Build the memory-enabled chat agent and run the interactive loop."""

    chat_helper = Chat()
    # local_debug=True keeps execution in-process (no Temporal server).
    flock = Flock(model=MODEL, local_debug=True)

    # Store the self-managed memory graph as JSON next to the script; the
    # same path is rewritten by Chat.after_response after each turn.
    memory_config = FlockAgentMemoryConfig()
    memory_config.storage_type = "json"
    memory_config.file_path = "chat_memory_graph.json"

    chatty = FlockAgent(
        name="chatty", 
        description=f"""You are Chatty, a friendly assistant that loves to chat. 
                    Today is {datetime.now().strftime('%A, %B %d, %Y')}.
                    """,
        input="user_query", 
        output="answer_to_query",
        # Chat callbacks: prompt the user before each run, render output after.
        initialize_callback=chat_helper.before_response,
        terminate_callback=chat_helper.after_response,
        # Suppress the default output renderer; after_response prints instead.
        config=FlockAgentConfig(disable_output=True),
        tools=[basic_tools.web_search_duckduckgo, 
               basic_tools.get_web_content_as_markdown, 
               basic_tools.code_eval],
        memory_enabled=True,
        memory_config=memory_config
    )
    
    flock.add_agent(chatty)

    # hand_off loops back to "chatty" each turn until the user says "goodbye".
    chatty.hand_off = chat_helper.hand_off

    # Initial query is empty; before_response fills it in interactively.
    await flock.run_async(
        start_agent=chatty, 
        input={"user_query": ""}
    )


if __name__ == "__main__":
    asyncio.run(main())
```

### examples\playground\hier\her_vis.py

- **Lines**: 405
- **Last modified**: 2025-02-28 23:13:59

```py
"""Visualization tool to compare traditional vs. hierarchical concept activation."""

import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from typing import Dict, List, Set, Tuple

from flock.modules.hierarchical.memory import ConceptRelationType, HierarchicalMemoryGraph




class MemoryVisualizer:
    """Visualizes the difference between traditional and hierarchical concept activation."""
    
    def __init__(self, memory_graph: HierarchicalMemoryGraph):
        """Initialize the visualizer with a memory graph.
        
        Args:
            memory_graph: The hierarchical memory graph to visualize
        """
        self.memory_graph = memory_graph
        
    def visualize_activation_comparison(
        self, 
        query_concepts: Set[str],
        filename: str = "activation_comparison.png",
        figsize: Tuple[int, int] = (20, 10),
        node_size_factor: float = 2000,
        title_fontsize: int = 16
    ) -> None:
        """Create a side-by-side visualization comparing traditional vs. hierarchical activation.
        
        Args:
            query_concepts: The set of concepts to start activation from
            filename: Output file to save the visualization
            figsize: Figure size (width, height) in inches
            node_size_factor: Base factor for node size calculation
            title_fontsize: Font size for the titles
        """
        # Get both types of activation results.
        # NOTE(review): upward/downward factors are hard-coded here; they weight
        # propagation along hierarchical edges - confirm against memory module.
        traditional = self.memory_graph.spread_activation(query_concepts)
        hierarchical = self.memory_graph.hierarchical_spread_activation(
            query_concepts, 
            upward_factor=0.8, 
            downward_factor=0.6
        )
        
        # Create a figure with two subplots (left: traditional, right: hierarchical)
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
        
        # Get the graph
        graph = self.memory_graph.graph
        
        # Use the same layout for both visualizations for consistency
        pos = nx.kamada_kawai_layout(graph)
        
        # Create a custom colormap (white to blue); RGBA tuples keep nodes
        # slightly translucent at both ends of the activation range.
        cmap = LinearSegmentedColormap.from_list(
            "blue_activation", 
            [(1, 1, 1, 0.7), (0.2, 0.4, 0.8, 0.9)]
        )
        
        # First subplot: Traditional activation
        self._draw_activation_graph(
            ax=ax1,
            graph=graph,
            pos=pos,
            activation=traditional,
            cmap=cmap,
            node_size_factor=node_size_factor,
            title="Traditional Concept Activation",
            title_fontsize=title_fontsize,
            highlight_concepts=query_concepts
        )
        
        # Second subplot: Hierarchical activation
        self._draw_activation_graph(
            ax=ax2,
            graph=graph,
            pos=pos,
            activation=hierarchical,
            cmap=cmap,
            node_size_factor=node_size_factor,
            title="Hierarchical Concept Activation",
            title_fontsize=title_fontsize,
            highlight_concepts=query_concepts
        )
        
        # Add a legend for the relationship types - only on the second subplot.
        # Empty plot calls create invisible proxy artists purely for the legend.
        ax2.plot([], [], '-', color="#2c3e50", label='Association')
        ax2.plot([], [], '-', color="#2e8540", label='IS-A')
        ax2.plot([], [], '-', color="#1E88E5", label='HAS-A')
        ax2.plot([], [], '-', color="#9C27B0", label='Other Hierarchical')
        ax2.legend(loc='upper right', frameon=True, framealpha=0.9)
        
        # Add a color bar for activation level, shared across both axes
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, 1))
        sm.set_array([])
        cbar = plt.colorbar(sm, ax=[ax1, ax2], orientation='horizontal', pad=0.01, aspect=40)
        cbar.set_label('Activation Level')
        
        # Add a main title
        fig.suptitle(f"Memory Activation Comparison\nQuery Concepts: {', '.join(query_concepts)}", 
                     fontsize=title_fontsize+2)
        
        # Tight layout and save; figure is closed to free memory (no display)
        plt.tight_layout(rect=[0, 0, 1, 0.96])  # Make room for the suptitle
        plt.savefig(filename, bbox_inches="tight", facecolor="white", dpi=150)
        plt.close()
        
    def _draw_activation_graph(
        self,
        ax: plt.Axes,
        graph: nx.MultiDiGraph,
        pos: Dict,
        activation: Dict[str, float],
        cmap: LinearSegmentedColormap,
        node_size_factor: float,
        title: str,
        title_fontsize: int,
        highlight_concepts: Set[str]
    ) -> None:
        """Draw a single activation graph on the given axes.
        
        Args:
            ax: Matplotlib axes to draw on
            graph: The NetworkX graph to visualize
            pos: Node positions
            activation: Dictionary mapping concepts to activation levels
            cmap: Colormap for activation visualization
            node_size_factor: Base factor for node size calculation
            title: Title for this visualization
            title_fontsize: Font size for the title
            highlight_concepts: Concepts to highlight as query concepts
        """
        # Normalize activation values to [0, 1] for coloring
        # (falls back to 1.0 when the activation dict is empty).
        max_activation = max(activation.values()) if activation else 1.0
        
        # Prepare node colors, sizes, and labels
        node_colors = []
        node_sizes = []
        
        for node in graph.nodes():
            # Nodes absent from the activation dict get level 0 (white, small).
            activation_level = activation.get(node, 0) / max_activation if max_activation > 0 else 0
            node_colors.append(cmap(activation_level))
            
            # Larger node size for more activated nodes
            base_size = node_size_factor * (0.5 + 0.5 * activation_level)
            # Even larger for highlight concepts
            if node in highlight_concepts:
                base_size *= 1.5
            node_sizes.append(base_size)
        
        # Draw nodes with activation-based coloring and sizing
        nx.draw_networkx_nodes(
            graph, 
            pos,
            ax=ax,
            node_color=node_colors,
            node_size=node_sizes,
            edgecolors='black',
            linewidths=0.5
        )
        
        # Draw different types of edges with distinct styles.
        # Edges are drawn in four passes, one per relation category, so each
        # category can use its own color/arrow/curvature settings.
        # 1. Association edges (standard style)
        association_edges = []
        for u, v, k, data in graph.edges(keys=True, data=True):
            # Missing relation_type defaults to ASSOCIATION in this pass only.
            relation_type = data.get('relation_type', ConceptRelationType.ASSOCIATION)
            if relation_type == ConceptRelationType.ASSOCIATION:
                association_edges.append((u, v))
                
        nx.draw_networkx_edges(
            graph,
            pos,
            ax=ax,
            edgelist=association_edges,
            width=1,
            alpha=0.5,
            edge_color="#2c3e50",  # Dark blue-grey
            arrows=False
        )
        
        # 2. IS-A relationships (green with arrow)
        is_a_edges = []
        for u, v, k, data in graph.edges(keys=True, data=True):
            relation_type = data.get('relation_type', None)
            if relation_type == ConceptRelationType.IS_A:
                is_a_edges.append((u, v))
                
        nx.draw_networkx_edges(
            graph,
            pos,
            ax=ax,
            edgelist=is_a_edges,
            width=1.5,
            alpha=0.7,
            edge_color="#2e8540",  # Green
            arrows=True,
            arrowsize=15,
            connectionstyle="arc3,rad=0.1"
        )
        
        # 3. HAS-A relationships (blue with arrow)
        has_a_edges = []
        for u, v, k, data in graph.edges(keys=True, data=True):
            relation_type = data.get('relation_type', None)
            if relation_type == ConceptRelationType.HAS_A:
                has_a_edges.append((u, v))
                
        nx.draw_networkx_edges(
            graph,
            pos,
            ax=ax,
            edgelist=has_a_edges,
            width=1.5,
            alpha=0.7,
            edge_color="#1E88E5",  # Blue
            arrows=True,
            arrowsize=15,
            connectionstyle="arc3,rad=-0.1"  # opposite curvature to IS-A edges
        )
        
        # 4. Other hierarchical relationships (purple with arrow)
        other_edges = []
        for u, v, k, data in graph.edges(keys=True, data=True):
            relation_type = data.get('relation_type', None)
            # Anything not already drawn above (and not untyped) goes here.
            if relation_type not in [None, ConceptRelationType.ASSOCIATION, 
                                    ConceptRelationType.IS_A, ConceptRelationType.HAS_A]:
                other_edges.append((u, v))
                
        nx.draw_networkx_edges(
            graph,
            pos,
            ax=ax,
            edgelist=other_edges,
            width=1.5,
            alpha=0.7,
            edge_color="#9C27B0",  # Purple
            arrows=True,
            arrowsize=15,
            connectionstyle="arc3,rad=0.15"
        )
        
        # Draw labels with white background for better readability
        labels = {node: node for node in graph.nodes()}
        label_box_args = dict(
            bbox=dict(facecolor='white', edgecolor='none', alpha=0.7, pad=3),
            horizontalalignment='center',
            verticalalignment='center',
            fontsize=9,
            fontweight='bold'
        )
        
        nx.draw_networkx_labels(
            graph, 
            pos,
            ax=ax,
            labels=labels, 
            **label_box_args
        )
        
        # Add highlighting for query concepts: a hollow red ring drawn on top
        for concept in highlight_concepts:
            if concept in pos:
                ax.plot(
                    pos[concept][0], 
                    pos[concept][1], 
                    'o', 
                    markersize=15, 
                    fillstyle='none', 
                    color='red', 
                    mew=2
                )
        
        # Set title and turn off axis
        ax.set_title(title, fontsize=title_fontsize)
        ax.axis('off')


def create_sample_memory_graph() -> HierarchicalMemoryGraph:
    """Build a small demo memory graph of animals, pets, and foods.

    Returns a HierarchicalMemoryGraph populated with IS-A/HAS-A hierarchies
    plus a few plain association edges, for use by the visualization demo.
    """
    memory_graph = HierarchicalMemoryGraph()

    # Register every concept node up front.
    memory_graph.add_concepts({
        "animal", "pet", "wild animal", "cat", "dog", "lion", "tiger",
        "fish", "goldfish", "shark", "siamese", "tabby", "german shepherd", "bulldog",
        "domestic", "aquatic", "mammal", "canine", "feline",
        "luna", "lucy", "rex", "fluffy", "food", "pet food", "cat food", "dog food"
    })

    # (child, parent, relation) triples, grouped the same way the original
    # inline comments laid them out; insertion order is preserved.
    hierarchy = [
        # Mammals
        ("mammal", "animal", ConceptRelationType.IS_A),
        # Domestic vs wild
        ("pet", "domestic", ConceptRelationType.IS_A),
        ("domestic", "animal", ConceptRelationType.IS_A),
        ("wild animal", "animal", ConceptRelationType.IS_A),
        # Pet types
        ("dog", "pet", ConceptRelationType.IS_A),
        ("cat", "pet", ConceptRelationType.IS_A),
        ("goldfish", "pet", ConceptRelationType.IS_A),
        # Cat and dog breeds
        ("siamese", "cat", ConceptRelationType.IS_A),
        ("tabby", "cat", ConceptRelationType.IS_A),
        ("german shepherd", "dog", ConceptRelationType.IS_A),
        ("bulldog", "dog", ConceptRelationType.IS_A),
        # Wild animals
        ("lion", "wild animal", ConceptRelationType.IS_A),
        ("tiger", "wild animal", ConceptRelationType.IS_A),
        ("shark", "wild animal", ConceptRelationType.IS_A),
        # Taxonomic categories
        ("dog", "canine", ConceptRelationType.IS_A),
        ("canine", "mammal", ConceptRelationType.IS_A),
        ("cat", "feline", ConceptRelationType.IS_A),
        ("feline", "mammal", ConceptRelationType.IS_A),
        ("lion", "feline", ConceptRelationType.IS_A),
        ("tiger", "feline", ConceptRelationType.IS_A),
        # Fish
        ("fish", "animal", ConceptRelationType.IS_A),
        ("goldfish", "fish", ConceptRelationType.IS_A),
        ("shark", "fish", ConceptRelationType.IS_A),
        ("fish", "aquatic", ConceptRelationType.IS_A),
        # Specific pet instances
        ("luna", "cat", ConceptRelationType.IS_A),
        ("lucy", "cat", ConceptRelationType.IS_A),
        ("rex", "dog", ConceptRelationType.IS_A),
        ("fluffy", "dog", ConceptRelationType.IS_A),
        # Food taxonomy, then ownership (HAS-A)
        ("pet food", "food", ConceptRelationType.IS_A),
        ("cat food", "pet food", ConceptRelationType.IS_A),
        ("dog food", "pet food", ConceptRelationType.IS_A),
        ("cat", "cat food", ConceptRelationType.HAS_A),
        ("dog", "dog food", ConceptRelationType.HAS_A),
    ]
    for child, parent, relation in hierarchy:
        memory_graph.add_hierarchical_relation(child, parent, relation)

    # A handful of plain (non-hierarchical) association edges as well.
    # NOTE(review): this pokes at the private _graph attribute — presumably
    # because no public API exists for raw associations; confirm.
    for source in ["luna", "lucy", "siamese", "tabby"]:
        for target in ["cat", "feline"]:
            if source != target:
                memory_graph._graph.add_edge(
                    source, target,
                    key=ConceptRelationType.ASSOCIATION,
                    weight=2,
                    relation_type=ConceptRelationType.ASSOCIATION
                )

    return memory_graph


def main():
    """Generate a visualization comparing traditional and hierarchical memory activation."""
    visualizer = MemoryVisualizer(create_sample_memory_graph())

    # Each scenario: (query concepts, output filename, console label).
    scenarios = [
        ({"luna"}, "activation_comparison_specific_cat.png", "'luna'"),
        ({"cat"}, "activation_comparison_general_cat.png", "'cat'"),
        ({"pet"}, "activation_comparison_pets.png", "'pet'"),
        ({"dog", "food"}, "activation_comparison_multiple_concepts.png", "'dog' and 'food'"),
    ]
    for concepts, out_file, label in scenarios:
        visualizer.visualize_activation_comparison(
            query_concepts=concepts,
            filename=out_file
        )
        print(f"Generated visualization for {label} query")

    print("All visualizations completed successfully!")


# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
```

### examples\playground\hier\hier_mem.py

- **Lines**: 175
- **Last modified**: 2025-02-28 23:13:59

```py
"""Example usage of the Hierarchical Memory Module for AI agents."""

import asyncio
from typing import Dict, Any

from flock.core import FlockFactory
from flock.core.logging.formatters.themes import OutputTheme
from flock.evaluators.memory.hierarchical_evaluator import HierarchicalMemoryEvaluator, HierarchicalMemoryEvaluatorConfig
from flock.modules.hierarchical.memory import ConceptRelationType, HierarchicalMemoryModuleConfig
from flock.modules.hierarchical.module import HierarchicalMemoryModule



def _print_results(query_text: str, query_result: Dict[str, Any]) -> None:
    """Print the results of a memory search, truncating long entries to 100 chars.

    Fixes a bug in the original inline version: the conditional expression
    wrapped the entire print argument, so short results were printed without
    their "Result N:" prefix.
    """
    print(f"\nQuery: {query_text}")
    if "memory_results" in query_result:
        print(f"Found {len(query_result['memory_results'])} results:")
        for i, result in enumerate(query_result["memory_results"]):
            content = result.content
            snippet = f"{content[:100]}..." if len(content) > 100 else content
            print(f"  Result {i+1}: {snippet}")
    else:
        print("No results found.")


async def main():
    """Demonstrate the hierarchical memory capabilities.

    Builds an agent whose evaluator is a HierarchicalMemoryEvaluator, feeds it
    facts with implicit hierarchies, wires explicit concept hierarchies into
    the memory module, then runs two queries that rely on upward/downward
    concept traversal, and finally persists the memory.
    """
    # Evaluator and module point at the same folder and share propagation
    # settings so they operate on the same persisted memory.
    # NOTE(review): the two configs are intentionally duplicated — confirm
    # they must stay in sync.
    memory_config = HierarchicalMemoryEvaluatorConfig(
        folder_path="hierarchical_memory/",
        enable_hierarchical_concepts=True,
        upward_propagation_factor=0.8,
        downward_propagation_factor=0.6,
        similarity_threshold=0.5,
        splitting_mode="semantic",
        save_after_update=True,
    )
    memory_module_config = HierarchicalMemoryModuleConfig(
        folder_path="hierarchical_memory/",
        enable_hierarchical_concepts=True,
        upward_propagation_factor=0.8,
        downward_propagation_factor=0.6,
        similarity_threshold=0.5,
        splitting_mode="semantic"
    )
    memory_eval = HierarchicalMemoryEvaluator(name="hierarchical_memory", config=memory_config)
    memory_module = HierarchicalMemoryModule(name="hierarchical_memory", config=memory_module_config)

    agent = FlockFactory.create_default_agent(
        model="openai/gpt-4o",
        name="hierarchical_memory_agent",
        input="data",
        output_theme=OutputTheme.aardvark_blue
    )

    # Replace the default evaluator with the hierarchical memory evaluator.
    agent.evaluator = memory_eval

    # Seed the memory with facts that contain implicit hierarchies.
    print("Adding initial information...")
    seed_facts = [
        "Andre is 38 years old and author of the agent framework 'flock'",
        "Andre works for White Duck",
        "Andre has two cats",
        "White Duck is a cloud consulting company",
        "Flock is an agent framework designed for scalable multi-agent systems",
        # Andre's cats, by name
        "One of Andre's cats is named Luna",
        "The other cat is named Lucy",
        # Location information
        "Andre lives in Germany",
        "Germany is in Europe",
    ]
    for fact in seed_facts:
        await agent.run_async(inputs={"data": fact})

    # Wire explicit (child, parent, relation) hierarchies into memory.
    print("\nAdding explicit hierarchical relationships...")
    relationships = [
        ("cat", "pet", ConceptRelationType.IS_A),
        ("dog", "pet", ConceptRelationType.IS_A),
        ("luna", "cat", ConceptRelationType.IS_A),
        ("lucy", "cat", ConceptRelationType.IS_A),
        ("flock", "agent framework", ConceptRelationType.IS_A),
        ("agent framework", "software", ConceptRelationType.IS_A),
        ("white duck", "company", ConceptRelationType.IS_A),
        ("company", "organization", ConceptRelationType.IS_A),
        # Compositional relationship
        ("germany", "europe", ConceptRelationType.PART_OF),
    ]
    for child, parent, relation in relationships:
        await memory_module.add_hierarchical_relationship(
            agent=agent,
            child_concept=child,
            parent_concept=parent,
            relation_type=relation
        )

    print("\nQuerying memory with hierarchical awareness...")

    # Should retrieve Luna/Lucy by traversing up from the specific cat
    # names to the "cat" concept.
    query_result = await memory_module.search_memory(
        agent=agent,
        query={"query": "What pets does Andre have?"}
    )
    _print_results("What pets does Andre have?", query_result)

    # Should traverse down from Europe to Germany to find relevant facts.
    query_result = await memory_module.search_memory(
        agent=agent,
        query={"query": "Where in Europe does Andre live?"}
    )
    _print_results("Where in Europe does Andre live?", query_result)

    # Persist the final memory and concept graph.
    memory_module.save_memory()
    print("\nMemory saved with hierarchical concept graph.")


# Script entry point: drive the async demo via asyncio.
if __name__ == "__main__":
    asyncio.run(main())
```

### examples\playground\misc\memory.py

- **Lines**: 261
- **Last modified**: 2025-02-28 09:57:06

```py

from flock.core import Flock, FlockAgent
from flock.core.tools import basic_tools
from uuid import uuid4

# Initialize Flock with memory
# NOTE(review): memory_config keys and the memory_mapping strings below are a
# declarative memory DSL — semantics not verifiable from this file; confirm
# against the flock memory documentation.
flock = Flock(
    model="openai/gpt-4",
    memory_config={
        "use_global": True,  # Enable global memory
        "global_weight": 0.4,  # Weight for global knowledge
        "local_weight": 0.6   # Weight for specialized knowledge
    }
)

# 1. Research Agent: Gathers and analyzes information
# Entry point of the pipeline; hands off to the analyst below.
researcher = FlockAgent(
    name="researcher",
    input="""
        topic: str | The research topic,
        depth: str | Research depth (quick/thorough/comprehensive),
        context: str | Additional context or requirements
    """,
    output="""
        findings: list[dict] | Key research findings,
        sources: list[dict] | Sources and citations,
        analysis: str | Initial analysis and insights
    """,
    tools=[
        basic_tools.web_search_tavily,
        basic_tools.web_search_duckduckgo,
        basic_tools.get_web_content_as_markdown,
        basic_tools.extract_urls,
        basic_tools.extract_numbers,
    ],
    memory_mapping="""
        # Check if we've researched this before
        topic -> memory.semantic(threshold=0.85, scope='local') |
        memory.filter(recency='30d') -> recent_research |

        # Get global knowledge
        topic -> memory.semantic(scope='global') |
        memory.spread(depth=2) |  # Find related topics
        memory.filter(relevance=0.7) -> background |

        # Combine and enrich with fresh research
        memory.combine(
            weights={'recent': 0.6, 'background': 0.4}
        ) |
        memory.enrich(
            tools=['web_search_tavily', 'web_search_duckduckgo'],
            strategy='comprehensive'
        ) |
        memory.sort(by='relevance')
        -> findings, sources, analysis
    """
)

# 2. Analyst: Processes research and identifies patterns
# Consumes the researcher's findings; its inputs are supplied in the single
# pipeline input dict built in main().
analyst = FlockAgent(
    name="analyst",
    input="""
        findings: list[dict] | Research findings to analyze,
        analysis_type: str | Type of analysis needed,
        focus_areas: list[str] | Specific areas to focus on
    """,
    output="""
        patterns: list[dict] | Identified patterns and trends,
        insights: list[dict] | Key insights and implications,
        recommendations: list[str] | Actionable recommendations
    """,
    tools=[
        basic_tools.evaluate_math,
        basic_tools.extract_numbers,
        basic_tools.json_parse_safe,
    ],
    memory_mapping="""
        # Check for similar analyses
        findings -> memory.semantic(scope='local') |
        memory.filter(
            metadata={'type': 'analysis'},
            recency='90d'
        ) -> previous_analyses |

        # Get global patterns
        findings -> memory.semantic(scope='global') |
        memory.concepts |
        memory.spread(depth=3) -> global_patterns |

        # Combine and process
        memory.combine(
            weights={'previous': 0.4, 'global': 0.6}
        ) |
        memory.enrich(
            tools=['evaluate_math', 'extract_numbers'],
            strategy='validated'
        ) |
        memory.store(
            scope='both',
            metadata={'type': 'analysis_pattern'}
        ) -> patterns, insights, recommendations
    """
)

# 3. Content Creator: Produces tailored content
writer = FlockAgent(
    name="writer",
    input="""
        patterns: list[dict] | Patterns to write about,
        insights: list[dict] | Key insights to cover,
        audience: str | Target audience,
        style: str | Writing style,
        format: str | Content format
    """,
    output="""
        title: str | Content title,
        content: str | Main content,
        summary: str | Executive summary,
        sections: list[dict] | Content sections
    """,
    tools=[
        basic_tools.count_words,
        basic_tools.get_current_time,
    ],
    memory_mapping="""
        # Check writing patterns
        audience, style -> memory.exact(scope='local') |
        memory.sort(by='access_count') -> style_patterns |

        # Get relevant content examples
        patterns, insights -> memory.semantic(scope='global') |
        memory.filter(
            metadata={'format': format},
            relevance=0.8
        ) -> content_examples |

        # Combine and create
        memory.combine(
            weights={'style': 0.4, 'content': 0.6}
        ) |
        memory.enrich(
            tools=['count_words'],
            metadata={'word_count': 'total'}
        ) |
        memory.store(
            scope='both',
            metadata={'type': 'content', 'format': format}
        ) -> title, content, summary, sections
    """
)

# 4. Quality Checker: Ensures quality and consistency
# Final stage of the pipeline; scores the writer's output.
checker = FlockAgent(
    name="checker",
    input="""
        content: dict | Content to check,
        standards: list[str] | Quality standards to apply,
        previous_feedback: list[dict] | Previous revision feedback
    """,
    output="""
        issues: list[dict] | Identified issues,
        suggestions: list[dict] | Improvement suggestions,
        quality_score: float | Overall quality score
    """,
    tools=[
        basic_tools.web_search_tavily,
        basic_tools.extract_urls,
    ],
    memory_mapping="""
        # Check against quality patterns
        content -> memory.semantic(scope='global') |
        memory.filter(
            metadata={'type': 'quality_check'},
            threshold=0.9
        ) -> quality_patterns |

        # Get relevant feedback
        content -> memory.semantic(scope='local') |
        memory.filter(
            metadata={'type': 'feedback'},
            recency='180d'
        ) -> historical_feedback |

        # Combine and validate
        memory.combine(weights={'quality': 0.7, 'feedback': 0.3}) |
        memory.enrich(
            tools=['web_search_tavily'],
            strategy='validated'
        ) |
        memory.store(
            scope='both',
            metadata={'type': 'quality_check'}
        ) -> issues, suggestions, quality_score
    """
)

# Set up the workflow: researcher -> analyst -> writer -> checker
researcher.hand_off = analyst
analyst.hand_off = writer
writer.hand_off = checker

# Add all agents to flock
flock.add_agent(researcher)
flock.add_agent(analyst)
flock.add_agent(writer)
flock.add_agent(checker)

# Example usage
async def main():
    """Run the research -> analysis -> writing -> QA pipeline once and return its result."""
    # Inputs for every stage of the pipeline are supplied up front in one dict.
    pipeline_input = {
        "topic": "Recent breakthroughs in quantum computing",
        "depth": "comprehensive",
        "context": "Focus on practical applications",
        "analysis_type": "trend_analysis",
        "focus_areas": ["business impact", "timeline", "technical feasibility"],
        "audience": "business executives",
        "style": "professional",
        "format": "executive_brief",
        "standards": [
            "technical accuracy",
            "business relevance",
            "clarity",
            "actionable insights"
        ]
    }
    return await flock.run_async(start_agent=researcher, input=pipeline_input)

if __name__ == "__main__":
    import asyncio
    # Run the whole pipeline once; `result` holds the final checker output.
    result = asyncio.run(main())


# This system:

# 1. **Builds Knowledge Over Time**:
#    - Remembers past research and analyses
#    - Learns writing patterns that work well
#    - Accumulates quality standards

# 2. **Uses Memory Effectively**:
#    - Global memory for shared knowledge
#    - Local memory for specialized patterns
#    - Concept spreading for related topics

# 3. **Leverages Tools**:
#    - Web search for fresh data
#    - Data extraction and validation
#    - Content analysis tools

# 4. **Gets Smarter With Use**:
#    - Writing styles adapt to audience feedback
#    - Quality checks learn from past issues
#    - Research patterns improve over time

# Possible follow-up improvements:
# 1. Add more specific memory operations
# 2. Show how to analyze the memory state
# 3. Add error handling and recovery
```

### examples\playground\misc\self_learner.py

- **Lines**: 164
- **Last modified**: 2025-02-28 09:57:06

```py
import os
import json
import ast
import cloudpickle  # Allows function serialization
from functools import wraps
from typing import Callable, Dict
import litellm  # LLM wrapper for inference

# Configuration
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TOOL_DB_PATH = "tools.json"

# Track function calls
_num_calls = {}

def track_num_calls(func):
    """Decorator: count invocations of *func* in the module-level _num_calls map."""
    name = func.__name__

    @wraps(func)
    def counted(*args, **kwargs):
        _num_calls[name] = _num_calls.get(name, 0) + 1
        return func(*args, **kwargs)

    return counted

class Tool:
    """Minimal tool class: wraps a generated Python function for execution.

    A tool is built either from raw source `code` (compiled on the fly) or
    from `func_bytes` (a hex-encoded cloudpickle of the function, used when
    loading from the JSON database).
    """

    def __init__(self, name: str, description: str, code: str = None, func_bytes: str = None):
        """Store metadata and build the callable.

        Bug fix vs. the original: `code` was never accepted or stored, so
        `_load_func()`/`serialize()` crashed on `self.code`, `deserialize()`
        failed by passing an unexpected `code` kwarg, and the no-argument
        path unconditionally tried to compile non-existent code.
        """
        self.name = name
        self.description = description
        self.code = code
        if func_bytes is not None:
            # Prefer the pickled function when available (DB round-trip path).
            self.func = self._deserialize_func(func_bytes)
        elif code is not None:
            self.func = self._load_func()
        else:
            self.func = None

    def _load_func(self) -> Callable:
        """Compiles and returns the function from the generated code."""
        local_scope = {}
        # SECURITY: executes LLM-generated code; only run trusted output.
        exec(self.code, {}, local_scope)  # Execute in an isolated scope
        func_name = next(iter(local_scope))  # Get the function name
        return track_num_calls(local_scope[func_name])

    def _serialize_func(self) -> str:
        """Serializes the function to a hex string via cloudpickle."""
        return cloudpickle.dumps(self.func).hex()

    def _deserialize_func(self, func_bytes: str) -> Callable:
        """Rebuilds the function from its hex-encoded cloudpickle form."""
        return cloudpickle.loads(bytes.fromhex(func_bytes))

    def run(self, *args, **kwargs):
        """Executes the tool's function."""
        return self.func(*args, **kwargs)

    def serialize(self):
        """Serializes the tool into a JSON-compatible format."""
        return {
            "name": self.name,
            "description": self.description,
            "code": self.code,
            "func_bytes": self._serialize_func(),  # Serialize function
        }

    @staticmethod
    def deserialize(data):
        """Deserializes a tool from JSON-compatible format."""
        return Tool(
            name=data["name"],
            description=data["description"],
            code=data["code"],
            func_bytes=data["func_bytes"]
        )

class ToolManager:
    """Persists Tool objects to a JSON file and retrieves them by description."""

    def __init__(self, db_path=TOOL_DB_PATH):
        self.db_path = db_path
        self.tools: Dict[str, Tool] = self._load_tools()

    def _load_tools(self) -> Dict[str, Tool]:
        """Read the JSON database (if any) and rebuild Tool objects."""
        if not os.path.exists(self.db_path):
            return {}
        with open(self.db_path, "r") as f:
            raw = json.load(f)
        return {name: Tool.deserialize(payload) for name, payload in raw.items()}

    def _save_tools(self):
        """Write every registered tool back to the JSON database."""
        serialized = {name: tool.serialize() for name, tool in self.tools.items()}
        with open(self.db_path, "w") as f:
            json.dump(serialized, f, indent=4)

    def add_tool(self, tool: Tool):
        """Register *tool* under its name and persist immediately."""
        self.tools[tool.name] = tool
        self._save_tools()

    def retrieve_best_tool(self, query: str) -> Tool:
        """Return the first tool whose description contains *query* (case-insensitive), else None."""
        needle = query.lower()
        for tool in self.tools.values():
            if needle in tool.description.lower():
                return tool
        return None

class Agent:
    """Core agent that generates, retrieves, and executes tools."""
    def __init__(self, tool_manager: ToolManager, model="gpt-4-turbo"):
        # tool_manager handles persistence/lookup; model is the litellm model id.
        self.tool_manager = tool_manager
        self.model = model

    def run(self, query: str, *args, **kwargs):
        """Finds or generates a tool and executes it.

        Extra *args/**kwargs are forwarded to the tool's function.
        """
        tool = self.tool_manager.retrieve_best_tool(query)
        if tool:
            print(f"Using existing tool: {tool.name} - {tool.description}")
            return tool.run(*args, **kwargs)

        # No match: ask the LLM for a fresh implementation and persist it.
        print(f"No tool found. Generating a new one for: {query}")
        new_tool = self.generate_tool(query)
        self.tool_manager.add_tool(new_tool)
        return new_tool.run(*args, **kwargs)

    def generate_tool(self, query: str) -> Tool:
        """Uses an LLM to generate a Python function as a new tool."""
        prompt = f"""
        Generate a standalone Python function for the following task:
        "{query}"
        The function should be clear, modular, and include a one-line docstring.
        Output only the function code.
        """

        response = litellm.completion(
            model=self.model,
            messages=[{"role": "system", "content": "You are an AI that generates reusable Python functions."},
                      {"role": "user", "content": prompt}],
            api_key=OPENAI_API_KEY
        )["choices"][0]["message"]["content"]

        # Extract function code
        func_code = self.extract_function_code(response)
        func_name = self.get_function_name(func_code)

        # NOTE(review): verify this matches Tool.__init__'s parameter order —
        # as declared in this file the third positional argument is func_bytes
        # (pickled bytes), but source code is passed here; confirm intended.
        return Tool(func_name, query, func_code)

    def extract_function_code(self, text: str) -> str:
        """Extracts Python function code from LLM response.

        NOTE(review): returns None (despite the -> str annotation) when no
        "def " is found — downstream get_function_name would then crash.
        """
        code_block = text.split("```python")[-1].split("```")[0].strip()
        return code_block if "def " in code_block else None

    def get_function_name(self, code: str) -> str:
        """Parses the function name from the generated code via the ast module."""
        tree = ast.parse(code)
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                return node.name
        # Fallback name if no function definition is present.
        return "generated_function"

# Example usage
if __name__ == "__main__":
    tool_manager = ToolManager()
    agent = Agent(tool_manager)

    # The first call for a query generates (and persists) a tool; later calls
    # whose query matches a stored description reuse it.
    #print(agent.run("Get the last N paper on arxiv as a pdf that are about LLMs", 3))  # Generates & runs new tool
    print(agent.run("Calculate the area of a circle", 10))  # Uses the saved tool
```

### examples\playground\misc\self_learner2.py

- **Lines**: 157
- **Last modified**: 2025-02-28 09:57:06

```py
import os
import json
import base64
from functools import wraps
from typing import Callable, Dict
import litellm  # LLM wrapper for inference

# Configuration
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TOOL_DB_PATH = "tools.json"

# Track function calls
_num_calls = {}

def track_num_calls(func):
    """Wrap *func* so each call increments its entry in the global _num_calls dict."""
    key = func.__name__

    @wraps(func)
    def _counting_wrapper(*args, **kwargs):
        current = _num_calls.get(key, 0)
        _num_calls[key] = current + 1
        return func(*args, **kwargs)

    return _counting_wrapper

class Tool:
    """Minimal tool class for execution.

    Holds a Base64-encoded function body plus the compiled callable
    built from it.
    """

    def __init__(self, name: str, description: str, func_code=None):
        self.name = name
        self.description = description
        self.func_code = func_code
        self.func = self._load_func() if func_code else None

    def _load_func(self) -> Callable:
        """Decode the stored Base64 source, exec it, and wrap the resulting function."""
        source = base64.b64decode(self.func_code).decode("utf-8")
        namespace = {}
        exec(source, {}, namespace)
        first_name = next(iter(namespace))  # the (only) function the source defines
        return track_num_calls(namespace[first_name])

    def set_function(self, code: str):
        """Store *code* Base64-encoded and compile it into self.func."""
        encoded = base64.b64encode(code.encode("utf-8"))
        self.func_code = encoded.decode("utf-8")
        self.func = self._load_func()

    def run(self, *args, **kwargs):
        """Invoke the compiled function, failing loudly if none is attached."""
        if not self.func:
            raise RuntimeError(f"Tool {self.name} has no function assigned!")
        return self.func(*args, **kwargs)

    def serialize(self):
        """Return a JSON-serializable dict describing this tool."""
        return {
            "name": self.name,
            "description": self.description,
            "func_code": self.func_code  # Store Base64-encoded function code
        }

    @staticmethod
    def deserialize(data):
        """Rebuild a Tool from the dict produced by serialize()."""
        return Tool(name=data["name"], description=data["description"], func_code=data["func_code"])

class ToolManager:
    """Manages tools and persists them in a JSON file."""

    def __init__(self, db_path=TOOL_DB_PATH):
        self.db_path = db_path
        self.tools: Dict[str, Tool] = self._load_tools()

    def _load_tools(self) -> Dict[str, Tool]:
        """Load and deserialize all tools from disk; empty dict if no DB yet."""
        if not os.path.exists(self.db_path):
            return {}
        with open(self.db_path, "r") as handle:
            stored = json.load(handle)
        loaded = {}
        for name, payload in stored.items():
            loaded[name] = Tool.deserialize(payload)
        return loaded

    def _save_tools(self):
        """Persist the current tool set as pretty-printed JSON."""
        with open(self.db_path, "w") as handle:
            json.dump(
                {name: tool.serialize() for name, tool in self.tools.items()},
                handle,
                indent=4,
            )

    def add_tool(self, tool: Tool):
        """Store *tool* under its name and write the database to disk."""
        self.tools[tool.name] = tool
        self._save_tools()

    def retrieve_best_tool(self, query: str) -> Tool:
        """First tool whose description contains *query* (case-insensitive); None if no match."""
        lowered = query.lower()
        for candidate in self.tools.values():
            if lowered in candidate.description.lower():
                return candidate
        return None

class Agent:
    """Core agent that generates, retrieves, and executes tools."""

    def __init__(self, tool_manager: ToolManager, model="gpt-4-turbo"):
        self.model = model
        self.tool_manager = tool_manager

    def run(self, query: str, *args, **kwargs):
        """Look up a matching tool (generating one if needed) and execute it."""
        tool = self.tool_manager.retrieve_best_tool(query)
        if tool is not None:
            print(f"Using existing tool: {tool.name} - {tool.description}")
            return tool.run(*args, **kwargs)

        print(f"No tool found. Generating a new one for: {query}")
        generated = self.generate_tool(query)
        self.tool_manager.add_tool(generated)
        return generated.run(*args, **kwargs)

    def generate_tool(self, query: str) -> Tool:
        """Ask the LLM for a standalone function implementing *query* and wrap it as a Tool."""
        prompt = f"""
        Generate a standalone Python function for the following task:
        "{query}"
        The function should be clear, modular, and include a one-line docstring.
        Output only the function code without any markdown formatting.
        The code should be a callable function that can be executed directly.
        So put imports and helper functions inside the function if needed.
        """

        completion = litellm.completion(
            model=self.model,
            messages=[
                {"role": "system", "content": "You are an AI that generates reusable Python functions."},
                {"role": "user", "content": prompt},
            ],
            api_key=OPENAI_API_KEY,
        )
        raw_text = completion["choices"][0]["message"]["content"]

        # Strip any markdown fences before compiling the function.
        tool = Tool(name=query, description=query)
        tool.set_function(self.clean_code(raw_text))
        return tool

    def clean_code(self, text: str) -> str:
        """Cleans LLM output by removing markdown code blocks and stripping whitespace."""
        if "```python" not in text:
            return text
        return text.split("```python")[-1].split("```")[0].strip()

# Example usage
if __name__ == "__main__":
    tool_manager = ToolManager()
    agent = Agent(tool_manager)

    # The first call for a query generates (and persists) a tool; later calls
    # whose query matches a stored description reuse it.
    # print(agent.run("Calculate the area of a circle", 5))  # Generates & runs new tool
    # print(agent.run("Calculate the area of a circle", 10))  # Uses the saved tool

    print(agent.run("Get the top N threads on reddit and saves them as markdown", 3))
```

### examples\playground\website\app.py

- **Lines**: 15
- **Last modified**: 2025-02-28 09:57:06

```py
from flock.core import Flock, FlockFactory

# Minimal example: a single-agent Flock that turns a list of fields plus a
# design style into a Jinja2 template for rendering a website.

flock = Flock()

# Declarative agent: input/output are pipe-annotated type specs.
website_agent = FlockFactory.create_default_agent(name="Website Agent", 
                                                  description="A website creator that creates a website for rendering a given prompt",
                                                  input="fields_to_render: list[str], design_style: str",
                                                  output="jinja2_template: str | A template for presenting the fields in a website")

flock.add_agent(website_agent)
# NOTE(review): start_api() is called before run(); if start_api blocks,
# the run() below never executes — confirm intended ordering.
flock.start_api()

flock.run(start_agent=website_agent, input={"fields_to_render": ["project_title", "project_descriptions", "tasks: list[str]"], "design_style": "very sleek and modern"})
```

### scripts\code_collector.py

- **Lines**: 704
- **Last modified**: 2025-04-04 17:13:15

```py
#!/usr/bin/env python3
"""Code Repository Analyzer

This script generates a comprehensive Markdown document of a code repository,
optimized for LLM consumption and understanding.
"""

import ast
import datetime
import glob
import os
from typing import Any


def find_files(folder: str, extension: str) -> list[str]:
    """Return a sorted list of all files under *folder* ending in *extension*."""
    # Recursive glob: '**' matches zero or more directory levels.
    search_pattern = os.path.join(folder, "**", f"*{extension}")
    matches = glob.glob(search_pattern, recursive=True)
    return sorted(matches)


def get_file_metadata(file_path: str) -> dict[str, Any]:
    """Collect size, line count, and timestamp metadata for *file_path*.

    Unreadable files keep the default placeholder values.
    """

    def _stamp(ts: float) -> str:
        # Human-readable local timestamp.
        return datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")

    metadata: dict[str, Any] = {
        "path": file_path,
        "size_bytes": 0,
        "line_count": 0,
        "last_modified": "Unknown",
        "created": "Unknown",
    }

    try:
        stats = os.stat(file_path)
        metadata["size_bytes"] = stats.st_size
        metadata["last_modified"] = _stamp(stats.st_mtime)
        metadata["created"] = _stamp(stats.st_ctime)

        with open(file_path, encoding="utf-8") as f:
            metadata["line_count"] = len(f.read().splitlines())
    except Exception as e:
        # Best effort: keep the defaults for anything we failed to read.
        print(f"Warning: Could not get complete metadata for {file_path}: {e}")

    return metadata


def extract_python_components(file_path: str) -> dict[str, Any]:
    """Extract classes, functions, and imports from a Python file.

    Returns a dict with keys:
        classes:   list of {name, docstring, methods} for top-level classes
        functions: list of {name, docstring, args} for top-level functions
        imports:   dotted names of every import anywhere in the file
        docstring: the module docstring, or None

    Parse failures are reported as a warning and yield the empty result.

    Fixes vs. the previous version: the dead `is_top_level_function` helper
    (defined but never called) is removed, the always-true
    `hasattr(arg, "arg")` check is dropped, and `async def` definitions are
    now counted as functions/methods.
    """
    components: dict[str, Any] = {
        "classes": [],
        "functions": [],
        "imports": [],
        "docstring": None,
    }

    # Both sync and async defs count as functions/methods.
    func_types = (ast.FunctionDef, ast.AsyncFunctionDef)

    try:
        with open(file_path, encoding="utf-8") as f:
            tree = ast.parse(f.read())

        # Module docstring (None when absent).
        components["docstring"] = ast.get_docstring(tree)

        # Top-level classes and functions only (no nested definitions).
        for node in ast.iter_child_nodes(tree):
            if isinstance(node, ast.ClassDef):
                components["classes"].append({
                    "name": node.name,
                    "docstring": ast.get_docstring(node),
                    "methods": [
                        m.name for m in node.body if isinstance(m, func_types)
                    ],
                })
            elif isinstance(node, func_types):
                components["functions"].append({
                    "name": node.name,
                    "docstring": ast.get_docstring(node),
                    "args": [arg.arg for arg in node.args.args],
                })

        # Imports anywhere in the file (including nested scopes).
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                components["imports"].extend(name.name for name in node.names)
            elif isinstance(node, ast.ImportFrom):
                module = node.module or ""
                components["imports"].extend(
                    f"{module}.{name.name}" for name in node.names
                )

    except Exception as e:
        print(f"Warning: Could not parse Python components in {file_path}: {e}")

    return components


def analyze_code_dependencies(files: list[str]) -> dict[str, set[str]]:
    """Analyze dependencies between Python files based on imports.

    Maps every path in *files* to the set of other files (from the same
    list) that it imports, by deriving module names from file paths and
    matching them against each file's import statements. Non-Python or
    unparsable files contribute an empty set.

    Bug fix: the package walk previously started at the file itself, so the
    file's own basename (e.g. "foo.py") was inserted into the module parts,
    producing names like "pkg.foo.py.foo" and never mapping "pkg.foo" or
    "pkg". The walk now starts at the file's directory.
    """
    dependencies = {file: set() for file in files}

    # Create a mapping from module names to file paths
    module_map = {}
    # NOTE(review): package_map is written below but never read — dead state?
    package_map = {}

    for file_path in files:
        if not file_path.endswith(".py"):
            continue

        # Handle both absolute and relative paths
        abs_path = os.path.abspath(file_path)

        # Walk up the directory tree collecting package names (directories
        # that contain an __init__.py), outermost package first.
        parts = []
        current = os.path.dirname(abs_path)
        while current and os.path.exists(
            os.path.join(current, "__init__.py")
        ):
            parts.insert(0, os.path.basename(current))
            parent = os.path.dirname(current)
            if parent == current:  # Reached the filesystem root
                break
            current = parent

        # Use the file name (without .py) as the last part
        base_name = os.path.basename(file_path)
        if base_name.endswith(".py"):
            if base_name != "__init__.py":
                module_name = os.path.splitext(base_name)[0]
                parts.append(module_name)

            full_module_name = ".".join(parts) if parts else None
            if full_module_name:
                module_map[full_module_name] = file_path

            # Also map short names for common imports
            if module_name := os.path.splitext(base_name)[0]:
                # Don't overwrite existing mappings with short names
                if module_name not in module_map:
                    module_map[module_name] = file_path

            # Map package names
            for i in range(len(parts)):
                package_name = ".".join(parts[: i + 1])
                package_map[package_name] = os.path.dirname(file_path)

    # Now analyze imports in each file
    for file_path in files:
        if not file_path.endswith(".py"):
            continue

        try:
            with open(file_path, encoding="utf-8") as f:
                code = f.read()

            tree = ast.parse(code)

            for node in ast.walk(tree):
                # Handle direct imports: import x, import x.y
                if isinstance(node, ast.Import):
                    for name in node.names:
                        # Check for the full module path
                        module_path = name.name
                        if module_path in module_map:
                            dependencies[file_path].add(module_map[module_path])

                        # Check for package imports (longest prefix first)
                        parts = module_path.split(".")
                        for i in range(len(parts), 0, -1):
                            prefix = ".".join(parts[:i])
                            if prefix in module_map:
                                dependencies[file_path].add(module_map[prefix])
                                break

                # Handle from imports: from x import y, from x.y import z
                elif isinstance(node, ast.ImportFrom):
                    if node.module:  # from x import y
                        # See if the module is in our map
                        if node.module in module_map:
                            dependencies[file_path].add(module_map[node.module])

                        # Check for package imports
                        for prefix in get_module_prefixes(node.module):
                            if prefix in module_map:
                                dependencies[file_path].add(module_map[prefix])

                    # Handle relative imports: from . import x, from .. import y
                    if node.level > 0:  # Relative import
                        # Get the directory of the current file
                        dir_path = os.path.dirname(file_path)

                        # Go up levels according to the number of dots
                        for _ in range(node.level - 1):
                            dir_path = os.path.dirname(dir_path)

                        # Try to find matching imports
                        for name in node.names:
                            if node.module:
                                target_module = f"{node.module}.{name.name}"
                            else:
                                target_module = name.name

                            # Check for the module within the relative directory
                            rel_path = os.path.join(
                                dir_path, target_module.replace(".", os.sep)
                            )

                            # Try with .py extension first
                            py_path = f"{rel_path}.py"
                            if os.path.exists(py_path) and py_path in files:
                                dependencies[file_path].add(py_path)

                            # Try as directory with __init__.py
                            init_path = os.path.join(rel_path, "__init__.py")
                            if os.path.exists(init_path) and init_path in files:
                                dependencies[file_path].add(init_path)

        except Exception as e:
            print(f"Warning: Could not analyze imports in {file_path}: {e}")

    return dependencies


def get_module_prefixes(module_name: str) -> list[str]:
    """Return every dotted prefix of *module_name*, longest first.

    >>> get_module_prefixes("a.b.c")
    ['a.b.c', 'a.b', 'a']
    """
    segments = module_name.split(".")
    prefixes = []
    # Peel one trailing segment per iteration, longest prefix first.
    while segments:
        prefixes.append(".".join(segments))
        segments = segments[:-1]
    return prefixes


def generate_folder_tree(folder: str, included_files: list[str]) -> str:
    """Generate an ASCII folder tree representation, only showing directories and files that are included.

    Args:
        folder: Root directory the tree starts from.
        included_files: The only file paths allowed to appear in the tree;
            directories are shown only if they (transitively) contain one.

    Returns:
        The tree as a single newline-joined string.
    """
    tree_output = []
    included_paths = set(included_files)

    # Get all directories containing included files
    included_dirs = set()
    for file_path in included_paths:
        dir_path = os.path.dirname(file_path)
        while dir_path and dir_path != folder:
            included_dirs.add(dir_path)
            dir_path = os.path.dirname(dir_path)

    def _generate_tree(dir_path: str, prefix: str = "", is_last: bool = True):
        # Recursively append one line per directory/file, using box-drawing
        # prefixes to show nesting depth.
        # Get the directory name
        dir_name = os.path.basename(dir_path) or dir_path

        # Add the current directory to the output
        tree_output.append(
            f"{prefix}{'└── ' if is_last else '├── '}{dir_name}/"
        )

        # Update prefix for children
        new_prefix = f"{prefix}{'    ' if is_last else '│   '}"

        # Get relevant entries in the directory
        try:
            entries = os.listdir(dir_path)
            relevant_dirs = []
            relevant_files = []

            for entry in entries:
                entry_path = os.path.join(dir_path, entry)
                if os.path.isdir(entry_path):
                    # Include directory if it or any of its subdirectories contain included files
                    if (
                        any(
                            f.startswith(entry_path + os.sep)
                            for f in included_paths
                        )
                        or entry_path in included_dirs
                    ):
                        relevant_dirs.append(entry)
                elif entry_path in included_paths:
                    # Only include the specific files we're interested in
                    relevant_files.append(entry)

            # Sort entries for consistent output
            relevant_dirs.sort()
            relevant_files.sort()

            # Process relevant subdirectories; a directory is drawn as the
            # "last" item only when no files follow it at this level
            for i, entry in enumerate(relevant_dirs):
                entry_path = os.path.join(dir_path, entry)
                is_last_dir = i == len(relevant_dirs) - 1
                is_last_item = is_last_dir and len(relevant_files) == 0
                _generate_tree(entry_path, new_prefix, is_last_item)

            # Process relevant files
            for i, entry in enumerate(relevant_files):
                is_last_file = i == len(relevant_files) - 1
                tree_output.append(
                    f"{new_prefix}{'└── ' if is_last_file else '├── '}{entry}"
                )

        except (PermissionError, FileNotFoundError):
            # Unreadable directories are silently omitted from the tree.
            return

    # Start the recursion from the root folder
    _generate_tree(folder)

    return "\n".join(tree_output)


def get_common_patterns(files: list[str]) -> dict[str, list[str]]:
    """Heuristically detect common design patterns across *files*.

    Each Python file is scanned (lowercased) for naming and code-shape
    clues; categories with no hits are dropped from the result.
    """
    patterns = {
        "singleton": [],
        "factory": [],
        "observer": [],
        "decorator": [],
        "mvc_components": {"models": [], "views": [], "controllers": []},
    }

    for file_path in files:
        if not file_path.endswith(".py"):
            continue

        try:
            with open(file_path, encoding="utf-8") as f:
                content = f.read().lower()
        except Exception:
            continue

        basename = os.path.basename(file_path).lower()

        # Singleton: class-level instance sentinel plus a custom __new__.
        if "instance = none" in content and "__new__" in content:
            patterns["singleton"].append(file_path)

        # Factory: by file name, or a create-and-return function body.
        if "factory" in basename or (
            "def create" in content and "return" in content
        ):
            patterns["factory"].append(file_path)

        # Observer: subject/observer vocabulary plus a notification verb.
        if ("observer" in content or "listener" in content) and (
            "notify" in content or "update" in content
        ):
            patterns["observer"].append(file_path)

        # Decorator: by file name, or the classic wrapper idiom.
        if "decorator" in basename or (
            "def wrapper" in content and "return wrapper" in content
        ):
            patterns["decorator"].append(file_path)

        # MVC: bucket by naming convention (model / view / controller|handler).
        if "model" in basename:
            patterns["mvc_components"]["models"].append(file_path)
        elif "view" in basename:
            patterns["mvc_components"]["views"].append(file_path)
        elif "controller" in basename or "handler" in basename:
            patterns["mvc_components"]["controllers"].append(file_path)

    # Drop categories (and nested category groups) with no hits.
    for key in list(patterns):
        value = patterns[key]
        if isinstance(value, list) and not value:
            patterns.pop(key)
        elif isinstance(value, dict) and not any(value.values()):
            patterns.pop(key)

    return patterns


def find_key_files(
    files: list[str], dependencies: dict[str, set[str]]
) -> list[str]:
    """Identify key files based on dependencies and naming conventions.

    Scores every file by how many other files depend on it, by name/path
    heuristics, and by size, then returns roughly the top 25% (at least 5
    when available), highest score first.
    """
    # Initialize scores for each file
    scores = {file: 0 for file in files}

    # Track how many files depend on each file (dependents)
    dependent_count = {file: 0 for file in files}
    for file, deps in dependencies.items():
        for dep in deps:
            if dep in dependent_count:
                dependent_count[dep] += 1

    # Score by number of files that depend on this file
    for file, count in dependent_count.items():
        scores[file] += count * 2

    # Score by file naming heuristics
    for file in files:
        base_name = os.path.basename(file).lower()

        # Core files
        if any(
            core_name in base_name
            for core_name in ["main", "app", "core", "init", "cli"]
        ):
            scores[file] += 5

        # Configuration and settings
        if any(
            config_name in base_name
            for config_name in ["config", "settings", "constants"]
        ):
            scores[file] += 3

        # Base classes and abstract components
        # (bug fix: the loop variable previously shadowed `base_name`, so the
        # test read `base_name in base_name` and was true for EVERY file)
        if any(
            pattern in base_name
            for pattern in ["base", "abstract", "interface", "factory"]
        ):
            scores[file] += 2

        # Utilities and helpers
        if any(
            util_name in base_name
            for util_name in ["util", "helper", "common", "tools"]
        ):
            scores[file] += 1

        # Score directories by importance
        dir_path = os.path.dirname(file)
        if "core" in dir_path.lower():
            scores[file] += 2
        if "main" in dir_path.lower():
            scores[file] += 1

        # Score by file size and complexity
        try:
            metadata = get_file_metadata(file)
            line_count = metadata["line_count"]
            scores[file] += min(line_count / 50, 3)  # Cap at 3 points for size

            # Additional points for very significant files
            if line_count > 200:
                scores[file] += 1
        except Exception:
            # Best effort: unreadable files simply get no size score.
            pass

        # Score by extension - Python files are often more important
        if file.endswith(".py"):
            scores[file] += 1

        # Examples and documentation are important but not as much as core files
        if "example" in file.lower() or "demo" in file.lower():
            scores[file] += 0.5

    # Sort by score in descending order
    key_files = sorted(files, key=lambda f: scores[f], reverse=True)

    # Debugging info
    print("Top 5 key files with scores:")
    for file in key_files[:5]:
        print(f"  {file}: {scores[file]:.1f} points")

    # Return top 25% of files or at least 5 files (if available)
    num_key_files = max(min(len(files) // 4, 20), min(5, len(files)))
    return key_files[:num_key_files]


def generate_markdown_string(
    files: list[str],
    extension: str,
    folder: str,
    key_files: list[str],
    dependencies: dict[str, set[str]],
    patterns: dict[str, list[str]],
) -> str:
    """Generate a comprehensive markdown document about the codebase as a string.

    Args:
        files: All files included in the analysis.
        extension: The analyzed file extension (used for code-fence tags).
        folder: Root folder; paths are rendered relative to it.
        key_files: Files to document in the detailed "Key Files" section.
        dependencies: file -> set of files it imports (drives "Used by").
        patterns: Detected design patterns (lists, or nested dicts of lists).

    Returns:
        The full markdown report as one string.
    """
    md_content = []

    # Write header
    md_content.append(f"# Code Repository Analysis\n")
    md_content.append(f"Generated on {datetime.datetime.now()}\n\n")

    # Write repository summary
    md_content.append("## Repository Summary\n\n")
    md_content.append(f"- **Extension analyzed**: `{extension}`\n")
    md_content.append(f"- **Number of files**: {len(files)}\n")
    md_content.append(f"- **Root folder**: `{folder}`\n")

    total_lines = sum(get_file_metadata(f)["line_count"] for f in files)
    md_content.append(f"- **Total lines of code**: {total_lines}\n\n")

    # Generate and write folder tree
    md_content.append("## Project Structure\n\n")
    md_content.append("```\n")
    md_content.append(generate_folder_tree(folder, files))
    md_content.append("\n```\n\n")

    # Write key files section
    md_content.append("## Key Files\n\n")
    md_content.append(
        "These files appear to be central to the codebase based on dependencies and naming conventions:\n\n"
    )

    for file in key_files:
        rel_path = os.path.relpath(file, folder)
        md_content.append(f"### {rel_path}\n\n")

        metadata = get_file_metadata(file)
        md_content.append(f"- **Lines**: {metadata['line_count']}\n")
        md_content.append(f"- **Last modified**: {metadata['last_modified']}\n")

        # Add dependency info ("Used by" = how many files import this one)
        dependent_files = [
            os.path.relpath(f, folder)
            for f in dependencies
            if file in dependencies[f]
        ]
        if dependent_files:
            md_content.append(f"- **Used by**: {len(dependent_files)} files\n")

        # For Python files, add component analysis
        if file.endswith(".py"):
            components = extract_python_components(file)

            if components["docstring"]:
                md_content.append(
                    f"\n**Description**: {components['docstring'].strip()}\n"
                )

            if components["classes"]:
                md_content.append("\n**Classes**:\n")
                for cls in components["classes"]:
                    md_content.append(
                        f"- `{cls['name']}`: {len(cls['methods'])} methods\n"
                    )

            if components["functions"]:
                md_content.append("\n**Functions**:\n")
                for func in components["functions"]:
                    md_content.append(
                        f"- `{func['name']}({', '.join(func['args'])})`\n"
                    )

        md_content.append("\n**Content**:\n")
        md_content.append(f"```{extension.lstrip('.')}\n")

        # Read and write file content
        try:
            with open(file, encoding="utf-8") as code_file:
                content = code_file.read()
                md_content.append(content)
                # Keep the closing fence on its own line.
                if not content.endswith("\n"):
                    md_content.append("\n")
        except Exception as e:
            md_content.append(f"Error reading file: {e!s}\n")

        md_content.append("```\n\n")

    # Write design patterns section if any were detected
    if patterns:
        md_content.append("## Design Patterns\n\n")
        md_content.append(
            "The following design patterns appear to be used in this codebase:\n\n"
        )

        for pattern, files_list in patterns.items():
            if isinstance(files_list, list) and files_list:
                md_content.append(f"### {pattern.title()} Pattern\n\n")
                for f in files_list:
                    md_content.append(f"- `{os.path.relpath(f, folder)}`\n")
                md_content.append("\n")
            elif isinstance(files_list, dict):
                # Nested category (e.g. MVC components) gets sub-headings.
                md_content.append(
                    f"### {pattern.replace('_', ' ').title()}\n\n"
                )
                for subpattern, subfiles in files_list.items():
                    if subfiles:
                        md_content.append(f"**{subpattern.title()}**:\n")
                        for f in subfiles:
                            md_content.append(
                                f"- `{os.path.relpath(f, folder)}`\n"
                            )
                        md_content.append("\n")

    # Write all other files section
    md_content.append("## All Files\n\n")

    for file in files:
        if file in key_files:
            continue  # Skip files already detailed in key files section

        rel_path = os.path.relpath(file, folder)
        md_content.append(f"### {rel_path}\n\n")

        metadata = get_file_metadata(file)
        md_content.append(f"- **Lines**: {metadata['line_count']}\n")
        md_content.append(
            f"- **Last modified**: {metadata['last_modified']}\n\n"
        )

        md_content.append("```" + extension.lstrip(".") + "\n")

        # Read and write file content
        try:
            with open(file, encoding="utf-8") as code_file:
                content = code_file.read()
                md_content.append(content)
                if not content.endswith("\n"):
                    md_content.append("\n")
        except Exception as e:
            md_content.append(f"Error reading file: {e!s}\n")

        md_content.append("```\n\n")

    return "".join(md_content)


def collect_code(extension: str = ".py", folder: str = ".") -> str:
    """Main function to analyze code repository and generate markdown string.

    Args:
        extension: File extension to analyze
        folder: Root folder to analyze

    Returns:
        A string containing the markdown analysis
    """
    print(f"Analyzing {extension} files from {folder}...")

    files = find_files(folder, extension)
    print(f"Found {len(files)} files")

    # Gather everything the report needs: the import graph, the most
    # central files, and heuristic design-pattern hits.
    dependencies = analyze_code_dependencies(files)
    key_files = find_key_files(files, dependencies)
    patterns = get_common_patterns(files)

    report = generate_markdown_string(
        files, extension, folder, key_files, dependencies, patterns
    )
    print("Repository analysis complete.")
    return report


if __name__ == "__main__":
    import argparse

    # CLI entry point: choose extension, folder, and output path.
    arg_parser = argparse.ArgumentParser(
        description="Analyze code repository for LLM consumption"
    )
    arg_parser.add_argument(
        "-c", "--extension", default=".py", help="File extension to analyze"
    )
    arg_parser.add_argument("-f", "--folder", default=".", help="Folder to analyze")
    arg_parser.add_argument(
        "-o",
        "--output",
        default="repository_analysis.md",
        help="Output markdown file",
    )
    args = arg_parser.parse_args()

    # Build the report, then persist it to the requested file.
    report = collect_code(args.extension, args.folder)
    with open(args.output, "w", encoding="utf-8") as output_file:
        output_file.write(report)

    print(f"Output written to '{args.output}'")
```

### src\flock\cli\constants.py

- **Lines**: 25
- **Last modified**: 2025-04-03 03:11:34

```py
"""Constants for the CLI module."""

CLI_CREATE_AGENT = "Create an agent"
CLI_CREATE_FLOCK = "Create a new Flock"
CLI_LOAD_AGENT = "Load an agent"
CLI_LOAD_FLOCK = "Load a *.flock file"
CLI_THEME_BUILDER = "Theme builder"
CLI_LOAD_EXAMPLE = "Load a example"
CLI_SETTINGS = "Settings"
CLI_NOTES = "'Hummingbird' release notes"
CLI_START_WEB_SERVER = "Start web server"
CLI_REGISTRY_MANAGEMENT = "Registry management"
CLI_EXIT = "Exit"
CLI_CHOICES = [
    CLI_CREATE_AGENT,
    CLI_CREATE_FLOCK,
    CLI_LOAD_AGENT,
    CLI_LOAD_FLOCK,
    CLI_LOAD_EXAMPLE,
    CLI_THEME_BUILDER,
    CLI_REGISTRY_MANAGEMENT,
    CLI_SETTINGS,
    CLI_START_WEB_SERVER,
    CLI_EXIT,
]
```

### src\flock\cli\create_agent.py

- **Lines**: 1
- **Last modified**: 2025-02-18 03:20:40

```py
# TODO
```

### src\flock\cli\create_flock.py

- **Lines**: 220
- **Last modified**: 2025-04-02 23:12:31

```py
"""Create a new Flock through a guided wizard.

This module provides a wizard-like interface for creating new Flock instances,
with options for basic configuration and initial agent creation.
"""

from pathlib import Path

import questionary
from rich.console import Console
from rich.panel import Panel

from flock.cli.loaded_flock_cli import start_loaded_flock_cli
from flock.core.flock import Flock
from flock.core.flock_factory import FlockFactory
from flock.core.util.cli_helper import init_console

# Create console instance
console = Console()


def create_flock():
    """Create a new Flock through a guided wizard.

    Walks the user through three steps: basic configuration (description,
    model, execution/logging options), optional creation of a first agent,
    and a save/continue/execute choice.
    """
    init_console()
    console.print(Panel("[bold green]Create New Flock[/]"), justify="center")
    console.line()

    # Step 1: Basic Flock Configuration
    console.print("[bold]Step 1: Basic Flock Configuration[/]")
    console.line()

    # Get description
    description = questionary.text(
        "Enter a description for this Flock (optional):",
        default="",
    ).ask()

    # Default model selection
    default_models = [
        "openai/gpt-4o",
        "openai/gpt-3.5-turbo",
        "anthropic/claude-3-opus-20240229",
        "anthropic/claude-3-sonnet-20240229",
        "gemini/gemini-1.5-pro",
        "Other (specify)",
    ]

    model_choice = questionary.select(
        "Select a default model:",
        choices=default_models,
    ).ask()

    if model_choice == "Other (specify)":
        model = questionary.text(
            "Enter the model identifier:",
            default="openai/gpt-4o",
        ).ask()
    else:
        model = model_choice

    # Execution options
    enable_temporal = questionary.confirm(
        "Enable Temporal for distributed execution?",
        default=False,
    ).ask()

    # Logging configuration
    enable_logging = questionary.confirm(
        "Enable logging?",
        default=True,
    ).ask()

    # Create the Flock instance
    flock = Flock(
        model=model,
        description=description,
        enable_temporal=enable_temporal,
        enable_logging=enable_logging,
    )

    console.print("\n[green]✓[/] Flock created successfully!")
    console.line()

    # Step 2: Create Initial Agent (optional)
    create_agent = questionary.confirm(
        "Would you like to create an initial agent?",
        default=True,
    ).ask()

    if create_agent:
        _create_initial_agent(flock)

    # Step 3: Save Options
    console.print("\n[bold]Step 3: Save Options[/]")
    console.line()

    save_choice = questionary.select(
        "What would you like to do with this Flock?",
        choices=[
            "Save to YAML file",
            "Continue in CLI without saving",
            "Execute immediately",
            "Cancel and discard",
        ],
    ).ask()

    if save_choice == "Save to YAML file":
        _save_flock_to_yaml(flock)

        # Ask if user wants to continue working with this Flock
        continue_with_flock = questionary.confirm(
            "Would you like to continue working with this Flock in the CLI?",
            default=True,
        ).ask()

        if continue_with_flock:
            start_loaded_flock_cli(flock, server_name="New Flock")

    elif save_choice == "Continue in CLI without saving":
        start_loaded_flock_cli(flock, server_name="New Flock")

    elif save_choice == "Execute immediately":
        try:
            # Bug fix: the import previously sat OUTSIDE this try, so a
            # missing execute module raised instead of being caught by the
            # ImportError handler below.
            from flock.cli.execute_flock import execute_flock

            execute_flock(flock)
        except ImportError:
            console.print(
                "[yellow]Execute functionality not yet implemented.[/]"
            )
            input("\nPress Enter to continue...")
            start_loaded_flock_cli(flock, server_name="New Flock")

    # "Cancel and discard" falls through: the new Flock is simply dropped.


def _create_initial_agent(flock):
    """Prompt for an initial agent's settings and attach it to the Flock.

    Args:
        flock: The Flock instance to add the agent to
    """
    console.print("\n[bold]Step 2: Create Initial Agent[/]")
    console.line()

    def _ask_text(message, default):
        # Thin wrapper to keep the prompts below compact.
        return questionary.text(message, default=default).ask()

    def _ask_confirm(message, default):
        return questionary.confirm(message, default=default).ask()

    # Gather the agent definition interactively.
    name = _ask_text("Enter a name for the agent:", "my_agent")
    description = _ask_text(
        "Enter a description for the agent (optional):", ""
    )
    input_spec = _ask_text(
        "Enter input specification (e.g., 'query: str | The search query'):",
        "query",
    )
    output_spec = _ask_text(
        "Enter output specification (e.g., 'result: str | The generated result'):",
        "result",
    )

    # Behavior toggles.
    use_cache = _ask_confirm("Enable caching for this agent?", True)
    enable_rich_tables = _ask_confirm(
        "Enable rich table output for this agent?", True
    )

    # Build the agent from the collected answers and register it.
    agent = FlockFactory.create_default_agent(
        name=name,
        description=description,
        input=input_spec,
        output=output_spec,
        use_cache=use_cache,
        enable_rich_tables=enable_rich_tables,
    )
    flock.add_agent(agent)
    console.print(f"\n[green]✓[/] Agent '{name}' created and added to Flock!")


def _save_flock_to_yaml(flock):
    """Save the Flock to a YAML file.

    Prompts for a file path, normalizes the extension, creates parent
    directories, and serializes the Flock via to_yaml_file.

    Args:
        flock: The Flock instance to save
    """
    # Get file path
    default_name = "my_flock.flock.yaml"
    file_path = questionary.text(
        "Enter file path to save Flock:",
        default=default_name,
    ).ask()

    # ask() returns None when the prompt is cancelled; calling string
    # methods on None would raise AttributeError below.
    if not file_path:
        return

    # Ensure the file has the correct extension
    if not file_path.endswith((".yaml", ".yml")):
        file_path += ".yaml"

    # Create directory if it doesn't exist
    save_path = Path(file_path)
    save_path.parent.mkdir(parents=True, exist_ok=True)

    try:
        # Save the Flock to YAML
        flock.to_yaml_file(file_path)
        console.print(f"\n[green]✓[/] Flock saved to {file_path}")
    except Exception as e:
        console.print(f"\n[bold red]Error saving Flock:[/] {e!s}")
```

### src\flock\cli\execute_flock.py

- **Lines**: 200
- **Last modified**: 2025-04-02 23:12:31

```py
"""Execute a Flock instance with a selected agent.

This module provides functionality to execute a Flock instance with
a selected agent and input configuration.
"""

import json

import questionary
from rich.console import Console
from rich.panel import Panel

from flock.core.flock import Flock
from flock.core.util.cli_helper import init_console

# Create console instance
console = Console()


def execute_flock(flock: Flock):
    """Execute a Flock instance.

    Interactively selects a start agent, collects input values based on
    the agent's input schema, previews the configuration, and runs the
    Flock, printing the result to the console.

    Args:
        flock: The Flock instance to execute
    """
    if not flock:
        console.print("[bold red]Error: No Flock instance provided.[/]")
        return

    agent_names = list(flock._agents.keys())

    if not agent_names:
        console.print("[yellow]No agents in this Flock to execute.[/]")
        return

    init_console()
    console.print(Panel("[bold green]Execute Flock[/]"), justify="center")

    # Step 1: Select start agent
    console.print("\n[bold]Step 1: Select Start Agent[/]")

    start_agent_name = questionary.select(
        "Select an agent to start with:",
        choices=agent_names,
    ).ask()

    if not start_agent_name:
        return

    start_agent = flock._agents[start_agent_name]

    # Step 2: Configure input
    console.print("\n[bold]Step 2: Configure Input[/]")

    # Parse input schema
    input_fields = _parse_input_schema(start_agent.input)

    # If we couldn't parse any fields, ask for generic input
    if not input_fields:
        raw_input = questionary.text(
            "Enter input (JSON format):",
            default="{}",
        ).ask()

        # Cancelled prompt; json.loads(None) would raise TypeError
        if raw_input is None:
            return

        try:
            input_data = json.loads(raw_input)
        except json.JSONDecodeError:
            console.print("[bold red]Error: Invalid JSON input.[/]")
            return
    else:
        # Otherwise, ask for each field
        input_data = {}

        for field, info in input_fields.items():
            field_type = info.get("type", "str")
            description = info.get("description", "")
            prompt = f"Enter value for '{field}'"

            if description:
                prompt += f" ({description})"

            prompt += ":"

            value = questionary.text(prompt).ask()

            # Convert value to appropriate type. TypeError covers a
            # cancelled prompt where ask() returns None.
            if field_type == "int":
                try:
                    value = int(value)
                except (TypeError, ValueError):
                    console.print(
                        "[yellow]Warning: Could not convert value to int, using as string.[/]"
                    )

            input_data[field] = value

    # Step 3: Run Options
    console.print("\n[bold]Step 3: Run Options[/]")

    # Logging options
    enable_logging = questionary.confirm(
        "Enable detailed logging?",
        default=False,
    ).ask()

    # Preview input
    console.print("\n[bold]Input Preview:[/]")
    console.print(json.dumps(input_data, indent=2))

    # Confirm execution
    confirm = questionary.confirm(
        "Execute Flock with this configuration?",
        default=True,
    ).ask()

    if not confirm:
        return

    # Execute the Flock
    console.print("\n[bold]Executing Flock...[/]")

    try:
        # Handle logging settings
        if enable_logging:
            # Enable logging through the logging configuration method
            flock._configure_logging(True)

        # Run the Flock
        result = flock.run(
            start_agent=start_agent_name,
            input=input_data,
        )

        # Display result
        console.print("\n[bold green]Execution Complete![/]")

        if result and enable_logging:
            console.print("\n[bold]Result:[/]")
            if isinstance(result, dict):
                # Display as formatted JSON
                console.print(json.dumps(result, indent=2))
            else:
                # Display as plain text
                console.print(str(result))

    except Exception as e:
        console.print(f"\n[bold red]Error during execution:[/] {e!s}")


def _parse_input_schema(input_schema: str) -> dict[str, dict[str, str]]:
    """Parse the input schema string into a field dictionary.

    Args:
        input_schema: The input schema string (e.g., "query: str | The search query")

    Returns:
        A dictionary mapping field names to field info (type, description)
    """
    if not input_schema:
        return {}

    fields = {}

    try:
        # Split by comma for multiple fields
        for field_def in input_schema.split(","):
            field_def = field_def.strip()

            # Check for type hint with colon
            if ":" in field_def:
                field_name, rest = field_def.split(":", 1)
                field_name = field_name.strip()
                rest = rest.strip()

                # Check for description with pipe
                if "|" in rest:
                    field_type, description = rest.split("|", 1)
                    fields[field_name] = {
                        "type": field_type.strip(),
                        "description": description.strip(),
                    }
                else:
                    fields[field_name] = {"type": rest.strip()}
            else:
                # Just a field name without type hint
                if "|" in field_def:
                    field_name, description = field_def.split("|", 1)
                    fields[field_name.strip()] = {
                        "description": description.strip()
                    }
                else:
                    fields[field_def.strip()] = {}

    except Exception as e:
        console.print(
            f"[yellow]Warning: Could not parse input schema: {e!s}[/]"
        )
        return {}

    return fields
```

### src\flock\cli\load_agent.py

- **Lines**: 1
- **Last modified**: 2025-02-18 03:20:40

```py
# TODO
```

### src\flock\cli\load_examples.py

- **Lines**: 1
- **Last modified**: 2025-02-18 03:20:40

```py
# TODO
```

### src\flock\cli\load_flock.py

- **Lines**: 58
- **Last modified**: 2025-04-02 23:12:31

```py
"""Load a Flock from a file."""

from pathlib import Path

import questionary
from rich.console import Console
from rich.markdown import Markdown

from flock.cli.loaded_flock_cli import start_loaded_flock_cli
from flock.core.flock import Flock


def filter(file_path) -> bool:
    """Accept directories and Flock-serialization files for selection."""
    candidate = Path(file_path)
    # Directories must pass so the user can navigate into them
    if candidate.is_dir():
        return True
    return candidate.is_file() and candidate.suffix in (
        ".flock",
        ".yaml",
        ".yml",
    )


def load_flock():
    """Interactively pick a serialized Flock file and load it.

    On success the enhanced CLI is started with the loaded instance; on
    failure the error is printed and the user is prompted to continue.
    """
    console = Console()

    console.print(
        "\nPlease select a *.flock, *.yaml, or *.yml file\n", style="bold green"
    )

    answer = questionary.path("", file_filter=filter).ask()

    if not answer:
        return

    chosen = Path(answer)
    if not chosen.is_file():
        return

    console.print(f"Selected file: {chosen}", style="bold green")

    try:
        loaded = Flock.load_from_file(answer)

        console.line()
        console.print(
            Markdown("# Flock loaded successfully"), style="bold green"
        )
        console.line()

        # Instead of just running the Flock, start our enhanced CLI
        start_loaded_flock_cli(
            loaded, server_name=f"Flock - {chosen.name}"
        )

    except Exception as e:
        console.print(f"Error loading Flock: {e!s}", style="bold red")
        input("\nPress Enter to continue...")
```

### src\flock\cli\load_release_notes.py

- **Lines**: 23
- **Last modified**: 2025-02-24 03:21:51

```py
from pathlib import Path

from flock.core.util.cli_helper import display_hummingbird


def load_release_notes():
    """Render the bundled 'Hummingbird' release notes to the console."""
    from rich.console import Console
    from rich.markdown import Markdown

    from flock.core.util.cli_helper import init_console

    console = Console()
    notes_path = Path(__file__).parent / "assets" / "release_notes.md"

    init_console()
    console.print(Markdown("# *'Hummingbird'* Release Notes"))
    display_hummingbird()

    # Render the markdown file shipped alongside this module
    console.print(Markdown(notes_path.read_text()))
```

### src\flock\cli\manage_agents.py

- **Lines**: 443
- **Last modified**: 2025-04-02 23:12:31

```py
"""Agent management functionality for the Flock CLI.

This module provides a CLI interface for managing agents within a Flock system,
including listing, adding, editing, and removing agents.
"""

import questionary
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent
from flock.core.flock_factory import FlockFactory
from flock.core.util.cli_helper import init_console

# Create console instance
console = Console()


def manage_agents(flock: Flock):
    """Agent management entry point.

    Presents a looping menu for listing, adding, editing, removing,
    exporting, and importing agents until the user goes back.

    Args:
        flock: The Flock instance containing agents to manage
    """
    if not flock:
        console.print("[bold red]Error: No Flock instance provided.[/]")
        return

    # Menu label -> handler; insertion order drives the menu order
    actions = {
        "List All Agents": _list_agents,
        "Add New Agent": _add_agent,
        "Edit Agent": _edit_agent,
        "Remove Agent": _remove_agent,
        "Export Agent to YAML": _export_agent,
        "Import Agent from YAML": _import_agent,
    }

    while True:
        init_console()
        console.print(Panel("[bold green]Agent Manager[/]"), justify="center")

        names = list(flock._agents.keys())
        console.print(f"Flock contains [bold cyan]{len(names)}[/] agents")

        if names:
            console.print(f"Agents: {', '.join(names)}")
        else:
            console.print("[yellow]No agents in this Flock yet.[/]")

        console.line()

        choice = questionary.select(
            "What would you like to do?",
            choices=[
                questionary.Separator(line=" "),
                *actions.keys(),
                questionary.Separator(),
                "Back to Main Menu",
            ],
        ).ask()

        if choice == "Back to Main Menu":
            break

        handler = actions.get(choice)
        if handler is not None:
            handler(flock)

        input("\nPress Enter to continue...")


def _list_agents(flock: Flock):
    """List all agents in the Flock.

    Renders a summary table of every agent and optionally drills down
    into a detailed view for one of them.

    Args:
        flock: The Flock instance
    """
    agent_names = list(flock._agents.keys())

    if not agent_names:
        console.print("[yellow]No agents in this Flock.[/]")
        return

    def _truncate(text: str, limit: int = 30) -> str:
        # Keep table cells readable; this logic was duplicated inline
        # three times before.
        return text if len(text) <= limit else text[: limit - 3] + "..."

    # Create table for agents
    table = Table(title="Agents in Flock")
    table.add_column("Name", style="cyan")
    table.add_column("Model", style="green")
    table.add_column("Description", style="yellow")
    table.add_column("Input", style="magenta")
    table.add_column("Output", style="magenta")

    for name in agent_names:
        agent = flock._agents[name]

        table.add_row(
            name,
            # Fall back to the Flock-level model, then a placeholder
            agent.model or flock.model or "Default",
            _truncate(str(agent.description)),
            _truncate(str(agent.input)),
            _truncate(str(agent.output)),
        )

    console.print(table)

    # Option to view detailed info for a specific agent
    # (agent_names is known non-empty after the early return above)
    view_details = questionary.confirm(
        "View detailed information for an agent?",
        default=False,
    ).ask()

    if view_details:
        agent_to_view = questionary.select(
            "Select an agent to view:",
            choices=agent_names,
        ).ask()

        if agent_to_view:
            _view_agent_details(flock._agents[agent_to_view])


def _view_agent_details(agent: FlockAgent):
    """Display detailed information about an agent.

    Renders the agent's basic properties, evaluator and router types,
    tools, and modules as a series of rich panels.

    Args:
        agent: The agent to display details for
    """
    init_console()
    console.print(
        Panel(f"[bold green]Agent Details: {agent.name}[/]"), justify="center"
    )

    # Create a panel for each section
    basic_info = Table(show_header=False, box=None)
    basic_info.add_column("Property", style="cyan")
    basic_info.add_column("Value", style="green")

    basic_info.add_row("Name", agent.name)
    basic_info.add_row("Model", str(agent.model or "Default"))
    basic_info.add_row("Description", str(agent.description))
    basic_info.add_row("Input", str(agent.input))
    basic_info.add_row("Output", str(agent.output))
    basic_info.add_row("Use Cache", str(agent.use_cache))

    console.print(Panel(basic_info, title="Basic Information"))

    # Evaluator info: only the component's class name is shown
    evaluator_info = (
        f"Type: {type(agent.evaluator).__name__ if agent.evaluator else 'None'}"
    )
    console.print(Panel(evaluator_info, title="Evaluator"))

    # Router info: likewise, class name or 'None'
    router_info = f"Type: {type(agent.handoff_router).__name__ if agent.handoff_router else 'None'}"
    console.print(Panel(router_info, title="Router"))

    # Tools are callables; display their function names
    if agent.tools:
        tool_names = [t.__name__ for t in agent.tools]
        tools_info = ", ".join(tool_names)
    else:
        tools_info = "None"

    console.print(Panel(tools_info, title="Tools"))

    # Modules: tabulate name, class, and whether each is enabled
    if agent.modules:
        module_table = Table(show_header=True)
        module_table.add_column("Name", style="cyan")
        module_table.add_column("Type", style="green")
        module_table.add_column("Enabled", style="yellow")

        for name, module in agent.modules.items():
            module_table.add_row(
                name,
                type(module).__name__,
                "Yes" if module.config.enabled else "No",
            )

        console.print(Panel(module_table, title="Modules"))
    else:
        console.print(Panel("None", title="Modules"))


def _add_agent(flock: Flock):
    """Add a new agent to the Flock.

    Prompts for name, description, input/output specifications, model,
    and options, then creates the agent via FlockFactory and registers
    it on the Flock.

    Args:
        flock: The Flock instance to add the agent to
    """
    console.print("\n[bold]Add New Agent[/]")
    console.line()

    # Get agent name
    name = questionary.text(
        "Enter a name for the agent:",
        default="my_agent",
    ).ask()

    # ask() returns None on a cancelled prompt; previously this fell
    # through and created an agent named None.
    if not name:
        return

    # Check for name conflicts
    if name in flock._agents:
        console.print(
            f"[bold red]Error: An agent named '{name}' already exists.[/]"
        )
        return

    # Get agent description
    description = questionary.text(
        "Enter a description for the agent (optional):",
        default="",
    ).ask()

    # Get input specification
    input_spec = questionary.text(
        "Enter input specification (e.g., 'query: str | The search query'):",
        default="query",
    ).ask()

    # Get output specification
    output_spec = questionary.text(
        "Enter output specification (e.g., 'result: str | The generated result'):",
        default="result",
    ).ask()

    # Model selection
    use_flock_model = questionary.confirm(
        f"Use Flock's default model ({flock.model or 'None'})? Select 'n' to specify a different model.",
        default=True,
    ).ask()

    if use_flock_model:
        model = None  # Use Flock's default
    else:
        default_models = [
            "openai/gpt-4o",
            "openai/gpt-3.5-turbo",
            "anthropic/claude-3-opus-20240229",
            "anthropic/claude-3-sonnet-20240229",
            "gemini/gemini-1.5-pro",
            "Other (specify)",
        ]

        model_choice = questionary.select(
            "Select a model:",
            choices=default_models,
        ).ask()

        if model_choice == "Other (specify)":
            model = questionary.text(
                "Enter the model identifier:",
                default="openai/gpt-4o",
            ).ask()
        else:
            model = model_choice

    # Additional options
    use_cache = questionary.confirm(
        "Enable caching for this agent?",
        default=True,
    ).ask()

    enable_rich_tables = questionary.confirm(
        "Enable rich table output for this agent?",
        default=True,
    ).ask()

    # Create the agent
    agent = FlockFactory.create_default_agent(
        name=name,
        description=description,
        model=model,
        input=input_spec,
        output=output_spec,
        use_cache=use_cache,
        enable_rich_tables=enable_rich_tables,
    )

    # Add the agent to the Flock
    flock.add_agent(agent)
    console.print(f"\n[green]✓[/] Agent '{name}' created and added to Flock!")


def _edit_agent(flock: Flock):
    """Edit an existing agent in the Flock.

    Lets the user pick an agent and an editing mode, then stores the
    updated agent back into the Flock when editing succeeds.

    Args:
        flock: The Flock instance containing the agent to edit
    """
    names = list(flock._agents.keys())

    if not names:
        console.print("[yellow]No agents in this Flock to edit.[/]")
        return

    # Pick which agent to work on
    selected = questionary.select(
        "Select an agent to edit:",
        choices=names,
    ).ask()

    if not selected:
        return

    target = flock._agents[selected]

    # Pick how to edit it
    method = questionary.select(
        "How would you like to edit this agent?",
        choices=[
            "Use Abstract Editor (Field by Field)",
            "Edit YAML Directly",
            "Cancel",
        ],
    ).ask()

    if method == "Use Abstract Editor (Field by Field)":
        # Field-by-field editing is not finished; fall back to YAML
        console.print(
            "[yellow]Abstract editor not fully implemented. Opening YAML editor instead.[/]"
        )
        from flock.cli.yaml_editor import yaml_editor

        edited = yaml_editor(target)
        if edited and isinstance(edited, FlockAgent):
            flock._agents[selected] = edited

    elif method == "Edit YAML Directly":
        from flock.cli.yaml_editor import _edit_yaml_directly

        edited = _edit_yaml_directly(target)
        if edited and isinstance(edited, FlockAgent):
            flock._agents[selected] = edited
            console.print(f"\n[green]✓[/] Agent '{selected}' updated!")


def _remove_agent(flock: Flock):
    """Remove an agent from the Flock.

    Asks which agent to delete and requires confirmation before
    removing it from the Flock's agent registry.

    Args:
        flock: The Flock instance containing the agent to remove
    """
    names = list(flock._agents.keys())

    if not names:
        console.print("[yellow]No agents in this Flock to remove.[/]")
        return

    target = questionary.select(
        "Select an agent to remove:",
        choices=names,
    ).ask()

    if not target:
        return

    # Deletion is destructive, so the confirmation defaults to "no"
    if not questionary.confirm(
        f"Are you sure you want to remove agent '{target}'?",
        default=False,
    ).ask():
        return

    del flock._agents[target]
    console.print(f"\n[green]✓[/] Agent '{target}' removed from Flock!")


def _export_agent(flock: Flock):
    """Export an agent to a YAML file.

    Prompts for the agent and target path, normalizes the extension,
    creates parent directories, and serializes via to_yaml_file.

    Args:
        flock: The Flock instance containing the agent to export
    """
    from pathlib import Path

    agent_names = list(flock._agents.keys())

    if not agent_names:
        console.print("[yellow]No agents in this Flock to export.[/]")
        return

    # Select agent to export
    agent_name = questionary.select(
        "Select an agent to export:",
        choices=agent_names,
    ).ask()

    if not agent_name:
        return

    agent = flock._agents[agent_name]

    # Get file path
    file_path = questionary.text(
        "Enter file path to save agent:",
        default=f"{agent_name}.agent.yaml",
    ).ask()

    # ask() returns None on a cancelled prompt; endswith on None
    # would raise AttributeError.
    if not file_path:
        return

    # Ensure the file has the correct extension
    if not file_path.endswith((".yaml", ".yml")):
        file_path += ".yaml"

    try:
        # Create parent directories if needed, consistent with how
        # _save_flock_to_yaml handles the Flock-level save.
        Path(file_path).parent.mkdir(parents=True, exist_ok=True)

        # Save the agent to YAML
        agent.to_yaml_file(file_path)
        console.print(
            f"\n[green]✓[/] Agent '{agent_name}' exported to {file_path}"
        )
    except Exception as e:
        console.print(f"\n[bold red]Error exporting agent:[/] {e!s}")


def _import_agent(flock: Flock):
    """Import an agent from a YAML file.

    Args:
        flock: The Flock instance to import the agent into
    """
    # TODO: Implement agent import from YAML file
    console.print("[yellow]Import functionality not yet implemented.[/]")
```

### src\flock\cli\registry_management.py

- **Lines**: 618
- **Last modified**: 2025-04-03 03:11:34

```py
"""Registry Management Module for the Flock CLI."""

import importlib
import inspect
import os
from pathlib import Path
from typing import Any

import questionary
from rich.console import Console
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
from rich.table import Table

from flock.core.flock_registry import (
    get_registry,
)
from flock.core.logging.logging import get_logger

logger = get_logger("registry_cli")
console = Console()

# Constants for registry item types
REGISTRY_CATEGORIES = ["Agent", "Callable", "Type", "Component"]
REGISTRY_ACTIONS = [
    "View Registry Contents",
    "Add Item to Registry",
    "Remove Item from Registry",
    "Auto-Registration Scanner",
    "Export Registry",
    "Back to Main Menu",
]


def manage_registry() -> None:
    """Main function for managing the Flock Registry from the CLI.

    Loops over a menu of registry actions until 'Back to Main Menu' is
    chosen, showing registry statistics before each prompt.
    """
    # Menu label -> handler for every action except "Back"
    handlers = {
        "View Registry Contents": view_registry_contents,
        "Add Item to Registry": add_item_to_registry,
        "Remove Item from Registry": remove_item_from_registry,
        "Auto-Registration Scanner": auto_registration_scanner,
        "Export Registry": export_registry,
    }

    while True:
        console.clear()
        console.print(
            Panel("[bold blue]Flock Registry Management[/]"), justify="center"
        )
        console.line()

        # Show registry stats
        display_registry_stats()

        action = questionary.select(
            "What would you like to do?",
            choices=REGISTRY_ACTIONS,
        ).ask()

        if action == "Back to Main Menu":
            break

        handler = handlers.get(action)
        if handler is not None:
            handler()

        input("\nPress Enter to continue...")


def display_registry_stats() -> None:
    """Display statistics about the current registry contents."""
    registry = get_registry()

    stats = Table(title="Registry Statistics")
    stats.add_column("Category", style="cyan")
    stats.add_column("Count", style="green")

    # One row per registry bucket, in fixed display order
    buckets = (
        ("Agents", registry._agents),
        ("Callables", registry._callables),
        ("Types", registry._types),
        ("Components", registry._components),
    )
    for label, bucket in buckets:
        stats.add_row(label, str(len(bucket)))

    console.print(stats)


def view_registry_contents(
    category: str | None = None, search_pattern: str | None = None
) -> None:
    """Display registry contents with filtering options.

    Prompts for any argument not supplied, then prints each matching
    registry section.
    """
    registry = get_registry()

    if category is None:
        category = questionary.select(
            "Select a category to view:",
            choices=REGISTRY_CATEGORIES + ["All Categories"],
        ).ask()

    if search_pattern is None:
        search_pattern = questionary.text(
            "Enter search pattern (leave empty to show all):"
        ).ask()

    console.clear()

    # Map each selectable category to its section title and backing dict
    sections = (
        ("Agent", "Agents", registry._agents),
        ("Callable", "Callables", registry._callables),
        ("Type", "Types", registry._types),
        ("Component", "Components", registry._components),
    )

    for cat, title, items in sections:
        if category in ("All Categories", cat):
            display_registry_section(title, items, search_pattern)


def display_registry_section(
    title: str, items: dict[str, Any], search_pattern: str
) -> None:
    """Display a section of registry items in a table."""
    # An empty/None pattern matches everything ("" is in every string)
    needle = search_pattern.lower() if search_pattern else ""
    matches = {
        name: obj for name, obj in items.items() if needle in name.lower()
    }

    if not matches:
        console.print(
            f"[yellow]No {title.lower()} found matching the search pattern.[/]"
        )
        return

    section = Table(title=f"Registered {title}")
    section.add_column("Name/Path", style="cyan")
    section.add_column("Type", style="green")

    for name, obj in matches.items():
        section.add_row(name, type(obj).__name__)

    console.print(section)
    console.print(f"Total: {len(matches)} {title.lower()}")


def add_item_to_registry() -> bool:
    """Add an item to the registry manually.

    Prompts for the item kind, its module path and name, imports the
    module, and registers the resolved object.

    Returns:
        True if the item was registered, False otherwise. (The previous
        annotation said None, but the function always returned a bool.)
    """
    registry = get_registry()

    item_type = questionary.select(
        "What type of item do you want to add?",
        choices=["agent", "callable", "type", "component"],
    ).ask()

    module_path = questionary.text(
        "Enter the module path (e.g., 'your_module.submodule'):"
    ).ask()

    item_name = questionary.text("Enter the item name within the module:").ask()

    alias = questionary.text(
        "Enter an alias (optional, press Enter to skip):"
    ).ask()

    # Treat an empty alias as "no alias"
    if not alias:
        alias = None

    try:
        # Attempt to import the module
        module = importlib.import_module(module_path)

        # Get the item from the module
        if not hasattr(module, item_name):
            console.print(
                f"[red]Error: {item_name} not found in {module_path}[/]"
            )
            return False

        item = getattr(module, item_name)

        # Register the item based on its type
        if item_type == "agent":
            registry.register_agent(item)
            console.print(
                f"[green]Successfully registered agent: {item_name}[/]"
            )
        elif item_type == "callable":
            result = registry.register_callable(item, alias)
            console.print(
                f"[green]Successfully registered callable: {result}[/]"
            )
        elif item_type == "type":
            result = registry.register_type(item, alias)
            console.print(f"[green]Successfully registered type: {result}[/]")
        elif item_type == "component":
            result = registry.register_component(item, alias)
            console.print(
                f"[green]Successfully registered component: {result}[/]"
            )

        return True

    except ImportError:
        console.print(f"[red]Error: Could not import module {module_path}[/]")
    except Exception as e:
        console.print(f"[red]Error: {e!s}[/]")

    return False


def remove_item_from_registry() -> bool:
    """Remove an item from the registry.

    Prompts for the item kind and name, asks for confirmation, then
    deletes the entry from the corresponding registry dictionary.

    Returns:
        True if the item was removed, False otherwise. (The previous
        annotation said None, but the function always returned a bool.)
    """
    registry = get_registry()

    item_type = questionary.select(
        "What type of item do you want to remove?",
        choices=["agent", "callable", "type", "component"],
    ).ask()

    # Get the appropriate dictionary based on item type
    buckets = {
        "agent": registry._agents,
        "callable": registry._callables,
        "type": registry._types,
        "component": registry._components,
    }
    items = buckets.get(item_type)

    # Guard against a cancelled prompt (ask() returns None); previously
    # this left `items` unbound and raised NameError below.
    if items is None:
        return False

    if not items:
        console.print(f"[yellow]No {item_type}s registered.[/]")
        return False

    # Create a list of items for selection
    item_names = list(items.keys())
    item_name = questionary.select(
        f"Select the {item_type} to remove:",
        choices=item_names + ["Cancel"],
    ).ask()

    if item_name == "Cancel":
        return False

    # Ask for confirmation
    confirm = questionary.confirm(
        f"Are you sure you want to remove {item_name}?",
        default=False,
    ).ask()

    if not confirm:
        console.print("[yellow]Operation cancelled.[/]")
        return False

    # Remove the item (items aliases the registry's own dict)
    try:
        del items[item_name]

        console.print(
            f"[green]Successfully removed {item_type}: {item_name}[/]"
        )
        return True

    except Exception as e:
        console.print(f"[red]Error: {e!s}[/]")
        return False


def auto_registration_scanner() -> None:
    """Scan directory for potential registry items and optionally register them.

    Prompts for a target path, recursion, and whether to auto-register,
    delegates the scan to scan_for_registry_items, and reports results.
    """
    # Ask for the target path
    target_path = questionary.text(
        "Enter the path to scan (file or directory):",
        default=os.getcwd(),
    ).ask()

    # Ask if we should recursively scan directories
    recursive = True
    if os.path.isdir(target_path):
        recursive = questionary.confirm(
            "Scan recursively through subdirectories?",
            default=True,
        ).ask()

    # Ask if we should auto-register or just preview
    auto_register = questionary.confirm(
        "Auto-register discovered items? (No for preview only)",
        default=False,
    ).ask()

    # Perform the scan
    scan_results = scan_for_registry_items(
        target_path, recursive, auto_register
    )

    # Display results grouped by category
    console.print(Panel("[bold green]Scan Results[/]"), justify="center")

    for category, items in scan_results.items():
        if items:
            console.print(f"\n[cyan]{category}:[/] {len(items)} items")
            for item in items:
                console.print(f"  - {item}")

    if auto_register:
        console.print("\n[green]Items have been registered to the registry.[/]")
    else:
        # Ask if we want to register the detected items
        register_now = questionary.confirm(
            "Register these items now?",
            default=False,
        ).ask()

        if register_now:
            # Re-scan with auto-register=True (repeats the full scan to
            # perform the actual registrations)
            scan_for_registry_items(target_path, recursive, True)
            console.print(
                "\n[green]Items have been registered to the registry.[/]"
            )


def scan_for_registry_items(
    target_path: str, recursive: bool = True, auto_register: bool = False
) -> dict[str, list[str]]:
    """Scan a path for potential registry items and optionally register them.

    Args:
        target_path: File or directory to scan.
        recursive: When *target_path* is a directory, descend into
            subdirectories as well.
        auto_register: Register discovered items with the global registry
            as they are found.

    Returns:
        Mapping of category name ("Agents", "Callables", "Types",
        "Components", "Potential Items") to discovered item identifiers.
    """
    results = {
        "Agents": [],
        "Callables": [],
        "Types": [],
        "Components": [],
        "Potential Items": [],
    }

    # (Removed an unused `registry = get_registry()` local: scan_python_file
    # fetches the registry itself when it needs to register something.)
    path = Path(target_path)

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        BarColumn(),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
    ) as progress:
        scan_task = progress.add_task(f"Scanning {target_path}...", total=100)

        # Single Python file: scan it directly.
        if path.is_file() and path.suffix == ".py":
            module_path = get_module_path_from_file(path)
            if module_path:
                scan_python_file(path, module_path, results, auto_register)
            progress.update(scan_task, completed=100)

        # Directory: collect all Python files, then scan them one by one.
        elif path.is_dir():
            if recursive:
                python_files = [
                    Path(root) / f
                    for root, _, files in os.walk(path)
                    for f in files
                    if f.endswith(".py")
                ]
            else:
                python_files = list(path.glob("*.py"))

            total_files = len(python_files)
            for i, file_path in enumerate(python_files):
                module_path = get_module_path_from_file(file_path)
                if module_path:
                    scan_python_file(
                        file_path, module_path, results, auto_register
                    )
                progress.update(
                    scan_task, completed=(i + 1) / total_files * 100
                )
            if total_files == 0:
                # Previously the bar never completed for an empty directory.
                progress.update(scan_task, completed=100)

    return results


def get_module_path_from_file(file_path: Path) -> str | None:
    """Convert a file path to a module path for import."""
    try:
        # Get absolute path
        abs_path = file_path.resolve()

        # Check if it's a Python file
        if abs_path.suffix != ".py":
            return None

        # Get the directory containing the file
        file_dir = abs_path.parent

        # Find the nearest parent directory with __init__.py
        # to determine the package root
        package_root = None
        current_dir = file_dir
        while current_dir != current_dir.parent:
            if (current_dir / "__init__.py").exists():
                if package_root is None:
                    package_root = current_dir
            else:
                # We've reached a directory without __init__.py
                # If we found a package root earlier, use that
                if package_root is not None:
                    break
            current_dir = current_dir.parent

        # If no package root was found, this file can't be imported as a module
        if package_root is None:
            return None

        # Calculate the module path
        rel_path = abs_path.relative_to(package_root.parent)
        module_path = str(rel_path.with_suffix("")).replace(os.sep, ".")

        return module_path

    except Exception as e:
        logger.error(f"Error determining module path: {e}")
        return None


def scan_python_file(
    file_path: Path,
    module_path: str,
    results: dict[str, list[str]],
    auto_register: bool,
) -> None:
    """Scan a Python file for registry-eligible items.

    Imports the module, inspects its public members, and appends discovered
    agents, components, types, callables, and heuristic candidates to
    *results* (mutated in place). When *auto_register* is True, items are
    also registered with the global registry as they are found.

    Args:
        file_path: Path of the file being scanned (used only for error logs).
        module_path: Dotted import path of the module to inspect.
        results: Category -> item-identifier lists to append to.
        auto_register: Register discovered items immediately.
    """
    try:
        # Try to import the module
        module = importlib.import_module(module_path)

        # Scan for classes and functions
        for name, obj in inspect.getmembers(module):
            # Skip private/dunder members.
            if name.startswith("_"):
                continue

            # Check for registry decorator presence
            is_registry_item = False

            # Check for classes
            # NOTE(review): unlike the function branch below, classes are NOT
            # filtered by obj.__module__, so classes merely imported into this
            # module are also counted/registered under its path — confirm
            # whether that is intended.
            if inspect.isclass(obj):
                # Check if it has a FlockAgent as a base class
                if is_flock_agent(obj):
                    if auto_register:
                        get_registry().register_agent(obj)
                    results["Agents"].append(f"{module_path}.{name}")
                    is_registry_item = True

                # Check for components
                elif has_component_base(obj):
                    if auto_register:
                        get_registry().register_component(obj)
                    results["Components"].append(f"{module_path}.{name}")
                    is_registry_item = True

                # Check for Pydantic models or dataclasses
                elif is_potential_type(obj):
                    if auto_register:
                        get_registry().register_type(obj)
                    results["Types"].append(f"{module_path}.{name}")
                    is_registry_item = True

                # If not already identified but seems like a potential candidate
                # (is_registry_item is necessarily False here — the check is
                # redundant but harmless)
                elif not is_registry_item and is_potential_registry_candidate(
                    obj
                ):
                    results["Potential Items"].append(
                        f"{module_path}.{name} (class)"
                    )

            # Check for functions (potential callables/tools); the __module__
            # comparison excludes functions imported from elsewhere.
            elif inspect.isfunction(obj) and obj.__module__ == module.__name__:
                if auto_register:
                    get_registry().register_callable(obj)
                results["Callables"].append(f"{module_path}.{name}")
                is_registry_item = True

    except (ImportError, AttributeError) as e:
        # Module could not be imported (missing deps, bad path) — skip it.
        logger.warning(f"Could not import {module_path}: {e}")
    except Exception as e:
        logger.error(f"Error scanning {file_path}: {e}")


def is_flock_agent(cls: type) -> bool:
    """Return True when *cls* is FlockAgent or one of its subclasses."""
    try:
        from flock.core.flock_agent import FlockAgent
    except ImportError:
        # FlockAgent is unavailable in this environment.
        return False
    try:
        return issubclass(cls, FlockAgent)
    except TypeError:
        # *cls* is not actually a class.
        return False


def has_component_base(cls: type) -> bool:
    """Check whether *cls* derives from a known Flock component base class.

    Matches purely by class name anywhere in the MRO, so it works without
    importing the Flock component modules.
    """
    known_bases = {"FlockModule", "FlockEvaluator", "FlockRouter"}
    try:
        mro_names = {base.__name__ for base in cls.__mro__}
    except (AttributeError, TypeError):
        # Not a class (no __mro__) or otherwise uninspectable.
        return False
    return bool(known_bases & mro_names)


def is_potential_type(cls: type) -> bool:
    """Check if a class is a Pydantic model or a dataclass.

    The pydantic import is kept separate and optional: the previous version
    imported pydantic and dataclasses in one try-block, so a missing
    pydantic made *every* check return False — including for plain
    dataclasses, which need only the standard library.
    """
    from dataclasses import is_dataclass

    try:
        if is_dataclass(cls):
            return True
    except TypeError:
        return False

    try:
        from pydantic import BaseModel

        return issubclass(cls, BaseModel)
    except (ImportError, TypeError):
        # pydantic not installed, or *cls* is not a class.
        return False


def is_potential_registry_candidate(obj: Any) -> bool:
    """Heuristically decide whether *obj* might belong in the registry.

    Classes qualify when their name contains "Flock" or their docstring
    mentions a registry-related keyword; functions qualify on docstring
    keywords alone. Everything else is rejected.
    """
    class_keywords = ("agent", "flock", "tool", "module", "evaluator", "router")
    func_keywords = ("tool", "agent", "flock")

    if inspect.isclass(obj):
        if "Flock" in obj.__name__:
            return True
        doc = (obj.__doc__ or "").lower()
        return any(kw in doc for kw in class_keywords)

    if inspect.isfunction(obj):
        doc = (obj.__doc__ or "").lower()
        return any(kw in doc for kw in func_keywords)

    return False


def export_registry() -> None:
    """Export the current registry state to a file.

    Prompts for a format (YAML, JSON, or a plain-text report) and a target
    path, then writes the names of all registered agents, callables, types,
    and components.
    """
    registry = get_registry()

    # Choose export format
    export_format = questionary.select(
        "Select export format:",
        choices=["YAML", "JSON", "Text Report"],
    ).ask()

    # Map each format to a proper file extension; deriving it from the
    # lowercased format name produced "flock_registry_export.text report"
    # for the report option.
    extensions = {"YAML": "yaml", "JSON": "json", "Text Report": "txt"}
    default_ext = extensions.get(export_format, "txt")

    # Choose export path
    export_path = questionary.text(
        "Enter export file path:",
        default=f"flock_registry_export.{default_ext}",
    ).ask()

    try:
        # NOTE(review): reads private registry attributes (_agents, ...) —
        # consistent with the rest of this module; confirm no public
        # accessor exists.
        export_data = {
            "agents": list(registry._agents.keys()),
            "callables": list(registry._callables.keys()),
            "types": list(registry._types.keys()),
            "components": list(registry._components.keys()),
        }

        if export_format == "YAML":
            import yaml

            with open(export_path, "w") as f:
                yaml.dump(export_data, f, sort_keys=False, indent=2)

        elif export_format == "JSON":
            import json

            with open(export_path, "w") as f:
                json.dump(export_data, f, indent=2)

        elif export_format == "Text Report":
            with open(export_path, "w") as f:
                f.write("FLOCK REGISTRY EXPORT\n")
                f.write("====================\n\n")

                for category, items in export_data.items():
                    header = f"{category.upper()} ({len(items)})"
                    f.write(header + "\n")
                    # Underline exactly as wide as the header (the old
                    # arithmetic was one character short).
                    f.write("-" * len(header) + "\n")
                    for item in sorted(items):
                        f.write(f"  - {item}\n")
                    f.write("\n")

        console.print(f"[green]Registry exported to {export_path}[/]")

    except Exception as e:
        console.print(f"[red]Error exporting registry: {e!s}[/]")


if __name__ == "__main__":
    # Entry point when this module is executed directly as a CLI.
    manage_registry()
```

### src\flock\cli\view_results.py

- **Lines**: 29
- **Last modified**: 2025-04-02 23:12:31

```py
"""View execution results and history.

This module provides functionality to view the results of previous Flock executions.
"""

from rich.console import Console
from rich.panel import Panel

from flock.core.flock import Flock
from flock.core.util.cli_helper import init_console

# Create console instance
console = Console()


def view_results(flock: Flock):
    """View execution results for a Flock instance.

    Args:
        flock: The Flock instance to view results for
    """
    init_console()
    console.print(Panel("[bold green]View Results[/]"), justify="center")
    # Placeholder output until result-history support is implemented.
    placeholder_lines = (
        "[yellow]Results history functionality not yet implemented.[/]",
        "This feature will allow viewing and filtering past execution results.",
    )
    for line in placeholder_lines:
        console.print(line)
```

### src\flock\cli\yaml_editor.py

- **Lines**: 283
- **Last modified**: 2025-04-03 23:51:18

```py
"""YAML Editor for Flock CLI.

This module provides functionality to view, edit, and validate YAML configurations
for Flock and FlockAgent instances.
"""

import os
import subprocess
import tempfile
from pathlib import Path

import questionary
import yaml
from rich.console import Console
from rich.panel import Panel
from rich.syntax import Syntax
from rich.table import Table

from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent
from flock.core.util.cli_helper import init_console

# Create console instance
console = Console()


def yaml_editor(flock_or_agent: Flock | FlockAgent | None = None):
    """YAML Editor main entry point.

    Presents an interactive menu loop (view / edit / validate / save) for
    the given object. With no object, falls back to a file browser.

    Args:
        flock_or_agent: Optional Flock or FlockAgent instance to edit
    """
    init_console()
    console.print(Panel("[bold green]YAML Editor[/]"), justify="center")

    if flock_or_agent is None:
        # If no object provided, provide options to load from file
        _yaml_file_browser()
        return

    while True:
        init_console()
        console.print(Panel("[bold green]YAML Editor[/]"), justify="center")

        # Determine object type
        if isinstance(flock_or_agent, Flock):
            obj_type = "Flock"
            console.print(
                f"Editing [bold cyan]Flock[/] with {len(flock_or_agent._agents)} agents"
            )
        elif isinstance(flock_or_agent, FlockAgent):
            obj_type = "FlockAgent"
            console.print(
                f"Editing [bold cyan]FlockAgent[/]: {flock_or_agent.name}"
            )
        else:
            console.print("[bold red]Error: Unknown object type[/]")
            input("\nPress Enter to continue...")
            return

        console.line()

        choice = questionary.select(
            "What would you like to do?",
            choices=[
                questionary.Separator(line=" "),
                "View Current YAML",
                "Edit YAML Directly",
                "Abstract Editor (Visual)",
                "Validate YAML",
                "Save to File",
                questionary.Separator(),
                "Back to Main Menu",
            ],
        ).ask()

        if choice == "View Current YAML":
            _view_yaml(flock_or_agent)
        elif choice == "Edit YAML Directly":
            # Editing may replace the object (re-parsed from edited YAML).
            flock_or_agent = _edit_yaml_directly(flock_or_agent)
        elif choice == "Abstract Editor (Visual)":
            flock_or_agent = _abstract_editor(flock_or_agent)
        elif choice == "Validate YAML":
            _validate_yaml(flock_or_agent)
        elif choice == "Save to File":
            _save_to_file(flock_or_agent)
        elif choice == "Back to Main Menu":
            break

        # Pause so the user can read the sub-action's output before redraw.
        if choice != "Back to Main Menu":
            input("\nPress Enter to continue...")


def _yaml_file_browser():
    """Browser for YAML files to load.

    Lists *.yaml / *.yml files in the current working directory; selection
    and loading are not implemented yet.
    """
    console.print("\n[bold]YAML File Browser[/]")
    console.line()

    current_dir = os.getcwd()
    console.print(f"Current directory: [cyan]{current_dir}[/]")

    # Collect candidate files for both common YAML extensions.
    yaml_files = [
        candidate
        for pattern in ("*.yaml", "*.yml")
        for candidate in Path(current_dir).glob(pattern)
    ]

    if not yaml_files:
        console.print("[yellow]No YAML files found in current directory.[/]")
        input("\nPress Enter to continue...")
        return

    # Render the findings as a table.
    table = Table(title="YAML Files")
    table.add_column("Filename", style="cyan")
    table.add_column("Size", style="green")
    table.add_column("Last Modified", style="yellow")

    for candidate in yaml_files:
        stat = candidate.stat()
        table.add_row(
            candidate.name, f"{stat.st_size} bytes", f"{stat.st_mtime}"
        )

    console.print(table)

    # TODO: Add file selection and loading


def _view_yaml(obj: Flock | FlockAgent):
    """Render the object's YAML representation with syntax highlighting.

    Args:
        obj: The object to view as YAML
    """
    highlighted = Syntax(
        obj.to_yaml(),
        "yaml",
        theme="monokai",
        line_numbers=True,
        code_width=100,
        word_wrap=True,
    )

    init_console()
    console.print(Panel("[bold green]YAML View[/]"), justify="center")
    console.print(highlighted)


def _edit_yaml_directly(obj: Flock | FlockAgent) -> Flock | FlockAgent:
    """Edit the YAML representation directly using an external editor.

    Args:
        obj: The object to edit

    Returns:
        The updated object, or the original object if editing or parsing
        failed (previously an unknown object type fell through the success
        path and returned None, losing the caller's object).
    """
    # Convert to YAML
    yaml_str = obj.to_yaml()

    # Write the YAML to a temp file the external editor can open.
    with tempfile.NamedTemporaryFile(
        suffix=".yaml", mode="w+", delete=False
    ) as tmp:
        tmp.write(yaml_str)
        tmp_path = tmp.name

    try:
        # Honor $EDITOR; fall back to a sensible per-platform default.
        editor = os.environ.get(
            "EDITOR", "notepad" if os.name == "nt" else "nano"
        )

        console.print(
            f"\nOpening {editor} to edit YAML. Save and exit when done."
        )
        subprocess.call([editor, tmp_path])

        # Read updated YAML
        with open(tmp_path) as f:
            updated_yaml = f.read()

        # Parse back to object; on any failure keep the original.
        try:
            if isinstance(obj, Flock):
                updated_obj = Flock.from_yaml(updated_yaml)
            elif isinstance(obj, FlockAgent):
                updated_obj = FlockAgent.from_yaml(updated_yaml)
            else:
                return obj
            console.print("\n[green]✓[/] YAML parsed successfully!")
            return updated_obj
        except Exception as e:
            console.print(f"\n[bold red]Error parsing YAML:[/] {e!s}")
            console.print("\nKeeping original object.")
            return obj

    finally:
        # Best-effort cleanup; the previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            os.unlink(tmp_path)
        except OSError:
            pass


def _abstract_editor(obj: Flock | FlockAgent) -> Flock | FlockAgent:
    """Edit object using an abstract form-based editor.

    Args:
        obj: The object to edit

    Returns:
        The updated object (currently always the unmodified input, since
        the visual editor is not implemented yet)
    """
    console.print("\n[yellow]Abstract visual editor not yet implemented.[/]")
    console.print("Will provide a form-based editor for each field.")
    return obj


def _validate_yaml(obj: Flock | FlockAgent):
    """Round-trip the object through YAML to confirm it (de)serializes.

    Args:
        obj: The object to validate
    """
    try:
        dumped = obj.to_yaml()

        # Must be well-formed YAML...
        yaml.safe_load(dumped)

        # ...and must deserialize back into the matching object type.
        if isinstance(obj, Flock):
            Flock.from_yaml(dumped)
        elif isinstance(obj, FlockAgent):
            FlockAgent.from_yaml(dumped)

        console.print("\n[green]✓[/] YAML validation successful!")
    except Exception as e:
        console.print(f"\n[bold red]YAML validation failed:[/] {e!s}")


def _save_to_file(obj: Flock | FlockAgent):
    """Save object to a YAML file.

    Args:
        obj: The object to save
    """
    # Pick a default filename reflecting the object type.
    if isinstance(obj, Flock):
        default_name = "my_flock.flock.yaml"
    elif isinstance(obj, FlockAgent):
        default_name = f"{obj.name}.agent.yaml"
    else:
        default_name = "unknown.yaml"

    # Get file path
    file_path = questionary.text(
        "Enter file path to save YAML:",
        default=default_name,
    ).ask()

    # questionary returns None when the prompt is cancelled (Ctrl+C); the
    # previous code crashed on `file_path.endswith` in that case.
    if not file_path:
        console.print("[yellow]Save cancelled.[/]")
        return

    # Ensure the file has the correct extension
    if not file_path.endswith((".yaml", ".yml")):
        file_path += ".yaml"

    # Create directory if it doesn't exist
    save_path = Path(file_path)
    save_path.parent.mkdir(parents=True, exist_ok=True)

    try:
        # Save to file
        with open(file_path, "w") as f:
            f.write(obj.to_yaml())

        console.print(f"\n[green]✓[/] Saved to {file_path}")
    except Exception as e:
        console.print(f"\n[bold red]Error saving file:[/] {e!s}")
```

### src\flock\config.py

- **Lines**: 46
- **Last modified**: 2025-02-18 03:20:40

```py
# flock/config.py
"""Central configuration: reads environment variables via python-decouple."""
from decouple import config

from flock.core.logging.telemetry import TelemetryConfig

# -- Connection and External Service Configurations --
TEMPORAL_SERVER_URL = config("TEMPORAL_SERVER_URL", "localhost:7233")
DEFAULT_MODEL = config("DEFAULT_MODEL", "openai/gpt-4o")


# API Keys and related settings
TAVILY_API_KEY = config("TAVILY_API_KEY", "")
GITHUB_PAT = config("GITHUB_PAT", "")
GITHUB_REPO = config("GITHUB_REPO", "")
GITHUB_USERNAME = config("GITHUB_USERNAME", "")

# -- Debugging and Logging Configurations --
LOCAL_DEBUG = config("LOCAL_DEBUG", True)
LOG_LEVEL = config("LOG_LEVEL", "DEBUG")
LOGGING_DIR = config("LOGGING_DIR", "logs")

# NOTE(review): the env var name was previously "OTL_SERVICE_NAME" (missing
# "E") — fixed to match the setting name. Deployments that exported the
# misspelled variable must rename it.
OTEL_SERVICE_NAME = config("OTEL_SERVICE_NAME", "otel-flock")
JAEGER_ENDPOINT = config(
    "JAEGER_ENDPOINT", "http://localhost:14268/api/traces"
)  # Default HTTP collector endpoint for Jaeger (14268 is the HTTP port)
JAEGER_TRANSPORT = config(
    "JAEGER_TRANSPORT", "http"
).lower()  # Options: "grpc" or "http"
OTEL_SQL_DATABASE_NAME = config("OTEL_SQL_DATABASE", "flock_events.db")
OTEL_FILE_NAME = config("OTEL_FILE_NAME", "flock_events.jsonl")
# Use decouple's bool cast instead of comparing against the string "True":
# the old `config(NAME, True) == "True"` form evaluated to False whenever
# the variable was unset (bool default != "True"), silently ignoring the
# declared defaults.
OTEL_ENABLE_SQL: bool = config("OTEL_ENABLE_SQL", default=True, cast=bool)
OTEL_ENABLE_FILE: bool = config("OTEL_ENABLE_FILE", default=True, cast=bool)
OTEL_ENABLE_JAEGER: bool = config("OTEL_ENABLE_JAEGER", default=False, cast=bool)


TELEMETRY = TelemetryConfig(
    OTEL_SERVICE_NAME,
    JAEGER_ENDPOINT,
    JAEGER_TRANSPORT,
    LOGGING_DIR,
    OTEL_FILE_NAME,
    OTEL_SQL_DATABASE_NAME,
    OTEL_ENABLE_JAEGER,
    OTEL_ENABLE_FILE,
    OTEL_ENABLE_SQL,
)
```

### src\flock\core\api\ui\routes.py

- **Lines**: 271
- **Last modified**: 2025-04-02 17:29:19

```py
# src/flock/core/api/ui/routes.py
"""FastHTML UI routes for the Flock API."""

from typing import TYPE_CHECKING

# --- Conditional FastHTML Imports ---
try:
    import httpx
    from fasthtml.common import *

    # Import Form explicitly with an alias to avoid collisions
    from fasthtml.common import Form as FHForm

    FASTHTML_AVAILABLE = True
except ImportError:
    FASTHTML_AVAILABLE = False

    # Define necessary dummies if not available. These only satisfy name
    # resolution at import time (annotations, isinstance checks); any attempt
    # to actually build a UI is blocked by the FASTHTML_AVAILABLE guard in
    # create_ui_app.
    class Request:
        pass

    class Titled:
        pass

    class Div:
        pass

    class H1:
        pass

    class P:
        pass

    class H2:
        pass

    class Pre:
        pass

    class Code:
        pass

    class Label:
        pass

    class Select:
        pass

    class Option:
        pass

    class FHForm:
        pass  # Dummy alias if not available

    class Button:
        pass

    class Span:
        pass

    class Script:
        pass

    class Style:
        pass

    class Hidden:
        pass

    class Textarea:
        pass

    class Input:
        pass

    def fast_app():
        return None, None

    def picolink():
        return None

# Use TYPE_CHECKING to avoid circular import errors for type hints
if TYPE_CHECKING:
    from flock.core.api.main import FlockAPI

# Import logger and utils needed by UI routes
from flock.core.logging.logging import get_logger

logger = get_logger("api.ui")


def create_ui_app(
    flock_api_instance: "FlockAPI",
    api_host: str,
    api_port: int,
    server_name: str,
    # NOTE(review): the return annotation `Any` is not imported from typing
    # here; it presumably arrives via fasthtml's star import. Quoted to keep
    # it lazy and avoid a NameError in the ImportError fallback path.
) -> "Any":
    """Creates and configures the FastHTML application and its routes.

    Args:
        flock_api_instance: FlockAPI whose flock and input-spec parser back
            the UI routes.
        api_host: Host where the REST API is reachable (used by the UI to
            fetch the agent list).
        api_port: Port of the REST API.
        server_name: Title shown on the rendered page.

    Returns:
        The configured FastHTML app instance.

    Raises:
        ImportError: If FastHTML is not installed.
    """
    if not FASTHTML_AVAILABLE:
        raise ImportError("FastHTML is not installed. Cannot create UI.")
    logger.debug("Creating FastHTML application instance for UI")

    # Use the passed FlockAPI instance to access necessary data/methods
    flock_instance = flock_api_instance.flock
    parse_input_spec_func = (
        flock_api_instance._parse_input_spec
    )  # Get reference to parser

    fh_app, fh_rt = fast_app(
        hdrs=(
            Script(src="https://unpkg.com/htmx.org@1.9.10/dist/htmx.min.js"),
            picolink,  # Pass directly
            Style("""
            body { padding: 20px; max-width: 800px; margin: auto; font-family: sans-serif; }
            label { display: block; margin-top: 1rem; font-weight: bold;}
            input, select, textarea { width: 100%; margin-top: 0.25rem; padding: 0.5rem; border: 1px solid #ccc; border-radius: 4px; box-sizing: border-box; }
            input[type=checkbox] { width: auto; margin-right: 0.5rem; vertical-align: middle; }
            label[for^=input_] { font-weight: normal; display: inline; margin-top: 0;} /* Style for checkbox labels */
            button[type=submit] { margin-top: 1.5rem; padding: 0.75rem 1.5rem; background-color: #007bff; color: white; border: none; border-radius: 4px; cursor: pointer; font-size: 1rem;}
            button[type=submit]:hover { background-color: #0056b3; }
            #result-area { margin-top: 2rem; background-color: #f8f9fa; padding: 15px; border: 1px solid #dee2e6; border-radius: 5px; white-space: pre-wrap; word-wrap: break-word; font-family: monospace; }
            .htmx-indicator { display: none; margin-left: 10px; font-style: italic; color: #6c757d; }
            .htmx-request .htmx-indicator { display: inline; }
            .htmx-request.htmx-indicator { display: inline; }
            .error-message { color: #721c24; margin-top: 10px; font-weight: bold; background-color: #f8d7da; border: 1px solid #f5c6cb; padding: 10px; border-radius: 5px;}
        """),
        )
    )

    @fh_rt("/get-agent-inputs")
    def get_agent_inputs(request: Request):
        """Endpoint called by HTMX to get agent input fields."""
        agent_name = request.query_params.get("agent_name")
        logger.debug(f"UI requesting inputs for agent: {agent_name}")
        if not agent_name:
            return Div("Please select an agent.", cls="error-message")

        # Access agents via the passed FlockAPI instance
        agent_def = flock_instance.agents.get(agent_name)
        if not agent_def:
            logger.warning(f"Agent '{agent_name}' not found for UI.")
            return Div(f"Agent '{agent_name}' not found.", cls="error-message")

        # Use the parsing function from the FlockAPI instance
        input_fields = parse_input_spec_func(agent_def.input or "")
        logger.debug(f"Parsed input fields for {agent_name}: {input_fields}")

        # Build one labeled widget per parsed field.
        inputs_html = []
        for field in input_fields:
            field_id = f"input_{field['name']}"
            label_text = f"{field['name']}"
            if field["type"] != "bool":
                label_text += f" ({field['type']})"
            label = Label(label_text, fr=field_id)
            # "inputs." prefix namespaces form values for the run endpoint.
            input_attrs = dict(
                id=field_id,
                name=f"inputs.{field['name']}",
                type=field["html_type"],
            )
            if field.get("step"):
                input_attrs["step"] = field["step"]
            if field.get("desc"):
                input_attrs["placeholder"] = field["desc"]
            if field.get("rows"):
                input_attrs["rows"] = field["rows"]

            if field["html_type"] == "textarea":
                input_el = Textarea(**input_attrs)
            elif field["html_type"] == "checkbox":
                input_el = Div(
                    Input(**input_attrs, value="true"),
                    Label(f" Enable?", fr=field_id),
                )
            else:
                input_el = Input(**input_attrs)

            inputs_html.append(
                Div(label, input_el, style="margin-bottom: 1rem;")
            )

        # Carry the chosen agent name along with the form submission.
        inputs_html.append(
            Hidden(
                id="selected_agent_name", name="agent_name", value=agent_name
            )
        )
        return (
            Div(*inputs_html)
            if inputs_html
            else P("This agent requires no input.")
        )

    @fh_rt("/")
    async def ui_root(request: Request):
        """Serves the main UI page."""
        logger.info("Serving main UI page /ui/")
        agents_list = []
        error_msg = None
        # Fetch the agent list from this server's own REST API.
        api_url = f"http://{api_host}:{api_port}/agents"
        try:
            async with httpx.AsyncClient() as client:
                logger.debug(f"UI fetching agents from {api_url}")
                response = await client.get(api_url)
                response.raise_for_status()
                agent_data = response.json()
                agents_list = agent_data.get("agents", [])
                logger.debug(f"Fetched {len(agents_list)} agents for UI")
        except Exception as e:
            error_msg = f"UI Error: Could not fetch agent list from API at {api_url}. Details: {e}"
            logger.error(error_msg, exc_info=True)

        options = [
            Option("-- Select Agent --", value="", selected=True, disabled=True)
        ] + [
            Option(
                f"{agent['name']}: {agent['description']}", value=agent["name"]
            )
            for agent in agents_list
        ]

        # Use FHForm alias here
        content = Div(
            H2(f"Agent Runner"),
            P(
                "Select an agent, provide the required inputs, and click 'Run Flock'."
            ),
            Label("Select Starting Agent:", fr="agent_select"),
            Select(
                *options,
                id="agent_select",
                name="agent_name",
                hx_get="/ui/get-agent-inputs",
                hx_trigger="change",
                hx_target="#agent-inputs-container",
                hx_indicator="#loading-indicator",
            ),
            FHForm(
                Div(id="agent-inputs-container", style="margin-top: 1rem;"),
                Button("Run Flock", type="submit"),
                Span(
                    " Processing...",
                    id="loading-indicator",
                    cls="htmx-indicator",
                ),
                hx_post="/ui/run-agent-form",  # Target the dedicated form endpoint
                hx_target="#result-area",
                hx_swap="innerHTML",
                hx_indicator="#loading-indicator",
            ),
            H2("Result"),
            Div(
                Pre(
                    Code(
                        "Result will appear here...",
                        id="result-content",
                        class_="language-json",
                    )
                ),
                id="result-area",
                style="min-height: 100px;",
            ),
        )

        if error_msg:
            # Replace the whole page body with the error banner.
            content = Div(
                H1("Flock UI - Error"), P(error_msg, cls="error-message")
            )

        return Titled(f"{server_name}", content)

    return fh_app
```

### src\flock\core\api\ui\utils.py

- **Lines**: 119
- **Last modified**: 2025-04-02 17:29:19

```py
# src/flock/core/api/ui/utils.py
"""Utility functions for the Flock FastHTML UI."""

import html
from typing import Any

from flock.core.logging.logging import get_logger
from flock.core.util.input_resolver import (
    split_top_level,  # Assuming this is the correct location
)

logger = get_logger("api.ui.utils")


def parse_input_spec(input_spec: str) -> list[dict[str, str]]:
    """Parses an agent input string into a list of field definitions."""
    fields: list[dict[str, str]] = []
    if not input_spec:
        return fields
    try:
        parts = split_top_level(input_spec)
    except NameError:
        logger.error("split_top_level utility function not found!")
        return fields  # Or raise?

    for raw in parts:
        raw = raw.strip()
        if not raw:
            continue

        info = {
            "name": "",
            "type": "str",
            "desc": "",
            "html_type": "text",
        }

        # Split off an optional "| description" suffix, then "name: type".
        head, pipe, desc = raw.partition("|")
        if pipe:
            info["desc"] = desc.strip()
        name, colon, declared_type = head.partition(":")
        info["name"] = name.strip()
        if colon:
            info["type"] = declared_type.strip().lower()

        # Choose an HTML widget based on the declared type.
        step = None
        declared = info["type"]
        if declared.startswith("int"):
            info["html_type"] = "number"
        elif declared.startswith("float"):
            info["html_type"] = "number"
            step = "any"
        elif declared.startswith("bool"):
            info["html_type"] = "checkbox"
        elif "list" in declared or "dict" in declared:
            info["html_type"] = "textarea"
            info["rows"] = 3

        if step:
            info["step"] = step
        if info["name"]:
            fields.append(info)
        else:
            logger.warning(
                f"Could not parse field name from input spec part: '{raw}'"
            )
    return fields


def format_result_to_html(
    data: Any, level: int = 0, max_level: int = 5, max_str_len: int = 999999
) -> str:
    """Recursively render a Python object (dict, list, Box, etc.) as an HTML fragment.

    Dicts become styled tables, lists/tuples become definition lists, and
    scalars become color-coded ``<code>`` spans. Recursion stops at
    ``max_level``; scalar strings longer than ``max_str_len`` are truncated.
    """
    # Unwrap Box-like objects that expose a callable to_dict().
    unwrap = getattr(data, "to_dict", None)
    if callable(unwrap):
        data = unwrap()
    if level > max_level:
        return html.escape(f"[Max recursion depth {max_level} reached]")

    if isinstance(data, dict):
        if not data:
            return "<i>(empty dictionary)</i>"
        rows = []
        for key, value in data.items():
            rendered = format_result_to_html(
                value, level + 1, max_level, max_str_len
            )
            rows.append(
                f'<tr><td style="vertical-align: top; padding: 8px; border-top: 1px solid #dee2e6;"><strong>{html.escape(str(key))}</strong></td><td style="padding: 8px; border-top: 1px solid #dee2e6;">{rendered}</td></tr>'
            )
        return (
            '<table style="width: 100%; border-collapse: collapse; margin-bottom: 10px; border: 1px solid #dee2e6;">'
            '<thead style="background-color: #e9ecef;"><tr><th style="text-align: left; padding: 8px; border-bottom: 2px solid #dee2e6;">Key</th><th style="text-align: left; padding: 8px; border-bottom: 2px solid #dee2e6;">Value</th></tr></thead>'
            "<tbody>" + "".join(rows) + "</tbody></table>"
        )

    if isinstance(data, (list, tuple)):
        if not data:
            return "<i>(empty list)</i>"
        items = []
        for i, item in enumerate(data):
            rendered = format_result_to_html(
                item, level + 1, max_level, max_str_len
            )
            items.append(
                f'<dt style="font-weight: bold; margin-top: 5px;">Item {i + 1}:</dt><dd style="margin-left: 20px; margin-bottom: 5px;">{rendered}</dd>'
            )
        return (
            '<dl style="margin-left: 20px; padding-left: 0; margin-bottom: 10px;">'
            + "".join(items)
            + "</dl>"
        )

    # Scalar fallback: escape, truncate if necessary, then color-code by type.
    str_value = str(data)
    escaped_value = html.escape(str_value)
    if len(str_value) > max_str_len:
        hidden = len(str_value) - max_str_len
        escaped_value = (
            html.escape(str_value[:max_str_len])
            + f"... <i style='color: #6c757d;'>({hidden} more chars)</i>"
        )

    if isinstance(data, bool):  # bool first: bool is a subclass of int
        style = "color: #d63384; font-weight: bold;"
    elif isinstance(data, (int, float)):
        style = "color: #0d6efd;"
    elif data is None:
        style = "color: #6c757d; font-style: italic;"
        escaped_value = "None"
    else:
        style = ""
    return f'<code style="{style}">{escaped_value}</code>'
```

### src\flock\core\context\context.py

- **Lines**: 185
- **Last modified**: 2025-03-27 07:01:34

```py
import uuid
from dataclasses import asdict
from datetime import datetime
from typing import Any, Literal

from opentelemetry import trace
from pydantic import BaseModel, Field

from flock.core.context.context_vars import FLOCK_LAST_AGENT, FLOCK_LAST_RESULT
from flock.core.logging.logging import get_logger
from flock.core.serialization.serializable import Serializable

logger = get_logger("context")
tracer = trace.get_tracer(__name__)


class AgentRunRecord(BaseModel):
    """One history entry describing a single agent execution."""

    id: str = Field(default="")  # "<agent>_<4-hex>", assigned in FlockContext.record()
    agent: str = Field(default="")  # name of the agent that ran
    data: dict[str, Any] = Field(default_factory=dict)  # outputs produced by the run
    timestamp: str = Field(default="")  # presumably ISO-formatted (from_dict parses it with fromisoformat) — confirm
    hand_off: dict | None = Field(default_factory=dict)  # NOTE(review): FlockContext.record() annotates this arg as str — confirm intended type
    called_from: str = Field(default="")  # name of the agent that invoked this one


class AgentDefinition(BaseModel):
    """Serializable description of an agent: its type name plus constructor data."""

    agent_type: str = Field(default="")  # class name, set from type(agent).__name__ in add_agent_definition
    agent_name: str = Field(default="")  # unique agent identifier
    agent_data: dict = Field(default_factory=dict)  # raw data needed to rebuild the agent
    serializer: Literal["json", "cloudpickle", "msgpack"] = Field(
        default="cloudpickle"
    )  # presumably selects the wire format for agent_data — confirm against serializer usage


class FlockContext(Serializable, BaseModel):
    """Mutable, serializable state shared across a Flock workflow run.

    Holds a key/value ``state`` store, the chronological ``history`` of agent
    runs, and the ``agent_definitions`` needed to re-create agents. Dict-style
    access (``ctx["key"]``) routes through the logging/tracing setter.
    """

    state: dict[str, Any] = Field(default_factory=dict)
    history: list[AgentRunRecord] = Field(default_factory=list)
    agent_definitions: dict[str, AgentDefinition] = Field(default_factory=dict)
    run_id: str = Field(default="")
    workflow_id: str = Field(default="")
    workflow_timestamp: str = Field(default="")

    def record(
        self,
        agent_name: str,
        data: dict[str, Any],
        timestamp: str,
        hand_off: dict | None,  # FIX: was annotated `str`; the field is dict | None
        called_from: str,
    ) -> None:
        """Append a history record for one agent run and mirror its outputs.

        Each output is also stored as a ``<agent>.<key>`` state variable, and
        the FLOCK_LAST_RESULT / FLOCK_LAST_AGENT variables are updated.
        """
        record = AgentRunRecord(
            id=agent_name + "_" + uuid.uuid4().hex[:4],
            agent=agent_name,
            data=data.copy(),
            timestamp=timestamp,
            hand_off=hand_off,
            called_from=called_from,
        )
        self.history.append(record)
        for key, value in data.items():
            self.set_variable(f"{agent_name}.{key}", value)
        self.set_variable(FLOCK_LAST_RESULT, data)
        self.set_variable(FLOCK_LAST_AGENT, agent_name)
        logger.info(
            f"Agent run recorded - run_id '{record.id}'",
            agent=agent_name,
            timestamp=timestamp,
            data=data,
        )
        current_span = trace.get_current_span()
        if current_span.get_span_context().is_valid:
            current_span.add_event(
                "record",
                attributes={"agent": agent_name, "timestamp": timestamp},
            )

    def get_variable(self, key: str, default: Any = None) -> Any:
        """Return the state variable ``key``, or ``default`` if absent."""
        return self.state.get(key, default)

    def set_variable(self, key: str, value: Any) -> None:
        """Set a state variable, logging and tracing only actual changes."""
        old_value = self.state.get(key)
        self.state[key] = value
        if old_value != value:
            # Escape braces so the value cannot be misread as a format field.
            escaped_value = str(value).replace("{", "{{").replace("}", "}}")

            logger.info(
                "Context variable updated - {} -> {}",
                key,
                escaped_value,  # Arguments in order
            )

            current_span = trace.get_current_span()
            if current_span.get_span_context().is_valid:
                current_span.add_event(
                    "set_variable",
                    attributes={
                        "key": key,
                        "old": str(old_value),
                        "new": str(value),
                    },
                )

    def deepcopy(self) -> "FlockContext":
        """Return an independent copy via a serialize/deserialize round-trip."""
        return FlockContext.from_dict(self.to_dict())

    def get_agent_history(self, agent_name: str) -> list[AgentRunRecord]:
        """Return all history records produced by ``agent_name``, in order."""
        return [record for record in self.history if record.agent == agent_name]

    def next_input_for(self, agent) -> Any:
        """Resolve the next input for ``agent`` from its declared input keys.

        A single key returns the bare value; multiple keys return a dict.
        Agents without a string ``input`` fall back to the 'init_input'
        variable.
        """
        try:
            if hasattr(agent, "input") and isinstance(agent.input, str):
                keys = [k.strip() for k in agent.input.split(",") if k.strip()]
                if len(keys) == 1:
                    return self.get_variable(keys[0])
                else:
                    return {key: self.get_variable(key) for key in keys}
            else:
                return self.get_variable("init_input")
        except Exception as e:
            logger.error(
                "Error getting next input for agent",
                agent=agent.name,
                error=str(e),
            )
            raise

    def get_most_recent_value(self, variable_name: str) -> Any:
        """Return the newest history value recorded under ``variable_name``.

        Returns None when no record contains the variable.
        """
        for history_record in reversed(self.history):
            if variable_name in history_record.data:
                return history_record.data[variable_name]
        return None

    def get_agent_definition(self, agent_name: str) -> AgentDefinition | None:
        """Return the stored definition for ``agent_name``, or None."""
        return self.agent_definitions.get(agent_name)

    def add_agent_definition(
        self, agent_type: type, agent_name: str, agent_data: Any
    ) -> None:
        """Register a (re-)creatable definition for the given agent."""
        definition = AgentDefinition(
            agent_type=agent_type.__name__,
            agent_name=agent_name,
            agent_data=agent_data,
        )
        self.agent_definitions[agent_name] = definition

    # Use the reactive setter for dict-like access.
    def __getitem__(self, key: str) -> Any:
        return self.get_variable(key)

    def __setitem__(self, key: str, value: Any) -> None:
        self.set_variable(key, value)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the context to plain built-in types.

        FIX: previously called ``dataclasses.asdict(self)``, which raises
        TypeError because FlockContext is a Pydantic model, not a dataclass.
        Now dumps the model and recursively normalizes the result (datetimes
        to ISO strings, dataclass values inside ``state`` to dicts).
        """

        def convert(obj: Any) -> Any:
            if isinstance(obj, datetime):
                return obj.isoformat()
            if hasattr(obj, "__dataclass_fields__"):
                # Dataclass instances stored in `state` still serialize cleanly.
                return asdict(
                    obj, dict_factory=lambda x: {k: convert(v) for k, v in x}
                )
            if isinstance(obj, dict):
                return {k: convert(v) for k, v in obj.items()}
            if isinstance(obj, list):
                return [convert(v) for v in obj]
            return obj

        return convert(self.model_dump())

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "FlockContext":
        """Rebuild a FlockContext from ``to_dict()`` output.

        Dicts are mapped back to AgentRunRecord / AgentDefinition by key
        heuristics ("timestamp" / "agent_type"). NOTE(review): a plain state
        dict that happens to contain those keys would be misclassified —
        confirm this is acceptable for current callers.
        """

        def convert(obj: Any) -> Any:
            if isinstance(obj, dict):
                if "timestamp" in obj:
                    # FIX: keep timestamp as its ISO string. The previous code
                    # converted it with datetime.fromisoformat, producing a
                    # datetime that fails validation for the str-typed field.
                    return AgentRunRecord(**obj)
                if "agent_type" in obj:
                    return AgentDefinition(**obj)
                return {k: convert(v) for k, v in obj.items()}
            if isinstance(obj, list):
                return [convert(v) for v in obj]
            return obj

        return cls(**convert(data))
```

### src\flock\core\context\context_manager.py

- **Lines**: 37
- **Last modified**: 2025-02-26 07:10:53

```py
"""Module for managing the FlockContext."""

from flock.core.context.context import FlockContext
from flock.core.context.context_vars import (
    FLOCK_CURRENT_AGENT,
    FLOCK_INITIAL_INPUT,
    FLOCK_LOCAL_DEBUG,
    FLOCK_MODEL,
    FLOCK_RUN_ID,
)


def initialize_context(
    context: FlockContext,
    agent_name: str,
    input_data: dict,
    run_id: str,
    local_debug: bool,
    model: str,
) -> None:
    """Initialize the FlockContext with standard variables before running an agent.

    Args:
        context: The FlockContext instance.
        agent_name: The name of the current agent.
        input_data: A dictionary of inputs for the agent.
        run_id: A unique identifier for the run.
        local_debug: Flag indicating whether local debugging is enabled.
        model: The model identifier to record under FLOCK_MODEL.
    """
    context.set_variable(FLOCK_CURRENT_AGENT, agent_name)
    # Mirror each input under a "flock."-prefixed key for generic lookup.
    for key, value in input_data.items():
        context.set_variable("flock." + key, value)
    context.set_variable(FLOCK_INITIAL_INPUT, input_data)
    context.set_variable(FLOCK_LOCAL_DEBUG, local_debug)
    # run_id is stored both as a context attribute and as a state variable.
    context.run_id = run_id
    context.set_variable(FLOCK_RUN_ID, run_id)
    context.set_variable(FLOCK_MODEL, model)
```

### src\flock\core\context\context_vars.py

- **Lines**: 9
- **Last modified**: 2025-02-26 07:10:53

```py
"""Context variables for Flock."""

FLOCK_CURRENT_AGENT = "flock.current_agent"
FLOCK_INITIAL_INPUT = "flock.initial_input"
FLOCK_LOCAL_DEBUG = "flock.local_debug"
FLOCK_RUN_ID = "flock.run_id"
FLOCK_LAST_AGENT = "flock.last_agent"
FLOCK_LAST_RESULT = "flock.last_result"
FLOCK_MODEL = "flock.model"
```

### src\flock\core\execution\local_executor.py

- **Lines**: 31
- **Last modified**: 2025-03-07 13:43:41

```py
# src/your_package/core/execution/local_executor.py
from flock.core.context.context import FlockContext
from flock.core.logging.logging import get_logger
from flock.workflow.activities import (
    run_agent,  # This should be the local activity function
)

logger = get_logger("flock")


async def run_local_workflow(
    context: FlockContext, box_result: bool = True
) -> dict:
    """Execute the agent workflow locally (for debugging).

    Args:
        context: The FlockContext instance with state and history.
        box_result: If True, wraps the result in a Box for nicer display.

    Returns:
        A dictionary containing the workflow result (a Box when box_result
        is True; Box is a dict subclass).
    """
    logger.info("Running local debug workflow")
    result = await run_agent(context)
    if box_result:
        # Imported lazily so `box` is only required when boxing is requested.
        from box import Box

        logger.debug("Boxing result")
        return Box(result)
    return result
```

### src\flock\core\execution\temporal_executor.py

- **Lines**: 49
- **Last modified**: 2025-03-07 13:43:41

```py
# src/your_package/core/execution/temporal_executor.py

from flock.core.context.context import FlockContext
from flock.core.context.context_vars import FLOCK_RUN_ID
from flock.core.logging.logging import get_logger
from flock.workflow.activities import (
    run_agent,  # Activity function used in Temporal
)
from flock.workflow.temporal_setup import create_temporal_client, setup_worker
from flock.workflow.workflow import FlockWorkflow  # Your workflow class

logger = get_logger("flock")


async def run_temporal_workflow(
    context: FlockContext,
    box_result: bool = True,
) -> dict:
    """Execute the agent workflow via Temporal for robust, distributed processing.

    Args:
        context: The FlockContext instance with state and history.
        box_result: If True, wraps the result in a Box for nicer display.

    Returns:
        A dictionary containing the workflow result.
    """
    logger.info("Setting up Temporal workflow")
    await setup_worker(workflow=FlockWorkflow, activity=run_agent)
    logger.debug("Creating Temporal client")
    flock_client = await create_temporal_client()
    workflow_id = context.get_variable(FLOCK_RUN_ID)
    logger.info("Executing Temporal workflow", workflow_id=workflow_id)
    result = await flock_client.execute_workflow(
        FlockWorkflow.run,
        context.to_dict(),
        id=workflow_id,
        task_queue="flock-queue",
    )

    # FIX: previously looked up the literal key "FLOCK_CURRENT_AGENT", which is
    # never set — the real state key is the FLOCK_CURRENT_AGENT constant
    # ("flock.current_agent"). Imported locally, matching this file's style.
    from flock.core.context.context_vars import FLOCK_CURRENT_AGENT

    agent_name = context.get_variable(FLOCK_CURRENT_AGENT)
    logger.debug("Formatting Temporal result", agent=agent_name)

    if box_result:
        from box import Box

        logger.debug("Boxing Temporal result")
        return Box(result)
    return result
```

### src\flock\core\flock_agent.py

- **Lines**: 589
- **Last modified**: 2025-04-04 16:30:16

```py
# src/flock/core/flock_agent.py
"""FlockAgent is the core, declarative base class for all agents in the Flock framework."""

import asyncio
from abc import ABC
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar

if TYPE_CHECKING:
    from flock.core.context.context import FlockContext
    from flock.core.flock_evaluator import FlockEvaluator
    from flock.core.flock_module import FlockModule
    from flock.core.flock_router import FlockRouter

from opentelemetry import trace
from pydantic import BaseModel, Field

# Core Flock components (ensure these are importable)
from flock.core.context.context import FlockContext
from flock.core.flock_evaluator import FlockEvaluator
from flock.core.flock_module import FlockModule
from flock.core.flock_router import FlockRouter
from flock.core.logging.logging import get_logger

# Mixins and Serialization components
from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
from flock.core.serialization.serializable import (
    Serializable,  # Import Serializable base
)
from flock.core.serialization.serialization_utils import (
    deserialize_component,
    serialize_item,
)

logger = get_logger("agent")
tracer = trace.get_tracer(__name__)
T = TypeVar("T", bound="FlockAgent")


# Make FlockAgent inherit from Serializable
class FlockAgent(BaseModel, Serializable, DSPyIntegrationMixin, ABC):
    """Core, declarative base class for Flock agents, enabling serialization,
    modularity, and integration with evaluation and routing components.
    Inherits from Pydantic BaseModel, ABC, DSPyIntegrationMixin, and Serializable.
    """

    name: str = Field(..., description="Unique identifier for the agent.")
    model: str | None = Field(
        None,
        description="The model identifier to use (e.g., 'openai/gpt-4o'). If None, uses Flock's default.",
    )
    description: str | Callable[..., str] | None = Field(
        "",
        description="A human-readable description or a callable returning one.",
    )
    input: str | Callable[..., str] | None = Field(
        None,
        description=(
            "Signature for input keys. Supports type hints (:) and descriptions (|). "
            "E.g., 'query: str | Search query, context: dict | Conversation context'. Can be a callable."
        ),
    )
    output: str | Callable[..., str] | None = Field(
        None,
        description=(
            "Signature for output keys. Supports type hints (:) and descriptions (|). "
            "E.g., 'result: str | Generated result, summary: str | Brief summary'. Can be a callable."
        ),
    )
    tools: list[Callable[..., Any]] | None = (
        Field(  # Assume tools are always callable for serialization simplicity
            default=None,
            description="List of callable tools the agent can use. These must be registered.",
        )
    )
    use_cache: bool = Field(
        default=True,
        description="Enable caching for the agent's evaluator (if supported).",
    )

    # --- Components ---
    evaluator: FlockEvaluator | None = Field(  # Make optional, allow None
        default=None,
        description="The evaluator instance defining the agent's core logic.",
    )
    handoff_router: FlockRouter | None = Field(  # Make optional, allow None
        default=None,
        description="Router determining the next agent in the workflow.",
    )
    modules: dict[str, FlockModule] = Field(  # Keep as dict
        default_factory=dict,
        description="Dictionary of FlockModules attached to this agent.",
    )

    # --- Runtime State (Excluded from Serialization) ---
    context: FlockContext | None = Field(
        default=None,
        exclude=True,  # Exclude context from model_dump and serialization
        description="Runtime context associated with the flock execution.",
    )

    # --- Existing Methods (add_module, remove_module, etc.) ---
    # (Keep these methods as they were, adding type hints where useful)
    def add_module(self, module: FlockModule) -> None:
        """Attach a module to this agent, replacing any module of the same name."""
        module_name = module.name
        if not module_name:
            logger.error("Module must have a name to be added.")
            return
        if module_name in self.modules:
            logger.warning(f"Overwriting existing module: {module_name}")
        self.modules[module_name] = module
        logger.debug(f"Added module '{module_name}' to agent '{self.name}'")

    def remove_module(self, module_name: str) -> None:
        """Detach the named module from this agent, warning if it is absent."""
        if module_name not in self.modules:
            logger.warning(
                f"Module '{module_name}' not found on agent '{self.name}'."
            )
            return
        del self.modules[module_name]
        logger.debug(
            f"Removed module '{module_name}' from agent '{self.name}'"
        )

    def get_module(self, module_name: str) -> FlockModule | None:
        """Return the module registered under *module_name*, or None."""
        try:
            return self.modules[module_name]
        except KeyError:
            return None

    def get_enabled_modules(self) -> list[FlockModule]:
        """Return the attached modules whose config marks them as enabled."""
        return list(
            filter(lambda mod: mod.config.enabled, self.modules.values())
        )

    # --- Lifecycle Hooks (Keep as they were) ---
    async def initialize(self, inputs: dict[str, Any]) -> None:
        """Initialize agent and run module initializers.

        Module failures are logged and recorded on the span but deliberately
        not re-raised, so a broken module does not abort the run.

        Args:
            inputs: The input mapping the agent is about to be evaluated with.
        """
        logger.debug(f"Initializing agent '{self.name}'")
        with tracer.start_as_current_span("agent.initialize") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            # FIX: dropped a pointless f-string prefix (no placeholders).
            logger.info(
                "agent.initialize",
                agent=self.name,
            )
            try:
                for module in self.get_enabled_modules():
                    await module.initialize(self, inputs, self.context)
            except Exception as module_error:
                logger.error(
                    "Error during initialize",
                    agent=self.name,
                    error=str(module_error),
                )
                span.record_exception(module_error)

    async def terminate(
        self, inputs: dict[str, Any], result: dict[str, Any]
    ) -> None:
        """Terminate agent and run module terminators.

        Module failures are logged and recorded on the span but deliberately
        not re-raised, so a broken module cannot clobber a finished run.

        Args:
            inputs: The inputs the agent was evaluated with.
            result: The result produced by evaluate().
        """
        logger.debug(f"Terminating agent '{self.name}'")
        with tracer.start_as_current_span("agent.terminate") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            span.set_attribute("result", str(result))
            # FIX: dropped a pointless f-string prefix (no placeholders).
            logger.info(
                "agent.terminate",
                agent=self.name,
            )
            try:
                for module in self.get_enabled_modules():
                    await module.terminate(self, inputs, result, self.context)
            except Exception as module_error:
                logger.error(
                    "Error during terminate",
                    agent=self.name,
                    error=str(module_error),
                )
                span.record_exception(module_error)

    async def on_error(self, error: Exception, inputs: dict[str, Any]) -> None:
        """Handle errors and run module error handlers.

        Args:
            error: The exception that triggered the hook.
            inputs: The inputs the agent was running with when it failed.

        Note: failures inside a module's own on_error are logged and recorded
        on the span, but not re-raised.
        """
        logger.error(f"Error occurred in agent '{self.name}': {error}")
        with tracer.start_as_current_span("agent.on_error") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            try:
                for module in self.get_enabled_modules():
                    await module.on_error(self, error, inputs, self.context)
            except Exception as module_error:
                logger.error(
                    "Error during on_error",
                    agent=self.name,
                    error=str(module_error),
                )
                span.record_exception(module_error)

    async def evaluate(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Core evaluation logic, calling the assigned evaluator and modules.

        Pipeline: module pre_evaluate hooks (may rewrite the inputs) -> the
        evaluator -> module post_evaluate hooks (may rewrite the result).

        Args:
            inputs: Input mapping handed to the evaluator.

        Returns:
            The (possibly module-transformed) evaluator result.

        Raises:
            RuntimeError: If no evaluator is assigned to this agent.
            Exception: Whatever the evaluator raises; on_error runs first,
                then the exception is re-raised.
        """
        if not self.evaluator:
            raise RuntimeError(
                f"Agent '{self.name}' has no evaluator assigned."
            )
        with tracer.start_as_current_span("agent.evaluate") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            logger.info(
                f"agent.evaluate",
                agent=self.name,
            )

            logger.debug(f"Evaluating agent '{self.name}'")
            current_inputs = inputs

            # Pre-evaluate hooks: each module may replace the input mapping.
            for module in self.get_enabled_modules():
                current_inputs = await module.pre_evaluate(
                    self, current_inputs, self.context
                )

            # Actual evaluation
            try:
                # Pass registered tools if the evaluator needs them
                registered_tools = []
                if self.tools:
                    # Ensure tools are actually retrieved/validated if needed by evaluator type
                    # For now, assume evaluator handles tool resolution if necessary
                    registered_tools = self.tools

                result = await self.evaluator.evaluate(
                    self, current_inputs, registered_tools
                )
            except Exception as eval_error:
                logger.error(
                    "Error during evaluate",
                    agent=self.name,
                    error=str(eval_error),
                )
                span.record_exception(eval_error)
                await self.on_error(
                    eval_error, current_inputs
                )  # Call error hook
                raise  # Re-raise the exception

            # Post-evaluate hooks: each module may replace the result mapping.
            current_result = result
            for module in self.get_enabled_modules():
                current_result = await module.post_evaluate(
                    self, current_inputs, current_result, self.context
                )

            logger.debug(f"Evaluation completed for agent '{self.name}'")
            return current_result

    def run(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Synchronous wrapper for run_async.

        Args:
            inputs: Input mapping for the agent.

        Returns:
            The result dict from run_async().

        Raises:
            RuntimeError: If called from inside a running event loop (the
                previous implementation also raised RuntimeError here, from
                run_until_complete on the running loop).
        """
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running. FIX: use asyncio.run, which creates AND closes
            # its loop — the old code created a new loop but never closed it.
            return asyncio.run(self.run_async(inputs))
        # FIX: calling loop.run_until_complete() on the *running* loop (the old
        # behavior) always raises; fail with an actionable message instead.
        raise RuntimeError(
            f"Agent '{self.name}'.run() cannot be called from a running "
            "event loop; await run_async() instead."
        )

    def set_model(self, model: str):
        """Set the model for the agent and, when possible, its evaluator's config."""
        self.model = model
        evaluator = self.evaluator
        if not evaluator:
            logger.warning(
                f"Agent '{self.name}' has no evaluator to set model for."
            )
            return
        if not hasattr(evaluator, "config"):
            logger.warning(
                f"Evaluator for agent '{self.name}' does not have a standard config to set model."
            )
            return
        evaluator.config.model = model
        logger.info(
            f"Set model to '{model}' for agent '{self.name}' and its evaluator."
        )

    async def run_async(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Asynchronous execution logic with lifecycle hooks.

        Runs initialize -> evaluate -> terminate inside a single tracing span.

        Args:
            inputs: Input mapping for the agent.

        Returns:
            The evaluation result dict.

        Raises:
            Exception: Any failure is logged, recorded on the span, and
                re-raised after the error hooks have run.
        """
        with tracer.start_as_current_span("agent.run") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            try:
                await self.initialize(inputs)
                result = await self.evaluate(inputs)
                await self.terminate(inputs, result)
                span.set_attribute("result", str(result))
                logger.info("Agent run completed", agent=self.name)
                return result
            except Exception as run_error:
                logger.error(
                    "Error running agent", agent=self.name, error=str(run_error)
                )
                # NOTE(review): string-matching the error text to decide whether
                # evaluate() already invoked on_error is fragile — confirm intent.
                if "evaluate" not in str(
                    run_error
                ):  # Simple check, might need refinement
                    await self.on_error(run_error, inputs)
                logger.error(
                    f"Agent '{self.name}' run failed: {run_error}",
                    exc_info=True,
                )
                span.record_exception(run_error)
                raise  # Re-raise after handling

    async def run_temporal(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Execute this agent remotely as a Temporal activity.

        Serializes the agent with to_dict(), connects to a Temporal server at
        localhost:7233 (namespace "default"), and runs the registered
        run_flock_agent_activity with the agent data and inputs.

        Args:
            inputs: Input mapping for the agent.

        Returns:
            The activity's result dict.

        Raises:
            Exception: Connection or activity failures, after span recording.
        """
        with tracer.start_as_current_span("agent.run_temporal") as span:
            span.set_attribute("agent.name", self.name)
            span.set_attribute("inputs", str(inputs))
            try:
                # Imported lazily so temporalio is only required for this path.
                from temporalio.client import Client

                from flock.workflow.agent_activities import (
                    run_flock_agent_activity,
                )
                from flock.workflow.temporal_setup import run_activity

                client = await Client.connect(
                    "localhost:7233", namespace="default"
                )
                agent_data = self.to_dict()
                inputs_data = inputs

                result = await run_activity(
                    client,
                    self.name,
                    run_flock_agent_activity,
                    {"agent_data": agent_data, "inputs": inputs_data},
                )
                span.set_attribute("result", str(result))
                logger.info("Temporal run successful", agent=self.name)
                return result
            except Exception as temporal_error:
                logger.error(
                    "Error in Temporal workflow",
                    agent=self.name,
                    error=str(temporal_error),
                )
                span.record_exception(temporal_error)
                raise

    # resolve_callables remains useful for dynamic definitions
    def resolve_callables(self, context: FlockContext | None = None) -> None:
        """Materialize callable description/input/output fields using *context*.

        Each callable attribute is invoked with the context and replaced by
        its return value; plain string attributes are left untouched.
        """
        for attr in ("description", "input", "output"):
            value = getattr(self, attr)
            if callable(value):
                # The callable may inspect the context to build its string.
                setattr(self, attr, value(context))

    # --- Serialization Implementation ---

    def to_dict(self) -> dict[str, Any]:
        """Convert instance to dictionary representation suitable for serialization.

        Basic fields come from Pydantic's model_dump; evaluator, router and
        modules are serialized separately with a registry "type" marker so
        from_dict can re-instantiate the right classes, and tools are stored
        as "__callable_ref__" path strings. Components or tools whose type
        name / path cannot be resolved are skipped with a warning.
        """
        from flock.core.flock_registry import get_registry

        FlockRegistry = get_registry()
        logger.debug(f"Serializing agent '{self.name}' to dict.")
        # Use Pydantic's dump, exclude manually handled fields and runtime context
        data = self.model_dump(
            exclude={
                "context",
                "evaluator",
                "modules",
                "handoff_router",
                "tools",
            },
            mode="json",  # Use json mode for better handling of standard types by Pydantic
            exclude_none=True,  # Exclude None values for cleaner output
        )

        # --- Serialize Components using Registry Type Names ---
        # Evaluator
        if self.evaluator:
            evaluator_type_name = FlockRegistry.get_component_type_name(
                type(self.evaluator)
            )
            if evaluator_type_name:
                # Recursively serialize the evaluator's dict representation
                evaluator_dict = serialize_item(
                    self.evaluator.model_dump(mode="json", exclude_none=True)
                )
                evaluator_dict["type"] = evaluator_type_name  # Add type marker
                data["evaluator"] = evaluator_dict
            else:
                logger.warning(
                    f"Could not get registered type name for evaluator {type(self.evaluator).__name__} in agent '{self.name}'. Skipping serialization."
                )

        # Router (same pattern as the evaluator above)
        if self.handoff_router:
            router_type_name = FlockRegistry.get_component_type_name(
                type(self.handoff_router)
            )
            if router_type_name:
                router_dict = serialize_item(
                    self.handoff_router.model_dump(
                        mode="json", exclude_none=True
                    )
                )
                router_dict["type"] = router_type_name
                data["handoff_router"] = router_dict
            else:
                logger.warning(
                    f"Could not get registered type name for router {type(self.handoff_router).__name__} in agent '{self.name}'. Skipping serialization."
                )

        # Modules: serialized as a name -> typed-dict mapping
        if self.modules:
            serialized_modules = {}
            for name, module_instance in self.modules.items():
                module_type_name = FlockRegistry.get_component_type_name(
                    type(module_instance)
                )
                if module_type_name:
                    module_dict = serialize_item(
                        module_instance.model_dump(
                            mode="json", exclude_none=True
                        )
                    )
                    module_dict["type"] = module_type_name
                    serialized_modules[name] = module_dict
                else:
                    logger.warning(
                        f"Could not get registered type name for module {type(module_instance).__name__} ('{name}') in agent '{self.name}'. Skipping."
                    )
            if serialized_modules:
                data["modules"] = serialized_modules

        # --- Serialize Tools (Callables) ---
        if self.tools:
            serialized_tools = []
            for tool in self.tools:
                # Classes are excluded: only plain callables get a path ref.
                if callable(tool) and not isinstance(tool, type):
                    path_str = FlockRegistry.get_callable_path_string(tool)
                    if path_str:
                        serialized_tools.append({"__callable_ref__": path_str})
                    else:
                        logger.warning(
                            f"Could not get path string for tool {tool} in agent '{self.name}'. Skipping."
                        )
                # Silently skip non-callable items or log warning
                # else:
                #      logger.warning(f"Non-callable item found in tools list for agent '{self.name}': {tool}. Skipping.")
            if serialized_tools:
                data["tools"] = serialized_tools

        # No need to call _filter_none_values here as model_dump(exclude_none=True) handles it
        return data

    @classmethod
    def from_dict(cls: type[T], data: dict[str, Any]) -> T:
        """Create instance from dictionary representation.

        Basic fields are validated by the Pydantic constructor; complex
        components (evaluator, handoff router, modules) and tool callables
        are re-hydrated afterwards via the registry. Component failures are
        logged and skipped so a partially valid agent still loads.

        Args:
            data: Serialized agent as produced by ``to_dict``. Must contain
                a ``name`` entry. The mapping is not mutated.

        Returns:
            A new agent instance of type ``cls``.

        Raises:
            ValueError: If ``name`` is missing or Pydantic initialization fails.
        """
        from flock.core.flock_registry import get_registry

        logger.debug(
            f"Deserializing agent from dict. Provided keys: {list(data.keys())}"
        )
        if "name" not in data:
            raise ValueError("Agent data must include a 'name' field.")
        FlockRegistry = get_registry()
        agent_name = data["name"]  # For logging context

        # Work on a shallow copy: the pops below must not mutate the
        # caller's dictionary (it may be reused by the caller afterwards).
        data = dict(data)

        # Pop complex components to handle them after basic agent instantiation
        evaluator_data = data.pop("evaluator", None)
        router_data = data.pop("handoff_router", None)
        modules_data = data.pop("modules", {})
        tools_data = data.pop("tools", [])

        # Remaining keys are plain fields; Pydantic validates them on init.
        # Explicit deserialize_item would only be needed for complex
        # non-pydantic structures.
        deserialized_basic_data = data

        try:
            # Create the agent instance using Pydantic's constructor
            agent = cls(**deserialized_basic_data)
        except Exception as e:
            logger.error(
                f"Pydantic validation/init failed for agent '{agent_name}': {e}",
                exc_info=True,
            )
            raise ValueError(
                f"Failed to initialize agent '{agent_name}' from dict: {e}"
            ) from e

        # --- Deserialize and Attach Components ---
        # Evaluator (best-effort: errors are logged, agent loads without it)
        if evaluator_data:
            try:
                agent.evaluator = deserialize_component(
                    evaluator_data, FlockEvaluator
                )
                if agent.evaluator is None:
                    raise ValueError("deserialize_component returned None")
                logger.debug(
                    f"Deserialized evaluator '{agent.evaluator.name}' for agent '{agent_name}'"
                )
            except Exception as e:
                logger.error(
                    f"Failed to deserialize evaluator for agent '{agent_name}': {e}",
                    exc_info=True,
                )

        # Router (same best-effort policy as the evaluator)
        if router_data:
            try:
                agent.handoff_router = deserialize_component(
                    router_data, FlockRouter
                )
                if agent.handoff_router is None:
                    raise ValueError("deserialize_component returned None")
                logger.debug(
                    f"Deserialized router '{agent.handoff_router.name}' for agent '{agent_name}'"
                )
            except Exception as e:
                logger.error(
                    f"Failed to deserialize router for agent '{agent_name}': {e}",
                    exc_info=True,
                )

        # Modules (each module fails independently and is skipped on error)
        if modules_data:
            agent.modules = {}  # Ensure it's initialized
            for name, module_data in modules_data.items():
                try:
                    module_instance = deserialize_component(
                        module_data, FlockModule
                    )
                    if module_instance:
                        # Keep the instance name aligned with the dict key
                        # unless the payload carries its own name.
                        module_instance.name = module_data.get("name", name)
                        agent.add_module(
                            module_instance
                        )  # Use add_module for consistency
                    else:
                        raise ValueError("deserialize_component returned None")
                except Exception as e:
                    logger.error(
                        f"Failed to deserialize module '{name}' for agent '{agent_name}': {e}",
                        exc_info=True,
                    )

        # --- Deserialize Tools ---
        # Tools were serialized as {"__callable_ref__": "module.func"} refs;
        # resolve each through the registry (which can dynamically import).
        agent.tools = []  # Initialize tools list
        if tools_data:
            for tool_ref in tools_data:
                if (
                    isinstance(tool_ref, dict)
                    and "__callable_ref__" in tool_ref
                ):
                    path_str = tool_ref["__callable_ref__"]
                    try:
                        tool_func = FlockRegistry.get_callable(path_str)
                        agent.tools.append(tool_func)
                    except KeyError:
                        logger.error(
                            f"Tool callable '{path_str}' not found in registry for agent '{agent_name}'. Skipping."
                        )
                else:
                    logger.warning(
                        f"Invalid tool format found during deserialization for agent '{agent_name}': {tool_ref}. Skipping."
                    )

        logger.info(f"Successfully deserialized agent: {agent.name}")
        return agent

    # --- Pydantic v2 Configuration ---
    class Config:
        """Pydantic model configuration for the agent."""

        arbitrary_types_allowed = (
            True  # Important for components like evaluator, router etc.
        )
        # Might need custom json_encoders if not using model_dump(mode='json') everywhere
        # json_encoders = {
        #      FlockEvaluator: lambda v: v.to_dict() if v else None,
        #      FlockRouter: lambda v: v.to_dict() if v else None,
        #      FlockModule: lambda v: v.to_dict() if v else None,
        # }
```

### src\flock\core\flock_evaluator.py

- **Lines**: 53
- **Last modified**: 2025-03-29 13:53:59

```py
"""Base classes and implementations for Flock evaluators."""

from abc import ABC, abstractmethod
from typing import Any, TypeVar

from pydantic import BaseModel, Field, create_model

T = TypeVar("T", bound="FlockEvaluatorConfig")


class FlockEvaluatorConfig(BaseModel):
    """Base configuration class for Flock evaluators.

    This class serves as the base for all evaluator-specific configurations.
    Each evaluator should define its own config class inheriting from this
    one (previously this docstring was copied from FlockModuleConfig and
    incorrectly described modules).

    Example:
        class DeclarativeEvaluatorConfig(FlockEvaluatorConfig):
            use_cache: bool = Field(default=True)
            temperature: float = Field(default=0.0)
    """

    model: str = Field(
        default="", description="The model to use for evaluation"
    )

    @classmethod
    def with_fields(cls: type[T], **field_definitions) -> type[T]:
        """Create a new config class with additional fields.

        Uses pydantic.create_model so the returned class inherits all
        fields and validators from *cls*.
        """
        return create_model(
            f"Dynamic{cls.__name__}", __base__=cls, **field_definitions
        )


class FlockEvaluator(ABC, BaseModel):
    """Base class for all evaluators in Flock.

    An evaluator is responsible for taking inputs and producing outputs using
    some evaluation strategy (e.g., DSPy, natural language, etc.).
    """

    # NOTE(review): base order here is (ABC, BaseModel) while FlockModule
    # uses (BaseModel, ABC) — confirm the differing MRO is intentional.
    name: str = Field(..., description="Unique identifier for this evaluator")
    config: FlockEvaluatorConfig = Field(
        default_factory=FlockEvaluatorConfig,
        description="Evaluator configuration",
    )

    @abstractmethod
    async def evaluate(
        self, agent: Any, inputs: dict[str, Any], tools: list[Any]
    ) -> dict[str, Any]:
        """Evaluate inputs to produce outputs.

        Args:
            agent: The agent this evaluator runs on behalf of.
            inputs: Mapping of input field names to values.
            tools: Callables the evaluation strategy may invoke.

        Returns:
            Mapping of output field names to produced values.
        """
        pass
```

### src\flock\core\flock_factory.py

- **Lines**: 82
- **Last modified**: 2025-03-29 13:53:59

```py
"""Factory for creating pre-configured Flock agents."""

from collections.abc import Callable
from typing import Any

from flock.core.flock_agent import FlockAgent
from flock.core.logging.formatters.themes import OutputTheme
from flock.evaluators.declarative.declarative_evaluator import (
    DeclarativeEvaluator,
    DeclarativeEvaluatorConfig,
)
from flock.modules.output.output_module import OutputModule, OutputModuleConfig
from flock.modules.performance.metrics_module import (
    MetricsModule,
    MetricsModuleConfig,
)


class FlockFactory:
    """Factory for creating pre-configured Flock agents with common module setups."""

    @staticmethod
    def create_default_agent(
        name: str,
        description: str | Callable[..., str] | None = None,
        model: str | Callable[..., str] | None = None,
        input: str | Callable[..., str] | None = None,
        output: str | Callable[..., str] | None = None,
        tools: list[Callable[..., Any] | Any] | None = None,
        use_cache: bool = True,
        enable_rich_tables: bool = False,
        output_theme: OutputTheme = OutputTheme.abernathy,
        wait_for_input: bool = False,
        temperature: float = 0.0,
        max_tokens: int = 4096,
        alert_latency_threshold_ms: int = 30000,
        no_output: bool = False,
        print_context: bool = False,
        write_to_file: bool = False,
    ) -> FlockAgent:
        """Creates a default FlockAgent.

        The agent uses a DeclarativeEvaluator and has two modules attached
        (the previous docstring listed only the OutputModule):
        - OutputModule ("output"): result rendering — rich tables, theme,
          optional wait-for-input, context printing and file output.
        - MetricsModule ("metrics"): performance metrics with a latency
          alert threshold.

        Args:
            name: Unique agent name.
            description: Agent description, or a callable producing one.
            model: Model identifier, shared by the agent and its evaluator.
            input: Input signature declaration for the agent.
            output: Output signature declaration for the agent.
            tools: Callables the agent may invoke during evaluation.
            use_cache: Whether the evaluator caches results.
            enable_rich_tables: Render results as rich tables.
            output_theme: Theme used by the OutputModule.
            wait_for_input: Pause for user input after output.
            temperature: Sampling temperature for the evaluator.
            max_tokens: Token budget for the evaluator.
            alert_latency_threshold_ms: MetricsModule alert threshold.
            no_output: Suppress all output rendering.
            print_context: Also print the run context.
            write_to_file: Persist rendered output to a file.

        Returns:
            The fully wired FlockAgent.
        """
        eval_config = DeclarativeEvaluatorConfig(
            model=model,
            use_cache=use_cache,
            max_tokens=max_tokens,
            temperature=temperature,
        )

        evaluator = DeclarativeEvaluator(name="default", config=eval_config)
        agent = FlockAgent(
            name=name,
            input=input,
            output=output,
            tools=tools,
            model=model,
            description=description,
            evaluator=evaluator,
        )
        output_config = OutputModuleConfig(
            render_table=enable_rich_tables,
            theme=output_theme,
            wait_for_input=wait_for_input,
            no_output=no_output,
            print_context=print_context,
            write_to_file=write_to_file,
        )
        output_module = OutputModule("output", config=output_config)

        metrics_config = MetricsModuleConfig(
            latency_threshold_ms=alert_latency_threshold_ms
        )
        metrics_module = MetricsModule("metrics", config=metrics_config)

        agent.add_module(output_module)
        agent.add_module(metrics_module)
        return agent
```

### src\flock\core\flock_module.py

- **Lines**: 101
- **Last modified**: 2025-03-16 14:43:42

```py
"""Base classes and implementations for the Flock module system."""

from abc import ABC
from typing import Any, TypeVar

from pydantic import BaseModel, Field, create_model

from flock.core.context.context import FlockContext

T = TypeVar("T", bound="FlockModuleConfig")


class FlockModuleConfig(BaseModel):
    """Base configuration class for Flock modules.

    Module-specific configurations inherit from this class, either by
    subclassing it directly or by deriving a variant via with_fields().

    Example:
        class MemoryModuleConfig(FlockModuleConfig):
            file_path: str = Field(default="memory.json")
            save_after_update: bool = Field(default=True)
    """

    enabled: bool = Field(
        default=True, description="Whether the module is currently enabled"
    )

    @classmethod
    def with_fields(cls: type[T], **field_definitions) -> type[T]:
        """Create a new config class with additional fields."""
        # Derive a dynamic subclass so existing fields/validators carry over.
        dynamic_name = f"Dynamic{cls.__name__}"
        return create_model(dynamic_name, __base__=cls, **field_definitions)


class FlockModule(BaseModel, ABC):
    """Base class for all Flock modules.

    Modules can hook into agent lifecycle events and modify or enhance agent behavior.
    They are initialized when added to an agent and can maintain their own state.

    Each module should define its configuration requirements either by:
    1. Creating a subclass of FlockModuleConfig
    2. Using FlockModuleConfig.with_fields() to create a config class

    All hooks are async no-ops (or pass-throughs) by default, so subclasses
    override only the events they care about. The naming suggests the order
    initialize -> pre_evaluate -> post_evaluate -> terminate, with on_error
    on failure — confirm against the agent runner before relying on it.
    """

    name: str = Field(
        default="", description="Unique identifier for the module"
    )
    config: FlockModuleConfig = Field(
        default_factory=FlockModuleConfig, description="Module configuration"
    )

    async def initialize(
        self,
        agent: Any,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Called when the agent starts running. Default: no-op."""
        pass

    async def pre_evaluate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Called before agent evaluation, can modify inputs.

        Default implementation returns the inputs unchanged.
        """
        return inputs

    async def post_evaluate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Called after agent evaluation, can modify results.

        Default implementation returns the result unchanged.
        """
        return result

    async def terminate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Called when the agent finishes running. Default: no-op."""
        pass

    async def on_error(
        self,
        agent: Any,
        error: Exception,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Called when an error occurs during agent execution. Default: no-op."""
        pass
```

### src\flock\core\flock_registry.py

- **Lines**: 532
- **Last modified**: 2025-04-04 17:11:47

```py
# src/flock/core/flock_registry.py
"""Centralized registry for managing Agents, Callables, Types, and Component Classes
within the Flock framework to support dynamic lookup and serialization.
"""

from __future__ import annotations  # Add this at the very top

import importlib
import inspect
import sys
from collections.abc import Callable, Mapping, Sequence
from dataclasses import is_dataclass
from typing import (  # Add TYPE_CHECKING
    TYPE_CHECKING,
    Any,
    Literal,
    Optional,
    TypeVar,
    Union,
    overload,
)

from pydantic import BaseModel

if TYPE_CHECKING:
    from flock.core.flock_agent import (
        FlockAgent,  # Import only for type checking
    )
    from flock.core.flock_evaluator import FlockEvaluator
    from flock.core.flock_module import FlockModule
    from flock.core.flock_router import FlockRouter
else:
    # FlockAgent is only needed for annotations; keep it lazy to avoid the
    # circular import (flock_agent imports this module).
    FlockAgent = Any

# Resolve the component base classes at runtime too. Previously they were
# imported only under TYPE_CHECKING, so COMPONENT_BASE_TYPES was always ()
# at runtime and register_module_components() could never detect component
# subclasses. Guard with try/except in case the core modules are not yet
# importable during early bootstrap.
try:
    from flock.core.flock_evaluator import FlockEvaluator
    from flock.core.flock_module import FlockModule
    from flock.core.flock_router import FlockRouter

    COMPONENT_BASE_TYPES = (FlockModule, FlockEvaluator, FlockRouter)
    IS_COMPONENT_CHECK_ENABLED = True
except ImportError:
    # Fallback if core types aren't available during setup.
    COMPONENT_BASE_TYPES = ()
    IS_COMPONENT_CHECK_ENABLED = False

# Fallback if core types aren't available during setup

from flock.core.logging.logging import get_logger

logger = get_logger("registry")
T = TypeVar("T")
ClassType = TypeVar("ClassType", bound=type)
FuncType = TypeVar("FuncType", bound=Callable)


class FlockRegistry:
    """Singleton registry for Agents, Callables (functions/methods).

    Types (Pydantic/Dataclasses used in signatures), and Component Classes
    (Modules, Evaluators, Routers).

    Access the shared instance via get_registry(); constructing the class
    directly also returns the shared instance (see __new__).
    """

    _instance = None

    # Per-instance stores, created in _initialize() by the singleton __new__.
    _agents: dict[str, FlockAgent]
    _callables: dict[str, Callable]
    _types: dict[str, type]
    _components: dict[str, type]  # For Module, Evaluator, Router classes

    def __new__(cls):
        # Classic singleton: the first construction builds and initializes
        # the shared instance; every later call returns it unchanged.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._initialize()
            # logger.info("FlockRegistry instance created.")
        return cls._instance

    def _initialize(self):
        """Initialize the internal dictionaries."""
        self._agents = {}
        self._callables = {}
        self._types = {}
        self._components = {}
        # logger.debug("FlockRegistry initialized internal stores.")
        # Auto-register core Python types
        self._register_core_types()

    def _register_core_types(self):
        """Registers common built-in and typing types.

        NOTE(review): typing special forms (Optional, Union) may lack
        __name__, making register_type raise; the except below logs and
        continues in that case — verify this is acceptable noise.
        """
        core_types = [
            str,
            int,
            float,
            bool,
            list,
            dict,
            tuple,
            set,
            Any,
            Mapping,
            Sequence,
            TypeVar,
            Literal,
            Optional,
            Union,  # Common typing generics
        ]
        for t in core_types:
            try:
                self.register_type(t)
            except Exception as e:
                logger.error(f"Failed to auto-register core type {t}: {e}")

    # --- Path String Generation ---
    @staticmethod
    def _get_path_string(obj: Callable | type) -> str | None:
        """Generates a unique path string 'module.ClassName' or 'module.function_name'."""
        try:
            module = obj.__module__
            name = obj.__name__
            # Built-ins are addressed by bare name (see get_callable).
            if module == "builtins":
                return name
            # Check if it's nested (basic check, might not cover all edge cases)
            if "." in name and hasattr(sys.modules[module], name.split(".")[0]):
                # Likely a nested class/method - serialization might need custom handling or pickle
                logger.warning(
                    f"Object {name} appears nested in {module}. Path string might be ambiguous."
                )
            return f"{module}.{name}"
        except AttributeError:
            # Lambdas/partials or C objects without __module__/__name__.
            logger.warning(f"Could not determine module/name for object: {obj}")
            return None

    # --- Agent Registration ---
    def register_agent(self, agent: FlockAgent) -> None:
        """Registers a FlockAgent instance by its name.

        Re-registering the same instance is silent; a different instance
        under an existing name overwrites it with a warning.
        """
        if not hasattr(agent, "name") or not agent.name:
            logger.error(
                "Attempted to register an agent without a valid 'name' attribute."
            )
            return
        if agent.name in self._agents and self._agents[agent.name] != agent:
            logger.warning(
                f"Agent '{agent.name}' already registered. Overwriting."
            )
        self._agents[agent.name] = agent
        logger.debug(f"Registered agent: {agent.name}")

    def get_agent(self, name: str) -> FlockAgent | None:
        """Retrieves a registered FlockAgent instance by name.

        Returns None (with a warning) when the agent is unknown.
        """
        agent = self._agents.get(name)
        if not agent:
            logger.warning(f"Agent '{name}' not found in registry.")
        return agent

    def get_all_agent_names(self) -> list[str]:
        """Returns a list of names of all registered agents."""
        return list(self._agents.keys())

    # --- Callable Registration ---
    def register_callable(
        self, func: Callable, name: str | None = None
    ) -> str | None:
        """Registers a callable (function/method). Returns its path string identifier.

        When *name* is omitted the identifier is derived from the callable's
        module and __name__; returns None if no identifier can be derived.
        """
        path_str = name or self._get_path_string(func)
        if path_str:
            if (
                path_str in self._callables
                and self._callables[path_str] != func
            ):
                logger.warning(
                    f"Callable '{path_str}' already registered. Overwriting."
                )
            self._callables[path_str] = func
            logger.debug(f"Registered callable: {path_str}")
            return path_str
        return None

    def get_callable(self, path_str: str) -> Callable:
        """Retrieves a callable by its path string, attempting dynamic import if not found.

        Successfully imported callables are cached back into the registry.

        Raises:
            KeyError: If the callable cannot be found or loaded.
        """
        if path_str in self._callables:
            return self._callables[path_str]

        logger.debug(
            f"Callable '{path_str}' not in registry, attempting dynamic import."
        )
        try:
            # A bare name (no dots) is treated as a builtin.
            if "." not in path_str:  # Built-ins
                builtins_module = importlib.import_module("builtins")
                if hasattr(builtins_module, path_str):
                    func = getattr(builtins_module, path_str)
                    if callable(func):
                        self.register_callable(func, path_str)  # Cache it
                        return func
                raise KeyError(f"Built-in callable '{path_str}' not found.")

            # Dotted path: import the module, then fetch the attribute.
            module_name, func_name = path_str.rsplit(".", 1)
            module = importlib.import_module(module_name)
            func = getattr(module, func_name)
            if callable(func):
                self.register_callable(
                    func, path_str
                )  # Cache dynamically imported
                return func
            else:
                raise TypeError(
                    f"Dynamically imported object '{path_str}' is not callable."
                )
        except (ImportError, AttributeError, KeyError, TypeError) as e:
            # Normalize all failure modes to KeyError for callers.
            logger.error(
                f"Failed to dynamically load/find callable '{path_str}': {e}"
            )
            raise KeyError(
                f"Callable '{path_str}' not found or failed to load: {e}"
            ) from e

    def get_callable_path_string(self, func: Callable) -> str | None:
        """Gets the path string for a callable, registering it if necessary."""
        # Linear scan by equality over registered callables; O(n) per call.
        for path_str, registered_func in self._callables.items():
            if func == registered_func:
                return path_str
        # If not found by identity, generate path, register, and return
        return self.register_callable(func)

    # --- Type Registration ---
    def register_type(
        self, type_obj: type, name: str | None = None
    ) -> str | None:
        """Registers a class/type (Pydantic, Dataclass, etc.) used in signatures.

        Keyed by bare class name (or *name*), so same-named classes from
        different modules overwrite each other with a warning.
        """
        type_name = name or type_obj.__name__
        if type_name:
            if type_name in self._types and self._types[type_name] != type_obj:
                logger.warning(
                    f"Type '{type_name}' already registered. Overwriting."
                )
            self._types[type_name] = type_obj
            # logger.debug(f"Registered type: {type_name}")
            return type_name
        return None

    def get_type(self, type_name: str) -> type:
        """Retrieves a registered type by its name.

        Raises:
            KeyError: If the type was never registered.
        """
        if type_name in self._types:
            return self._types[type_name]
        else:
            # Consider adding dynamic import attempts for types if needed,
            # but explicit registration is generally safer for types.
            logger.error(f"Type '{type_name}' not found in registry.")
            raise KeyError(
                f"Type '{type_name}' not found. Ensure it is registered."
            )

    # --- Component Class Registration ---
    def register_component(
        self, component_class: type, name: str | None = None
    ) -> str | None:
        """Registers a component class (Module, Evaluator, Router)."""
        type_name = name or component_class.__name__
        if type_name:
            # Optional: Add check if it's a subclass of expected bases
            # if COMPONENT_BASE_TYPES and not issubclass(component_class, COMPONENT_BASE_TYPES):
            #     logger.warning(f"Registering class '{type_name}' which is not a standard Flock component type.")
            if (
                type_name in self._components
                and self._components[type_name] != component_class
            ):
                logger.warning(
                    f"Component class '{type_name}' already registered. Overwriting."
                )
            self._components[type_name] = component_class
            logger.debug(f"Registered component class: {type_name}")
            return type_name
        return None

    def get_component(self, type_name: str) -> type:
        """Retrieves a component class by its type name.

        Raises:
            KeyError: If the component class was never registered.
        """
        if type_name in self._components:
            return self._components[type_name]
        else:
            # Dynamic import attempts similar to get_callable could be added here if desired,
            # targeting likely module locations based on type_name conventions.
            logger.error(
                f"Component class '{type_name}' not found in registry."
            )
            raise KeyError(
                f"Component class '{type_name}' not found. Ensure it is registered."
            )

    def get_component_type_name(self, component_class: type) -> str | None:
        """Gets the type name for a component class, registering it if necessary."""
        # Linear scan, mirroring get_callable_path_string.
        for type_name, registered_class in self._components.items():
            if component_class == registered_class:
                return type_name
        # If not found, register using class name and return
        return self.register_component(component_class)

    # --- Auto-Registration ---
    def register_module_components(self, module_or_path: Any) -> None:
        """Scans a module (object or path string) and automatically registers.

        - Functions as callables.
        - Pydantic Models and Dataclasses as types.
        - Subclasses of FlockModule, FlockEvaluator, FlockRouter as components.

        NOTE(review): COMPONENT_BASE_TYPES is populated only under
        TYPE_CHECKING, so at runtime it may be empty and the component
        branch below never fires — verify against the module top-level.
        """
        try:
            if isinstance(module_or_path, str):
                module = importlib.import_module(module_or_path)
            elif inspect.ismodule(module_or_path):
                module = module_or_path
            else:
                logger.error(
                    f"Invalid input for auto-registration: {module_or_path}. Must be module object or path string."
                )
                return

            logger.info(
                f"Auto-registering components from module: {module.__name__}"
            )
            registered_count = {"callable": 0, "type": 0, "component": 0}

            for name, obj in inspect.getmembers(module):
                if name.startswith("_"):
                    continue  # Skip private/internal

                # Register Functions as Callables
                # (the __module__ check skips names merely imported into the module)
                if (
                    inspect.isfunction(obj)
                    and obj.__module__ == module.__name__
                ):
                    if self.register_callable(obj):
                        registered_count["callable"] += 1

                # Register Classes (Types and Components)
                elif inspect.isclass(obj) and obj.__module__ == module.__name__:
                    is_component = False
                    # Register as Component if subclass of base types
                    if (
                        COMPONENT_BASE_TYPES
                        and issubclass(obj, COMPONENT_BASE_TYPES)
                        and self.register_component(obj)
                    ):
                        registered_count["component"] += 1
                        is_component = True  # Mark as component

                    # Register as Type if Pydantic Model or Dataclass
                    # A component can also be a type used in signatures
                    base_model_or_dataclass = isinstance(obj, type) and (
                        issubclass(obj, BaseModel) or is_dataclass(obj)
                    )
                    if (
                        base_model_or_dataclass
                        and self.register_type(obj)
                        and not is_component
                    ):
                        # Only increment type count if it wasn't already counted as component
                        registered_count["type"] += 1

            logger.info(
                f"Auto-registration summary for {module.__name__}: "
                f"{registered_count['callable']} callables, "
                f"{registered_count['type']} types, "
                f"{registered_count['component']} components."
            )

        except Exception as e:
            # Broad catch: auto-registration is best-effort and must not
            # break importers of this module.
            logger.error(
                f"Error during auto-registration for {module_or_path}: {e}",
                exc_info=True,
            )


# --- Initialize Singleton ---
# Constructed at import time so the singleton exists before any of the
# decorators below (flock_component/flock_tool/flock_type) can run.
_registry_instance = FlockRegistry()


# --- Convenience Access ---
# Provide a function to easily get the singleton instance
def get_registry() -> FlockRegistry:
    """Returns the singleton FlockRegistry instance."""
    return _registry_instance


# Type hinting for decorators to preserve signature
@overload
def flock_component(cls: ClassType) -> ClassType: ...
@overload
def flock_component(
    *, name: str | None = None
) -> Callable[[ClassType], ClassType]: ...


def flock_component(
    cls: ClassType | None = None, *, name: str | None = None
) -> Any:
    """Decorator to register a Flock Component class (Module, Evaluator, Router).

    Usage:
        @flock_component
        class MyModule(FlockModule): ...

        @flock_component(name="CustomRouterAlias")
        class MyRouter(FlockRouter): ...
    """
    registry = get_registry()

    def decorator(inner_cls: ClassType) -> ClassType:
        if not inspect.isclass(inner_cls):
            raise TypeError("@flock_component can only decorate classes.")
        # Fall back to the class's own name when no alias was given.
        registry.register_component(inner_cls, name=name or inner_cls.__name__)
        return inner_cls

    # Parenthesized form (@flock_component(name=...)) returns the decorator;
    # bare form (@flock_component) applies it immediately.
    return decorator if cls is None else decorator(cls)


# Type hinting for decorators
@overload
def flock_tool(func: FuncType) -> FuncType: ...
@overload
def flock_tool(
    *, name: str | None = None
) -> Callable[[FuncType], FuncType]: ...


def flock_tool(func: FuncType | None = None, *, name: str | None = None) -> Any:
    """Decorator to register a callable function/method as a Tool (or general callable).

    Usage:
        @flock_tool
        def my_web_search(query: str): ...

        @flock_tool(name="utils.calculate_pi")
        def compute_pi(): ...
    """
    registry = get_registry()

    def decorator(inner_func: FuncType) -> FuncType:
        if not callable(inner_func):
            raise TypeError("@flock_tool can only decorate callables.")
        # Passing name=None lets the registry derive the path string itself.
        registry.register_callable(inner_func, name=name)
        return inner_func

    # Parenthesized form returns the decorator; bare form applies it now.
    return decorator if func is None else decorator(func)


# Alias for clarity if desired
# flock_callable = flock_tool


@overload
def flock_type(cls: ClassType) -> ClassType: ...
@overload
def flock_type(
    *, name: str | None = None
) -> Callable[[ClassType], ClassType]: ...


def flock_type(cls: ClassType | None = None, *, name: str | None = None) -> Any:
    """Decorator to register a Type (Pydantic Model, Dataclass) used in signatures.

    Usage:
        @flock_type
        class MyDataModel(BaseModel): ...

        @flock_type(name="UserInput")
        @dataclass
        class UserQuery: ...
    """
    registry = get_registry()

    def decorator(inner_cls: ClassType) -> ClassType:
        if not inspect.isclass(inner_cls):
            raise TypeError("@flock_type can only decorate classes.")
        # Default the registry key to the class's own name.
        registry.register_type(inner_cls, name=name or inner_cls.__name__)
        return inner_cls

    # Parenthesized form returns the decorator; bare form applies it now.
    return decorator if cls is None else decorator(cls)


# --- Auto-register known core components and tools ---
def _auto_register_by_path():
    """Best-effort auto-registration of built-in components and tool modules.

    Any component or tool module that cannot be imported is logged as a
    warning and skipped, so optional extras never break registry import.
    """
    core_components = (
        (
            "flock.evaluators.declarative.declarative_evaluator",
            "DeclarativeEvaluator",
        ),
        ("flock.evaluators.memory.memory_evaluator", "MemoryEvaluator"),
        ("flock.modules.output.output_module", "OutputModule"),
        ("flock.modules.performance.metrics_module", "MetricsModule"),
        ("flock.modules.memory.memory_module", "MemoryModule"),
        # ("flock.modules.hierarchical.module", "HierarchicalMemoryModule"), # Uncomment if exists
        ("flock.routers.default.default_router", "DefaultRouter"),
        ("flock.routers.llm.llm_router", "LLMRouter"),
        ("flock.routers.agent.agent_router", "AgentRouter"),
    )
    for dotted_path, class_name in core_components:
        try:
            module = importlib.import_module(dotted_path)
            _registry_instance.register_component(getattr(module, class_name))
        except (ImportError, AttributeError) as e:
            logger.warning(f"{class_name} not found for auto-registration: {e}")

    # Scan the standard tool modules and register everything they expose.
    standard_tool_modules = (
        "flock.core.tools.basic_tools",
        "flock.core.tools.azure_tools",
        "flock.core.tools.dev_tools.github",
        "flock.core.tools.llm_tools",
        "flock.core.tools.markdown_tools",
    )
    for module_path in standard_tool_modules:
        try:
            _registry_instance.register_module_components(module_path)
        except ImportError as e:
            logger.warning(
                f"Could not auto-register tools from {module_path}: {e}"
            )


# Bootstrapping the registry
# _auto_register_by_path()
```

### src\flock\core\flock_router.py

- **Lines**: 70
- **Last modified**: 2025-03-16 14:43:42

```py
"""Base router class for the Flock framework."""

from abc import ABC, abstractmethod
from typing import Any, Literal

from pydantic import BaseModel, Field

from flock.core.context.context import FlockContext


class HandOffRequest(BaseModel):
    """Base class for handoff returns.

    Describes which agent should run next and how its input is built from
    the current agent's output.
    """

    next_agent: str = Field(default="", description="Next agent to invoke")
    # match = use the output fields of the current agent that also exists as input field of the next agent
    # add = add the output of the current agent to the input of the next agent
    hand_off_mode: Literal["match", "add"] = Field(default="match")
    override_next_agent: Any | None = Field(
        default=None,
        description="Override the next agent to hand off to",
    )
    # BUG FIX: the keyword was misspelled ``descrio``, so the field
    # description was never attached to the generated schema.
    override_context: FlockContext | None = Field(
        default=None, description="Override context parameters"
    )


class FlockRouterConfig(BaseModel):
    """Configuration shared by all routers.

    Router subclasses may extend this model with additional,
    router-specific settings.
    """

    # Disabled routers are skipped entirely by the workflow engine.
    enabled: bool = Field(
        default=True, description="Whether the router is enabled"
    )
    # None means the router may consider every registered agent.
    agents: list[str] | None = Field(
        default=None,
        description="List of agents to choose from",
    )


class FlockRouter(BaseModel, ABC):
    """Abstract base class for all routers.

    A router inspects the output of the agent that just ran and decides
    which agent (if any) should run next in the workflow.
    """

    name: str = Field(..., description="Name of the router")
    config: FlockRouterConfig = Field(default_factory=FlockRouterConfig)

    @abstractmethod
    async def route(
        self,
        current_agent: Any,
        result: dict[str, Any],
        context: FlockContext,
    ) -> HandOffRequest:
        """Pick the next agent based on the current agent's output.

        Args:
            current_agent: The agent that just completed execution.
            result: The output produced by that agent.
            context: The global execution context.

        Returns:
            A HandOffRequest naming the next agent and how to build its input.
        """
        ...
```

### src\flock\core\interpreter\python_interpreter.py

- **Lines**: 683
- **Last modified**: 2025-02-24 03:21:51

```py
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import ast
import builtins
import difflib
import importlib
import re
import typing
from collections.abc import Mapping
from typing import (
    Any,
)

from opentelemetry import trace

from flock.core.logging.logging import get_logger

tracer = trace.get_tracer(__name__)
logger = get_logger("interpreter")


class InterpreterError(ValueError):
    r"""An error raised when the interpreter cannot evaluate a Python
    expression, due to syntax error or unsupported operations.
    """

    pass


class PythonInterpreter:
    r"""A customized python interpreter to control the execution of
    LLM-generated codes. The interpreter makes sure the code can only execute
    functions given in action space and import white list. It also supports
    fuzzy variable matching to receive uncertain input variable name.

    [Documentation omitted for brevity]

    Args:
        action_space (Dict[str, Any]): A dictionary mapping action names to
            their corresponding functions or objects.
        import_white_list (Optional[List[str]], optional): A list of allowed modules.
        verbose (bool, optional): If True, the interpreter prints log messages
            as it executes the code. (default: False)
    """

    def __init__(
        self,
        action_space: dict[str, Any],
        import_white_list: list[str] | None = None,
        verbose: bool = False,
    ) -> None:
        self.action_space = action_space
        self.state = self.action_space.copy()
        self.fuzz_state: dict[str, Any] = {}
        self.import_white_list = import_white_list or [
            "math",
            "random",
            "datetime",
            "time",
            "string",
            "collections",
            "itertools",
            "functools",
            "typing",
            "enum",
            "json",
            "ast",
        ]  # default imports
        self.verbose = verbose

    def log(self, message: str) -> None:
        """Print a log message immediately."""
        # print(message, flush=True)
        logger.info(message, flush=True)

    def execute(
        self,
        code: str,
        state: dict[str, Any] | None = None,
        fuzz_state: dict[str, Any] | None = None,
        keep_state: bool = True,
    ) -> Any:
        r"""Execute the input python codes in a secure environment.

        [Documentation omitted for brevity]
        """
        if state is not None:
            self.state.update(state)
        if fuzz_state is not None:
            self.fuzz_state.update(fuzz_state)

        try:
            expression = ast.parse(code)
        except SyntaxError as e:
            error_line = code.splitlines()[e.lineno - 1]
            raise InterpreterError(
                f"Syntax error in code at line {e.lineno}: {error_line}\nError: {e}"
            )

        result = None
        if self.verbose:
            self.log("[Interpreter] Starting code execution...")

        for idx, node in enumerate(expression.body):
            # Log the AST node being executed (using unparse if available)
            if self.verbose:
                try:
                    node_repr = ast.unparse(node)
                except Exception:
                    node_repr = ast.dump(node)
                self.log(f"[Interpreter] Executing node {idx}: {node_repr}")

            try:
                line_result = self._execute_ast(node)
            except InterpreterError as e:
                if not keep_state:
                    self.clear_state()
                msg = f"Evaluation of the code stopped at node {idx}. See:\n{e}"
                raise InterpreterError(msg)
            if line_result is not None:
                result = line_result
                if self.verbose:
                    self.log(f"[Interpreter] Node {idx} result: {result}")

        if self.verbose:
            self.log("[Interpreter] Finished code execution.")
        if not keep_state:
            self.clear_state()

        return result

    def clear_state(self) -> None:
        r"""Initialize :obj:`state` and :obj:`fuzz_state`"""
        self.state = self.action_space.copy()
        self.fuzz_state = {}

    # ast.Index is deprecated after python 3.9, which cannot pass type check,
    # but is still necessary for older versions.
    @typing.no_type_check
    def _execute_ast(self, expression: ast.AST) -> Any:
        if isinstance(expression, ast.Assign):
            return self._execute_assign(expression)
        elif isinstance(expression, ast.Attribute):
            value = self._execute_ast(expression.value)
            return getattr(value, expression.attr)
        elif isinstance(expression, ast.AugAssign):
            return self._execute_augassign(expression)
        elif isinstance(expression, ast.BinOp):
            return self._execute_binop(expression)
        elif isinstance(expression, ast.BoolOp):
            return self._execute_condition(expression)
        elif isinstance(expression, ast.Call):
            return self._execute_call(expression)
        elif isinstance(expression, ast.Compare):
            return self._execute_condition(expression)
        elif isinstance(expression, ast.Constant):
            return expression.value
        elif isinstance(expression, ast.Dict):
            result: dict = {}
            for k, v in zip(expression.keys, expression.values):
                if k is not None:
                    result[self._execute_ast(k)] = self._execute_ast(v)
                else:
                    result.update(self._execute_ast(v))
            return result
        elif isinstance(expression, ast.Expr):
            return self._execute_ast(expression.value)
        elif isinstance(expression, ast.For):
            return self._execute_for(expression)
        elif isinstance(expression, ast.FormattedValue):
            return self._execute_ast(expression.value)
        elif isinstance(expression, ast.FunctionDef):
            self.state[expression.name] = expression
            return None
        elif isinstance(expression, ast.GeneratorExp):
            return self._execute_generatorexp(expression)
        elif isinstance(expression, ast.If):
            return self._execute_if(expression)
        elif isinstance(expression, ast.IfExp):
            return self._execute_ifexp(expression)
        elif isinstance(expression, ast.Import):
            self._execute_import(expression)
            return None
        elif isinstance(expression, ast.ImportFrom):
            self._execute_import_from(expression)
            return None
        elif hasattr(ast, "Index") and isinstance(expression, ast.Index):
            return self._execute_ast(expression.value)
        elif isinstance(expression, ast.JoinedStr):
            return "".join(
                [str(self._execute_ast(v)) for v in expression.values]
            )
        elif isinstance(expression, ast.Lambda):
            return self._execute_lambda(expression)
        elif isinstance(expression, ast.List):
            return [self._execute_ast(elt) for elt in expression.elts]
        elif isinstance(expression, ast.Name):
            return self._execute_name(expression)
        elif isinstance(expression, ast.Return):
            return self._execute_ast(expression.value)
        elif isinstance(expression, ast.Subscript):
            return self._execute_subscript(expression)
        elif isinstance(expression, ast.Tuple):
            return tuple([self._execute_ast(elt) for elt in expression.elts])
        elif isinstance(expression, ast.UnaryOp):
            return self._execute_unaryop(expression)
        elif isinstance(expression, ast.While):
            return self._execute_while(expression)
        elif isinstance(expression, ast.ListComp):
            return self._execute_listcomp(expression)
        elif isinstance(expression, ast.DictComp):
            return self._execute_dictcomp(expression)
        elif isinstance(expression, ast.SetComp):
            return self._execute_setcomp(expression)
        elif isinstance(expression, ast.Break):
            raise BreakException()
        elif isinstance(expression, ast.Continue):
            raise ContinueException()
        elif isinstance(expression, ast.Try):
            return self._execute_try(expression)
        elif isinstance(expression, ast.Raise):
            return self._execute_raise(expression)
        elif isinstance(expression, ast.Pass):
            return None
        elif isinstance(expression, ast.Assert):
            return self._execute_assert(expression)
        else:
            raise InterpreterError(
                f"{expression.__class__.__name__} is not supported."
            )

    def _execute_assign(self, assign: ast.Assign) -> Any:
        targets = assign.targets
        result = self._execute_ast(assign.value)

        for target in targets:
            self._assign(target, result)
        return result

    def _assign(self, target: ast.expr, value: Any):
        if isinstance(target, ast.Name):
            self.state[target.id] = value
        elif isinstance(target, ast.Tuple):
            if not isinstance(value, tuple):
                raise InterpreterError(
                    f"Expected type tuple, but got {value.__class__.__name__} instead."
                )
            if len(target.elts) != len(value):
                raise InterpreterError(
                    f"Expected {len(target.elts)} values but got {len(value)}."
                )
            for t, v in zip(target.elts, value):
                self.state[self._execute_ast(t)] = v
        else:
            raise InterpreterError(
                f"Unsupported variable type. Expected ast.Name or ast.Tuple, got {target.__class__.__name__} instead."
            )

    def _execute_call(self, call: ast.Call) -> Any:
        callable_func = self._execute_ast(call.func)

        args = [self._execute_ast(arg) for arg in call.args]
        kwargs = {
            keyword.arg: self._execute_ast(keyword.value)
            for keyword in call.keywords
        }
        if isinstance(callable_func, ast.FunctionDef):
            old_state = self.state.copy()
            for param_name, arg_value in zip(
                [param.arg for param in callable_func.args.args], args
            ):
                self.state[param_name] = arg_value
            result = None
            for stmt in callable_func.body:
                result = self._execute_ast(stmt)
                if isinstance(stmt, ast.Return):
                    break
            self.state = old_state
            return result
        return callable_func(*args, **kwargs)

    def _execute_augassign(self, augassign: ast.AugAssign):
        current_value = self.state[augassign.target.id]
        increment_value = self._execute_ast(augassign.value)
        if not (
            isinstance(current_value, (int, float))
            and isinstance(increment_value, (int, float))
        ):
            raise InterpreterError(
                f"Invalid types for augmented assignment: {type(current_value)}, {type(increment_value)}"
            )
        if isinstance(augassign.op, ast.Add):
            new_value = current_value + increment_value
        elif isinstance(augassign.op, ast.Sub):
            new_value = current_value - increment_value
        elif isinstance(augassign.op, ast.Mult):
            new_value = current_value * increment_value
        elif isinstance(augassign.op, ast.Div):
            new_value = current_value / increment_value
        else:
            raise InterpreterError(
                f"Augmented assignment operator {augassign.op} is not supported"
            )
        self._assign(augassign.target, new_value)
        return new_value

    def _execute_subscript(self, subscript: ast.Subscript):
        index = self._execute_ast(subscript.slice)
        value = self._execute_ast(subscript.value)
        if not isinstance(subscript.ctx, ast.Load):
            raise InterpreterError(
                f"{subscript.ctx.__class__.__name__} is not supported for subscript."
            )
        if isinstance(value, (list, tuple)):
            return value[int(index)]
        if index in value:
            return value[index]
        if isinstance(index, str) and isinstance(value, Mapping):
            close_matches = difflib.get_close_matches(index, list(value.keys()))
            if len(close_matches) > 0:
                return value[close_matches[0]]
        raise InterpreterError(f"Could not index {value} with '{index}'.")

    def _execute_name(self, name: ast.Name):
        if name.id in dir(builtins):
            return getattr(builtins, name.id)
        if isinstance(name.ctx, ast.Store):
            return name.id
        elif isinstance(name.ctx, ast.Load):
            return self._get_value_from_state(name.id)
        else:
            raise InterpreterError(f"{name.ctx} is not supported.")

    def _execute_condition(self, condition):
        if isinstance(condition, ast.BoolOp):
            if isinstance(condition.op, ast.And):
                results = [
                    self._execute_ast(value) for value in condition.values
                ]
                return all(results)
            elif isinstance(condition.op, ast.Or):
                results = [
                    self._execute_ast(value) for value in condition.values
                ]
                return any(results)
            else:
                raise InterpreterError(
                    f"Boolean operator {condition.op} is not supported"
                )
        elif isinstance(condition, ast.Compare):
            if len(condition.ops) > 1:
                raise InterpreterError(
                    "Cannot evaluate conditions with multiple operators"
                )
            left = self._execute_ast(condition.left)
            comparator = condition.ops[0]
            right = self._execute_ast(condition.comparators[0])
            if isinstance(comparator, ast.Eq):
                return left == right
            elif isinstance(comparator, ast.NotEq):
                return left != right
            elif isinstance(comparator, ast.Lt):
                return left < right
            elif isinstance(comparator, ast.LtE):
                return left <= right
            elif isinstance(comparator, ast.Gt):
                return left > right
            elif isinstance(comparator, ast.GtE):
                return left >= right
            elif isinstance(comparator, ast.Is):
                return left is right
            elif isinstance(comparator, ast.IsNot):
                return left is not right
            elif isinstance(comparator, ast.In):
                return left in right
            elif isinstance(comparator, ast.NotIn):
                return left not in right
            else:
                raise InterpreterError("Unsupported comparison operator")
        elif isinstance(condition, ast.UnaryOp):
            return self._execute_unaryop(condition)
        elif isinstance(condition, ast.Name) or isinstance(condition, ast.Call):
            return bool(self._execute_ast(condition))
        elif isinstance(condition, ast.Constant):
            return bool(condition.value)
        else:
            raise InterpreterError(
                f"Unsupported condition type: {type(condition).__name__}"
            )

    def _execute_if(self, if_statement: ast.If):
        result = None
        if self._execute_condition(if_statement.test):
            for line in if_statement.body:
                line_result = self._execute_ast(line)
                if line_result is not None:
                    result = line_result
        else:
            for line in if_statement.orelse:
                line_result = self._execute_ast(line)
                if line_result is not None:
                    result = line_result
        return result

    def _execute_ifexp(self, ifexp: ast.IfExp) -> Any:
        test_result = self._execute_condition(ifexp.test)
        if test_result:
            return self._execute_ast(ifexp.body)
        else:
            return self._execute_ast(ifexp.orelse)

    def _execute_import(self, import_module: ast.Import) -> None:
        for module in import_module.names:
            self._validate_import(module.name)
            alias = module.asname or module.name
            self.state[alias] = importlib.import_module(module.name)

    def _execute_import_from(self, import_from: ast.ImportFrom):
        if import_from.module is None:
            raise InterpreterError('"from . import" is not supported.')
        for import_name in import_from.names:
            full_name = import_from.module + f".{import_name.name}"
            self._validate_import(full_name)
            imported_module = importlib.import_module(import_from.module)
            alias = import_name.asname or import_name.name
            self.state[alias] = getattr(imported_module, import_name.name)

    # Note: Two versions of _execute_for and _execute_while appear in this file.
    # We keep both as provided, but you may wish to consolidate these in your code.

    def _execute_for(self, for_statement: ast.For):
        class BreakException(Exception):
            pass

        class ContinueException(Exception):
            pass

        result = None
        try:
            for value in self._execute_ast(for_statement.iter):
                self._assign(for_statement.target, value)
                try:
                    for line in for_statement.body:
                        line_result = self._execute_ast(line)
                        if line_result is not None:
                            result = line_result
                except ContinueException:
                    continue
        except BreakException:
            pass
        return result

    def _execute_while(self, while_statement: ast.While):
        class BreakException(Exception):
            pass

        class ContinueException(Exception):
            pass

        result = None
        try:
            while self._execute_condition(while_statement.test):
                try:
                    for line in while_statement.body:
                        line_result = self._execute_ast(line)
                        if line_result is not None:
                            result = line_result
                except ContinueException:
                    continue
        except BreakException:
            pass
        return result

    def _execute_try(self, try_statement: ast.Try):
        try:
            for line in try_statement.body:
                self._execute_ast(line)
        except Exception as e:
            handled = False
            for handler in try_statement.handlers:
                if handler.type is None or isinstance(
                    e, self._execute_ast(handler.type)
                ):
                    if handler.name:
                        self.state[handler.name.id] = e
                    for line in handler.body:
                        self._execute_ast(line)
                    handled = True
                    break
            if not handled:
                raise
        finally:
            for line in try_statement.finalbody:
                self._execute_ast(line)

    def _execute_raise(self, raise_statement: ast.Raise):
        if raise_statement.exc:
            exception = self._execute_ast(raise_statement.exc)
            raise exception
        else:
            raise

    def _execute_assert(self, assert_statement: ast.Assert):
        test_result = self._execute_condition(assert_statement.test)
        if not test_result:
            if assert_statement.msg:
                msg = self._execute_ast(assert_statement.msg)
                raise AssertionError(msg)
            else:
                raise AssertionError

    def _execute_lambda(self, lambda_node: ast.Lambda) -> Any:
        def lambda_function(*args):
            old_state = self.state.copy()
            for param, arg in zip(lambda_node.args.args, args):
                self.state[param.arg] = arg
            result = self._execute_ast(lambda_node.body)
            self.state = old_state  # Restore the state
            return result

        return lambda_function

    def _validate_import(self, full_name: str):
        tmp_name = ""
        found_name = False
        for name in full_name.split("."):
            tmp_name += name if tmp_name == "" else f".{name}"
            if tmp_name in self.import_white_list:
                found_name = True
                return
        if not found_name:
            raise InterpreterError(
                f"It is not permitted to import modules "
                f"than module white list (try to import {full_name})."
            )

    def _execute_binop(self, binop: ast.BinOp):
        left = self._execute_ast(binop.left)
        operator = binop.op
        right = self._execute_ast(binop.right)

        if isinstance(operator, ast.Add):
            return left + right
        elif isinstance(operator, ast.Sub):
            return left - right
        elif isinstance(operator, ast.Mult):
            return left * right
        elif isinstance(operator, ast.Div):
            return left / right
        elif isinstance(operator, ast.FloorDiv):
            return left // right
        elif isinstance(operator, ast.Mod):
            return left % right
        elif isinstance(operator, ast.Pow):
            return left**right
        elif isinstance(operator, ast.LShift):
            return left << right
        elif isinstance(operator, ast.RShift):
            return left >> right
        elif isinstance(operator, ast.BitAnd):
            return left & right
        elif isinstance(operator, ast.BitOr):
            return left | right
        elif isinstance(operator, ast.BitXor):
            return left ^ right
        elif isinstance(operator, ast.MatMult):
            return left @ right
        else:
            raise InterpreterError(f"Operator not supported: {operator}")

    def _execute_unaryop(self, unaryop: ast.UnaryOp):
        operand = self._execute_ast(unaryop.operand)
        operator = unaryop.op

        if isinstance(operator, ast.UAdd):
            return +operand
        elif isinstance(operator, ast.USub):
            return -operand
        elif isinstance(operator, ast.Not):
            return not operand
        elif isinstance(operator, ast.Invert):
            return ~operand
        else:
            raise InterpreterError(f"Operator not supported: {operator}")

    def _execute_listcomp(self, comp: ast.ListComp):
        return [self._execute_comp(comp.elt, comp.generators)]

    def _execute_dictcomp(self, comp: ast.DictComp):
        return {
            self._execute_comp(comp.key, comp.generators): self._execute_comp(
                comp.value, comp.generators
            )
        }

    def _execute_setcomp(self, comp: ast.SetComp):
        return {self._execute_comp(comp.elt, comp.generators)}

    def _execute_comp(self, elt, generators):
        if not generators:
            return self._execute_ast(elt)
        gen = generators[0]
        result = []
        for value in self._execute_ast(gen.iter):
            self._assign(gen.target, value)
            if all(self._execute_condition(if_cond) for if_cond in gen.ifs):
                result.extend(self._execute_comp(elt, generators[1:]))
        return result

    def _execute_generatorexp(self, genexp: ast.GeneratorExp):
        def generator():
            for value in self._execute_comp(genexp.elt, genexp.generators):
                yield value

        return generator()

    def _get_value_from_state(self, key: str) -> Any:
        if key in self.state:
            return self.state[key]
        elif key in self.fuzz_state:
            return self.fuzz_state[key]
        else:
            raise InterpreterError(f"The variable `{key}` is not defined.")


class TextPrompt(str):
    r"""A ``str`` subclass for prompts that knows which ``{placeholder}``
    keywords it contains and supports partial formatting.
    """

    @property
    def key_words(self) -> set[str]:
        """Return the set of placeholder names found in the prompt."""
        return set(re.findall(r"\{([^{}]+)\}", self))

    def format(self, *args: Any, **kwargs: Any) -> "TextPrompt":
        """Format the prompt, leaving any unsupplied placeholders intact."""
        # Pre-fill every known key with itself ("{key}") so placeholders
        # without a supplied value survive the formatting round-trip.
        fill_ins = {key: "{" + key + "}" for key in self.key_words}
        fill_ins.update(kwargs)
        return TextPrompt(super().format(*args, **fill_ins))


class CodePrompt(TextPrompt):
    r"""A :obj:`TextPrompt` specialized for code, carrying an optional
    ``code_type`` tag (e.g. ``"python"``).
    """

    def __new__(cls, *args: Any, **kwargs: Any) -> "CodePrompt":
        # ``code_type`` is consumed here so ``str.__new__`` never sees it.
        code_type = kwargs.pop("code_type", None)
        prompt = super().__new__(cls, *args, **kwargs)
        prompt._code_type = code_type
        return prompt

    @property
    def code_type(self) -> str | None:
        """The language tag attached to this prompt, if any."""
        return self._code_type

    def set_code_type(self, code_type: str) -> None:
        """Attach or replace the language tag."""
        self._code_type = code_type

    def execute(
        self,
        interpreter: PythonInterpreter | None = None,
        user_variable: dict[str, Any] | None = None,
    ) -> tuple[Any, PythonInterpreter]:
        """Run the prompt's code, creating a default interpreter if needed.

        Returns both the execution result and the interpreter used, so
        callers can inspect or reuse its state.
        """
        runner = interpreter if interpreter else PythonInterpreter(
            action_space=globals()
        )
        outcome = runner.execute(
            self, fuzz_state=user_variable, keep_state=True
        )
        return outcome, runner
```

### src\flock\core\logging\formatters\enum_builder.py

- **Lines**: 38
- **Last modified**: 2025-02-18 03:20:40

```py
"""Enum Builder."""

import os
import pathlib
import re

# Locate the bundled themes directory relative to this file.
theme_folder = pathlib.Path(__file__).parent.parent.parent.parent / "themes"

if not theme_folder.exists():
    raise FileNotFoundError(f"Theme folder not found: {theme_folder}")

# Collect the theme names (file stems) found in the folder.
theme_files = [
    pathlib.Path(entry.path).stem
    for entry in os.scandir(theme_folder)
    if entry.is_file()
]

# Map a Python-identifier-safe name to each original theme name.
theme_enum_entries = {}
for theme in theme_files:
    safe_name = theme
    for bad_char in "- ()+.":
        safe_name = safe_name.replace(bad_char, "_")

    # Identifiers cannot start with a digit; prefix with an underscore.
    if re.match(r"^\d", safe_name):
        safe_name = f"_{safe_name}"

    theme_enum_entries[safe_name] = theme

# Emit the generated Enum module.
with open("theme_enum.py", "w") as f:
    f.write("from enum import Enum\n\n")
    f.write("class OutputOptionsTheme(Enum):\n")
    for safe_name, original_name in theme_enum_entries.items():
        f.write(f'    {safe_name} = "{original_name}"\n')

print("Generated theme_enum.py ✅")
```

### src\flock\core\logging\formatters\theme_builder.py

- **Lines**: 476
- **Last modified**: 2025-02-18 03:20:40

```py
#!/usr/bin/env python
"""A simple interactive theme builder.

Steps:
1. Load theme files from a folder (or pick N random ones).
2. Display each theme’s color palette (colors only).
3. Let the user choose a palette.
4. Generate a number of sample tables using that palette (with randomized non-color settings).
5. Let the user select one sample table and save its configuration to a TOML file.
"""

import pathlib
import random
import re
from typing import Any

import toml
from rich import box
from rich.console import Console, Group
from rich.panel import Panel
from rich.table import Table
from rich.text import Text


def resolve_style_string(style_str: str, theme: dict) -> str:
    """Expand "color.<section>.<key>" tokens using theme["colors"].

    Tokens that have no matching entry in the theme are left untouched.
    """

    def _lookup(match):
        section, key = match.groups()
        try:
            return theme["colors"][section][key]
        except KeyError:
            # Unknown token: keep the original text verbatim.
            return match.group(0)

    return re.sub(r"color\.(\w+)\.(\w+)", _lookup, style_str)


def generate_default_rich_block(theme: dict | None = None) -> dict[str, Any]:
    """Generate a default [rich] block that includes:
    - Color properties computed from the theme's [colors] blocks.
    - Extra color tokens (so tokens like "color.bright.green" can be used).
    - Non-color table layout properties, randomly chosen.
    """

    def random_background():
        return random.choice(
            [
                f"{normal_black}",
                f"{normal_blue}",
                f"{primary_background}",
                f"{selection_background}",
                f"{cursor_cursor}",
            ]
        )

    if theme is not None:
        bright = theme["colors"].get("bright", {})
        normal = theme["colors"].get("normal", {})
        cursor = theme["colors"].get("cursor", {})
        primary = theme["colors"].get("primary", {})
        selection = theme["colors"].get("selection", {})

        bright_black = bright.get("black", "#000000")
        bright_blue = bright.get("blue", "#96cbfe")
        bright_cyan = bright.get("cyan", "#85befd")
        bright_green = bright.get("green", "#94fa36")
        bright_magenta = bright.get("magenta", "#b9b6fc")
        bright_red = bright.get("red", "#fd5ff1")
        bright_white = bright.get("white", "#e0e0e0")
        bright_yellow = bright.get("yellow", "#f5ffa8")

        normal_black = normal.get("black", "#000000")
        normal_blue = normal.get("blue", "#85befd")
        normal_cyan = normal.get("cyan", "#85befd")
        normal_green = normal.get("green", "#87c38a")
        normal_magenta = normal.get("magenta", "#b9b6fc")
        normal_red = normal.get("red", "#fd5ff1")
        normal_white = normal.get("white", "#e0e0e0")
        normal_yellow = normal.get("yellow", "#ffd7b1")

        cursor_cursor = cursor.get("cursor", "#d0d0d0")
        cursor_text = cursor.get("text", "#151515")

        primary_background = primary.get("background", "#161719")
        primary_foreground = primary.get("foreground", "#c5c8c6")
        selection_background = selection.get("background", "#444444")
        selection_text = selection.get("text", primary_foreground)
    else:
        # Fallback default values.
        bright_black = "black"
        bright_blue = "blue"
        bright_cyan = "cyan"
        bright_green = "green"
        bright_magenta = "magenta"
        bright_red = "red"
        bright_white = "white"
        bright_yellow = "yellow"
        normal_black = "black"
        normal_blue = "blue"
        normal_cyan = "cyan"
        normal_green = "green"
        normal_magenta = "magenta"
        normal_red = "red"
        normal_white = "white"
        normal_yellow = "yellow"
        cursor_cursor = "gray"
        cursor_text = "white"
        primary_background = "black"
        primary_foreground = "white"
        selection_background = "gray"
        selection_text = "white"

    # Color properties.
    default_color_props = {
        "panel_style": f"on {random_background()}",
        "table_header_style": f"bold {selection_text} on {selection_background}",
        "table_title_style": f"bold {primary_foreground}",
        "table_border_style": bright_blue,
        "panel_border_style": bright_blue,
        "column_output": f"bold {primary_foreground}",
        "column_value": primary_foreground,
    }
    # Extra color tokens.
    extra_color_props = {
        "bright_black": bright_black,
        "bright_blue": bright_blue,
        "bright_cyan": bright_cyan,
        "bright_green": bright_green,
        "bright_magenta": bright_magenta,
        "bright_red": bright_red,
        "bright_white": bright_white,
        "bright_yellow": bright_yellow,
        "normal_black": normal_black,
        "normal_blue": normal_blue,
        "normal_cyan": normal_cyan,
        "normal_green": normal_green,
        "normal_magenta": normal_magenta,
        "normal_red": normal_red,
        "normal_white": normal_white,
        "normal_yellow": normal_yellow,
        "cursor_cursor": cursor_cursor,
        "cursor_text": cursor_text,
    }
    # Non-color layout properties, randomly chosen.
    default_non_color_props = {
        "table_show_lines": random.choice([True, False]),
        "table_box": random.choice(
            ["ROUNDED", "SIMPLE", "SQUARE", "MINIMAL", "HEAVY", "DOUBLE_EDGE"]
        ),
        "panel_padding": random.choice([[1, 2], [1, 1], [2, 2], [0, 2]]),
        "panel_title_align": random.choice(["left", "center", "right"]),
        "table_row_styles": random.choice(
            [["", "dim"], ["", "italic"], ["", "underline"]]
        ),
    }
    # Extra table layout properties (non-content).
    default_extra_table_props = {
        "table_safe_box": True,
        "table_padding": [0, 1],
        "table_collapse_padding": False,
        "table_pad_edge": True,
        "table_expand": False,
        "table_show_footer": False,
        "table_show_edge": True,
        "table_leading": 0,
        "table_style": "none",
        "table_footer_style": "none",
        "table_caption": "",
        "table_caption_style": "none",
        "table_title_justify": "center",
        "table_caption_justify": "center",
        "table_highlight": False,
    }
    defaults = {
        **default_color_props,
        **extra_color_props,
        **default_non_color_props,
        **default_extra_table_props,
    }
    return defaults


def load_theme_from_file(filepath: str) -> dict:
    """Load a theme from a TOML file.

    If the file does not contain a [rich] block, one is generated and saved.
    """
    with open(filepath) as fh:
        theme = toml.load(fh)
    if "rich" in theme:
        return theme
    # First load of this theme: compute styling defaults and persist them.
    theme["rich"] = generate_default_rich_block(theme)
    with open(filepath, "w") as fh:
        toml.dump(theme, fh)
    return theme


def get_default_styles(theme: dict | None) -> dict[str, Any]:
    """Merge the theme's [rich] overrides onto computed defaults.

    Also normalizes padding values to tuples and resolves any
    "color.<section>.<key>" tokens in string-valued properties.
    """
    defaults = generate_default_rich_block(theme)
    if theme is None:
        final_styles = defaults
    else:
        overrides = theme.get("rich", {})
        final_styles = {
            key: overrides.get(key, default)
            for key, default in defaults.items()
        }
    # Rich expects padding as a tuple rather than a list.
    final_styles["panel_padding"] = tuple(final_styles["panel_padding"])
    if "table_padding" in final_styles:
        final_styles["table_padding"] = tuple(final_styles["table_padding"])
    if theme is not None:
        # Expand color tokens in every string property.
        for key, value in final_styles.items():
            if isinstance(value, str):
                final_styles[key] = resolve_style_string(value, theme)
    return final_styles


def create_rich_renderable(
    value: Any,
    level: int = 0,
    theme: dict | None = None,
    styles: dict[str, Any] | None = None,
) -> Any:
    """Recursively turn *value* into a Rich renderable.

    Dicts become two-column tables, lists/tuples become groups (or a
    newline-joined string when every item renders as a string), and any
    other value is stringified.
    """
    if styles is None:
        styles = get_default_styles(theme)

    if isinstance(value, dict):
        box_name = styles["table_box"]
        table = Table(
            show_header=True,
            header_style=styles["table_header_style"],
            # Nested tables get a level-labelled title; the root gets none.
            title=f"Subtable (Level {level})" if level > 0 else None,
            title_style=styles["table_title_style"],
            border_style=styles["table_border_style"],
            show_lines=styles["table_show_lines"],
            # table_box may be a box-name string or an actual box object.
            box=getattr(box, box_name) if isinstance(box_name, str) else box_name,
            row_styles=styles["table_row_styles"],
            safe_box=styles.get("table_safe_box"),
            padding=styles.get("table_padding"),
            collapse_padding=styles.get("table_collapse_padding"),
            pad_edge=styles.get("table_pad_edge"),
            expand=styles.get("table_expand"),
            show_footer=styles.get("table_show_footer"),
            show_edge=styles.get("table_show_edge"),
            leading=styles.get("table_leading"),
            style=styles.get("table_style"),
            footer_style=styles.get("table_footer_style"),
            caption=styles.get("table_caption"),
            caption_style=styles.get("table_caption_style"),
            title_justify=styles.get("table_title_justify"),
            caption_justify=styles.get("table_caption_justify"),
            highlight=styles.get("table_highlight"),
        )
        table.add_column("Key", style=styles["column_output"])
        table.add_column("Value", style=styles["column_value"])
        for entry_key, entry_value in value.items():
            table.add_row(
                str(entry_key),
                create_rich_renderable(entry_value, level + 1, theme, styles),
            )
        return table

    if isinstance(value, (list, tuple)):
        if all(isinstance(item, dict) for item in value):
            # A list of dicts renders as a labelled stack of subtables.
            parts = []
            for index, item in enumerate(value):
                parts.append(f"[bold]Item {index + 1}[/bold]")
                parts.append(
                    create_rich_renderable(item, level + 1, theme, styles)
                )
            return Group(*parts)
        rendered = [
            create_rich_renderable(item, level + 1, theme, styles)
            for item in value
        ]
        if all(isinstance(item, str) for item in rendered):
            return "\n".join(rendered)
        return Group(*rendered)

    if isinstance(value, str) and "\n" in value:
        # Pad multi-line strings so they sit on their own lines.
        return f"\n{value}\n"
    return str(value)


# --- Theme Builder Functions --- #


def load_theme_files(theme_dir: pathlib.Path) -> list[pathlib.Path]:
    """Collect every .toml theme file directly inside *theme_dir*."""
    return [*theme_dir.glob("*.toml")]


def display_color_palette(theme: dict) -> None:
    """Render the theme's [colors] sections as a table with color swatches."""
    palette_table = Table(
        title="Color Palette", show_header=True, header_style="bold"
    )
    palette_table.add_column("Section", style="bold")
    palette_table.add_column("Key", style="italic")
    palette_table.add_column("Value", style="bold")
    palette_table.add_column("Preview", justify="center")

    for section_name, section_colors in theme.get("colors", {}).items():
        for color_key, color_value in section_colors.items():
            # A fixed-width blank styled with the color as its background
            # acts as a small visual swatch.
            swatch = Text("      ", style=f"on {color_value}")
            palette_table.add_row(section_name, color_key, color_value, swatch)

    Console().print(palette_table)


def generate_sample_rich_blocks(
    chosen_theme: dict, count: int
) -> list[dict[str, Any]]:
    """Produce *count* randomized [rich] layout blocks for the given theme."""
    return [generate_default_rich_block(chosen_theme) for _ in range(count)]


def generate_sample_table(sample_theme: dict, dummy_data: dict) -> Panel:
    """Render *dummy_data* inside a panel styled by *sample_theme*.

    The theme's [rich] block drives both the inner table rendering and the
    panel chrome (title alignment, border, padding, background).
    """
    styles = get_default_styles(sample_theme)
    rendered = create_rich_renderable(
        dummy_data, theme=sample_theme, styles=styles
    )
    panel_kwargs = {
        "title": "Sample Table",
        "title_align": styles["panel_title_align"],
        "border_style": styles["panel_border_style"],
        "padding": styles["panel_padding"],
        "style": styles["panel_style"],
    }
    return Panel(rendered, **panel_kwargs)


def save_theme(theme: dict, filename: pathlib.Path) -> None:
    """Serialize *theme* to *filename* as TOML."""
    with open(filename, "w") as out_file:
        toml.dump(theme, out_file)


# --- Main Interactive Loop --- #


def theme_builder():
    """Run the interactive theme-builder loop.

    Flow: list available theme palettes, let the user pick one, render a
    few sample tables with randomized layouts on top of that palette, then
    save the user's favorite combination back to the themes folder.
    """
    console = Console(force_terminal=True, color_system="truecolor")
    themes_dir = pathlib.Path(__file__).parent.parent.parent.parent / "themes"
    theme_files = load_theme_files(themes_dir)

    if not theme_files:
        console.print("[red]No theme files found in the themes folder.[/red]")
        return

    # Ask the user: load all themes or N random themes?
    console.print("[bold]Theme Builder[/bold]")
    choice = console.input(
        "Load [bold](a)ll[/bold] themes or [bold](n)[/bold] random ones? (a/n): "
    )
    if choice.lower() == "n":
        n = console.input("How many random themes? ")
        try:
            n = int(n)
        except ValueError:
            # Non-numeric answer: fall back to loading every theme.
            n = len(theme_files)
        theme_files = random.sample(theme_files, min(n, len(theme_files)))

    # Display palettes for each theme file.
    console.print("\n[underline]Available Color Palettes:[/underline]")
    palettes = []
    for idx, tf in enumerate(theme_files):
        # load_theme_from_file also generates and persists a [rich] block
        # for any theme file that does not have one yet.
        theme_dict = load_theme_from_file(str(tf))
        palettes.append((tf, theme_dict))
        console.print(f"\n[bold]Theme #{idx} - {tf.name}[/bold]")
        display_color_palette(theme_dict)

    # Let the user choose a palette by index.
    sel = console.input("\nEnter the number of the palette to use: ")
    try:
        sel = int(sel)
        chosen_theme = palettes[sel][1]
    except (ValueError, IndexError):
        console.print("[red]Invalid selection. Exiting.[/red]")
        return

    console.print("\n[underline]Selected Palette:[/underline]")
    display_color_palette(chosen_theme)

    # Ask the user how many sample tables to generate.
    count = console.input("\nHow many sample tables to generate? (default 3): ")
    try:
        count = int(count)
    except ValueError:
        count = 3

    # Generate sample rich blocks from the chosen theme.
    sample_rich_blocks = generate_sample_rich_blocks(chosen_theme, count)

    # For each sample, create a new theme dict that uses the chosen palette and the sample rich block.
    dummy_data = {
        "Agent": "Test Agent",
        "Status": "Running",
        "Metrics": {
            "CPU": "20%",
            "Memory": "512MB",
            "Nested": {"value1": 1, "value2": 2},
        },
        "Logs": [
            "Initialization complete",
            "Running process...",
            {"Step": "Completed", "Time": "2025-02-07T12:00:00Z"},
        ],
    }

    samples = []
    for i, rich_block in enumerate(sample_rich_blocks):
        # Build a sample theme: copy the chosen theme and override its [rich] block.
        sample_theme = dict(
            chosen_theme
        )  # shallow copy (good enough if colors remain unchanged)
        sample_theme["rich"] = rich_block
        sample_table = generate_sample_table(sample_theme, dummy_data)
        samples.append((sample_theme, sample_table))
        console.print(f"\n[bold]Sample Table #{i}[/bold]")
        console.print(sample_table)

    # Let the user choose one sample or regenerate.
    sel2 = console.input(
        "\nEnter the number of the sample table you like, or type [bold]r[/bold] to regenerate: "
    )
    if sel2.lower() == "r":
        console.print("Regenerating samples...")
        # NOTE(review): recursion grows the stack on repeated "r" answers;
        # acceptable for interactive use.
        theme_builder()  # restart the builder
        return
    try:
        sel2 = int(sel2)
        chosen_sample_theme = samples[sel2][0]
    except (ValueError, IndexError):
        console.print("[red]Invalid selection. Exiting.[/red]")
        return

    # Ask for file name to save the chosen theme.
    filename = console.input(
        "\nEnter a filename to save the chosen theme (e.g. mytheme.toml): "
    )
    save_path = themes_dir / filename
    save_theme(chosen_sample_theme, save_path)
    console.print(f"\n[green]Theme saved as {save_path}.[/green]")
```

### src\flock\core\logging\formatters\themed_formatter.py

- **Lines**: 550
- **Last modified**: 2025-03-29 13:53:59

```py
"""A Rich-based formatter for agent results with theme support."""

import pathlib
import random
import re
from typing import Any

from temporalio import workflow

from flock.core.logging.formatters.themes import OutputTheme

with workflow.unsafe.imports_passed_through():
    from pygments.style import Style
    from pygments.token import Token
    from rich import box
    from rich.console import Console, Group
    from rich.panel import Panel
    from rich.syntax import PygmentsSyntaxTheme, Syntax
    from rich.table import Table
    from rich.theme import Theme

import toml  # install with: pip install toml


def resolve_style_string(style_str: str, theme: dict) -> str:
    """Substitute "color.<section>.<key>" tokens with theme color values.

    Tokens that do not resolve against theme["colors"] are kept verbatim.
    """
    token_pattern = re.compile(r"color\.(\w+)\.(\w+)")

    def substitute(match):
        try:
            return theme["colors"][match.group(1)][match.group(2)]
        except KeyError:
            # No such color in the theme: leave the token as-is.
            return match.group(0)

    return token_pattern.sub(substitute, style_str)


def generate_default_rich_block(theme: dict | None = None) -> dict[str, Any]:
    """Generate a default [rich] block with *all* styling properties.

    For the color mapping properties the defaults are computed from the
    theme's [colors] blocks (if available). This includes colors from the
    "bright", "normal", and "cursor" sections.

    Non color properties (layout and table specific properties) are randomly
    chosen from a set of sensible alternatives.
    """
    if theme is not None:
        # Retrieve colors from the theme.
        bright_black = theme["colors"]["bright"].get("black", "#000000")
        bright_blue = theme["colors"]["bright"].get("blue", "#96cbfe")
        bright_cyan = theme["colors"]["bright"].get("cyan", "#85befd")
        bright_green = theme["colors"]["bright"].get("green", "#94fa36")
        bright_magenta = theme["colors"]["bright"].get("magenta", "#b9b6fc")
        bright_red = theme["colors"]["bright"].get("red", "#fd5ff1")
        bright_white = theme["colors"]["bright"].get("white", "#e0e0e0")
        bright_yellow = theme["colors"]["bright"].get("yellow", "#f5ffa8")

        normal_black = theme["colors"]["normal"].get("black", "#000000")
        normal_blue = theme["colors"]["normal"].get("blue", "#85befd")
        normal_cyan = theme["colors"]["normal"].get("cyan", "#85befd")
        normal_green = theme["colors"]["normal"].get("green", "#87c38a")
        normal_magenta = theme["colors"]["normal"].get("magenta", "#b9b6fc")
        normal_red = theme["colors"]["normal"].get("red", "#fd5ff1")
        normal_white = theme["colors"]["normal"].get("white", "#e0e0e0")
        normal_yellow = theme["colors"]["normal"].get("yellow", "#ffd7b1")

        cursor_cursor = theme["colors"]["cursor"].get("cursor", "#d0d0d0")
        cursor_text = theme["colors"]["cursor"].get("text", "#151515")

        primary_background = theme["colors"]["primary"].get(
            "background", "#161719"
        )
        primary_foreground = theme["colors"]["primary"].get(
            "foreground", "#c5c8c6"
        )
        selection_background = theme["colors"]["selection"].get(
            "background", "#444444"
        )
        selection_text = theme["colors"]["selection"].get(
            "text", primary_foreground
        )
    else:
        bright_black = "black"
        bright_blue = "blue"
        bright_cyan = "cyan"
        bright_green = "green"
        bright_magenta = "magenta"
        bright_red = "red"
        bright_white = "white"
        bright_yellow = "yellow"

        normal_black = "black"
        normal_blue = "blue"
        normal_cyan = "cyan"
        normal_green = "green"
        normal_magenta = "magenta"
        normal_red = "red"
        normal_white = "white"
        normal_yellow = "yellow"

        cursor_cursor = "gray"
        cursor_text = "white"

        primary_background = "black"
        primary_foreground = "white"
        selection_background = "gray"
        selection_text = "white"

    # Color properties computed from the theme.
    default_color_props = {
        "panel_style": f"on {primary_background}",
        "table_header_style": f"bold {selection_text} on {selection_background}",
        "table_title_style": f"bold {primary_foreground}",
        "table_border_style": bright_blue,
        "panel_border_style": bright_blue,
        "column_output": f"bold {primary_foreground}",
        "column_value": primary_foreground,
    }
    # Extra color tokens so they can be used via tokens like color.bright.black, etc.
    extra_color_props = {
        "bright_black": bright_black,
        "bright_blue": bright_blue,
        "bright_cyan": bright_cyan,
        "bright_green": bright_green,
        "bright_magenta": bright_magenta,
        "bright_red": bright_red,
        "bright_white": bright_white,
        "bright_yellow": bright_yellow,
        "normal_black": normal_black,
        "normal_blue": normal_blue,
        "normal_cyan": normal_cyan,
        "normal_green": normal_green,
        "normal_magenta": normal_magenta,
        "normal_red": normal_red,
        "normal_white": normal_white,
        "normal_yellow": normal_yellow,
        "cursor_cursor": cursor_cursor,
        "cursor_text": cursor_text,
    }
    # Randomly choose non color properties.
    default_non_color_props = {
        "table_show_lines": random.choice([True, False]),
        "table_box": random.choice(
            ["ROUNDED", "SIMPLE", "SQUARE", "MINIMAL", "HEAVY", "DOUBLE_EDGE"]
        ),
        "panel_padding": random.choice([[1, 2], [1, 1], [2, 2], [0, 2]]),
        "panel_title_align": random.choice(["left", "center", "right"]),
        # Add table_row_styles property.
        "table_row_styles": random.choice(
            [["", "dim"], ["", "italic"], ["", "underline"]]
        ),
    }
    # Extra table layout properties (non content properties).
    default_extra_table_props = {
        "table_safe_box": True,
        "table_padding": [0, 1],
        "table_collapse_padding": False,
        "table_pad_edge": True,
        "table_expand": False,
        "table_show_footer": False,
        "table_show_edge": True,
        "table_leading": 0,
        "table_style": "none",
        "table_footer_style": "none",
        "table_caption": None,
        "table_caption_style": "none",
        "table_title_justify": "center",
        "table_caption_justify": "center",
        "table_highlight": False,
    }
    # Combine all defaults.
    defaults = {
        **default_color_props,
        **extra_color_props,
        **default_non_color_props,
        **default_extra_table_props,
    }
    return defaults


def load_theme_from_file(filepath: str) -> dict:
    """Load a theme TOML file, creating a [rich] block when absent.

    The theme is expected to contain color blocks like [colors.primary],
    [colors.selection], [colors.normal], [colors.cursor], etc. When the
    file has no [rich] block of styling properties, one is generated
    (including the color mappings) and written back into the file.
    """
    with open(filepath) as handle:
        theme = toml.load(handle)

    if "rich" not in theme:
        # Compute styling defaults once and persist them so subsequent
        # loads see a stable [rich] block.
        theme["rich"] = generate_default_rich_block(theme)
        with open(filepath, "w") as handle:
            toml.dump(theme, handle)

    return theme


def get_default_styles(theme: dict | None) -> dict[str, Any]:
    """Compute the effective style mapping for *theme*.

    Defaults come from generate_default_rich_block(); any key present in
    the theme's [rich] block overrides its default. Padding values are
    normalized to tuples, and string values then have their
    "color.<section>.<key>" tokens resolved.
    """
    base = generate_default_rich_block(theme)
    if theme is None:
        merged = base
    else:
        rich_overrides = theme.get("rich", {})
        merged = {key: rich_overrides.get(key, base[key]) for key in base}

    # Rich APIs expect padding values as tuples, not lists.
    merged["panel_padding"] = tuple(merged["panel_padding"])
    if "table_padding" in merged:
        merged["table_padding"] = tuple(merged["table_padding"])

    if theme is not None:
        # Resolve color tokens in every string-valued property.
        for key in merged:
            if isinstance(merged[key], str):
                merged[key] = resolve_style_string(merged[key], theme)

    return merged


def create_rich_renderable(
    value: Any,
    level: int = 0,
    theme: dict | None = None,
    styles: dict[str, Any] | None = None,
    max_length: int = -1,
) -> Any:
    """Recursively creates a Rich renderable for a given value.

    - For dicts: creates a Table with headers styled via the computed properties.
    - For lists/tuples: if every item is a dict, returns a Group of subtables;
      otherwise, renders each item recursively.
    - Other types: returns a string (adding extra newlines for multi-line strings).

    Args:
        value: The value to render (dict, list/tuple, or any scalar).
        level: Current nesting depth; nested tables get a
            "Subtable (Level N)" title when level > 0.
        theme: Optional theme dict used to compute styles when *styles*
            is not supplied.
        styles: Precomputed style mapping; derived from *theme* if None.
        max_length: If > 0, scalar string output longer than this is
            truncated with a "...(+Nchars)" marker; -1 disables truncation.
    """
    if styles is None:
        styles = get_default_styles(theme)

    # If the value is a dictionary, render it as a table.
    if isinstance(value, dict):
        # Convert table_box string into an actual box style.
        box_style = (
            getattr(box, styles["table_box"])
            if isinstance(styles["table_box"], str)
            else styles["table_box"]
        )
        # Gather all table-related keyword arguments.
        table_kwargs = {
            "show_header": True,
            "header_style": styles["table_header_style"],
            "title": f"Subtable (Level {level})" if level > 0 else None,
            "title_style": styles["table_title_style"],
            "border_style": styles["table_border_style"],
            "show_lines": styles["table_show_lines"],
            "box": box_style,
            "row_styles": styles["table_row_styles"],
            "safe_box": styles.get("table_safe_box"),
            "padding": styles.get("table_padding"),
            "collapse_padding": styles.get("table_collapse_padding"),
            "pad_edge": styles.get("table_pad_edge"),
            "expand": styles.get("table_expand"),
            "show_footer": styles.get("table_show_footer"),
            "show_edge": styles.get("table_show_edge"),
            "leading": styles.get("table_leading"),
            "style": styles.get("table_style"),
            "footer_style": styles.get("table_footer_style"),
            "caption": styles.get("table_caption"),
            "caption_style": styles.get("table_caption_style"),
            "title_justify": styles.get("table_title_justify"),
            "caption_justify": styles.get("table_caption_justify"),
            "highlight": styles.get("table_highlight"),
        }
        table = Table(**table_kwargs)
        table.add_column("Key", style=styles["column_output"])
        table.add_column("Value", style=styles["column_value"])
        for k, v in value.items():
            table.add_row(
                str(k),
                create_rich_renderable(v, level + 1, theme, styles, max_length),
            )
        return table

    # If the value is a list or tuple, render each item.
    elif isinstance(value, list | tuple):
        if all(isinstance(item, dict) for item in value):
            # A list of dicts renders as a labelled stack of subtables.
            sub_tables = []
            for i, item in enumerate(value):
                sub_tables.append(f"[bold]Item {i + 1}[/bold]")
                sub_tables.append(
                    create_rich_renderable(
                        item, level + 1, theme, styles, max_length=max_length
                    )
                )
            return Group(*sub_tables)
        else:
            rendered_items = [
                create_rich_renderable(
                    item, level + 1, theme, styles, max_length=max_length
                )
                for item in value
            ]
            # Only join when everything rendered as plain text; otherwise
            # keep the Rich renderables in a Group.
            if all(isinstance(item, str) for item in rendered_items):
                return "\n".join(rendered_items)
            else:
                return Group(*rendered_items)

    # Otherwise, return a string representation.
    else:
        s = str(value).strip()
        # Truncate long scalars, appending a Rich-markup note with the
        # number of characters omitted.
        if max_length > 0 and len(s) > max_length:
            omitted = len(s) - max_length
            s = (
                s[:max_length]
                + f"[bold bright_yellow]...(+{omitted}chars)[/bold bright_yellow]"
            )
        if isinstance(value, str) and "\n" in value:
            return f"\n{s}\n"
        return s


def load_syntax_theme_from_file(filepath: str) -> dict:
    """Load a syntax highlighting theme from a TOML file and map it to Rich styles.

    Args:
        filepath: Path to a theme TOML file; it must contain a "colors"
            table, whose "primary", "normal", and "bright" sections are
            consulted (each optional thanks to per-key fallbacks).

    Returns:
        A mapping from syntax categories (background, text, keyword, ...)
        to color strings.

    Raises:
        ValueError: If the file has no "colors" section.
    """
    with open(filepath) as f:
        theme = toml.load(f)

    if "colors" not in theme:
        raise ValueError(
            f"Theme file {filepath} does not contain a 'colors' section."
        )

    colors = theme["colors"]
    # Individual sections may be absent in partial themes; fall back to
    # empty dicts so the per-key defaults below apply instead of KeyError
    # (previously only the top-level "colors" key was validated).
    primary = colors.get("primary", {})
    normal = colors.get("normal", {})
    bright = colors.get("bright", {})

    # Map theme colors to syntax categories.
    syntax_theme = {
        "background": primary.get("background", "#161719"),
        "text": primary.get("foreground", "#c5c8c6"),
        "comment": normal.get("black", "#666666"),
        "keyword": bright.get("magenta", "#ff79c6"),
        "builtin": bright.get("cyan", "#8be9fd"),
        "string": bright.get("green", "#50fa7b"),
        "name": bright.get("blue", "#6272a4"),
        "number": bright.get("yellow", "#f1fa8c"),
        "operator": bright.get("red", "#ff5555"),
        "punctuation": normal.get("white", "#bbbbbb"),
        "error": bright.get("red", "#ff5555"),
    }

    return syntax_theme


def create_rich_syntax_theme(syntax_theme: dict) -> Theme:
    """Convert a syntax theme dict to a Rich-compatible Theme."""
    # Categories copied through unchanged.
    plain_keys = ("text", "string", "name", "number", "operator", "punctuation")
    style_map = {key: syntax_theme[key] for key in plain_keys}
    # Emphasized categories are rendered bold.
    for key in ("keyword", "builtin", "error"):
        style_map[key] = f"bold {syntax_theme[key]}"
    # The background is an "on <color>" style.
    style_map["background"] = f"on {syntax_theme['background']}"
    return Theme(style_map)


def create_pygments_syntax_theme(syntax_theme: dict) -> PygmentsSyntaxTheme:
    """Convert a syntax theme dict to a Pygments-compatible Rich syntax theme."""
    # Build the token mapping up front so the class body stays declarative.
    token_styles = {
        Token.Text: syntax_theme["text"],
        Token.Comment: f"italic {syntax_theme['comment']}",
        Token.Keyword: f"bold {syntax_theme['keyword']}",
        Token.Name.Builtin: f"bold {syntax_theme['builtin']}",
        Token.String: syntax_theme["string"],
        Token.Name: syntax_theme["name"],
        Token.Number: syntax_theme["number"],
        Token.Operator: syntax_theme["operator"],
        Token.Punctuation: syntax_theme["punctuation"],
        Token.Error: f"bold {syntax_theme['error']}",
    }

    class _GeneratedStyle(Style):
        """Pygments style generated from the loaded theme colors."""

        background_color = syntax_theme["background"]
        styles = token_styles

    return PygmentsSyntaxTheme(_GeneratedStyle)


class ThemedAgentResultFormatter:
    """Formats agent results in a Rich table with nested subtables and theme support."""

    def __init__(
        self,
        theme: OutputTheme = OutputTheme.afterglow,
        max_length: int = -1,
        render_table: bool = True,
        wait_for_input: bool = False,
    ):
        """Initialize the formatter.

        Args:
            theme: Theme used to locate the matching .toml file on disk.
            max_length: Maximum rendered length for values (-1 = unlimited).
            render_table: If True, render a Rich table; otherwise render the
                result as syntax-highlighted text.
            wait_for_input: If True, wait for Enter after printing.
        """
        self.theme = theme
        self.styles = None
        self.max_length = max_length
        self.render_table = render_table
        self.wait_for_input = wait_for_input
        # Bug fix: previously only assigned inside display_result(), so
        # calling format_result() directly with render_table=False raised
        # AttributeError. None lets Syntax fall back to its default theme.
        self.syntax_style = None

    def format_result(
        self,
        result: dict[str, Any],
        agent_name: str,
        theme,
        styles,
    ) -> Panel:
        """Format an agent's result as a Rich Panel containing a table.

        Args:
            result: The agent output mapping to render.
            agent_name: Used in the table/panel title.
            theme: Theme dictionary forwarded to create_rich_renderable().
            styles: Style dictionary (as produced by get_default_styles()).

        Returns:
            Panel: A panel wrapping either the result table or, when
            render_table is False, a syntax-highlighted dump of the result.
        """
        # Bug fix: this import used to precede the docstring, which turned the
        # docstring into a dead string expression.
        from devtools import pformat

        box_style = (
            getattr(box, styles["table_box"])
            if isinstance(styles["table_box"], str)
            else styles["table_box"]
        )

        # Gather table properties for the main table.
        table_kwargs = {
            "show_header": True,
            "header_style": styles["table_header_style"],
            "title": f"Agent Results: {agent_name}",
            "title_style": styles["table_title_style"],
            "border_style": styles["table_border_style"],
            "show_lines": styles["table_show_lines"],
            "box": box_style,
            "row_styles": styles["table_row_styles"],
            "safe_box": styles.get("table_safe_box"),
            "padding": styles.get("table_padding"),
            "collapse_padding": styles.get("table_collapse_padding"),
            "pad_edge": styles.get("table_pad_edge"),
            "expand": styles.get("table_expand"),
            "show_footer": styles.get("table_show_footer"),
            "show_edge": styles.get("table_show_edge"),
            "leading": styles.get("table_leading"),
            "style": styles.get("table_style"),
            "footer_style": styles.get("table_footer_style"),
            "caption": styles.get("table_caption"),
            "caption_style": styles.get("table_caption_style"),
            "title_justify": styles.get("table_title_justify"),
            "caption_justify": styles.get("table_caption_justify"),
            "highlight": styles.get("table_highlight"),
        }

        table = Table(**table_kwargs)
        table.add_column("Output", style=styles["column_output"])
        table.add_column("Value", style=styles["column_value"])
        for key, value in result.items():
            rich_renderable = create_rich_renderable(
                value,
                level=0,
                theme=theme,
                styles=styles,
                max_length=self.max_length,
            )
            table.add_row(key, rich_renderable)

        s = pformat(result, highlight=False)

        if self.render_table:
            return Panel(
                table,
                title="🐤🐧🐓🦆",
                title_align=styles["panel_title_align"],
                border_style=styles["panel_border_style"],
                padding=styles["panel_padding"],
                style=styles["panel_style"],
            )
        else:
            syntax = Syntax(
                s,  # The formatted string
                "python",  # Highlight as Python (change this for other formats)
                theme=self.syntax_style,  # Pygments theme built in display_result()
                line_numbers=False,
            )
            return Panel(
                syntax,
                title=agent_name,
                title_align=styles["panel_title_align"],
                border_style=styles["panel_border_style"],
                padding=styles["panel_padding"],
                style=styles["panel_style"],
            )

    def display_result(self, result: dict[str, Any], agent_name: str) -> None:
        """Print an agent's result using Rich formatting.

        Resolves the configured theme to a .toml file in the package's
        ``themes`` directory, loads table and syntax styles from it, and
        prints the formatted panel to a new console.

        Raises:
            ValueError: If no .toml file matches the configured theme.
        """
        # Resolve the themes directory once (previously computed twice).
        themes_dir = (
            pathlib.Path(__file__).parent.parent.parent.parent / "themes"
        )
        all_themes = list(themes_dir.glob("*.toml"))

        theme_file = self.theme.value
        if not theme_file.endswith(".toml"):
            theme_file += ".toml"
        theme = themes_dir / theme_file

        if theme not in all_themes:
            raise ValueError(
                f"Invalid theme: {theme}\nAvailable themes: {all_themes}"
            )

        theme_dict = load_theme_from_file(theme)

        styles = get_default_styles(theme_dict)
        self.styles = styles
        self.syntax_style = create_pygments_syntax_theme(
            load_syntax_theme_from_file(theme)
        )

        console = Console()
        panel = self.format_result(
            result=result,
            agent_name=agent_name,
            theme=theme_dict,
            styles=styles,
        )
        console.print(panel)
        if self.wait_for_input:
            console.input(prompt="Press Enter to continue...")
```

### src\flock\core\logging\formatters\themes.py

- **Lines**: 340
- **Last modified**: 2025-02-24 03:21:51

```py
from enum import Enum


class OutputTheme(str, Enum):
    """Enumeration of the available terminal output color themes.

    Each member's value is the basename of a theme file (``<value>.toml``)
    shipped in the project's ``themes`` directory; consumers append the
    ``.toml`` suffix when resolving the file. Inheriting from ``str`` lets
    members be compared and used directly as strings.
    """

    tomorrow_night_eighties = "tomorrow-night-eighties"
    builtin_light = "builtin-light"
    iterm2_dark_background = "iterm2-dark-background"
    zenbones = "zenbones"
    iterm2_tango_dark = "iterm2-tango-dark"
    gruber_darker = "gruber-darker"
    scarlet_protocol = "scarlet-protocol"
    purplepeter = "purplepeter"
    seashells = "seashells"
    monokai_soda = "monokai-soda"
    wildcherry = "wildcherry"
    builtin_solarized_light = "builtin-solarized-light"
    firewatch = "firewatch"
    builtin_tango_dark = "builtin-tango-dark"
    spacedust = "spacedust"
    paraiso_dark = "paraiso-dark"
    nightlion_v2 = "nightlion-v2"
    misterioso = "misterioso"
    shades_of_purple = "shades-of-purple"
    red_planet = "red-planet"
    flat = "flat"
    terafox = "terafox"
    crayonponyfish = "crayonponyfish"
    elementary = "elementary"
    blulocolight = "blulocolight"
    blazer = "blazer"
    purple_rain = "purple-rain"
    aurora = "aurora"
    neutron = "neutron"
    alienblood = "alienblood"
    symfonic = "symfonic"
    pro = "pro"
    highway = "highway"
    grape = "grape"
    hax0r_blue = "hax0r-blue"
    zenwritten_light = "zenwritten-light"
    spacegray = "spacegray"
    everblush = "everblush"
    popping_and_locking = "popping-and-locking"
    zenburn = "zenburn"
    monalisa = "monalisa"
    deep = "deep"
    ir_black = "ir-black"
    wombat = "wombat"
    zenbones_light = "zenbones-light"
    darkermatrix = "darkermatrix"
    wez = "wez"
    matrix = "matrix"
    farmhouse_light = "farmhouse-light"
    sublette = "sublette"
    nocturnal_winter = "nocturnal-winter"
    ryuuko = "ryuuko"
    jackie_brown = "jackie-brown"
    framer = "framer"
    _3024_day = "3024-day"
    lovelace = "lovelace"
    teerb = "teerb"
    fairyfloss = "fairyfloss"
    tokyonight = "tokyonight"
    xcodelighthc = "xcodelighthc"
    iceberg_light = "iceberg-light"
    gruvboxlight = "gruvboxlight"
    tomorrow = "tomorrow"
    sleepyhollow = "sleepyhollow"
    monokai_vivid = "monokai-vivid"
    synthwave_everything = "synthwave-everything"
    tomorrow_night_burns = "tomorrow-night-burns"
    hurtado = "hurtado"
    dotgov = "dotgov"
    adventure = "adventure"
    tomorrow_night = "tomorrow-night"
    arthur = "arthur"
    fahrenheit = "fahrenheit"
    oxocarbon = "oxocarbon"
    violet_dark = "violet-dark"
    adventuretime = "adventuretime"
    vesper = "vesper"
    overnight_slumber = "overnight-slumber"
    japanesque = "japanesque"
    encom = "encom"
    brogrammer = "brogrammer"
    _3024_night = "3024-night"
    hivacruz = "hivacruz"
    darkmatrix = "darkmatrix"
    synthwavealpha = "synthwavealpha"
    aardvark_blue = "aardvark-blue"
    xcodewwdc = "xcodewwdc"
    chester = "chester"
    flatland = "flatland"
    n0tch2k = "n0tch2k"
    molokai = "molokai"
    violet_light = "violet-light"
    solarized_darcula = "solarized-darcula"
    espresso = "espresso"
    darkside = "darkside"
    flexoki_light = "flexoki-light"
    bright_lights = "bright-lights"
    clrs = "clrs"
    firefly_traditional = "firefly-traditional"
    forestblue = "forestblue"
    batman = "batman"
    snazzy = "snazzy"
    wryan = "wryan"
    kurokula = "kurokula"
    iterm2_pastel_dark_background = "iterm2-pastel-dark-background"
    afterglow = "afterglow"
    seoulbones_light = "seoulbones-light"
    ollie = "ollie"
    shaman = "shaman"
    liquidcarbontransparent = "liquidcarbontransparent"
    ayu_mirage = "ayu-mirage"
    kolorit = "kolorit"
    red_sands = "red-sands"
    funforrest = "funforrest"
    unikitty = "unikitty"
    espresso_libre = "espresso-libre"
    ultraviolent = "ultraviolent"
    ayu_light = "ayu-light"
    terminal_basic = "terminal-basic"
    paulmillr = "paulmillr"
    github = "github"
    hacktober = "hacktober"
    ayu_copy = "ayu copy"
    material = "material"
    vimbones = "vimbones"
    arcoiris = "arcoiris"
    wilmersdorf = "wilmersdorf"
    desert = "desert"
    rouge_2 = "rouge-2"
    doom_peacock = "doom-peacock"
    smyck = "smyck"
    cutiepro = "cutiepro"
    nvimlight = "nvimlight"
    hipster_green = "hipster-green"
    spiderman = "spiderman"
    nvimdark = "nvimdark"
    sugarplum = "sugarplum"
    catppuccin_latte = "catppuccin-latte"
    dayfox = "dayfox"
    seafoam_pastel = "seafoam-pastel"
    peppermint = "peppermint"
    tokyonight_storm = "tokyonight-storm"
    mariana = "mariana"
    novel = "novel"
    argonaut_copy = "argonaut copy"
    twilight = "twilight"
    xcodelight = "xcodelight"
    homebrew = "homebrew"
    ateliersulphurpool = "ateliersulphurpool"
    thayer_bright = "thayer-bright"
    konsolas = "konsolas"
    iterm2_solarized_light = "iterm2-solarized-light"
    midnight_in_mojave = "midnight-in-mojave"
    materialdarker = "materialdarker"
    royal = "royal"
    builtin_tango_light = "builtin-tango-light"
    idletoes = "idletoes"
    operator_mono_dark = "operator-mono-dark"
    cyberdyne = "cyberdyne"
    atom = "atom"
    hybrid = "hybrid"
    slate = "slate"
    duckbones = "duckbones"
    tinacious_design__dark_ = "tinacious-design-(dark)"
    kibble = "kibble"
    sakura = "sakura"
    lab_fox = "lab-fox"
    blue_matrix = "blue-matrix"
    materialdesigncolors = "materialdesigncolors"
    seoulbones_dark = "seoulbones-dark"
    seti = "seti"
    solarized_dark_higher_contrast = "solarized-dark-higher-contrast"
    chalkboard = "chalkboard"
    mathias = "mathias"
    neobones_dark = "neobones-dark"
    alabaster = "alabaster"
    djangorebornagain = "djangorebornagain"
    ayu = "ayu"
    iterm2_default = "iterm2-default"
    mirage = "mirage"
    firefoxdev = "firefoxdev"
    nightfox = "nightfox"
    grey_green = "grey-green"
    broadcast = "broadcast"
    solarized_dark___patched = "solarized-dark---patched"
    flexoki_dark = "flexoki-dark"
    challengerdeep = "challengerdeep"
    onehalflight = "onehalflight"
    earthsong = "earthsong"
    kanagawabones = "kanagawabones"
    gruvboxdarkhard = "gruvboxdarkhard"
    abernathy = "abernathy"
    oceanicmaterial = "oceanicmaterial"
    medallion = "medallion"
    pnevma = "pnevma"
    birdsofparadise = "birdsofparadise"
    toychest = "toychest"
    dimidium = "dimidium"
    cyberpunk = "cyberpunk"
    duotone_dark = "duotone-dark"
    whimsy = "whimsy"
    nord_light = "nord-light"
    belafonte_day = "belafonte-day"
    square = "square"
    retro = "retro"
    pandora = "pandora"
    galaxy = "galaxy"
    the_hulk = "the-hulk"
    rose_pine_moon = "rose-pine-moon"
    coffee_theme = "coffee-theme"
    tomorrow_night_bright = "tomorrow-night-bright"
    blulocodark = "blulocodark"
    sundried = "sundried"
    rippedcasts = "rippedcasts"
    glacier = "glacier"
    zenwritten_dark = "zenwritten-dark"
    xcodedarkhc = "xcodedarkhc"
    iterm2_solarized_dark = "iterm2-solarized-dark"
    softserver = "softserver"
    jubi = "jubi"
    fishtank = "fishtank"
    spacegray_eighties_dull = "spacegray-eighties-dull"
    raycast_light = "raycast-light"
    tinacious_design__light_ = "tinacious-design-(light)"
    gruvboxdark = "gruvboxdark"
    piatto_light = "piatto-light"
    grass = "grass"
    catppuccin_mocha = "catppuccin-mocha"
    hardcore = "hardcore"
    tokyonight_day = "tokyonight-day"
    underthesea = "underthesea"
    guezwhoz = "guezwhoz"
    borland = "borland"
    argonaut = "argonaut"
    farmhouse_dark = "farmhouse-dark"
    rapture = "rapture"
    zenbones_dark = "zenbones-dark"
    iceberg_dark = "iceberg-dark"
    pro_light = "pro-light"
    jellybeans = "jellybeans"
    later_this_evening = "later-this-evening"
    blueberrypie = "blueberrypie"
    vibrantink = "vibrantink"
    dimmedmonokai = "dimmedmonokai"
    catppuccin_macchiato = "catppuccin-macchiato"
    ocean = "ocean"
    banana_blueberry = "banana-blueberry"
    dark_ = "dark+"
    neopolitan = "neopolitan"
    relaxed = "relaxed"
    galizur = "galizur"
    liquidcarbon = "liquidcarbon"
    hax0r_gr33n = "hax0r-gr33n"
    ic_orange_ppl = "ic-orange-ppl"
    niji = "niji"
    liquidcarbontransparentinverse = "liquidcarbontransparentinverse"
    github_dark = "github-dark"
    zenburned = "zenburned"
    django = "django"
    rose_pine_dawn = "rose-pine-dawn"
    builtin_dark = "builtin-dark"
    iterm2_smoooooth = "iterm2-smoooooth"
    neon = "neon"
    raycast_dark = "raycast-dark"
    palenighthc = "palenighthc"
    laser = "laser"
    builtin_solarized_dark = "builtin-solarized-dark"
    cobalt2 = "cobalt2"
    breeze = "breeze"
    apple_classic = "apple-classic"
    c64 = "c64"
    calamity = "calamity"
    onehalfdark = "onehalfdark"
    neobones_light = "neobones-light"
    dracula = "dracula"
    spring = "spring"
    monokai_remastered = "monokai-remastered"
    lavandula = "lavandula"
    night_owlish_light = "night-owlish-light"
    builtin_pastel_dark = "builtin-pastel-dark"
    frontenddelight = "frontenddelight"
    tango_adapted = "tango-adapted"
    ubuntu = "ubuntu"
    oceanic_next = "oceanic-next"
    primary = "primary"
    materialdark = "materialdark"
    doomone = "doomone"
    rose_pine = "rose-pine"
    chalk = "chalk"
    andromeda = "andromeda"
    djangosmooth = "djangosmooth"
    red_alert = "red-alert"
    warmneon = "warmneon"
    man_page = "man-page"
    hopscotch = "hopscotch"
    urple = "urple"
    tomorrow_night_blue = "tomorrow-night-blue"
    atomonelight = "atomonelight"
    pencillight = "pencillight"
    ciapre = "ciapre"
    dracula_ = "dracula+"
    hopscotch_256 = "hopscotch.256"
    fideloper = "fideloper"
    treehouse = "treehouse"
    ic_green_ppl = "ic-green-ppl"
    tango_half_adapted = "tango-half-adapted"
    belafonte_night = "belafonte-night"
    iterm2_light_background = "iterm2-light-background"
    harper = "harper"
    mellifluous = "mellifluous"
    rebecca = "rebecca"
    cga = "cga"
    cobalt_neon = "cobalt-neon"
    synthwave = "synthwave"
    pencildark = "pencildark"
    cyberpunkscarletprotocol = "cyberpunkscarletprotocol"
    iterm2_tango_light = "iterm2-tango-light"
    subliminal = "subliminal"
    idea = "idea"
    xcodedark = "xcodedark"
    apple_system_colors = "apple-system-colors"
    hax0r_r3d = "hax0r-r3d"
    atom_test = "atom_test"
    floraverse = "floraverse"
    materialocean = "materialocean"
    nord = "nord"
    vaughn = "vaughn"
    obsidian = "obsidian"
    jetbrains_darcula = "jetbrains-darcula"
    elemental = "elemental"
    spacegray_eighties = "spacegray-eighties"
    nightlion_v1 = "nightlion-v1"
    bluedolphin = "bluedolphin"
    catppuccin_frappe = "catppuccin-frappe"
    dark_pastel = "dark-pastel"
    ultradark = "ultradark"
```

### src\flock\core\logging\logging.py

- **Lines**: 473
- **Last modified**: 2025-04-02 17:29:19

```py
# File: src/flock/core/logging.py
"""A unified logging module for Flock that works both in local/worker contexts and inside Temporal workflows.

Key points:
  - We always have Temporal imported, so we cannot decide based on import.
  - Instead, we dynamically check if we're in a workflow context by trying
    to call `workflow.info()`.
  - In a workflow, we use Temporal's built-in logger and skip debug/info/warning
    logs during replay.
  - Outside workflows, we use Loguru with rich formatting.
"""

import sys

from opentelemetry import trace

# Always import Temporal workflow (since it's part of the project)
from temporalio import workflow

with workflow.unsafe.imports_passed_through():
    from loguru import logger as loguru_logger


def in_workflow_context() -> bool:
    """Return True if this code is running inside a Temporal workflow context.

    Works by attempting to call ``workflow.info()``; the call only succeeds
    inside a workflow, so any exception means we are outside one.
    """
    try:
        workflow.logger.debug("Checking if in workflow context...")
        # workflow.info() raises outside of a workflow context.
        # Idiom fix: hasattr() already returns a bool, no bool() wrapper needed.
        return hasattr(workflow.info(), "is_replaying")
    except Exception:
        return False


def get_current_trace_id() -> str:
    """Fetch the current trace ID from OpenTelemetry, if available."""
    span_ctx = trace.get_current_span().get_span_context()
    if not span_ctx.is_valid:
        return "no-trace"
    # Trace IDs are 128-bit; render as 32 lowercase hex digits.
    return f"{span_ctx.trace_id:032x}"


# Maps each logger category name to the color used for its tag in log output.
# Dotted categories fall back to their nearest parent via color_for_category().
COLOR_MAP = {
    # Core & Orchestration
    "flock": "magenta",  # Color only
    "agent": "blue",  # Color only
    "workflow": "cyan",  # Color only
    "activities": "cyan",
    "context": "green",
    # Components & Mechanisms
    "registry": "yellow",  # Color only
    "serialization": "yellow",
    "serialization.utils": "light-yellow",
    "evaluator": "light-blue",
    "module": "light-green",
    "router": "light-magenta",
    "mixin.dspy": "yellow",
    # Specific Modules (Examples)
    "memory": "yellow",
    "module.output": "green",
    "module.metrics": "blue",
    "module.zep": "red",
    "module.hierarchical": "light-green",
    # Tools & Execution
    "tools": "light-black",
    "interpreter": "light-yellow",
    # API Components
    "api": "white",  # Color only
    "api.main": "white",
    "api.endpoints": "light-black",
    "api.run_store": "light-black",
    "api.ui": "light-blue",  # Color only
    "api.ui.routes": "light-blue",
    "api.ui.utils": "cyan",
    # Default/Unknown
    "unknown": "light-black",
}

# Canonical list of known logger category names (documentation/reference only;
# nothing in this module iterates it).
LOGGERS = [
    "flock",  # Core Flock orchestration
    "agent",  # General agent operations
    "context",  # Context management
    "registry",  # Unified registry operations (new)
    "serialization",  # General serialization (new - can be base for others)
    "serialization.utils",  # Serialization helpers (new, more specific)
    "evaluator",  # Base evaluator category (new/optional)
    "module",  # Base module category (new/optional)
    "router",  # Base router category (new/optional)
    "mixin.dspy",  # DSPy integration specifics (new)
    "memory",  # Memory module specifics
    "module.output",  # Output module specifics (example specific module)
    "module.metrics",  # Metrics module specifics (example specific module)
    "module.zep",  # Zep module specifics (example specific module)
    "module.hierarchical",  # Hierarchical memory specifics (example specific module)
    "interpreter",  # Code interpreter (if still used)
    "activities",  # Temporal activities
    "workflow",  # Temporal workflow logic
    "tools",  # Tool execution/registration
    "api",  # General API server (new)
    "api.main",  # API main setup (new)
    "api.endpoints",  # API endpoints (new)
    "api.run_store",  # API run state management (new)
    "api.ui",  # UI general (new)
    "api.ui.routes",  # UI routes (new)
    "api.ui.utils",  # UI utils (new)
]

# Categories whose tag is additionally rendered in bold by custom_format().
BOLD_CATEGORIES = [
    "flock",
    "agent",
    "workflow",
    "registry",
    "api",
    "api.ui",
]


def color_for_category(category: str) -> str:
    """Return the Rich markup color code name for the given category.

    Dotted names like ``serialization.utils`` are resolved most-specific
    first, falling back through each parent prefix, then to "unknown".
    """
    parts = category.split(".")
    # depth == len(parts) is the exact name; shorter depths are its parents.
    for depth in range(len(parts), 0, -1):
        prefix = ".".join(parts[:depth])
        if prefix in COLOR_MAP:
            return COLOR_MAP[prefix]
    return COLOR_MAP.get("unknown", "light-black")  # Final fallback


def custom_format(record):
    """A formatter that applies truncation and sequential styling tags."""
    timestamp = record["time"].strftime("%Y-%m-%d %H:%M:%S")
    level_name = record["level"].name
    extra = record["extra"]
    category = extra.get("category", "unknown")
    trace_id = extra.get("trace_id", "no-trace")
    color_tag = color_for_category(category)  # e.g. "yellow"

    # Escape braces so Loguru's format machinery doesn't treat them as fields.
    text = record["message"].replace("{", "{{").replace("}", "}}")

    if len(text) > MAX_LENGTH:
        dropped = len(text) - MAX_LENGTH
        text = text[:MAX_LENGTH] + f"<yellow>...+({dropped} chars)</yellow>"

    # Color the category tag, then embolden it for prominent categories.
    category_styled = f"<{color_tag}>[{category}]</{color_tag}>"
    if category in BOLD_CATEGORIES:
        category_styled = f"<bold>{category_styled}</bold>"

    return (
        f"<green>{timestamp}</green> | <level>{level_name: <8}</level> | "
        f"<cyan>[trace_id: {trace_id}]</cyan> | "
        f"{category_styled} | {text}\n"
    )


class ImmediateFlushSink:
    """A Loguru sink that writes to a stream and flushes after every message.

    Flushing per message guarantees logs appear in real time instead of
    sitting in the stream's buffer.
    """

    def __init__(self, stream=None):
        """Create the sink.

        Args:
            stream (Stream, optional): Target stream; defaults to sys.stderr.
        """
        self._stream = stream or sys.stderr

    def write(self, message):
        """Write *message* to the stream and flush immediately.

        Args:
            message (str): The formatted log record to write.
        """
        self._stream.write(message)
        self._stream.flush()

    def flush(self):
        """Flush the underlying stream."""
        self._stream.flush()


class PrintAndFlushSink:
    """A Loguru sink that emits each record via ``print(..., flush=True)``.

    Every record is pushed to stdout immediately, mimicking
    ``print(..., flush=True)`` behavior.
    """

    def write(self, message: str):
        """Print *message* verbatim (it already ends with a newline) and flush.

        Args:
            message (str): The formatted log record to emit.
        """
        print(message, end="", flush=True)

    def flush(self):
        """No-op: every write() call already flushes."""


# Configure Loguru for non-workflow (local/worker) contexts.
# Note that in workflow code, we will use Temporal's workflow.logger instead.
# remove() drops Loguru's default stderr handler before installing ours.
loguru_logger.remove()
loguru_logger.add(
    PrintAndFlushSink(),
    level="DEBUG",
    colorize=True,
    format=custom_format,  # callable formatter defined above
)
# Optionally add a file handler, e.g.:
# loguru_logger.add("logs/flock.log", rotation="100 MB", retention="30 days", level="DEBUG")


# A no-op logger used when logging is disabled.
class DummyLogger:
    """A dummy logger whose methods accept any arguments and do nothing."""

    def debug(self, *args, **kwargs):
        """Discard a debug message."""

    def info(self, *args, **kwargs):
        """Discard an info message."""

    def warning(self, *args, **kwargs):
        """Discard a warning message."""

    def error(self, *args, **kwargs):
        """Discard an error message."""

    def exception(self, *args, **kwargs):
        """Discard an exception message."""

    def success(self, *args, **kwargs):
        """Discard a success message."""


# Shared module-level no-op instance.
dummy_logger = DummyLogger()


# Maximum length for log messages before truncation
MAX_LENGTH = 500


class FlockLogger:
    """A unified logger that selects the appropriate logging mechanism based on context.

    - If running in a workflow context, it uses Temporal's built-in logger.
    - Otherwise, it uses Loguru, bound with category and trace_id.
    - When ``enable_logging`` is False, every call is a no-op via ``dummy_logger``.

    Note: the ``flush`` keyword on each method is accepted for API
    compatibility but unused — the configured sinks flush on every write.
    """

    def __init__(self, name: str, enable_logging: bool = False):
        """Initialize the FlockLogger.

        Args:
            name (str): The name of the logger (also used as the log category).
            enable_logging (bool, optional): Whether to enable logging. Defaults to False.
        """
        self.name = name
        self.enable_logging = enable_logging

    def _get_logger(self):
        """Select the backend logger for the current execution context."""
        if not self.enable_logging:
            return dummy_logger
        if in_workflow_context():
            # Use Temporal's workflow.logger inside a workflow context.
            return workflow.logger
        # Bind our logger with category and trace_id
        return loguru_logger.bind(
            name=self.name,
            category=self.name,  # Customize this per module (e.g., "flock", "agent", "context")
            trace_id=get_current_trace_id(),
        )

    def _truncate_message(self, message: str, max_length: int) -> str:
        """Truncate a message if it exceeds max_length and add truncation indicator."""
        if len(message) > max_length:
            truncated_chars = len(message) - max_length
            return (
                message[:max_length]
                + f"...<yellow>+({truncated_chars} chars)</yellow>"
            )
        return message

    def _log(
        self, level: str, message: str, *args, max_length: int, **kwargs
    ) -> None:
        """Truncate *message* and forward it to the backend at *level*.

        Robustness fix: falls back to ``info`` when the backend lacks the
        requested level — Temporal's workflow.logger is a standard-library
        logging adapter with no ``success`` method, which previously raised
        AttributeError inside a workflow context.
        """
        message = self._truncate_message(message, max_length)
        backend = self._get_logger()
        emit = getattr(backend, level, backend.info)
        emit(message, *args, **kwargs)

    def debug(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log a debug message.

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("debug", message, *args, max_length=max_length, **kwargs)

    def info(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log an info message.

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("info", message, *args, max_length=max_length, **kwargs)

    def warning(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log a warning message.

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("warning", message, *args, max_length=max_length, **kwargs)

    def error(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log an error message.

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("error", message, *args, max_length=max_length, **kwargs)

    def exception(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log an exception message (with traceback where supported).

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("exception", message, *args, max_length=max_length, **kwargs)

    def success(
        self,
        message: str,
        *args,
        flush: bool = False,
        max_length: int = MAX_LENGTH,
        **kwargs,
    ) -> None:
        """Log a success message (Loguru level; falls back to info elsewhere).

        Args:
            message (str): The message to log.
            flush (bool, optional): Accepted for API compatibility; unused.
            max_length (int, optional): Max message length before truncation. Defaults to MAX_LENGTH.
        """
        self._log("success", message, *args, max_length=max_length, **kwargs)


# Process-wide cache of named FlockLogger instances.
_LOGGER_CACHE: dict[str, FlockLogger] = {}


def get_logger(name: str = "flock", enable_logging: bool = True) -> FlockLogger:
    """Return a cached FlockLogger instance for the given name.

    Creates the logger on first use. On every call the cached logger's
    ``enable_logging`` flag is overwritten with the value passed here.
    """
    cached = _LOGGER_CACHE.get(name)
    if cached is None:
        cached = FlockLogger(name, enable_logging)
        _LOGGER_CACHE[name] = cached
    else:
        cached.enable_logging = enable_logging
    return cached


def get_module_loggers() -> list[FlockLogger]:
    """Return all cached loggers whose name starts with ``module.``."""
    return [
        logger
        for name, logger in _LOGGER_CACHE.items()
        if name.startswith("module.")
    ]


def truncate_for_logging(obj, max_item_length=100, max_items=10):
    """Truncate large data structures for logging purposes.

    Args:
        obj: Value to truncate. Strings, dicts, and lists are shortened
            (recursively); any other type is returned unchanged.
        max_item_length: Maximum length of a string before it is cut and
            annotated with the number of removed characters.
        max_items: Maximum number of dict entries / list items to keep.

    Returns:
        A truncated copy of *obj*, or *obj* itself for scalar types.
    """
    if isinstance(obj, str) and len(obj) > max_item_length:
        return (
            obj[:max_item_length]
            + f"... ({len(obj) - max_item_length} more chars)"
        )
    elif isinstance(obj, dict):
        # Keep at most max_items entries and recurse with the caller's
        # limits; the previous code dropped them and fell back to the
        # defaults (100/10) for every nested value.
        kept = list(obj.items())[:max_items]
        return {
            k: truncate_for_logging(v, max_item_length, max_items)
            for k, v in kept
        }
    elif isinstance(obj, list):
        truncated = [
            truncate_for_logging(item, max_item_length, max_items)
            for item in obj[:max_items]
        ]
        if len(obj) > max_items:
            truncated.append(f"... ({len(obj) - max_items} more items)")
        return truncated
    return obj
```

### src\flock\core\logging\span_middleware\baggage_span_processor.py

- **Lines**: 31
- **Last modified**: 2025-02-18 03:20:40

```py
from opentelemetry.baggage import get_baggage
from opentelemetry.sdk.trace import SpanProcessor


class BaggageAttributeSpanProcessor(SpanProcessor):
    """Span processor that mirrors selected baggage entries onto new spans.

    On span start it looks up each configured baggage key in the parent
    context and, when a value is present, records it as a span attribute.
    """

    def __init__(self, baggage_keys=None):
        """Args:
            baggage_keys: Baggage keys to copy onto spans
                (e.g. ["session_id", "run_id"]). Defaults to no keys.
        """
        self.baggage_keys = [] if baggage_keys is None else baggage_keys

    def on_start(self, span, parent_context):
        """Attach each configured baggage value found in the parent context."""
        for baggage_key in self.baggage_keys:
            baggage_value = get_baggage(baggage_key, context=parent_context)
            if baggage_value is not None:
                span.set_attribute(baggage_key, baggage_value)

    def on_end(self, span):
        """Nothing to do when a span ends."""

    def shutdown(self):
        """No resources to release."""

    def force_flush(self, timeout_millis: int = 30000):
        """Nothing is buffered, so flushing is a no-op."""
```

### src\flock\core\logging\telemetry.py

- **Lines**: 138
- **Last modified**: 2025-02-18 03:20:40

```py
"""This module sets up OpenTelemetry tracing for a service."""

import sys

from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from temporalio import workflow

from flock.core.logging.span_middleware.baggage_span_processor import (
    BaggageAttributeSpanProcessor,
)

with workflow.unsafe.imports_passed_through():
    from flock.core.logging.telemetry_exporter.file_exporter import (
        FileSpanExporter,
    )
    from flock.core.logging.telemetry_exporter.sqlite_exporter import (
        SqliteTelemetryExporter,
    )


class TelemetryConfig:
    """This configuration class sets up OpenTelemetry tracing.

      - Export spans to a Jaeger collector using gRPC.
      - Write spans to a file.
      - Save spans in a SQLite database.

    Only exporters with a non-None configuration will be activated.
    """

    def __init__(
        self,
        service_name: str,
        jaeger_endpoint: str | None = None,
        jaeger_transport: str = "grpc",
        local_logging_dir: str | None = None,
        file_export_name: str | None = None,
        sqlite_db_name: str | None = None,
        enable_jaeger: bool = True,
        enable_file: bool = True,
        enable_sql: bool = True,
        batch_processor_options: dict | None = None,
    ):
        """:param service_name: Name of your service.

        :param jaeger_endpoint: The Jaeger collector gRPC endpoint (e.g., "localhost:14250").
        :param file_export_path: If provided, spans will be written to this file.
        :param sqlite_db_path: If provided, spans will be stored in this SQLite DB.
        :param batch_processor_options: Dict of options for BatchSpanProcessor (e.g., {"max_export_batch_size": 10}).
        """
        self.service_name = service_name
        self.jaeger_endpoint = jaeger_endpoint
        self.jaeger_transport = jaeger_transport
        self.file_export_name = file_export_name
        self.sqlite_db_name = sqlite_db_name
        self.local_logging_dir = local_logging_dir
        self.batch_processor_options = batch_processor_options or {}
        self.enable_jaeger = enable_jaeger
        self.enable_file = enable_file
        self.enable_sql = enable_sql
        self.global_tracer = None

    def setup_tracing(self):
        """Set up OpenTelemetry tracing with the specified exporters."""
        # Create a Resource with the service name.
        resource = Resource(attributes={"service.name": self.service_name})
        provider = TracerProvider(resource=resource)
        trace.set_tracer_provider(provider)

        # List to collect our span processors.
        span_processors = []

        # If a Jaeger endpoint is specified, add the Jaeger exporter.
        if self.jaeger_endpoint and self.enable_jaeger:
            if self.jaeger_transport == "grpc":
                from opentelemetry.exporter.jaeger.proto.grpc import (
                    JaegerExporter,
                )

                jaeger_exporter = JaegerExporter(
                    endpoint=self.jaeger_endpoint,
                    insecure=True,
                )
            elif self.jaeger_transport == "http":
                from opentelemetry.exporter.jaeger.thrift import JaegerExporter

                jaeger_exporter = JaegerExporter(
                    collector_endpoint=self.jaeger_endpoint,
                )
            else:
                raise ValueError(
                    "Invalid JAEGER_TRANSPORT specified. Use 'grpc' or 'http'."
                )

            span_processors.append(SimpleSpanProcessor(jaeger_exporter))

        # If a file path is provided, add the custom file exporter.
        if self.file_export_name and self.enable_file:
            file_exporter = FileSpanExporter(
                self.local_logging_dir, self.file_export_name
            )
            span_processors.append(SimpleSpanProcessor(file_exporter))

        # If a SQLite database path is provided, ensure the DB exists and add the SQLite exporter.
        if self.sqlite_db_name and self.enable_sql:
            sqlite_exporter = SqliteTelemetryExporter(
                self.local_logging_dir, self.sqlite_db_name
            )
            span_processors.append(SimpleSpanProcessor(sqlite_exporter))

        # Register all span processors with the provider.
        for processor in span_processors:
            provider.add_span_processor(processor)

        provider.add_span_processor(
            BaggageAttributeSpanProcessor(baggage_keys=["session_id", "run_id"])
        )
        # self.global_tracer = trace.get_tracer("flock")
        sys.excepthook = self.log_exception_to_otel

    def log_exception_to_otel(self, exc_type, exc_value, exc_traceback):
        """Log unhandled exceptions to OpenTelemetry."""
        if issubclass(exc_type, KeyboardInterrupt):
            # Allow normal handling of KeyboardInterrupt
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return

        # Use OpenTelemetry to record the exception
        with self.global_tracer.start_as_current_span(
            "UnhandledException"
        ) as span:
            span.record_exception(exc_value)
            span.set_status(
                trace.Status(trace.StatusCode.ERROR, str(exc_value))
            )
```

### src\flock\core\logging\telemetry_exporter\base_exporter.py

- **Lines**: 38
- **Last modified**: 2025-02-18 03:20:40

```py
"""Base class for custom OpenTelemetry exporters."""

from abc import ABC, abstractmethod

from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult


class TelemetryExporter(SpanExporter, ABC):
    """Base class for custom OpenTelemetry exporters."""

    def __init__(self):
        """Initialize the exporter."""
        super().__init__()

    def _export(self, spans):
        """Safely delegate to the subclass export(), normalizing the result.

        A None return from export() is treated as SUCCESS, and any raised
        exception is converted to a FAILURE result instead of propagating.
        """
        try:
            result = self.export(spans)
            if result is None:
                return SpanExportResult.SUCCESS
            return result
        except Exception:
            return SpanExportResult.FAILURE
        # NOTE: the previous implementation called self.shutdown() in a
        # `finally` block here, tearing the exporter down after every
        # batch. Per the SpanExporter contract, shutdown() must run only
        # once, when the owning span processor shuts down.

    @abstractmethod
    def export(self, spans) -> SpanExportResult | None:
        """Export spans to the configured backend.

        To be implemented by subclasses.
        """
        raise NotImplementedError("Subclasses must implement the export method")

    @abstractmethod
    def shutdown(self):
        """Cleanup resources, if any; called once when tracing shuts down."""
        pass
```

### src\flock\core\logging\telemetry_exporter\file_exporter.py

- **Lines**: 85
- **Last modified**: 2025-02-18 03:20:40

```py
"""A simple exporter that writes span data as JSON lines into a file."""

import json

from opentelemetry.sdk.trace.export import SpanExportResult
from opentelemetry.trace import Status, StatusCode
from temporalio import workflow

from flock.core.logging.telemetry_exporter.base_exporter import (
    TelemetryExporter,
)

with workflow.unsafe.imports_passed_through():
    from pathlib import Path


class FileSpanExporter(TelemetryExporter):
    """A simple exporter that writes span data as JSON lines into a file."""

    def __init__(self, dir: str, file_path: str = "flock_events.jsonl"):
        """Initialize the exporter with a target directory and file name."""
        super().__init__()
        self.telemetry_path = Path(dir)
        self.telemetry_path.mkdir(parents=True, exist_ok=True)
        self.file_path = str(self.telemetry_path / file_path)

    def _span_to_json(self, span):
        """Convert a ReadableSpan to a JSON-serializable dict."""
        ctx = span.get_span_context()
        status = span.status or Status(StatusCode.UNSET)

        # Flatten events and links first, then assemble the full record.
        events = [
            {
                "name": event.name,
                "timestamp": event.timestamp,
                "attributes": dict(event.attributes or {}),
            }
            for event in span.events
        ]
        links = [
            {
                "context": {
                    "trace_id": format(link.context.trace_id, "032x"),
                    "span_id": format(link.context.span_id, "016x"),
                },
                "attributes": dict(link.attributes or {}),
            }
            for link in span.links
        ]
        return {
            "name": span.name,
            "context": {
                "trace_id": format(ctx.trace_id, "032x"),
                "span_id": format(ctx.span_id, "016x"),
                "trace_flags": ctx.trace_flags,
                "trace_state": str(ctx.trace_state),
            },
            "kind": span.kind.name if span.kind else None,
            "start_time": span.start_time,
            "end_time": span.end_time,
            "status": {
                "status_code": status.status_code.name,
                "description": status.description,
            },
            "attributes": dict(span.attributes or {}),
            "events": events,
            "links": links,
            "resource": dict(span.resource.attributes.items()),
        }

    def export(self, spans):
        """Append each span as one JSON line to the log file."""
        try:
            with open(self.file_path, "a") as out:
                for span in spans:
                    out.write(json.dumps(self._span_to_json(span)) + "\n")
            return SpanExportResult.SUCCESS
        except Exception:
            return SpanExportResult.FAILURE

    def shutdown(self) -> None:
        """No resources to release on shutdown."""
```

### src\flock\core\logging\telemetry_exporter\sqlite_exporter.py

- **Lines**: 103
- **Last modified**: 2025-02-18 03:20:40

```py
"""Exporter for storing OpenTelemetry spans in SQLite."""

import json
import sqlite3
from pathlib import Path
from typing import Any

from opentelemetry.sdk.trace.export import SpanExportResult

from flock.core.logging.telemetry_exporter.base_exporter import (
    TelemetryExporter,
)


class SqliteTelemetryExporter(TelemetryExporter):
    """Exporter for storing OpenTelemetry spans in SQLite."""

    def __init__(self, dir: str, db_path: str = "flock_events.db"):
        """Initialize the SQLite exporter.

        Args:
            dir: Directory holding the database file (created if missing)
            db_path: File name of the SQLite database inside *dir*
        """
        super().__init__()
        self.telemetry_path = Path(dir)
        self.telemetry_path.mkdir(parents=True, exist_ok=True)
        # Create an absolute path to the database file:
        self.db_path = str(self.telemetry_path.joinpath(db_path).resolve())
        # check_same_thread=False because spans may be exported from a
        # worker thread other than the one that created the connection.
        self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
        self._initialize_database()

    def _initialize_database(self):
        """Set up the SQLite database schema."""
        cursor = self.conn.cursor()
        cursor.execute(
            """
            CREATE TABLE IF NOT EXISTS spans (
                id TEXT PRIMARY KEY,
                name TEXT,
                trace_id TEXT,
                span_id TEXT,
                start_time INTEGER,
                end_time INTEGER,
                attributes TEXT,
                status TEXT
            )
            """
        )
        self.conn.commit()

    def _convert_attributes(self, attributes: dict[str, Any]) -> str:
        """Convert span attributes to a JSON string.

        Args:
            attributes: Dictionary of span attributes

        Returns:
            JSON string representation of attributes
        """
        # Convert attributes to a serializable format
        serializable_attrs = {}
        for key, value in attributes.items():
            # Convert complex types to strings if needed
            if isinstance(value, dict | list | tuple):
                serializable_attrs[key] = json.dumps(value)
            else:
                serializable_attrs[key] = str(value)
        return json.dumps(serializable_attrs)

    def export(self, spans) -> SpanExportResult:
        """Export spans to SQLite."""
        try:
            cursor = self.conn.cursor()
            for span in spans:
                span_id = format(span.context.span_id, "016x")
                trace_id = format(span.context.trace_id, "032x")
                cursor.execute(
                    """
                    INSERT OR REPLACE INTO spans 
                    (id, name, trace_id, span_id, start_time, end_time, attributes, status)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    (
                        span_id,
                        span.name,
                        trace_id,
                        span_id,
                        span.start_time,
                        span.end_time,
                        # Guard against spans without attributes, matching
                        # FileSpanExporter's `span.attributes or {}` handling.
                        self._convert_attributes(span.attributes or {}),
                        str(span.status),
                    ),
                )
            self.conn.commit()
            return SpanExportResult.SUCCESS
        except Exception as e:
            print("Error exporting spans to SQLite:", e)
            return SpanExportResult.FAILURE

    def shutdown(self) -> None:
        """Commit pending work and close the database connection.

        The previous implementation left the sqlite3 connection open
        forever, leaking the file handle on shutdown.
        """
        try:
            self.conn.commit()
        finally:
            self.conn.close()
```

### src\flock\core\logging\trace_and_logged.py

- **Lines**: 59
- **Last modified**: 2025-02-18 03:20:40

```py
"""A decorator that wraps a function in an OpenTelemetry span and logs its inputs, outputs, and exceptions."""

import functools
import inspect

from opentelemetry import trace

from flock.core.logging.logging import get_logger

# Module-level logger and tracer shared by every function decorated below.
logger = get_logger("tools")
tracer = trace.get_tracer(__name__)


def traced_and_logged(func):
    """Wrap *func* in an OpenTelemetry span and log its execution.

    Records the call arguments and result as span attributes, logs
    success and failure, and re-raises any exception after recording it
    on the span. Supports both synchronous and asynchronous callables.
    """

    def _note_inputs(span, args, kwargs):
        # Record the raw call arguments on the span.
        span.set_attribute("args", str(args))
        span.set_attribute("kwargs", str(kwargs))

    def _note_success(span, result):
        # Record and log the successful result.
        span.set_attribute("result", str(result))
        logger.debug(
            f"{func.__name__} executed successfully", result=result
        )

    def _note_failure(span, exc):
        # Log the failure and attach the exception to the span.
        logger.error(f"Error in {func.__name__}", error=str(exc))
        span.record_exception(exc)

    if inspect.iscoroutinefunction(func):

        @functools.wraps(func)
        async def async_wrapper(*args, **kwargs):
            with tracer.start_as_current_span(func.__name__) as span:
                _note_inputs(span, args, kwargs)
                try:
                    result = await func(*args, **kwargs)
                    _note_success(span, result)
                    return result
                except Exception as e:
                    _note_failure(span, e)
                    raise

        return async_wrapper

    @functools.wraps(func)
    def sync_wrapper(*args, **kwargs):
        with tracer.start_as_current_span(func.__name__) as span:
            _note_inputs(span, args, kwargs)
            try:
                result = func(*args, **kwargs)
                _note_success(span, result)
                return result
            except Exception as e:
                _note_failure(span, e)
                raise

    return sync_wrapper
```

### src\flock\core\mixin\dspy_integration.py

- **Lines**: 442
- **Last modified**: 2025-04-02 23:12:31

```py
# src/flock/core/mixin/dspy_integration.py
"""Mixin class for integrating with the dspy library."""

import re  # Import re for parsing
import typing
from typing import Any, Literal

from flock.core.logging.logging import get_logger

# Import split_top_level (assuming it's moved or copied appropriately)
# Option 1: If moved to a shared util
# from flock.core.util.parsing_utils import split_top_level
# Option 2: If kept within this file (as in previous example)
# Define split_top_level here or ensure it's imported

logger = get_logger("mixin.dspy")

# Type definition for agent type override passed to _select_task();
# None means "choose automatically based on whether tools are present".
AgentType = Literal["ReAct", "Completion", "ChainOfThought"] | None


# Helper function needed by _resolve_type_string (copied from input_resolver.py/previous response)
def split_top_level(s: str) -> list[str]:
    """Split *s* on commas that sit at the top level.

    Commas inside brackets/parentheses/braces or inside single/double
    quotes (with backslash escapes honored) do not split. Each piece is
    stripped of surrounding whitespace, and empty pieces are dropped.
    """
    pieces: list[str] = []
    buf: list[str] = []
    depth = 0
    quote: str | None = None  # active quote character, or None
    idx = 0
    length = len(s)
    while idx < length:
        ch = s[idx]
        if quote is not None:
            # Inside a quoted region: honor escapes, watch for the closer.
            if ch == "\\" and idx + 1 < length:
                buf.append(ch)
                buf.append(s[idx + 1])
                idx += 1  # consume the escaped character too
            else:
                buf.append(ch)
                if ch == quote:
                    quote = None
        elif ch in "\"'":
            quote = ch
            buf.append(ch)
        elif ch == "," and depth == 0:
            # Top-level comma: finish the current piece.
            pieces.append("".join(buf).strip())
            buf = []
        else:
            if ch in "([{":
                depth += 1
            elif ch in ")]}":
                depth -= 1
            buf.append(ch)
        idx += 1
    if buf:
        pieces.append("".join(buf).strip())
    # Drop empties produced by trailing/duplicate commas.
    return [piece for piece in pieces if piece]


# Helper function to resolve type strings (can be static or module-level)
def _resolve_type_string(type_str: str) -> type:
    """Resolves a type string into a Python type object.
    Handles built-ins, registered types, and common typing generics like
    List, Dict, Optional, Union, Literal.

    Args:
        type_str: Textual type expression, e.g. "str", "list[int]",
            "Optional[dict[str, Any]]", "Literal['a', 'b']".

    Returns:
        The resolved type (or typing construct) object.

    Raises:
        KeyError: If the string is neither known to the registry nor
            parseable as a supported generic.
    """
    # Import registry here to avoid circular imports
    from flock.core.flock_registry import get_registry

    FlockRegistry = get_registry()

    type_str = type_str.strip()
    logger.debug(f"Attempting to resolve type string: '{type_str}'")

    # 1. Check built-ins and registered types directly
    try:
        # This covers str, int, bool, Any, and types registered by name
        resolved_type = FlockRegistry.get_type(type_str)
        logger.debug(f"Resolved '{type_str}' via registry to: {resolved_type}")
        return resolved_type
    except KeyError:
        logger.debug(
            f"'{type_str}' not found directly in registry, attempting generic parsing."
        )
        pass  # Not found, continue parsing generics

    # 2. Handle typing generics (List, Dict, Optional, Union, Literal)
    # Use regex to match pattern like Generic[InnerType1, InnerType2, ...]
    generic_match = re.fullmatch(r"(\w+)\s*\[(.*)\]", type_str)
    if generic_match:
        base_name = generic_match.group(1).strip()
        args_str = generic_match.group(2).strip()
        logger.debug(
            f"Detected generic pattern: Base='{base_name}', Args='{args_str}'"
        )

        try:
            # Get the base generic type (e.g., list, dict, Optional) from registry/builtins
            BaseType = FlockRegistry.get_type(
                base_name
            )  # Expects List, Dict etc. to be registered
            logger.debug(
                f"Resolved base generic type '{base_name}' to: {BaseType}"
            )

            # Special handling for Literal
            if BaseType is typing.Literal:
                # Split literal values, remove quotes, strip whitespace
                # NOTE(review): every Literal argument is coerced to a string
                # here, so Literal[1, 2] becomes Literal['1', '2'] — confirm
                # numeric literals are not expected in field specs.
                literal_args_raw = split_top_level(args_str)
                literal_args = tuple(
                    s.strip().strip("'\"") for s in literal_args_raw
                )
                logger.debug(
                    f"Parsing Literal arguments: {literal_args_raw} -> {literal_args}"
                )
                resolved_type = typing.Literal[literal_args]  # type: ignore
                logger.debug(f"Constructed Literal type: {resolved_type}")
                return resolved_type

            # Recursively resolve arguments for other generics
            logger.debug(f"Splitting generic arguments: '{args_str}'")
            arg_strs = split_top_level(args_str)
            logger.debug(f"Split arguments: {arg_strs}")
            if not arg_strs:
                raise ValueError("Generic type has no arguments.")

            resolved_arg_types = tuple(
                _resolve_type_string(arg) for arg in arg_strs
            )
            logger.debug(f"Resolved generic arguments: {resolved_arg_types}")

            # Construct the generic type hint
            if BaseType is typing.Optional:
                if len(resolved_arg_types) != 1:
                    raise ValueError("Optional requires exactly one argument.")
                # Optional[X] is normalized to Union[X, None].
                resolved_type = typing.Union[resolved_arg_types[0], type(None)]  # type: ignore
                logger.debug(
                    f"Constructed Optional type as Union: {resolved_type}"
                )
                return resolved_type
            elif BaseType is typing.Union:
                if not resolved_arg_types:
                    raise ValueError("Union requires at least one argument.")
                resolved_type = typing.Union[resolved_arg_types]  # type: ignore
                logger.debug(f"Constructed Union type: {resolved_type}")
                return resolved_type
            elif hasattr(
                BaseType, "__getitem__"
            ):  # Check if subscriptable (like list, dict, List, Dict)
                resolved_type = BaseType[resolved_arg_types]  # type: ignore
                logger.debug(
                    f"Constructed subscripted generic type: {resolved_type}"
                )
                return resolved_type
            else:
                # Base type found but cannot be subscripted
                logger.warning(
                    f"Base type '{base_name}' found but is not a standard subscriptable generic. Returning base type."
                )
                return BaseType

        except (KeyError, ValueError, IndexError, TypeError) as e:
            logger.warning(
                f"Failed to parse generic type '{type_str}': {e}. Falling back."
            )
            # Fall through to raise KeyError below if base type itself wasn't found or parsing failed

    # 3. If not resolved by now, raise error
    logger.error(f"Type string '{type_str}' could not be resolved.")
    raise KeyError(f"Type '{type_str}' could not be resolved.")


class DSPyIntegrationMixin:
    """Mixin class for integrating with the dspy library."""

    def create_dspy_signature_class(
        self, agent_name, description_spec, fields_spec
    ) -> Any:
        """Creates a dynamic DSPy Signature class from string specifications,
        resolving types using the FlockRegistry.

        Args:
            agent_name: Used to name the generated class ("dspy_<agent_name>").
            description_spec: Becomes the generated class's docstring.
            fields_spec: "inputs -> outputs" spec; each field is
                "name: type | description" (type and description optional).

        Raises:
            ImportError: If dspy is not installed.
            ValueError: If the field spec cannot be processed.
            TypeError: If the dynamic class cannot be created.
        """
        try:
            import dspy
        except ImportError:
            logger.error(
                "DSPy library is not installed. Cannot create DSPy signature. "
                "Install with: pip install dspy-ai"
            )
            raise ImportError("DSPy is required for this functionality.")

        base_class = dspy.Signature
        class_dict = {"__doc__": description_spec, "__annotations__": {}}

        if "->" in fields_spec:
            inputs_spec, outputs_spec = fields_spec.split("->", 1)
        else:
            inputs_spec, outputs_spec = (
                fields_spec,
                "",
            )  # Assume only inputs if no '->'

        def parse_field(field_str):
            """Parses 'name: type_str | description' using _resolve_type_string."""
            field_str = field_str.strip()
            if not field_str:
                return None

            parts = field_str.split("|", 1)
            main_part = parts[0].strip()
            desc = parts[1].strip() if len(parts) > 1 else None

            if ":" in main_part:
                name, type_str = [s.strip() for s in main_part.split(":", 1)]
            else:
                name = main_part
                type_str = "str"  # Default type

            try:
                field_type = _resolve_type_string(type_str)
            except Exception as e:  # Catch resolution errors
                logger.error(
                    f"Failed to resolve type '{type_str}' for field '{name}': {e}. Defaulting to str."
                )
                field_type = str

            return name, field_type, desc

        def process_fields(fields_string, field_kind):
            """Process fields and add to class_dict."""
            if not fields_string or not fields_string.strip():
                return

            for field in split_top_level(fields_string):
                if field.strip():
                    parsed = parse_field(field)
                    if not parsed:
                        continue
                    name, field_type, desc = parsed
                    class_dict["__annotations__"][name] = (
                        field_type  # Use resolved type
                    )

                    FieldClass = (
                        dspy.InputField
                        if field_kind == "input"
                        else dspy.OutputField
                    )
                    # DSPy Fields use 'desc' for description
                    class_dict[name] = (
                        FieldClass(desc=desc)
                        if desc is not None
                        else FieldClass()
                    )

        try:
            process_fields(inputs_spec, "input")
            process_fields(outputs_spec, "output")
        except Exception as e:
            logger.error(
                f"Error processing fields for DSPy signature '{agent_name}': {e}",
                exc_info=True,
            )
            raise ValueError(
                f"Could not process fields for signature: {e}"
            ) from e

        # Create and return the dynamic class
        try:
            DynamicSignature = type(
                "dspy_" + agent_name, (base_class,), class_dict
            )
            logger.info(
                f"Successfully created DSPy Signature: {DynamicSignature.__name__} "
                f"with fields: {DynamicSignature.__annotations__}"
            )
            return DynamicSignature
        except Exception as e:
            logger.error(
                f"Failed to create dynamic type 'dspy_{agent_name}': {e}",
                exc_info=True,
            )
            raise TypeError(f"Could not create DSPy signature type: {e}") from e

    def _configure_language_model(
        self,
        model: str | None,
        use_cache: bool,
        temperature: float,
        max_tokens: int,
    ) -> None:
        """Initialize and configure the language model using dspy."""
        if model is None:
            logger.warning(
                "No model specified for DSPy configuration. Using DSPy default."
            )
            # Rely on DSPy's global default or raise error if none configured
            # import dspy
            # if dspy.settings.lm is None:
            #      raise ValueError("No model specified for agent and no global DSPy LM configured.")
            return

        try:
            import dspy
        except ImportError:
            logger.error(
                "DSPy library is not installed. Cannot configure language model."
            )
            return  # Or raise

        try:
            # Ensure 'cache' parameter is handled correctly (might not exist on dspy.LM directly)
            # DSPy handles caching globally or via specific optimizers typically.
            lm_instance = dspy.LM(
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                cache=use_cache,
                # Add other relevant parameters if needed, e.g., API keys via dspy.settings
            )
            dspy.settings.configure(lm=lm_instance)
            logger.info(
                f"DSPy LM configured with model: {model}, temp: {temperature}, max_tokens: {max_tokens}"
            )
            # Note: DSPy caching is usually configured globally, e.g., dspy.settings.configure(cache=...)
            # or handled by optimizers. Setting `cache=use_cache` on dspy.LM might not be standard.
        except Exception as e:
            logger.error(
                f"Failed to configure DSPy language model '{model}': {e}",
                exc_info=True,
            )

    def _select_task(
        self,
        signature: Any,
        agent_type_override: AgentType,
        tools: list[Any] | None = None,
    ) -> Any:
        """Select and instantiate the appropriate DSPy Program/Module.

        "ReAct" -> dspy.ReAct, "ChainOfThought" -> dspy.ChainOfThought,
        "Predict"/"Completion" -> dspy.Predict; anything else falls back
        to dspy.Predict with a warning.
        """
        try:
            import dspy
        except ImportError:
            logger.error(
                "DSPy library is not installed. Cannot select DSPy task."
            )
            raise ImportError("DSPy is required for this functionality.")

        processed_tools = []
        if tools:
            for tool in tools:
                if callable(tool):  # Basic check
                    processed_tools.append(tool)
                # Could add more sophisticated tool wrapping/validation here if needed
                else:
                    logger.warning(
                        f"Item '{tool}' in tools list is not callable, skipping."
                    )

        dspy_program = None
        selected_type = agent_type_override

        # Determine type if not overridden
        if not selected_type:
            selected_type = (
                "ReAct" if processed_tools else "Predict"
            )  # Default logic

        logger.debug(
            f"Selecting DSPy program type: {selected_type} (Tools provided: {bool(processed_tools)})"
        )

        try:
            if selected_type == "ChainOfThought":
                dspy_program = dspy.ChainOfThought(signature)
            elif selected_type == "ReAct":
                # ReAct requires tools, even if empty list
                dspy_program = dspy.ReAct(
                    signature, tools=processed_tools or [], max_iters=10
                )
            elif selected_type in ("Predict", "Completion"):
                # "Completion" is a legal AgentType value but was previously
                # unhandled and fell into the unknown-type fallback; both
                # spellings map to plain dspy.Predict.
                dspy_program = dspy.Predict(signature)
            else:  # Fallback or handle unknown type
                logger.warning(
                    f"Unknown or unsupported agent_type_override '{selected_type}'. Defaulting to dspy.Predict."
                )
                dspy_program = dspy.Predict(signature)

            logger.info(
                f"Instantiated DSPy program: {type(dspy_program).__name__}"
            )
            return dspy_program
        except Exception as e:
            logger.error(
                f"Failed to instantiate DSPy program of type '{selected_type}': {e}",
                exc_info=True,
            )
            raise RuntimeError(f"Could not create DSPy program: {e}") from e

    def _process_result(
        self, result: Any, inputs: dict[str, Any]
    ) -> dict[str, Any]:
        """Convert the DSPy result object to a dictionary.

        Merges *inputs* with the extracted outputs; on conversion failure
        returns an error dict containing the stringified raw result.
        """
        if result is None:
            logger.warning("DSPy program returned None result.")
            return {}
        try:
            # DSPy Prediction objects often behave like dicts or have .keys() / items()
            if hasattr(result, "items") and callable(result.items):
                output_dict = dict(result.items())
            elif hasattr(result, "__dict__"):  # Fallback for other object types
                output_dict = {
                    k: v
                    for k, v in result.__dict__.items()
                    if not k.startswith("_")
                }
            else:
                # If it's already a dict (less common for DSPy results directly)
                if isinstance(result, dict):
                    output_dict = result
                else:  # Final fallback
                    logger.warning(
                        f"Could not reliably convert DSPy result of type {type(result)} to dict. Returning as is."
                    )
                    output_dict = {"raw_result": result}

            logger.debug(f"Processed DSPy result to dict: {output_dict}")
            # Optionally merge inputs back if desired (can make result dict large)
            final_result = {**inputs, **output_dict}
            return final_result

        except Exception as conv_error:
            logger.error(
                f"Failed to process DSPy result into dictionary: {conv_error}",
                exc_info=True,
            )
            return {
                "error": "Failed to process result",
                "raw_result": str(result),
            }
```

### src\flock\core\mixin\prompt_parser.py

- **Lines**: 125
- **Last modified**: 2025-02-18 03:20:40

```py
"""A mixin class for parsing agent prompts and building clean signatures for DSPy."""

# DEPRECATED! This mixin is no longer used in the current version of Flock. It was used to parse agent prompts and build clean signatures for DSPy.
# TODO: DELETE THIS FILE!

from flock.core.util.input_resolver import split_top_level


class PromptParserMixin:
    """A mixin class for parsing agent prompts and building clean signatures for DSPy."""

    def _parse_key_descriptions(self, keys_str: str) -> list[tuple[str, str]]:
        """Parse a comma-separated key-definition string into (key, description) pairs.

        Each definition has the form ``key: type_hint | description``; the type
        hint and the description are both optional. When no ``|`` is present the
        description defaults to the empty string. Splitting on commas is done
        with split_top_level() so commas nested inside type hints survive.

        Example:
            "query: str | The search query, context: dict | The full conversation context"
            -> [("query", "The search query"), ("context", "The full conversation context")]

        Args:
            keys_str: A comma-separated string of key definitions.

        Returns:
            A list of (key, description) tuples, one per non-empty definition.
        """
        parsed: list[tuple[str, str]] = []
        for segment in split_top_level(keys_str):
            if not segment:
                continue
            # partition() handles both the "with description" and "without
            # description" cases uniformly: the third element is "" when
            # there is no "|" separator.
            typed_key, _, description = segment.partition("|")
            name = typed_key.split(":", 1)[0].strip()
            parsed.append((name, description.strip()))
        return parsed

    def _build_clean_signature(self, keys_str: str) -> str:
        """Strip the description parts from a configuration string.

        Example:
            "query: str | The search query, context: dict | The full conversation context"
            -> "query: str, context: dict"

        Uses split_top_level() so commas inside type hints are not treated as
        key separators.

        Args:
            keys_str: The configuration string with keys, type hints, and
                optional descriptions.

        Returns:
            A signature string containing only keys and type hints.
        """
        cleaned = [
            segment.partition("|")[0].strip()
            for segment in split_top_level(keys_str)
            if segment
        ]
        return ", ".join(cleaned)

    def _build_descriptions(self) -> tuple[dict[str, str], dict[str, str]]:
        """Build description lookup dicts from the agent's input/output config.

        Returns:
            A tuple of two dicts:
            - input_desc: input key (without type hint) -> description
            - output_desc: output key (without type hint) -> description
        """
        input_desc: dict[str, str] = (
            dict(self._parse_key_descriptions(self.input)) if self.input else {}
        )
        output_desc: dict[str, str] = (
            dict(self._parse_key_descriptions(self.output)) if self.output else {}
        )
        return input_desc, output_desc

    def _build_prompt(
        self, input_desc: dict[str, str], output_desc: dict[str, str]
    ) -> str:
        """Build a clean DSPy signature prompt from the agent's configuration.

        The description parts are stripped from self.input / self.output and
        the remaining key/type segments are joined with "->". For example,
        input "query: str | The search query" and output "result: str | The result"
        yield "query: str -> result: str". The descriptive metadata itself is
        carried separately via the dicts from _build_descriptions.

        Args:
            input_desc: Input key descriptions (metadata only; not embedded here).
            output_desc: Output key descriptions (metadata only; not embedded here).

        Returns:
            A clean "inputs -> outputs" signature string for DSPy.
        """
        left = self._build_clean_signature(self.input) if self.input else ""
        right = self._build_clean_signature(self.output) if self.output else ""
        return f"{left} -> {right}"
```

### src\flock\core\serialization\callable_registry.py

- **Lines**: 52
- **Last modified**: 2025-04-02 17:29:19

```py
"""Registry system for callable objects to support serialization."""

from collections.abc import Callable


class CallableRegistry:
    """Central name -> callable registry used by the serialization layer.

    Serialized formats reference functions/methods by a string name; this
    class maps those names back to the live callables.

    This is a placeholder implementation that will be fully implemented in
    task US007-T004.
    """

    # Shared class-level mapping; all access goes through the classmethods.
    _registry: dict[str, Callable] = {}

    @classmethod
    def register(cls, name: str, callable_obj: Callable) -> None:
        """Store *callable_obj* under *name*, overwriting any previous entry.

        Args:
            name: Unique name for the callable.
            callable_obj: Function or method to register.
        """
        cls._registry[name] = callable_obj

    @classmethod
    def get(cls, name: str) -> Callable:
        """Look up a previously registered callable.

        Args:
            name: Name of the callable to retrieve.

        Returns:
            The registered callable.

        Raises:
            KeyError: If no callable with the given name is registered.
        """
        return cls._registry[name]

    @classmethod
    def contains(cls, name: str) -> bool:
        """Report whether *name* has a registered callable.

        Args:
            name: Name to check.

        Returns:
            True if registered, False otherwise.
        """
        return name in cls._registry

### src\flock\core\serialization\json_encoder.py

- **Lines**: 41
- **Last modified**: 2025-03-29 13:53:59

```py
"""JSON encoder utilities for Flock objects."""

import json
from datetime import datetime
from typing import Any


class FlockJSONEncoder(json.JSONEncoder):
    """Custom JSON encoder for handling Pydantic models and other non-serializable objects.

    Fallback order for an unserializable object: Pydantic model -> datetime ->
    set -> .to_dict() -> public __dict__ attributes -> base encoder -> str(obj).
    """

    def default(self, obj: Any) -> Any:
        """Return a JSON-serializable representation of *obj*.

        Only called by json.dumps for objects the standard encoder cannot
        handle. Never raises: the final fallback stringifies the object.
        """
        # Pydantic is an optional capability here: guard the import so the
        # encoder still works for sets/datetimes/etc. when pydantic is not
        # installed (the unguarded import previously raised ImportError).
        try:
            from pydantic import BaseModel
        except ImportError:
            BaseModel = None

        # Handle Pydantic models
        if BaseModel is not None and isinstance(obj, BaseModel):
            return obj.model_dump()

        # Handle datetime objects
        if isinstance(obj, datetime):
            return obj.isoformat()

        # Handle sets, convert to list
        if isinstance(obj, set):
            return list(obj)

        # Handle objects exposing a to_dict() method
        to_dict = getattr(obj, "to_dict", None)
        if callable(to_dict):
            return to_dict()

        # Handle plain objects: expose public instance attributes only
        if hasattr(obj, "__dict__"):
            return {
                k: v for k, v in obj.__dict__.items() if not k.startswith("_")
            }

        # Let the parent class handle it or raise TypeError
        try:
            return super().default(obj)
        except TypeError:
            # If all else fails, convert to string
            return str(obj)
```

### src\flock\core\serialization\secure_serializer.py

- **Lines**: 175
- **Last modified**: 2025-02-24 21:22:09

```py
import cloudpickle


class SecureSerializer:
    """Security-focused serialization system with capability controls for Flock objects.

    Callables are serialized via cloudpickle together with metadata about
    their origin. Capability levels ("unrestricted", "restricted",
    "high_risk", "unknown") gate both serialization and deserialization.
    """

    # Define capability levels for different modules (matched by exact
    # module name or by "prefix." submodule prefix).
    MODULE_CAPABILITIES = {
        # Core Python - unrestricted
        "builtins": "unrestricted",
        "datetime": "unrestricted",
        "re": "unrestricted",
        "math": "unrestricted",
        "json": "unrestricted",
        # Framework modules - unrestricted
        "flock": "unrestricted",
        # System modules - restricted but allowed
        "os": "restricted",
        "io": "restricted",
        "sys": "restricted",
        "subprocess": "high_risk",
        # Network modules - high risk
        "socket": "high_risk",
        "requests": "high_risk",
    }

    # Functions that should never be serialized. Entries are either
    # qualified "module.name" strings or bare __name__ values (e.g. "eval").
    BLOCKED_FUNCTIONS = {
        "os.system",
        "os.popen",
        "os.spawn",
        "os.exec",
        "subprocess.call",
        "subprocess.run",
        "subprocess.Popen",
        "eval",
        "exec",
        "__import__",
    }

    @staticmethod
    def _get_module_capability(module_name):
        """Get the capability level for a module.

        Returns "unknown" for unlisted modules and for a missing/None
        module name (some callables carry no __module__ attribute).
        """
        if not module_name:
            return "unknown"
        for prefix, level in SecureSerializer.MODULE_CAPABILITIES.items():
            if module_name == prefix or module_name.startswith(f"{prefix}."):
                return level
        return "unknown"  # Default to unknown for unlisted modules

    @staticmethod
    def _is_safe_callable(obj):
        """Check if a callable is safe to serialize.

        Returns:
            (True, capability_or_reason) when the object may be serialized,
            (False, reason) when it is blocked or of unknown capability.
        """
        if not callable(obj) or isinstance(obj, type):
            return True, "Not a callable function"

        # getattr with defaults: objects like functools.partial have no
        # __module__/__name__, and the bare attribute access crashed here.
        module = getattr(obj, "__module__", None)
        name = getattr(obj, "__name__", None)
        func_name = f"{module}.{name}" if name is not None else "unknown"

        # Check the qualified name AND the bare name: builtins such as
        # eval/exec are listed bare and previously slipped through as
        # "builtins.eval" with unrestricted capability.
        if (
            func_name in SecureSerializer.BLOCKED_FUNCTIONS
            or name in SecureSerializer.BLOCKED_FUNCTIONS
        ):
            return False, f"Function {func_name} is explicitly blocked"

        # Check module capability level
        capability = SecureSerializer._get_module_capability(module)
        if capability == "unknown":
            return False, f"Module {module} has unknown security capability"

        return True, capability

    @staticmethod
    def serialize(obj, allow_restricted=True, allow_high_risk=False):
        """Serialize an object with capability checks.

        Scalars pass through unchanged; lists/dicts are processed
        recursively; safe callables become tagged dicts containing a
        hex-encoded cloudpickle payload plus origin metadata.

        Raises:
            ValueError: if a callable is blocked, or its capability level
                is not permitted by the allow_* flags.
        """
        if callable(obj) and not isinstance(obj, type):
            is_safe, capability = SecureSerializer._is_safe_callable(obj)

            if not is_safe:
                raise ValueError(
                    f"Cannot serialize unsafe callable: {capability}"
                )

            # Safe defaults so error messages/metadata never crash on
            # exotic callables lacking these attributes.
            module = getattr(obj, "__module__", "unknown")
            name = getattr(obj, "__name__", "unknown")

            if capability == "high_risk" and not allow_high_risk:
                raise ValueError(
                    f"High risk callable {module}.{name} requires explicit permission"
                )

            if capability == "restricted" and not allow_restricted:
                raise ValueError(
                    f"Restricted callable {module}.{name} requires explicit permission"
                )

            # Store metadata about the callable for verification during deserialization
            metadata = {
                "module": module,
                "name": name,
                "capability": capability,
            }

            return {
                "__serialized_callable__": True,
                "data": cloudpickle.dumps(obj).hex(),
                "metadata": metadata,
            }

        if isinstance(obj, list):
            return [
                SecureSerializer.serialize(
                    item, allow_restricted, allow_high_risk
                )
                for item in obj
            ]

        if isinstance(obj, dict):
            return {
                k: SecureSerializer.serialize(
                    v, allow_restricted, allow_high_risk
                )
                for k, v in obj.items()
            }

        return obj

    @staticmethod
    def deserialize(obj, allow_restricted=True, allow_high_risk=False):
        """Deserialize an object with capability enforcement.

        Raises:
            ValueError: for capability violations, a corrupted payload, or
                a metadata mismatch (possible tampering).
        """
        if isinstance(obj, dict) and obj.get("__serialized_callable__") is True:
            # Validate the capability level during deserialization
            metadata = obj.get("metadata", {})
            capability = metadata.get("capability", "unknown")

            if capability == "high_risk" and not allow_high_risk:
                raise ValueError(
                    f"Cannot deserialize high risk callable {metadata.get('module')}.{metadata.get('name')}"
                )

            if capability == "restricted" and not allow_restricted:
                raise ValueError(
                    f"Cannot deserialize restricted callable {metadata.get('module')}.{metadata.get('name')}"
                )

            # Only wrap the actual unpickling: previously the tampering
            # ValueError below was raised inside this try and got swallowed
            # and re-wrapped as a generic deserialization failure.
            try:
                callable_obj = cloudpickle.loads(bytes.fromhex(obj["data"]))
            except Exception as e:
                raise ValueError(
                    f"Failed to deserialize callable: {e!s}"
                ) from e

            # Additional verification that the deserialized object matches its metadata
            if callable_obj.__module__ != metadata.get("module") or (
                hasattr(callable_obj, "__name__")
                and callable_obj.__name__ != metadata.get("name")
            ):
                raise ValueError(
                    "Callable metadata mismatch - possible tampering detected"
                )

            return callable_obj

        if isinstance(obj, list):
            return [
                SecureSerializer.deserialize(
                    item, allow_restricted, allow_high_risk
                )
                for item in obj
            ]

        if isinstance(obj, dict) and "__serialized_callable__" not in obj:
            return {
                k: SecureSerializer.deserialize(
                    v, allow_restricted, allow_high_risk
                )
                for k, v in obj.items()
            }

        return obj
```

### src\flock\core\serialization\serializable.py

- **Lines**: 315
- **Last modified**: 2025-04-03 23:51:18

```py
# src/flock/core/serialization/serializable.py
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, TypeVar

# Use yaml if available, otherwise skip yaml methods
try:
    import yaml

    YAML_AVAILABLE = True
except ImportError:
    YAML_AVAILABLE = False

# Use msgpack if available
try:
    import msgpack

    MSGPACK_AVAILABLE = True
except ImportError:
    MSGPACK_AVAILABLE = False

# Use cloudpickle
try:
    import cloudpickle

    PICKLE_AVAILABLE = True
except ImportError:
    PICKLE_AVAILABLE = False


T = TypeVar("T", bound="Serializable")


class Serializable(ABC):
    """Base class for all serializable objects in the system.

    Provides methods for serializing/deserializing objects to various formats.
    Subclasses MUST implement to_dict and from_dict.
    """

    @abstractmethod
    def to_dict(self) -> dict[str, Any]:
        """Convert instance to a dictionary representation suitable for serialization.
        This method should handle converting nested Serializable objects and callables.
        """
        pass

    @classmethod
    @abstractmethod
    def from_dict(cls: type[T], data: dict[str, Any]) -> T:
        """Create instance from a dictionary representation.
        This method should handle reconstructing nested Serializable objects and callables.
        """
        pass

    # --- JSON Methods ---
    def to_json(self, indent: int | None = 2) -> str:
        """Serialize to JSON string."""
        # Import encoder locally to avoid making it a hard dependency if JSON isn't used
        from .json_encoder import FlockJSONEncoder

        try:
            # Note: to_dict should ideally prepare the structure fully.
            # FlockJSONEncoder is a fallback for types missed by to_dict.
            return json.dumps(
                self.to_dict(), cls=FlockJSONEncoder, indent=indent
            )
        except Exception as e:
            raise RuntimeError(
                f"Failed to serialize {self.__class__.__name__} to JSON: {e}"
            ) from e

    @classmethod
    def from_json(cls: type[T], json_str: str) -> T:
        """Create instance from JSON string."""
        try:
            data = json.loads(json_str)
            return cls.from_dict(data)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON string: {e}") from e
        except Exception as e:
            raise RuntimeError(
                f"Failed to deserialize {cls.__name__} from JSON: {e}"
            ) from e

    # --- YAML Methods ---
    def to_yaml(self, sort_keys=False, default_flow_style=False) -> str:
        """Serialize to YAML string."""
        if not YAML_AVAILABLE:
            raise NotImplementedError(
                "YAML support requires PyYAML: pip install pyyaml"
            )
        try:
            # to_dict should prepare a structure suitable for YAML dumping
            return yaml.dump(
                self.to_dict(),
                sort_keys=sort_keys,
                default_flow_style=default_flow_style,
                allow_unicode=True,
            )
        except Exception as e:
            raise RuntimeError(
                f"Failed to serialize {self.__class__.__name__} to YAML: {e}"
            ) from e

    @classmethod
    def from_yaml(cls: type[T], yaml_str: str) -> T:
        """Create instance from YAML string."""
        if not YAML_AVAILABLE:
            raise NotImplementedError(
                "YAML support requires PyYAML: pip install pyyaml"
            )
        try:
            data = yaml.safe_load(yaml_str)
            if not isinstance(data, dict):
                raise TypeError(
                    f"YAML did not yield a dictionary for {cls.__name__}"
                )
            return cls.from_dict(data)
        except yaml.YAMLError as e:
            raise ValueError(f"Invalid YAML string: {e}") from e
        except Exception as e:
            raise RuntimeError(
                f"Failed to deserialize {cls.__name__} from YAML: {e}"
            ) from e

    def to_yaml_file(self, path: Path | str, **yaml_dump_kwargs) -> None:
        """Serialize to YAML file."""
        if not YAML_AVAILABLE:
            raise NotImplementedError(
                "YAML support requires PyYAML: pip install pyyaml"
            )
        path = Path(path)
        try:
            path.parent.mkdir(parents=True, exist_ok=True)
            yaml_str = self.to_yaml(**yaml_dump_kwargs)
            path.write_text(yaml_str, encoding="utf-8")
        except Exception as e:
            raise RuntimeError(
                f"Failed to write {self.__class__.__name__} to YAML file {path}: {e}"
            ) from e

    @classmethod
    def from_yaml_file(cls: type[T], path: Path | str) -> T:
        """Create instance from YAML file."""
        if not YAML_AVAILABLE:
            raise NotImplementedError(
                "YAML support requires PyYAML: pip install pyyaml"
            )
        path = Path(path)
        try:
            yaml_str = path.read_text(encoding="utf-8")
            return cls.from_yaml(yaml_str)
        except FileNotFoundError:
            raise
        except Exception as e:
            raise RuntimeError(
                f"Failed to read {cls.__name__} from YAML file {path}: {e}"
            ) from e

    # --- MsgPack Methods ---
    def to_msgpack(self) -> bytes:
        """Serialize to msgpack bytes."""
        if not MSGPACK_AVAILABLE:
            raise NotImplementedError(
                "MsgPack support requires msgpack: pip install msgpack"
            )
        try:
            # Use default hook for complex types if needed, or rely on to_dict
            return msgpack.packb(self.to_dict(), use_bin_type=True)
        except Exception as e:
            raise RuntimeError(
                f"Failed to serialize {self.__class__.__name__} to MsgPack: {e}"
            ) from e

    @classmethod
    def from_msgpack(cls: type[T], msgpack_bytes: bytes) -> T:
        """Create instance from msgpack bytes."""
        if not MSGPACK_AVAILABLE:
            raise NotImplementedError(
                "MsgPack support requires msgpack: pip install msgpack"
            )
        try:
            # Use object_hook if custom deserialization is needed beyond from_dict
            data = msgpack.unpackb(msgpack_bytes, raw=False)
            if not isinstance(data, dict):
                raise TypeError(
                    f"MsgPack did not yield a dictionary for {cls.__name__}"
                )
            return cls.from_dict(data)
        except Exception as e:
            raise RuntimeError(
                f"Failed to deserialize {cls.__name__} from MsgPack: {e}"
            ) from e

    def to_msgpack_file(self, path: Path | str) -> None:
        """Serialize to msgpack file."""
        if not MSGPACK_AVAILABLE:
            raise NotImplementedError(
                "MsgPack support requires msgpack: pip install msgpack"
            )
        path = Path(path)
        try:
            path.parent.mkdir(parents=True, exist_ok=True)
            msgpack_bytes = self.to_msgpack()
            path.write_bytes(msgpack_bytes)
        except Exception as e:
            raise RuntimeError(
                f"Failed to write {self.__class__.__name__} to MsgPack file {path}: {e}"
            ) from e

    @classmethod
    def from_msgpack_file(cls: type[T], path: Path | str) -> T:
        """Create instance from msgpack file."""
        if not MSGPACK_AVAILABLE:
            raise NotImplementedError(
                "MsgPack support requires msgpack: pip install msgpack"
            )
        path = Path(path)
        try:
            msgpack_bytes = path.read_bytes()
            return cls.from_msgpack(msgpack_bytes)
        except FileNotFoundError:
            raise
        except Exception as e:
            raise RuntimeError(
                f"Failed to read {cls.__name__} from MsgPack file {path}: {e}"
            ) from e

    # --- Pickle Methods (Use with caution due to security risks) ---
    def to_pickle(self) -> bytes:
        """Serialize to pickle bytes using cloudpickle."""
        if not PICKLE_AVAILABLE:
            raise NotImplementedError(
                "Pickle support requires cloudpickle: pip install cloudpickle"
            )
        try:
            return cloudpickle.dumps(self)
        except Exception as e:
            raise RuntimeError(
                f"Failed to serialize {self.__class__.__name__} to Pickle: {e}"
            ) from e

    @classmethod
    def from_pickle(cls: type[T], pickle_bytes: bytes) -> T:
        """Create instance from pickle bytes using cloudpickle."""
        if not PICKLE_AVAILABLE:
            raise NotImplementedError(
                "Pickle support requires cloudpickle: pip install cloudpickle"
            )
        try:
            instance = cloudpickle.loads(pickle_bytes)
            if not isinstance(instance, cls):
                raise TypeError(
                    f"Deserialized object is not of type {cls.__name__}"
                )
            return instance
        except Exception as e:
            raise RuntimeError(
                f"Failed to deserialize {cls.__name__} from Pickle: {e}"
            ) from e

    def to_pickle_file(self, path: Path | str) -> None:
        """Serialize to pickle file using cloudpickle."""
        if not PICKLE_AVAILABLE:
            raise NotImplementedError(
                "Pickle support requires cloudpickle: pip install cloudpickle"
            )
        path = Path(path)
        try:
            path.parent.mkdir(parents=True, exist_ok=True)
            pickle_bytes = self.to_pickle()
            path.write_bytes(pickle_bytes)
        except Exception as e:
            raise RuntimeError(
                f"Failed to write {self.__class__.__name__} to Pickle file {path}: {e}"
            ) from e

    @classmethod
    def from_pickle_file(cls: type[T], path: Path | str) -> T:
        """Create instance from pickle file using cloudpickle."""
        if not PICKLE_AVAILABLE:
            raise NotImplementedError(
                "Pickle support requires cloudpickle: pip install cloudpickle"
            )
        path = Path(path)
        try:
            pickle_bytes = path.read_bytes()
            return cls.from_pickle(pickle_bytes)
        except FileNotFoundError:
            raise
        except Exception as e:
            raise RuntimeError(
                f"Failed to read {cls.__name__} from Pickle file {path}: {e}"
            ) from e

    # _filter_none_values remains unchanged
    @staticmethod
    def _filter_none_values(data: Any) -> Any:
        """Filter out None values from dictionaries and lists recursively."""
        if isinstance(data, dict):
            return {
                k: Serializable._filter_none_values(v)
                for k, v in data.items()
                if v is not None
            }
        elif isinstance(data, list):
            # Filter None from list items AND recursively filter within items
            return [
                Serializable._filter_none_values(item)
                for item in data
                if item is not None
            ]
        return data
```

### src\flock\core\serialization\serialization_utils.py

- **Lines**: 199
- **Last modified**: 2025-04-04 16:30:16

```py
# src/flock/core/serialization/serialization_utils.py
"""Utilities for recursive serialization/deserialization with callable handling."""

import importlib
from collections.abc import Mapping, Sequence
from typing import TYPE_CHECKING, Any

from pydantic import BaseModel

# Use TYPE_CHECKING to avoid circular imports
if TYPE_CHECKING:
    pass

from flock.core.logging.logging import get_logger

logger = get_logger("serialization.utils")

# Remove this line to avoid circular import at module level
# FlockRegistry = get_registry()  # Get singleton instance

# --- Serialization Helper ---


def serialize_item(item: Any) -> Any:
    """Recursively prepares an item for serialization (e.g., to dict for YAML/JSON).

    Converts known callables to their path strings using FlockRegistry,
    Pydantic models via model_dump, and type objects to component/type
    reference dicts. Basic scalars (including bytes) pass through unchanged.

    Args:
        item: Arbitrary value to prepare.

    Returns:
        A structure of dicts/lists/scalars safe for YAML/JSON dumping.
    """
    # Import the registry lazily when needed (avoids circular import at load).
    from flock.core.flock_registry import get_registry

    FlockRegistry = get_registry()

    if isinstance(item, BaseModel):
        # model_dump(mode="json") converts most values already; recurse to
        # catch callables/types nested inside the dumped structure.
        dumped = item.model_dump(mode="json", exclude_none=True)
        return serialize_item(dumped)

    if callable(item) and not isinstance(item, type):
        path_str = FlockRegistry.get_callable_path_string(item)  # Use registry helper
        if path_str:
            return {"__callable_ref__": path_str}
        logger.warning(
            f"Could not get path string for callable {item}, storing as string."
        )
        return str(item)

    if isinstance(item, Mapping):
        return {key: serialize_item(value) for key, value in item.items()}

    # Exclude bytes/bytearray as well as str: they are Sequences, and the
    # previous check recursed into them, mangling binary data into a list
    # of ints.
    if isinstance(item, Sequence) and not isinstance(
        item, (str, bytes, bytearray)
    ):
        return [serialize_item(sub_item) for sub_item in item]

    if isinstance(item, type):
        # Handle type objects themselves (e.g. if stored directly):
        # prefer the component registry, then a plain path reference.
        type_name = FlockRegistry.get_component_type_name(item)
        if type_name:
            return {"__component_ref__": type_name}
        type_name = FlockRegistry._get_path_string(item)
        if type_name:
            return {"__type_ref__": type_name}
        logger.warning(
            f"Could not serialize type object {item}, storing as string."
        )
        return str(item)

    # Return basic types as is (int, float, str, bool, None, bytes, ...).
    return item


# --- Deserialization Helper ---


def deserialize_item(item: Any) -> Any:
    """Recursively processes a deserialized item (e.g., from YAML/JSON dict).

    Converts reference dicts ({"__callable_ref__": ...}, {"__component_ref__": ...},
    {"__type_ref__": ...}) back to actual callables or types using FlockRegistry.
    Handles nested lists and dicts; unresolvable references become None
    (logged as errors).
    """
    # Import the registry lazily when needed (avoids circular import at load).
    from flock.core.flock_registry import get_registry

    FlockRegistry = get_registry()

    if isinstance(item, Mapping):
        if "__callable_ref__" in item and len(item) == 1:
            path_str = item["__callable_ref__"]
            try:
                return FlockRegistry.get_callable(path_str)
            except KeyError:
                logger.error(
                    f"Callable reference '{path_str}' not found during deserialization."
                )
                return None
        if "__component_ref__" in item and len(item) == 1:
            type_name = item["__component_ref__"]
            try:
                return FlockRegistry.get_component(type_name)
            except KeyError:
                logger.error(
                    f"Component reference '{type_name}' not found during deserialization."
                )
                return None
        if "__type_ref__" in item and len(item) == 1:
            type_name = item["__type_ref__"]
            try:
                # Registered types first; dynamic import is the fallback
                # (mirrors get_callable's behavior).
                return FlockRegistry.get_type(type_name)
            except KeyError:
                try:
                    if "." not in type_name:
                        # Builtins: the whole name IS the class name.
                        # (Previously class_name was left unassigned on this
                        # branch, so builtins always failed with NameError.)
                        mod = importlib.import_module("builtins")
                        class_name = type_name
                    else:
                        module_name, class_name = type_name.rsplit(".", 1)
                        mod = importlib.import_module(module_name)
                    type_obj = getattr(mod, class_name)
                    if not isinstance(type_obj, type):
                        raise TypeError(
                            f"'{type_name}' did not resolve to a type"
                        )
                    FlockRegistry.register_type(type_obj, type_name)  # Cache it
                    return type_obj
                except Exception:
                    logger.error(
                        f"Type reference '{type_name}' not found in registry or via dynamic import."
                    )
                    return None

        # Recursively deserialize dictionary values
        return {key: deserialize_item(value) for key, value in item.items()}

    # Exclude bytes/bytearray as well as str: recursing into them would
    # mangle binary data into a list of ints.
    if isinstance(item, Sequence) and not isinstance(
        item, (str, bytes, bytearray)
    ):
        return [deserialize_item(sub_item) for sub_item in item]

    # Return basic types as is
    return item


# --- Component Deserialization Helper ---
def deserialize_component(
    data: dict | None, expected_base_type: type
) -> Any | None:
    """Deserializes a component (Module, Evaluator, Router) from its dict representation.

    Uses the 'type' field to find the correct class via FlockRegistry,
    recursively deserializes the remaining fields (resolving nested
    callable/type references), and instantiates the class.

    Args:
        data: The serialized component dict, or None.
        expected_base_type: Base class the resolved component must derive from.

    Returns:
        The instantiated component, or None (after logging) on any failure.
    """
    # Import the registry lazily to avoid circular imports at module load time.
    from flock.core.flock_registry import COMPONENT_BASE_TYPES, get_registry

    FlockRegistry = get_registry()

    if data is None:
        return None
    if not isinstance(data, dict):
        logger.error(
            f"Expected dict for component deserialization, got {type(data)}"
        )
        return None

    # The 'type' key holds the registered class name string.
    type_name = data.get("type")
    if not type_name:
        logger.error(f"Component data missing 'type' field: {data}")
        return None

    try:
        ComponentClass = FlockRegistry.get_component(type_name)  # Use registry
        # Sanity check: the resolved class must derive from the expected base.
        if COMPONENT_BASE_TYPES and not issubclass(
            ComponentClass, expected_base_type
        ):
            raise TypeError(
                f"Deserialized class {type_name} is not a subclass of {expected_base_type.__name__}"
            )

        # Recursively deserialize the data *before* passing to the Pydantic
        # constructor so nested callables/types are resolved to real objects.
        deserialized_data_for_init = {}
        for k, v in data.items():
            # Don't pass the 'type' discriminator to the constructor when it
            # merely repeats the class name.
            if k == "type" and v == ComponentClass.__name__:
                continue
            deserialized_data_for_init[k] = deserialize_item(v)

        # Use Pydantic constructor directly. Assumes keys match field names.
        # from_dict could be added to components for more complex logic if needed.
        return ComponentClass(**deserialized_data_for_init)

    except Exception as e:
        # Single broad handler; listing (KeyError, TypeError, Exception) is
        # redundant since Exception already covers both.
        logger.error(
            f"Failed to deserialize component of type '{type_name}': {e}",
            exc_info=True,
        )
        return None
```

### src\flock\core\tools\dev_tools\github.py

- **Lines**: 157
- **Last modified**: 2025-02-18 03:20:40

```py
"""This module provides tools for interacting with GitHub repositories."""

import base64
import os

import httpx

from flock.core.logging.trace_and_logged import traced_and_logged


@traced_and_logged
def create_user_stories_as_github_issue(title: str, body: str) -> str:
    """Create a GitHub issue with the given title and body.

    Uses the GITHUB_PAT (token) and GITHUB_REPO ("owner/repo") environment
    variables, matching the other tools in this module.

    Returns:
        A human-readable status message.

    Raises:
        ValueError: If the required environment variables are missing.
    """
    github_pat = os.getenv("GITHUB_PAT")
    github_repo = os.getenv("GITHUB_REPO")

    # Fail fast like upload_readme/create_files instead of sending an
    # unauthenticated request to a "None" repository URL.
    if not github_pat or not github_repo:
        raise ValueError(
            "Missing environment variables: GITHUB_REPO or GITHUB_PAT"
        )

    url = f"https://api.github.com/repos/{github_repo}/issues"
    headers = {
        "Authorization": f"Bearer {github_pat}",
        "Accept": "application/vnd.github+json",
    }

    payload = {"title": title, "body": body}
    response = httpx.post(url, json=payload, headers=headers)

    # GitHub returns 201 Created when the issue was created.
    if response.status_code == 201:
        return "Issue created successfully."
    else:
        return "Failed to create issue. Please try again later."


@traced_and_logged
def upload_readme(content: str):
    """Create or update README.md on the ``main`` branch of the configured repo.

    Reads GITHUB_USERNAME, GITHUB_REPO and GITHUB_PAT from the environment.

    Raises:
        ValueError: If any required environment variable is missing.
    """
    username = os.getenv("GITHUB_USERNAME")
    repo_name = os.getenv("GITHUB_REPO")
    token = os.getenv("GITHUB_PAT")

    if not username or not repo_name or not token:
        raise ValueError(
            "Missing environment variables: GITHUB_USERNAME, GITHUB_REPO, or GITHUB_PAT"
        )

    api_url = f"https://api.github.com/repos/{username}/{repo_name}/contents/README.md"
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github.v3+json",
    }

    # The GitHub contents API expects the file body base64-encoded.
    payload = {
        "message": "Updating README.md",
        "content": base64.b64encode(content.encode()).decode(),
        "branch": "main",
    }

    with httpx.Client() as client:
        # Fetch the existing file's SHA first; GitHub requires it for updates.
        existing = client.get(api_url, headers=headers).json()
        existing_sha = existing.get("sha", None)
        if existing_sha:
            payload["sha"] = existing_sha

        response = client.put(api_url, json=payload, headers=headers)

        if response.status_code in (200, 201):
            print("README.md successfully uploaded/updated!")
        else:
            print("Failed to upload README.md:", response.json())


@traced_and_logged
def create_files(file_paths) -> str:
    """Create multiple files in a GitHub repository with a predefined content.

    This function iterates over a list of file paths (relative to the repository root) and creates
    each file in the specified GitHub repository with the content "#created by flock". For each file,
    it checks whether the file already exists; if it does, that file is skipped. The function
    uses the following environment variables for authentication and repository information:

      - GITHUB_USERNAME: Your GitHub username.
      - GITHUB_REPO: The name of the repository.
      - GITHUB_PAT: Your GitHub Personal Access Token for authentication.

    Parameters:
        file_paths (list of str): A list of file paths (relative to the repository root) to be created.

    Returns:
        str: A message indicating whether the files were created successfully or if there was a failure.
    """
    try:
        GITHUB_USERNAME = os.getenv("GITHUB_USERNAME")
        REPO_NAME = os.getenv("GITHUB_REPO")
        GITHUB_TOKEN = os.getenv("GITHUB_PAT")

        if not GITHUB_USERNAME or not REPO_NAME or not GITHUB_TOKEN:
            raise ValueError(
                "Missing environment variables: GITHUB_USERNAME, GITHUB_REPO, or GITHUB_PAT"
            )

        # Shared request headers; built once instead of per request.
        headers = {
            "Authorization": f"token {GITHUB_TOKEN}",
            "Accept": "application/vnd.github.v3+json",
        }

        # All created files share the same placeholder content (base64 per
        # the GitHub contents API), so encode it once outside the loop.
        encoded_content = base64.b64encode(b"#created by flock").decode()

        with httpx.Client() as client:
            for file_path in file_paths:
                GITHUB_API_URL = f"https://api.github.com/repos/{GITHUB_USERNAME}/{REPO_NAME}/contents/{file_path}"

                # Existence check: the contents endpoint returns the file's
                # SHA when the path already exists.
                response = client.get(GITHUB_API_URL, headers=headers)
                sha = response.json().get("sha", None)
                if sha:
                    print(f"Skipping {file_path}, file already exists.")
                    continue

                # Build the payload only for files we actually create.
                payload = {
                    "message": f"Creating {file_path}",
                    "content": encoded_content,
                    "branch": "main",
                }

                response = client.put(
                    GITHUB_API_URL, json=payload, headers=headers
                )

                if response.status_code in [200, 201]:
                    print(f"{file_path} successfully created!")
                else:
                    print(f"Failed to create {file_path}:", response.json())

        return "Files created successfully."

    except Exception:
        # Deliberate best-effort tool: report failure as a message rather
        # than propagating (an agent consumes the returned string).
        return "Failed to create file. Please try again later."
```

### src\flock\core\tools\markdown_tools.py

- **Lines**: 195
- **Last modified**: 2025-02-28 09:57:06

```py
import re
from typing import Any

from flock.core.logging.trace_and_logged import traced_and_logged


@traced_and_logged
def split_markdown_by_headers(
    markdown_text: str, min_header_level: int = 1, max_header_level: int = 2
) -> list[dict[str, Any]]:
    """Split Markdown text into sections at ATX ("#") headers.

    Args:
        markdown_text: The Markdown source.
        min_header_level: Smallest header level (number of "#") to split on.
        max_header_level: Largest header level to split on.

    Returns:
        A list of {"title", "content", "level"} chunks in document order.
        Text before the first matched header becomes a level-0 "Preamble"
        chunk; text with no matching headers becomes a single level-0
        "Text" chunk. An empty input yields [].
    """
    if not markdown_text:
        return []

    # Match headers whose level lies in [min_header_level, max_header_level].
    # The quantifier must appear literally as {m,n} in the compiled pattern.
    # The required whitespace after the hashes prevents deeper headers
    # (e.g. "###" when max level is 2) from matching a shorter prefix.
    header_pattern = re.compile(
        rf"^(#{{{min_header_level},{max_header_level}}})\s+(.+)$",
        re.MULTILINE,
    )

    # Find all headers
    headers = list(header_pattern.finditer(markdown_text))

    if not headers:
        return [{"title": "Text", "content": markdown_text, "level": 0}]

    chunks = []

    # Each section runs from the end of its header to the start of the next.
    for i, current_header in enumerate(headers):
        header_text = current_header.group(2).strip()
        header_level = len(current_header.group(1))

        if i < len(headers) - 1:
            next_header_start = headers[i + 1].start()
            content = markdown_text[current_header.end() : next_header_start]
        else:
            content = markdown_text[current_header.end() :]

        chunks.append(
            {
                "title": header_text,
                "content": content.strip(),
                "level": header_level,
            }
        )

    # Check if there's content before the first header
    if headers[0].start() > 0:
        preamble = markdown_text[: headers[0].start()].strip()
        if preamble:
            chunks.insert(
                0, {"title": "Preamble", "content": preamble, "level": 0}
            )

    return chunks


@traced_and_logged
def extract_code_blocks(
    markdown_text: str, language: str | None = None
) -> list[dict[str, str]]:
    """Extract fenced code blocks from Markdown text.

    Args:
        markdown_text: The Markdown source to scan.
        language: If given, only blocks whose fence language specifier is
            exactly this string are returned. If None, all blocks are
            returned and blocks without a specifier get language "text".

    Returns:
        A list of {"language": ..., "code": ...} dicts in document order.
    """
    if not markdown_text:
        return []

    # Always capture the (possibly empty) language word and the body, then
    # filter on the captured word. Interpolating the requested language into
    # the regex would prefix-match: asking for "py" would also match
    # "```python" fences and leak the trailing "thon" into the code. It
    # would also break for languages containing regex metacharacters.
    pattern = r"```(\w*)\s*([\s\S]*?)\s*```"

    blocks = []
    for match in re.finditer(pattern, markdown_text):
        fence_lang = match.group(1)
        if language is not None and fence_lang != language:
            continue
        blocks.append(
            {
                "language": fence_lang if fence_lang else "text",
                "code": match.group(2).strip(),
            }
        )

    return blocks


@traced_and_logged
def extract_links(markdown_text: str) -> list[dict[str, str]]:
    """Extract inline Markdown links as {"text": ..., "url": ...} dicts."""
    if not markdown_text:
        return []

    # Inline link syntax: [text](url). Image syntax ![alt](src) also matches
    # here (the leading "!" is simply not part of the capture).
    matches = re.findall(r"\[([^\]]+)\]\(([^)]+)\)", markdown_text)

    return [{"text": label, "url": target} for label, target in matches]


@traced_and_logged
def extract_tables(markdown_text: str) -> list[dict[str, Any]]:
    """Extract pipe-style Markdown tables.

    Returns:
        A list of {"headers": [...], "rows": [...]} dicts, where each row is
        a dict mapping header names to cell values (missing cells become "").
    """
    if not markdown_text:
        return []

    # A separator/alignment row consists only of pipes, dashes, colons and
    # whitespace, e.g. "|---|:--:|" or "| --- | --- |". A plain substring
    # test like "|--" misses separators written with spaces, which would
    # then be misparsed as data rows.
    separator_re = re.compile(r"^\|[\s:\-|]+\|$")

    tables = []
    current_table = None
    header_row = None

    for line in markdown_text.split("\n"):
        line = line.strip()

        # Table rows are indicated by starting and ending with |
        if line.startswith("|") and line.endswith("|"):
            if current_table is None:
                # First pipe line of a table is the header row.
                current_table = []
                header_row = [
                    cell.strip() for cell in line.strip("|").split("|")
                ]
            elif separator_re.match(line) and "-" in line:
                # This is the separator row, ignore it
                pass
            else:
                # Data row: map header names to cell values.
                row_data = [cell.strip() for cell in line.strip("|").split("|")]
                row_dict = {}
                for i, header in enumerate(header_row):
                    row_dict[header] = row_data[i] if i < len(row_data) else ""
                current_table.append(row_dict)
        else:
            # Any non-table line ends the current table.
            if current_table is not None:
                tables.append({"headers": header_row, "rows": current_table})
                current_table = None
                header_row = None

    # Flush a table that runs to the end of the document.
    if current_table is not None:
        tables.append({"headers": header_row, "rows": current_table})

    return tables


@traced_and_logged
def markdown_to_plain_text(markdown_text: str) -> str:
    """Strip common Markdown syntax and return an approximate plain-text rendering."""
    if not markdown_text:
        return ""

    # (pattern, replacement, flags) triples applied in order. Order matters:
    # e.g. links are rewritten to "text (url)" before code spans are
    # unwrapped, and blank-line collapsing runs last.
    substitutions = [
        (r"^#{1,6}\s+(.+)$", r"\1", re.MULTILINE),        # headers
        (r"\*\*(.*?)\*\*", r"\1", 0),                     # bold (**)
        (r"__(.*?)__", r"\1", 0),                         # bold (__)
        (r"\*(.*?)\*", r"\1", 0),                         # italic (*)
        (r"_(.*?)_", r"\1", 0),                           # italic (_)
        (r"\[(.*?)\]\((.*?)\)", r"\1 (\2)", 0),           # links -> "text (url)"
        (r"```(?:\w+)?\s*([\s\S]*?)\s*```", r"\1", 0),    # fenced code blocks
        (r"`([^`]*?)`", r"\1", 0),                        # inline code
        (r"^[\*\-\+]\s+(.+)$", r"• \1", re.MULTILINE),    # bullet points
        (r"^\d+\.\s+(.+)$", r"\1", re.MULTILINE),         # numbered lists
        (r"^>\s+(.+)$", r"\1", re.MULTILINE),             # blockquotes
        (r"<.*?>", "", 0),                                # HTML tags
        (r"\n{3,}", "\n\n", 0),                           # collapse blank runs
    ]

    text = markdown_text
    for pattern, replacement, flags in substitutions:
        text = re.sub(pattern, replacement, text, flags=flags)

    return text.strip()
```

### src\flock\core\util\hydrator.py

- **Lines**: 306
- **Last modified**: 2025-02-19 02:21:32

```py
import asyncio
import json
from typing import get_origin, get_type_hints


# -----------------------------------------------------------
# Dummy FlockAgent for demonstration:
# -----------------------------------------------------------
class FlockAgent:
    """Minimal stand-in agent used to demonstrate the hydrator."""

    def __init__(self, name, input, output, model, description):
        self.name = name
        self.input = input
        self.output = output
        self.model = model
        self.description = description

    async def evaluate(self, data: dict) -> dict:
        """Pretend LLM call.

        Parses ``self.output`` (a "name: type | desc, ..." spec) to find the
        requested keys, then fabricates placeholder values for them.
        """
        print(
            f"[FlockAgent] Evaluate called for agent {self.name} with data: {data}"
        )

        # Very naive parse of the output spec, e.g.
        # "title: str | property of MyBlogPost, budget: int | desc"
        field_names = [
            part.split(":")[0].strip()
            for part in self.output.split(",")
            if part.strip()
        ]

        # Pretend the LLM answers 42 whenever the spec mentions an int field
        # anywhere (the check is global across the whole spec, not per
        # field), placeholder strings otherwise.
        if " int" in self.output:
            return {name: 42 for name in field_names}
        return {name: f"Generated data for {name}" for name in field_names}


# -----------------------------------------------------------
# Optional: a decorator that marks a class as "flockclass"
# -----------------------------------------------------------
def flockclass(model: str):
    """Class decorator marking a class as hydratable and recording its LLM model."""

    def mark(cls):
        # The hydrator reads __flock_model__ via getattr(cls, ..., default).
        cls.__is_flockclass__ = True
        cls.__flock_model__ = model
        return cls

    return mark


# -----------------------------------------------------------
# Utility sets
# -----------------------------------------------------------
# Scalar leaf types: the hydrator fills these directly from an LLM call
# instead of recursing into them.
BASIC_TYPES = {str, int, float, bool}


# -----------------------------------------------------------
# The main hydrator that can handle:
#   - basic types (do nothing)
#   - user-defined classes (auto-fill missing fields + recurse)
#   - lists (ask LLM how many items to create + fill them)
#   - dicts (ask LLM how many key->value pairs to create + fill them)
# -----------------------------------------------------------
def hydrate_object(obj, model="gpt-4", class_name=None):
    """Recursively hydrate *obj* in place, calling an LLM for missing fields
    or structure.

    Dispatch rules:
      - None and scalar values (str/int/float/bool) are left untouched.
      - Empty lists ask the LLM how many items to create, then each item is
        hydrated recursively.
      - Empty dicts ask the LLM which keys to create, then each value is
        hydrated recursively.
      - Instances of classes with annotations get missing basic fields
        filled by one LLM call and complex fields instantiated + recursed.

    Args:
        obj: The object to hydrate (mutated in place).
        model: Default LLM model name; overridden per class by
            ``__flock_model__`` when set (see @flockclass).
        class_name: Human-readable label used to name the generated agents.
    """
    # 1) If None or basic, do nothing.
    if obj is None or isinstance(obj, (str, int, float, bool)):
        return

    # 2) Lists: optionally seed with an LLM-chosen item count, then recurse.
    if isinstance(obj, list):
        _hydrate_list(obj, model, class_name)
        return

    # 3) Dicts: optionally seed with LLM-chosen keys, then recurse.
    if isinstance(obj, dict):
        _hydrate_dict(obj, model, class_name)
        return

    # 4) User-defined class with annotations: fill its fields.
    cls = type(obj)
    if hasattr(cls, "__annotations__"):
        _hydrate_instance(obj, cls, model)
    # else: some Python object with no annotations -> nothing we can do.


def _hydrate_list(obj, model, class_name):
    """Seed an empty list via one LLM call, then hydrate each item in place."""
    if len(obj) == 0:
        # Single LLM call deciding how many items to put in.
        # In real usage, you'd use a more robust prompt.
        list_agent = FlockAgent(
            name=f"{class_name or 'list'}Generator",
            input="Generate number of items for this list",
            output="count: int | number of items to create",
            model=model,
            description="Agent that decides how many items to create in a list.",
        )
        result = asyncio.run(list_agent.evaluate({}))
        num_items = result.get("count", 0)
        # The element type is not available at runtime, so fall back to
        # placeholder strings; a typed approach would need extra metadata.
        for i in range(num_items):
            obj.append(f"Generated item {i + 1}")

    # Now recursively fill each item.
    for i in range(len(obj)):
        hydrate_object(
            obj[i],
            model=model,
            class_name=f"{class_name or 'list'}[item={i}]",
        )


def _hydrate_dict(obj, model, class_name):
    """Seed an empty dict with LLM-chosen keys, then hydrate each value."""
    if len(obj) == 0:
        # Single LLM call that returns a comma-separated list of keys.
        dict_agent = FlockAgent(
            name=f"{class_name or 'dict'}Generator",
            input="Generate keys for this dict",
            output="keys: str | comma-separated list of keys to create",
            model=model,
            description="Agent that decides which keys to create in a dict.",
        )
        result = asyncio.run(dict_agent.evaluate({}))
        keys_str = result.get("keys", "")
        keys = [k.strip() for k in keys_str.split(",") if k.strip()]

        # Placeholder values; they can be hydrated further below.
        for k in keys:
            obj[k] = f"Placeholder for {k}"

    # Now recursively fill each value.
    for key, val in obj.items():
        hydrate_object(
            val,
            model=model,
            class_name=f"{class_name or 'dict'}[key={key}]",
        )


def _hydrate_instance(obj, cls, model):
    """Fill missing fields of an annotated class instance and recurse.

    Missing basic fields are filled with one combined LLM call; complex
    fields are instantiated when None and then hydrated recursively.
    """
    # Prefer the model recorded by @flockclass, else the caller's default.
    used_model = getattr(cls, "__flock_model__", model)

    # Partition annotated fields into missing-basic vs complex.
    type_hints = get_type_hints(cls)
    missing_basic_fields = []
    complex_fields = []
    for field_name, field_type in type_hints.items():
        value = getattr(obj, field_name, None)
        if value is None:
            if _is_basic_type(field_type):
                missing_basic_fields.append(field_name)
            else:
                complex_fields.append(field_name)
        elif not _is_basic_type(field_type):
            # Already has a value, but complex types still need recursion.
            complex_fields.append(field_name)

    # One LLM call fills all missing basic fields at once.
    if missing_basic_fields:
        input_str = f"Existing data: {json.dumps(obj.__dict__, default=str)}"
        output_fields_str = []
        for bf in missing_basic_fields:
            bf_type = type_hints[bf]
            bf_type_name = (
                bf_type.__name__
                if hasattr(bf_type, "__name__")
                else str(bf_type)
            )
            desc = f"property of a class named {cls.__name__}"
            output_fields_str.append(f"{bf}: {bf_type_name} | {desc}")

        agent = FlockAgent(
            name=cls.__name__,
            input=input_str,
            output=", ".join(output_fields_str),
            model=used_model,
            description=f"Agent for {cls.__name__}",
        )
        result = asyncio.run(agent.evaluate(obj.__dict__))
        for bf in missing_basic_fields:
            if bf in result:
                setattr(obj, bf, result[bf])

    # Instantiate missing complex fields, then recurse into all of them.
    for cf in complex_fields:
        cf_type = type_hints[cf]
        cf_value = getattr(obj, cf, None)
        if cf_value is None:
            cf_value = _instantiate_type(cf_type)
            setattr(obj, cf, cf_value)
        hydrate_object(
            cf_value, model=used_model, class_name=cf_type.__name__
        )


# -----------------------------------------------------------
# Helper: is a type "basic"?
# -----------------------------------------------------------
def _is_basic_type(t):
    """Return True when *t* is a scalar leaf type (a member of BASIC_TYPES).

    Typing constructs such as Optional[...] / Union[...] are not unwrapped
    here and therefore count as non-basic.
    """
    return t in BASIC_TYPES


# -----------------------------------------------------------
# Helper: instantiate a type (list, dict, or user-defined)
# -----------------------------------------------------------
def _instantiate_type(t):
    origin = get_origin(t)
    if origin is list:
        return []
    if origin is dict:
        return {}

    # If it's a built-in basic type, return None (we fill it from LLM).
    if t in BASIC_TYPES:
        return None

    # If it's a user-defined class
    if isinstance(t, type):
        try:
            # Attempt parameterless init
            return t()
        except:
            # Or try __new__
            try:
                return t.__new__(t)
            except:
                return None
    return None


# -----------------------------------------------------------
# Example classes
# -----------------------------------------------------------
@flockclass("gpt-4")
class LongContent:
    # Leaf object: both fields are basic types filled directly by the LLM.
    title: str
    content: str


@flockclass("gpt-4")
class MyBlogPost:
    title: str
    headers: str
    # Dict of section-key -> LongContent. Starts as None; the hydrator
    # creates an empty dict and lets the LLM decide the keys.
    content: dict[str, LongContent]


@flockclass("gpt-4")
class MyProjectPlan:
    # Top-level demo object hydrated in the __main__ block below.
    project_idea: str
    budget: int
    title: str
    content: MyBlogPost


# -----------------------------------------------------------
# Demo
# -----------------------------------------------------------
if __name__ == "__main__":
    # Build a partially-filled plan; the hydrator fills in the rest.
    plan = MyProjectPlan()
    plan.project_idea = "a declarative agent framework"
    plan.budget = 100000

    # content is None by default, so the hydrator will create MyBlogPost
    # and fill it in. MyBlogPost.content is a dict[str, LongContent],
    # also None -> becomes an empty dict -> we let the LLM decide the keys.

    hydrate_object(plan, model="gpt-4", class_name="MyProjectPlan")

    # Dump the hydrated object graph, one nesting level at a time.
    print("\n--- MyProjectPlan hydrated ---")
    for k, v in plan.__dict__.items():
        print(f"{k} = {v}")
    if plan.content:
        print("\n--- MyBlogPost hydrated ---")
        for k, v in plan.content.__dict__.items():
            print(f"  {k} = {v}")
            # The blog post's "content" is a dict of section -> LongContent.
            if k == "content" and isinstance(v, dict):
                print("    (keys) =", list(v.keys()))
                for sub_k, sub_val in v.items():
                    print(f"    {sub_k} -> {sub_val}")
                    if isinstance(sub_val, LongContent):
                        print(
                            f"       -> LongContent fields: {sub_val.__dict__}"
                        )

### src\flock\core\util\input_resolver.py

- **Lines**: 175
- **Last modified**: 2025-02-18 03:20:40

```py
"""Utility functions for resolving input keys to their corresponding values."""

from flock.core.context.context import FlockContext


def get_callable_members(obj):
    """Collect the public callables (methods/functions) defined on *obj*.

    Members whose name starts with an underscore (private/special methods)
    are excluded. Returns a list of the callable objects themselves.
    """
    import inspect

    return [
        value
        for name, value in inspect.getmembers(obj)
        if inspect.isroutine(value) and not name.startswith("_")
    ]


def split_top_level(s: str) -> list[str]:
    """Split a string on commas that are not enclosed within brackets, parentheses, or quotes.

    Iterates over the string while tracking the bracket nesting level and
    quoted regions, splitting only on commas at nesting level zero outside
    quotes. Backslash-escaped characters inside quotes (including escaped
    quote characters such as ``\\"``) do not terminate the quoted region.

    Args:
        s (str): The input string.

    Returns:
        List[str]: A list of substrings split at top-level commas, each
        stripped of surrounding whitespace. An empty input yields [].
    """
    parts: list[str] = []
    current: list[str] = []
    level = 0
    in_quote = False
    quote_char = ""
    escaped = False  # True when the previous in-quote char was a backslash

    for char in s:
        # Inside a quote: copy chars verbatim; only an *unescaped* matching
        # quote character ends the quoted region.
        if in_quote:
            current.append(char)
            if escaped:
                escaped = False
            elif char == "\\":
                escaped = True
            elif char == quote_char:
                in_quote = False
            continue

        # Check for the start of a quote.
        if char in ('"', "'"):
            in_quote = True
            quote_char = char
            current.append(char)
            continue

        # Track nesting.
        if char in "([{":
            level += 1
        elif char in ")]}":
            level -= 1

        # Split on commas if not nested.
        if char == "," and level == 0:
            parts.append("".join(current).strip())
            current = []
        else:
            current.append(char)

    if current:
        parts.append("".join(current).strip())
    return parts


def _parse_keys(keys: list[str]) -> list[str]:
    """Split a comma‐separated string and strip any type annotations.

    For example, "a, b: list[str]" becomes ["a", "b"].
    """
    res_keys = []
    for key in keys:
        if "|" in key:
            key = key.split("|")[0].strip()
        if ":" in key:
            key = key.split(":")[0].strip()
        res_keys.append(key)
    return res_keys


def top_level_to_keys(s: str) -> list[str]:
    """Split *s* at top-level commas and strip annotations/descriptions from each part."""
    return _parse_keys(split_top_level(s))


def resolve_inputs(
    input_spec: str, context: FlockContext, previous_agent_name: str
) -> dict:
    """Build a dictionary of inputs based on the input specification string and the provided context.

    The lookup rules are:
      - "context" (case-insensitive): returns the entire context.
      - "context.property": returns an attribute from the context.
      - "def.agent_name": returns the agent definition for the given agent.
      - "agent_name": returns the most up-to-date record from the given agent's history.
      - "agent_name.property": returns the value of a property from the state variable keyed by "agent_name.property".
      - "property": searches the history for the most recent value of a property.
      - Otherwise, if no matching value is found, fallback to the FLOCK_INITIAL_INPUT.

    -> Recommendations:
        - prefix your agent variables with the agent name or a short handle to avoid conflicts.
        eg. agent name: "idea_agent", variable: "ia_idea" (ia = idea agent)
        - or set hand off mode to strict to avoid conflicts.
        with strict mode, the agent will only accept inputs from the previous agent.

    Args:
        input_spec: Comma-separated input keys (e.g., "query" or "agent_name.property").
        context: A FlockContext instance.
        previous_agent_name: Name of the agent that ran before this one
            (currently unused by the lookup logic below).

    Returns:
        A dictionary mapping each input key to its resolved value. Keys with
        more than one dot (e.g. "a.b.c") match neither branch and are
        silently omitted from the result.
    """
    # Split the spec at top-level commas, then strip ": type | description"
    # annotations so only bare key names remain.
    split_input = split_top_level(input_spec)
    keys = _parse_keys(split_input)
    inputs = {}

    for key in keys:
        split_key = key.split(".")

        # Case 1: A single key
        if len(split_key) == 1:
            # Special keyword: "context"
            if key.lower() == "context":
                inputs[key] = context
                continue

            # Try to get a historic record for an agent (if any)
            historic_records = context.get_agent_history(key)
            if historic_records:
                # You may choose to pass the entire record or just its data.
                inputs[key] = historic_records[0].data
                continue

            # Fallback to the most recent value in the state
            historic_value = context.get_most_recent_value(key)
            if historic_value is not None:
                inputs[key] = historic_value
                continue

            # Fallback to the initial input, stored under the "flock." prefix.
            inputs[key] = context.get_variable("flock." + key)

        # Case 2: A compound key (e.g., "agent_name.property" or "context.property")
        elif len(split_key) == 2:
            entity_name, property_name = split_key

            if entity_name.lower() == "context":
                # Try to fetch the attribute from the context
                inputs[key] = getattr(context, property_name, None)
                continue

            if entity_name.lower() == "def":
                # Return the agent definition for the given property name
                inputs[key] = context.get_agent_definition(property_name)
                continue

            # Otherwise, attempt to look up a state variable with the key "agent_name.property"
            inputs[key] = context.get_variable(f"{entity_name}.{property_name}")
            continue
        # NOTE(review): keys with 3+ dot segments fall through both branches
        # above and are dropped — confirm this is intended.

    return inputs
```

### src\flock\evaluators\declarative\declarative_evaluator.py

- **Lines**: 54
- **Last modified**: 2025-04-02 23:12:31

```py
from typing import Any

from pydantic import Field

from flock.core.flock_agent import FlockAgent
from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
from flock.core.mixin.prompt_parser import PromptParserMixin


class DeclarativeEvaluatorConfig(FlockEvaluatorConfig):
    """Configuration for the DeclarativeEvaluator."""

    # Optional override forwarded to _select_task; None uses its default
    # task-selection logic.
    agent_type_override: str | None = None
    # LLM identifier passed to _configure_language_model.
    model: str | None = "openai/gpt-4o"
    # Forwarded as use_cache to _configure_language_model.
    use_cache: bool = True
    # Sampling temperature for the language model; 0.0 by default.
    temperature: float = 0.0
    # Upper bound on generated tokens per completion.
    max_tokens: int = 4096


class DeclarativeEvaluator(
    FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin
):
    """Evaluator that turns an agent's declarative input/output spec into a
    DSPy signature and executes it through a DSPy task.
    """

    config: DeclarativeEvaluatorConfig = Field(
        default_factory=DeclarativeEvaluatorConfig,
        description="Evaluator configuration",
    )

    async def evaluate(
        self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
    ) -> dict[str, Any]:
        """Build the DSPy signature for *agent*, run the selected task on
        *inputs*, and return the processed result dictionary.
        """
        # Derive the DSPy signature class from the agent's declared I/O spec.
        signature_cls = self.create_dspy_signature_class(
            agent.name,
            agent.description,
            f"{agent.input} -> {agent.output}",
        )

        # Point DSPy at the configured language model before selecting a task.
        cfg = self.config
        self._configure_language_model(
            model=cfg.model,
            use_cache=cfg.use_cache,
            temperature=cfg.temperature,
            max_tokens=cfg.max_tokens,
        )

        # Choose the concrete task (optionally overridden) and execute it.
        task = self._select_task(
            signature_cls,
            agent_type_override=cfg.agent_type_override,
            tools=tools,
        )
        raw_result = task(**inputs)
        return self._process_result(raw_result, inputs)
```

### src\flock\evaluators\memory\azure_search_evaluator.py

- **Lines**: 0
- **Last modified**: 2025-02-28 23:13:59

```py

```

### src\flock\evaluators\memory\memory_evaluator.py

- **Lines**: 88
- **Last modified**: 2025-02-28 09:57:06

```py
from typing import Any, Literal

from pydantic import Field

from flock.core.flock_agent import FlockAgent
from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
from flock.core.mixin.prompt_parser import PromptParserMixin
from flock.modules.memory.memory_module import MemoryModule, MemoryModuleConfig


class MemoryEvaluatorConfig(FlockEvaluatorConfig):
    """Configuration for the MemoryEvaluator.

    Mirrors the fields of MemoryModuleConfig so the evaluator can build a
    MemoryModule on the fly with identical settings.
    """

    folder_path: str = Field(
        default="concept_memory/",
        description="Directory where memory file and concept graph will be saved",
    )
    concept_graph_file: str = Field(
        default="concept_graph.png",
        description="Base filename for the concept graph image",
    )

    file_path: str | None = Field(
        default="agent_memory.json", description="Path to save memory file"
    )
    memory_mapping: str | None = Field(
        default=None, description="Memory mapping configuration"
    )
    similarity_threshold: float = Field(
        default=0.5, description="Threshold for semantic similarity"
    )
    max_length: int = Field(
        default=1000, description="Max length of memory entry before splitting"
    )
    save_after_update: bool = Field(
        default=True, description="Whether to save memory after each update"
    )
    splitting_mode: Literal["summary", "semantic", "characters", "none"] = (
        Field(default="none", description="Mode to split memory content")
    )
    enable_read_only_mode: bool = Field(
        default=False, description="Whether to enable read only mode"
    )
    number_of_concepts_to_extract: int = Field(
        default=3, description="Number of concepts to extract from the memory"
    )


class MemoryEvaluator(FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin):
    """Evaluator backed by the concept-graph memory store.

    Despite inheriting the DSPy mixins, evaluation delegates entirely to a
    freshly constructed MemoryModule: a "query" input searches memory and a
    "data" input writes to it.
    """

    config: MemoryEvaluatorConfig = Field(
        default_factory=MemoryEvaluatorConfig,
        description="Evaluator configuration",
    )

    async def evaluate(
        self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
    ) -> dict[str, Any]:
        """Search or update agent memory.

        If inputs contain "query", memory is searched and the matches are
        returned under "facts". If inputs contain "data", the inputs are
        added to memory. When both keys are present, the "data" branch runs
        second and its message replaces the "facts" result.
        """
        result = {}
        # Build a throwaway MemoryModule mirroring this evaluator's config;
        # constructing it loads (or creates) the memory store from disk.
        memory_module = MemoryModule(
            name=self.name,
            config=MemoryModuleConfig(
                folder_path=self.config.folder_path,
                concept_graph_file=self.config.concept_graph_file,
                file_path=self.config.file_path,
                memory_mapping=self.config.memory_mapping,
                similarity_threshold=self.config.similarity_threshold,
                max_length=self.config.max_length,
                save_after_update=self.config.save_after_update,
                splitting_mode=self.config.splitting_mode,
                enable_read_only_mode=self.config.enable_read_only_mode,
                number_of_concepts_to_extract=self.config.number_of_concepts_to_extract,
            ),
        )

        if "query" in inputs:
            facts = await memory_module.search_memory(agent, inputs)
            result = {"facts": facts}

        if "data" in inputs:
            await memory_module.add_to_memory(agent, inputs)
            result = {"message": "Data added to memory"}
        return result
```

### src\flock\evaluators\natural_language\natural_language_evaluator.py

- **Lines**: 66
- **Last modified**: 2025-02-24 03:21:51

```py
from typing import Any

from flock.core.flock_evaluator import FlockEvaluator


class NaturalLanguageEvaluator(FlockEvaluator):
    """Evaluator that uses natural language prompting.

    setup() builds a prompt template from the input/output schemas and
    creates an AsyncOpenAI client; evaluate() sends the formatted inputs to
    the chat API and parses the reply as JSON when possible.
    """

    name: str = "natural_language"
    # Prompt template containing an {input} placeholder; built in setup().
    prompt_template: str = ""
    client: Any = None  # AsyncOpenAI client, created in setup()

    async def setup(self, input_schema: str, output_schema: str) -> None:
        """Set up prompt template and client.

        Args:
            input_schema: Description of the expected input format.
            output_schema: Description of the required output format.
        """
        from openai import AsyncOpenAI

        # Create prompt template
        self.prompt_template = f"""
        You are an AI assistant that processes inputs and generates outputs.
        
        Input Format:
        {input_schema}
        
        Required Output Format:
        {output_schema}
        
        Please process the following input and provide output in the required format:
        {{input}}
        """

        # Set up client
        self.client = AsyncOpenAI()

    async def evaluate(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Evaluate using natural language.

        Raises:
            RuntimeError: If setup() has not been called first.
        """
        import json

        if not self.client:
            raise RuntimeError("Evaluator not set up")

        # Format input for prompt
        input_str = "\n".join(f"{k}: {v}" for k, v in inputs.items())

        # Get completion.
        # NOTE(review): assumes the FlockEvaluator base supplies `config`
        # with model/temperature/max_tokens — confirm against the base class.
        response = await self.client.chat.completions.create(
            model=self.config.model,
            messages=[
                {
                    "role": "user",
                    "content": self.prompt_template.format(input=input_str),
                }
            ],
            temperature=self.config.temperature,
            max_tokens=self.config.max_tokens,
        )

        # Parse the response as JSON; fall back to wrapping the raw text.
        # Bug fix: the json import is hoisted out of the try block, and
        # TypeError is caught as well so a None message content (possible
        # from the API) no longer escapes as an unhandled exception.
        content = response.choices[0].message.content
        try:
            return json.loads(content)
        except (json.JSONDecodeError, TypeError):
            return {"result": content}

    async def cleanup(self) -> None:
        """Close client."""
        if self.client:
            await self.client.close()
```

### src\flock\evaluators\zep\zep_evaluator.py

- **Lines**: 57
- **Last modified**: 2025-02-26 06:10:08

```py
from typing import Any

from pydantic import Field

from flock.core.flock_agent import FlockAgent
from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
from flock.core.mixin.dspy_integration import DSPyIntegrationMixin
from flock.core.mixin.prompt_parser import PromptParserMixin
from flock.modules.zep.zep_module import ZepModule, ZepModuleConfig


class ZepEvaluatorConfig(FlockEvaluatorConfig):
    """Configuration for the ZepEvaluator."""

    # Base URL of the Zep server.
    zep_url: str = "http://localhost:8000"
    # API key sent to the Zep server.
    zep_api_key: str = "apikey"
    min_fact_rating: float = Field(
        default=0.7, description="Minimum rating for facts to be considered"
    )


class ZepEvaluator(FlockEvaluator, DSPyIntegrationMixin, PromptParserMixin):
    """Evaluator backed by a Zep memory server.

    Despite inheriting the DSPy mixins, evaluation delegates to a ZepModule:
    a "query" input searches Zep memory and a "data" input writes to it.
    """

    config: ZepEvaluatorConfig = Field(
        default_factory=ZepEvaluatorConfig,
        description="Evaluator configuration",
    )

    async def evaluate(
        self, agent: FlockAgent, inputs: dict[str, Any], tools: list[Any]
    ) -> dict[str, Any]:
        """Search or update Zep memory.

        If inputs contain "query", memory is searched and the matches are
        returned under "facts". If inputs contain "data", the data is added
        to memory. When both keys are present, the "data" branch runs second
        and its message replaces the "facts" result.
        """
        result = {}
        # Read/write module configured from this evaluator's settings.
        zep = ZepModule(
            name=self.name,
            config=ZepModuleConfig(
                zep_api_key=self.config.zep_api_key,
                zep_url=self.config.zep_url,
                min_fact_rating=self.config.min_fact_rating,
                enable_read=True,
                enable_write=True,
            ),
        )
        client = zep.get_client()
        # NOTE(review): unlike MemoryEvaluator, these module calls are not
        # awaited — confirm ZepModule.search_memory/add_to_memory are
        # synchronous, otherwise coroutine objects are stored instead of
        # results.
        if "query" in inputs:
            query = inputs["query"]
            facts = zep.search_memory(query, client)
            result = {"facts": facts}

        if "data" in inputs:
            data = inputs["data"]
            zep.add_to_memory(data, client)
            result = {"message": "Data added to memory"}
        return result
```

### src\flock\modules\azure-search\azure_search_module.py

- **Lines**: 0
- **Last modified**: 2025-02-28 23:13:59

```py

```

### src\flock\modules\callback\callback_module.py

- **Lines**: 89
- **Last modified**: 2025-03-03 12:28:42

```py
"""Callback module for handling agent lifecycle hooks."""

from collections.abc import Awaitable, Callable
from typing import Any

from pydantic import Field

from flock.core import FlockModule, FlockModuleConfig
from flock.core.context.context import FlockContext


class CallbackModuleConfig(FlockModuleConfig):
    """Configuration for callback module.

    All callbacks are optional async callables; each receives the agent as
    its first argument (see CallbackModule for the exact call shapes).
    """

    # Awaited as initialize_callback(agent, inputs).
    initialize_callback: (
        Callable[[Any, dict[str, Any]], Awaitable[None]] | None
    ) = Field(
        default=None,
        description="Optional callback function for initialization",
    )
    # Awaited as evaluate_callback(agent, inputs); its return value replaces
    # the inputs passed on to evaluation.
    evaluate_callback: (
        Callable[[Any, dict[str, Any]], Awaitable[dict[str, Any]]] | None
    ) = Field(
        default=None, description="Optional callback function for evaluate"
    )
    # Awaited as terminate_callback(agent, inputs, result).
    terminate_callback: (
        Callable[[Any, dict[str, Any], dict[str, Any]], Awaitable[None]] | None
    ) = Field(
        default=None, description="Optional callback function for termination"
    )
    # Awaited as on_error_callback(agent, error, inputs).
    on_error_callback: (
        Callable[[Any, Exception, dict[str, Any]], Awaitable[None]] | None
    ) = Field(
        default=None,
        description="Optional callback function for error handling",
    )


class CallbackModule(FlockModule):
    """Module that forwards agent lifecycle events to user-supplied callbacks.

    Every hook is a no-op unless the matching callback is set in the config.
    """

    name: str = "callbacks"
    config: CallbackModuleConfig = Field(
        default_factory=CallbackModuleConfig,
        description="Callback module configuration",
    )

    async def pre_initialize(
        self,
        agent: Any,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Forward the initialize event to the configured callback, if any."""
        hook = self.config.initialize_callback
        if not hook:
            return
        await hook(agent, inputs)

    async def pre_evaluate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Let the evaluate callback transform the inputs; otherwise pass
        them through unchanged.
        """
        hook = self.config.evaluate_callback
        if not hook:
            return inputs
        return await hook(agent, inputs)

    async def pre_terminate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Forward the terminate event to the configured callback, if any."""
        hook = self.config.terminate_callback
        if not hook:
            return
        await hook(agent, inputs, result)

    async def on_error(
        self,
        agent: Any,
        error: Exception,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Forward the error to the configured error callback, if any."""
        hook = self.config.on_error_callback
        if not hook:
            return
        await hook(agent, error, inputs)
```

### src\flock\modules\memory\memory_module.py

- **Lines**: 407
- **Last modified**: 2025-04-02 23:12:31

```py
import json
import uuid
from datetime import datetime
from typing import Any, Literal

from pydantic import Field
from tqdm import tqdm

from flock.core.context.context import FlockContext

# if TYPE_CHECKING:
#     from flock.core import FlockAgent
from flock.core.flock_agent import FlockAgent
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.logging.logging import get_logger
from flock.modules.memory.memory_parser import MemoryMappingParser
from flock.modules.memory.memory_storage import FlockMemoryStore, MemoryEntry

logger = get_logger("memory")


class MemoryModuleConfig(FlockModuleConfig):
    """Configuration for the MemoryModule.

    Controls where the memory file and concept-graph image are written and
    how memory entries are split, matched, and persisted.
    """

    folder_path: str = Field(
        default="concept_memory/",
        description="Directory where memory file and concept graph will be saved",
    )
    concept_graph_file: str = Field(
        default="concept_graph.png",
        description="Base filename for the concept graph image",
    )
    file_path: str | None = Field(
        default="agent_memory.json", description="Path to save memory file"
    )
    memory_mapping: str | None = Field(
        default=None, description="Memory mapping configuration"
    )
    similarity_threshold: float = Field(
        default=0.5, description="Threshold for semantic similarity"
    )
    max_length: int = Field(
        default=1000, description="Max length of memory entry before splitting"
    )
    save_after_update: bool = Field(
        default=True, description="Whether to save memory after each update"
    )
    splitting_mode: Literal["summary", "semantic", "characters", "none"] = (
        Field(default="none", description="Mode to split memory content")
    )
    enable_read_only_mode: bool = Field(
        default=False, description="Whether to enable read only mode"
    )
    number_of_concepts_to_extract: int = Field(
        default=3, description="Number of concepts to extract from the memory"
    )


class MemoryModule(FlockModule):
    """Module that adds memory capabilities to a Flock agent.

    Wraps a FlockMemoryStore (embedding search plus a concept graph) and
    hooks into the agent lifecycle: relevant memories are injected into the
    inputs before evaluation, and results are stored after evaluation.
    """

    name: str = "memory"
    config: MemoryModuleConfig = Field(
        default_factory=MemoryModuleConfig,
        description="Memory module configuration",
    )
    # Backing store; loaded from disk in __init__ (and again in initialize()
    # if still unset).
    memory_store: FlockMemoryStore | None = None
    # Operations parsed from config.memory_mapping; defaults to a single
    # semantic op. NOTE(review): class-level mutable default — safe only if
    # FlockModule is a pydantic model that copies field defaults per
    # instance; confirm.
    memory_ops: list[Any] = []

    def __init__(self, name: str, config: MemoryModuleConfig):
        """Load the memory store from disk and parse the memory mapping."""
        super().__init__(name=name, config=config)
        self.memory_store = FlockMemoryStore.load_from_file(
            self.get_memory_filename(name)
        )
        self.memory_ops = (
            MemoryMappingParser().parse(self.config.memory_mapping)
            if self.config.memory_mapping
            else [{"type": "semantic"}]
        )

    async def initialize(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Initialize memory store if needed."""
        if not self.memory_store:
            self.memory_store = FlockMemoryStore.load_from_file(
                self.get_memory_filename(self.name)
            )
        # Re-parse the mapping in case the config changed since __init__.
        self.memory_ops = (
            MemoryMappingParser().parse(self.config.memory_mapping)
            if self.config.memory_mapping
            else [{"type": "semantic"}]
        )
        logger.debug(f"Initialized memory module for agent {agent.name}")

    async def pre_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Check memory before evaluation and inject any relevant context."""
        if not self.memory_store:
            return inputs

        # search_memory returns the inputs dict, augmented with a "context"
        # key when relevant memories were found.
        inputs = await self.search_memory(agent, inputs)

        if "context" in inputs:
            # Extend the agent's declared input signature so the injected
            # context is visible to the evaluator.
            # NOTE(review): this appends on every evaluation that finds
            # context — repeated runs may duplicate the suffix; verify.
            agent.input = (
                agent.input + ", context: list | context with more information"
            )

        return inputs

    def get_memory_filename(self, module_name: str) -> str:
        """Generate the full file path for the memory file.

        Ensures the configured folder exists and prefixes the base filename
        with the module name, e.g. "concept_memory/memory_agent_memory.json".
        """
        folder = self.config.folder_path
        if not folder.endswith(("/", "\\")):
            folder += "/"
        import os

        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        # Determine base filename and extension from file_path config
        if self.config.file_path:
            file_name = self.config.file_path.rsplit("/", 1)[-1].rsplit(
                "\\", 1
            )[-1]
            if "." in file_name:
                base, ext = file_name.rsplit(".", 1)
                ext = f".{ext}"
            else:
                base, ext = file_name, ""
        else:
            base, ext = "agent_memory", ".json"
        return f"{folder}{module_name}_{base}{ext}"

    def get_concept_graph_filename(self, module_name: str) -> str:
        """Generate the full file path for the concept graph image.

        Embeds a millisecond timestamp so every save produces a new file.
        """
        folder = self.config.folder_path
        if not folder.endswith(("/", "\\")):
            folder += "/"
        import os

        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        # Use timestamp to create a unique filename
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]
        if self.config.concept_graph_file:
            file_name = self.config.concept_graph_file.rsplit("/", 1)[
                -1
            ].rsplit("\\", 1)[-1]
            if "." in file_name:
                base, ext = file_name.rsplit(".", 1)
                ext = f".{ext}"
            else:
                base, ext = file_name, ""
        else:
            base, ext = "concept_graph", ".png"
        return f"{folder}{module_name}_{base}_{timestamp}{ext}"

    async def search_memory(
        self, agent: FlockAgent, query: dict[str, Any]
    ) -> dict[str, Any] | list[str]:
        """Search memory for the query.

        Returns the query dict, augmented with a "context" key when relevant
        memories are found (returned unchanged on lookup failure). Returns
        an empty list only when no memory store is attached.
        """
        if not self.memory_store:
            return []

        try:
            input_text = json.dumps(query)
            query_embedding = self.memory_store.compute_embedding(input_text)
            concepts = await self._extract_concepts(
                agent, input_text, self.config.number_of_concepts_to_extract
            )

            # Run every configured memory operation and pool the results.
            memory_results = []
            for op in self.memory_ops:
                if op["type"] == "semantic":
                    semantic_results = self.memory_store.retrieve(
                        query_embedding,
                        concepts,
                        similarity_threshold=self.config.similarity_threshold,
                    )
                    memory_results.extend(semantic_results)
                elif op["type"] == "exact":
                    exact_results = self.memory_store.exact_match(query)
                    memory_results.extend(exact_results)

            context: list[dict[str, Any]] = []
            if memory_results:
                for result in memory_results:
                    context.append(
                        {"content": result.content, "concepts": result.concepts}
                    )

                logger.debug(
                    f"Found {len(memory_results)} relevant memories",
                    agent=agent.name,
                )
                query["context"] = context

            return query

        except Exception as e:
            # Best-effort: a failed lookup must not break evaluation.
            logger.warning(f"Memory retrieval failed: {e}", agent=agent.name)
            return query

    async def add_to_memory(
        self, agent: FlockAgent, data: dict[str, Any]
    ) -> None:
        """Add data to memory by splitting it into chunks and storing each."""
        if not self.memory_store:
            return

        try:
            chunks = await self._get_chunks(agent, data, None)
            await self._store_chunks(agent, chunks)
        except Exception as e:
            # Best-effort: a failed write must not break evaluation.
            logger.warning(f"Memory storage failed: {e}", agent=agent.name)

    async def post_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Store results in memory after evaluation."""
        if not self.memory_store:
            return result

        try:
            chunks = await self._get_chunks(agent, inputs, result)
            await self._store_chunks(agent, chunks)
        except Exception as e:
            logger.warning(f"Memory storage failed: {e}", agent=agent.name)

        return result

    async def terminate(
        self,
        agent: Any,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Save memory store if configured."""
        if self.config.save_after_update and self.memory_store:
            self.save_memory()

    async def _extract_concepts(
        self, agent: FlockAgent, text: str, number_of_concepts: int = 3
    ) -> set[str]:
        """Extract concepts using the agent's LLM capabilities.

        Known concepts from the concept graph are offered to the LLM so it
        can reuse them instead of inventing near-duplicates.
        """
        existing_concepts = set()
        if self.memory_store and self.memory_store.concept_graph:
            existing_concepts = set(
                self.memory_store.concept_graph.graph.nodes()
            )

        input_signature = "text: str | Text to analyze"
        if existing_concepts:
            input_signature += ", existing_concepts: list[str] | Already known concepts that might apply"

        concept_signature = agent.create_dspy_signature_class(
            f"{agent.name}_concept_extractor",
            "Extract key concepts from text",
            f"{input_signature} -> concepts: list[str] | Max {number_of_concepts} key concepts all lower case",
        )

        # Hard-coded LM settings: cached, temperature 0, 8192 max tokens.
        agent._configure_language_model(agent.model, True, 0.0, 8192)
        predictor = agent._select_task(concept_signature, "Completion")
        result_obj = predictor(
            text=text,
            existing_concepts=list(existing_concepts)
            if existing_concepts
            else None,
        )
        concept_list = getattr(result_obj, "concepts", [])
        return set(concept_list)

    async def _summarize_mode(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
    ) -> str:
        """Extract information chunks using summary mode.

        Returns the LLM-extracted facts joined into a single newline-separated
        string (stored as one memory entry).
        """
        split_signature = agent.create_dspy_signature_class(
            f"{agent.name}_splitter",
            "Extract a list of potentially needed data and information for future reference",
            """
            content: str | The content to split
            -> chunks: list[str] | List of data and information for future reference
            """,
        )
        agent._configure_language_model(agent.model, True, 0.0, 8192)
        splitter = agent._select_task(split_signature, "Completion")
        full_text = json.dumps(inputs) + json.dumps(result)
        split_result = splitter(content=full_text)
        return "\n".join(split_result.chunks)

    async def _semantic_splitter_mode(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
    ) -> str | list[dict[str, str]]:
        """Extract information chunks using semantic mode.

        Returns a list of {title: content} dicts produced by the LLM.
        """
        split_signature = agent.create_dspy_signature_class(
            f"{self.name}_splitter",
            "Split content into meaningful, self-contained chunks",
            """
            content: str | The content to split
            -> chunks: list[dict[str,str]] | List of chunks as key-value pairs - keys are a short title and values are the chunk content
            """,
        )
        agent._configure_language_model(agent.model, True, 0.0, 8192)
        splitter = agent._select_task(split_signature, "Completion")
        full_text = json.dumps(inputs) + (json.dumps(result) if result else "")
        split_result = splitter(content=full_text)
        return split_result.chunks

    async def _character_splitter_mode(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
    ) -> list[str]:
        """Extract information chunks by splitting text into fixed character lengths."""
        full_text = json.dumps(inputs) + (json.dumps(result) if result else "")
        return [
            full_text[i : i + self.config.max_length]
            for i in range(0, len(full_text), self.config.max_length)
        ]

    async def _get_chunks(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any] | None,
    ) -> str | list[str]:
        """Get memory chunks based on the configured splitting mode.

        Raises:
            ValueError: If the configured splitting mode is unknown.
        """
        mode = self.config.splitting_mode
        if mode == "semantic":
            return await self._semantic_splitter_mode(agent, inputs, result)
        elif mode == "summary":
            return await self._summarize_mode(agent, inputs, result)
        elif mode == "characters":
            return await self._character_splitter_mode(agent, inputs, result)
        elif mode == "none":
            # Single chunk: concatenated JSON of inputs (and result, if any).
            return (
                json.dumps(inputs) + json.dumps(result)
                if result
                else json.dumps(inputs)
            )
        else:
            raise ValueError(f"Unknown splitting mode: {mode}")

    async def _store_chunk(self, agent: FlockAgent, chunk: str) -> None:
        """Store a single chunk in memory.

        Extracts concepts, embeds the chunk, and appends a MemoryEntry;
        saves to disk immediately when save_after_update is set.
        """
        chunk_concepts = await self._extract_concepts(
            agent, chunk, self.config.number_of_concepts_to_extract
        )
        entry = MemoryEntry(
            id=str(uuid.uuid4()),
            content=chunk,
            embedding=self.memory_store.compute_embedding(chunk).tolist(),
            concepts=chunk_concepts,
            timestamp=datetime.now(),
        )
        self.memory_store.add_entry(entry)
        if self.config.save_after_update:
            self.save_memory()
        logger.debug(
            "Stored interaction in memory",
            agent=agent.name,
            entry_id=entry.id,
            concepts=chunk_concepts,
        )

    async def _store_chunks(
        self, agent: FlockAgent, chunks: str | list[str]
    ) -> None:
        """Store chunks (single or multiple) in memory.

        NOTE(review): semantic splitting yields dict chunks, not str —
        confirm compute_embedding/_extract_concepts accept them.
        """
        if isinstance(chunks, str):
            await self._store_chunk(agent, chunks)
        elif isinstance(chunks, list):
            for chunk in tqdm(chunks, desc="Storing chunks in memory"):
                await self._store_chunk(agent, chunk)

    def save_memory(self) -> None:
        """Save memory store to file.

        Also renders the concept graph image next to it (timestamped, so a
        new image is written on every save).
        """
        if self.memory_store and self.config.file_path:
            json_str = self.memory_store.model_dump_json()
            filename = self.get_memory_filename(self.name)
            with open(filename, "w") as file:
                file.write(json_str)
            self.memory_store.concept_graph.save_as_image(
                self.get_concept_graph_filename(self.name)
            )

### src\flock\modules\memory\memory_parser.py

- **Lines**: 125
- **Last modified**: 2025-02-28 09:57:06

```py
"""Parser for memory mapping declarations into executable operations."""

import re
from typing import Any

from flock.modules.memory.memory_storage import (
    CombineOperation,
    EnrichOperation,
    ExactOperation,
    FilterOperation,
    MemoryOperation,
    MemoryScope,
    SemanticOperation,
    SortOperation,
)


class MemoryMappingParser:
    """Parses memory mapping declarations into executable operations."""

    def parse(self, mapping: str) -> "list[MemoryOperation]":
        """Parse a memory mapping string into operations.

        Example mappings:
        "topic -> memory.semantic(threshold=0.9) | memory.exact -> output"
        "query -> memory.semantic(scope='global') | memory.filter(recency='7d') | memory.sort(by='relevance')"

        Stages are separated by "|"; each stage of the form
        "inputs -> memory.op(params)" becomes one operation object. Stages
        without a "memory." target (e.g. a trailing "-> output") and
        unknown operation names are skipped.
        """
        operations: "list[MemoryOperation]" = []
        stages = [s.strip() for s in mapping.split("|")]

        for stage in stages:
            if "->" not in stage:
                continue

            # Split only on the first arrow so extra arrows within a stage
            # do not raise ValueError.
            inputs, op_spec = stage.split("->", 1)
            inputs = [i.strip() for i in inputs.split(",")]

            if "memory." not in op_spec:
                continue

            # Extract operation name and parameters
            match = re.match(r"memory\.(\w+)(?:\((.*)\))?", op_spec.strip())
            if not match:
                continue

            op_name, params_str = match.groups()
            params = self._parse_params(params_str or "")

            # Create the appropriate operation object. Bug fix: an unknown
            # operation name previously left `operation` unbound, raising
            # NameError at the append (or silently re-appending the previous
            # stage's operation); unknown names are now skipped explicitly.
            operation = None
            if op_name == "semantic":
                operation = SemanticOperation(
                    threshold=params.get("threshold", 0.8),
                    scope=params.get("scope", MemoryScope.BOTH),
                    max_results=params.get("max_results", 10),
                )
            elif op_name == "exact":
                operation = ExactOperation(
                    keys=inputs, scope=params.get("scope", MemoryScope.BOTH)
                )
            elif op_name == "enrich":
                operation = EnrichOperation(
                    tools=params.get("tools", []),
                    strategy=params.get("strategy", "comprehensive"),
                    scope=params.get("scope", MemoryScope.BOTH),
                )
            elif op_name == "filter":
                operation = FilterOperation(
                    recency=params.get("recency"),
                    relevance=params.get("relevance"),
                    metadata=params.get("metadata", {}),
                )
            elif op_name == "sort":
                operation = SortOperation(
                    by=params.get("by", "relevance"),
                    ascending=params.get("ascending", False),
                )
            elif op_name == "combine":
                operation = CombineOperation(
                    weights=params.get(
                        "weights", {"semantic": 0.7, "exact": 0.3}
                    )
                )

            if operation is not None:
                operations.append(operation)

        return operations

    def _parse_params(self, params_str: str) -> dict[str, Any]:
        """Parse a parameters string into a dictionary.

        Handles:
        - Quoted strings: threshold='high'
        - Numbers: threshold=0.9
        - Lists: tools=['web_search', 'extract_numbers']
        - Dictionaries: weights={'semantic': 0.7, 'exact': 0.3}

        Values are parsed with ast.literal_eval (literals only — replaces
        the previous eval(), which could execute expressions); anything
        that is not a Python literal is kept as a plain string.
        """
        import ast

        if not params_str:
            return {}

        params: dict[str, Any] = {}
        for pair in self._split_top_level(params_str):
            if "=" not in pair:
                continue
            key, value = pair.split("=", 1)
            key = key.strip()
            value = value.strip()

            try:
                # Safely evaluate literals (numbers, strings, lists, dicts).
                value = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                # Not a literal: treat as a bare string.
                value = value.strip("'\"")

            params[key] = value

        return params

    @staticmethod
    def _split_top_level(s: str) -> list[str]:
        """Split *s* on commas that are outside quotes and outside any
        (), [] or {} nesting, so list/dict parameter values stay intact.
        (Bug fix: the previous regex split lists like ['a', 'b'] apart.)
        """
        parts: list[str] = []
        current: list[str] = []
        depth = 0
        quote = None
        for ch in s:
            if quote:
                current.append(ch)
                if ch == quote:
                    quote = None
            elif ch in "\"'":
                quote = ch
                current.append(ch)
            elif ch in "([{":
                depth += 1
                current.append(ch)
            elif ch in ")]}":
                depth -= 1
                current.append(ch)
            elif ch == "," and depth == 0:
                parts.append("".join(current))
                current = []
            else:
                current.append(ch)
        if current:
            parts.append("".join(current))
        return parts
```

### src\flock\modules\memory\memory_storage.py

- **Lines**: 736
- **Last modified**: 2025-02-24 03:21:51

```py
"""Flock memory storage with short-term and long-term memory, concept graph, and clustering.

Based on concept graph spreading activation and embedding-based semantic search.
"""

import json
from datetime import datetime
from enum import Enum
from typing import Any, Literal

import networkx as nx
import numpy as np
from networkx.readwrite import json_graph
from opentelemetry import trace
from pydantic import BaseModel, Field, PrivateAttr

# Import SentenceTransformer for production-grade embeddings.
from sentence_transformers import SentenceTransformer

# Import the Flock logger.
from flock.core.logging.logging import get_logger

tracer = trace.get_tracer(__name__)
logger = get_logger("memory")


class MemoryScope(Enum):
    """Scope a memory operation applies to: agent-local, global, or both."""

    LOCAL = "local"
    GLOBAL = "global"
    BOTH = "both"


class MemoryOperation(BaseModel):
    """Base class for memory operations."""

    # Discriminator string; subclasses narrow this to a Literal value.
    type: str
    # Which memory scope the operation targets (defaults to both).
    scope: MemoryScope = MemoryScope.BOTH


class CombineOperation(MemoryOperation):
    """Combine results from multiple operations using weighted scoring."""

    type: Literal["combine"] = "combine"
    # Relative weight per result source; defaults favor semantic matches.
    weights: dict[str, float] = Field(
        default_factory=lambda: {"semantic": 0.7, "exact": 0.3}
    )


class SemanticOperation(MemoryOperation):
    """Semantic search operation."""

    type: Literal["semantic"] = "semantic"
    # Minimum cosine-similarity score for a match.
    threshold: float = 0.5
    # Cap on the number of results returned.
    max_results: int = 10
    # Optional recency window, e.g., "7d", "24h"; None disables the filter.
    recency_filter: str | None = None  # e.g., "7d", "24h"


class ExactOperation(MemoryOperation):
    """Exact matching operation."""

    type: Literal["exact"] = "exact"
    # Keys whose values must match exactly for an entry to qualify.
    keys: list[str] = Field(default_factory=list)


class ChunkOperation(MemoryOperation):
    """Operation for handling chunked entries."""

    type: Literal["chunk"] = "chunk"
    # Whether chunks should be reassembled into the full original entry.
    reconstruct: bool = True


class EnrichOperation(MemoryOperation):
    """Enrich memory with tool results."""

    type: Literal["enrich"] = "enrich"
    # Names of tools whose results are folded into memory (required).
    tools: list[str]
    # Enrichment strategy; "comprehensive" is the default trade-off.
    strategy: Literal["comprehensive", "quick", "validated"] = "comprehensive"


class FilterOperation(MemoryOperation):
    """Filter memory results."""

    type: Literal["filter"] = "filter"
    # Optional recency window, e.g., "7d"; None disables the filter.
    recency: str | None = None
    # Optional minimum relevance score; None disables the filter.
    relevance: float | None = None
    # Arbitrary metadata key/value constraints.
    metadata: dict[str, Any] = Field(default_factory=dict)


class SortOperation(MemoryOperation):
    """Sort memory results."""

    type: Literal["sort"] = "sort"
    # Sort key; one of relevance, recency, or access_count.
    by: Literal["relevance", "recency", "access_count"] = "relevance"
    # Sort direction; False means highest/newest first.
    ascending: bool = False


class MemoryEntry(BaseModel):
    """A single memory entry."""

    # Unique identifier for the entry.
    id: str
    # Raw text content stored in memory.
    content: str
    # Embedding vector used for semantic search; None until computed.
    embedding: list[float] | None = None
    # Creation time; drives recency decay during retrieval.
    timestamp: datetime = Field(default_factory=datetime.now)
    # Retrieval count; entries accessed often are promoted to long-term memory.
    access_count: int = Field(default=0)
    # Concept tags that get linked into the concept graph.
    concepts: set[str] = Field(default_factory=set)
    # Score multiplier: boosted when retrieved, dampened otherwise.
    decay_factor: float = Field(default=1.0)


class MemoryGraph(BaseModel):
    """Graph representation of concept relationships.

    The graph is stored as a JSON string for serialization, while a private
    attribute holds the actual NetworkX graph.
    """

    # JSON representation using the node-link format with explicit
    # edges="links" to avoid NetworkX deprecation warnings.
    graph_json: str = Field(
        default_factory=lambda: json.dumps(
            json_graph.node_link_data(nx.Graph(), edges="links")
        )
    )
    # Private attribute for the actual NetworkX graph (not serialized).
    _graph: nx.Graph = PrivateAttr()

    def __init__(self, **data):
        """Initialize the MemoryGraph with a NetworkX graph from JSON data.

        Falls back to an empty graph if the stored JSON cannot be parsed.
        """
        super().__init__(**data)
        try:
            data_graph = json.loads(self.graph_json)
            self._graph = json_graph.node_link_graph(data_graph, edges="links")
            logger.debug(
                f"MemoryGraph initialized from JSON with {len(self._graph.nodes())} nodes."
            )
        except Exception as e:
            logger.error(f"Failed to load MemoryGraph from JSON: {e}")
            self._graph = nx.Graph()

    @property
    def graph(self) -> nx.Graph:
        """Provides access to the internal NetworkX graph."""
        return self._graph

    def update_graph_json(self) -> None:
        """Update the JSON representation based on the current state of the graph."""
        self.graph_json = json.dumps(
            json_graph.node_link_data(self._graph, edges="links")
        )
        logger.debug("MemoryGraph JSON updated.")

    def add_concepts(self, concepts: set[str]) -> None:
        """Add a set of concepts to the graph and update their associations.

        Every pair of concepts in the set gets an edge; repeated co-occurrence
        increments the edge weight. The JSON mirror is refreshed afterwards.
        """
        logger.debug(f"Adding concepts: {list(concepts)}")
        for concept in concepts:
            self._graph.add_node(concept)
        for c1 in concepts:
            for c2 in concepts:
                if c1 != c2:
                    if self._graph.has_edge(c1, c2):
                        self._graph[c1][c2]["weight"] += 1
                    else:
                        self._graph.add_edge(c1, c2, weight=1)
        self.update_graph_json()

    def spread_activation(
        self, initial_concepts: set[str], decay_factor: float = 0.5
    ) -> dict[str, float]:
        """Spread activation through the concept graph.

        Args:
            initial_concepts: The starting set of concepts.
            decay_factor: How much the activation decays at each step.

        Returns:
            A dictionary mapping each concept to its activation level.

        NOTE(review): if ``decay_factor * weight >= 1`` on a cycle, the
        neighbor activation can keep increasing and this loop may not
        terminate — confirm expected edge-weight ranges.
        """
        logger.debug(f"Spreading activation from concepts: {initial_concepts}")
        activated = {concept: 1.0 for concept in initial_concepts}
        frontier = list(initial_concepts)

        while frontier:
            current = frontier.pop(0)
            current_activation = activated[current]
            for neighbor in self._graph.neighbors(current):
                weight = self._graph[current][neighbor]["weight"]
                new_activation = current_activation * decay_factor * weight
                # Only propagate when the neighbor's activation improves.
                if (
                    neighbor not in activated
                    or activated[neighbor] < new_activation
                ):
                    activated[neighbor] = new_activation
                    frontier.append(neighbor)

        logger.debug(f"Activation levels: {activated}")
        return activated

    def save_as_image(self, filename: str = "memory_graph.png") -> None:
        """Visualize the concept graph and save it as a PNG image with improved readability.

        This method uses matplotlib to create a clear and readable visualization by:
        - Using a larger figure size
        - Implementing better node spacing
        - Adding adjustable text labels
        - Using a more visually appealing color scheme
        - Adding edge weight visualization

        Args:
            filename: The path (including .png) where the image will be saved.
        """
        import matplotlib

        # Use a non-interactive backend so this works headless.
        matplotlib.use("Agg")
        import matplotlib.pyplot as plt

        # Fixed: log message previously contained the literal text
        # "(unknown)" instead of interpolating the target filename.
        logger.info(f"Saving MemoryGraph visualization to '{filename}'")

        if self._graph.number_of_nodes() == 0:
            logger.warning("MemoryGraph is empty; skipping image creation.")
            return

        try:
            # Create a larger figure with higher DPI
            plt.figure(figsize=(16, 12), dpi=100)

            # Use Kamada-Kawai layout for better node distribution
            pos = nx.kamada_kawai_layout(self._graph)

            # Calculate node sizes based on degree
            node_degrees = dict(self._graph.degree())
            node_sizes = [
                2000 * (1 + node_degrees[node] * 0.2)
                for node in self._graph.nodes()
            ]

            # Calculate edge weights for width and transparency
            edge_weights = [
                d["weight"] for (u, v, d) in self._graph.edges(data=True)
            ]
            max_weight = max(edge_weights) if edge_weights else 1
            edge_widths = [1 + (w / max_weight) * 3 for w in edge_weights]
            edge_alphas = [0.2 + (w / max_weight) * 0.8 for w in edge_weights]

            # Draw the network with custom styling
            # Nodes
            nx.draw_networkx_nodes(
                self._graph,
                pos,
                node_size=node_sizes,
                node_color="#5fa4d4",  # Lighter blue
                alpha=0.7,
                edgecolors="white",
            )

            # Edges with varying width and transparency
            for (u, v, d), width, alpha in zip(
                self._graph.edges(data=True), edge_widths, edge_alphas
            ):
                nx.draw_networkx_edges(
                    self._graph,
                    pos,
                    edgelist=[(u, v)],
                    width=width,
                    alpha=alpha,
                    edge_color="#2c3e50",  # Darker blue-grey
                )

            # Add labels with better positioning and background
            labels = nx.get_node_attributes(self._graph, "name") or {
                node: node for node in self._graph.nodes()
            }
            label_pos = {
                node: (x, y + 0.02) for node, (x, y) in pos.items()
            }  # Slightly offset labels

            # Draw labels with white background for better readability
            for node, (x, y) in label_pos.items():
                plt.text(
                    x,
                    y,
                    labels[node],
                    horizontalalignment="center",
                    verticalalignment="center",
                    fontsize=8,
                    fontweight="bold",
                    bbox=dict(
                        facecolor="white", edgecolor="none", alpha=0.7, pad=2.0
                    ),
                )

            # Add edge weight labels for significant weights
            edge_labels = nx.get_edge_attributes(self._graph, "weight")
            significant_edges = {
                (u, v): w
                for (u, v), w in edge_labels.items()
                if w > max_weight * 0.3
            }
            if significant_edges:
                nx.draw_networkx_edge_labels(
                    self._graph,
                    pos,
                    edge_labels=significant_edges,
                    font_size=6,
                    bbox=dict(facecolor="white", edgecolor="none", alpha=0.7),
                )

            # Improve layout
            plt.title("Memory Concept Graph", fontsize=16, pad=20)
            plt.axis("off")

            # Add padding and save
            plt.tight_layout(pad=2.0)
            plt.savefig(filename, bbox_inches="tight", facecolor="white")
            plt.close()

            # Fixed: same "(unknown)" placeholder bug as above.
            logger.info(f"MemoryGraph image saved successfully to '{filename}'")

        except Exception as e:
            logger.error(f"Failed to save MemoryGraph image: {e}")
            plt.close()


class FlockMemoryStore(BaseModel):
    """Enhanced Flock memory storage with short-term and long-term memory.

    including embedding-based semantic search, exact matching, and result combination.
    """

    # Recently added entries; candidates for clustering and promotion.
    short_term: list[MemoryEntry] = Field(default_factory=list)
    # Entries promoted after frequent access (access_count > 10).
    long_term: list[MemoryEntry] = Field(default_factory=list)
    # Co-occurrence graph of concept tags.
    concept_graph: MemoryGraph = Field(default_factory=MemoryGraph)
    # K-means cluster id -> member entries.
    clusters: dict[int, list[MemoryEntry]] = Field(default_factory=dict)
    # Instead of np.ndarray, store centroids as lists of floats (serializable).
    cluster_centroids: dict[int, list[float]] = Field(default_factory=dict)
    # The embedding model is stored as a private attribute, as it's not serializable.
    _embedding_model: SentenceTransformer | None = PrivateAttr(default=None)

    @staticmethod
    def _entry_from_dict(entry: dict[str, Any]) -> MemoryEntry:
        """Build a MemoryEntry from its JSON-dict representation.

        Centralizes the deserialization previously duplicated three times
        in load_from_file (short-term, long-term, and cluster entries).
        """
        return MemoryEntry(
            id=entry["id"],
            content=entry["content"],
            embedding=entry.get("embedding"),
            timestamp=datetime.fromisoformat(entry["timestamp"]),
            access_count=entry.get("access_count", 0),
            concepts=set(entry.get("concepts", [])),
            decay_factor=entry.get("decay_factor", 1.0),
        )

    @classmethod
    def load_from_file(cls, file_path: str | None = None) -> "FlockMemoryStore":
        """Load a memory store from a JSON file.

        Args:
            file_path: Path to the JSON file containing the serialized memory store.
                      If None, returns an empty memory store.

        Returns:
            FlockMemoryStore: A new memory store instance with loaded data.

        Raises:
            FileNotFoundError: If the specified file doesn't exist
            JSONDecodeError: If the file contains invalid JSON
            ValueError: If the JSON structure is invalid
        """
        if file_path is None:
            logger.debug("No file path provided, creating new memory store")
            return cls()

        try:
            logger.info(f"Loading memory store from {file_path}")
            with open(file_path) as f:
                data = json.load(f)

            # Initialize a new store
            store = cls()

            # Load short-term memory entries
            store.short_term = [
                cls._entry_from_dict(entry)
                for entry in data.get("short_term", [])
            ]

            # Load long-term memory entries
            store.long_term = [
                cls._entry_from_dict(entry)
                for entry in data.get("long_term", [])
            ]

            # Load concept graph
            if "concept_graph" in data:
                graph_data = json.loads(data["concept_graph"]["graph_json"])
                store.concept_graph = MemoryGraph(
                    graph_json=json.dumps(graph_data)
                )

            # Load clusters (keys arrive as strings from JSON; restore ints)
            if "clusters" in data:
                store.clusters = {
                    int(k): [cls._entry_from_dict(entry) for entry in v]
                    for k, v in data["clusters"].items()
                }

            # Load cluster centroids
            if "cluster_centroids" in data:
                store.cluster_centroids = {
                    int(k): v for k, v in data["cluster_centroids"].items()
                }

            # Initialize the embedding model
            store._embedding_model = None  # Will be lazy-loaded when needed

            logger.info(
                f"Successfully loaded memory store with "
                f"{len(store.short_term)} short-term and "
                f"{len(store.long_term)} long-term entries"
            )
            return store

        except FileNotFoundError:
            logger.warning(
                f"Memory file {file_path} not found, creating new store"
            )
            return cls()
        except json.JSONDecodeError as e:
            logger.error(f"Invalid JSON in memory file: {e}")
            raise
        except Exception as e:
            logger.error(f"Error loading memory store: {e}")
            raise ValueError(f"Failed to load memory store: {e}")

    @classmethod
    def merge_stores(
        cls, stores: list["FlockMemoryStore"]
    ) -> "FlockMemoryStore":
        """Merge multiple memory stores into a single store.

        Args:
            stores: List of FlockMemoryStore instances to merge

        Returns:
            FlockMemoryStore: A new memory store containing merged data
        """
        merged = cls()

        # Merge short-term and long-term memories
        for store in stores:
            merged.short_term.extend(store.short_term)
            merged.long_term.extend(store.long_term)

        # Merge concept graphs (nx.compose unions nodes and edges)
        merged_graph = nx.Graph()
        for store in stores:
            if store.concept_graph and store.concept_graph.graph:
                merged_graph = nx.compose(
                    merged_graph, store.concept_graph.graph
                )

        merged.concept_graph = MemoryGraph(
            graph_json=json.dumps(
                nx.node_link_data(merged_graph, edges="links")
            )
        )

        # Recompute clusters for the merged data
        if merged.short_term:
            merged._update_clusters()

        return merged

    def get_embedding_model(self) -> SentenceTransformer:
        """Initialize and return the SentenceTransformer model.

        Uses "all-MiniLM-L6-v2" as the default model.

        Raises:
            RuntimeError: If the model cannot be loaded.
        """
        if self._embedding_model is None:
            try:
                logger.debug(
                    "Loading SentenceTransformer model 'all-MiniLM-L6-v2'."
                )
                self._embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
            except Exception as e:
                logger.error(f"Failed to load embedding model: {e}")
                raise RuntimeError(f"Failed to load embedding model: {e}")
        return self._embedding_model

    def compute_embedding(self, text: str) -> np.ndarray:
        """Compute and return the embedding for the provided text as a NumPy array.

        Raises:
            RuntimeError: If the underlying model fails to encode the text.
        """
        # Braces are escaped so the snippet can't break downstream formatting.
        logger.debug(
            f"Computing embedding for text: {text[:100].replace('{', '{{').replace('}', '}}')}..."
        )  # Log first 100 chars for brevity.
        model = self.get_embedding_model()
        try:
            embedding = model.encode(text, convert_to_numpy=True)
            return embedding
        except Exception as e:
            logger.error(f"Error computing embedding: {e}")
            raise RuntimeError(f"Error computing embedding: {e}")

    def _calculate_similarity(
        self, query_embedding: np.ndarray, entry_embedding: np.ndarray
    ) -> float:
        """Compute the cosine similarity between two embeddings.

        Returns a float between 0 and 1 (0.0 for zero-norm inputs).

        Raises:
            RuntimeError: If the computation fails unexpectedly.
        """
        try:
            norm_query = np.linalg.norm(query_embedding)
            norm_entry = np.linalg.norm(entry_embedding)
            if norm_query == 0 or norm_entry == 0:
                return 0.0
            similarity = float(
                np.dot(query_embedding, entry_embedding)
                / (norm_query * norm_entry)
            )
            return similarity
        except Exception as e:
            logger.error(f"Error computing similarity: {e}")
            raise RuntimeError(f"Error computing similarity: {e}")

    def exact_match(self, inputs: dict[str, Any]) -> list[MemoryEntry]:
        """Perform an exact key-based lookup in short-term memory.

        Returns entries where all provided key-value pairs exist in the
        entry's ``inputs`` mapping.

        NOTE(review): ``MemoryEntry`` declares no ``inputs`` field, so plain
        entries previously raised ``AttributeError`` here. Entries without an
        ``inputs`` mapping are now skipped — confirm where ``inputs`` is
        expected to come from.
        """
        logger.debug(f"Performing exact match lookup with inputs: {inputs}")
        matches = []
        for entry in self.short_term:
            entry_inputs = getattr(entry, "inputs", None)
            if entry_inputs is None:
                continue
            if all(item in entry_inputs.items() for item in inputs.items()):
                matches.append(entry)
        logger.debug(f"Exact match found {len(matches)} entries.")
        return matches

    def combine_results(
        self, inputs: dict[str, Any], weights: dict[str, float]
    ) -> dict[str, Any]:
        """Combine semantic and exact match results using the provided weights.

        Args:
            inputs: Input dictionary to search memory.
            weights: Dictionary with keys "semantic" and "exact" for weighting.

        Returns:
            A dictionary with "combined_results" as a sorted list of memory entries.
        """
        logger.debug(
            f"Combining results for inputs: {inputs} with weights: {weights}"
        )
        query_text = " ".join(str(value) for value in inputs.values())
        query_embedding = self.compute_embedding(query_text)

        semantic_matches = self.retrieve(
            query_embedding, set(inputs.values()), similarity_threshold=0.8
        )
        exact_matches = self.exact_match(inputs)

        # Merge both result sets keyed by entry id so overlapping entries
        # accumulate both a semantic and an exact score.
        combined: dict[str, dict[str, Any]] = {}
        for entry in semantic_matches:
            if entry.embedding is None:
                continue
            semantic_score = self._calculate_similarity(
                query_embedding, np.array(entry.embedding)
            )
            combined[entry.id] = {
                "entry": entry,
                "semantic_score": semantic_score * weights.get("semantic", 0.7),
                "exact_score": 0.0,
            }
        for entry in exact_matches:
            if entry.id in combined:
                combined[entry.id]["exact_score"] = 1.0 * weights.get(
                    "exact", 0.3
                )
            else:
                combined[entry.id] = {
                    "entry": entry,
                    "semantic_score": 0.0,
                    "exact_score": 1.0 * weights.get("exact", 0.3),
                }
        results: list[tuple[float, MemoryEntry]] = []
        for data in combined.values():
            total_score = data["semantic_score"] + data["exact_score"]
            results.append((total_score, data["entry"]))
        results.sort(key=lambda x: x[0], reverse=True)
        logger.debug(f"Combined results count: {len(results)}")
        return {"combined_results": [entry for score, entry in results]}

    def add_entry(self, entry: MemoryEntry) -> None:
        """Add a new memory entry to short-term memory, update the concept graph and clusters.

        and check for promotion to long-term memory.
        """
        with tracer.start_as_current_span("memory.add_entry") as span:
            logger.info(f"Adding memory entry with id: {entry.id}")
            span.set_attribute("entry.id", entry.id)
            self.short_term.append(entry)
            self.concept_graph.add_concepts(entry.concepts)
            self._update_clusters()
            # Frequently-accessed entries graduate to long-term memory.
            if entry.access_count > 10:
                self._promote_to_long_term(entry)

    def _promote_to_long_term(self, entry: MemoryEntry) -> None:
        """Promote an entry to long-term memory (idempotent)."""
        logger.info(f"Promoting entry {entry.id} to long-term memory.")
        if entry not in self.long_term:
            self.long_term.append(entry)

    def retrieve(
        self,
        query_embedding: np.ndarray,
        query_concepts: set[str],
        similarity_threshold: float = 0.8,
        exclude_last_n: int = 0,
    ) -> list[MemoryEntry]:
        """Retrieve memory entries using semantic similarity and concept-based activation.

        NOTE(review): ``query_concepts`` is accepted but currently unused in
        this implementation — confirm whether concept activation should feed
        into the score.
        """
        with tracer.start_as_current_span("memory.retrieve") as span:
            logger.debug("Retrieving memory entries...")
            results = []
            current_time = datetime.now()
            decay_rate = 0.0001
            # Epsilon avoids division by zero for zero-norm queries.
            norm_query = query_embedding / (
                np.linalg.norm(query_embedding) + 1e-8
            )

            entries = (
                self.short_term[:-exclude_last_n]
                if exclude_last_n > 0
                else self.short_term
            )

            for entry in entries:
                if entry.embedding is None:
                    continue

                # Calculate base similarity (cosine via normalized dot product)
                entry_embedding = np.array(entry.embedding)
                norm_entry = entry_embedding / (
                    np.linalg.norm(entry_embedding) + 1e-8
                )
                similarity = float(np.dot(norm_query, norm_entry))

                # Calculate modifiers
                time_diff = (current_time - entry.timestamp).total_seconds()
                decay = np.exp(-decay_rate * time_diff)
                # Add 1 to base score so new entries aren't zeroed out
                reinforcement = 1.0 + np.log1p(entry.access_count)

                # Calculate final score
                final_score = (
                    similarity * decay * reinforcement * entry.decay_factor
                )

                span.add_event(
                    "memory score",
                    attributes={
                        "entry_id": entry.id,
                        "similarity": similarity,
                        "final_score": final_score,
                    },
                )

                # If base similarity passes threshold, include in results
                if similarity >= similarity_threshold:
                    results.append((final_score, entry))

            # Update access counts and decay for retrieved entries
            for _, entry in results:
                entry.access_count += 1
                self._update_decay_factors(entry)

            # Sort by final score
            results.sort(key=lambda x: x[0], reverse=True)
            logger.debug(f"Retrieved {len(results)} memory entries.")
            return [entry for score, entry in results]

    def _update_decay_factors(self, retrieved_entry: MemoryEntry) -> None:
        """Update decay factors: increase for the retrieved entry and decrease for others."""
        logger.debug(f"Updating decay factor for entry {retrieved_entry.id}")
        retrieved_entry.decay_factor *= 1.1
        for entry in self.short_term:
            if entry != retrieved_entry:
                entry.decay_factor *= 0.9

    def _update_clusters(self) -> None:
        """Update memory clusters using k-means clustering on entry embeddings."""
        logger.debug("Updating memory clusters...")
        if len(self.short_term) < 2:
            logger.debug("Not enough entries for clustering.")
            return

        valid_entries = [
            entry for entry in self.short_term if entry.embedding is not None
        ]
        if not valid_entries:
            logger.debug(
                "No valid entries with embeddings found for clustering."
            )
            return

        embeddings = [np.array(entry.embedding) for entry in valid_entries]
        embeddings_matrix = np.vstack(embeddings)

        # Imported lazily so sklearn is only required when clustering runs.
        from sklearn.cluster import KMeans

        n_clusters = min(10, len(embeddings))
        kmeans = KMeans(n_clusters=n_clusters, random_state=42)
        labels = kmeans.fit_predict(embeddings_matrix)

        self.clusters.clear()
        self.cluster_centroids.clear()

        for i in range(n_clusters):
            cluster_entries = [
                entry
                for entry, label in zip(valid_entries, labels)
                if label == i
            ]
            self.clusters[i] = cluster_entries
            # Convert the centroid (np.ndarray) to a list of floats.
            self.cluster_centroids[i] = kmeans.cluster_centers_[i].tolist()
        logger.debug(f"Clustering complete with {n_clusters} clusters.")
```

### src\flock\modules\output\output_module.py

- **Lines**: 220
- **Last modified**: 2025-04-02 23:12:31

```py
"""Output formatting and display functionality for agents."""

import json
import os
from datetime import datetime
from typing import TYPE_CHECKING, Any

from pydantic import Field

if TYPE_CHECKING:
    from flock.core import FlockAgent

from flock.core.context.context import FlockContext
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.logging.formatters.themed_formatter import (
    ThemedAgentResultFormatter,
)
from flock.core.logging.formatters.themes import OutputTheme
from flock.core.logging.logging import get_logger
from flock.core.serialization.json_encoder import FlockJSONEncoder

# from flock.core.logging.formatters.themes import OutputTheme
# from flock.core.logging.logging import get_logger
# from flock.core.serialization.json_encoder import FlockJSONEncoder

logger = get_logger("module.output")


class OutputModuleConfig(FlockModuleConfig):
    """Configuration for output formatting and display.

    Each field carries its own ``description``; the values are consumed by
    ``OutputModule`` when rendering and persisting agent results.
    """

    theme: OutputTheme = Field(
        default=OutputTheme.afterglow, description="Theme for output formatting"
    )
    render_table: bool = Field(
        default=False, description="Whether to render output as a table"
    )
    max_length: int = Field(
        default=1000, description="Maximum length for displayed output"
    )
    wait_for_input: bool = Field(
        default=False,
        description="Whether to wait for user input after display",
    )
    write_to_file: bool = Field(
        default=False, description="Whether to save output to file"
    )
    output_dir: str = Field(
        default="output/", description="Directory for saving output files"
    )
    truncate_long_values: bool = Field(
        default=True, description="Whether to truncate long values in display"
    )
    show_metadata: bool = Field(
        default=True, description="Whether to show metadata like timestamps"
    )
    format_code_blocks: bool = Field(
        default=True,
        description="Whether to apply syntax highlighting to code blocks",
    )
    # Maps output keys to formatter names; resolved as self._format_<name>.
    custom_formatters: dict[str, str] = Field(
        default_factory=dict,
        description="Custom formatters for specific output types",
    )
    no_output: bool = Field(
        default=False,
        description="Whether to suppress output",
    )
    print_context: bool = Field(
        default=False,
        description="Whether to print the context",
    )


class OutputModule(FlockModule):
    """Module that handles output formatting and display.

    Formats agent results with a ThemedAgentResultFormatter, optionally
    applies per-key custom formatters and code-block formatting, and can
    persist each result as a timestamped JSON file.
    """

    name: str = "output"
    config: OutputModuleConfig = Field(
        default_factory=OutputModuleConfig, description="Output configuration"
    )

    def __init__(self, name: str, config: OutputModuleConfig):
        super().__init__(name=name, config=config)
        if self.config.write_to_file:
            os.makedirs(self.config.output_dir, exist_ok=True)
        self._formatter = self._build_formatter()

    def _build_formatter(self) -> ThemedAgentResultFormatter:
        """Create a formatter from the current configuration.

        Single construction point so ``__init__`` and ``update_theme``
        stay in sync (previously ``update_theme`` passed an extra
        ``write_to_file`` argument that ``__init__`` did not).
        """
        return ThemedAgentResultFormatter(
            theme=self.config.theme,
            max_length=self.config.max_length,
            render_table=self.config.render_table,
            wait_for_input=self.config.wait_for_input,
        )

    def _format_value(self, value: Any, key: str) -> str:
        """Format a single value based on its type and configuration.

        A custom formatter registered for ``key`` takes precedence; it is
        looked up as a ``_format_<name>`` method on this module.
        """
        if key in self.config.custom_formatters:
            formatter_name = self.config.custom_formatters[key]
            if hasattr(self, f"_format_{formatter_name}"):
                return getattr(self, f"_format_{formatter_name}")(value)

        # Default formatting based on type.
        if isinstance(value, dict):
            return self._format_dict(value)
        elif isinstance(value, list):
            return self._format_list(value)
        elif isinstance(value, str) and self.config.format_code_blocks:
            return self._format_potential_code(value)
        else:
            return str(value)

    def _format_dict(self, d: dict[str, Any], indent: int = 0) -> str:
        """Format a dictionary with proper indentation.

        Values longer than ``config.max_length`` are truncated with an
        ellipsis when ``truncate_long_values`` is enabled.
        """
        lines = []
        for k, v in d.items():
            formatted_value = self._format_value(v, k)
            if (
                self.config.truncate_long_values
                and len(formatted_value) > self.config.max_length
            ):
                formatted_value = (
                    formatted_value[: self.config.max_length] + "..."
                )
            lines.append(f"{'  ' * indent}{k}: {formatted_value}")
        return "\n".join(lines)

    def _format_list(self, lst: list[Any]) -> str:
        """Format a list as dash-prefixed lines."""
        return "\n".join(f"- {self._format_value(item, '')}" for item in lst)

    def _format_potential_code(self, text: str) -> str:
        """Format text that might contain fenced code blocks.

        Currently re-emits each block unchanged; this is the hook point
        for adding real syntax highlighting later.
        """
        import re

        def replace_code_block(match):
            code = match.group(2)
            lang = match.group(1) if match.group(1) else ""
            # Here you could add syntax highlighting.
            return f"```{lang}\n{code}\n```"

        # Replace code blocks with formatted versions.
        text = re.sub(
            r"```(\w+)?\n(.*?)\n```", replace_code_block, text, flags=re.DOTALL
        )
        return text

    def _save_output(self, agent_name: str, result: dict[str, Any]) -> None:
        """Save output as ``{agent}_output_{timestamp}.json`` if configured.

        Failures are logged as warnings and never propagate, so display
        still succeeds when the disk write does not.
        """
        if not self.config.write_to_file:
            return

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{agent_name}_output_{timestamp}.json"
        filepath = os.path.join(self.config.output_dir, filename)

        output_data = {
            "agent": agent_name,
            "timestamp": timestamp,
            "output": result,
        }

        if self.config.show_metadata:
            output_data["metadata"] = {
                "formatted_at": datetime.now().isoformat(),
                "theme": self.config.theme.value,
                "max_length": self.config.max_length,
            }

        try:
            with open(filepath, "w") as f:
                json.dump(output_data, f, indent=2, cls=FlockJSONEncoder)
        except Exception as e:
            logger.warning(f"Failed to save output to file: {e}")

    async def post_evaluate(
        self,
        agent: "FlockAgent",
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Format and display the agent result; optionally persist it.

        Returns the (possibly context-augmented) result unchanged so the
        module chain can continue.
        """
        logger.debug("Formatting and displaying output")
        if self.config.no_output:
            return result
        if self.config.print_context:
            result["context"] = context
        # Display the result using the formatter.
        self._formatter.display_result(result, agent.name)

        # Save to file if configured.
        self._save_output(agent.name, result)

        return result

    def update_theme(self, new_theme: OutputTheme) -> None:
        """Update the output theme and rebuild the formatter."""
        self.config.theme = new_theme
        self._formatter = self._build_formatter()

    def add_custom_formatter(self, key: str, formatter_name: str) -> None:
        """Register a ``_format_<formatter_name>`` method for output key."""
        self.config.custom_formatters[key] = formatter_name

    def get_output_files(self) -> list[str]:
        """Get list of saved output files.

        Bug fix: files are written as ``{agent}_output_{timestamp}.json``
        (see _save_output), so the previous ``endswith("_output.json")``
        filter never matched any saved file.
        """
        if not self.config.write_to_file:
            return []

        return [
            f
            for f in os.listdir(self.config.output_dir)
            if "_output_" in f and f.endswith(".json")
        ]
```

### src\flock\modules\performance\metrics_module.py

- **Lines**: 492
- **Last modified**: 2025-03-03 12:28:42

```py
"""Performance and metrics tracking for Flock agents."""

import json
import os
import time
from collections import defaultdict
from datetime import datetime
from typing import Any, Literal

import numpy as np
import psutil
from pydantic import BaseModel, Field, validator

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent
from flock.core.flock_module import FlockModule, FlockModuleConfig


class MetricPoint(BaseModel):
    """Single timestamped metric measurement.

    ``value`` may be numeric (latency, bytes, tokens) or a string;
    ``tags`` carries free-form labels such as agent name and phase.
    """

    timestamp: datetime
    value: int | float | str
    # default_factory is the idiomatic way to declare a mutable default
    # (the previous literal `{}` relied on pydantic copying it per instance).
    tags: dict[str, str] = Field(default_factory=dict)


class MetricsModuleConfig(FlockModuleConfig):
    """Configuration for performance metrics collection.

    Groups collection toggles, storage backend selection, aggregation /
    retention settings, and latency alerting thresholds.
    """

    # Collection settings
    collect_timing: bool = Field(
        default=True, description="Collect timing metrics"
    )
    collect_memory: bool = Field(
        default=True, description="Collect memory usage"
    )
    collect_token_usage: bool = Field(
        default=True, description="Collect token usage stats"
    )
    collect_cpu: bool = Field(default=True, description="Collect CPU usage")

    # Storage settings
    storage_type: Literal["json", "prometheus", "memory"] = Field(
        default="json", description="Where to store metrics"
    )
    metrics_dir: str = Field(
        default="metrics/", description="Directory for metrics storage"
    )

    # Aggregation settings
    aggregation_interval: str = Field(
        default="1h", description="Interval for metric aggregation"
    )
    retention_days: int = Field(default=30, description="Days to keep metrics")

    # Alerting settings
    alert_on_high_latency: bool = Field(
        default=True, description="Alert on high latency"
    )
    latency_threshold_ms: int = Field(
        default=1000, description="Threshold for latency alerts"
    )

    @validator("aggregation_interval")
    def validate_interval(cls, v):
        """Validate time interval format, e.g. "30s", "5m", "1h", "7d".

        Guards the empty string explicitly so an empty value raises a
        proper ValueError instead of an IndexError from ``v[-1]``.
        """
        if not v or v[-1] not in ("s", "m", "h", "d"):
            raise ValueError("Interval must end with s, m, h, or d")
        return v


class MetricsModule(FlockModule):
    """Module for collecting and analyzing agent performance metrics.

    Records latency, memory, CPU, and token/cost metrics around each
    agent evaluation and stores them in memory, in JSON line files, or
    in Prometheus, depending on configuration.
    """

    name: str = "performance_metrics"
    config: MetricsModuleConfig = Field(
        default_factory=MetricsModuleConfig,
        description="Performance metrics configuration",
    )

    def __init__(self, name, config):
        super().__init__(name=name, config=config)
        self._metrics = defaultdict(list)
        self._start_time: float | None = None
        self._start_memory: int | None = None
        # Input-side cost of the current operation; set in pre_evaluate so
        # post_evaluate can record a correct input + output total cost.
        self._last_input_cost: float = 0.0

        # Set up JSON storage directory.
        if self.config.storage_type == "json":
            os.makedirs(self.config.metrics_dir, exist_ok=True)

        # Set up Prometheus collectors; fall back to JSON storage when the
        # prometheus_client package is not installed.
        if self.config.storage_type == "prometheus":
            try:
                from prometheus_client import Counter, Gauge, Histogram

                # NOTE(review): the label names declared here ("agent_name")
                # do not match the tag keys recorded elsewhere ("agent") —
                # verify before enabling prometheus storage.
                self._prom_latency = Histogram(
                    "flock_agent_latency_seconds",
                    "Time taken for agent evaluation",
                    ["agent_name"],
                )
                self._prom_memory = Gauge(
                    "flock_agent_memory_bytes",
                    "Memory usage by agent",
                    ["agent_name"],
                )
                self._prom_tokens = Counter(
                    "flock_agent_tokens_total",
                    "Token usage by agent",
                    ["agent_name", "type"],
                )
                self._prom_errors = Counter(
                    "flock_agent_errors_total",
                    "Error count by agent",
                    ["agent_name", "error_type"],
                )
            except ImportError:
                self.config.storage_type = "json"

    def _load_metrics_from_files(
        self, metric_name: str | None = None
    ) -> dict[str, list[MetricPoint]]:
        """Load metrics from JSON line files in the metrics directory.

        Args:
            metric_name: Optional metric name to filter files by.

        Returns:
            Mapping of metric name to recorded points. Malformed JSON
            lines are skipped; any other failure yields an empty dict.
        """
        metrics = defaultdict(list)

        try:
            # Summary files are aggregates, not raw points; skip them.
            files = [
                f
                for f in os.listdir(self.config.metrics_dir)
                if f.endswith(".json") and not f.startswith("summary_")
            ]

            # Filter by metric name if specified.
            if metric_name:
                files = [f for f in files if f.startswith(f"{metric_name}_")]

            for filename in files:
                filepath = os.path.join(self.config.metrics_dir, filename)
                with open(filepath) as f:
                    for line in f:
                        try:
                            data = json.loads(line)
                            point = MetricPoint(
                                timestamp=datetime.fromisoformat(
                                    data["timestamp"]
                                ),
                                value=data["value"],
                                tags=data["tags"],
                            )
                            # Metric name is the filename prefix (see the
                            # _save_metric_to_file naming scheme).
                            name = filename.split("_")[0]
                            metrics[name].append(point)
                        except json.JSONDecodeError:
                            continue

            return dict(metrics)
        except Exception as e:
            print(f"Error loading metrics from files: {e}")
            return {}

    def get_metrics(
        self,
        metric_name: str | None = None,
        start_time: datetime | None = None,
        end_time: datetime | None = None,
    ) -> dict[str, list[MetricPoint]]:
        """Get recorded metrics with optional name and time filtering."""
        # Get metrics from the appropriate source.
        if self.config.storage_type == "json":
            metrics = self._load_metrics_from_files(metric_name)
        else:
            metrics = self._metrics
            if metric_name:
                # Use .get() so looking up an unknown name does not insert
                # an empty entry into the shared defaultdict.
                metrics = {metric_name: metrics.get(metric_name, [])}

        # Apply time filtering if needed.
        if start_time or end_time:
            filtered_metrics = defaultdict(list)
            for name, points in metrics.items():
                filtered_metrics[name] = [
                    p
                    for p in points
                    if (not start_time or p.timestamp >= start_time)
                    and (not end_time or p.timestamp <= end_time)
                ]
            metrics = filtered_metrics

        return dict(metrics)

    def get_statistics(
        self,
        metric_name: str,
        percentiles: tuple[float, ...] = (50, 90, 95, 99),
    ) -> dict[str, float]:
        """Calculate min/max/mean/std/percentile statistics for a metric.

        Non-numeric points are ignored; an empty dict is returned when no
        numeric data exists. (The percentiles default is now an immutable
        tuple instead of a shared mutable list.)
        """
        metrics = self.get_metrics(metric_name=metric_name)
        points = metrics.get(metric_name, [])

        if not points:
            return {}

        values = [p.value for p in points if isinstance(p.value, (int, float))]
        if not values:
            return {}

        stats = {
            "min": min(values),
            "max": max(values),
            # Convert numpy scalars to float for JSON serialization.
            "mean": float(np.mean(values)),
            "std": float(np.std(values)),
            "count": len(values),
            "last_value": values[-1],
        }

        for p in percentiles:
            stats[f"p{p}"] = float(np.percentile(values, p))

        return stats

    async def terminate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Write an aggregated summary file for JSON storage on shutdown."""
        if self.config.storage_type == "json":
            # Save aggregated metrics.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            summary_file = os.path.join(
                self.config.metrics_dir,
                f"summary_{agent.name}_{timestamp}.json",
            )

            summary = {
                "agent": agent.name,
                "timestamp": timestamp,
                "metrics": {},
            }

            # Get all unique metric names from files.
            all_metrics = self._load_metrics_from_files()

            for metric_name in all_metrics.keys():
                stats = self.get_statistics(metric_name)
                if stats:  # Only include metrics that have data.
                    summary["metrics"][metric_name] = stats

            with open(summary_file, "w") as f:
                json.dump(summary, f, indent=2)

    def _record_metric(
        self,
        name: str,
        value: int | float | str,
        tags: dict[str, str] | None = None,
    ) -> None:
        """Record a single metric point to the configured backend."""
        point = MetricPoint(
            timestamp=datetime.now(), value=value, tags=tags or {}
        )

        if self.config.storage_type == "memory":
            self._metrics[name].append(point)

        elif self.config.storage_type == "prometheus":
            # Use point.tags so a missing tags argument does not crash the
            # labels(**...) expansion with a None.
            if name == "latency":
                self._prom_latency.labels(**point.tags).observe(value)
            elif name == "memory":
                self._prom_memory.labels(**point.tags).set(value)
            elif name == "tokens":
                self._prom_tokens.labels(**point.tags).inc(value)

        elif self.config.storage_type == "json":
            self._save_metric_to_file(name, point)

    def _save_metric_to_file(self, name: str, point: MetricPoint) -> None:
        """Append a metric point as one JSON line to a per-month file."""
        filename = f"{name}_{point.timestamp.strftime('%Y%m')}.json"
        filepath = os.path.join(self.config.metrics_dir, filename)

        data = {
            "timestamp": point.timestamp.isoformat(),
            "value": point.value,
            "tags": point.tags,
        }

        # Append to file (one JSON object per line).
        with open(filepath, "a") as f:
            f.write(json.dumps(data) + "\n")

    def _get_tokenizer(self, model: str):
        """Get the appropriate tiktoken tokenizer for the model.

        Returns None when tiktoken is not installed.
        """
        try:
            import tiktoken

            # Handle different model naming conventions.
            if model.startswith("openai/"):
                model = model[7:]  # Strip 'openai/' prefix.

            try:
                return tiktoken.encoding_for_model(model)
            except KeyError:
                # Fallback to cl100k_base for unknown models.
                return tiktoken.get_encoding("cl100k_base")

        except ImportError:
            return None

    def _calculate_token_usage(self, text: str, model: str = "gpt-4") -> int:
        """Calculate token count using tiktoken when available.

        Falls back to a words/0.75 estimate when tiktoken is missing.
        (Bug fix: the estimate was previously computed but never
        returned, so this method returned None without tiktoken.)
        """
        tokenizer = self._get_tokenizer(model)

        if tokenizer:
            # Use tiktoken for an accurate count.
            return len(tokenizer.encode(text))

        # Log warning about estimation.
        print(
            "Warning: Using estimated token count. Install tiktoken for accurate counting."
        )
        # Simple estimation - words / 0.75 for average tokens per word.
        return int(len(text.split()) / 0.75)

    def _should_alert(self, metric: str, value: float) -> bool:
        """Check if a metric value should trigger an alert.

        Latency values are in seconds; the threshold is in milliseconds.
        """
        if metric == "latency" and self.config.alert_on_high_latency:
            return value * 1000 > self.config.latency_threshold_ms
        return False

    async def initialize(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Start the latency clock and record baseline memory usage."""
        self._start_time = time.time()

        if self.config.collect_memory:
            self._start_memory = psutil.Process().memory_info().rss
            self._record_metric(
                "memory",
                self._start_memory,
                {"agent": agent.name, "phase": "start"},
            )

    def _calculate_cost(
        self, text: str, model: str, is_completion: bool = False
    ) -> tuple[int, float]:
        """Calculate token count and dollar cost for a piece of text.

        Returns:
            (token_count, cost_usd); (0, 0.0) on any failure.
        """
        try:
            from litellm import cost_per_token

            token_count = self._calculate_token_usage(text, model)
            # cost_per_token returns (prompt_cost, completion_cost) dollar
            # totals for the given token counts; pick the relevant side.
            # (Bug fix: the tuple was previously multiplied by token_count,
            # which repeats a tuple rather than scaling a number.)
            if is_completion:
                _, total_cost = cost_per_token(
                    model, completion_tokens=token_count
                )
            else:
                total_cost, _ = cost_per_token(
                    model, prompt_tokens=token_count
                )

            return token_count, float(total_cost)
        except Exception:
            return 0, 0.0

    async def pre_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Record input-side token/cost and CPU metrics."""
        if self.config.collect_token_usage:
            # Calculate input tokens and cost.
            total_input_tokens = 0
            total_input_cost = 0.0

            for v in inputs.values():
                tokens, cost = self._calculate_cost(
                    str(v), agent.model, is_completion=False
                )
                total_input_tokens += tokens
                total_input_cost += cost

            # Remember the input cost so post_evaluate can report a true
            # input + output total for this operation.
            self._last_input_cost = total_input_cost

            self._record_metric(
                "tokens",
                total_input_tokens,
                {"agent": agent.name, "type": "input"},
            )
            self._record_metric(
                "cost", total_input_cost, {"agent": agent.name, "type": "input"}
            )

        if self.config.collect_cpu:
            cpu_percent = psutil.Process().cpu_percent()
            self._record_metric(
                "cpu",
                cpu_percent,
                {"agent": agent.name, "phase": "pre_evaluate"},
            )

        return inputs

    async def post_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Record latency, output token/cost, and memory-delta metrics."""
        if self.config.collect_timing and self._start_time:
            latency = time.time() - self._start_time
            self._record_metric("latency", latency, {"agent": agent.name})

            # Check for alerts.
            if self._should_alert("latency", latency):
                # In practice, you'd want to integrate with a proper alerting system.
                print(f"ALERT: High latency detected: {latency * 1000:.2f}ms")

        if self.config.collect_token_usage:
            # Calculate output tokens and cost.
            total_output_tokens = 0
            total_output_cost = 0.0

            for v in result.values():
                tokens, cost = self._calculate_cost(
                    str(v), agent.model, is_completion=True
                )
                total_output_tokens += tokens
                total_output_cost += cost

            self._record_metric(
                "tokens",
                total_output_tokens,
                {"agent": agent.name, "type": "output"},
            )
            self._record_metric(
                "cost",
                total_output_cost,
                {"agent": agent.name, "type": "output"},
            )

            # Total = input cost (saved by pre_evaluate) + output cost.
            # (Bug fix: the output cost was previously added to itself.)
            self._record_metric(
                "total_cost",
                self._last_input_cost + total_output_cost,
                {"agent": agent.name},
            )

        if self.config.collect_memory and self._start_memory:
            current_memory = psutil.Process().memory_info().rss
            memory_diff = current_memory - self._start_memory
            self._record_metric(
                "memory", memory_diff, {"agent": agent.name, "phase": "end"}
            )

        return result

    async def on_error(
        self,
        agent: FlockAgent,
        error: Exception,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> None:
        """Record an error metric tagged with the exception type."""
        self._record_metric(
            "errors",
            1,
            {"agent": agent.name, "error_type": type(error).__name__},
        )
```

### src\flock\modules\zep\zep_module.py

- **Lines**: 185
- **Last modified**: 2025-03-03 12:28:42

```py
import uuid
from typing import Any

from pydantic import Field
from zep_python.client import Zep
from zep_python.types import Message as ZepMessage, SessionSearchResult

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.logging.logging import get_logger

logger = get_logger("module.zep")


class ZepModuleConfig(FlockModuleConfig):
    """Configuration for the Zep module."""

    # Base URL of the Zep server to connect to.
    zep_url: str = "http://localhost:8000"
    # API key sent to the Zep server (default is a placeholder).
    zep_api_key: str = "apikey"
    min_fact_rating: float = Field(
        default=0.7, description="Minimum rating for facts to be considered"
    )
    # Whether pre_evaluate searches memory and injects facts into inputs.
    enable_read: bool = True
    # Whether post_evaluate writes agent results back to memory.
    enable_write: bool = False


class ZepModule(FlockModule):
    """Module that adds Zep memory capabilities to a Flock agent.

    On pre_evaluate it can search stored facts and inject them into the
    agent inputs; on post_evaluate it can write the agent result back to
    Zep memory. Each module instance owns one Zep user (named after the
    module) and one fresh session.
    """

    name: str = "zep"
    # default_factory keeps this consistent with the other modules'
    # config declarations (pydantic copies either way).
    config: ZepModuleConfig = Field(default_factory=ZepModuleConfig)
    session_id: str | None = None
    user_id: str | None = None

    def __init__(self, name, config: ZepModuleConfig) -> None:
        """Initialize the module: ensure the user exists, open a session."""
        super().__init__(name=name, config=config)
        logger.debug("Initializing Zep module")
        zep_client = self.get_client()
        self.user_id = self.name
        self._setup_user(zep_client)
        self.session_id = str(uuid.uuid4())
        self._setup_session(zep_client)

    def _setup_user(self, zep_client: Zep) -> None:
        """Create the Zep user if it does not already exist."""
        if not zep_client or not self.user_id:
            raise ValueError("Zep service or user_id not initialized")

        try:
            user = zep_client.user.get(user_id=self.user_id)
            if not user:
                zep_client.user.add(user_id=self.user_id)
        except Exception:
            # The get call raises for unknown users; create on demand.
            zep_client.user.add(user_id=self.user_id)

    def _setup_session(self, zep_client: Zep) -> None:
        """Register the freshly generated session with Zep."""
        if not zep_client or not self.user_id or not self.session_id:
            raise ValueError(
                "Zep service, user_id, or session_id not initialized"
            )

        zep_client.memory.add_session(
            user_id=self.user_id,
            session_id=self.session_id,
        )

    def get_client(self) -> Zep:
        """Build a Zep client from the configured URL and API key."""
        return Zep(
            base_url=self.config.zep_url, api_key=self.config.zep_api_key
        )

    def get_memory(self, zep_client: Zep) -> str | None:
        """Get relevant facts for the current session, or None on failure."""
        if not zep_client or not self.session_id:
            logger.error("Zep service or session_id not initialized")
            return None

        try:
            memory = zep_client.memory.get(
                self.session_id, min_rating=self.config.min_fact_rating
            )
            if memory:
                return f"{memory.relevant_facts}"
        except Exception as e:
            logger.error(f"Error fetching memory: {e}")
            return None

        return None

    def split_text(
        self, text: str | None, max_length: int = 1000
    ) -> list[ZepMessage]:
        """Split text into Zep user messages of at most max_length chars."""
        result: list[ZepMessage] = []
        if not text:
            return result
        if len(text) <= max_length:
            return [ZepMessage(role="user", content=text, role_type="user")]
        for i in range(0, len(text), max_length):
            result.append(
                ZepMessage(
                    role="user",
                    content=text[i : i + max_length],
                    role_type="user",
                )
            )
        return result

    def add_to_memory(self, text: str, zep_client: Zep) -> None:
        """Add text (chunked into messages) to the current session memory."""
        if not zep_client or not self.session_id:
            logger.error("Zep service or session_id not initialized")
            return

        messages = self.split_text(text)
        zep_client.memory.add(session_id=self.session_id, messages=messages)

    def search_memory(
        self, query: str, zep_client: Zep
    ) -> list[SessionSearchResult]:
        """Search the user's stored facts for a query string."""
        if not zep_client or not self.user_id:
            logger.error("Zep service or user_id not initialized")
            return []

        response = zep_client.memory.search_sessions(
            text=query,
            user_id=self.user_id,
            search_scope="facts",
            min_fact_rating=self.config.min_fact_rating,
        )
        if not response.results:
            return []
        return response.results

    async def post_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        result: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Write the agent result to Zep memory when writing is enabled."""
        if not self.config.enable_write:
            return result
        logger.debug("Saving data to memory")
        self.add_to_memory(str(result), self.get_client())
        return result

    async def pre_evaluate(
        self,
        agent: FlockAgent,
        inputs: dict[str, Any],
        context: FlockContext | None = None,
    ) -> dict[str, Any]:
        """Search Zep memory and inject matching facts into the inputs."""
        if not self.config.enable_read:
            return inputs

        logger.debug("Searching memory")
        facts = self.search_memory(str(inputs), self.get_client())

        # Add memory to inputs.
        facts_str = ""
        if facts:
            for fact in facts:
                facts_str += fact.fact.fact + "\n"
            logger.debug("Found facts in memory: {}", facts_str)
            # Only extend the agent's input spec once; previously every
            # call appended another ", memory" to agent.input.
            declared = [part.strip() for part in agent.input.split(",")]
            if "memory" not in declared:
                agent.input = agent.input + ", memory"
            inputs["memory"] = facts_str

        return inputs
```

### src\flock\platform\docker_tools.py

- **Lines**: 49
- **Last modified**: 2025-02-18 03:20:40

```py
import subprocess
import time


def _check_docker_running():
    """Return True when the Docker daemon answers 'docker info'.

    Any failure to launch or complete the command (docker missing,
    daemon down) yields False rather than raising.
    """
    try:
        proc = subprocess.run(
            ["docker", "info"],
            capture_output=True,
            text=True,
        )
    except Exception:
        return False
    return proc.returncode == 0


def _start_docker():
    """Attempt to start Docker.

    Tries 'sudo systemctl start docker' first and, if that fails, falls
    back to 'sudo service docker start'. Adjust as needed for your
    environment. Returns True only when Docker is confirmed running
    afterwards.
    """
    commands = (
        ["sudo", "systemctl", "start", "docker"],
        ["sudo", "service", "docker", "start"],
    )
    try:
        print("Attempting to start Docker...")
        for cmd in commands:
            outcome = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            if outcome.returncode == 0:
                break
        # Give Docker a moment to start.
        time.sleep(3)
        if _check_docker_running():
            print("Docker is now running.")
            return True
        print("Docker did not start successfully.")
        return False
    except Exception as e:
        print(f"Exception when trying to start Docker: {e}")
        return False
```

### src\flock\platform\jaeger_install.py

- **Lines**: 86
- **Last modified**: 2025-02-18 03:20:40

```py
import socket
import subprocess
from urllib.parse import urlparse


class JaegerInstaller:
    jaeger_endpoint: str = None
    jaeger_transport: str = "grpc"

    def _check_jaeger_running(self):
        """Check if Jaeger is reachable by attempting a socket connection.
        For HTTP transport, we parse the URL; for gRPC, we expect "host:port".
        """
        try:
            if self.jaeger_transport == "grpc":
                host, port = self.jaeger_endpoint.split(":")
                port = int(port)
            elif self.jaeger_transport == "http":
                parsed = urlparse(self.jaeger_endpoint)
                host = parsed.hostname
                port = parsed.port if parsed.port else 80
            else:
                return False

            # Try connecting to the host and port.
            with socket.create_connection((host, port), timeout=3):
                return True
        except Exception:
            return False

    def _is_jaeger_container_running(self):
        """Check if a Jaeger container (using the official all-in-one image) is running.
        This uses 'docker ps' to filter for containers running the Jaeger image.
        """
        try:
            result = subprocess.run(
                [
                    "docker",
                    "ps",
                    "--filter",
                    "ancestor=jaegertracing/all-in-one:latest",
                    "--format",
                    "{{.ID}}",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            return bool(result.stdout.strip())
        except Exception:
            return False

    def _provision_jaeger_container(self):
        """Provision a Jaeger container using Docker."""
        try:
            print("Provisioning Jaeger container using Docker...")
            result = subprocess.run(
                [
                    "docker",
                    "run",
                    "-d",
                    "--name",
                    "jaeger",
                    "-p",
                    "16686:16686",
                    "-p",
                    "14250:14250",
                    "-p",
                    "14268:14268",
                    "jaegertracing/all-in-one:latest",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            if result.returncode == 0:
                print("Jaeger container started successfully.")
                return True
            else:
                print(
                    f"Failed to start Jaeger container. Error: {result.stderr}"
                )
                return False
        except Exception as e:
            print(f"Exception when provisioning Jaeger container: {e}")
            return False
```

### src\flock\routers\__init__.py

- **Lines**: 1
- **Last modified**: 2025-02-26 06:10:08

```py
"""Routers for the Flock framework."""
```

### src\flock\routers\agent\__init__.py

- **Lines**: 1
- **Last modified**: 2025-02-26 06:10:08

```py
"""Agent-based router implementation for the Flock framework."""
```

### src\flock\routers\agent\agent_router.py

- **Lines**: 234
- **Last modified**: 2025-02-26 06:10:08

```py
"""Agent-based router implementation for the Flock framework."""

from typing import Any

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent
from flock.core.flock_router import (
    FlockRouter,
    FlockRouterConfig,
    HandOffRequest,
)
from flock.core.logging.formatters.themes import OutputTheme
from flock.core.logging.logging import get_logger
from flock.evaluators.declarative.declarative_evaluator import (
    DeclarativeEvaluator,
    DeclarativeEvaluatorConfig,
)
from flock.modules.output.output_module import OutputModule, OutputModuleConfig
from flock.routers.agent.handoff_agent import (
    AgentInfo,
    HandoffAgent,
)

logger = get_logger("agent_router")


class AgentRouterConfig(FlockRouterConfig):
    """Configuration for the agent router.

    This class extends FlockRouterConfig with parameters specific to the agent router.
    """

    # When True, AgentRouter.route attaches an OutputModule to the internal
    # HandoffAgent so its decision is rendered via the output module.
    with_output: bool = False
    # Minimum confidence the HandoffAgent must report before its selection is
    # accepted; below this the router returns an empty hand-off.
    confidence_threshold: float = 0.5


class AgentRouter(FlockRouter):
    """Router that uses a FlockAgent to determine the next agent in a workflow.

    This class is responsible for:
    1. Creating and managing a HandoffAgent
    2. Analyzing available agents in the registry
    3. Using the HandoffAgent to determine the best next agent
    4. Creating a HandOff object with the selected agent
    """

    def __init__(
        self,
        name: str = "agent_router",
        config: AgentRouterConfig | None = None,
    ):
        """Initialize the AgentRouter.

        Args:
            name: The name of the router
            config: The router configuration; a default AgentRouterConfig is
                created when omitted.
        """
        super().__init__(
            name=name, config=config or AgentRouterConfig(name=name)
        )

    @staticmethod
    def _no_route() -> HandOffRequest:
        """Return the empty hand-off used whenever no agent can be selected."""
        return HandOffRequest(
            next_agent="",
            hand_off_mode="add",
            override_next_agent=None,
            override_context=None,
        )

    async def route(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        context: FlockContext,
    ) -> HandOffRequest:
        """Determine the next agent to hand off to based on the current agent's output.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            context: The global execution context

        Returns:
            A HandOffRequest naming the selected agent, or an empty hand-off
            when no suitable agent is found or an error occurs.
        """
        # Get all available agents from context.agent_definitions
        agent_definitions = context.agent_definitions

        # Build a fresh decision agent for this routing step, reusing the
        # current agent's model.
        handoff_agent = HandoffAgent(model=current_agent.model)
        handoff_agent.evaluator = DeclarativeEvaluator(
            name="evaluator",
            config=DeclarativeEvaluatorConfig(
                model=current_agent.model,
                use_cache=True,
                max_tokens=1000,
                temperature=0.0,
            ),
        )
        if self.config.with_output:
            handoff_agent.add_module(
                OutputModule(
                    name="output",
                    config=OutputModuleConfig(
                        theme=OutputTheme.abernathy,
                    ),
                )
            )

        available_agents = self._get_available_agents(
            agent_definitions, current_agent.name
        )
        if not available_agents:
            logger.warning("No available agents for agent-based routing")
            return self._no_route()

        # Prepare input for the handoff agent
        handoff_input = {
            "current_agent_name": current_agent.name,
            "current_agent_description": current_agent.description,
            "current_agent_input": current_agent.input,
            "current_agent_output": current_agent.output,
            "current_result": result,
            "available_agents": available_agents,
        }

        try:
            # Run the handoff agent to determine the next agent
            handoff_result = await handoff_agent.run_async(handoff_input)

            # Extract the decision. A missing/None confidence previously
            # raised a TypeError on the threshold comparison (silently
            # swallowed by the broad except below); treat it as zero instead.
            next_agent_name = handoff_result.get("agent_name")
            confidence = handoff_result.get("confidence") or 0.0
            reasoning = handoff_result.get("reasoning")
            logger.info(
                f"Agent router selected agent '{next_agent_name}' with confidence {confidence} and reasoning: {reasoning}"
            )

            if confidence < self.config.confidence_threshold:
                logger.info(
                    f"No suitable next agent found (best score: {confidence})"
                )
                return self._no_route()

            next_agent = agent_definitions.get(next_agent_name)
            if not next_agent:
                logger.error(
                    f"Selected agent '{next_agent_name}' not found in agent definitions"
                )
                return self._no_route()

            return HandOffRequest(
                next_agent=next_agent_name,
                hand_off_mode="add",
                override_next_agent=None,
                override_context=None,
            )

        except Exception as e:
            logger.error(f"Error in agent-based routing: {e}")
            return self._no_route()

    def _get_available_agents(
        self, agent_definitions: dict[str, Any], current_agent_name: str
    ) -> list[AgentInfo]:
        """Get all available agents except the current one and the handoff agent.

        Args:
            agent_definitions: Dictionary of available agents
            current_agent_name: Name of the current agent to exclude

        Returns:
            List of available agents as AgentInfo objects
        """
        agents = []
        for agent_name, agent in agent_definitions.items():
            if agent_name == current_agent_name:
                continue
            agents.append(
                AgentInfo(
                    name=agent_name,
                    # agent_data["description"] may be falsy; normalize to "".
                    description=agent.agent_data["description"] or "",
                    input_schema=agent.agent_data["input"],
                    output_schema=agent.agent_data["output"],
                )
            )
        return agents

    def _get_schema_from_agent(
        self, agent: Any, schema_type: str
    ) -> dict[str, Any]:
        """Extract input or output schema from an agent.

        Args:
            agent: The agent to extract schema from
            schema_type: Either "input" or "output"

        Returns:
            Dictionary representation of the schema, mapping each field name
            to its type/description text (or "Any" when untyped).
        """
        schema = {}
        schema_str = agent.agent_data.get(schema_type, "")

        # Parse the schema string to extract field names, types, and descriptions
        if schema_str:
            fields = schema_str.split(",")
            for field in fields:
                field = field.strip()
                if ":" in field:
                    name, rest = field.split(":", 1)
                    name = name.strip()
                    schema[name] = rest.strip()
                else:
                    schema[field] = "Any"

        return schema

    # The _create_next_input method is no longer needed since we're using hand_off_mode="add"
    # instead of manually preparing inputs for the next agent
```

### src\flock\routers\agent\handoff_agent.py

- **Lines**: 58
- **Last modified**: 2025-02-26 06:10:08

```py
"""Handoff agent for the agent-based router."""

from pydantic import BaseModel

from flock.core.flock_agent import FlockAgent


class AgentInfo(BaseModel):
    """Information about an agent for handoff decisions."""

    name: str  # Agent name as keyed in the agent definitions.
    description: str = ""  # Human-readable description; empty when the agent has none.
    input_schema: str = ""  # The agent's declared input signature string.
    output_schema: str = ""  # The agent's declared output signature string.


class HandoffDecision(BaseModel):
    """Decision about which agent to hand off to."""

    agent_name: str  # Name of the agent selected as the next step.
    confidence: float  # Confidence in the decision (routers compare this to a threshold).
    reasoning: str  # Brief explanation of why this agent was chosen.


class HandoffAgent(FlockAgent):
    """Agent that decides which agent to hand off to next.

    This agent analyzes the current agent's output and available agents
    to determine the best next agent in the workflow.
    """

    def __init__(
        self,
        name: str = "handoff_agent",
        model: str | None = None,
        description: str = "Decides which agent to hand off to next",
    ):
        """Initialize the HandoffAgent.

        Args:
            name: The name of the agent
            model: The model to use (e.g., 'openai/gpt-4o')
            description: A human-readable description of the agent
        """
        # The declarative input/output signatures below define the agent's
        # contract; the output fields mirror the HandoffDecision model
        # (agent_name, confidence, reasoning).
        super().__init__(
            name=name,
            model=model,
            description=description,
            input=(
                "current_agent_name: str | Name of the current agent, "
                "current_agent_description: str | Description of the current agent, "
                "current_agent_input: str | Input schema of the current agent, "
                "current_agent_output: str | Output schema of the current agent, "
                "current_result: dict | Output from the current agent, "
                "available_agents: list[AgentInfo] | List of available agents"
            ),
            output="agent_name: str | Name of the agent to hand off to, confidence: float | Confidence in the decision, reasoning: str | Reasoning for the decision",
        )
```

### src\flock\routers\default\__init__.py

- **Lines**: 1
- **Last modified**: 2025-02-26 06:10:08

```py
"""Default router implementation for the Flock framework."""
```

### src\flock\routers\default\default_router.py

- **Lines**: 76
- **Last modified**: 2025-03-16 14:43:42

```py
"""Default router implementation for the Flock framework."""

from collections.abc import Callable
from typing import Any

from pydantic import Field

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent
from flock.core.flock_router import (
    FlockRouter,
    FlockRouterConfig,
    HandOffRequest,
)
from flock.core.logging.logging import get_logger

logger = get_logger("default_router")


class DefaultRouterConfig(FlockRouterConfig):
    """Configuration for the default router."""

    # Static routing target: an agent name, a ready-made HandOffRequest, or a
    # callable producing one (DefaultRouter.route calls it with the context
    # and the current result).
    hand_off: str | HandOffRequest | Callable[..., HandOffRequest] = Field(
        default="", description="Next agent to hand off to"
    )


class DefaultRouter(FlockRouter):
    """Default router implementation.

    This router simply uses the agent's hand_off property to determine the next agent.
    It does not perform any dynamic routing.
    """

    name: str = "default_router"
    config: DefaultRouterConfig = Field(
        default_factory=DefaultRouterConfig,
        # Was mislabeled "Output configuration" (copy-paste from the output
        # module); this field holds the router's own configuration.
        description="Router configuration",
    )

    def __init__(
        self,
        name: str = "default_router",
        config: DefaultRouterConfig | None = None,
    ):
        """Initialize the DefaultRouter.

        Args:
            name: The name of the router
            config: The router configuration; a default DefaultRouterConfig
                is created when omitted.
        """
        super().__init__(
            name=name, config=config or DefaultRouterConfig(name=name)
        )

    async def route(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        context: FlockContext,
    ) -> HandOffRequest:
        """Determine the next agent to hand off to based on the current agent's output.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            context: The global execution context

        Returns:
            A HandOffRequest built from the configured hand_off value.
        """
        handoff = self.config.hand_off
        # A callable hand_off is resolved first; it may itself return a
        # string or a HandOffRequest.
        if callable(handoff):
            handoff = handoff(context, result)
        # A bare agent name is wrapped in a HandOffRequest with "match" mode.
        if isinstance(handoff, str):
            handoff = HandOffRequest(next_agent=handoff, hand_off_mode="match")
        return handoff
```

### src\flock\routers\llm\__init__.py

- **Lines**: 1
- **Last modified**: 2025-02-26 06:10:08

```py
"""LLM-based router implementation for the Flock framework."""
```

### src\flock\routers\llm\llm_router.py

- **Lines**: 363
- **Last modified**: 2025-02-26 06:10:08

```py
"""LLM-based router implementation for the Flock framework."""

import json
from typing import Any

import litellm

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent
from flock.core.flock_router import (
    FlockRouter,
    FlockRouterConfig,
    HandOffRequest,
)
from flock.core.logging.logging import get_logger

logger = get_logger("llm_router")


class LLMRouterConfig(FlockRouterConfig):
    """Configuration for the LLM router.

    This class extends FlockRouterConfig with parameters specific to the LLM router.
    """

    # Sampling temperature for the agent-selection LLM call.
    temperature: float = 0.2
    # Token cap for the agent-selection LLM call.
    max_tokens: int = 500
    # Minimum score required to accept the LLM's agent choice.
    confidence_threshold: float = 0.5
    # Optional custom selection prompt; when empty a default prompt is built.
    prompt: str = ""


class LLMRouter(FlockRouter):
    """Router that uses an LLM to determine the next agent in a workflow.

    This class is responsible for:
    1. Analyzing available agents in the registry
    2. Using an LLM to score each agent's suitability as the next step
    3. Selecting the highest-scoring agent
    4. Creating a HandOff object with the selected agent
    """

    def __init__(
        self,
        name: str = "llm_router",
        config: LLMRouterConfig | None = None,
    ):
        """Initialize the LLMRouter.

        Args:
            name: The name of the router
            config: The router configuration; a default LLMRouterConfig is
                created when omitted.
        """
        logger.info(f"Initializing LLM Router '{name}'")
        super().__init__(name=name, config=config or LLMRouterConfig(name=name))
        logger.debug(
            "LLM Router configuration",
            temperature=self.config.temperature,
            max_tokens=self.config.max_tokens,
        )

    async def route(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        context: FlockContext,
    ) -> HandOffRequest:
        """Determine the next agent to hand off to based on the current agent's output.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            context: The global execution context

        Returns:
            A HandOffRequest naming the selected agent, or an empty request
            when no agent is suitable or the selection fails.
        """
        logger.info(
            f"Routing from agent '{current_agent.name}'",
            current_agent=current_agent.name,
        )
        logger.debug("Current agent result", result=result)

        agent_definitions = context.agent_definitions
        # Get all available agents from the registry
        available_agents = self._get_available_agents(
            agent_definitions, current_agent.name
        )
        logger.debug(
            "Available agents for routing",
            count=len(available_agents),
            agents=[a.agent_data["name"] for a in available_agents],
        )

        if not available_agents:
            logger.warning(
                "No available agents for routing",
                current_agent=current_agent.name,
            )
            return HandOffRequest(
                next_agent="", override_next_agent={}, override_context=None
            )

        # Use LLM to determine the best next agent
        next_agent_name, score = await self._select_next_agent(
            current_agent, result, available_agents
        )
        logger.info(
            "Agent selection result",
            next_agent=next_agent_name,
            score=score,
        )

        if not next_agent_name or score < self.config.confidence_threshold:
            logger.warning(
                "No suitable next agent found",
                best_score=score,
            )
            return HandOffRequest(
                next_agent="", override_next_agent={}, override_context=None
            )

        # Get the next agent from the registry
        next_agent = agent_definitions.get(next_agent_name)
        if not next_agent:
            logger.error(
                "Selected agent not found in registry",
                agent_name=next_agent_name,
            )
            return HandOffRequest(
                next_agent="", override_next_agent={}, override_context=None
            )

        logger.success(
            f"Successfully routed to agent '{next_agent_name}'",
            score=score,
            from_agent=current_agent.name,
        )
        return HandOffRequest(
            next_agent=next_agent_name,
            hand_off_mode="add",
            override_next_agent=None,
            override_context=None,
        )

    def _get_available_agents(
        self, agent_definitions: dict[str, Any], current_agent_name: str
    ) -> list[FlockAgent]:
        """Get all available agents except the current one.

        Args:
            agent_definitions: Mapping of agent name to agent definition
            current_agent_name: Name of the current agent to exclude

        Returns:
            List of available agents
        """
        logger.debug(
            "Getting available agents",
            total_agents=len(agent_definitions),
            current_agent=current_agent_name,
        )
        return [
            definition
            for agent_name, definition in agent_definitions.items()
            if agent_name != current_agent_name
        ]

    async def _select_next_agent(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        available_agents: list[FlockAgent],
    ) -> tuple[str, float]:
        """Use an LLM to select the best next agent.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            available_agents: List of available agents to choose from

        Returns:
            Tuple of (selected_agent_name, confidence_score); ("", 0.0) when
            selection fails.
        """
        logger.debug(
            "Selecting next agent",
            current_agent=current_agent.name,
            available_count=len(available_agents),
        )

        # Prepare the prompt for the LLM
        prompt = self._create_selection_prompt(
            current_agent, result, available_agents
        )
        logger.debug("Generated selection prompt", prompt_length=len(prompt))

        try:
            logger.info(
                "Calling LLM for agent selection",
                model=current_agent.model,
                temperature=self.config.temperature,
            )
            # Call the LLM to get the next agent
            response = await litellm.acompletion(
                model=current_agent.model,
                messages=[{"role": "user", "content": prompt}],
                temperature=self.config.temperature
                if isinstance(self.config, LLMRouterConfig)
                else 0.2,
                max_tokens=self.config.max_tokens
                if isinstance(self.config, LLMRouterConfig)
                else 500,
            )

            content = response.choices[0].message.content
            # Parse the response to get the agent name and score
            try:
                # Prefer a fenced ```json block when present; otherwise try
                # to parse the whole response. The previous unconditional
                # split("```json")[1] raised an uncaught IndexError for
                # unfenced replies, which skipped the fallback below and was
                # mislabeled as an LLM-call error by the outer handler.
                if "```json" in content:
                    payload = content.split("```json", 1)[1].split("```", 1)[0]
                else:
                    payload = content
                data = json.loads(payload)
                next_agent = data.get("next_agent", "")
                score = float(data.get("score", 0))
                reasoning = data.get("reasoning", "")
                logger.info(
                    "Successfully parsed LLM response",
                    next_agent=next_agent,
                    score=score,
                    reasoning=reasoning,
                )
                return next_agent, score
            except (json.JSONDecodeError, ValueError) as e:
                logger.error(
                    "Failed to parse LLM response",
                    error=str(e),
                    raw_response=content,
                )
                logger.debug("Attempting fallback parsing")

                # Fallback: try to extract the agent name from the text
                for agent in available_agents:
                    if agent.agent_data["name"] in content:
                        logger.info(
                            "Found agent name in response using fallback",
                            agent=agent.agent_data["name"],
                        )
                        return agent.agent_data[
                            "name"
                        ], 0.6  # Default score for fallback

                return "", 0.0

        except Exception as e:
            logger.error(
                "Error calling LLM for agent selection",
                error=str(e),
                current_agent=current_agent.name,
            )
            return "", 0.0

    def _create_selection_prompt(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        available_agents: list[FlockAgent],
    ) -> str:
        """Create a prompt for the LLM to select the next agent.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            available_agents: List of available agents to choose from

        Returns:
            Prompt string for the LLM (config.prompt when set, otherwise a
            default prompt built from the agents' metadata)
        """
        # Format the current agent's output
        result_str = json.dumps(result, indent=2)

        # Format the available agents' information
        agents_info = []
        for agent in available_agents:
            agent_info = {
                "name": agent.agent_data["name"],
                "description": agent.agent_data["description"]
                if agent.agent_data["description"]
                else "",
                "input": agent.agent_data["input"],
                "output": agent.agent_data["output"],
            }
            agents_info.append(agent_info)

        agents_str = json.dumps(agents_info, indent=2)

        # Create the prompt; a configured prompt overrides the default.
        if self.config.prompt:
            prompt = self.config.prompt
        else:
            prompt = f"""
You are a workflow router that determines the next agent to execute in a multi-agent system.

CURRENT AGENT:
Name: {current_agent.name}
Description: {current_agent.description}
Input: {current_agent.input}
Output: {current_agent.output}

CURRENT AGENT'S OUTPUT:
{result_str}

AVAILABLE AGENTS:
{agents_str}

Based on the current agent's output and the available agents, determine which agent should be executed next.
Consider the following:
1. Which agent's input requirements best match the current agent's output?
2. Which agent's purpose and description make it the most logical next step?
3. Which agent would provide the most value in continuing the workflow?

Respond with a JSON object containing:
1. "next_agent": The name of the selected agent
2. "score": A confidence score between 0 and 1 indicating how suitable this agent is
3. "reasoning": A brief explanation of why this agent was selected

If no agent is suitable, set "next_agent" to an empty string and "score" to 0.

JSON Response:
"""
        return prompt

    def _create_next_input(
        self,
        current_agent: FlockAgent,
        result: dict[str, Any],
        next_agent: FlockAgent,
    ) -> dict[str, Any]:
        """Create the input for the next agent, including the previous agent's output.

        Args:
            current_agent: The agent that just completed execution
            result: The output from the current agent
            next_agent: The next agent to execute

        Returns:
            Input dictionary for the next agent
        """
        # Start with an empty input
        next_input = {}

        # Add a special field for the previous agent's output
        next_input["previous_agent_output"] = {
            "agent_name": current_agent.name,
            "result": result,
        }

        # Try to map the current agent's output to the next agent's input
        # This is a simple implementation that could be enhanced with more sophisticated mapping
        for key in result:
            # If the next agent expects this key, add it directly
            if key in next_agent.input:
                next_input[key] = result[key]

        return next_input
```

### src\flock\workflow\__init__.py

- **Lines**: 0
- **Last modified**: 2025-02-18 03:20:41

```py

```

### src\flock\workflow\activities.py

- **Lines**: 201
- **Last modified**: 2025-04-02 17:29:19

```py
"""Defines Temporal activities for running a chain of agents with logging and tracing."""

from datetime import datetime

from opentelemetry import trace
from temporalio import activity

from flock.core.context.context import FlockContext
from flock.core.context.context_vars import FLOCK_CURRENT_AGENT, FLOCK_MODEL
from flock.core.flock_agent import FlockAgent
from flock.core.flock_registry import get_registry
from flock.core.flock_router import HandOffRequest
from flock.core.logging.logging import get_logger
from flock.core.util.input_resolver import resolve_inputs

logger = get_logger("activities")
tracer = trace.get_tracer(__name__)


@activity.defn
async def run_agent(context: FlockContext) -> dict:
    """Runs a chain of agents using the provided context.

    The context contains state, history, and agent definitions.
    After each agent run, its output is merged into the context.

    Args:
        context: The FlockContext for this run. Temporal may deliver it in
            serialized dict form, in which case it is rehydrated first.

    Returns:
        The final agent's result dict, or a dict with an ``"error"`` key
        when the chain cannot proceed.
    """
    # Start a top-level span for the entire run_agent activity.
    with tracer.start_as_current_span("run_agent") as span:
        registry = get_registry()
        previous_agent_name = ""
        if isinstance(context, dict):
            context = FlockContext.from_dict(context)
        current_agent_name = context.get_variable(FLOCK_CURRENT_AGENT)
        span.set_attribute("initial.agent", current_agent_name)
        logger.info("Starting agent chain", initial_agent=current_agent_name)

        agent = registry.get_agent(current_agent_name)
        # Check for a missing agent BEFORE touching its attributes; the
        # previous order dereferenced `agent.model` first, which would raise
        # AttributeError instead of returning the intended error dict.
        if not agent:
            logger.error("Agent not found", agent=current_agent_name)
            span.record_exception(
                Exception(f"Agent '{current_agent_name}' not found")
            )
            return {"error": f"Agent '{current_agent_name}' not found."}
        # Fall back to the context-wide default model when neither the agent
        # nor its evaluator config specifies one.
        if agent.model is None or agent.evaluator.config.model is None:
            agent.set_model(context.get_variable(FLOCK_MODEL))
        agent.resolve_callables(context=context)

        # Loop over agents in the chain.
        while agent:
            # Create a nested span for this iteration.
            with tracer.start_as_current_span("agent_iteration") as iter_span:
                iter_span.set_attribute("agent.name", agent.name)
                agent.context = context
                # Resolve inputs for the agent.
                agent_inputs = resolve_inputs(
                    agent.input, context, previous_agent_name
                )
                iter_span.add_event(
                    "resolved inputs", attributes={"inputs": str(agent_inputs)}
                )

                # Execute the agent with its own span.
                with tracer.start_as_current_span("execute_agent") as exec_span:
                    logger.info("Executing agent", agent=agent.name)
                    try:
                        result = await agent.run_async(agent_inputs)
                        exec_span.set_attribute("result", str(result))
                        logger.debug(
                            "Agent execution completed", agent=agent.name
                        )
                    except Exception as e:
                        logger.error(
                            "Agent execution failed",
                            agent=agent.name,
                            error=str(e),
                        )
                        exec_span.record_exception(e)
                        raise

                # Determine the next agent using the handoff router if available
                handoff_data = HandOffRequest()

                if agent.handoff_router:
                    logger.info(
                        f"Using handoff router: {agent.handoff_router.__class__.__name__}",
                        agent=agent.name,
                    )
                    try:
                        # Route to the next agent
                        handoff_data = await agent.handoff_router.route(
                            agent, result, context
                        )

                        if callable(handoff_data):
                            logger.debug(
                                "Executing handoff function", agent=agent.name
                            )
                            try:
                                handoff_data = handoff_data(context, result)
                                # Routers may hand back an agent instance;
                                # normalize to the agent's name.
                                if isinstance(
                                    handoff_data.next_agent, FlockAgent
                                ):
                                    handoff_data.next_agent = (
                                        handoff_data.next_agent.name
                                    )
                            except Exception as e:
                                # Fixed: message previously carried unused
                                # "{} {}" placeholders.
                                logger.error(
                                    "Handoff function error",
                                    agent=agent.name,
                                    error=str(e),
                                )
                                iter_span.record_exception(e)
                                return {"error": f"Handoff function error: {e}"}
                        elif isinstance(handoff_data.next_agent, FlockAgent):
                            handoff_data.next_agent = (
                                handoff_data.next_agent.name
                            )

                        if not handoff_data.next_agent:
                            logger.info(
                                "Router found no suitable next agent",
                                agent=agent.name,
                            )
                            context.record(
                                agent.name,
                                result,
                                timestamp=datetime.now().isoformat(),
                                hand_off=None,
                                called_from=previous_agent_name,
                            )
                            logger.info("Completing chain", agent=agent.name)
                            iter_span.add_event("chain completed")
                            return result
                    except Exception as e:
                        # Fixed: use structured kwargs like every other log
                        # call here instead of unformatted "{} {}" text.
                        logger.error(
                            "Router error",
                            agent=agent.name,
                            error=str(e),
                        )
                        iter_span.record_exception(e)
                        return {"error": f"Router error: {e}"}
                else:
                    # No router, so no handoff
                    logger.info(
                        "No handoff router defined, completing chain",
                        agent=agent.name,
                    )
                    context.record(
                        agent.name,
                        result,
                        timestamp=datetime.now().isoformat(),
                        hand_off=None,
                        called_from=previous_agent_name,
                    )
                    iter_span.add_event("chain completed")
                    return result

                # Record the agent run in the context.
                context.record(
                    agent.name,
                    result,
                    timestamp=datetime.now().isoformat(),
                    hand_off=handoff_data.model_dump(),
                    called_from=previous_agent_name,
                )
                previous_agent_name = agent.name
                previous_agent_output = agent.output
                if handoff_data.override_context:
                    context.update(handoff_data.override_context)

                # Prepare the next agent.
                try:
                    agent = registry.get_agent(handoff_data.next_agent)
                    # Guard before mutating the next agent (same ordering
                    # fix as the initial lookup above).
                    if not agent:
                        logger.error(
                            "Next agent not found",
                            agent=handoff_data.next_agent,
                        )
                        iter_span.record_exception(
                            Exception(
                                f"Next agent '{handoff_data.next_agent}' not found"
                            )
                        )
                        return {
                            "error": f"Next agent '{handoff_data.next_agent}' not found."
                        }
                    if handoff_data.hand_off_mode == "add":
                        # "add" mode prepends the previous agent's output
                        # spec to the next agent's input spec.
                        agent.input = previous_agent_output + ", " + agent.input
                    agent.resolve_callables(context=context)

                    context.set_variable(FLOCK_CURRENT_AGENT, agent.name)

                    logger.info("Handing off to next agent", next=agent.name)
                    iter_span.set_attribute("next.agent", agent.name)
                except Exception as e:
                    logger.error("Error during handoff", error=str(e))
                    iter_span.record_exception(e)
                    return {"error": f"Error during handoff: {e}"}

        # If the loop exits unexpectedly, return the initial input.
        return context.get_variable("init_input")
```

### src\flock\workflow\agent_activities.py

- **Lines**: 24
- **Last modified**: 2025-02-18 03:20:41

```py
from temporalio import activity

from flock.core.context.context import FlockContext
from flock.core.flock_agent import FlockAgent


@activity.defn
async def run_declarative_agent_activity(params: dict) -> dict:
    """Temporal activity that rebuilds and evaluates a declarative agent.

    The ``params`` dictionary is expected to contain:
      - ``"agent_data"``: a dict representation of the agent (as produced by .dict()),
      - ``"context_data"``: a dict with the FlockContext state (optional).

    Both objects are reconstructed from their serialized dict forms, after
    which the agent's ``evaluate()`` coroutine is awaited and its result
    returned.
    """
    serialized_agent = params.get("agent_data")
    serialized_context = params.get("context_data", {})
    # Rehydrate the agent and its context before evaluation.
    rebuilt_agent = FlockAgent.from_dict(serialized_agent)
    rebuilt_context = FlockContext.from_dict(serialized_context)
    return await rebuilt_agent.evaluate(rebuilt_context)
```

### src\flock\workflow\temporal_setup.py

- **Lines**: 38
- **Last modified**: 2025-02-18 03:20:41

```py
import asyncio
import uuid

from temporalio.client import Client
from temporalio.worker import Worker


async def create_temporal_client() -> Client:
    """Connect to the local Temporal server and return the client handle."""
    return await Client.connect("localhost:7233")


# Strong references to background worker tasks; without these, asyncio may
# garbage-collect a running task whose only reference was create_task's return.
_background_worker_tasks: set = set()


async def setup_worker(workflow, activity) -> Client:
    """Start a Temporal worker in the background and return its client.

    Args:
        workflow: Workflow class to register with the worker.
        activity: Activity function to register with the worker.

    Returns:
        The connected Temporal client the worker runs on. (The previous
        version was annotated ``-> Client`` but returned ``None``.)
    """
    worker_client = await create_temporal_client()
    worker = Worker(
        worker_client,
        task_queue="flock-queue",
        workflows=[workflow],
        activities=[activity],
    )
    task = asyncio.create_task(worker.run())
    _background_worker_tasks.add(task)
    task.add_done_callback(_background_worker_tasks.discard)
    # Give the worker a moment to start polling before callers proceed.
    await asyncio.sleep(1)
    return worker_client


async def run_worker(client: Client, task_queue: str, workflows, activities):
    """Run a Temporal worker on *task_queue* until it stops."""
    temporal_worker = Worker(
        client,
        task_queue=task_queue,
        workflows=workflows,
        activities=activities,
    )
    await temporal_worker.run()


async def run_activity(client: Client, name: str, func, param):
    """Execute a single Temporal activity and return its result.

    Args:
        client: Connected Temporal client.
        name: Human-readable prefix for the generated run id.
        func: The activity function to execute.
        param: Argument passed to the activity.

    Returns:
        Whatever the activity returns.
    """
    from datetime import timedelta

    run_id = f"{name}_{uuid.uuid4().hex[:4]}"

    # The Temporal Python SDK expects timeouts as timedelta instances, not
    # raw seconds; the previous int value would be rejected by the SDK.
    # (The former `try/except Exception: raise` wrapper was a no-op and was
    # removed.)
    return await client.execute_activity(
        func,
        param,
        id=run_id,
        task_queue="flock-queue",
        start_to_close_timeout=timedelta(minutes=5),
    )
```

### src\flock\workflow\workflow.py

- **Lines**: 58
- **Last modified**: 2025-02-18 03:20:41

```py
from datetime import timedelta

from temporalio import workflow

from flock.core.context.context import FlockContext
from flock.core.logging.logging import get_logger
from flock.workflow.activities import run_agent

# Import activity, passing it through the sandbox without reloading the module


logger = get_logger("workflow")


@workflow.defn
class FlockWorkflow:
    """Temporal workflow that runs a Flock agent chain via the run_agent activity."""

    def __init__(self) -> None:
        # Populated with a FlockContext on each run() invocation.
        self.context = None

    @workflow.run
    async def run(self, context_dict: dict) -> dict:
        """Rehydrate the context, run the agent chain, and record the outcome.

        Args:
            context_dict: Serialized FlockContext state.

        Returns:
            The run_agent activity's result on success. NOTE(review): on
            failure this returns ``self.context`` (a FlockContext, not a
            dict), which conflicts with the declared return type — confirm
            callers handle both shapes.
        """
        self.context = FlockContext.from_dict(context_dict)
        # Stamp workflow identity/start time onto the context for tracing.
        self.context.workflow_id = workflow.info().workflow_id
        self.context.workflow_timestamp = workflow.info().start_time.strftime("%Y-%m-%d %H:%M:%S")

        try:
            logger.info(
                "Starting workflow execution",
                timestamp=self.context.workflow_timestamp,
            )

            # The entire agent chain executes inside one activity invocation.
            result = await workflow.execute_activity(
                run_agent,
                self.context,
                start_to_close_timeout=timedelta(minutes=5),
            )

            # Record success under a well-known context variable.
            self.context.set_variable(
                "flock.result",
                {
                    "result": result,
                    "success": True,
                },
            )

            logger.success("Workflow completed successfully")
            return result

        except Exception as e:
            logger.exception("Workflow execution failed", error=str(e))
            # Record the failure in the context as well.
            self.context.set_variable(
                "flock.result",
                {
                    "result": f"Failed: {e}",
                    "success": False,
                },
            )
            return self.context
```

### tests\__init__.py

- **Lines**: 1
- **Last modified**: 2025-04-02 17:29:19

```py
"""Test package for Flock.""" 
```

### tests\serialization\__init__.py

- **Lines**: 1
- **Last modified**: 2025-04-02 17:29:19

```py
"""Test package for Flock.""" 
```

### tests\serialization\test_yaml_serialization.py

- **Lines**: 328
- **Last modified**: 2025-04-02 17:29:19

```py
# tests/serialization/test_yaml_serialization.py

import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Callable
from dataclasses import dataclass

import pytest
import yaml

# --- Core Flock Imports ---
# Assume these are correctly implemented and importable
from flock.core.flock import Flock
from flock.core.flock_agent import FlockAgent
from flock.core.flock_evaluator import FlockEvaluator, FlockEvaluatorConfig
from flock.core.flock_module import FlockModule, FlockModuleConfig
from flock.core.flock_router import FlockRouter, FlockRouterConfig, HandOffRequest
from flock.core.context.context import FlockContext
from flock.core.serialization.serializable import Serializable

# --- Registry and Decorators ---
from flock.core.flock_registry import (
    flock_component,
    flock_tool,
    flock_type,
    get_registry,
)

# Get registry instance
# Module-level singleton shared by the @flock_component/@flock_tool/@flock_type
# decorators applied below.
FlockRegistry = get_registry()

# --- Mock Components for Testing ---

class MockEvalConfig(FlockEvaluatorConfig):
    # Extra config field used to verify evaluator config round-trips through YAML.
    mock_eval_param: str = "eval_default"

@flock_component # Register this component class
class MockEvaluator(FlockEvaluator, Serializable): # Inherit Serializable
    """Minimal evaluator used to exercise component (de)serialization."""

    name: str = "mock_evaluator"
    config: MockEvalConfig = MockEvalConfig()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize name, config, and concrete type for round-tripping."""
        return {
            "name": self.name,
            "config": self.config.model_dump(),
            "type": type(self).__name__,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MockEvaluator":
        """Rebuild an instance from the dict produced by ``to_dict``."""
        return cls(
            name=data.get("name", "mock_evaluator"),
            config=MockEvalConfig(**data.get("config", {})),
        )

    async def evaluate(self, agent: Any, inputs: Dict[str, Any], tools: List[Any]) -> Dict[str, Any]:
        """Return a deterministic result echoing the input and config."""
        test_input = inputs.get("test_input", "")
        return {"mock_result": f"evaluated {test_input} with {self.config.mock_eval_param}"}

class MockModuleConfig(FlockModuleConfig):
    # Extra config field used to verify module config round-trips through YAML.
    mock_module_param: bool = True

@flock_component # Register this component class
class MockModule(FlockModule, Serializable): # Inherit Serializable
    """Minimal module used to exercise module (de)serialization."""

    config: MockModuleConfig = MockModuleConfig()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize name, config, and concrete type for round-tripping."""
        return {
            "name": self.name,
            "config": self.config.model_dump(),
            "type": type(self).__name__,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MockModule":
        """Rebuild an instance from the dict produced by ``to_dict``."""
        return cls(
            name=data.get("name", "mock_module"),
            config=MockModuleConfig(**data.get("config", {})),
        )

    async def post_evaluate(self, agent: FlockAgent, inputs: dict[str, Any], result: dict[str, Any], context: FlockContext | None = None) -> dict[str, Any]:
        """Tag the result so tests can detect that this module ran."""
        result["mock_module_added"] = self.config.mock_module_param
        return result

class MockRouterConfig(FlockRouterConfig):
    # Name of the agent the mock router always hands off to.
    next_agent_name: str = "default_next"

@flock_component # Register this component class
class MockRouter(FlockRouter, Serializable): # Inherit Serializable
    """Minimal router that always hands off to the configured agent name."""

    config: MockRouterConfig = MockRouterConfig()

    def to_dict(self) -> Dict[str, Any]:
        """Serialize name, config, and concrete type for round-tripping."""
        return {
            "name": self.name,
            "config": self.config.model_dump(),
            "type": type(self).__name__,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MockRouter":
        """Rebuild an instance from the dict produced by ``to_dict``."""
        return cls(
            name=data.get("name", "mock_router"),
            config=MockRouterConfig(**data.get("config", {})),
        )

    async def route(self, current_agent: Any, result: Dict[str, Any], context: FlockContext) -> HandOffRequest:
        """Always route to the agent named in the config."""
        return HandOffRequest(next_agent=self.config.next_agent_name)

# --- Sample Tool Function ---
@flock_tool # Register this tool function
def sample_tool(text: str, capitalize: bool = True) -> str:
    """A sample registered tool."""
    if capitalize:
        return text.upper()
    return text.lower()

# --- Sample Custom Type ---
@flock_type # Register this custom type
@dataclass
class MyCustomData:
    """Registered dataclass referenced by agent signatures in the tests below."""
    id: int
    value: str
    tags: List[str] | None = None

# --- Test Class ---

class TestFlockYAMLSerialization:
    """End-to-end YAML round-trip tests for FlockAgent and Flock instances."""

    def test_agent_serialization_basic(self, tmp_path):
        """Test serializing and deserializing a basic FlockAgent."""
        agent = FlockAgent(
            name="basic_agent",
            model="test_model",
            description="A basic agent",
            input="query: str",
            output="answer: str"
        )
        file_path = tmp_path / "basic_agent.yaml"

        # Act
        agent.to_yaml_file(file_path)
        loaded_agent = FlockAgent.from_yaml_file(file_path)

        # Assert
        assert isinstance(loaded_agent, FlockAgent)
        assert loaded_agent.name == agent.name
        assert loaded_agent.model == agent.model
        assert loaded_agent.description == agent.description
        assert loaded_agent.input == agent.input
        assert loaded_agent.output == agent.output
        assert loaded_agent.use_cache == agent.use_cache # Check defaults
        assert loaded_agent.evaluator is None # Default should be None before factory
        assert loaded_agent.modules == {}
        assert loaded_agent.handoff_router is None
        assert loaded_agent.tools == []

    def test_agent_serialization_with_components(self, tmp_path):
        """Test agent serialization with evaluator, module, and router."""
        # Each mock component carries a non-default config value so the
        # assertions below prove the config survived the round trip.
        evaluator = MockEvaluator(name="mock_evaluator", config=MockEvalConfig(mock_eval_param="test_eval"))
        router = MockRouter(name="mock_router", config=MockRouterConfig(next_agent_name="agent_two"))
        module = MockModule(name="extra_module", config=MockModuleConfig(mock_module_param=False))

        agent = FlockAgent(
            name="component_agent",
            model="test_model_comp",
            evaluator=evaluator,
            handoff_router=router,
            modules={"extra_module": module}
        )
        file_path = tmp_path / "component_agent.yaml"

        # Act
        agent.to_yaml_file(file_path)
        loaded_agent = FlockAgent.from_yaml_file(file_path)

        # Assert
        assert loaded_agent.name == "component_agent"
        assert isinstance(loaded_agent.evaluator, MockEvaluator)
        assert loaded_agent.evaluator.name == "mock_evaluator" # Default name from mock
        assert loaded_agent.evaluator.config.mock_eval_param == "test_eval"

        assert isinstance(loaded_agent.handoff_router, MockRouter)
        assert loaded_agent.handoff_router.name == "mock_router" # Default name from mock
        assert loaded_agent.handoff_router.config.next_agent_name == "agent_two"

        assert "extra_module" in loaded_agent.modules
        assert isinstance(loaded_agent.modules["extra_module"], MockModule)
        assert loaded_agent.modules["extra_module"].config.mock_module_param is False

    def test_agent_serialization_with_tools(self, tmp_path):
        """Test agent serialization with callable tools."""
        agent = FlockAgent(
            name="tool_agent",
            model="tool_model",
            tools=[sample_tool, print] # Include a built-in for testing path gen
        )
        file_path = tmp_path / "tool_agent.yaml"

        # Act
        agent.to_yaml_file(file_path)
        # Optional: Inspect YAML for callable refs
        yaml_content = file_path.read_text()
        assert "__callable_ref__: tests.serialization.test_yaml_serialization.sample_tool" in yaml_content
        assert "__callable_ref__: print" in yaml_content # Check built-in

        loaded_agent = FlockAgent.from_yaml_file(file_path)

        # Assert
        assert loaded_agent.name == "tool_agent"
        assert loaded_agent.tools is not None
        assert len(loaded_agent.tools) == 2
        assert loaded_agent.tools[0] is sample_tool # Check identity after registry lookup
        assert loaded_agent.tools[1] is print       # Check identity for built-in
        # Test calling the loaded tool
        assert loaded_agent.tools[0]("hello") == "HELLO"

    def test_agent_serialization_with_custom_type(self, tmp_path):
        """Test agent serialization where signature uses a registered custom type."""
        agent = FlockAgent(
            name="custom_type_agent",
            model="custom_model",
            input="data: MyCustomData", # Use the registered custom type
            output="result_tags: list[str]"
        )
        file_path = tmp_path / "custom_type_agent.yaml"

        # Act
        agent.to_yaml_file(file_path)
        loaded_agent = FlockAgent.from_yaml_file(file_path)

        # Assert - Primarily check that loading worked and fields are correct
        assert loaded_agent.name == "custom_type_agent"
        assert loaded_agent.input == "data: MyCustomData"
        assert loaded_agent.output == "result_tags: list[str]"
        # We don't directly test type resolution here, but successful loading implies
        # the type string was stored correctly. Resolution is tested implicitly by DSPy mixin tests.

    def test_flock_serialization_basic(self, tmp_path):
        """Test serializing and deserializing a basic Flock instance."""
        flock = Flock(
            model="global_model",
            description="Test Flock Instance"
        )
        file_path = tmp_path / "basic_flock.yaml"

        # Act
        flock.to_yaml_file(file_path)
        loaded_flock = Flock.from_yaml_file(file_path)

        # Assert
        assert isinstance(loaded_flock, Flock)
        assert loaded_flock.model == flock.model
        assert loaded_flock.description == flock.description
        assert loaded_flock.agents == {} # No agents added

    def test_flock_serialization_with_agents(self, tmp_path):
        """Test Flock serialization with multiple agents."""
        flock = Flock(model="flock_model")
        agent1 = FlockAgent(name="agent_one", input="in1", output="out1")
        agent2 = FlockAgent(
            name="agent_two",
            input="out1", output="out2",
            evaluator=MockEvaluator(config=MockEvalConfig(mock_eval_param="agent2_eval"))
        )
        flock.add_agent(agent1)
        flock.add_agent(agent2)

        file_path = tmp_path / "flock_with_agents.yaml"

        # Act
        flock.to_yaml_file(file_path)
        loaded_flock = Flock.from_yaml_file(file_path)

        # Assert
        assert loaded_flock.model == "flock_model"
        assert len(loaded_flock.agents) == 2
        assert "agent_one" in loaded_flock.agents
        assert "agent_two" in loaded_flock.agents

        loaded_a1 = loaded_flock.agents["agent_one"]
        loaded_a2 = loaded_flock.agents["agent_two"]

        assert isinstance(loaded_a1, FlockAgent)
        assert loaded_a1.name == "agent_one"
        assert loaded_a1.input == "in1"

        assert isinstance(loaded_a2, FlockAgent)
        assert loaded_a2.name == "agent_two"
        assert loaded_a2.input == "out1"
        assert isinstance(loaded_a2.evaluator, MockEvaluator)
        assert loaded_a2.evaluator.config.mock_eval_param == "agent2_eval"

    def test_deserialization_missing_type(self, tmp_path):
        """Test deserialization fails gracefully if a component type is missing."""
        bad_yaml = """
name: bad_agent
type: NonExistentComponentType # This type is not registered
config: {}
"""
        file_path = tmp_path / "bad_component.yaml"
        file_path.write_text(bad_yaml)

        # Wrap the agent loading in a structure that references the bad component
        agent_yaml = f"""
name: agent_referencing_bad
evaluator: !include {file_path.name} # Faking include for testing structure
"""
        agent_file_path = tmp_path / "agent_referencing_bad.yaml"
        agent_file_path.write_text(agent_yaml)

        # We need a custom loader potentially if using !include, or simulate the structure
        # For simplicity, load the structure manually simulating include
        bad_component_data = yaml.safe_load(bad_yaml)
        agent_data = yaml.safe_load(agent_yaml.replace(f"!include {file_path.name}", "")) # Remove include tag
        agent_data['evaluator'] = bad_component_data # Manually insert structure

        # Expect KeyError during component deserialization because type is not registered
        with pytest.raises(KeyError, match="Component class 'NonExistentComponentType' not found"):
            FlockAgent.from_dict(agent_data)

    def test_deserialization_missing_callable(self, tmp_path):
        """Test deserialization fails gracefully if a callable reference is missing."""
        agent_yaml = """
name: agent_with_bad_tool
tools:
  - __callable_ref__: non_existent_module.non_existent_function
"""
        file_path = tmp_path / "bad_tool_agent.yaml"
        file_path.write_text(agent_yaml)

        # Expect KeyError during callable deserialization
        with pytest.raises(KeyError, match="Callable 'non_existent_module.non_existent_function' not found"):
            FlockAgent.from_yaml_file(file_path)

    def test_yaml_dump_options(self, tmp_path):
        """Verify that options can be passed to yaml.dump."""
        agent = FlockAgent(name="dump_options_test")
        file_path = tmp_path / "dump_options.yaml"

        # Act - Use sort_keys=True
        agent.to_yaml_file(file_path, sort_keys=True)
        content = file_path.read_text()

        # Assert - Check if keys are roughly sorted (basic check)
        # Note: Exact order depends on implementation details, this is a basic check
        assert content.startswith("description:") or content.startswith("evaluator:") # description might come first alphabetically
```

