Skip to content

Loggers

Logger configuration and management for experiment tracking.

LoggerConfig

Configuration for a single logger backend.

API Reference

autotimm.LoggerConfig dataclass

Configuration for a single logger backend.

The backend parameter is required; params defaults to an empty dict, so only explicitly supplied parameters are passed to the logger constructor.

Parameters:

Name Type Description Default
backend str

Logger backend type. One of "tensorboard", "mlflow", "wandb", or "csv".

required
params dict[str, Any]

Parameters passed to the logger constructor. Required keys depend on the backend.

dict()
Example

>>> config = LoggerConfig(
...     backend="tensorboard",
...     params={"save_dir": "logs", "name": "experiment_1"},
... )

Source code in src/autotimm/core/loggers.py
@dataclass
class LoggerConfig:
    """Configuration for a single logger backend.

    The ``backend`` field is mandatory and is normalized to lower case on
    construction; ``params`` defaults to an empty dict, so only explicitly
    supplied keyword arguments reach the logger constructor.

    Parameters:
        backend: Logger backend type. One of ``"tensorboard"``, ``"mlflow"``,
            ``"wandb"``, or ``"csv"`` (matched case-insensitively).
        params: Parameters passed to the logger constructor. Required keys
            depend on the backend.

    Raises:
        ValueError: If ``backend`` is empty or not a recognized backend.

    Example:
        >>> config = LoggerConfig(
        ...     backend="tensorboard",
        ...     params={"save_dir": "logs", "name": "experiment_1"},
        ... )
    """

    backend: str
    params: dict[str, Any] = field(default_factory=dict)

    def __post_init__(self) -> None:
        # Reject an empty/falsy backend before attempting normalization.
        if not self.backend:
            raise ValueError("backend is required")
        # Lower-case once so all downstream dispatch is case-insensitive.
        self.backend = self.backend.lower()
        recognized = {"csv", "mlflow", "tensorboard", "wandb"}
        if self.backend in recognized:
            return
        raise ValueError(
            f"Unknown backend '{self.backend}'. "
            f"Valid backends: {', '.join(sorted(recognized))}"
        )

Usage Examples

TensorBoard

from autotimm import LoggerConfig

tb = LoggerConfig(
    backend="tensorboard",
    params={"save_dir": "logs", "name": "experiment_1"},
)

Weights & Biases

wandb = LoggerConfig(
    backend="wandb",
    params={
        "project": "my-project",
        "name": "run-1",
        "tags": ["resnet", "cifar10"],
    },
)

MLflow

mlflow = LoggerConfig(
    backend="mlflow",
    params={
        "experiment_name": "cifar10-classification",
        "tracking_uri": "http://localhost:5000",
    },
)

CSV Logger

csv = LoggerConfig(
    backend="csv",
    params={"save_dir": "logs/csv", "name": "metrics"},
)

Parameters

Parameter Type Default Description
backend str Required Logger type
params dict {} Backend-specific params

Supported Backends

Backend Required Params Install
tensorboard save_dir pip install autotimm[tensorboard]
csv save_dir Built-in
wandb project pip install autotimm[wandb]
mlflow experiment_name pip install autotimm[mlflow]

LoggerManager

Manages multiple PyTorch Lightning loggers.

API Reference

autotimm.LoggerManager

Manages multiple PyTorch Lightning loggers.

This class creates and manages multiple logger instances from explicit configurations. No default values are provided - all configuration must be specified by the user.

Parameters:

Name Type Description Default
configs list[LoggerConfig]

List of LoggerConfig objects defining each logger.

required

Attributes:

Name Type Description
loggers list[Logger]

List of instantiated PyTorch Lightning logger objects.

Example

>>> manager = LoggerManager(
...     configs=[
...         LoggerConfig(
...             backend="tensorboard",
...             params={"save_dir": "logs/tb", "name": "run_1"},
...         ),
...         LoggerConfig(
...             backend="wandb",
...             params={"project": "my_project", "name": "run_1"},
...         ),
...     ]
... )
>>> trainer = pl.Trainer(logger=manager.loggers)

Source code in src/autotimm/core/loggers.py
class LoggerManager:
    """Manages multiple PyTorch Lightning loggers.

    This class creates and manages multiple logger instances from explicit
    configurations. No default values are provided - all configuration
    must be specified by the user.

    Parameters:
        configs: List of ``LoggerConfig`` objects defining each logger.

    Attributes:
        loggers: List of instantiated PyTorch Lightning logger objects.

    Raises:
        ValueError: If ``configs`` is empty, or a config is missing
            parameters required by its backend.
        ImportError: If an optional backend dependency is not installed.

    Example:
        >>> manager = LoggerManager(
        ...     configs=[
        ...         LoggerConfig(
        ...             backend="tensorboard",
        ...             params={"save_dir": "logs/tb", "name": "run_1"},
        ...         ),
        ...         LoggerConfig(
        ...             backend="wandb",
        ...             params={"project": "my_project", "name": "run_1"},
        ...         ),
        ...     ]
        ... )
        >>> trainer = pl.Trainer(logger=manager.loggers)
    """

    def __init__(self, configs: list[LoggerConfig]) -> None:
        if not configs:
            raise ValueError("At least one LoggerConfig is required")

        # Defensive copy: mutating the caller's list afterwards must not
        # desynchronize the stored configs from the already-built loggers.
        self._configs = list(configs)
        self._loggers: list[Logger] = []
        # Build eagerly so configuration errors surface at construction
        # time rather than at first use inside a Trainer.
        self._initialize_loggers()

    def _initialize_loggers(self) -> None:
        """Initialize all logger instances from configs."""
        for config in self._configs:
            self._loggers.append(self._create_logger(config))

    def _create_logger(self, config: LoggerConfig) -> Logger:
        """Create a single logger instance from config.

        Raises:
            ImportError: If the backend's optional dependency is missing.
            ValueError: If required backend parameters are absent, or the
                backend name is unrecognized.
        """
        backend = config.backend
        params = config.params

        if backend == "tensorboard":
            # Guard this import like the other optional backends so users
            # get an actionable install hint instead of a bare ImportError
            # (the docs list tensorboard as an optional extra).
            try:
                from pytorch_lightning.loggers import TensorBoardLogger
            except ImportError:
                raise ImportError(
                    "TensorBoard logger requires tensorboard. "
                    "Install with: pip install tensorboard"
                ) from None
            self._validate_required_params(params, ["save_dir"], "tensorboard")
            return TensorBoardLogger(**params)

        if backend == "mlflow":
            try:
                from pytorch_lightning.loggers import MLFlowLogger
            except ImportError:
                raise ImportError(
                    "MLflow logger requires mlflow. Install with: pip install mlflow"
                ) from None
            self._validate_required_params(params, ["experiment_name"], "mlflow")
            return MLFlowLogger(**params)

        if backend == "wandb":
            try:
                from pytorch_lightning.loggers import WandbLogger
            except ImportError:
                raise ImportError(
                    "W&B logger requires wandb. Install with: pip install wandb"
                ) from None
            self._validate_required_params(params, ["project"], "wandb")
            return WandbLogger(**params)

        if backend == "csv":
            from pytorch_lightning.loggers import CSVLogger

            self._validate_required_params(params, ["save_dir"], "csv")
            return CSVLogger(**params)

        # Unreachable for configs validated by LoggerConfig.__post_init__,
        # kept as a defensive guard for hand-built config objects.
        raise ValueError(f"Unknown backend: {backend}")

    @staticmethod
    def _validate_required_params(
        params: dict[str, Any],
        required: list[str],
        backend: str,
    ) -> None:
        """Validate that required parameters are present."""
        missing = [key for key in required if key not in params]
        if missing:
            raise ValueError(
                f"Missing required parameters for {backend} logger: {', '.join(missing)}"
            )

    @property
    def loggers(self) -> list[Logger]:
        """Return list of instantiated loggers for use with pl.Trainer."""
        return self._loggers

    @property
    def configs(self) -> list[LoggerConfig]:
        """Return the configurations used to create the loggers."""
        return self._configs

    def __len__(self) -> int:
        """Return the number of loggers."""
        return len(self._loggers)

    def __iter__(self):
        """Iterate over the loggers."""
        return iter(self._loggers)

    def __getitem__(self, index: int) -> Logger:
        """Get a logger by index."""
        return self._loggers[index]

    def get_logger_by_backend(self, backend: str) -> Logger | None:
        """Get the first logger matching the given backend type.

        Parameters:
            backend: Backend name to search for (case-insensitive).

        Returns:
            The first matching logger, or None if not found.
        """
        # Normalize case to match LoggerConfig, which lowercases backends.
        backend = backend.lower()
        for config, logger in zip(self._configs, self._loggers):
            if config.backend == backend:
                return logger
        return None

loggers property

loggers: list[Logger]

Return list of instantiated loggers for use with pl.Trainer.

configs property

configs: list[LoggerConfig]

Return the configurations used to create the loggers.

__init__

__init__(configs: list[LoggerConfig]) -> None
Source code in src/autotimm/core/loggers.py
def __init__(self, configs: list[LoggerConfig]) -> None:
    # Fail fast on an empty config list: a manager with zero loggers
    # would silently record nothing.
    if not configs:
        raise ValueError("At least one LoggerConfig is required")

    self._configs = configs
    self._loggers: list[Logger] = []
    # Build eagerly so configuration errors surface at construction
    # time, not at first use inside a Trainer.
    self._initialize_loggers()

get_logger_by_backend

get_logger_by_backend(backend: str) -> Logger | None

Get the first logger matching the given backend type.

Parameters:

Name Type Description Default
backend str

Backend name to search for.

required

Returns:

Type Description
Logger | None

The first matching logger, or None if not found.

Source code in src/autotimm/core/loggers.py
def get_logger_by_backend(self, backend: str) -> Logger | None:
    """Get the first logger matching the given backend type.

    Parameters:
        backend: Backend name to search for (case-insensitive).

    Returns:
        The first matching logger, or None if not found.
    """
    # Normalize case to match LoggerConfig, which lowercases backends.
    backend = backend.lower()
    for config, logger in zip(self._configs, self._loggers):
        if config.backend == backend:
            return logger
    return None

Usage Examples

Basic Usage

from autotimm import LoggerConfig, LoggerManager

manager = LoggerManager(configs=[
    LoggerConfig(backend="tensorboard", params={"save_dir": "logs/tb"}),
    LoggerConfig(backend="csv", params={"save_dir": "logs/csv"}),
])

With AutoTrainer

from autotimm import AutoTrainer

trainer = AutoTrainer(max_epochs=10, logger=manager)

Access Loggers

# Get all loggers
all_loggers = manager.loggers

# Get by backend
tb_logger = manager.get_logger_by_backend("tensorboard")
csv_logger = manager.get_logger_by_backend("csv")

# Iterate
for logger in manager:
    print(type(logger))

# Length
print(f"Number of loggers: {len(manager)}")

Parameters

Parameter Type Description
configs list[LoggerConfig] List of logger configs

Methods

Method Returns Description
loggers list[Logger] All instantiated loggers
configs list[LoggerConfig] Original configs
get_logger_by_backend(name) Logger \| None Find logger by backend
len(manager) int Number of loggers
iter(manager) Iterator Iterate over loggers
manager[i] Logger Get logger by index

Backend Parameters

TensorBoard

LoggerConfig(
    backend="tensorboard",
    params={
        "save_dir": "logs",           # Required
        "name": "experiment",         # Subdirectory
        "version": "v1",              # Version string
        "log_graph": True,            # Log model graph
        "default_hp_metric": False,   # HP metric logging
        "prefix": "",                 # Metric prefix
        "sub_dir": None,              # Additional subdirectory
    },
)

Weights & Biases

LoggerConfig(
    backend="wandb",
    params={
        "project": "my-project",      # Required
        "name": "run-1",              # Run name
        "id": None,                   # Run ID (for resuming)
        "tags": ["tag1", "tag2"],     # Tags
        "notes": "Experiment notes",  # Description
        "group": "experiment-group",  # Group runs
        "job_type": "training",       # Job type
        "entity": None,               # Team/user
        "save_dir": "wandb_logs",     # Local save directory
        "offline": False,             # Offline mode
        "log_model": False,           # Log model artifacts
        "prefix": "",                 # Metric prefix
    },
)

MLflow

LoggerConfig(
    backend="mlflow",
    params={
        "experiment_name": "exp",     # Required
        "run_name": "run-1",          # Run name
        "tracking_uri": None,         # MLflow server URL
        "tags": {"env": "dev"},       # Tags
        "save_dir": "mlruns",         # Local artifacts
        "log_model": False,           # Log model
        "prefix": "",                 # Metric prefix
        "artifact_location": None,    # Artifact storage
        "run_id": None,               # For resuming
    },
)

CSV Logger

LoggerConfig(
    backend="csv",
    params={
        "save_dir": "logs",           # Required
        "name": "metrics",            # Subdirectory
        "version": None,              # Auto-increment if None
        "prefix": "",                 # Metric prefix
        "flush_logs_every_n_steps": 100,
    },
)

Full Example

from autotimm import (
    AutoTrainer,
    ImageClassifier,
    ImageDataModule,
    LoggerConfig,
    LoggerManager,
    MetricConfig,
)

# Data
data = ImageDataModule(
    data_dir="./data",
    dataset_name="CIFAR10",
    image_size=224,
    batch_size=64,
)

# Metrics
metrics = [
    MetricConfig(
        name="accuracy",
        backend="torchmetrics",
        metric_class="Accuracy",
        params={"task": "multiclass"},
        stages=["train", "val", "test"],
        prog_bar=True,
    ),
]

# Model
model = ImageClassifier(
    backbone="resnet50",
    num_classes=10,
    metrics=metrics,
)

# Multiple loggers
logger_manager = LoggerManager(configs=[
    LoggerConfig(
        backend="tensorboard",
        params={"save_dir": "logs/tb", "name": "cifar10"},
    ),
    LoggerConfig(
        backend="csv",
        params={"save_dir": "logs/csv"},
    ),
    LoggerConfig(
        backend="wandb",
        params={"project": "cifar10-experiments", "name": "resnet50-run"},
    ),
])

# Trainer
trainer = AutoTrainer(
    max_epochs=10,
    logger=logger_manager,
    checkpoint_monitor="val/accuracy",
)

# Train
trainer.fit(model, datamodule=data)

# Access specific logger after training
tb = logger_manager.get_logger_by_backend("tensorboard")
print(f"TensorBoard log dir: {tb.log_dir}")