| doc_id | THR-API-004 |
|---|---|
| doc_title | Thread System API Reference |
| doc_version | 1.0.0 |
| doc_date | 2026-04-04 |
| doc_status | Released |
| project | thread_system |
| category | API |
SSOT: This document is the single source of truth for Thread System API Reference.
Language: English | 한국어
Complete API documentation for the Thread System framework.
- Overview
- Core Module
- Synchronization Module
- Dependency Injection
- Thread Pool Module
- Typed Thread Pool Module
- Interfaces
- External Modules
- Utilities Module
- Quick Reference
The Thread System framework provides a comprehensive set of classes for building multi-threaded applications with adaptive performance optimization.
- Core Classes (`thread_module` namespace)
  - `thread_base` - Abstract base class for all workers
  - `job` - Unit of work representation
  - `job_queue` - Thread-safe job queue
  - `result<T>` - Error handling type
  - `callback_job` - Job implementation with lambda support
- Standard Thread Pool (`thread_pool_module` namespace)
  - `thread_pool` - Thread pool with adaptive queue optimization
  - `thread_worker` - Worker thread implementation
- Type-based Priority Pool (`typed_thread_pool_module` namespace)
  - `typed_thread_pool_t<T>` - Template-based priority pool
  - `typed_thread_worker_t<T>` - Priority-aware worker
  - `typed_job_t<T>` - Job with type information
  - `job_types` - Default priority enumeration (RealTime, Batch, Background)
- Adaptive Queue System: Automatic switching between mutex and lock-free strategies
- Enhanced Synchronization: Comprehensive synchronization wrappers with timeout support
- Cancellation Support: Cooperative cancellation with linked token hierarchies
- Dependency Injection: Prefer `service_container` (thread-safe DI); `service_registry` is a lightweight header-only alternative
- Error Handling: Modern result pattern for robust error management
- Interface-Based Design: Clean separation of concerns through well-defined interfaces
Abstract base class for all worker threads in the system.
class thread_base {
public:
thread_base(const std::string& thread_title = "thread_base");
virtual ~thread_base();
// Thread control
auto start() -> result_void;
auto stop() -> result_void;
// Configuration
auto set_wake_interval(const std::optional<std::chrono::milliseconds>& interval) -> void;
auto get_wake_interval() const -> std::optional<std::chrono::milliseconds>;
// Status
auto is_running() const -> bool;
auto get_thread_title() const -> std::string;
protected:
// Override in derived classes
virtual auto before_start() -> result_void { return {}; }
virtual auto do_work() -> result_void = 0;
virtual auto after_stop() -> result_void { return {}; }
virtual auto should_continue_work() const -> bool { return false; }
};

Base class for all work units.
class job {
public:
job(const std::string& name = "job");
job(const std::vector<uint8_t>& data, const std::string& name = "data_job");
virtual ~job();
// Core functionality
virtual auto do_work() -> result_void;
// Cancellation support
virtual auto set_cancellation_token(const cancellation_token& token) -> void;
virtual auto get_cancellation_token() const -> cancellation_token;
// Queue association
virtual auto set_job_queue(const std::shared_ptr<job_queue>& queue) -> void;
virtual auto get_job_queue() const -> std::shared_ptr<job_queue>;
// Metadata
auto get_name() const -> std::string;
virtual auto to_string() const -> std::string;
};

Thread-safe queue for job management.
class job_queue : public std::enable_shared_from_this<job_queue> {
public:
job_queue();
virtual ~job_queue();
// Queue operations
[[nodiscard]] virtual auto enqueue(std::unique_ptr<job>&& value) -> result_void;
[[nodiscard]] virtual auto enqueue_batch(std::vector<std::unique_ptr<job>>&& jobs) -> result_void;
[[nodiscard]] virtual auto dequeue() -> result<std::unique_ptr<job>>;
[[nodiscard]] virtual auto dequeue_batch() -> std::deque<std::unique_ptr<job>>;
// State management
virtual auto clear() -> void;
auto stop() -> void;
// Status
[[nodiscard]] auto empty() const -> bool;
[[nodiscard]] auto size() const -> std::size_t;
[[nodiscard]] auto is_stopped() const -> bool;
// Configuration
auto set_notify(bool notify) -> void;
};

Lock-free Multi-Producer Multi-Consumer (MPMC) job queue using the Michael-Scott algorithm.
class lockfree_job_queue : public scheduler_interface,
public queue_capabilities_interface {
public:
lockfree_job_queue();
~lockfree_job_queue();
// Queue operations
[[nodiscard]] auto enqueue(std::unique_ptr<job>&& job) -> result_void;
[[nodiscard]] auto dequeue() -> result<std::unique_ptr<job>>;
[[nodiscard]] auto try_dequeue() -> result<std::unique_ptr<job>>;
// Status (approximate values due to concurrent modifications)
[[nodiscard]] auto empty() const -> bool;
[[nodiscard]] auto size() const -> std::size_t;
// scheduler_interface implementation
auto schedule(std::unique_ptr<job>&& work) -> result_void override;
auto get_next_job() -> result<std::unique_ptr<job>> override;
// Capabilities: lock_free=true, exact_size=false, supports_batch=false
[[nodiscard]] auto get_capabilities() const -> queue_capabilities override;
};

Adaptive queue that switches between mutex and lock-free modes based on requirements.
class adaptive_job_queue : public scheduler_interface,
public queue_capabilities_interface {
public:
enum class mode { mutex, lock_free };
enum class policy { accuracy_first, performance_first, balanced, manual };
explicit adaptive_job_queue(policy p = policy::balanced);
~adaptive_job_queue();
// Queue operations
[[nodiscard]] auto enqueue(std::unique_ptr<job>&& j) -> result_void;
[[nodiscard]] auto dequeue() -> result<std::unique_ptr<job>>;
[[nodiscard]] auto try_dequeue() -> result<std::unique_ptr<job>>;
[[nodiscard]] auto empty() const -> bool;
[[nodiscard]] auto size() const -> std::size_t;
auto clear() -> void;
auto stop() -> void;
[[nodiscard]] auto is_stopped() const -> bool;
// Adaptive-specific API
[[nodiscard]] auto current_mode() const -> mode;
[[nodiscard]] auto current_policy() const -> policy;
auto switch_mode(mode m) -> result_void; // Only if policy is manual
[[nodiscard]] auto get_stats() const -> stats;
// RAII guard for temporary accuracy mode
[[nodiscard]] auto require_accuracy() -> accuracy_guard;
// Capabilities change based on current mode
[[nodiscard]] auto get_capabilities() const -> queue_capabilities override;
};

Factory for creating queue instances based on requirements.
class queue_factory {
public:
/// Queue selection requirements
struct requirements {
bool need_exact_size = false; ///< Require exact size()
bool need_atomic_empty = false; ///< Require atomic empty()
bool prefer_lock_free = false; ///< Prefer lock-free if possible
bool need_batch_operations = false; ///< Require batch enqueue/dequeue
bool need_blocking_wait = false; ///< Require blocking dequeue
};
// Convenience factory methods
[[nodiscard]] static auto create_standard_queue() -> std::shared_ptr<job_queue>;
[[nodiscard]] static auto create_lockfree_queue() -> std::unique_ptr<lockfree_job_queue>;
[[nodiscard]] static auto create_adaptive_queue(
adaptive_job_queue::policy policy = adaptive_job_queue::policy::balanced)
-> std::unique_ptr<adaptive_job_queue>;
// Requirements-based factory
[[nodiscard]] static auto create_for_requirements(const requirements& reqs)
-> std::unique_ptr<scheduler_interface>;
// Environment-optimized auto-selection
[[nodiscard]] static auto create_optimal() -> std::unique_ptr<scheduler_interface>;
};
// Compile-time type selection
template<bool NeedExactSize, bool PreferLockFree = false>
struct queue_type_selector {
using type = /* job_queue | lockfree_job_queue | adaptive_job_queue */;
};
template<bool NeedExactSize, bool PreferLockFree = false>
using queue_t = typename queue_type_selector<NeedExactSize, PreferLockFree>::type;
// Pre-defined type aliases
using accurate_queue_t = queue_t<true, false>; // job_queue
using fast_queue_t = queue_t<false, true>; // lockfree_job_queue
using balanced_queue_t = queue_t<false, false>; // adaptive_job_queue

Usage Examples:
// Direct factory methods
auto standard = queue_factory::create_standard_queue();
auto lockfree = queue_factory::create_lockfree_queue();
auto adaptive = queue_factory::create_adaptive_queue();
// Requirements-based creation
queue_factory::requirements reqs;
reqs.need_exact_size = true;
auto queue = queue_factory::create_for_requirements(reqs); // Returns job_queue
// Environment-optimized selection
auto optimal = queue_factory::create_optimal();
// Compile-time selection
accurate_queue_t accurate; // job_queue
fast_queue_t fast; // lockfree_job_queue
balanced_queue_t balanced; // adaptive_job_queue

Error handling type based on std::expected pattern.
template<typename T>
using result = std::expected<T, error>;
using result_void = result<void>;
struct error {
error_code code;
std::string message;
};
enum class error_code {
success = 0,
invalid_argument,
resource_allocation_failed,
operation_canceled,
thread_start_failure,
job_execution_failed,
queue_full,
queue_empty
};

Comprehensive synchronization wrapper utilities providing enhanced functionality over standard library primitives.
#include "core/sync/include/sync_primitives.h"
// Scoped lock guard with timeout support
class scoped_lock_guard {
public:
template<typename Mutex>
explicit scoped_lock_guard(Mutex& mutex,
std::chrono::milliseconds timeout = std::chrono::milliseconds(0));
[[nodiscard]] bool owns_lock() const noexcept;
explicit operator bool() const noexcept;
};
// Enhanced condition variable wrapper
class condition_variable_wrapper {
public:
template<typename Predicate>
void wait(std::unique_lock<std::mutex>& lock, Predicate pred);
template<typename Rep, typename Period, typename Predicate>
bool wait_for(std::unique_lock<std::mutex>& lock,
const std::chrono::duration<Rep, Period>& rel_time,
Predicate pred);
void notify_one() noexcept;
void notify_all() noexcept;
};
// Atomic flag with wait/notify support
class atomic_flag_wrapper {
public:
void test_and_set(std::memory_order order = std::memory_order_seq_cst) noexcept;
void clear(std::memory_order order = std::memory_order_seq_cst) noexcept;
void wait(bool old, std::memory_order order = std::memory_order_seq_cst) const noexcept;
void notify_one() noexcept;
void notify_all() noexcept;
};
// Shared mutex wrapper for reader-writer locks
class shared_mutex_wrapper {
public:
void lock();
bool try_lock();
void unlock();
void lock_shared();
bool try_lock_shared();
void unlock_shared();
};

Provides cooperative cancellation mechanism for long-running operations.
#include "core/sync/include/cancellation_token.h"
class cancellation_token {
public:
// Create a new cancellation token
static cancellation_token create();
// Create a linked token (cancelled when any parent is cancelled)
static cancellation_token create_linked(
std::initializer_list<cancellation_token> tokens);
// Cancel the operation
void cancel();
// Check cancellation status
[[nodiscard]] bool is_cancelled() const;
// Throw if cancelled
void throw_if_cancelled() const;
// Register callback for cancellation
void register_callback(std::function<void()> callback);
};
// Usage example
auto token = cancellation_token::create();
auto linked = cancellation_token::create_linked({token, other_token});
token.register_callback([]() {
std::cout << "Operation cancelled" << std::endl;
});
// In worker thread
while (!token.is_cancelled()) {
// Do work
token.throw_if_cancelled(); // Throws if cancelled
}

Preferred DI for Thread System: thread-safe container with singleton/factory lifetimes.
#include "interfaces/service_container.h"
#include "interfaces/thread_context.h"
// Register services globally
auto& container = thread_module::service_container::global();
container.register_singleton<thread_module::logger_interface>(my_logger);
container.register_singleton<monitoring_interface::monitoring_interface>(my_monitoring);
// Resolve via thread_context (pools/workers use this context)
thread_module::thread_context ctx; // resolves from global container

Lightweight header-only alternative for simple scenarios.
#include "core/base/include/service_registry.h"
// Register a service instance
thread_module::service_registry::register_service<MyInterface>(std::make_shared<MyImpl>());
// Retrieve a service instance
auto svc = thread_module::service_registry::get_service<MyInterface>();

Main thread pool implementation with adaptive queue optimization.
class thread_pool : public std::enable_shared_from_this<thread_pool> {
public:
thread_pool(const std::string& thread_title = "thread_pool");
virtual ~thread_pool();
// Pool control
auto start() -> std::optional<std::string>;
auto stop(const bool& immediately_stop = false) -> void;
// Job management
auto enqueue(std::unique_ptr<job>&& job) -> std::optional<std::string>;
auto enqueue_batch(std::vector<std::unique_ptr<job>>&& jobs) -> std::optional<std::string>;
// Worker management
auto enqueue(std::unique_ptr<thread_worker>&& worker) -> std::optional<std::string>;
auto enqueue_batch(std::vector<std::unique_ptr<thread_worker>>&& workers) -> std::optional<std::string>;
// Access
auto get_job_queue() -> std::shared_ptr<job_queue>;
auto get_workers() const -> const std::vector<std::unique_ptr<thread_worker>>&;
};

Worker thread implementation with adaptive capabilities.
class thread_worker : public thread_base {
public:
struct worker_statistics {
uint64_t jobs_processed;
uint64_t total_processing_time_ns;
uint64_t batch_operations;
uint64_t avg_processing_time_ns;
};
thread_worker(const std::string& name = "thread_worker");
// Configuration
auto set_batch_processing(bool enabled, size_t batch_size = 32) -> void;
auto get_statistics() const -> worker_statistics;
// Queue association
auto set_job_queue(const std::shared_ptr<job_queue>& queue) -> void;
protected:
auto do_work() -> result_void override;
};

Priority-based thread pool with per-type queues.
template<typename T>
class typed_thread_pool_t : public std::enable_shared_from_this<typed_thread_pool_t<T>> {
public:
typed_thread_pool_t(const std::string& name = "typed_thread_pool");
// Pool control
auto start() -> result_void;
auto stop(bool clear_queue = false) -> result_void;
// Job management
auto enqueue(std::unique_ptr<typed_job_t<T>>&& job) -> result_void;
auto enqueue_batch(std::vector<std::unique_ptr<typed_job_t<T>>>&& jobs) -> result_void;
// Worker management
auto add_worker(std::unique_ptr<typed_thread_worker_t<T>>&& worker) -> result_void;
auto add_workers(std::vector<std::unique_ptr<typed_thread_worker_t<T>>>&& workers) -> result_void;
// Queue strategy (adaptive mode)
auto set_queue_strategy(queue_strategy strategy) -> void;
};

Worker with type-based job handling.
template<typename T>
class typed_thread_worker_t : public thread_base {
public:
typed_thread_worker_t(std::vector<T> types = get_all_job_types(),
const bool& use_time_tag = true,
const thread_context& context = thread_context());
// Type responsibilities
auto types() const -> std::vector<T>;
// Queue association (uses aging queue for priority aging support)
auto set_aging_job_queue(std::shared_ptr<aging_typed_job_queue_t<T>> job_queue) -> void;
// Context management
auto set_context(const thread_context& context) -> void;
auto get_context() const -> const thread_context&;
};

Default priority levels for typed pools.
enum class job_types : uint8_t {
Background = 0, // Lowest priority
Batch = 1, // Medium priority
RealTime = 2 // Highest priority
};

This section provides a comprehensive overview of the public interfaces that decouple components in the Thread System and enable dependency injection.
The thread system uses interface-based design for clean separation of concerns and easy extensibility. These interfaces enable dependency injection and allow components to be easily replaced or extended.
Header: interfaces/executor_interface.h
Base interface for all executor implementations (thread pools).
#include "interfaces/executor_interface.h"
class executor_interface {
public:
virtual ~executor_interface() = default;
// Execute a job
virtual auto execute(std::unique_ptr<thread_module::job>&& work) -> thread_module::result_void = 0;
// Execute multiple jobs
virtual auto execute_batch(std::vector<std::unique_ptr<job>> jobs) -> result_void = 0;
// Shutdown the executor
virtual auto shutdown() -> thread_module::result_void = 0;
// Check if executor is running
[[nodiscard]] virtual auto is_running() const -> bool = 0;
};

Implemented by: implementations/thread_pool/thread_pool
Example:
auto pool = std::make_shared<thread_pool_module::thread_pool>("pool");
pool->enqueue_batch({std::make_unique<thread_pool_module::thread_worker>(false)});
pool->start();
pool->execute(std::make_unique<thread_module::callback_job>([](){ return thread_module::result_void(); }));
pool->shutdown();

Header: interfaces/scheduler_interface.h
Interface for job scheduling strategies.
#include "interfaces/scheduler_interface.h"
class scheduler_interface {
public:
virtual ~scheduler_interface() = default;
// Schedule a job
virtual auto schedule(std::unique_ptr<thread_module::job>&& work) -> thread_module::result_void = 0;
// Get next job to execute
virtual auto get_next_job() -> thread_module::result<std::unique_ptr<thread_module::job>> = 0;
// Check if there are pending jobs
[[nodiscard]] virtual auto has_pending() const -> bool = 0;
};

Implemented by: core/jobs/job_queue and its derivatives (lockfree_job_queue, adaptive_job_queue).
Header: interfaces/queue_capabilities_interface.h
Mixin interface for queue capability introspection. Enables runtime queries about queue characteristics.
#include "interfaces/queue_capabilities_interface.h"
/// Runtime-queryable queue capabilities descriptor
struct queue_capabilities {
bool exact_size = true; ///< size() returns exact value
bool atomic_empty_check = true; ///< empty() is atomic
bool lock_free = false; ///< Uses lock-free algorithms
bool wait_free = false; ///< Uses wait-free algorithms
bool supports_batch = true; ///< Supports batch operations
bool supports_blocking_wait = true; ///< Supports blocking dequeue
bool supports_stop = true; ///< Supports stop() method
};
class queue_capabilities_interface {
public:
virtual ~queue_capabilities_interface() = default;
/// Get capabilities of this queue implementation
[[nodiscard]] virtual auto get_capabilities() const -> queue_capabilities;
/// Convenience methods
[[nodiscard]] auto has_exact_size() const -> bool;
[[nodiscard]] auto has_atomic_empty() const -> bool;
[[nodiscard]] auto is_lock_free() const -> bool;
[[nodiscard]] auto is_wait_free() const -> bool;
[[nodiscard]] auto supports_batch() const -> bool;
[[nodiscard]] auto supports_blocking_wait() const -> bool;
[[nodiscard]] auto supports_stop() const -> bool;
};

Implemented by: job_queue, lockfree_job_queue, adaptive_job_queue
Example:
// Check capabilities at runtime
if (auto* cap = dynamic_cast<queue_capabilities_interface*>(queue.get())) {
if (cap->has_exact_size()) {
// Safe to use size() for decisions
auto count = queue->size();
}
if (cap->is_lock_free()) {
// Queue uses lock-free algorithms
}
}

Header: interfaces/logger_interface.h
- `logger_interface::log(level, message[, file, line, function])`
- `logger_registry::set_logger(std::shared_ptr<logger_interface>)`
- Convenience macros: `THREAD_LOG_INFO("message")`, etc.
Header: interfaces/monitoring_interface.h
Data structures:
- `system_metrics`, `thread_pool_metrics` (supports `pool_name`/`pool_instance_id`), `worker_metrics`
- `metrics_snapshot`
Key methods:
- `update_system_metrics(const system_metrics&)`
- `update_thread_pool_metrics(const thread_pool_metrics&)`
- `update_thread_pool_metrics(const std::string& pool_name, std::uint32_t pool_instance_id, const thread_pool_metrics&)`
- `update_worker_metrics(std::size_t worker_id, const worker_metrics&)`
- `get_current_snapshot()`, `get_recent_snapshots(size_t)`
Utility:
- `null_monitoring` — no-op implementation
- `scoped_timer(std::atomic<std::uint64_t>& target)` — RAII measurement helper
Header: interfaces/monitorable_interface.h
- `auto get_metrics() -> monitoring_interface::metrics_snapshot`
- `void reset_metrics()`
Use to expose component metrics uniformly.
Headers: interfaces/thread_context.h, interfaces/service_container.h
thread_context provides access to:
- `std::shared_ptr<logger_interface> logger()`
- `std::shared_ptr<monitoring_interface::monitoring_interface> monitoring()`
- Helper methods to log and update metrics safely when services are available
service_container is a thread-safe DI container:
- `register_singleton<Interface>(std::shared_ptr<Interface>)`
- `register_factory<Interface>(std::function<std::shared_ptr<Interface>()>, lifetime)`
- `resolve<Interface>() -> std::shared_ptr<Interface>`
Use scoped_service<Interface> to register within a scope.
Header: core/sync/include/error_handling.h, interfaces/error_handler.h
- Strongly typed `error_code`, `error`, `result<T>`/`result_void`
- `error_handler` interface and `default_error_handler` implementation
Header: interfaces/crash_handler.h
- Global `crash_handler::instance()` with registration of crash callbacks and cleanup routines
- Configurable safety level, core dumps, stack trace, and log path
- RAII helper `scoped_crash_callback`
Headers under implementations/typed_thread_pool/include:
- `typed_job_interface`, `job_types`, `type_traits`
- Lock-free/adaptive typed queues and typed workers/pools
These enable per-type routing and priority scheduling for heterogeneous workloads.
The following modules have been moved to separate projects for better modularity and reusability:
Repository: https://github.com/kcenon/logger_system
The asynchronous logging functionality is now available as a separate project that implements the logger_interface. It provides:
- High-performance asynchronous logging with multiple output targets
- Structured logging with type-safe formatting
- Log level filtering with bitwise-enabled categories
- File rotation and compression support
- Thread-safe operations with minimal performance impact
Repository: https://github.com/kcenon/monitoring_system
The performance monitoring and metrics collection system is now a separate project that implements the monitoring_interface. It offers:
- Real-time performance metrics collection and analysis
- System resource monitoring (CPU, memory, thread usage)
- Thread pool statistics tracking and reporting
- Historical data retention with configurable duration
- Extensible metrics framework for custom measurements
These external modules integrate seamlessly through the interface pattern:
// Example: Register external logger and create a pool
#include "interfaces/service_container.h"
#include "implementations/thread_pool/include/thread_pool.h"
#include <logger_system/logger.h> // External project
// Register logger with the global container
thread_module::service_container::global()
.register_singleton<thread_module::logger_interface>(
std::make_shared<logger_module::logger>());
// Create a pool (thread_context resolves logger automatically)
auto pool = std::make_shared<thread_pool_module::thread_pool>("MyPool");

Macros for reducing formatter code duplication.
#include "utilities/core/formatter_macros.h"
// Generate formatter specializations for custom types
DECLARE_FORMATTER(my_namespace::my_class)

String conversion utilities.
namespace convert_string {
auto to_string(const std::wstring& str) -> std::string;
auto to_wstring(const std::string& str) -> std::wstring;
auto to_utf8(const std::wstring& str) -> std::string;
auto from_utf8(const std::string& str) -> std::wstring;
}

#include "implementations/thread_pool/include/thread_pool.h"
#include "core/jobs/include/callback_job.h"
// Create and configure pool
auto pool = std::make_shared<thread_pool>("MyPool");
// Add workers
for (int i = 0; i < 4; ++i) {
pool->enqueue(std::make_unique<thread_worker>());
}
// Start pool
if (auto error = pool->start(); error.has_value()) {
std::cerr << "Failed to start pool: " << *error << std::endl;
return;
}
// Submit jobs with cancellation support
auto token = cancellation_token::create();
auto job = std::make_unique<callback_job>([]() -> result_void {
// Do work
return {};
});
job->set_cancellation_token(token);
pool->enqueue(std::move(job));
// Cancel if needed
token.cancel();
// Stop pool
pool->stop();

// Create typed pool with priorities
auto typed_pool = std::make_shared<typed_thread_pool_t<job_types>>();
// Add specialized workers
auto realtime_worker = std::make_unique<typed_thread_worker_t<job_types>>();
realtime_worker->set_responsibilities({job_types::RealTime});
typed_pool->add_worker(std::move(realtime_worker));
// Start pool
typed_pool->start();
// Submit priority job
auto priority_job = std::make_unique<callback_typed_job<job_types>>(
job_types::RealTime,
[]() -> result_void {
// High priority work
return {};
}
);
typed_pool->enqueue(std::move(priority_job));

// Using result type for error handling
auto process_with_result() -> result_void {
auto pool = std::make_shared<thread_pool>();
// Start pool with error checking
if (auto error = pool->start(); error.has_value()) {
return thread_module::error{
thread_module::error_code::thread_start_failure,
*error
};
}
// Submit job with error handling
auto job = std::make_unique<callback_job>([]() -> result_void {
// Simulate potential failure
if (some_condition_fails()) {
return thread_module::error{
thread_module::error_code::job_execution_failed,
"Operation failed due to invalid state"
};
}
return {}; // Success
});
if (auto error = pool->enqueue(std::move(job)); error.has_value()) {
return thread_module::error{
thread_module::error_code::queue_full,
*error
};
}
return {}; // Success
}

// Configure worker for batch processing
auto worker = std::make_unique<thread_worker>("BatchWorker");
worker->set_batch_processing(true, 64); // Enable with batch size 64
// Submit multiple jobs for efficient batch processing
std::vector<std::unique_ptr<job>> batch;
for (int i = 0; i < 1000; ++i) {
batch.push_back(std::make_unique<callback_job>([i]() -> result_void {
// Process item i
return {};
}));
}
// Enqueue entire batch
pool->enqueue_batch(std::move(batch));

#include "interfaces/service_container.h"
#include "implementations/thread_pool/include/thread_pool.h"
// Register thread pool as executor service
auto pool = std::make_shared<thread_pool_module::thread_pool>("AppPool");
thread_module::service_container::global()
.register_singleton<thread_module::executor_interface>(pool);
// Optional: register external logger implementation
// thread_module::service_container::global()
// .register_singleton<thread_module::logger_interface>(
// std::make_shared<external_logger>());
// Use services later via thread_context or resolve<>()

#include "core/sync/include/sync_primitives.h"
// Using scoped lock with timeout
std::mutex resource_mutex;
{
scoped_lock_guard lock(resource_mutex, std::chrono::milliseconds(100));
if (lock.owns_lock()) {
// Successfully acquired lock within timeout
// Access protected resource
} else {
// Timeout occurred
// Handle timeout scenario
}
}
// Using enhanced condition variable
condition_variable_wrapper cv;
std::mutex cv_mutex;
bool ready = false;
// Producer thread
{
std::unique_lock<std::mutex> lock(cv_mutex);
ready = true;
cv.notify_one();
}
// Consumer thread with timeout
{
std::unique_lock<std::mutex> lock(cv_mutex);
if (cv.wait_for(lock, std::chrono::seconds(1), []{ return ready; })) {
// Condition met within timeout
} else {
// Timeout occurred
}
}Last Updated: 2025-10-20