Lesson 35 - Get Compute Auth Token Working

This commit is contained in:
Norman Lansing
2026-02-28 12:32:28 -05:00
parent 1d477ee42a
commit 4fde462bce
7743 changed files with 1397833 additions and 18 deletions

View File

@@ -0,0 +1,81 @@
// Copyright (C) 2011-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
#define BOOST_LOCKFREE_DETAIL_ATOMIC_HPP

#include <boost/config.hpp>

// Select between std::atomic and boost::atomic.  BOOST_LOCKFREE_NO_HDR_ATOMIC
// is defined when the toolchain's <atomic> is not known to be usable; defining
// BOOST_LOCKFREE_FORCE_STD_ATOMIC skips detection and always uses <atomic>.
#ifndef BOOST_LOCKFREE_FORCE_STD_ATOMIC
#define BOOST_LOCKFREE_NO_HDR_ATOMIC

// MSVC supports atomic<> from version 2012 onwards.
#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

// GCC supports atomic<> from version 4.8 onwards.
#if (BOOST_GCC >= 40800) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

// Apple clang is 2 major versions ahead, but in fact 1 minor version behind
#ifdef BOOST_CLANG
#define BOOST_ATOMIC_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)

#if defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 60100) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

#if !defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 30600) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif

#undef BOOST_ATOMIC_CLANG_VERSION

#endif // BOOST_CLANG

#endif // BOOST_LOCKFREE_FORCE_STD_ATOMIC

#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
#include <boost/atomic.hpp>
#else
#include <atomic>
#endif

namespace boost {
namespace lockfree {
namespace detail {

// Re-export the selected backend under one set of names so the rest of
// boost.lockfree does not care which implementation was chosen.
#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
using boost::atomic;
using boost::memory_order_acquire;
using boost::memory_order_consume;
using boost::memory_order_relaxed;
using boost::memory_order_release;
#else
using std::atomic;
using std::memory_order_acquire;
using std::memory_order_consume;
using std::memory_order_relaxed;
using std::memory_order_release;
#endif

}

using detail::atomic;
using detail::memory_order_acquire;
using detail::memory_order_consume;
using detail::memory_order_relaxed;
using detail::memory_order_release;

}}

#endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */

View File

@@ -0,0 +1,83 @@
// boost lockfree: copy_payload helper
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#define BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#include <boost/mpl/if.hpp>
#include <boost/type_traits/is_convertible.hpp>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4512) // assignment operator could not be generated
#endif
namespace boost {
namespace lockfree {
namespace detail {
/// Copy policy used when Source implicitly converts to Target:
/// a plain assignment suffices.
struct copy_convertible
{
    template <typename Source, typename Target>
    static void copy(Source & from, Target & to)
    {
        to = from;
    }
};
/// Copy policy used when Target is not implicitly convertible from Source:
/// build a temporary Target explicitly, then assign it.
struct copy_constructible_and_copyable
{
    template <typename Source, typename Target>
    static void copy(Source & from, Target & to)
    {
        to = Target(from);
    }
};
/// Copy t into u, dispatching at compile time: if T is implicitly
/// convertible to U, assign directly; otherwise construct a temporary U
/// from t (explicit conversion) and assign that.
template <typename T, typename U>
void copy_payload(T & t, U & u)
{
    typedef typename boost::mpl::if_<typename boost::is_convertible<T, U>::type,
                                     copy_convertible,
                                     copy_constructible_and_copyable
                                    >::type copy_type;
    copy_type::copy(t, u);
}
/// Consumer functor that copies each consumed element into a
/// caller-provided output slot (kept by reference).
template <typename T>
struct consume_via_copy
{
    consume_via_copy(T & out):
        out_(out)
    {}

    template <typename Element>
    void operator()(Element & element)
    {
        // convertible-vs-constructible dispatch is handled by copy_payload
        copy_payload(element, out_);
    }

    T & out_;
};
/// Consumer functor that accepts any element and discards it unchanged —
/// used when the caller only wants to pop, not read.
struct consume_noop
{
    template <typename Element>
    void operator()(const Element &)
    {
    }
};
}}}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */

View File

@@ -0,0 +1,649 @@
// lock-free freelist
//
// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#include <limits>
#include <memory>
#include <stdexcept>

#include <boost/aligned_storage.hpp>
#include <boost/array.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/throw_exception.hpp>
#include <boost/type_traits/alignment_of.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4100) // unreferenced formal parameter
#pragma warning(disable: 4127) // conditional expression is constant
#endif
namespace boost {
namespace lockfree {
namespace detail {
/** Node-based lock-free freelist (an intrusive Treiber stack).
 *
 *  Deallocated T slots are linked into a stack: each free slot is
 *  reinterpreted as a freelist_node whose first bytes hold the next
 *  pointer.  Tagged pointers guard against the ABA problem.  Memory is
 *  handed back to the allocator only on destruction.
 */
template <typename T,
          typename Alloc = std::allocator<T>
         >
class freelist_stack:
    Alloc
{
    struct freelist_node
    {
        tagged_ptr<freelist_node> next;
    };

    typedef tagged_ptr<freelist_node> tagged_node_ptr;

public:
    typedef tagged_ptr<T> tagged_node_handle;

    /** Construct the freelist, pre-populating it with n nodes. */
    template <typename Allocator>
    freelist_stack (Allocator const & alloc, std::size_t n = 0):
        Alloc(alloc),
        pool_(tagged_node_ptr(NULL))
    {
        for (std::size_t i = 0; i != n; ++i) {
            T * node = Alloc::allocate(1);
#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(node);
#else
            deallocate<false>(node);
#endif
        }
    }

    /** Allocate count additional nodes and push them onto the freelist. */
    template <bool ThreadSafe>
    void reserve (std::size_t count)
    {
        for (std::size_t i = 0; i != count; ++i) {
            T * node = Alloc::allocate(1);
            deallocate<ThreadSafe>(node);
        }
    }

    /** Pop a slot (or, when not Bounded, allocate one) and default-construct
     *  a T in it.  Returns NULL when Bounded and the freelist is exhausted. */
    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T();
        return node;
    }

    /** As construct(), forwarding one constructor argument. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg);
        return node;
    }

    /** As construct(), forwarding two constructor arguments. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        T * node = allocate<ThreadSafe, Bounded>();
        if (node)
            new(node) T(arg1, arg2);
        return node;
    }

    /** Run T's destructor and return the slot to the freelist. */
    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_ptr)
    {
        T * n = tagged_ptr.get_ptr();
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n);
    }

    /** Walk the freelist and return every slot to the allocator.
     *  Not thread-safe: must not run concurrently with any other operation. */
    ~freelist_stack(void)
    {
        tagged_node_ptr current = pool_.load();

        while (current) {
            freelist_node * current_ptr = current.get_ptr();
            if (current_ptr)
                current = current_ptr->next;
            Alloc::deallocate((T*)current_ptr, 1);
        }
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    /* node-based freelists use the raw pointer itself as the node handle */
    T * get_handle(T * pointer) const
    {
        return pointer;
    }

    T * get_handle(tagged_node_handle const & handle) const
    {
        return get_pointer(handle);
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return tptr.get_ptr();
    }

    T * get_pointer(T * pointer) const
    {
        return pointer;
    }

    T * null_handle(void) const
    {
        return NULL;
    }

protected: // allow use from subclasses
    /** Pop a slot; falls back to the allocator when not Bounded. */
    template <bool ThreadSafe, bool Bounded>
    T * allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl<Bounded>();
        else
            return allocate_impl_unsafe<Bounded>();
    }

private:
    // lock-free pop: CAS loop on the stack head; the tag is bumped via
    // get_next_tag() so a recycled head is not mistaken for the old one (ABA)
    template <bool Bounded>
    T * allocate_impl (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);

        for(;;) {
            if (!old_pool.get_ptr()) {
                // freelist empty: grow from the allocator unless bounded
                if (!Bounded)
                    return Alloc::allocate(1);
                else
                    return 0;
            }

            freelist_node * new_pool_ptr = old_pool->next.get_ptr();
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool)) {
                void * ptr = old_pool.get_ptr();
                return reinterpret_cast<T*>(ptr);
            }
        }
    }

    // single-threaded pop: plain load/store, no CAS needed
    template <bool Bounded>
    T * allocate_impl_unsafe (void)
    {
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

        if (!old_pool.get_ptr()) {
            if (!Bounded)
                return Alloc::allocate(1);
            else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        void * ptr = old_pool.get_ptr();
        return reinterpret_cast<T*>(ptr);
    }

protected:
    /** Push a (destroyed) slot back onto the freelist. */
    template <bool ThreadSafe>
    void deallocate (T * n)
    {
        if (ThreadSafe)
            deallocate_impl(n);
        else
            deallocate_impl_unsafe(n);
    }

private:
    // lock-free push: CAS loop installing n as the new stack head;
    // the tag is left unchanged (only pops need to bump it)
    void deallocate_impl (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_consume);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        for(;;) {
            tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
            new_pool->next.set_ptr(old_pool.get_ptr());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    // single-threaded push
    void deallocate_impl_unsafe (T * n)
    {
        void * node = n;
        tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
        freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        pool_.store(new_pool, memory_order_relaxed);
    }

    atomic<tagged_node_ptr> pool_;
};
/** 16bit index plus 16bit ABA tag — the array-based counterpart of
 *  tagged_ptr.  The pair fits in 32bit, so a single-width CAS can update
 *  it atomically.
 */
class tagged_index
{
public:
    typedef boost::uint16_t tag_t;
    typedef boost::uint16_t index_t;

    /** uninitialized constructor */
    tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_index(tagged_index const & rhs):
        index(rhs.index), tag(rhs.tag)
    {}
#else
    tagged_index(tagged_index const & rhs) = default;
#endif

    explicit tagged_index(index_t i, tag_t t = 0):
        index(i), tag(t)
    {}

    /** index access */
    /* @{ */
    index_t get_index() const
    {
        return index;
    }

    void set_index(index_t i)
    {
        index = i;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return tag;
    }

    /** next tag value; wraps around at 2**16 */
    tag_t get_next_tag() const
    {
        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
        return next;
    }

    void set_tag(tag_t t)
    {
        tag = t;
    }
    /* @} */

    bool operator==(tagged_index const & rhs) const
    {
        return (index == rhs.index) && (tag == rhs.tag);
    }

    bool operator!=(tagged_index const & rhs) const
    {
        return !operator==(rhs);
    }

protected:
    index_t index;
    tag_t tag;
};
template <typename T,
std::size_t size>
struct compiletime_sized_freelist_storage
{
// array-based freelists only support a 16bit address space.
BOOST_STATIC_ASSERT(size < 65536);
boost::array<char, size * sizeof(T)> data;
// unused ... only for API purposes
template <typename Allocator>
compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)
{}
T * nodes(void) const
{
return reinterpret_cast<T*>(const_cast<char*>(data.data()));
}
std::size_t node_count(void) const
{
return size;
}
};
/** Node storage sized at construction time.
 *
 *  Allocates an uninitialized array of `count` node slots from the
 *  allocator; the owning freelist placement-constructs into it.
 */
template <typename T,
          typename Alloc = std::allocator<T> >
struct runtime_sized_freelist_storage:
    Alloc
{
    T * nodes_;
    std::size_t node_count_;

    /** @throws std::runtime_error if count exceeds the 16bit index range */
    template <typename Allocator>
    runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
        Alloc(alloc), node_count_(count)
    {
        // array-based freelists address nodes with a 16bit index
        if (count > 65535)
            boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
        nodes_ = Alloc::allocate(count);
    }

    ~runtime_sized_freelist_storage(void)
    {
        // releases the raw storage only; running node destructors is the
        // owning freelist's responsibility
        Alloc::deallocate(nodes_, node_count_);
    }

    T * nodes(void) const
    {
        return nodes_;
    }

    std::size_t node_count(void) const
    {
        return node_count_;
    }
};
/** Lock-free freelist over a fixed array of nodes, addressed by 16bit index.
 *
 *  Free slots form an intrusive stack: each free slot's first bytes are
 *  reinterpreted as a tagged_index linking to the next free slot.  Using
 *  indices instead of pointers keeps the head in 32bit, so lock-freedom
 *  does not depend on double-width CAS.
 */
template <typename T,
          typename NodeStorage = runtime_sized_freelist_storage<T>
         >
class fixed_size_freelist:
    NodeStorage
{
    struct freelist_node
    {
        tagged_index next;
    };

    typedef tagged_index::index_t index_t;

    // link every slot into the freelist; node_count() acts as null handle
    void initialize(void)
    {
        T * nodes = NodeStorage::nodes();
        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
            next_index->set_index(null_handle());

#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            destruct<false>(nodes + i);
#else
            deallocate<false>(static_cast<index_t>(i));
#endif
        }
    }

public:
    typedef tagged_index tagged_node_handle;

    template <typename Allocator>
    fixed_size_freelist (Allocator const & alloc, std::size_t count):
        NodeStorage(alloc, count),
        pool_(tagged_index(static_cast<index_t>(count), 0))
    {
        initialize();
    }

    fixed_size_freelist (void):
        pool_(tagged_index(NodeStorage::node_count(), 0))
    {
        initialize();
    }

    /** Pop a slot and default-construct a T in it; NULL when exhausted.
     *  Bounded is accepted for interface parity but an array-based
     *  freelist is always bounded. */
    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T();
        return node;
    }

    /** As construct(), forwarding one constructor argument. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg);
        return node;
    }

    /** As construct(), forwarding two constructor arguments. */
    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg1, arg2);
        return node;
    }

    /** Run T's destructor and return the slot to the freelist. */
    template <bool ThreadSafe>
    void destruct (tagged_node_handle tagged_index)
    {
        index_t index = tagged_index.get_index();
        T * n = NodeStorage::nodes() + index;
        (void)n; // silence msvc warning
        n->~T();
        deallocate<ThreadSafe>(index);
    }

    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(n - NodeStorage::nodes());
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    /** one-past-the-last index serves as the "no node" sentinel */
    index_t null_handle(void) const
    {
        return static_cast<index_t>(NodeStorage::node_count());
    }

    /* pointer <-> index conversions */
    index_t get_handle(T * pointer) const
    {
        if (pointer == NULL)
            return null_handle();
        else
            return static_cast<index_t>(pointer - NodeStorage::nodes());
    }

    index_t get_handle(tagged_node_handle const & handle) const
    {
        return handle.get_index();
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return get_pointer(tptr.get_index());
    }

    T * get_pointer(index_t index) const
    {
        if (index == null_handle())
            return 0;
        else
            return NodeStorage::nodes() + index;
    }

    T * get_pointer(T * ptr) const
    {
        return ptr;
    }

protected: // allow use from subclasses
    template <bool ThreadSafe>
    index_t allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl();
        else
            return allocate_impl_unsafe();
    }

private:
    // lock-free pop of the index stack; bumps the tag on success (ABA)
    index_t allocate_impl (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            index_t index = old_pool.get_index();
            if (index == null_handle())
                return index;

            T * old_node = NodeStorage::nodes() + index;
            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

            tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return old_pool.get_index();
        }
    }

    // single-threaded pop
    // NOTE(review): loads with memory_order_consume, whereas
    // freelist_stack::allocate_impl_unsafe uses relaxed — confirm whether
    // the inconsistency is intentional
    index_t allocate_impl_unsafe (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        index_t index = old_pool.get_index();
        if (index == null_handle())
            return index;

        T * old_node = NodeStorage::nodes() + index;
        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

        tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        return old_pool.get_index();
    }

    template <bool ThreadSafe>
    void deallocate (index_t index)
    {
        if (ThreadSafe)
            deallocate_impl(index);
        else
            deallocate_impl_unsafe(index);
    }

    // lock-free push installing index as the new stack head
    void deallocate_impl (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            tagged_index new_pool (index, old_pool.get_tag());
            new_pool_node->next.set_index(old_pool.get_index());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    // single-threaded push
    // NOTE(review): the store uses the default (seq_cst) ordering, unlike
    // the other unsafe paths which use relaxed — confirm
    void deallocate_impl_unsafe (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        tagged_index new_pool (index, old_pool.get_tag());
        new_pool_node->next.set_index(old_pool.get_index());

        pool_.store(new_pool);
    }

    atomic<tagged_index> pool_;
};
// Metafunction choosing the freelist implementation from the configuration:
//  - compile-time sized   -> array storage sized at compile time
//  - fixed size (runtime) -> array storage sized at construction
//  - otherwise            -> node-based, dynamically growing freelist_stack
template <typename T,
          typename Alloc,
          bool IsCompileTimeSized,
          bool IsFixedSize,
          std::size_t Capacity
         >
struct select_freelist
{
    typedef typename mpl::if_c<IsCompileTimeSized,
                               compiletime_sized_freelist_storage<T, Capacity>,
                               runtime_sized_freelist_storage<T, Alloc>
                              >::type fixed_sized_storage_type;

    typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
                               fixed_size_freelist<T, fixed_sized_storage_type>,
                               freelist_stack<T, Alloc>
                              >::type type;
};
// Metafunction selecting the node handle representation: node-based
// freelists address nodes by pointer, array-based ones by 16bit index.
template <typename T, bool IsNodeBased>
struct select_tagged_handle
{
    typedef typename mpl::if_c<IsNodeBased,
                               tagged_ptr<T>,
                               tagged_index
                              >::type tagged_handle_type;

    typedef typename mpl::if_c<IsNodeBased,
                               T*,
                               typename tagged_index::index_t
                              >::type handle_type;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */

View File

@@ -0,0 +1,73 @@
// boost lockfree
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#define BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#include <boost/lockfree/policies.hpp>
namespace boost {
namespace lockfree {
namespace detail {
namespace mpl = boost::mpl;
// Metafunction: was a boost.parameter argument for tag_type supplied in
// bound_args?  binding<> yields mpl::void_ when the parameter is absent.
template <typename bound_args, typename tag_type>
struct has_arg
{
    typedef typename parameter::binding<bound_args, tag_type, mpl::void_>::type type;
    static const bool value = mpl::is_not_void_<type>::type::value;
};
// Extract the capacity<> policy argument; capacity defaults to 0 when the
// argument was not supplied (has_capacity reports which case applies).
template <typename bound_args>
struct extract_capacity
{
    static const bool has_capacity = has_arg<bound_args, tag::capacity>::value;

    typedef typename mpl::if_c<has_capacity,
                               typename has_arg<bound_args, tag::capacity>::type,
                               mpl::size_t< 0 >
                              >::type capacity_t;

    static const std::size_t capacity = capacity_t::value;
};
// Extract the allocator<> policy argument (default std::allocator<T>) and
// rebind it to T so the container can allocate its internal node type.
template <typename bound_args, typename T>
struct extract_allocator
{
    static const bool has_allocator = has_arg<bound_args, tag::allocator>::value;

    typedef typename mpl::if_c<has_allocator,
                               typename has_arg<bound_args, tag::allocator>::type,
                               std::allocator<T>
                              >::type allocator_arg;

    typedef typename allocator_arg::template rebind<T>::other type;
};
// Extract the fixed_sized<> policy argument; `default_` is used when the
// argument was not supplied.
template <typename bound_args, bool default_ = false>
struct extract_fixed_sized
{
    static const bool has_fixed_sized = has_arg<bound_args, tag::fixed_sized>::value;

    typedef typename mpl::if_c<has_fixed_sized,
                               typename has_arg<bound_args, tag::fixed_sized>::type,
                               mpl::bool_<default_>
                              >::type type;

    static const bool value = type::value;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2009 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
#define BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
/* this file defines the following macros:
BOOST_LOCKFREE_CACHELINE_BYTES: size of a cache line
BOOST_LOCKFREE_PTR_COMPRESSION: use tag/pointer compression to utilize parts
of the virtual address space as tag (at least 16bit)
BOOST_LOCKFREE_DCAS_ALIGNMENT: symbol used for aligning structs at cache line
boundaries
*/
/* cache line size assumed for padding/alignment */
#define BOOST_LOCKFREE_CACHELINE_BYTES 64

#ifdef _MSC_VER

#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __declspec(align(BOOST_LOCKFREE_CACHELINE_BYTES))

#if defined(_M_IX86)
/* 32bit x86: no special alignment required for the tagged pointer */
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(_M_X64) || defined(_M_IA64)
/* 64bit: pack the tag into the unused upper bits of the virtual address;
   16-byte alignment for structures updated by double-width CAS */
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __declspec(align(16))
#endif

#endif /* _MSC_VER */

#ifdef __GNUC__

#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __attribute__((aligned(BOOST_LOCKFREE_CACHELINE_BYTES)))

#if defined(__i386__) || defined(__ppc__)
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(__x86_64__)
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __attribute__((aligned(16)))
#elif defined(__alpha__)
// LATER: alpha may benefit from pointer compression. but what is the maximum size of the address space?
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#endif

#endif /* __GNUC__ */

/* fallbacks: expand to nothing on platforms not handled above */
#ifndef BOOST_LOCKFREE_DCAS_ALIGNMENT
#define BOOST_LOCKFREE_DCAS_ALIGNMENT /*BOOST_LOCKFREE_DCAS_ALIGNMENT*/
#endif

#ifndef BOOST_LOCKFREE_CACHELINE_ALIGNMENT
#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT /*BOOST_LOCKFREE_CACHELINE_ALIGNMENT*/
#endif
#endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */

View File

@@ -0,0 +1,21 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#include <boost/config.hpp>
#include <boost/lockfree/detail/prefix.hpp>
#ifndef BOOST_LOCKFREE_PTR_COMPRESSION
#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>
#else
#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>
#endif
#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */

View File

@@ -0,0 +1,133 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#include <cstddef> /* for std::size_t */
#include <limits>
namespace boost {
namespace lockfree {
namespace detail {
/** Pointer plus word-sized ABA tag, stored side by side.
 *
 *  Updated atomically via double-width CAS; BOOST_LOCKFREE_DCAS_ALIGNMENT
 *  supplies the alignment that instruction requires.
 */
template <class T>
class BOOST_LOCKFREE_DCAS_ALIGNMENT tagged_ptr
{
public:
    typedef std::size_t tag_t;

    /** default constructor leaves ptr and tag uninitialized on purpose */
    tagged_ptr(void) BOOST_NOEXCEPT
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr(tagged_ptr const & rhs):
        ptr(rhs.ptr), tag(rhs.tag)
    {}
#else
    tagged_ptr(tagged_ptr const & rhs) = default;
#endif

    explicit tagged_ptr(T * p, tag_t t = 0):
        ptr(p), tag(t)
    {}

    /** unsafe set operation */
    /* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr & operator= (tagged_ptr const & rhs)
    {
        set(rhs.ptr, rhs.tag);
        return *this;
    }
#else
    tagged_ptr & operator= (tagged_ptr const & rhs) = default;
#endif

    void set(T * p, tag_t t)
    {
        ptr = p;
        tag = t;
    }
    /* @} */

    /** comparing semantics — both pointer and tag must match */
    /* @{ */
    bool operator== (volatile tagged_ptr const & rhs) const
    {
        return (ptr == rhs.ptr) && (tag == rhs.tag);
    }

    bool operator!= (volatile tagged_ptr const & rhs) const
    {
        return !(*this == rhs);
    }
    /* @} */

    /** pointer access */
    /* @{ */
    T * get_ptr(void) const
    {
        return ptr;
    }

    void set_ptr(T * p)
    {
        ptr = p;
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return tag;
    }

    /** next tag value, wrapping at the maximum of tag_t */
    tag_t get_next_tag() const
    {
        tag_t incremented = get_tag() + 1;
        return incremented & (std::numeric_limits<tag_t>::max)();
    }

    void set_tag(tag_t t)
    {
        tag = t;
    }
    /* @} */

    /** smart pointer support */
    /* @{ */
    T & operator*() const
    {
        return *ptr;
    }

    T * operator->() const
    {
        return ptr;
    }

    operator bool(void) const
    {
        return ptr != 0;
    }
    /* @} */

protected:
    T * ptr;
    tag_t tag;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */

View File

@@ -0,0 +1,174 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008, 2009 Tim Blechmann, based on code by Cory Nelson
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#include <cstddef> /* for std::size_t */
#include <limits>
#include <boost/cstdint.hpp>
namespace boost {
namespace lockfree {
namespace detail {
#if defined (__x86_64__) || defined (_M_X64)
/** Pointer plus 16bit ABA tag packed into one 64bit word.
 *
 *  x86_64 virtual addresses use only the low 48 bits, so the tag is kept
 *  in the top 16 bits; the whole tagged pointer can then be updated with a
 *  plain 64bit CAS instead of a double-width one.
 */
template <class T>
class tagged_ptr
{
    typedef boost::uint64_t compressed_ptr_t;

public:
    typedef boost::uint16_t tag_t;

private:
    // NOTE(review): union-based type punning — supported by the targeted
    // compilers though not sanctioned by the C++ standard; confirm for any
    // new toolchain
    union cast_unit
    {
        compressed_ptr_t value;
        tag_t tag[4];
    };

    // tag occupies tag[3], i.e. the top 16 bits
    // (assumes little-endian layout — TODO confirm)
    static const int tag_index = 3;
    static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1;

    /** low 48 bits -> pointer */
    static T* extract_ptr(volatile compressed_ptr_t const & i)
    {
        return (T*)(i & ptr_mask);
    }

    /** top 16 bits -> tag */
    static tag_t extract_tag(volatile compressed_ptr_t const & i)
    {
        cast_unit cu;
        cu.value = i;
        return cu.tag[tag_index];
    }

    /** combine pointer and tag into one 64bit word */
    static compressed_ptr_t pack_ptr(T * ptr, tag_t tag)
    {
        cast_unit ret;
        ret.value = compressed_ptr_t(ptr);
        ret.tag[tag_index] = tag;
        return ret.value;
    }

public:
    /** uninitialized constructor */
    tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
    {}

    /** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr(tagged_ptr const & p):
        ptr(p.ptr)
    {}
#else
    tagged_ptr(tagged_ptr const & p) = default;
#endif

    explicit tagged_ptr(T * p, tag_t t = 0):
        ptr(pack_ptr(p, t))
    {}

    /** unsafe set operation */
    /* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
    tagged_ptr & operator= (tagged_ptr const & p)
    {
        ptr = p.ptr;
        return *this;
    }
#else
    tagged_ptr & operator= (tagged_ptr const & p) = default;
#endif

    void set(T * p, tag_t t)
    {
        ptr = pack_ptr(p, t);
    }
    /* @} */

    /** comparing semantics — one 64bit compare covers pointer and tag */
    /* @{ */
    bool operator== (volatile tagged_ptr const & p) const
    {
        return (ptr == p.ptr);
    }

    bool operator!= (volatile tagged_ptr const & p) const
    {
        return !operator==(p);
    }
    /* @} */

    /** pointer access */
    /* @{ */
    T * get_ptr() const
    {
        return extract_ptr(ptr);
    }

    void set_ptr(T * p)
    {
        tag_t tag = get_tag();
        ptr = pack_ptr(p, tag);
    }
    /* @} */

    /** tag access */
    /* @{ */
    tag_t get_tag() const
    {
        return extract_tag(ptr);
    }

    /** next tag value; wraps around at 2**16 */
    tag_t get_next_tag() const
    {
        tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
        return next;
    }

    void set_tag(tag_t t)
    {
        T * p = get_ptr();
        ptr = pack_ptr(p, t);
    }
    /* @} */

    /** smart pointer support */
    /* @{ */
    T & operator*() const
    {
        return *get_ptr();
    }

    T * operator->() const
    {
        return get_ptr();
    }

    operator bool(void) const
    {
        return get_ptr() != 0;
    }
    /* @} */

protected:
    compressed_ptr_t ptr;
};
#else
#error unsupported platform
#endif
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */

View File

@@ -0,0 +1,59 @@
// boost lockfree
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
#define BOOST_LOCKFREE_POLICIES_HPP_INCLUDED
#include <boost/parameter.hpp>
#include <boost/mpl/bool.hpp>
#include <boost/mpl/size_t.hpp>
#include <boost/mpl/void.hpp>
namespace boost {
namespace lockfree {
#ifndef BOOST_DOXYGEN_INVOKED
namespace tag { struct allocator ; }
namespace tag { struct fixed_sized; }
namespace tag { struct capacity; }
#endif
/** Configures a data structure as \b fixed-sized.
 *
 * The internal nodes are stored inside an array and they are addressed by array indexing. This limits the possible size of the
 * queue to the number of elements that can be addressed by the index type (usually 2**16-2), but on platforms that lack
 * double-width compare-and-exchange instructions, this is the best way to achieve lock-freedom.
 * This implies that a data structure is bounded.
 * */
template <bool IsFixedSized>
struct fixed_sized:
    boost::parameter::template_keyword<tag::fixed_sized, boost::mpl::bool_<IsFixedSized> >
{}; // boost.parameter keyword; consumed by detail::extract_fixed_sized
/** Sets the \b capacity of a data structure at compile-time.
 *
 * This implies that a data structure is bounded and fixed-sized.
 * */
template <size_t Size>
struct capacity:
    boost::parameter::template_keyword<tag::capacity, boost::mpl::size_t<Size> >
{}; // boost.parameter keyword; consumed by detail::extract_capacity
/** Defines the \b allocator type of a data structure.
 * */
template <class Alloc>
struct allocator:
    boost::parameter::template_keyword<tag::allocator, Alloc>
{}; // boost.parameter keyword; consumed by detail::extract_allocator
}
}
#endif /* BOOST_LOCKFREE_POLICIES_HPP_INCLUDED */

View File

@@ -0,0 +1,548 @@
// lock-free queue from
// Michael, M. M. and Scott, M. L.,
// "simple, fast and practical non-blocking and blocking concurrent queue algorithms"
//
// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#include <boost/assert.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/has_trivial_assign.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>
#include <boost/config.hpp> // for BOOST_LIKELY
#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
#endif
namespace boost {
namespace lockfree {
namespace detail {

// boost.parameter signature accepted by queue<>: optional allocator<> and
// capacity<> keyword arguments.
// NOTE(review): tag::fixed_sized is not listed here although queue reads
// extract_fixed_sized from the bound arguments — confirm that the
// fixed_sized<> policy binds as intended.
typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > queue_signature;

} /* namespace detail */
/** The queue class provides a multi-writer/multi-reader queue, pushing and popping is lock-free,
* construction/destruction has to be synchronized. It uses a freelist for memory management,
* freed nodes are pushed to the freelist and not returned to the OS before the queue is destroyed.
*
* \b Policies:
* - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
* Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
* If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
* by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index
* type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
* to achieve lock-freedom.
*
* - \ref boost::lockfree::capacity, optional \n
* If this template argument is passed to the options, the size of the queue is set at compile-time.\n
* This option implies \c fixed_sized<true>
*
* - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
* Specifies the allocator that is used for the internal freelist
*
* \b Requirements:
* - T must have a copy constructor
* - T must have a trivial assignment operator
* - T must have a trivial destructor
*
* */
#ifndef BOOST_DOXYGEN_INVOKED
template <typename T,
class A0 = boost::parameter::void_,
class A1 = boost::parameter::void_,
class A2 = boost::parameter::void_>
#else
template <typename T, ...Options>
#endif
class queue
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
#ifdef BOOST_HAS_TRIVIAL_DESTRUCTOR
    // documented requirement: T must have a trivial destructor
    BOOST_STATIC_ASSERT((boost::has_trivial_destructor<T>::value));
#endif

#ifdef BOOST_HAS_TRIVIAL_ASSIGN
    // documented requirement: T must have a trivial assignment operator
    BOOST_STATIC_ASSERT((boost::has_trivial_assign<T>::value));
#endif

    typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;

    // configuration derived from the policy arguments
    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity + 1; // the queue uses one dummy node
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;
    // Internal queue node: payload plus atomic tagged next-handle,
    // cache-line aligned to reduce false sharing between nodes.
    struct BOOST_LOCKFREE_CACHELINE_ALIGNMENT node
    {
        typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;
        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

        // construct with payload; keeps (and bumps) the tag already present
        // in next so a recycled node is not mistaken for its previous
        // incarnation
        node(T const & v, handle_type null_handle):
            data(v)//, next(tagged_node_handle(0, 0))
        {
            /* increment tag to avoid ABA problem */
            tagged_node_handle old_next = next.load(memory_order_relaxed);
            tagged_node_handle new_next (null_handle, old_next.get_next_tag());
            next.store(new_next, memory_order_release);
        }

        // construct without payload, next handle starts with tag 0
        node (handle_type null_handle):
            next(tagged_node_handle(null_handle, 0))
        {}

        // fully uninitialized node (used by the freelist machinery)
        node(void)
        {}

        atomic<tagged_node_handle> next;
        T data;
    };
    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;
    typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

    // install the dummy node that head_ and tail_ initially both point at
    void initialize(void)
    {
        node * n = pool.template construct<true, false>(pool.null_handle());
        tagged_node_handle dummy_node(pool.get_handle(n), 0);
        head_.store(dummy_node, memory_order_relaxed);
        tail_.store(dummy_node, memory_order_release);
    }
struct implementation_defined
{
typedef node_allocator allocator;
typedef std::size_t size_type;
};
#endif
BOOST_DELETED_FUNCTION(queue(queue const&))
BOOST_DELETED_FUNCTION(queue& operator= (queue const&))
public:
typedef T value_type;
typedef typename implementation_defined::allocator allocator;
typedef typename implementation_defined::size_type size_type;
/**
* \return true, if implementation is lock-free.
*
* \warning It only checks, if the queue head and tail nodes and the freelist can be modified in a lock-free manner.
* On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics, there is
* no possibility to provide a completely accurate implementation, because one would need to test every internal
* node, which is impossible if further nodes will be allocated from the operating system.
* */
bool is_lock_free (void) const
{
return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
}
//! Construct queue
// @{
queue(void):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(node_allocator(), capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
template <typename U>
explicit queue(typename node_allocator::template rebind<U>::other const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, capacity)
{
BOOST_STATIC_ASSERT(has_capacity);
initialize();
}
explicit queue(allocator const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, capacity)
{
BOOST_ASSERT(has_capacity);
initialize();
}
// @}
//! Construct queue, allocate n nodes for the freelist.
// @{
explicit queue(size_type n):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(node_allocator(), n + 1)
{
BOOST_ASSERT(!has_capacity);
initialize();
}
template <typename U>
queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
head_(tagged_node_handle(0, 0)),
tail_(tagged_node_handle(0, 0)),
pool(alloc, n + 1)
{
BOOST_STATIC_ASSERT(!has_capacity);
initialize();
}
// @}
/** \copydoc boost::lockfree::stack::reserve
* */
void reserve(size_type n)
{
pool.template reserve<true>(n);
}
/** \copydoc boost::lockfree::stack::reserve_unsafe
* */
void reserve_unsafe(size_type n)
{
pool.template reserve<false>(n);
}
/** Destroys queue, free all nodes from freelist.
* */
~queue(void)
{
T dummy;
while(unsynchronized_pop(dummy))
{}
pool.template destruct<false>(head_.load(memory_order_relaxed));
}
/** Check if the queue is empty
*
* \return true, if the queue is empty, false otherwise
* \note The result is only accurate, if no other thread modifies the queue. Therefore it is rarely practical to use this
* value in program logic.
* */
bool empty(void) const
{
return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());
}
/** Pushes object t to the queue.
*
* \post object will be pushed to the queue, if internal node can be allocated
* \returns true, if the push operation is successful.
*
* \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
* from the OS. This may not be lock-free.
* */
bool push(T const & t)
{
return do_push<false>(t);
}
/** Pushes object t to the queue.
*
* \post object will be pushed to the queue, if internal node can be allocated
* \returns true, if the push operation is successful.
*
* \note Thread-safe and non-blocking. If internal memory pool is exhausted, operation will fail
* \throws if memory allocator throws
* */
bool bounded_push(T const & t)
{
return do_push<true>(t);
}
private:
#ifndef BOOST_DOXYGEN_INVOKED
template <bool Bounded>
bool do_push(T const & t)
{
node * n = pool.template construct<true, Bounded>(t, pool.null_handle());
handle_type node_handle = pool.get_handle(n);
if (n == NULL)
return false;
for (;;) {
tagged_node_handle tail = tail_.load(memory_order_acquire);
node * tail_node = pool.get_pointer(tail);
tagged_node_handle next = tail_node->next.load(memory_order_acquire);
node * next_ptr = pool.get_pointer(next);
tagged_node_handle tail2 = tail_.load(memory_order_acquire);
if (BOOST_LIKELY(tail == tail2)) {
if (next_ptr == 0) {
tagged_node_handle new_tail_next(node_handle, next.get_next_tag());
if ( tail_node->next.compare_exchange_weak(next, new_tail_next) ) {
tagged_node_handle new_tail(node_handle, tail.get_next_tag());
tail_.compare_exchange_strong(tail, new_tail);
return true;
}
}
else {
tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_next_tag());
tail_.compare_exchange_strong(tail, new_tail);
}
}
}
}
#endif
public:
/** Pushes object t to the queue.
*
* \post object will be pushed to the queue, if internal node can be allocated
* \returns true, if the push operation is successful.
*
* \note Not Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
* from the OS. This may not be lock-free.
* \throws if memory allocator throws
* */
bool unsynchronized_push(T const & t)
{
node * n = pool.template construct<false, false>(t, pool.null_handle());
if (n == NULL)
return false;
for (;;) {
tagged_node_handle tail = tail_.load(memory_order_relaxed);
tagged_node_handle next = tail->next.load(memory_order_relaxed);
node * next_ptr = next.get_ptr();
if (next_ptr == 0) {
tail->next.store(tagged_node_handle(n, next.get_next_tag()), memory_order_relaxed);
tail_.store(tagged_node_handle(n, tail.get_next_tag()), memory_order_relaxed);
return true;
}
else
tail_.store(tagged_node_handle(next_ptr, tail.get_next_tag()), memory_order_relaxed);
}
}
/** Pops object from queue.
*
* \post if pop operation is successful, object will be copied to ret.
* \returns true, if the pop operation is successful, false if queue was empty.
*
* \note Thread-safe and non-blocking
* */
bool pop (T & ret)
{
return pop<T>(ret);
}
/** Pops object from queue.
*
* \pre type U must be constructible by T and copyable, or T must be convertible to U
* \post if pop operation is successful, object will be copied to ret.
* \returns true, if the pop operation is successful, false if queue was empty.
*
* \note Thread-safe and non-blocking
* */
template <typename U>
bool pop (U & ret)
{
for (;;) {
tagged_node_handle head = head_.load(memory_order_acquire);
node * head_ptr = pool.get_pointer(head);
tagged_node_handle tail = tail_.load(memory_order_acquire);
tagged_node_handle next = head_ptr->next.load(memory_order_acquire);
node * next_ptr = pool.get_pointer(next);
tagged_node_handle head2 = head_.load(memory_order_acquire);
if (BOOST_LIKELY(head == head2)) {
if (pool.get_handle(head) == pool.get_handle(tail)) {
if (next_ptr == 0)
return false;
tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
tail_.compare_exchange_strong(tail, new_tail);
} else {
if (next_ptr == 0)
/* this check is not part of the original algorithm as published by michael and scott
*
* however we reuse the tagged_ptr part for the freelist and clear the next part during node
* allocation. we can observe a null-pointer here.
* */
continue;
detail::copy_payload(next_ptr->data, ret);
tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
if (head_.compare_exchange_weak(head, new_head)) {
pool.template destruct<true>(head);
return true;
}
}
}
}
}
/** Pops object from queue.
*
* \post if pop operation is successful, object will be copied to ret.
* \returns true, if the pop operation is successful, false if queue was empty.
*
* \note Not thread-safe, but non-blocking
*
* */
bool unsynchronized_pop (T & ret)
{
return unsynchronized_pop<T>(ret);
}
/** Pops object from queue.
*
* \pre type U must be constructible by T and copyable, or T must be convertible to U
* \post if pop operation is successful, object will be copied to ret.
* \returns true, if the pop operation is successful, false if queue was empty.
*
* \note Not thread-safe, but non-blocking
*
* */
template <typename U>
bool unsynchronized_pop (U & ret)
{
for (;;) {
tagged_node_handle head = head_.load(memory_order_relaxed);
node * head_ptr = pool.get_pointer(head);
tagged_node_handle tail = tail_.load(memory_order_relaxed);
tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);
node * next_ptr = pool.get_pointer(next);
if (pool.get_handle(head) == pool.get_handle(tail)) {
if (next_ptr == 0)
return false;
tagged_node_handle new_tail(pool.get_handle(next), tail.get_next_tag());
tail_.store(new_tail);
} else {
if (next_ptr == 0)
/* this check is not part of the original algorithm as published by michael and scott
*
* however we reuse the tagged_ptr part for the freelist and clear the next part during node
* allocation. we can observe a null-pointer here.
* */
continue;
detail::copy_payload(next_ptr->data, ret);
tagged_node_handle new_head(pool.get_handle(next), head.get_next_tag());
head_.store(new_head);
pool.template destruct<false>(head);
return true;
}
}
}
/** consumes one element via a functor
*
* pops one element from the queue and applies the functor on this object
*
* \returns true, if one element was consumed
*
* \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
* */
template <typename Functor>
bool consume_one(Functor & f)
{
T element;
bool success = pop(element);
if (success)
f(element);
return success;
}
/// \copydoc boost::lockfree::queue::consume_one(Functor & rhs)
template <typename Functor>
bool consume_one(Functor const & f)
{
T element;
bool success = pop(element);
if (success)
f(element);
return success;
}
/** consumes all elements via a functor
*
* sequentially pops all elements from the queue and applies the functor on each object
*
* \returns number of elements that are consumed
*
* \note Thread-safe and non-blocking, if functor is thread-safe and non-blocking
* */
template <typename Functor>
size_t consume_all(Functor & f)
{
size_t element_count = 0;
while (consume_one(f))
element_count += 1;
return element_count;
}
/// \copydoc boost::lockfree::queue::consume_all(Functor & rhs)
template <typename Functor>
size_t consume_all(Functor const & f)
{
size_t element_count = 0;
while (consume_one(f))
element_count += 1;
return element_count;
}
private:
#ifndef BOOST_DOXYGEN_INVOKED
atomic<tagged_node_handle> head_;
static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
char padding1[padding_size];
atomic<tagged_node_handle> tail_;
char padding2[padding_size];
pool_t pool;
#endif
};
} /* namespace lockfree */
} /* namespace boost */
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */