Initial Commit - Lesson 31 (Commit #1)

This commit is contained in:
Norman Lansing
2026-02-24 22:39:26 -05:00
commit 9591e7f503
4631 changed files with 1019212 additions and 0 deletions

View File

@@ -0,0 +1,81 @@
// Copyright (C) 2011-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
#define BOOST_LOCKFREE_DETAIL_ATOMIC_HPP
#include <boost/config.hpp>
// Selects between std::atomic (<atomic>) and boost::atomic.
// BOOST_LOCKFREE_NO_HDR_ATOMIC means "<atomic> is NOT usable here";
// defining BOOST_LOCKFREE_FORCE_STD_ATOMIC bypasses the detection.
#ifndef BOOST_LOCKFREE_FORCE_STD_ATOMIC
#define BOOST_LOCKFREE_NO_HDR_ATOMIC
// MSVC supports atomic<> from version 2012 onwards.
#if defined(BOOST_MSVC) && (BOOST_MSVC >= 1700)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif
// GCC supports atomic<> from version 4.8 onwards.
#if (BOOST_GCC >= 40800) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif
// Apple clang is 2 major versions ahead, but in fact 1 minor version behind
// vanilla clang, hence the separate version thresholds below.
#ifdef BOOST_CLANG
#define BOOST_ATOMIC_CLANG_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__)
#if defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 60100) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif
#if !defined(__apple_build_version__) && (BOOST_ATOMIC_CLANG_VERSION >= 30600) && (__cplusplus >= 201103L)
#undef BOOST_LOCKFREE_NO_HDR_ATOMIC
#endif
#undef BOOST_ATOMIC_CLANG_VERSION
#endif // BOOST_CLANG
#endif // BOOST_LOCKFREE_FORCE_STD_ATOMIC
#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
#include <boost/atomic.hpp>
#else
#include <atomic>
#endif
namespace boost {
namespace lockfree {
namespace detail {
// Re-export the selected implementation under lockfree::detail so the
// rest of the library never needs to know which one was chosen.
#if defined(BOOST_LOCKFREE_NO_HDR_ATOMIC)
using boost::atomic;
using boost::memory_order_acquire;
using boost::memory_order_consume;
using boost::memory_order_relaxed;
using boost::memory_order_release;
#else
using std::atomic;
using std::memory_order_acquire;
using std::memory_order_consume;
using std::memory_order_relaxed;
using std::memory_order_release;
#endif
}
using detail::atomic;
using detail::memory_order_acquire;
using detail::memory_order_consume;
using detail::memory_order_relaxed;
using detail::memory_order_release;
}}
#endif /* BOOST_LOCKFREE_DETAIL_ATOMIC_HPP */

View File

@@ -0,0 +1,83 @@
// boost lockfree: copy_payload helper
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#define BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED
#include <boost/mpl/if.hpp>
#include <boost/type_traits/is_convertible.hpp>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4512) // assignment operator could not be generated
#endif
namespace boost {
namespace lockfree {
namespace detail {
/// Policy object for copy_payload: transfers the payload with a plain
/// assignment.  Selected when T is implicitly convertible to U.
struct copy_convertible
{
    template <typename T, typename U>
    static void copy(T & source, U & target)
    {
        target = source;
    }
};
/// Policy object for copy_payload: transfers the payload by explicitly
/// constructing a temporary U from T and assigning it.  Selected when T
/// is not implicitly convertible to U.
struct copy_constructible_and_copyable
{
    template <typename T, typename U>
    static void copy(T & source, U & target)
    {
        target = U(source);
    }
};
// Copies t into u.  When T is implicitly convertible to U a plain
// assignment is used; otherwise a temporary U is explicitly constructed
// from t.  The choice is made at compile time via mpl::if_.
template <typename T, typename U>
void copy_payload(T & t, U & u)
{
typedef typename boost::mpl::if_<typename boost::is_convertible<T, U>::type,
copy_convertible,
copy_constructible_and_copyable
>::type copy_type;
copy_type::copy(t, u);
}
// Functor that copies the consumed element into a caller-provided
// output reference (used by pop/consume-style APIs).
template <typename T>
struct consume_via_copy
{
consume_via_copy(T & out):
out_(out)
{}
template <typename U>
void operator()(U & element)
{
// copy_payload picks assignment vs. explicit construction.
copy_payload(element, out_);
}
// Reference to the caller's output slot; must outlive this functor.
T & out_;
};
// Functor that discards the consumed element without reading it; used
// when the caller wants to pop a node but ignore its payload.
struct consume_noop
{
template <typename U>
void operator()(const U &)
{
}
};
}}}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif /* BOOST_LOCKFREE_DETAIL_COPY_PAYLOAD_HPP_INCLUDED */

View File

@@ -0,0 +1,649 @@
// lock-free freelist
//
// Copyright (C) 2008-2013 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#define BOOST_LOCKFREE_FREELIST_HPP_INCLUDED
#include <limits>
#include <memory>
#include <stdexcept>

#include <boost/array.hpp>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/throw_exception.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable: 4100) // unreferenced formal parameter
#pragma warning(disable: 4127) // conditional expression is constant
#endif
namespace boost {
namespace lockfree {
namespace detail {
// Lock-free stack of free nodes, used as the node-allocating freelist.
// Free nodes are linked through a tagged_ptr head; the tag is advanced
// on every pop to prevent ABA.  Inherits Alloc (empty-base
// optimization) and falls back to it when the freelist is empty and
// Bounded is false.
template <typename T,
typename Alloc = std::allocator<T>
>
class freelist_stack:
Alloc
{
// Overlay placed on a free node: its first bytes store the next link.
struct freelist_node
{
tagged_ptr<freelist_node> next;
};
typedef tagged_ptr<freelist_node> tagged_node_ptr;
public:
typedef tagged_ptr<T> tagged_node_handle;
// Constructs the freelist and pre-allocates n nodes.
template <typename Allocator>
freelist_stack (Allocator const & alloc, std::size_t n = 0):
Alloc(alloc),
pool_(tagged_node_ptr(NULL))
{
for (std::size_t i = 0; i != n; ++i) {
T * node = Alloc::allocate(1);
#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
// Test hook: routes fresh nodes through destruct<>.
// NOTE(review): this runs ~T() on memory where no T was constructed
// — presumably only enabled for trivially-destructible T; confirm.
destruct<false>(node);
#else
deallocate<false>(node);
#endif
}
}
// Pre-allocates count additional nodes onto the freelist.
template <bool ThreadSafe>
void reserve (std::size_t count)
{
for (std::size_t i = 0; i != count; ++i) {
T * node = Alloc::allocate(1);
deallocate<ThreadSafe>(node);
}
}
// Pops a node and default-constructs a T in it; returns NULL on
// exhaustion (only possible when Bounded is true).
template <bool ThreadSafe, bool Bounded>
T * construct (void)
{
T * node = allocate<ThreadSafe, Bounded>();
if (node)
new(node) T();
return node;
}
// One-argument construct overload.
template <bool ThreadSafe, bool Bounded, typename ArgumentType>
T * construct (ArgumentType const & arg)
{
T * node = allocate<ThreadSafe, Bounded>();
if (node)
new(node) T(arg);
return node;
}
// Two-argument construct overload.
template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
{
T * node = allocate<ThreadSafe, Bounded>();
if (node)
new(node) T(arg1, arg2);
return node;
}
// Destroys the object referenced by the handle and recycles its node.
template <bool ThreadSafe>
void destruct (tagged_node_handle tagged_ptr)
{
T * n = tagged_ptr.get_ptr();
n->~T();
deallocate<ThreadSafe>(n);
}
template <bool ThreadSafe>
void destruct (T * n)
{
n->~T();
deallocate<ThreadSafe>(n);
}
// Walks the freelist and returns every node to the allocator.
// Not thread-safe; nodes still held by users are not visited here.
~freelist_stack(void)
{
tagged_node_ptr current = pool_.load();
while (current) {
freelist_node * current_ptr = current.get_ptr();
if (current_ptr)
current = current_ptr->next;
// Cast back to T*: every node was allocated as a T via Alloc.
Alloc::deallocate((T*)current_ptr, 1);
}
}
bool is_lock_free(void) const
{
return pool_.is_lock_free();
}
// For this freelist, handles and raw pointers are the same thing.
T * get_handle(T * pointer) const
{
return pointer;
}
T * get_handle(tagged_node_handle const & handle) const
{
return get_pointer(handle);
}
T * get_pointer(tagged_node_handle const & tptr) const
{
return tptr.get_ptr();
}
T * get_pointer(T * pointer) const
{
return pointer;
}
T * null_handle(void) const
{
return NULL;
}
protected: // allow use from subclasses
// Pops a raw node; dispatches at compile time to the CAS loop or the
// single-threaded fast path.
template <bool ThreadSafe, bool Bounded>
T * allocate (void)
{
if (ThreadSafe)
return allocate_impl<Bounded>();
else
return allocate_impl_unsafe<Bounded>();
}
private:
template <bool Bounded>
T * allocate_impl (void)
{
tagged_node_ptr old_pool = pool_.load(memory_order_consume);
for(;;) {
if (!old_pool.get_ptr()) {
// Freelist empty: heap-allocate unless bounded.
if (!Bounded)
return Alloc::allocate(1);
else
return 0;
}
// Swing the head to the next node, bumping the tag (ABA guard);
// on CAS failure old_pool is refreshed and the loop retries.
freelist_node * new_pool_ptr = old_pool->next.get_ptr();
tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());
if (pool_.compare_exchange_weak(old_pool, new_pool)) {
void * ptr = old_pool.get_ptr();
return reinterpret_cast<T*>(ptr);
}
}
}
// Single-threaded variant: plain relaxed load/store, no CAS.
template <bool Bounded>
T * allocate_impl_unsafe (void)
{
tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
if (!old_pool.get_ptr()) {
if (!Bounded)
return Alloc::allocate(1);
else
return 0;
}
freelist_node * new_pool_ptr = old_pool->next.get_ptr();
tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());
pool_.store(new_pool, memory_order_relaxed);
void * ptr = old_pool.get_ptr();
return reinterpret_cast<T*>(ptr);
}
protected:
template <bool ThreadSafe>
void deallocate (T * n)
{
if (ThreadSafe)
deallocate_impl(n);
else
deallocate_impl_unsafe(n);
}
private:
// Pushes n with a CAS loop; the tag is preserved (tags advance on
// pop only).
void deallocate_impl (T * n)
{
void * node = n;
tagged_node_ptr old_pool = pool_.load(memory_order_consume);
freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);
for(;;) {
tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
new_pool->next.set_ptr(old_pool.get_ptr());
if (pool_.compare_exchange_weak(old_pool, new_pool))
return;
}
}
// Single-threaded push.
void deallocate_impl_unsafe (T * n)
{
void * node = n;
tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);
tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
new_pool->next.set_ptr(old_pool.get_ptr());
pool_.store(new_pool, memory_order_relaxed);
}
// Head of the free stack (pointer + ABA tag).
atomic<tagged_node_ptr> pool_;
};
// 16-bit slot index paired with a 16-bit ABA tag: the pointer-free
// analogue of tagged_ptr, used by fixed_size_freelist so that the pair
// fits into a small (single-word) atomic.
class tagged_index
{
public:
typedef boost::uint16_t tag_t;
typedef boost::uint16_t index_t;
/** uninitialized constructor */
tagged_index(void) BOOST_NOEXCEPT //: index(0), tag(0)
{}
/** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
tagged_index(tagged_index const & rhs):
index(rhs.index), tag(rhs.tag)
{}
#else
tagged_index(tagged_index const & rhs) = default;
#endif
explicit tagged_index(index_t i, tag_t t = 0):
index(i), tag(t)
{}
/** index access */
/* @{ */
index_t get_index() const
{
return index;
}
void set_index(index_t i)
{
index = i;
}
/* @} */
/** tag access */
/* @{ */
tag_t get_tag() const
{
return tag;
}
// Tag to use for the next modification; wraps around at 16 bits.
tag_t get_next_tag() const
{
tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
return next;
}
void set_tag(tag_t t)
{
tag = t;
}
/* @} */
// Equality compares both index and tag, so two views of the same slot
// differ if one is stale (ABA detection).
bool operator==(tagged_index const & rhs) const
{
return (index == rhs.index) && (tag == rhs.tag);
}
bool operator!=(tagged_index const & rhs) const
{
return !operator==(rhs);
}
protected:
index_t index;
tag_t tag;
};
// Statically-sized, in-object node array for fixed_size_freelist.
template <typename T,
std::size_t size>
struct compiletime_sized_freelist_storage
{
// array-based freelists only support a 16bit address space.
BOOST_STATIC_ASSERT(size < 65536);
// NOTE(review): raw char buffer with no explicit alignment; nothing
// here guarantees alignment suitable for T before placement-new —
// confirm the enclosing object provides sufficient alignment.
boost::array<char, size * sizeof(T)> data;
// unused ... only for API purposes
template <typename Allocator>
compiletime_sized_freelist_storage(Allocator const & /* alloc */, std::size_t /* count */)
{}
// Uninitialized storage viewed as a T array; elements are
// placement-constructed by the freelist.
T * nodes(void) const
{
return reinterpret_cast<T*>(const_cast<char*>(data.data()));
}
std::size_t node_count(void) const
{
return size;
}
};
// Heap-allocated node array for fixed_size_freelist, sized at runtime.
// Limited to 65535 nodes because free slots are addressed with 16-bit
// indices (see tagged_index).
template <typename T,
typename Alloc = std::allocator<T> >
struct runtime_sized_freelist_storage:
Alloc
{
T * nodes_;
std::size_t node_count_;
// Allocates raw storage for count nodes; throws std::runtime_error
// (via boost::throw_exception) when count exceeds the 16-bit limit.
template <typename Allocator>
runtime_sized_freelist_storage(Allocator const & alloc, std::size_t count):
Alloc(alloc), node_count_(count)
{
if (count > 65535)
boost::throw_exception(std::runtime_error("boost.lockfree: freelist size is limited to a maximum of 65535 objects"));
nodes_ = Alloc::allocate(count);
}
~runtime_sized_freelist_storage(void)
{
Alloc::deallocate(nodes_, node_count_);
}
// Uninitialized storage; elements are placement-constructed by the
// freelist.
T * nodes(void) const
{
return nodes_;
}
std::size_t node_count(void) const
{
return node_count_;
}
};
// Fixed-size freelist: node storage is a contiguous array (NodeStorage)
// and free slots are linked through 16-bit array indices instead of
// pointers.  The head of the free stack is an atomic tagged_index
// (index + ABA-prevention tag), small enough for a single-word CAS.
//
// Fixes vs. previous revision: the *_unsafe (single-threaded) paths now
// use memory_order_relaxed loads/stores consistently, matching
// freelist_stack::allocate_impl_unsafe / deallocate_impl_unsafe in this
// file; previously they used memory_order_consume and one defaulted
// (seq_cst) store.  The destruct() parameter no longer shadows the
// tagged_index type, and size_t -> index_t narrowings are explicit.
template <typename T,
          typename NodeStorage = runtime_sized_freelist_storage<T>
         >
class fixed_size_freelist:
    NodeStorage
{
    // Overlay placed on an unused slot: its first bytes hold the index
    // of the next free slot.
    struct freelist_node
    {
        tagged_index next;
    };

    typedef tagged_index::index_t index_t;

    // Links every slot of the storage array onto the free stack.
    void initialize(void)
    {
        T * nodes = NodeStorage::nodes();
        for (std::size_t i = 0; i != NodeStorage::node_count(); ++i) {
            tagged_index * next_index = reinterpret_cast<tagged_index*>(nodes + i);
            next_index->set_index(null_handle());

#ifdef BOOST_LOCKFREE_FREELIST_INIT_RUNS_DTOR
            // Test hook: route each slot through destruct<>.
            destruct<false>(nodes + i);
#else
            deallocate<false>(static_cast<index_t>(i));
#endif
        }
    }

public:
    typedef tagged_index tagged_node_handle;

    /// Constructs storage for count nodes and marks all of them free.
    template <typename Allocator>
    fixed_size_freelist (Allocator const & alloc, std::size_t count):
        NodeStorage(alloc, count),
        pool_(tagged_index(static_cast<index_t>(count), 0))
    {
        initialize();
    }

    /// Default construction for storages with an intrinsic size.
    fixed_size_freelist (void):
        pool_(tagged_index(static_cast<index_t>(NodeStorage::node_count()), 0))
    {
        initialize();
    }

    /** Pops a free slot and default-constructs a T in it.
     *  Returns NULL when the freelist is exhausted.  Bounded is
     *  ignored: this freelist is inherently bounded by its storage. */
    template <bool ThreadSafe, bool Bounded>
    T * construct (void)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T();
        return node;
    }

    /// One-argument construct overload.
    template <bool ThreadSafe, bool Bounded, typename ArgumentType>
    T * construct (ArgumentType const & arg)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg);
        return node;
    }

    /// Two-argument construct overload.
    template <bool ThreadSafe, bool Bounded, typename ArgumentType1, typename ArgumentType2>
    T * construct (ArgumentType1 const & arg1, ArgumentType2 const & arg2)
    {
        index_t node_index = allocate<ThreadSafe>();
        if (node_index == null_handle())
            return NULL;

        T * node = NodeStorage::nodes() + node_index;
        new(node) T(arg1, arg2);
        return node;
    }

    /// Destroys the object referenced by handle and recycles its slot.
    template <bool ThreadSafe>
    void destruct (tagged_node_handle handle)
    {
        index_t index = handle.get_index();
        T * n = NodeStorage::nodes() + index;
        (void)n; // silence msvc warning
        n->~T();
        deallocate<ThreadSafe>(index);
    }

    /// Destroys *n and recycles its slot.
    template <bool ThreadSafe>
    void destruct (T * n)
    {
        n->~T();
        deallocate<ThreadSafe>(static_cast<index_t>(n - NodeStorage::nodes()));
    }

    bool is_lock_free(void) const
    {
        return pool_.is_lock_free();
    }

    /// Sentinel index meaning "no node": one past the last valid slot.
    index_t null_handle(void) const
    {
        return static_cast<index_t>(NodeStorage::node_count());
    }

    /// Converts a pointer into an index handle (NULL -> null_handle()).
    index_t get_handle(T * pointer) const
    {
        if (pointer == NULL)
            return null_handle();
        else
            return static_cast<index_t>(pointer - NodeStorage::nodes());
    }

    index_t get_handle(tagged_node_handle const & handle) const
    {
        return handle.get_index();
    }

    T * get_pointer(tagged_node_handle const & tptr) const
    {
        return get_pointer(tptr.get_index());
    }

    /// Converts an index handle back into a pointer (null_handle() -> 0).
    T * get_pointer(index_t index) const
    {
        if (index == null_handle())
            return 0;
        else
            return NodeStorage::nodes() + index;
    }

    T * get_pointer(T * ptr) const
    {
        return ptr;
    }

protected: // allow use from subclasses
    /// Pops a free slot; returns null_handle() when empty.
    template <bool ThreadSafe>
    index_t allocate (void)
    {
        if (ThreadSafe)
            return allocate_impl();
        else
            return allocate_impl_unsafe();
    }

private:
    index_t allocate_impl (void)
    {
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            index_t index = old_pool.get_index();
            if (index == null_handle())
                return index;

            T * old_node = NodeStorage::nodes() + index;
            tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

            // Swing the head to the next free slot, bumping the tag to
            // prevent ABA; on CAS failure old_pool is refreshed and we
            // retry.
            tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return old_pool.get_index();
        }
    }

    // Single-threaded variant: relaxed load/store suffices, matching
    // freelist_stack::allocate_impl_unsafe.
    index_t allocate_impl_unsafe (void)
    {
        tagged_index old_pool = pool_.load(memory_order_relaxed);

        index_t index = old_pool.get_index();
        if (index == null_handle())
            return index;

        T * old_node = NodeStorage::nodes() + index;
        tagged_index * next_index = reinterpret_cast<tagged_index*>(old_node);

        tagged_index new_pool(next_index->get_index(), old_pool.get_next_tag());

        pool_.store(new_pool, memory_order_relaxed);
        return old_pool.get_index();
    }

    template <bool ThreadSafe>
    void deallocate (index_t index)
    {
        if (ThreadSafe)
            deallocate_impl(index);
        else
            deallocate_impl_unsafe(index);
    }

    void deallocate_impl (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_consume);

        for(;;) {
            // Link the freed slot in front of the current head; the tag
            // is preserved (tags advance on allocation only).
            tagged_index new_pool (index, old_pool.get_tag());
            new_pool_node->next.set_index(old_pool.get_index());

            if (pool_.compare_exchange_weak(old_pool, new_pool))
                return;
        }
    }

    // Single-threaded variant: relaxed ordering, matching
    // freelist_stack::deallocate_impl_unsafe.
    void deallocate_impl_unsafe (index_t index)
    {
        freelist_node * new_pool_node = reinterpret_cast<freelist_node*>(NodeStorage::nodes() + index);
        tagged_index old_pool = pool_.load(memory_order_relaxed);

        tagged_index new_pool (index, old_pool.get_tag());
        new_pool_node->next.set_index(old_pool.get_index());

        pool_.store(new_pool, memory_order_relaxed);
    }

    // Head of the free stack (slot index + ABA tag).
    atomic<tagged_index> pool_;
};
// Chooses the freelist implementation from the bound policies:
// compile-time capacity selects the in-object array storage; any
// fixed-size request selects the index-based fixed_size_freelist;
// otherwise the dynamically growing freelist_stack is used.
template <typename T,
typename Alloc,
bool IsCompileTimeSized,
bool IsFixedSize,
std::size_t Capacity
>
struct select_freelist
{
typedef typename mpl::if_c<IsCompileTimeSized,
compiletime_sized_freelist_storage<T, Capacity>,
runtime_sized_freelist_storage<T, Alloc>
>::type fixed_sized_storage_type;
typedef typename mpl::if_c<IsCompileTimeSized || IsFixedSize,
fixed_size_freelist<T, fixed_sized_storage_type>,
freelist_stack<T, Alloc>
>::type type;
};
// Chooses the handle representation: node-based (heap) freelists use
// real pointers (tagged_ptr<T>), array-based ones use 16-bit indices.
template <typename T, bool IsNodeBased>
struct select_tagged_handle
{
typedef typename mpl::if_c<IsNodeBased,
tagged_ptr<T>,
tagged_index
>::type tagged_handle_type;
typedef typename mpl::if_c<IsNodeBased,
T*,
typename tagged_index::index_t
>::type handle_type;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
#endif /* BOOST_LOCKFREE_FREELIST_HPP_INCLUDED */

View File

@@ -0,0 +1,73 @@
// boost lockfree
//
// Copyright (C) 2011 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#define BOOST_LOCKFREE_DETAIL_PARAMETER_HPP
#include <boost/lockfree/policies.hpp>
namespace boost {
namespace lockfree {
namespace detail {
namespace mpl = boost::mpl;
// Compile-time check whether the policy tagged tag_type was bound in
// bound_args; `type` is the bound argument, or mpl::void_ when absent.
template <typename bound_args, typename tag_type>
struct has_arg
{
typedef typename parameter::binding<bound_args, tag_type, mpl::void_>::type type;
static const bool value = mpl::is_not_void_<type>::type::value;
};
// Extracts the capacity<> policy argument; capacity is 0 when the
// policy was not specified (i.e. the container is runtime-sized).
template <typename bound_args>
struct extract_capacity
{
static const bool has_capacity = has_arg<bound_args, tag::capacity>::value;
typedef typename mpl::if_c<has_capacity,
typename has_arg<bound_args, tag::capacity>::type,
mpl::size_t< 0 >
>::type capacity_t;
static const std::size_t capacity = capacity_t::value;
};
// Extracts the allocator<> policy argument (default: std::allocator<T>)
// and rebinds it to value type T.
template <typename bound_args, typename T>
struct extract_allocator
{
static const bool has_allocator = has_arg<bound_args, tag::allocator>::value;
typedef typename mpl::if_c<has_allocator,
typename has_arg<bound_args, tag::allocator>::type,
std::allocator<T>
>::type allocator_arg;
// C++03-style rebind; requires the allocator to provide rebind<>.
typedef typename allocator_arg::template rebind<T>::other type;
};
// Extracts the fixed_sized<> policy argument, falling back to default_
// when the policy was not specified.
template <typename bound_args, bool default_ = false>
struct extract_fixed_sized
{
static const bool has_fixed_sized = has_arg<bound_args, tag::fixed_sized>::value;
typedef typename mpl::if_c<has_fixed_sized,
typename has_arg<bound_args, tag::fixed_sized>::type,
mpl::bool_<default_>
>::type type;
static const bool value = type::value;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_DETAIL_PARAMETER_HPP */

View File

@@ -0,0 +1,56 @@
// Copyright (C) 2009 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
#define BOOST_LOCKFREE_PREFIX_HPP_INCLUDED
/* this file defines the following macros:
BOOST_LOCKFREE_CACHELINE_BYTES: size of a cache line
BOOST_LOCKFREE_PTR_COMPRESSION: use tag/pointer compression to utilize parts
of the virtual address space as tag (at least 16bit)
BOOST_LOCKFREE_DCAS_ALIGNMENT: symbol used for aligning structs so they can
be targeted by double-width compare-and-swap
*/
// NOTE(review): 64 bytes is assumed for every target — confirm for
// non-x86 platforms.
#define BOOST_LOCKFREE_CACHELINE_BYTES 64
#ifdef _MSC_VER
#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __declspec(align(BOOST_LOCKFREE_CACHELINE_BYTES))
#if defined(_M_IX86)
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(_M_X64) || defined(_M_IA64)
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
// 16-byte alignment for double-width CAS operands on 64-bit targets.
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __declspec(align(16))
#endif
#endif /* _MSC_VER */
#ifdef __GNUC__
#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT __attribute__((aligned(BOOST_LOCKFREE_CACHELINE_BYTES)))
#if defined(__i386__) || defined(__ppc__)
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#elif defined(__x86_64__)
#define BOOST_LOCKFREE_PTR_COMPRESSION 1
#define BOOST_LOCKFREE_DCAS_ALIGNMENT __attribute__((aligned(16)))
#elif defined(__alpha__)
// LATER: alpha may benefit from pointer compression. but what is the maximum size of the address space?
#define BOOST_LOCKFREE_DCAS_ALIGNMENT
#endif
#endif /* __GNUC__ */
// Fallbacks: expand to nothing on platforms not handled above.
#ifndef BOOST_LOCKFREE_DCAS_ALIGNMENT
#define BOOST_LOCKFREE_DCAS_ALIGNMENT /*BOOST_LOCKFREE_DCAS_ALIGNMENT*/
#endif
#ifndef BOOST_LOCKFREE_CACHELINE_ALIGNMENT
#define BOOST_LOCKFREE_CACHELINE_ALIGNMENT /*BOOST_LOCKFREE_CACHELINE_ALIGNMENT*/
#endif
#endif /* BOOST_LOCKFREE_PREFIX_HPP_INCLUDED */

View File

@@ -0,0 +1,21 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED
#include <boost/config.hpp>
#include <boost/lockfree/detail/prefix.hpp>
// prefix.hpp defines BOOST_LOCKFREE_PTR_COMPRESSION on 64-bit x86
// targets, where pointer and tag are packed into one 64-bit word;
// otherwise a double-width (pointer + tag) representation is used.
#ifndef BOOST_LOCKFREE_PTR_COMPRESSION
#include <boost/lockfree/detail/tagged_ptr_dcas.hpp>
#else
#include <boost/lockfree/detail/tagged_ptr_ptrcompression.hpp>
#endif
#endif /* BOOST_LOCKFREE_TAGGED_PTR_HPP_INCLUDED */

View File

@@ -0,0 +1,133 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008 Tim Blechmann
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED
#include <cstddef> /* for std::size_t */
#include <limits>
namespace boost {
namespace lockfree {
namespace detail {
// Tagged pointer for targets without pointer compression: pointer and
// tag are stored side by side, so atomic updates of the pair need a
// double-width CAS — hence the BOOST_LOCKFREE_DCAS_ALIGNMENT on the
// type.  The tag is advanced on every reuse to prevent ABA.
template <class T>
class BOOST_LOCKFREE_DCAS_ALIGNMENT tagged_ptr
{
public:
typedef std::size_t tag_t;
/** uninitialized constructor */
tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
{}
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
tagged_ptr(tagged_ptr const & p):
ptr(p.ptr), tag(p.tag)
{}
#else
tagged_ptr(tagged_ptr const & p) = default;
#endif
explicit tagged_ptr(T * p, tag_t t = 0):
ptr(p), tag(t)
{}
/** unsafe set operation */
/* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
tagged_ptr & operator= (tagged_ptr const & p)
{
set(p.ptr, p.tag);
return *this;
}
#else
tagged_ptr & operator= (tagged_ptr const & p) = default;
#endif
// Non-atomic update of both fields; callers must provide any needed
// synchronization (hence "unsafe").
void set(T * p, tag_t t)
{
ptr = p;
tag = t;
}
/* @} */
/** comparing semantics */
/* @{ */
// Equality compares pointer AND tag, so a stale view of the same
// pointer compares unequal (ABA detection).
bool operator== (volatile tagged_ptr const & p) const
{
return (ptr == p.ptr) && (tag == p.tag);
}
bool operator!= (volatile tagged_ptr const & p) const
{
return !operator==(p);
}
/* @} */
/** pointer access */
/* @{ */
T * get_ptr(void) const
{
return ptr;
}
void set_ptr(T * p)
{
ptr = p;
}
/* @} */
/** tag access */
/* @{ */
tag_t get_tag() const
{
return tag;
}
// Tag to use for the next modification; wraps at the maximum of tag_t.
tag_t get_next_tag() const
{
tag_t next = (get_tag() + 1) & (std::numeric_limits<tag_t>::max)();
return next;
}
void set_tag(tag_t t)
{
tag = t;
}
/* @} */
/** smart pointer support */
/* @{ */
T & operator*() const
{
return *ptr;
}
T * operator->() const
{
return ptr;
}
operator bool(void) const
{
return ptr != 0;
}
/* @} */
protected:
T * ptr;
tag_t tag;
};
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_DCAS_HPP_INCLUDED */

View File

@@ -0,0 +1,174 @@
// tagged pointer, for aba prevention
//
// Copyright (C) 2008, 2009 Tim Blechmann, based on code by Cory Nelson
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#define BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED
#include <cstddef> /* for std::size_t */
#include <limits>
#include <boost/cstdint.hpp>
namespace boost {
namespace lockfree {
namespace detail {
#if defined (__x86_64__) || defined (_M_X64)
// Compressed tagged pointer for x86-64: packs a 48-bit pointer and a
// 16-bit ABA tag into one 64-bit word, so the pair can be updated with
// an ordinary single-word CAS.  Relies on x86-64 canonical addresses
// using only the low 48 bits.
template <class T>
class tagged_ptr
{
typedef boost::uint64_t compressed_ptr_t;
public:
typedef boost::uint16_t tag_t;
private:
// Overlay used to read/write the tag as the top 16-bit lane.
// NOTE(review): union type punning is compiler-supported behavior,
// not guaranteed by the C++ standard — confirm on new toolchains.
union cast_unit
{
compressed_ptr_t value;
tag_t tag[4];
};
// Lane index of the high 16 bits; valid for little-endian x86-64.
static const int tag_index = 3;
// Mask selecting the low 48 address bits.
// NOTE(review): UL suffix — on LLP64 targets (_M_X64/MSVC) unsigned
// long is 32-bit and the literal's type is chosen by value; confirm.
static const compressed_ptr_t ptr_mask = 0xffffffffffffUL; //(1L<<48L)-1;
static T* extract_ptr(volatile compressed_ptr_t const & i)
{
return (T*)(i & ptr_mask);
}
static tag_t extract_tag(volatile compressed_ptr_t const & i)
{
cast_unit cu;
cu.value = i;
return cu.tag[tag_index];
}
// Combines pointer bits (low 48) and tag (high 16) into one word.
static compressed_ptr_t pack_ptr(T * ptr, tag_t tag)
{
cast_unit ret;
ret.value = compressed_ptr_t(ptr);
ret.tag[tag_index] = tag;
return ret.value;
}
public:
/** uninitialized constructor */
tagged_ptr(void) BOOST_NOEXCEPT//: ptr(0), tag(0)
{}
/** copy constructor */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
tagged_ptr(tagged_ptr const & p):
ptr(p.ptr)
{}
#else
tagged_ptr(tagged_ptr const & p) = default;
#endif
explicit tagged_ptr(T * p, tag_t t = 0):
ptr(pack_ptr(p, t))
{}
/** unsafe set operation */
/* @{ */
#ifdef BOOST_NO_CXX11_DEFAULTED_FUNCTIONS
tagged_ptr & operator= (tagged_ptr const & p)
{
ptr = p.ptr;
return *this;
}
#else
tagged_ptr & operator= (tagged_ptr const & p) = default;
#endif
void set(T * p, tag_t t)
{
ptr = pack_ptr(p, t);
}
/* @} */
/** comparing semantics */
/* @{ */
// One word holds both fields, so a single compare covers pointer
// and tag at once.
bool operator== (volatile tagged_ptr const & p) const
{
return (ptr == p.ptr);
}
bool operator!= (volatile tagged_ptr const & p) const
{
return !operator==(p);
}
/* @} */
/** pointer access */
/* @{ */
T * get_ptr() const
{
return extract_ptr(ptr);
}
// Replaces the pointer bits while preserving the current tag.
void set_ptr(T * p)
{
tag_t tag = get_tag();
ptr = pack_ptr(p, tag);
}
/* @} */
/** tag access */
/* @{ */
tag_t get_tag() const
{
return extract_tag(ptr);
}
// Tag to use for the next modification; wraps at 16 bits.
tag_t get_next_tag() const
{
tag_t next = (get_tag() + 1u) & (std::numeric_limits<tag_t>::max)();
return next;
}
// Replaces the tag while preserving the pointer bits.
void set_tag(tag_t t)
{
T * p = get_ptr();
ptr = pack_ptr(p, t);
}
/* @} */
/** smart pointer support */
/* @{ */
T & operator*() const
{
return *get_ptr();
}
T * operator->() const
{
return get_ptr();
}
operator bool(void) const
{
return get_ptr() != 0;
}
/* @} */
protected:
compressed_ptr_t ptr;
};
#else
#error unsupported platform
#endif
} /* namespace detail */
} /* namespace lockfree */
} /* namespace boost */
#endif /* BOOST_LOCKFREE_TAGGED_PTR_PTRCOMPRESSION_HPP_INCLUDED */