Lesson 35 - Get Compute Auth Token Working

This commit is contained in:
Norman Lansing
2026-02-28 12:32:28 -05:00
parent 1d477ee42a
commit 4fde462bce
7743 changed files with 1397833 additions and 18 deletions

View File

@@ -0,0 +1,234 @@
#ifndef AWS_COMMON_ALLOCATOR_H
#define AWS_COMMON_ALLOCATOR_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/macros.h>
#include <aws/common/stdbool.h>
#include <aws/common/stdint.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/*
* Quick guide to allocators:
* CRT offers several flavours of allocators:
* - default: basic allocator that invokes system one directly.
* - aligned: basic allocator that aligns small allocations on 8 byte
* boundary and big buffers on 32/64 byte (system dependent) boundary.
* Aligned mem can improve perf on some operations, like memcpy or hashes.
* Depending on a system, can result in higher peak memory count in heavy
* acquire/free scenarios (ex. s3), due to memory fragmentation related to how
* aligned allocators work (over allocate, find aligned offset, release extra memory)
* - wrapped_cf: wraps MacOS's Security Framework allocator.
* - mem_tracer: wraps any allocator and provides tracing functionality to allocations
* - small_block_allocator: pools smaller allocations into preallocated buckets.
* Not actively maintained. Avoid if possible.
*/
/* Allocator structure. An instance of this will be passed around for anything needing memory allocation */
struct aws_allocator {
    /* Returns a block of at least `size` bytes, or NULL on failure. */
    void *(*mem_acquire)(struct aws_allocator *allocator, size_t size);
    /* Returns a block previously obtained from this allocator. */
    void (*mem_release)(struct aws_allocator *allocator, void *ptr);
    /* Optional method; if not supported, this pointer must be NULL */
    void *(*mem_realloc)(struct aws_allocator *allocator, void *oldptr, size_t oldsize, size_t newsize);
    /* Optional method; if not supported, this pointer must be NULL */
    void *(*mem_calloc)(struct aws_allocator *allocator, size_t num, size_t size);
    /* Implementation-specific state for the callbacks above. */
    void *impl;
};
/**
* Inexpensive (constant time) check of data-structure invariants.
*/
AWS_COMMON_API
bool aws_allocator_is_valid(const struct aws_allocator *alloc);
AWS_COMMON_API
struct aws_allocator *aws_default_allocator(void);
/*
 * Allocator that aligns small allocations on an 8-byte boundary and big allocations
 * on a 32/64 byte boundary.
 */
AWS_COMMON_API
struct aws_allocator *aws_aligned_allocator(void);
#ifdef __MACH__
/* Avoid pulling in CoreFoundation headers in a header file. */
struct __CFAllocator; /* NOLINT(bugprone-reserved-identifier) */
typedef const struct __CFAllocator *CFAllocatorRef;
/**
* Wraps a CFAllocator around aws_allocator. For Mac only. Use this anytime you need a CFAllocatorRef for interacting
* with Apple Frameworks. Unfortunately, it allocates memory so we can't make it static file scope, be sure to call
* aws_wrapped_cf_allocator_destroy when finished.
*/
AWS_COMMON_API
CFAllocatorRef aws_wrapped_cf_allocator_new(struct aws_allocator *allocator);
/**
* Cleans up any resources alloced in aws_wrapped_cf_allocator_new.
*/
AWS_COMMON_API
void aws_wrapped_cf_allocator_destroy(CFAllocatorRef allocator);
#endif
/**
* Returns at least `size` of memory ready for usage. In versions v0.6.8 and prior, this function was allowed to return
* NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
* conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
* GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire(struct aws_allocator *allocator, size_t size);
/**
* Allocates a block of memory for an array of num elements, each of them size bytes long, and initializes all its bits
* to zero. In versions v0.6.8 and prior, this function was allowed to return NULL.
* In later versions, if allocator->mem_calloc() returns NULL, this function will assert and exit. To handle
* conditions where OOM is not a fatal error, allocator->mem_calloc() is responsible for finding/reclaiming/running a
* GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size);
/**
* Allocates many chunks of bytes into a single block. Expects to be called with alternating void ** (dest), size_t
* (size). The first void ** will be set to the root of the allocation. Alignment is assumed to be sizeof(intmax_t).
*
* This is useful for allocating structs using the pimpl pattern, as you may allocate the public object and impl object
* in the same contiguous block of memory.
*
* Returns a pointer to the allocation.
*
* In versions v0.6.8 and prior, this function was allowed to return
* NULL. In later versions, if allocator->mem_acquire() returns NULL, this function will assert and exit. To handle
* conditions where OOM is not a fatal error, allocator->mem_acquire() is responsible for finding/reclaiming/running a
* GC etc...before returning.
*/
AWS_COMMON_API
void *aws_mem_acquire_many(struct aws_allocator *allocator, size_t count, ...);
/**
* Releases ptr back to whatever allocated it.
* Nothing happens if ptr is NULL.
*/
AWS_COMMON_API
void aws_mem_release(struct aws_allocator *allocator, void *ptr);
/**
* Attempts to adjust the size of the pointed-to memory buffer from oldsize to
* newsize. The pointer (*ptr) may be changed if the memory needs to be
* reallocated.
*
* In versions v0.6.8 and prior, this function was allowed to return
* NULL. In later versions, if allocator->mem_realloc() returns NULL, this function will assert and exit. To handle
* conditions where OOM is not a fatal error, allocator->mem_realloc() is responsible for finding/reclaiming/running a
* GC etc...before returning.
*/
AWS_COMMON_API
int aws_mem_realloc(struct aws_allocator *allocator, void **ptr, size_t oldsize, size_t newsize);
/*
* Maintainer note: The above function doesn't return the pointer (as with
* standard C realloc) as this pattern becomes error-prone when OOMs occur.
* In particular, we want to avoid losing the old pointer when an OOM condition
* occurs, so we prefer to take the old pointer as an in/out reference argument
* that we can leave unchanged on failure.
*/
/* Granularity at which a tracer allocator records allocations (see aws_mem_tracer_new). */
enum aws_mem_trace_level {
    AWS_MEMTRACE_NONE = 0,   /* no tracing */
    AWS_MEMTRACE_BYTES = 1,  /* just track allocation sizes and total allocated */
    AWS_MEMTRACE_STACKS = 2, /* capture callstacks for each allocation */
};
/*
* Wraps an allocator and tracks all external allocations. If aws_mem_trace_dump() is called
* and there are still allocations active, they will be reported to the aws_logger at TRACE level.
* allocator - The allocator to wrap
* deprecated - Deprecated arg, ignored.
* level - The level to track allocations at
* frames_per_stack is how many frames to store per callstack if AWS_MEMTRACE_STACKS is in use,
* otherwise it is ignored. 8 tends to be a pretty good number balancing storage space vs useful stacks.
* Returns the tracer allocator, which should be used for all allocations that should be tracked.
*/
AWS_COMMON_API
struct aws_allocator *aws_mem_tracer_new(
struct aws_allocator *allocator,
struct aws_allocator *deprecated,
enum aws_mem_trace_level level,
size_t frames_per_stack);
/*
* Unwraps the traced allocator and cleans up the tracer.
* Returns the original allocator
*/
AWS_COMMON_API
struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator);
/*
 * If there are outstanding allocations, dumps them to log, along with any information gathered
 * based on the trace level set when aws_mem_tracer_new() was called.
 * Should be passed the tracer allocator returned from aws_mem_tracer_new().
 */
AWS_COMMON_API
void aws_mem_tracer_dump(struct aws_allocator *trace_allocator);
/*
* Returns the current number of bytes in outstanding allocations
*/
AWS_COMMON_API
size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator);
/*
* Returns the current number of outstanding allocations
*/
AWS_COMMON_API
size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator);
/*
* Creates a new Small Block Allocator which fronts the supplied parent allocator. The SBA will intercept
* and handle small allocs, and will forward anything larger to the parent allocator.
* If multi_threaded is true, the internal allocator will protect its internal data structures with a mutex
*/
AWS_COMMON_API
struct aws_allocator *aws_small_block_allocator_new(struct aws_allocator *allocator, bool multi_threaded);
/*
* Destroys a Small Block Allocator instance and frees its memory to the parent allocator. The parent
* allocator will otherwise be unaffected.
*/
AWS_COMMON_API
void aws_small_block_allocator_destroy(struct aws_allocator *sba_allocator);
/*
* Returns the number of bytes currently active in the SBA
*/
AWS_COMMON_API
size_t aws_small_block_allocator_bytes_active(struct aws_allocator *sba_allocator);
/*
* Returns the number of bytes reserved in pages/bins inside the SBA, e.g. the
* current system memory used by the SBA
*/
AWS_COMMON_API
size_t aws_small_block_allocator_bytes_reserved(struct aws_allocator *sba_allocator);
/*
* Returns the page size that the SBA is using
*/
AWS_COMMON_API
size_t aws_small_block_allocator_page_size(struct aws_allocator *sba_allocator);
/*
* Returns the amount of memory in each page available to user allocations
*/
AWS_COMMON_API
size_t aws_small_block_allocator_page_size_available(struct aws_allocator *sba_allocator);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ALLOCATOR_H */

View File

@@ -0,0 +1,241 @@
#ifndef AWS_COMMON_ARRAY_LIST_H
#define AWS_COMMON_ARRAY_LIST_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
#include <stdlib.h>
AWS_PUSH_SANE_WARNING_LEVEL
enum { AWS_ARRAY_LIST_DEBUG_FILL = 0xDD };
struct aws_array_list {
    struct aws_allocator *alloc; /* NULL when the list is static (fixed capacity) */
    size_t current_size;         /* size of the allocation behind [data], in bytes */
    size_t length;               /* number of elements currently stored */
    size_t item_size;            /* size of a single element, in bytes */
    void *data;                  /* backing buffer; NULL when current_size == 0 */
};
/**
* Prototype for a comparator function for sorting elements.
*
* a and b should be cast to pointers to the element type held in the list
* before being dereferenced. The function should compare the elements and
* return a positive number if a > b, zero if a = b, and a negative number
* if a < b.
*/
typedef int(aws_array_list_comparator_fn)(const void *a, const void *b);
AWS_EXTERN_C_BEGIN
/**
* Initializes an array list with an array of size initial_item_allocation * item_size. In this mode, the array size
* will grow by a factor of 2 upon insertion if space is not available. initial_item_allocation is the number of
* elements you want space allocated for. item_size is the size of each element in bytes. Mixing items types is not
* supported by this API.
*/
AWS_STATIC_IMPL
int aws_array_list_init_dynamic(
struct aws_array_list *AWS_RESTRICT list,
struct aws_allocator *alloc,
size_t initial_item_allocation,
size_t item_size);
/**
* Initializes an array list with a preallocated array of void *. item_count is the number of elements in the array,
* and item_size is the size in bytes of each element. Mixing items types is not supported
* by this API. Once this list is full, new items will be rejected.
*/
AWS_STATIC_IMPL
void aws_array_list_init_static(
struct aws_array_list *AWS_RESTRICT list,
void *raw_array,
size_t item_count,
size_t item_size);
/**
* Initializes an array list with a preallocated array of *already-initialized* elements. item_count is the number of
* elements in the array, and item_size is the size in bytes of each element.
*
* Once initialized, nothing further can be added to the list, since it will be full and cannot resize.
*
* Primary use case is to treat an already-initialized C array as an array list.
*/
AWS_STATIC_IMPL
void aws_array_list_init_static_from_initialized(
struct aws_array_list *AWS_RESTRICT list,
void *raw_array,
size_t item_count,
size_t item_size);
/**
* Set of properties of a valid aws_array_list.
*/
AWS_STATIC_IMPL
bool aws_array_list_is_valid(const struct aws_array_list *AWS_RESTRICT list);
/**
* Deallocates any memory that was allocated for this list, and resets list for reuse or deletion.
*/
AWS_STATIC_IMPL
void aws_array_list_clean_up(struct aws_array_list *AWS_RESTRICT list);
/**
* Erases and then deallocates any memory that was allocated for this list, and resets list for reuse or deletion.
*/
AWS_STATIC_IMPL
void aws_array_list_clean_up_secure(struct aws_array_list *AWS_RESTRICT list);
/**
* Pushes the memory pointed to by val onto the end of internal list
*/
AWS_STATIC_IMPL
int aws_array_list_push_back(struct aws_array_list *AWS_RESTRICT list, const void *val);
/**
* Copies the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised
*/
AWS_STATIC_IMPL
int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val);
/**
* Pushes the memory pointed to by val onto the front of internal list.
* This call results in shifting all of the elements in the list. Avoid this call unless that
* is intended behavior.
*/
AWS_STATIC_IMPL
int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val);
/**
* Deletes the element at the front of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised.
* This call results in shifting all of the elements at the end of the array to the front. Avoid this call unless that
* is intended behavior.
*/
AWS_STATIC_IMPL
int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list);
/**
* Delete N elements from the front of the list.
* Remaining elements are shifted to the front of the list.
* If the list has less than N elements, the list is cleared.
* This call is more efficient than calling aws_array_list_pop_front() N times.
*/
AWS_STATIC_IMPL
void aws_array_list_pop_front_n(struct aws_array_list *AWS_RESTRICT list, size_t n);
/**
* Deletes the element this index in the list if it exists.
* If element does not exist, AWS_ERROR_INVALID_INDEX will be raised.
* This call results in shifting all remaining elements towards the front.
* Avoid this call unless that is intended behavior.
*/
AWS_STATIC_IMPL
int aws_array_list_erase(struct aws_array_list *AWS_RESTRICT list, size_t index);
/**
* Copies the element at the end of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised.
*/
AWS_STATIC_IMPL
int aws_array_list_back(const struct aws_array_list *AWS_RESTRICT list, void *val);
/**
* Deletes the element at the end of the list if it exists. If list is empty, AWS_ERROR_LIST_EMPTY will be raised.
*/
AWS_STATIC_IMPL
int aws_array_list_pop_back(struct aws_array_list *AWS_RESTRICT list);
/**
* Clears all elements in the array and resets length to zero. Size does not change in this operation.
*/
AWS_STATIC_IMPL
void aws_array_list_clear(struct aws_array_list *AWS_RESTRICT list);
/**
* If in dynamic mode, shrinks the allocated array size to the minimum amount necessary to store its elements.
*/
AWS_COMMON_API
int aws_array_list_shrink_to_fit(struct aws_array_list *AWS_RESTRICT list);
/**
 * Copies the elements from list `from` into list `to`. If `to` is in static mode, it must be at least the same length
 * as `from`. Any data in `to` will be overwritten in this copy.
 */
AWS_COMMON_API
int aws_array_list_copy(const struct aws_array_list *AWS_RESTRICT from, struct aws_array_list *AWS_RESTRICT to);
/**
* Swap contents between two dynamic lists. Both lists must use the same allocator.
*/
AWS_STATIC_IMPL
void aws_array_list_swap_contents(
struct aws_array_list *AWS_RESTRICT list_a,
struct aws_array_list *AWS_RESTRICT list_b);
/**
* Returns the number of elements that can fit in the internal array. If list is initialized in dynamic mode,
* the capacity changes over time.
*/
AWS_STATIC_IMPL
size_t aws_array_list_capacity(const struct aws_array_list *AWS_RESTRICT list);
/**
* Returns the number of elements in the internal array.
*/
AWS_STATIC_IMPL
size_t aws_array_list_length(const struct aws_array_list *AWS_RESTRICT list);
/**
* Copies the memory at index to val. If element does not exist, AWS_ERROR_INVALID_INDEX will be raised.
*/
AWS_STATIC_IMPL
int aws_array_list_get_at(const struct aws_array_list *AWS_RESTRICT list, void *val, size_t index);
/**
* Copies the memory address of the element at index to *val. If element does not exist, AWS_ERROR_INVALID_INDEX will be
* raised.
*/
AWS_STATIC_IMPL
int aws_array_list_get_at_ptr(const struct aws_array_list *AWS_RESTRICT list, void **val, size_t index);
/**
* Ensures that the array list has enough capacity to store a value at the specified index. If there is not already
* enough capacity, and the list is in dynamic mode, this function will attempt to allocate more memory, expanding the
* list. In static mode, if 'index' is beyond the maximum index, AWS_ERROR_INVALID_INDEX will be raised.
*/
AWS_COMMON_API
int aws_array_list_ensure_capacity(struct aws_array_list *AWS_RESTRICT list, size_t index);
/**
 * Copies the memory pointed to by val into the array at index. If in dynamic mode, the size will grow by a factor
 * of two when the array is full. In static mode, AWS_ERROR_INVALID_INDEX will be raised if the index is past the bounds
 * of the array.
 */
AWS_STATIC_IMPL
int aws_array_list_set_at(struct aws_array_list *AWS_RESTRICT list, const void *val, size_t index);
/**
* Swap elements at the specified indices, which must be within the bounds of the array.
*/
AWS_COMMON_API
void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, size_t b);
/**
* Sort elements in the list in-place according to the comparator function.
*/
AWS_COMMON_API
void aws_array_list_sort(struct aws_array_list *AWS_RESTRICT list, aws_array_list_comparator_fn *compare_fn);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/array_list.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ARRAY_LIST_H */

View File

@@ -0,0 +1,410 @@
#ifndef AWS_COMMON_ARRAY_LIST_INL
#define AWS_COMMON_ARRAY_LIST_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/* This is implicitly included, but helps with editor highlighting */
#include <aws/common/array_list.h>
/*
* Do not add system headers here; add them to array_list.h. This file is included under extern "C" guards,
* which might break system headers.
*/
AWS_EXTERN_C_BEGIN
AWS_STATIC_IMPL
int aws_array_list_init_dynamic(
    struct aws_array_list *AWS_RESTRICT list,
    struct aws_allocator *alloc,
    size_t initial_item_allocation,
    size_t item_size) {
    AWS_FATAL_PRECONDITION(list != NULL);
    AWS_FATAL_PRECONDITION(alloc != NULL);
    AWS_FATAL_PRECONDITION(item_size > 0);

    AWS_ZERO_STRUCT(*list);

    /* Overflow-checked computation of the initial buffer size in bytes. */
    size_t allocation_size = 0;
    if (aws_mul_size_checked(initial_item_allocation, item_size, &allocation_size)) {
        goto error;
    }

    /* A zero initial allocation is legal: data stays NULL until a later insertion grows the list. */
    if (allocation_size > 0) {
        list->data = aws_mem_acquire(alloc, allocation_size);
        if (!list->data) {
            goto error;
        }
#ifdef DEBUG_BUILD
        /* Fill fresh memory with a recognizable pattern so reads of unset slots stand out. */
        memset(list->data, AWS_ARRAY_LIST_DEBUG_FILL, allocation_size);
#endif
        list->current_size = allocation_size;
    }
    list->item_size = item_size;
    list->alloc = alloc;

    AWS_FATAL_POSTCONDITION(list->current_size == 0 || list->data);
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;

error:
    /* On any failure the list is left zeroed: safe to clean up or re-initialize. */
    AWS_POSTCONDITION(AWS_IS_ZEROED(*list));
    return AWS_OP_ERR;
}
AWS_STATIC_IMPL
void aws_array_list_init_static(
    struct aws_array_list *AWS_RESTRICT list,
    void *raw_array,
    size_t item_count,
    size_t item_size) {
    AWS_FATAL_PRECONDITION(list != NULL);
    AWS_FATAL_PRECONDITION(raw_array != NULL);
    AWS_FATAL_PRECONDITION(item_count > 0);
    AWS_FATAL_PRECONDITION(item_size > 0);

    AWS_ZERO_STRUCT(*list);

    /* No allocator: a static list can never grow beyond the caller's array. */
    list->alloc = NULL;

    /* item_count * item_size must not overflow size_t. */
    size_t total_bytes = 0;
    int no_overflow = !aws_mul_size_checked(item_count, item_size, &total_bytes);
    AWS_FATAL_PRECONDITION(no_overflow);

    list->current_size = total_bytes;
    list->item_size = item_size;
    list->length = 0;
    list->data = raw_array;

    AWS_POSTCONDITION(aws_array_list_is_valid(list));
}
AWS_STATIC_IMPL
void aws_array_list_init_static_from_initialized(
    struct aws_array_list *AWS_RESTRICT list,
    void *raw_array,
    size_t item_count,
    size_t item_size) {
    /* Build a fixed-capacity list over the caller's array... */
    aws_array_list_init_static(list, raw_array, item_count, item_size);
    /* ...then mark every slot as occupied, since the caller already initialized them. */
    list->length = item_count;
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
}
AWS_STATIC_IMPL
bool aws_array_list_is_valid(const struct aws_array_list *AWS_RESTRICT list) {
    if (list == NULL) {
        return false;
    }

    /* length * item_size must not overflow... */
    size_t needed_bytes = 0;
    if (aws_mul_size_checked(list->length, list->item_size, &needed_bytes) != AWS_OP_SUCCESS) {
        return false;
    }

    /* ...and must fit within the current allocation. */
    if (list->current_size < needed_bytes) {
        return false;
    }

    /* A zero-byte allocation implies a NULL buffer; a non-empty one must be writable. */
    if (list->current_size == 0) {
        if (list->data != NULL) {
            return false;
        }
    } else if (!AWS_MEM_IS_WRITABLE(list->data, list->current_size)) {
        return false;
    }

    /* Zero-sized elements are never legal. */
    return list->item_size != 0;
}
AWS_STATIC_IMPL
void aws_array_list_clean_up(struct aws_array_list *AWS_RESTRICT list) {
    AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
    /* Only dynamic lists (alloc != NULL) own their buffer; static lists never free. */
    bool owns_buffer = (list->alloc != NULL) && (list->data != NULL);
    if (owns_buffer) {
        aws_mem_release(list->alloc, list->data);
    }
    /* Leave the list zeroed, ready for reuse or deletion. */
    AWS_ZERO_STRUCT(*list);
}
AWS_STATIC_IMPL
void aws_array_list_clean_up_secure(struct aws_array_list *AWS_RESTRICT list) {
    AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
    /* Only dynamic lists own their buffer. Wipe it before releasing so the
     * contents cannot be recovered from freed memory. */
    bool owns_buffer = (list->alloc != NULL) && (list->data != NULL);
    if (owns_buffer) {
        aws_secure_zero(list->data, list->current_size);
        aws_mem_release(list->alloc, list->data);
    }
    AWS_ZERO_STRUCT(*list);
}
AWS_STATIC_IMPL
int aws_array_list_push_back(struct aws_array_list *AWS_RESTRICT list, const void *val) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    /* Fixed: message said "writable" but the check (correctly) requires readable memory. */
    AWS_PRECONDITION(
        val && AWS_MEM_IS_READABLE(val, list->item_size),
        "Input pointer [val] must point readable memory of [list->item_size] bytes.");
    /* Appending is a set at index == length; set_at grows dynamic lists as needed. */
    int err_code = aws_array_list_set_at(list, val, aws_array_list_length(list));
    /* A static (no-allocator) list cannot grow: translate the generic "bad index"
     * failure into the more descriptive "list is full" error. */
    if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE);
    }
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return err_code;
}
AWS_STATIC_IMPL
int aws_array_list_front(const struct aws_array_list *AWS_RESTRICT list, void *val) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    AWS_PRECONDITION(
        val && AWS_MEM_IS_WRITABLE(val, list->item_size),
        "Input pointer [val] must point writable memory of [list->item_size] bytes.");
    /* Nothing to copy from an empty list. */
    if (aws_array_list_length(list) == 0) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EMPTY);
    }
    /* Element 0 lives at the very start of the buffer. */
    memcpy(val, list->data, list->item_size);
    AWS_POSTCONDITION(AWS_BYTES_EQ(val, list->data, list->item_size));
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
int aws_array_list_push_front(struct aws_array_list *AWS_RESTRICT list, const void *val) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    /* Fixed: message said "writable" but the check (correctly) requires readable memory. */
    AWS_PRECONDITION(
        val && AWS_MEM_IS_READABLE(val, list->item_size),
        "Input pointer [val] must point readable memory of [list->item_size] bytes.");
    size_t orig_len = aws_array_list_length(list);
    /* Make sure there is room for one more element (index == current length). */
    int err_code = aws_array_list_ensure_capacity(list, orig_len);
    if (err_code && aws_last_error() == AWS_ERROR_INVALID_INDEX && !list->alloc) {
        /* Static list is full: report the more descriptive error. */
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EXCEEDS_MAX_SIZE);
    } else if (err_code) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return err_code;
    }
    /* Shift existing elements one slot towards the back, then write the new front. */
    if (orig_len) {
        memmove((uint8_t *)list->data + list->item_size, list->data, orig_len * list->item_size);
    }
    ++list->length;
    memcpy(list->data, val, list->item_size);
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return err_code;
}
AWS_STATIC_IMPL
int aws_array_list_pop_front(struct aws_array_list *AWS_RESTRICT list) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    if (aws_array_list_length(list) == 0) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EMPTY);
    }
    /* Delegate to the bulk pop, removing exactly one element. */
    aws_array_list_pop_front_n(list, 1);
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
void aws_array_list_pop_front_n(struct aws_array_list *AWS_RESTRICT list, size_t n) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    /* Dropping every element (or more than exist) is simply a clear. */
    if (n >= aws_array_list_length(list)) {
        aws_array_list_clear(list);
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return;
    }
    if (n > 0) {
        const size_t removed_bytes = list->item_size * n;
        const size_t kept_items = aws_array_list_length(list) - n;
        const size_t kept_bytes = kept_items * list->item_size;
        /* Slide the surviving elements down to the start of the buffer (regions overlap). */
        memmove(list->data, (uint8_t *)list->data + removed_bytes, kept_bytes);
        list->length = kept_items;
#ifdef DEBUG_BUILD
        /* Poison the now-unused tail so stale reads are obvious in debug builds. */
        memset((uint8_t *)list->data + kept_bytes, AWS_ARRAY_LIST_DEBUG_FILL, removed_bytes);
#endif
    }
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
}
int aws_array_list_erase(struct aws_array_list *AWS_RESTRICT list, size_t index) {
AWS_PRECONDITION(aws_array_list_is_valid(list));
const size_t length = aws_array_list_length(list);
if (index >= length) {
AWS_POSTCONDITION(aws_array_list_is_valid(list));
return aws_raise_error(AWS_ERROR_INVALID_INDEX);
}
if (index == 0) {
/* Removing front element */
aws_array_list_pop_front(list);
} else if (index == (length - 1)) {
/* Removing back element */
aws_array_list_pop_back(list);
} else {
/* Removing middle element */
uint8_t *item_ptr = (uint8_t *)list->data + (index * list->item_size);
uint8_t *next_item_ptr = item_ptr + list->item_size;
size_t trailing_items = (length - index) - 1;
size_t trailing_bytes = trailing_items * list->item_size;
memmove(item_ptr, next_item_ptr, trailing_bytes);
aws_array_list_pop_back(list);
}
AWS_POSTCONDITION(aws_array_list_is_valid(list));
return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
int aws_array_list_back(const struct aws_array_list *AWS_RESTRICT list, void *val) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    AWS_PRECONDITION(
        val && AWS_MEM_IS_WRITABLE(val, list->item_size),
        "Input pointer [val] must point writable memory of [list->item_size] bytes.");
    const size_t len = aws_array_list_length(list);
    if (len == 0) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EMPTY);
    }
    /* Copy out the element at index len - 1. */
    const size_t tail_offset = list->item_size * (len - 1);
    memcpy(val, (void *)((uint8_t *)list->data + tail_offset), list->item_size);
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
int aws_array_list_pop_back(struct aws_array_list *AWS_RESTRICT list) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    const size_t len = aws_array_list_length(list);
    if (len == 0) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_LIST_EMPTY);
    }
    AWS_FATAL_PRECONDITION(list->data);
    /* Zero out the departing slot, then shrink the logical length. */
    const size_t tail_offset = list->item_size * (len - 1);
    memset((void *)((uint8_t *)list->data + tail_offset), 0, list->item_size);
    list->length--;
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
void aws_array_list_clear(struct aws_array_list *AWS_RESTRICT list) {
    AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
    if (list->data != NULL) {
#ifdef DEBUG_BUILD
        /* Poison the whole buffer in debug builds so use-after-clear stands out. */
        memset(list->data, AWS_ARRAY_LIST_DEBUG_FILL, list->current_size);
#endif
        /* Capacity is untouched; only the logical length resets. */
        list->length = 0;
    }
    AWS_POSTCONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
}
AWS_STATIC_IMPL
void aws_array_list_swap_contents(
    struct aws_array_list *AWS_RESTRICT list_a,
    struct aws_array_list *AWS_RESTRICT list_b) {
    /* Both lists must be dynamic, share an allocator and element size, and be distinct objects. */
    AWS_FATAL_PRECONDITION(list_a->alloc);
    AWS_FATAL_PRECONDITION(list_a->alloc == list_b->alloc);
    AWS_FATAL_PRECONDITION(list_a->item_size == list_b->item_size);
    AWS_FATAL_PRECONDITION(list_a != list_b);
    AWS_PRECONDITION(aws_array_list_is_valid(list_a));
    AWS_PRECONDITION(aws_array_list_is_valid(list_b));
    /* Swapping the headers swaps ownership of the underlying buffers; no data moves. */
    struct aws_array_list scratch = *list_b;
    *list_b = *list_a;
    *list_a = scratch;
    AWS_POSTCONDITION(aws_array_list_is_valid(list_a));
    AWS_POSTCONDITION(aws_array_list_is_valid(list_b));
}
AWS_STATIC_IMPL
size_t aws_array_list_capacity(const struct aws_array_list *AWS_RESTRICT list) {
    AWS_FATAL_PRECONDITION(list->item_size);
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    /* Number of whole elements that fit in the current allocation. */
    const size_t n_slots = list->current_size / list->item_size;
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return n_slots;
}
AWS_STATIC_IMPL
size_t aws_array_list_length(const struct aws_array_list *AWS_RESTRICT list) {
    /* Teaches clang-tidy and friends that list->data cannot be NULL in a non-empty list. */
    AWS_FATAL_PRECONDITION(!list->length || list->data);
    AWS_PRECONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
    const size_t n_items = list->length;
    AWS_POSTCONDITION(AWS_IS_ZEROED(*list) || aws_array_list_is_valid(list));
    return n_items;
}
AWS_STATIC_IMPL
int aws_array_list_get_at(const struct aws_array_list *AWS_RESTRICT list, void *val, size_t index) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    AWS_PRECONDITION(
        val && AWS_MEM_IS_WRITABLE(val, list->item_size),
        "Input pointer [val] must point writable memory of [list->item_size] bytes.");
    /* Reject out-of-range reads before touching the buffer. */
    if (index >= aws_array_list_length(list)) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_INVALID_INDEX);
    }
    memcpy(val, (void *)((uint8_t *)list->data + (list->item_size * index)), list->item_size);
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
int aws_array_list_get_at_ptr(const struct aws_array_list *AWS_RESTRICT list, void **val, size_t index) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    AWS_PRECONDITION(val != NULL);
    /* Reject out-of-range requests; *val is left untouched on failure. */
    if (index >= aws_array_list_length(list)) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return aws_raise_error(AWS_ERROR_INVALID_INDEX);
    }
    /* Hand back a pointer into the list's own storage (valid until the list reallocates). */
    *val = (void *)((uint8_t *)list->data + (list->item_size * index));
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_STATIC_IMPL
int aws_array_list_set_at(struct aws_array_list *AWS_RESTRICT list, const void *val, size_t index) {
    AWS_PRECONDITION(aws_array_list_is_valid(list));
    AWS_PRECONDITION(
        val && AWS_MEM_IS_READABLE(val, list->item_size),
        "Input pointer [val] must point readable memory of [list->item_size] bytes.");
    /* Grow (dynamic mode) or validate (static mode) so that [index] is addressable. */
    if (aws_array_list_ensure_capacity(list, index)) {
        AWS_POSTCONDITION(aws_array_list_is_valid(list));
        return AWS_OP_ERR;
    }
    AWS_FATAL_PRECONDITION(list->data);
    memcpy((void *)((uint8_t *)list->data + (list->item_size * index)), val, list->item_size);
    /*
     * This isn't perfect, but its the best I can come up with for detecting
     * length changes: writing at or past the current end extends the logical
     * length to index + 1. The checked add guards the index == SIZE_MAX case.
     */
    if (index >= aws_array_list_length(list)) {
        if (aws_add_size_checked(index, 1, &list->length)) {
            AWS_POSTCONDITION(aws_array_list_is_valid(list));
            return AWS_OP_ERR;
        }
    }
    AWS_POSTCONDITION(aws_array_list_is_valid(list));
    return AWS_OP_SUCCESS;
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ARRAY_LIST_INL */

View File

@@ -0,0 +1,193 @@
#ifndef AWS_COMMON_ASSERT_H
#define AWS_COMMON_ASSERT_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/exports.h>
#include <aws/common/macros.h>
#include <stdio.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
AWS_COMMON_API
AWS_DECLSPEC_NORETURN
void aws_fatal_assert(const char *cond_str, const char *file, int line) AWS_ATTRIBUTE_NORETURN;
AWS_EXTERN_C_END
#if defined(CBMC)
# define AWS_PANIC_OOM(mem, msg) \
do { \
if (!(mem)) { \
fprintf(stderr, "%s: %s, line %d", msg, __FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
#else
# define AWS_PANIC_OOM(mem, msg) \
do { \
if (!(mem)) { \
fprintf(stderr, "%s", msg); \
abort(); \
} \
} while (0)
#endif /* defined(CBMC) */
#if defined(CBMC)
# define AWS_ASSUME(cond) __CPROVER_assume(cond)
#elif defined(_MSC_VER)
# define AWS_ASSUME(cond) __assume(cond)
# define AWS_UNREACHABLE() __assume(0)
#elif defined(__clang__)
# define AWS_ASSUME(cond) \
do { \
bool _result = (cond); \
__builtin_assume(_result); \
} while (false)
# define AWS_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define AWS_ASSUME(cond) ((cond) ? (void)0 : __builtin_unreachable())
# define AWS_UNREACHABLE() __builtin_unreachable()
#else
# define AWS_ASSUME(cond)
# define AWS_UNREACHABLE()
#endif
/*
 * AWS_ASSERT(cond): checked only under CBMC, debug builds, and the clang static
 * analyzer; compiles away entirely in release builds.
 */
#if defined(CBMC)
# include <assert.h>
# define AWS_ASSERT(cond) assert(cond)
#elif defined(DEBUG_BUILD) || defined(__clang_analyzer__)
# define AWS_ASSERT(cond) AWS_FATAL_ASSERT(cond)
#else
# define AWS_ASSERT(cond)
#endif /* defined(CBMC) */
/*
 * AWS_FATAL_ASSERT(cond): checked in ALL build types; on failure calls
 * aws_fatal_assert() (declared above), which does not return.
 */
#if defined(CBMC)
# define AWS_FATAL_ASSERT(cond) AWS_ASSERT(cond)
#elif defined(__clang_analyzer__)
/* For the analyzer, a plain abort() is enough to mark the path as terminating. */
# define AWS_FATAL_ASSERT(cond) \
    if (!(cond)) { \
        abort(); \
    }
#else
# if defined(_MSC_VER)
/* MSVC: suppress C4127 for always-true/false conditions around the check. */
# define AWS_FATAL_ASSERT(cond) \
    __pragma(warning(push)) __pragma(warning(disable : 4127)) /* conditional expression is constant */ \
    if (!(cond)) { \
        aws_fatal_assert(#cond, __FILE__, __LINE__); \
    } \
    __pragma(warning(pop))
# else
# define AWS_FATAL_ASSERT(cond) \
    do { \
        if (!(cond)) { \
            aws_fatal_assert(#cond, __FILE__, __LINE__); \
        } \
    } while (0)
# endif /* defined(_MSC_VER) */
#endif /* defined(CBMC) */
/**
 * Define function contracts.
 * When the code is being verified using CBMC these contracts are formally verified;
 * When the code is built in debug mode, they are checked as much as possible using assertions
 * When the code is built in production mode, non-fatal contracts are not checked.
 * Violations of the function contracts are undefined behaviour.
 */
#ifdef CBMC
// clang-format off
// disable clang format, since it likes to break formatting of stringize macro.
// seems to be fixed in v15 plus, but we are not ready to update to it yet
# define AWS_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation))
# define AWS_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed")
# define AWS_FATAL_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation))
# define AWS_FATAL_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed")
# define AWS_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
# define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed")
# define AWS_FATAL_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
# define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed")
# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
/* BUGFIX: this previously used __CPROVER_r_ok, so CBMC only verified readability.
 * Writability must be checked with __CPROVER_w_ok. */
# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_w_ok((base), (len))))
// clang-format on
#else
# define AWS_PRECONDITION2(cond, expl) AWS_ASSERT(cond)
# define AWS_PRECONDITION1(cond) AWS_ASSERT(cond)
# define AWS_FATAL_PRECONDITION2(cond, expl) AWS_FATAL_ASSERT(cond)
# define AWS_FATAL_PRECONDITION1(cond) AWS_FATAL_ASSERT(cond)
# define AWS_POSTCONDITION2(cond, expl) AWS_ASSERT(cond)
# define AWS_POSTCONDITION1(cond) AWS_ASSERT(cond)
# define AWS_FATAL_POSTCONDITION2(cond, expl) AWS_FATAL_ASSERT(cond)
# define AWS_FATAL_POSTCONDITION1(cond) AWS_FATAL_ASSERT(cond)
/**
 * These macros should not be used in is_valid functions.
 * All validate functions are also used in assumptions for CBMC proofs,
 * which should not contain __CPROVER_*_ok primitives. The use of these primitives
 * in assumptions may lead to spurious results.
 * The C runtime does not give a way to check these properties,
 * but we can at least check that the pointer is valid. */
# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (base))
# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (base))
#endif /* CBMC */
/**
* These macros can safely be used in validate functions.
*/
#define AWS_MEM_IS_READABLE(base, len) (((len) == 0) || (base))
#define AWS_MEM_IS_WRITABLE(base, len) (((len) == 0) || (base))
/* Logical consequence. */
#define AWS_IMPLIES(a, b) (!(a) || (b))
/**
* If and only if (iff) is a biconditional logical connective between statements a and b.
* We need double negations (!!) here to work correctly for non-Boolean a and b values.
* Equivalent to (AWS_IMPLIES(a, b) && AWS_IMPLIES(b, a)).
*/
#define AWS_IFF(a, b) (!!(a) == !!(b))
/*
 * AWS_RETURN_ERROR_IF_IMPL(type, cond, err, explanation): if `cond` is false, raise `err`
 * and return its result from the enclosing function. The `type` and `explanation`
 * arguments are documentation-only and are never evaluated.
 */
#define AWS_RETURN_ERROR_IF_IMPL(type, cond, err, explanation) \
    do { \
        if (!(cond)) { \
            return aws_raise_error(err); \
        } \
    } while (0)
#define AWS_RETURN_ERROR_IF3(cond, err, explanation) AWS_RETURN_ERROR_IF_IMPL("InternalCheck", cond, err, explanation)
#define AWS_RETURN_ERROR_IF2(cond, err) AWS_RETURN_ERROR_IF3(cond, err, #cond " check failed")
#define AWS_RETURN_ERROR_IF(...) CALL_OVERLOAD(AWS_RETURN_ERROR_IF, __VA_ARGS__)
/* Error-raising contract checks: 1-arg forms default to AWS_ERROR_INVALID_ARGUMENT. */
#define AWS_ERROR_PRECONDITION3(cond, err, explanation) AWS_RETURN_ERROR_IF_IMPL("Precondition", cond, err, explanation)
#define AWS_ERROR_PRECONDITION2(cond, err) AWS_ERROR_PRECONDITION3(cond, err, #cond " check failed")
#define AWS_ERROR_PRECONDITION1(cond) AWS_ERROR_PRECONDITION2(cond, AWS_ERROR_INVALID_ARGUMENT)
#define AWS_ERROR_POSTCONDITION3(cond, err, explanation) \
    AWS_RETURN_ERROR_IF_IMPL("Postcondition", cond, err, explanation)
#define AWS_ERROR_POSTCONDITION2(cond, err) AWS_ERROR_POSTCONDITION3(cond, err, #cond " check failed")
#define AWS_ERROR_POSTCONDITION1(cond) AWS_ERROR_POSTCONDITION2(cond, AWS_ERROR_INVALID_ARGUMENT)
// The UNUSED is used to silence the complains of GCC for zero arguments in variadic macro
/* Variadic front-ends: CALL_OVERLOAD dispatches to the 1/2/3-argument variants above
 * based on how many arguments were supplied. */
#define AWS_PRECONDITION(...) CALL_OVERLOAD(AWS_PRECONDITION, __VA_ARGS__)
#define AWS_FATAL_PRECONDITION(...) CALL_OVERLOAD(AWS_FATAL_PRECONDITION, __VA_ARGS__)
#define AWS_POSTCONDITION(...) CALL_OVERLOAD(AWS_POSTCONDITION, __VA_ARGS__)
#define AWS_FATAL_POSTCONDITION(...) CALL_OVERLOAD(AWS_FATAL_POSTCONDITION, __VA_ARGS__)
#define AWS_ERROR_PRECONDITION(...) CALL_OVERLOAD(AWS_ERROR_PRECONDITION, __VA_ARGS__)
/* BUGFIX: previously dispatched to the AWS_ERROR_PRECONDITION overloads; route to the
 * AWS_ERROR_POSTCONDITION overloads so the family is self-consistent. The expansion is
 * behaviorally identical because AWS_RETURN_ERROR_IF_IMPL ignores its `type` argument. */
#define AWS_ERROR_POSTCONDITION(...) CALL_OVERLOAD(AWS_ERROR_POSTCONDITION, __VA_ARGS__)
/* Run the postcondition check(s), then return `_rval` from the enclosing function. */
#define AWS_RETURN_WITH_POSTCONDITION(_rval, ...) \
    do { \
        AWS_POSTCONDITION(__VA_ARGS__); \
        return _rval; \
    } while (0)
#define AWS_SUCCEED_WITH_POSTCONDITION(...) AWS_RETURN_WITH_POSTCONDITION(AWS_OP_SUCCESS, __VA_ARGS__)
/* Readability/writability of the whole object a pointer points at. */
#define AWS_OBJECT_PTR_IS_READABLE(ptr) AWS_MEM_IS_READABLE((ptr), sizeof(*(ptr)))
#define AWS_OBJECT_PTR_IS_WRITABLE(ptr) AWS_MEM_IS_WRITABLE((ptr), sizeof(*(ptr)))
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ASSERT_H */

View File

@@ -0,0 +1,329 @@
#ifndef AWS_COMMON_ATOMICS_H
#define AWS_COMMON_ATOMICS_H
#include <aws/common/common.h>
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
AWS_PUSH_SANE_WARNING_LEVEL
/**
 * struct aws_atomic_var represents an atomic variable - a value which can hold an integer or pointer
 * that can be manipulated atomically. struct aws_atomic_vars should normally only be manipulated
 * with atomics methods defined in this header.
 */
struct aws_atomic_var {
    void *value;
};
/* Helpers for extracting the integer and pointer values from aws_atomic_var.
 * INTVAL reinterprets the variable's storage as the backend integer type
 * (backends typedef aws_atomic_impl_int_t as size_t, matching void * in size
 * on supported platforms — see the *_gnu backends). */
#define AWS_ATOMIC_VAR_PTRVAL(var) ((var)->value)
#define AWS_ATOMIC_VAR_INTVAL(var) (*(aws_atomic_impl_int_t *)(var))
/*
 * This enumeration specifies the memory ordering properties requested for a particular
 * atomic operation. The atomic operation may provide stricter ordering than requested.
 * Note that, within a single thread, all operations are still sequenced (that is, a thread
 * sees its own atomic writes and reads happening in program order, but other threads may
 * disagree on this ordering).
 *
 * The behavior of these memory orderings are the same as in the C11 atomics API; however,
 * we only implement a subset that can be portably implemented on the compilers we target.
 */
enum aws_memory_order {
    /**
     * No particular ordering constraints are guaranteed relative to other
     * operations at all; we merely ensure that the operation itself is atomic.
     */
    aws_memory_order_relaxed = 0,
    /* aws_memory_order_consume - not currently implemented */
    /**
     * Specifies acquire ordering. No reads or writes on the current thread can be
     * reordered to happen before this operation. This is typically paired with a release
     * ordering; any writes that happened on the releasing operation will be visible
     * after the paired acquire operation.
     *
     * Acquire ordering is only meaningful on load or load-store operations.
     */
    aws_memory_order_acquire = 2, /* leave a spot for consume if we ever add it */
    /**
     * Specifies release order. No reads or writes can be reordered to come after this
     * operation. Typically paired with an acquire operation.
     *
     * Release ordering is only meaningful on store or load-store operations.
     */
    aws_memory_order_release,
    /**
     * Specifies acquire-release order; if this operation acts as a load, it acts as an
     * acquire operation; if it acts as a store, it acts as a release operation; if it's
     * a load-store, it does both.
     */
    aws_memory_order_acq_rel,
    /**
     * Specifies sequentially consistent order. This behaves as acq_rel, but in addition,
     * all seq_cst operations appear to occur in some globally consistent order.
     *
     * TODO: Figure out how to correctly implement this in MSVC. It appears that interlocked
     * functions provide only acq_rel ordering.
     */
    aws_memory_order_seq_cst
};
/**
 * Statically initializes an aws_atomic_var to a given size_t value.
 * (Stores the integer through the pointer field; see AWS_ATOMIC_VAR_INTVAL.)
 */
#define AWS_ATOMIC_INIT_INT(x) {.value = (void *)(uintptr_t)(x)}
/**
 * Statically initializes an aws_atomic_var to a given void * value.
 */
#define AWS_ATOMIC_INIT_PTR(x) {.value = (void *)(x)}
AWS_EXTERN_C_BEGIN
/*
* Note: We do not use the C11 atomics API; this is because we want to make sure the representation
* (and behavior) of atomic values is consistent, regardless of what --std= flag you pass to your compiler.
* Since C11 atomics can silently introduce locks, we run the risk of creating such ABI inconsistencies
* if we decide based on compiler features which atomics API to use, and in practice we expect to have
* either the GNU or MSVC atomics anyway.
*
* As future work, we could test to see if the C11 atomics API on this platform behaves consistently
* with the other APIs and use it if it does.
*/
/**
* Initializes an atomic variable with an integer value. This operation should be done before any
* other operations on this atomic variable, and must be done before attempting any parallel operations.
*
* This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger)
* when communicating the fact that initialization is complete to the other thread. Launching the thread
* implies a sufficiently strong barrier.
*/
AWS_STATIC_IMPL
void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n);
/**
* Initializes an atomic variable with a pointer value. This operation should be done before any
* other operations on this atomic variable, and must be done before attempting any parallel operations.
*
* This operation does not imply a barrier. Ensure that you use an acquire-release barrier (or stronger)
* when communicating the fact that initialization is complete to the other thread. Launching the thread
* implies a sufficiently strong barrier.
*/
AWS_STATIC_IMPL
void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p);
/**
* Reads an atomic var as an integer, using the specified ordering, and returns the result.
*/
AWS_STATIC_IMPL
size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order);
/**
* Reads an atomic var as an integer, using sequentially consistent ordering, and returns the result.
*/
AWS_STATIC_IMPL
size_t aws_atomic_load_int(volatile const struct aws_atomic_var *var);
/**
* Reads an atomic var as a pointer, using the specified ordering, and returns the result.
*/
AWS_STATIC_IMPL
void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order);
/**
* Reads an atomic var as a pointer, using sequentially consistent ordering, and returns the result.
*/
AWS_STATIC_IMPL
void *aws_atomic_load_ptr(volatile const struct aws_atomic_var *var);
/**
* Stores an integer into an atomic var, using the specified ordering.
*/
AWS_STATIC_IMPL
void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order);
/**
* Stores an integer into an atomic var, using sequentially consistent ordering.
*/
AWS_STATIC_IMPL
void aws_atomic_store_int(volatile struct aws_atomic_var *var, size_t n);
/**
* Stores a pointer into an atomic var, using the specified ordering.
*/
AWS_STATIC_IMPL
void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order);
/**
* Stores a pointer into an atomic var, using sequentially consistent ordering.
*/
AWS_STATIC_IMPL
void aws_atomic_store_ptr(volatile struct aws_atomic_var *var, void *p);
/**
* Exchanges an integer with the value in an atomic_var, using the specified ordering.
* Returns the value that was previously in the atomic_var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int_explicit(
volatile struct aws_atomic_var *var,
size_t n,
enum aws_memory_order memory_order);
/**
* Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering.
* Returns the value that was previously in the atomic_var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int(volatile struct aws_atomic_var *var, size_t n);
/**
 * Exchanges a pointer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void *p,
    enum aws_memory_order memory_order);
/**
 * Exchanges a pointer with the value in an atomic_var, using sequentially consistent ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr(volatile struct aws_atomic_var *var, void *p);
/**
* Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
* to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
* order_failure must be no stronger than order_success, and must not be release or acq_rel.
* Returns true if the compare was successful and the variable updated to desired.
*/
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int_explicit(
volatile struct aws_atomic_var *var,
size_t *expected,
size_t desired,
enum aws_memory_order order_success,
enum aws_memory_order order_failure);
/**
* Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
* to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
* Returns true if the compare was successful and the variable updated to desired.
*/
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int(volatile struct aws_atomic_var *var, size_t *expected, size_t desired);
/**
* Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
* to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
* order_failure must be no stronger than order_success, and must not be release or acq_rel.
* Returns true if the compare was successful and the variable updated to desired.
*/
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr_explicit(
volatile struct aws_atomic_var *var,
void **expected,
void *desired,
enum aws_memory_order order_success,
enum aws_memory_order order_failure);
/**
* Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
* to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
* Returns true if the compare was successful and the variable updated to desired.
*/
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr(volatile struct aws_atomic_var *var, void **expected, void *desired);
/**
* Atomically adds n to *var, and returns the previous value of *var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
/**
* Atomically subtracts n from *var, and returns the previous value of *var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
/**
* Atomically ORs n with *var, and returns the previous value of *var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
/**
* Atomically ANDs n with *var, and returns the previous value of *var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
/**
* Atomically XORs n with *var, and returns the previous value of *var.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order);
/**
* Atomically adds n to *var, and returns the previous value of *var.
* Uses sequentially consistent ordering.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add(volatile struct aws_atomic_var *var, size_t n);
/**
* Atomically subtracts n from *var, and returns the previous value of *var.
* Uses sequentially consistent ordering.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub(volatile struct aws_atomic_var *var, size_t n);
/**
* Atomically ands n into *var, and returns the previous value of *var.
* Uses sequentially consistent ordering.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and(volatile struct aws_atomic_var *var, size_t n);
/**
* Atomically ors n into *var, and returns the previous value of *var.
* Uses sequentially consistent ordering.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or(volatile struct aws_atomic_var *var, size_t n);
/**
* Atomically xors n into *var, and returns the previous value of *var.
* Uses sequentially consistent ordering.
*/
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor(volatile struct aws_atomic_var *var, size_t n);
/**
* Provides the same reordering guarantees as an atomic operation with the specified memory order, without
* needing to actually perform an atomic operation.
*/
AWS_STATIC_IMPL
void aws_atomic_thread_fence(enum aws_memory_order order);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/atomics.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif

View File

@@ -0,0 +1,147 @@
#ifndef AWS_COMMON_ATOMICS_INL
#define AWS_COMMON_ATOMICS_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/atomics.h>
#include <aws/common/common.h>
AWS_EXTERN_C_BEGIN
/*
 * Sequentially-consistent convenience wrappers: each function below simply forwards to
 * its *_explicit counterpart with aws_memory_order_seq_cst.
 */
/**
 * Reads an atomic var as an integer, using sequentially consistent ordering, and returns the result.
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int(volatile const struct aws_atomic_var *var) {
    return aws_atomic_load_int_explicit(var, aws_memory_order_seq_cst);
}
/**
 * Reads an atomic var as a pointer, using sequentially consistent ordering, and returns the result.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr(volatile const struct aws_atomic_var *var) {
    return aws_atomic_load_ptr_explicit(var, aws_memory_order_seq_cst);
}
/**
 * Stores an integer into an atomic var, using sequentially consistent ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int(volatile struct aws_atomic_var *var, size_t n) {
    aws_atomic_store_int_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Stores a pointer into an atomic var, using sequentially consistent ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr(volatile struct aws_atomic_var *var, void *p) {
    aws_atomic_store_ptr_explicit(var, p, aws_memory_order_seq_cst);
}
/**
 * Exchanges an integer with the value in an atomic_var, using sequentially consistent ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_exchange_int_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Exchanges a pointer with the value in an atomic_var, using sequentially consistent ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr(volatile struct aws_atomic_var *var, void *p) {
    return aws_atomic_exchange_ptr_explicit(var, p, aws_memory_order_seq_cst);
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
 * Returns true if the compare was successful and the variable updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int(volatile struct aws_atomic_var *var, size_t *expected, size_t desired) {
    return aws_atomic_compare_exchange_int_explicit(
        var, expected, desired, aws_memory_order_seq_cst, aws_memory_order_seq_cst);
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. Uses sequentially consistent memory ordering, regardless of success or failure.
 * Returns true if the compare was successful and the variable updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr(volatile struct aws_atomic_var *var, void **expected, void *desired) {
    return aws_atomic_compare_exchange_ptr_explicit(
        var, expected, desired, aws_memory_order_seq_cst, aws_memory_order_seq_cst);
}
/**
 * Atomically adds n to *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_fetch_add_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_fetch_sub_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Atomically ands n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_fetch_and_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Atomically ors n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_fetch_or_explicit(var, n, aws_memory_order_seq_cst);
}
/**
 * Atomically xors n into *var, and returns the previous value of *var.
 * Uses sequentially consistent ordering.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor(volatile struct aws_atomic_var *var, size_t n) {
    return aws_atomic_fetch_xor_explicit(var, n, aws_memory_order_seq_cst);
}
AWS_EXTERN_C_END
/* Include the backend implementation now, because we'll use its typedefs and #defines below */
#if defined(__GNUC__) || defined(__clang__)
# if defined(__ATOMIC_RELAXED)
# include <aws/common/atomics_gnu.inl>
# else
# include <aws/common/atomics_gnu_old.inl>
# endif /* __ATOMIC_RELAXED */
#elif defined(_MSC_VER)
# include <aws/common/atomics_msvc.inl>
#else
# error No atomics implementation for your compiler is available
#endif
#include <aws/common/atomics_fallback.inl>
#endif /* AWS_COMMON_ATOMICS_INL */

View File

@@ -0,0 +1,24 @@
#ifndef AWS_COMMON_ATOMICS_FALLBACK_INL
#define AWS_COMMON_ATOMICS_FALLBACK_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
AWS_EXTERN_C_BEGIN
#ifndef AWS_ATOMICS_HAVE_THREAD_FENCE
void aws_atomic_thread_fence(enum aws_memory_order order) {
struct aws_atomic_var var;
aws_atomic_int_t expected = 0;
aws_atomic_store_int(&var, expected, aws_memory_order_relaxed);
aws_atomic_compare_exchange_int(&var, &expected, 1, order, aws_memory_order_relaxed);
}
#endif /* AWS_ATOMICS_HAVE_THREAD_FENCE */
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ATOMICS_FALLBACK_INL */

View File

@@ -0,0 +1,218 @@
#ifndef AWS_COMMON_ATOMICS_GNU_INL
#define AWS_COMMON_ATOMICS_GNU_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/* These are implicitly included, but help with editor highlighting */
#include <aws/common/atomics.h>
#include <aws/common/common.h>
#include <stdint.h>
#include <stdlib.h>
AWS_EXTERN_C_BEGIN
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wc11-extensions"
#else
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpedantic"
#endif
typedef size_t aws_atomic_impl_int_t;
/* Translate the public aws_memory_order enum into the corresponding GCC/clang
 * __ATOMIC_* constant. An unrecognized value terminates the process: silently
 * substituting a weaker ordering would be unsafe. */
static inline int aws_atomic_priv_xlate_order(enum aws_memory_order order) {
    if (order == aws_memory_order_relaxed) {
        return __ATOMIC_RELAXED;
    }
    if (order == aws_memory_order_acquire) {
        return __ATOMIC_ACQUIRE;
    }
    if (order == aws_memory_order_release) {
        return __ATOMIC_RELEASE;
    }
    if (order == aws_memory_order_acq_rel) {
        return __ATOMIC_ACQ_REL;
    }
    if (order == aws_memory_order_seq_cst) {
        return __ATOMIC_SEQ_CST;
    }
    /* Unknown memory order */
    abort();
}
/**
 * Initializes an atomic variable with an integer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 * (Plain non-atomic store: initialization must happen-before any concurrent access.)
 */
AWS_STATIC_IMPL
void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) {
    AWS_ATOMIC_VAR_INTVAL(var) = n;
}
/**
 * Initializes an atomic variable with a pointer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 * (Plain non-atomic store: initialization must happen-before any concurrent access.)
 */
AWS_STATIC_IMPL
void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) {
    AWS_ATOMIC_VAR_PTRVAL(var) = p;
}
/**
 * Reads an atomic var as an integer, using the specified ordering, and returns the result.
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    return __atomic_load_n(&AWS_ATOMIC_VAR_INTVAL(var), aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Reads an atomic var as a pointer, using the specified ordering, and returns the result.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    return __atomic_load_n(&AWS_ATOMIC_VAR_PTRVAL(var), aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Stores an integer into an atomic var, using the specified ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) {
    __atomic_store_n(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Stores a pointer into an atomic var, using the specified ordering.
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) {
    __atomic_store_n(&AWS_ATOMIC_VAR_PTRVAL(var), p, aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Exchanges an integer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t n,
    enum aws_memory_order memory_order) {
    return __atomic_exchange_n(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Exchanges a pointer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void *p,
    enum aws_memory_order memory_order) {
    return __atomic_exchange_n(&AWS_ATOMIC_VAR_PTRVAL(var), p, aws_atomic_priv_xlate_order(memory_order));
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 * Returns true if the compare was successful and the variable updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t *expected,
    size_t desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    /* `false` selects the strong CAS: no spurious failures. */
    return __atomic_compare_exchange_n(
        &AWS_ATOMIC_VAR_INTVAL(var),
        expected,
        desired,
        false,
        aws_atomic_priv_xlate_order(order_success),
        aws_atomic_priv_xlate_order(order_failure));
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 * Returns true if the compare was successful and the variable updated to desired.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void **expected,
    void *desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    /* `false` selects the strong CAS: no spurious failures. */
    return __atomic_compare_exchange_n(
        &AWS_ATOMIC_VAR_PTRVAL(var),
        expected,
        desired,
        false,
        aws_atomic_priv_xlate_order(order_success),
        aws_atomic_priv_xlate_order(order_failure));
}
/*
 * Fetch-and-modify operations: each applies its operation atomically and returns the
 * value the variable held BEFORE the operation.
 */
/**
 * Atomically adds n to *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __atomic_fetch_add(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order));
}
/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __atomic_fetch_sub(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order));
}
/**
 * Atomically ORs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __atomic_fetch_or(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order));
}
/**
 * Atomically ANDs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __atomic_fetch_and(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order));
}
/**
 * Atomically XORs n with *var, and returns the previous value of *var.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __atomic_fetch_xor(&AWS_ATOMIC_VAR_INTVAL(var), n, aws_atomic_priv_xlate_order(order));
}
/**
 * Provides the same reordering guarantees as an atomic operation with the specified memory order, without
 * needing to actually perform an atomic operation.
 */
AWS_STATIC_IMPL
void aws_atomic_thread_fence(enum aws_memory_order order) {
    /* CONSISTENCY FIX: translate to the __ATOMIC_* constant like every other function in
     * this backend, rather than passing the aws enum through directly. The previous code
     * only worked because the enum's numeric values happen to line up with __ATOMIC_*. */
    __atomic_thread_fence(aws_atomic_priv_xlate_order(order));
}
#ifdef __clang__
# pragma clang diagnostic pop
#else
# pragma GCC diagnostic pop
#endif
#define AWS_ATOMICS_HAVE_THREAD_FENCE
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ATOMICS_GNU_INL */

View File

@@ -0,0 +1,274 @@
#ifndef AWS_COMMON_ATOMICS_GNU_OLD_INL
#define AWS_COMMON_ATOMICS_GNU_OLD_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/* These are implicitly included, but help with editor highlighting */
#include <aws/common/atomics.h>
#include <aws/common/common.h>
#include <stdint.h>
#include <stdlib.h>
AWS_EXTERN_C_BEGIN
#if defined(__GNUC__)
# if (__GNUC__ < 4)
# error GCC versions before 4.1.2 are not supported
# elif (defined(__arm__) || defined(__ia64__)) && (__GNUC__ == 4 && __GNUC_MINOR__ < 4)
/* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793 Itanium codegen */
/* https://bugs.launchpad.net/ubuntu/+source/gcc-4.4/+bug/491872 ARM codegen*/
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=42263 ARM codegen */
# error GCC versions before 4.4.0 are not supported on ARM or Itanium
# elif (defined(__x86_64__) || defined(__i386__)) && \
(__GNUC__ == 4 && (__GNUC_MINOR__ < 1 || (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ < 2)))
/* 4.1.2 is the first gcc version with 100% working atomic intrinsics on Intel */
# error GCC versions before 4.1.2 are not supported on x86/x64
# endif
#endif
typedef size_t aws_atomic_impl_int_t;
/* Compiler-only barrier: stops the compiler from reordering memory accesses across this point;
 * emits no hardware fence instruction. */
static inline void aws_atomic_private_compiler_barrier(void) {
    __asm__ __volatile__("" : : : "memory");
}

/* Fences required *before* an access with the given order: a full hardware fence
 * (__sync_synchronize) for release-flavored / seq_cst orders, then a compiler barrier
 * unconditionally (even for relaxed). */
static inline void aws_atomic_private_barrier_before(enum aws_memory_order order) {
    if (order == aws_memory_order_release || order == aws_memory_order_acq_rel || order == aws_memory_order_seq_cst) {
        __sync_synchronize();
    }
    aws_atomic_private_compiler_barrier();
}

/* Fences required *after* an access with the given order: a compiler barrier unconditionally,
 * then a full hardware fence for acquire-flavored / seq_cst orders. */
static inline void aws_atomic_private_barrier_after(enum aws_memory_order order) {
    aws_atomic_private_compiler_barrier();
    if (order == aws_memory_order_acquire || order == aws_memory_order_acq_rel || order == aws_memory_order_seq_cst) {
        __sync_synchronize();
    }
}
/**
 * Initializes an atomic variable with an integer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This is a plain (non-atomic) store: it must not race with any other access to the variable.
 */
AWS_STATIC_IMPL
void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) {
    AWS_ATOMIC_VAR_INTVAL(var) = n;
}

/**
 * Initializes an atomic variable with a pointer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This is a plain (non-atomic) store: it must not race with any other access to the variable.
 */
AWS_STATIC_IMPL
void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) {
    AWS_ATOMIC_VAR_PTRVAL(var) = p;
}
/**
 * Reads an atomic var as an integer, using the specified ordering, and returns the result.
 *
 * The read itself is a plain volatile load; aws_atomic_private_barrier_before emits a full
 * hardware fence first only for seq_cst.
 * NOTE(review): an acquire load gets no hardware fence after the read here -- fine on
 * strongly-ordered x86, but worth confirming for the weakly-ordered targets this file supports.
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    aws_atomic_private_barrier_before(memory_order);
    size_t retval = AWS_ATOMIC_VAR_INTVAL(var);
    /* Release barriers are not permitted for loads, so we just do a compiler barrier here */
    aws_atomic_private_compiler_barrier();
    return retval;
}

/**
 * Reads an atomic var as a pointer, using the specified ordering, and returns the result.
 * Same fence strategy (and the same acquire caveat) as aws_atomic_load_int_explicit.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    aws_atomic_private_barrier_before(memory_order);
    void *retval = AWS_ATOMIC_VAR_PTRVAL(var);
    /* Release barriers are not permitted for loads, so we just do a compiler barrier here */
    aws_atomic_private_compiler_barrier();
    return retval;
}
/**
 * Stores an integer into an atomic var, using the specified ordering.
 *
 * The write itself is a plain volatile store; aws_atomic_private_barrier_after emits a full
 * hardware fence afterwards only for acquire-flavored / seq_cst orders.
 * NOTE(review): a release store gets only compiler barriers here (no hardware fence) -- fine
 * on strongly-ordered x86, but worth confirming for the weakly-ordered targets this file
 * supports.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) {
    /* Acquire barriers are not permitted for stores, so just do a compiler barrier before */
    aws_atomic_private_compiler_barrier();
    AWS_ATOMIC_VAR_INTVAL(var) = n;
    aws_atomic_private_barrier_after(memory_order);
}

/**
 * Stores a pointer into an atomic var, using the specified ordering.
 * Same fence strategy (and the same release caveat) as aws_atomic_store_int_explicit.
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) {
    /* Acquire barriers are not permitted for stores, so just do a compiler barrier before */
    aws_atomic_private_compiler_barrier();
    AWS_ATOMIC_VAR_PTRVAL(var) = p;
    aws_atomic_private_barrier_after(memory_order);
}
/**
 * Exchanges an integer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 *
 * memory_order is effectively ignored: the CAS builtin used below implies a full barrier,
 * which is at least as strong as any order this API accepts.
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t n,
    enum aws_memory_order memory_order) {
    /*
     * GCC 4.6 and before have only __sync_lock_test_and_set as an exchange operation,
     * which may not support arbitrary values on all architectures. We simply emulate
     * with a CAS instead.
     */
    size_t oldval;
    do {
        oldval = AWS_ATOMIC_VAR_INTVAL(var);
    } while (!__sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_INTVAL(var), oldval, n));
    /* __sync_bool_compare_and_swap implies a full barrier */
    return oldval;
}

/**
 * Exchanges a pointer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 *
 * memory_order is effectively ignored: the CAS builtin used below implies a full barrier,
 * which is at least as strong as any order this API accepts.
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void *p,
    enum aws_memory_order memory_order) {
    /*
     * GCC 4.6 and before have only __sync_lock_test_and_set as an exchange operation,
     * which may not support arbitrary values on all architectures. We simply emulate
     * with a CAS instead.
     */
    void *oldval;
    do {
        oldval = AWS_ATOMIC_VAR_PTRVAL(var);
    } while (!__sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_PTRVAL(var), oldval, p));
    /* __sync_bool_compare_and_swap implies a full barrier */
    return oldval;
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 *
 * Both order arguments are effectively ignored: __sync_bool_compare_and_swap implies a full
 * barrier. On failure, *expected is refreshed with a separate plain read after the CAS, so the
 * reported value may itself already be stale; callers are expected to retry in a loop.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t *expected,
    size_t desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    bool result = __sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_INTVAL(var), *expected, desired);
    if (!result) {
        /* non-atomic refresh; see staleness note above */
        *expected = AWS_ATOMIC_VAR_INTVAL(var);
    }
    return result;
}

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 *
 * Both order arguments are effectively ignored: __sync_bool_compare_and_swap implies a full
 * barrier. On failure, *expected is refreshed with a separate plain read after the CAS, so the
 * reported value may itself already be stale; callers are expected to retry in a loop.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void **expected,
    void *desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    bool result = __sync_bool_compare_and_swap(&AWS_ATOMIC_VAR_PTRVAL(var), *expected, desired);
    if (!result) {
        /* non-atomic refresh; see staleness note above */
        *expected = AWS_ATOMIC_VAR_PTRVAL(var);
    }
    return result;
}
/**
 * Atomically adds n to *var, and returns the previous value of *var.
 * (order is ignored: the legacy __sync fetch builtins are full barriers.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __sync_fetch_and_add(&AWS_ATOMIC_VAR_INTVAL(var), n);
}

/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 * (order is ignored: the legacy __sync fetch builtins are full barriers.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __sync_fetch_and_sub(&AWS_ATOMIC_VAR_INTVAL(var), n);
}

/**
 * Atomically ORs n with *var, and returns the previous value of *var.
 * (order is ignored: the legacy __sync fetch builtins are full barriers.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __sync_fetch_and_or(&AWS_ATOMIC_VAR_INTVAL(var), n);
}

/**
 * Atomically ANDs n with *var, and returns the previous value of *var.
 * (order is ignored: the legacy __sync fetch builtins are full barriers.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __sync_fetch_and_and(&AWS_ATOMIC_VAR_INTVAL(var), n);
}

/**
 * Atomically XORs n with *var, and returns the previous value of *var.
 * (order is ignored: the legacy __sync fetch builtins are full barriers.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    return __sync_fetch_and_xor(&AWS_ATOMIC_VAR_INTVAL(var), n);
}
/**
 * Provides the same reordering guarantees as an atomic operation with the specified memory order, without
 * needing to actually perform an atomic operation.
 *
 * order is ignored: old GCC exposes only a full barrier, which satisfies (over-satisfies)
 * every ordering this API can request.
 */
AWS_STATIC_IMPL
void aws_atomic_thread_fence(enum aws_memory_order order) {
    /* On old versions of GCC we only have this one big hammer... */
    __sync_synchronize();
}
#define AWS_ATOMICS_HAVE_THREAD_FENCE
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ATOMICS_GNU_OLD_INL */

View File

@@ -0,0 +1,407 @@
#ifndef AWS_COMMON_ATOMICS_MSVC_INL
#define AWS_COMMON_ATOMICS_MSVC_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/* These are implicitly included, but helps with editor highlighting */
#include <aws/common/atomics.h>
#include <aws/common/common.h>
/* This file generates level 4 compiler warnings in Visual Studio 2017 and older */
#pragma warning(push, 3)
#include <intrin.h>
#pragma warning(pop)
#include <stdint.h>
#include <stdlib.h>
AWS_EXTERN_C_BEGIN
#if !(defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64))
# error Atomics are not currently supported for non-x86 or ARM64 MSVC platforms
/*
* In particular, it's not clear that seq_cst will work properly on non-x86
* memory models. We may need to make use of platform-specific intrinsics.
*
* NOTE: Before removing this #error, please make use of the Interlocked*[Acquire|Release]
* variants (if applicable for the new platform)! This will (hopefully) help ensure that
* code breaks before people take too much of a dependency on it.
*/
#endif
/**
* Some general notes:
*
* On x86/x86_64, by default, windows uses acquire/release semantics for volatile accesses;
* however, this is not the case on ARM, and on x86/x86_64 it can be disabled using the
* /volatile:iso compile flag.
*
* Interlocked* functions implicitly have acq_rel semantics; there are ones with weaker
* semantics as well, but because windows is generally used on x86, where there's not a lot
* of performance difference between different ordering modes anyway, we just use the stronger
* forms for now. Further, on x86, they actually have seq_cst semantics as they use locked instructions.
* It is unclear if Interlocked functions guarantee seq_cst on non-x86 platforms.
*
* Since all loads and stores are acq and/or rel already, we can do non-seq_cst loads and stores
* as just volatile variable accesses, but add the appropriate barriers for good measure.
*
* For seq_cst accesses, we take advantage of the facts that (on x86):
* 1. Loads are not reordered with other loads
* 2. Stores are not reordered with other stores
* 3. Locked instructions (including swaps) have a total order
* 4. Non-locked accesses are not reordered with locked instructions
*
* Therefore, if we ensure that all seq_cst stores are locked, we can establish
* a total order on stores, and the intervening ordinary loads will not violate that total
* order.
* See http://www.cs.cmu.edu/~410-f10/doc/Intel_Reordering_318147.pdf 2.7, which covers
* this use case.
*/
/**
* Some general notes about ARM environments:
* ARM processors uses a weak memory model as opposed to the strong memory model used by Intel processors
* This means more permissible memory ordering allowed between stores and loads.
*
* Thus ARM port will need more hardware fences/barriers to assure developer intent.
 * Memory barriers will prevent reordering stores and loads across them depending on their type
* (read write, write only, read only ...)
*
* For more information about ARM64 memory ordering,
* see https://developer.arm.com/documentation/102336/0100/Memory-ordering
* For more information about Memory barriers,
* see https://developer.arm.com/documentation/102336/0100/Memory-barriers
 * For more information about Microsoft intrinsic ARM64 APIs,
* see https://learn.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=msvc-170
* Note: wrt _Interlocked[Op]64 is the same for ARM64 and x64 processors
*/
#ifdef _M_IX86
# define AWS_INTERLOCKED_INT(x) _Interlocked##x
typedef long aws_atomic_impl_int_t;
#else
# define AWS_INTERLOCKED_INT(x) _Interlocked##x##64
typedef long long aws_atomic_impl_int_t;
#endif
#ifdef _M_ARM64
/* Hardware Read Write barrier, prevents all memory operations to cross the barrier in both directions */
# define AWS_RW_BARRIER() __dmb(_ARM64_BARRIER_SY)
/* Hardware Read barrier, prevents all memory operations to cross the barrier upwards */
# define AWS_R_BARRIER() __dmb(_ARM64_BARRIER_LD)
/* Hardware Write barrier, prevents all memory operations to cross the barrier downwards */
# define AWS_W_BARRIER() __dmb(_ARM64_BARRIER_ST)
/* Software barrier, prevents the compiler from reordering the operations across the barrier */
# define AWS_SW_BARRIER() _ReadWriteBarrier();
#else
/* hardware barriers, do nothing on x86 since it has a strong memory model
* as described in the section above: some general notes
*/
# define AWS_RW_BARRIER()
# define AWS_R_BARRIER()
# define AWS_W_BARRIER()
/*
* x86: only a compiler barrier is required. For seq_cst, we must use some form of interlocked operation for
* writes, but that's the caller's responsibility.
*
* Volatile ops may or may not imply this barrier, depending on the /volatile: switch, but adding an extra
* barrier doesn't hurt.
*/
# define AWS_SW_BARRIER() _ReadWriteBarrier(); /* software barrier */
#endif
/* Debug-build sanity check that `order` is one of the five supported aws_memory_order
 * values; aborts on anything else. Reduces to just the unused-parameter cast in NDEBUG
 * builds. */
static inline void aws_atomic_priv_check_order(enum aws_memory_order order) {
#ifndef NDEBUG
    bool is_known_order =
        order == aws_memory_order_relaxed ||
        order == aws_memory_order_acquire ||
        order == aws_memory_order_release ||
        order == aws_memory_order_acq_rel ||
        order == aws_memory_order_seq_cst;
    if (!is_known_order) {
        /* Unknown memory order */
        abort();
    }
#endif
    (void)order;
}
enum aws_atomic_mode_priv { aws_atomic_priv_load, aws_atomic_priv_store };
/* Fences needed *before* a volatile access of the given mode/order.
 * Loads never get a leading fence (acquire-style fencing happens after the access), and
 * relaxed needs none; the remaining cases (release/acq_rel/seq_cst stores) get a hardware +
 * compiler barrier. On x86 the hardware barrier macros are no-ops (see notes above). */
static inline void aws_atomic_priv_barrier_before(enum aws_memory_order order, enum aws_atomic_mode_priv mode) {
    aws_atomic_priv_check_order(order);
    AWS_ASSERT(mode != aws_atomic_priv_load || order != aws_memory_order_release);
    if (order == aws_memory_order_relaxed) {
        /* no barriers required for relaxed mode */
        return;
    }
    if (order == aws_memory_order_acquire || mode == aws_atomic_priv_load) {
        /* for acquire, we need only use a barrier afterward */
        return;
    }
    AWS_RW_BARRIER();
    AWS_SW_BARRIER();
}

/* Fences needed *after* a volatile access of the given mode/order.
 * Stores never get a trailing fence here (seq_cst stores are done with an Interlocked
 * exchange instead -- see the store functions), and relaxed needs none; the remaining cases
 * (acquire/acq_rel/seq_cst loads) get a hardware + compiler barrier. */
static inline void aws_atomic_priv_barrier_after(enum aws_memory_order order, enum aws_atomic_mode_priv mode) {
    aws_atomic_priv_check_order(order);
    AWS_ASSERT(mode != aws_atomic_priv_store || order != aws_memory_order_acquire);
    if (order == aws_memory_order_relaxed) {
        /* no barriers required for relaxed mode */
        return;
    }
    if (order == aws_memory_order_release || mode == aws_atomic_priv_store) {
        /* for release, we need only use a barrier before */
        return;
    }
    AWS_RW_BARRIER();
    AWS_SW_BARRIER();
}
/**
 * Initializes an atomic variable with an integer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This is a plain (non-atomic) store: it must not race with any other access to the variable.
 */
AWS_STATIC_IMPL
void aws_atomic_init_int(volatile struct aws_atomic_var *var, size_t n) {
    AWS_ATOMIC_VAR_INTVAL(var) = (aws_atomic_impl_int_t)n;
}

/**
 * Initializes an atomic variable with a pointer value. This operation should be done before any
 * other operations on this atomic variable, and must be done before attempting any parallel operations.
 *
 * This is a plain (non-atomic) store: it must not race with any other access to the variable.
 */
AWS_STATIC_IMPL
void aws_atomic_init_ptr(volatile struct aws_atomic_var *var, void *p) {
    AWS_ATOMIC_VAR_PTRVAL(var) = p;
}
/**
 * Reads an atomic var as an integer, using the specified ordering, and returns the result.
 *
 * The access is a plain volatile read; the priv barrier helpers add any fences the order
 * requires. Loads need no locked instruction even for seq_cst (see notes at the top of
 * this file).
 */
AWS_STATIC_IMPL
size_t aws_atomic_load_int_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_load);
    size_t result = (size_t)AWS_ATOMIC_VAR_INTVAL(var);
    aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_load);
    return result;
}

/**
 * Reads an atomic var as a pointer, using the specified ordering, and returns the result.
 * Same strategy as aws_atomic_load_int_explicit.
 */
AWS_STATIC_IMPL
void *aws_atomic_load_ptr_explicit(volatile const struct aws_atomic_var *var, enum aws_memory_order memory_order) {
    aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_load);
    void *result = AWS_ATOMIC_VAR_PTRVAL(var);
    aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_load);
    return result;
}
/**
 * Stores an integer into an atomic var, using the specified ordering.
 *
 * Non-seq_cst stores are plain volatile writes bracketed by the appropriate fences. A seq_cst
 * store is performed as an Interlocked exchange so that seq_cst stores form a total order
 * (see the notes at the top of this file). The order argument is validated indirectly, by
 * aws_atomic_priv_barrier_before on the non-seq_cst path.
 */
AWS_STATIC_IMPL
void aws_atomic_store_int_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order memory_order) {
    if (memory_order != aws_memory_order_seq_cst) {
        aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_store);
        AWS_ATOMIC_VAR_INTVAL(var) = (aws_atomic_impl_int_t)n;
        aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_store);
    } else {
        AWS_INTERLOCKED_INT(Exchange)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
    }
}

/**
 * Stores a pointer into an atomic var, using the specified ordering.
 * Same strategy as aws_atomic_store_int_explicit. (The explicit check_order call below is
 * redundant with the check inside aws_atomic_priv_barrier_before, but harmless.)
 */
AWS_STATIC_IMPL
void aws_atomic_store_ptr_explicit(volatile struct aws_atomic_var *var, void *p, enum aws_memory_order memory_order) {
    aws_atomic_priv_check_order(memory_order);
    if (memory_order != aws_memory_order_seq_cst) {
        aws_atomic_priv_barrier_before(memory_order, aws_atomic_priv_store);
        AWS_ATOMIC_VAR_PTRVAL(var) = p;
        aws_atomic_priv_barrier_after(memory_order, aws_atomic_priv_store);
    } else {
        _InterlockedExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), p);
    }
}
/**
 * Exchanges an integer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 *
 * memory_order is validated but otherwise unused: the Interlocked exchange always supplies
 * full-strength ordering (see notes at the top of this file).
 */
AWS_STATIC_IMPL
size_t aws_atomic_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t n,
    enum aws_memory_order memory_order) {
    aws_atomic_priv_check_order(memory_order);
    return (size_t)AWS_INTERLOCKED_INT(Exchange)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
}

/**
 * Exchanges a pointer with the value in an atomic_var, using the specified ordering.
 * Returns the value that was previously in the atomic_var.
 *
 * memory_order is validated but otherwise unused: the Interlocked exchange always supplies
 * full-strength ordering (see notes at the top of this file).
 */
AWS_STATIC_IMPL
void *aws_atomic_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void *p,
    enum aws_memory_order memory_order) {
    aws_atomic_priv_check_order(memory_order);
    return _InterlockedExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), p);
}
/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 *
 * Both orders are validated but otherwise unused: _InterlockedCompareExchange supplies
 * full-strength ordering (see notes at the top of this file). Unlike the GNU fallback,
 * *expected is refreshed from the value returned by the CAS itself, so it is never stale.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_int_explicit(
    volatile struct aws_atomic_var *var,
    size_t *expected,
    size_t desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    aws_atomic_priv_check_order(order_success);
    aws_atomic_priv_check_order(order_failure);
    size_t oldval = (size_t)AWS_INTERLOCKED_INT(CompareExchange)(
        &AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)desired, (aws_atomic_impl_int_t)*expected);
    bool successful = oldval == *expected;
    *expected = oldval;
    return successful;
}

/**
 * Atomically compares *var to *expected; if they are equal, atomically sets *var = desired. Otherwise, *expected is set
 * to the value in *var. On success, the memory ordering used was order_success; otherwise, it was order_failure.
 * order_failure must be no stronger than order_success, and must not be release or acq_rel.
 *
 * Both orders are validated but otherwise unused; *expected is refreshed from the CAS result
 * itself (never stale). See aws_atomic_compare_exchange_int_explicit.
 */
AWS_STATIC_IMPL
bool aws_atomic_compare_exchange_ptr_explicit(
    volatile struct aws_atomic_var *var,
    void **expected,
    void *desired,
    enum aws_memory_order order_success,
    enum aws_memory_order order_failure) {
    aws_atomic_priv_check_order(order_success);
    aws_atomic_priv_check_order(order_failure);
    void *oldval = _InterlockedCompareExchangePointer(&AWS_ATOMIC_VAR_PTRVAL(var), desired, *expected);
    bool successful = oldval == *expected;
    *expected = oldval;
    return successful;
}
/**
 * Atomically adds n to *var, and returns the previous value of *var.
 * (order is validated but otherwise unused; Interlocked ops are full strength.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_add_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    aws_atomic_priv_check_order(order);
    return (size_t)AWS_INTERLOCKED_INT(ExchangeAdd)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
}

/**
 * Atomically subtracts n from *var, and returns the previous value of *var.
 * Implemented as ExchangeAdd of the negated value; on MSVC's two's-complement targets the
 * net effect is modular subtraction, matching fetch_add's wrap-around behavior.
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_sub_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    aws_atomic_priv_check_order(order);
    return (size_t)AWS_INTERLOCKED_INT(ExchangeAdd)(&AWS_ATOMIC_VAR_INTVAL(var), -(aws_atomic_impl_int_t)n);
}

/**
 * Atomically ORs n with *var, and returns the previous value of *var.
 * (order is validated but otherwise unused; Interlocked ops are full strength.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_or_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    aws_atomic_priv_check_order(order);
    return (size_t)AWS_INTERLOCKED_INT(Or)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
}

/**
 * Atomically ANDs n with *var, and returns the previous value of *var.
 * (order is validated but otherwise unused; Interlocked ops are full strength.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_and_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    aws_atomic_priv_check_order(order);
    return (size_t)AWS_INTERLOCKED_INT(And)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
}

/**
 * Atomically XORs n with *var, and returns the previous value of *var.
 * (order is validated but otherwise unused; Interlocked ops are full strength.)
 */
AWS_STATIC_IMPL
size_t aws_atomic_fetch_xor_explicit(volatile struct aws_atomic_var *var, size_t n, enum aws_memory_order order) {
    aws_atomic_priv_check_order(order);
    return (size_t)AWS_INTERLOCKED_INT(Xor)(&AWS_ATOMIC_VAR_INTVAL(var), (aws_atomic_impl_int_t)n);
}
/**
 * Provides the same reordering guarantees as an atomic operation with the specified memory order, without
 * needing to actually perform an atomic operation.
 *
 * seq_cst is implemented as an Interlocked exchange on a dummy local: on x86 only a locked
 * instruction yields the required store-load ordering (see notes at the top of this file).
 * Weaker orders map to the barrier macros defined above, which on x86 are just compiler
 * barriers.
 */
AWS_STATIC_IMPL
void aws_atomic_thread_fence(enum aws_memory_order order) {
    volatile aws_atomic_impl_int_t x = 0;
    aws_atomic_priv_check_order(order);
    /* On x86: A compiler barrier is sufficient for anything short of seq_cst */
    switch (order) {
        case aws_memory_order_seq_cst:
            AWS_INTERLOCKED_INT(Exchange)(&x, 1);
            break;
        case aws_memory_order_release:
            AWS_W_BARRIER();
            AWS_SW_BARRIER();
            break;
        case aws_memory_order_acquire:
            AWS_R_BARRIER();
            AWS_SW_BARRIER();
            break;
        case aws_memory_order_acq_rel:
            AWS_RW_BARRIER();
            AWS_SW_BARRIER();
            break;
        case aws_memory_order_relaxed:
            /* no-op */
            break;
    }
}
/* prevent conflicts with other files that might pick the same names */
#undef AWS_RW_BARRIER
#undef AWS_R_BARRIER
#undef AWS_W_BARRIER
#undef AWS_SW_BARRIER
#define AWS_ATOMICS_HAVE_THREAD_FENCE
AWS_EXTERN_C_END
#endif

View File

@@ -0,0 +1,967 @@
#ifndef AWS_COMMON_BYTE_BUF_H
#define AWS_COMMON_BYTE_BUF_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/array_list.h>
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
#include <string.h>
AWS_PUSH_SANE_WARNING_LEVEL
/**
 * Represents a length-delimited binary string or buffer. If byte buffer points
 * to constant memory or memory that should otherwise not be freed by this
 * struct, set allocator to NULL and free function will be a no-op.
 *
 * This structure used to define the output for all functions that write to a buffer.
 *
 * Note that this structure allocates memory at the buffer pointer only. The
 * struct itself does not get dynamically allocated and must be either
 * maintained or copied to avoid losing access to the memory.
 */
struct aws_byte_buf {
    /* do not reorder this, this struct lines up nicely with windows buffer structures--saving us allocations.*/
    /* number of bytes currently written/valid in buffer */
    size_t len;
    /* backing storage; may be NULL for an empty/unallocated buffer */
    uint8_t *buffer;
    /* total bytes available in buffer (len <= capacity) */
    size_t capacity;
    /* allocator that owns buffer, or NULL when this struct does not own the memory */
    struct aws_allocator *allocator;
};
/**
 * Represents a movable pointer within a larger binary string or buffer.
 *
 * This structure is used to define buffers for reading.
 */
struct aws_byte_cursor {
    /* do not reorder this, this struct lines up nicely with windows buffer structures--saving us allocations */
    /* number of bytes remaining to read from ptr */
    size_t len;
    /* current read position within the underlying buffer */
    uint8_t *ptr;
};
/**
* Helper macro for passing aws_byte_cursor to the printf family of functions.
* Intended for use with the PRInSTR format macro.
* Ex: printf(PRInSTR "\n", AWS_BYTE_CURSOR_PRI(my_cursor));
*/
#define AWS_BYTE_CURSOR_PRI(C) ((int)(C).len < 0 ? 0 : (int)(C).len), (const char *)(C).ptr
/**
* Helper macro for passing aws_byte_buf to the printf family of functions.
* Intended for use with the PRInSTR format macro.
* Ex: printf(PRInSTR "\n", AWS_BYTE_BUF_PRI(my_buf));
*/
#define AWS_BYTE_BUF_PRI(B) ((int)(B).len < 0 ? 0 : (int)(B).len), (const char *)(B).buffer
/**
* Helper Macro for initializing a byte cursor from a string literal
*/
#define AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(literal) \
{.ptr = (uint8_t *)(const char *)(literal), .len = sizeof(literal) - 1}
/**
* Signature for function argument to trim APIs
*/
typedef bool(aws_byte_predicate_fn)(uint8_t value);
AWS_EXTERN_C_BEGIN
/**
* Compare two arrays.
* Return whether their contents are equivalent.
* NULL may be passed as the array pointer if its length is declared to be 0.
*/
AWS_COMMON_API
bool aws_array_eq(const void *const array_a, const size_t len_a, const void *array_b, const size_t len_b);
/**
* Perform a case-insensitive string comparison of two arrays.
* Return whether their contents are equivalent.
* NULL may be passed as the array pointer if its length is declared to be 0.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_array_eq_ignore_case(
const void *const array_a,
const size_t len_a,
const void *const array_b,
const size_t len_b);
/**
* Compare an array and a null-terminated string.
* Returns true if their contents are equivalent.
* The array should NOT contain a null-terminator, or the comparison will always return false.
* NULL may be passed as the array pointer if its length is declared to be 0.
*/
AWS_COMMON_API
bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str);
/**
* Perform a case-insensitive string comparison of an array and a null-terminated string.
* Return whether their contents are equivalent.
* The array should NOT contain a null-terminator, or the comparison will always return false.
* NULL may be passed as the array pointer if its length is declared to be 0.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str);
/**
 * Initializes an aws_byte_buf with a backing array of `capacity` bytes obtained from
 * `allocator`. Pair with aws_byte_buf_clean_up() to release the memory.
 * NOTE(review): return semantics are presumably AWS_OP_SUCCESS / AWS_OP_ERR like the other
 * init functions in this header -- confirm against the implementation.
 */
AWS_COMMON_API
int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator, size_t capacity);
/**
* Initializes an aws_byte_buf structure base on another valid one.
* Requires: *src and *allocator are valid objects.
* Ensures: *dest is a valid aws_byte_buf with a new backing array dest->buffer
* which is a copy of the elements from src->buffer.
*/
AWS_COMMON_API int aws_byte_buf_init_copy(
struct aws_byte_buf *dest,
struct aws_allocator *allocator,
const struct aws_byte_buf *src);
/**
* Reads 'filename' into 'out_buf'. If successful, 'out_buf' is allocated and filled with the data;
* It is your responsibility to call 'aws_byte_buf_clean_up()' on it. Otherwise, 'out_buf' remains
* unused. In the very unfortunate case where some API needs to treat out_buf as a c_string, a null terminator
* is appended, but is not included as part of the length field.
*/
AWS_COMMON_API
int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocator *alloc, const char *filename);
/**
* Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo.
* These files don't accurately report their size, so size_hint is used as initial buffer size,
 * and the buffer grows until the whole file is read.
*/
AWS_COMMON_API
int aws_byte_buf_init_from_file_with_size_hint(
struct aws_byte_buf *out_buf,
struct aws_allocator *alloc,
const char *filename,
size_t size_hint);
/**
* Evaluates the set of properties that define the shape of all valid aws_byte_buf structures.
 * It is also a cheap check, in the sense that it runs in constant time (i.e., no loops or recursion).
*/
AWS_COMMON_API
bool aws_byte_buf_is_valid(const struct aws_byte_buf *const buf);
/**
* Evaluates the set of properties that define the shape of all valid aws_byte_cursor structures.
* It is also a cheap check, in the sense it runs in constant time (i.e., no loops or recursion).
*/
AWS_COMMON_API
bool aws_byte_cursor_is_valid(const struct aws_byte_cursor *cursor);
/**
* Copies src buffer into dest and sets the correct len and capacity.
* A new memory zone is allocated for dest->buffer. When dest is no longer needed it will have to be cleaned-up using
* aws_byte_buf_clean_up(dest).
* Dest capacity and len will be equal to the src len. Allocator of the dest will be identical with parameter allocator.
* If src buffer is null the dest will have a null buffer with a len and a capacity of 0
* Returns AWS_OP_SUCCESS in case of success or AWS_OP_ERR when memory can't be allocated.
*/
AWS_COMMON_API
int aws_byte_buf_init_copy_from_cursor(
struct aws_byte_buf *dest,
struct aws_allocator *allocator,
struct aws_byte_cursor src);
/**
* Init buffer with contents of multiple cursors, and update cursors to reference the memory stored in the buffer.
* Each cursor arg must be an `struct aws_byte_cursor *`. NULL must be passed as the final arg.
* NOTE: Do not append/grow/resize buffers initialized this way, or the cursors will end up referencing invalid memory.
* Returns AWS_OP_SUCCESS in case of success.
* AWS_OP_ERR is returned if memory can't be allocated or the total cursor length exceeds SIZE_MAX.
*/
AWS_COMMON_API
int aws_byte_buf_init_cache_and_update_cursors(struct aws_byte_buf *dest, struct aws_allocator *allocator, ...);
/**
 * Releases the buffer's backing memory via buf->allocator (a no-op when allocator is NULL;
 * see the struct aws_byte_buf docs above).
 * NOTE(review): presumably also zeroes/resets the struct for safe reuse -- confirm against
 * the implementation.
 */
AWS_COMMON_API
void aws_byte_buf_clean_up(struct aws_byte_buf *buf);
/**
* Equivalent to calling aws_byte_buf_secure_zero and then aws_byte_buf_clean_up
* on the buffer.
*/
AWS_COMMON_API
void aws_byte_buf_clean_up_secure(struct aws_byte_buf *buf);
/**
* Resets the len of the buffer to 0, but does not free the memory. The buffer can then be reused.
* Optionally zeroes the contents, if the "zero_contents" flag is true.
*/
AWS_COMMON_API
void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents);
/**
* Sets all bytes of buffer to zero and resets len to zero.
*/
AWS_COMMON_API
void aws_byte_buf_secure_zero(struct aws_byte_buf *buf);
/**
* Compare two aws_byte_buf structures.
* Return whether their contents are equivalent.
*/
AWS_COMMON_API
bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b);
/**
* Perform a case-insensitive string comparison of two aws_byte_buf structures.
* Return whether their contents are equivalent.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b);
/**
* Compare an aws_byte_buf and a null-terminated string.
* Returns true if their contents are equivalent.
* The buffer should NOT contain a null-terminator, or the comparison will always return false.
*/
AWS_COMMON_API
bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str);
/**
* Perform a case-insensitive string comparison of an aws_byte_buf and a null-terminated string.
* Return whether their contents are equivalent.
* The buffer should NOT contain a null-terminator, or the comparison will always return false.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str);
/**
* No copies, no buffer allocations. Iterates over input_str, and returns the
* next substring between split_on instances relative to previous substr.
* Behaves similar to strtok with substr being used as state for next split.
*
* Returns true each time substr is set and false when there is no more splits
* (substr is set to empty in that case).
*
* Example usage.
* struct aws_byte_cursor substr = {0};
* while (aws_byte_cursor_next_split(&input_str, ';', &substr)) {
* // ...use substr...
* }
*
 * Note: It is the user's responsibility to zero-initialize substr before the first call.
*
* Edge case rules are as follows:
* empty input will have single empty split. ex. "" splits into ""
* if input starts with split_on then first split is empty. ex ";A" splits into "", "A"
* adjacent split tokens result in empty split. ex "A;;B" splits into "A", "", "B"
* If the input ends with split_on, last split is empty. ex. "A;" splits into "A", ""
*
* It is the user's responsibility to make sure the input buffer stays in memory
* long enough to use the results.
*/
AWS_COMMON_API
bool aws_byte_cursor_next_split(
const struct aws_byte_cursor *AWS_RESTRICT input_str,
char split_on,
struct aws_byte_cursor *AWS_RESTRICT substr);
/**
* No copies, no buffer allocations. Fills in output with a list of
* aws_byte_cursor instances where buffer is an offset into the input_str and
* len is the length of that string in the original buffer.
*
* Edge case rules are as follows:
* if the input begins with split_on, an empty cursor will be the first entry in
* output. if the input has two adjacent split_on tokens, an empty cursor will
* be inserted into the output. if the input ends with split_on, an empty cursor
* will be appended to the output.
*
* It is the user's responsibility to properly initialize output. Recommended number of preallocated elements from
* output is your most likely guess for the upper bound of the number of elements resulting from the split.
*
* The type that will be stored in output is struct aws_byte_cursor (you'll need
* this for the item size param).
*
* It is the user's responsibility to make sure the input buffer stays in memory
* long enough to use the results.
*/
AWS_COMMON_API
int aws_byte_cursor_split_on_char(
const struct aws_byte_cursor *AWS_RESTRICT input_str,
char split_on,
struct aws_array_list *AWS_RESTRICT output);
/**
* No copies, no buffer allocations. Fills in output with a list of aws_byte_cursor instances where buffer is
* an offset into the input_str and len is the length of that string in the original buffer. N is the max number of
* splits, if this value is zero, it will add all splits to the output.
*
* Edge case rules are as follows:
* if the input begins with split_on, an empty cursor will be the first entry in output
* if the input has two adjacent split_on tokens, an empty cursor will be inserted into the output.
* if the input ends with split_on, an empty cursor will be appended to the output.
*
* It is the user's responsibility to properly initialize output. Recommended number of preallocated elements from
* output is your most likely guess for the upper bound of the number of elements resulting from the split.
*
* If the output array is not large enough, input_str will be updated to point to the first character after the last
* processed split_on instance.
*
* The type that will be stored in output is struct aws_byte_cursor (you'll need this for the item size param).
*
* It is the user's responsibility to make sure the input buffer stays in memory long enough to use the results.
*/
AWS_COMMON_API
int aws_byte_cursor_split_on_char_n(
const struct aws_byte_cursor *AWS_RESTRICT input_str,
char split_on,
size_t n,
struct aws_array_list *AWS_RESTRICT output);
/**
* Search for an exact byte match inside a cursor. The first match will be returned. Returns AWS_OP_SUCCESS
* on successful match and first_find will be set to the offset in input_str, and length will be the remaining length
* from input_str past the returned offset. If the match was not found, AWS_OP_ERR will be returned and
* AWS_ERROR_STRING_MATCH_NOT_FOUND will be raised.
*/
AWS_COMMON_API
int aws_byte_cursor_find_exact(
const struct aws_byte_cursor *AWS_RESTRICT input_str,
const struct aws_byte_cursor *AWS_RESTRICT to_find,
struct aws_byte_cursor *first_find);
/**
*
* Shrinks a byte cursor from the right for as long as the supplied predicate is true
*/
AWS_COMMON_API
struct aws_byte_cursor aws_byte_cursor_right_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate);
/**
* Shrinks a byte cursor from the left for as long as the supplied predicate is true
*/
AWS_COMMON_API
struct aws_byte_cursor aws_byte_cursor_left_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate);
/**
* Shrinks a byte cursor from both sides for as long as the supplied predicate is true
*/
AWS_COMMON_API
struct aws_byte_cursor aws_byte_cursor_trim_pred(
const struct aws_byte_cursor *source,
aws_byte_predicate_fn *predicate);
/**
* Returns true if the byte cursor's range of bytes all satisfy the predicate
*/
AWS_COMMON_API
bool aws_byte_cursor_satisfies_pred(const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate);
/**
 * Copies `from` to `to`. If `to` is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be
 * returned. to->len will contain the amount of data actually copied to `to`.
*
* from and to may be the same buffer, permitting copying a buffer into itself.
*/
AWS_COMMON_API
int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from);
/**
* Copies from to to while converting bytes via the passed in lookup table.
* If to is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be
* returned. to->len will contain its original size plus the amount of data actually copied to to.
*
* from and to should not be the same buffer (overlap is not handled)
* lookup_table must be at least 256 bytes
*/
AWS_COMMON_API
int aws_byte_buf_append_with_lookup(
struct aws_byte_buf *AWS_RESTRICT to,
const struct aws_byte_cursor *AWS_RESTRICT from,
const uint8_t *lookup_table);
/**
* Copies from to to. If to is too small, the buffer will be grown appropriately and
* the old contents copied to, before the new contents are appended.
*
* If the grow fails (overflow or OOM), then an error will be returned.
*
* from and to may be the same buffer, permitting copying a buffer into itself.
*/
AWS_COMMON_API
int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from);
/**
* Copies `from` to `to`. If `to` is too small, the buffer will be grown appropriately and
* the old contents copied over, before the new contents are appended.
*
* If the grow fails (overflow or OOM), then an error will be returned.
*
* If the buffer is grown, the old buffer will be securely cleared before getting freed.
*
* `from` and `to` may be the same buffer, permitting copying a buffer into itself.
*/
AWS_COMMON_API
int aws_byte_buf_append_dynamic_secure(struct aws_byte_buf *to, const struct aws_byte_cursor *from);
/**
* Copies a single byte into `to`. If `to` is too small, the buffer will be grown appropriately and
* the old contents copied over, before the byte is appended.
*
* If the grow fails (overflow or OOM), then an error will be returned.
*/
AWS_COMMON_API
int aws_byte_buf_append_byte_dynamic(struct aws_byte_buf *buffer, uint8_t value);
/**
* Copies a single byte into `to`. If `to` is too small, the buffer will be grown appropriately and
* the old contents copied over, before the byte is appended.
*
* If the grow fails (overflow or OOM), then an error will be returned.
*
* If the buffer is grown, the old buffer will be securely cleared before getting freed.
*/
AWS_COMMON_API
int aws_byte_buf_append_byte_dynamic_secure(struct aws_byte_buf *buffer, uint8_t value);
/**
* Copy contents of cursor to buffer, then update cursor to reference the memory stored in the buffer.
* If buffer is too small, AWS_ERROR_DEST_COPY_TOO_SMALL will be returned.
*
* The cursor is permitted to reference memory from earlier in the buffer.
*/
AWS_COMMON_API
int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update);
/**
* Appends '\0' at the end of the buffer.
*/
AWS_COMMON_API
int aws_byte_buf_append_null_terminator(struct aws_byte_buf *buf);
/**
* Attempts to increase the capacity of a buffer to the requested capacity
*
 * If the buffer's capacity is currently larger than the requested capacity, the
* function does nothing (no shrink is performed).
*/
AWS_COMMON_API
int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity);
/**
* Convenience function that attempts to increase the capacity of a buffer relative to the current
* length.
*
* aws_byte_buf_reserve_relative(buf, x) ~~ aws_byte_buf_reserve(buf, buf->len + x)
*
*/
AWS_COMMON_API
int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional_length);
/**
* Concatenates a variable number of struct aws_byte_buf * into destination.
* Number of args must be greater than 1. If dest is too small,
* AWS_ERROR_DEST_COPY_TOO_SMALL will be returned. dest->len will contain the
* amount of data actually copied to dest.
*/
AWS_COMMON_API
int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...);
/**
* Compare two aws_byte_cursor structures.
* Return whether their contents are equivalent.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b);
/**
* Perform a case-insensitive string comparison of two aws_byte_cursor structures.
* Return whether their contents are equivalent.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq_ignore_case(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b);
/**
* Compare an aws_byte_cursor and an aws_byte_buf.
* Return whether their contents are equivalent.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b);
/**
* Perform a case-insensitive string comparison of an aws_byte_cursor and an aws_byte_buf.
* Return whether their contents are equivalent.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq_byte_buf_ignore_case(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b);
/**
* Compare an aws_byte_cursor and a null-terminated string.
* Returns true if their contents are equivalent.
* The cursor should NOT contain a null-terminator, or the comparison will always return false.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str);
/**
* Perform a case-insensitive string comparison of an aws_byte_cursor and a null-terminated string.
* Return whether their contents are equivalent.
* The cursor should NOT contain a null-terminator, or the comparison will always return false.
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str);
/**
* Return true if the input starts with the prefix (exact byte comparison).
*/
AWS_COMMON_API
bool aws_byte_cursor_starts_with(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
/**
* Return true if the input starts with the prefix (case-insensitive).
* The "C" locale is used for comparing upper and lowercase letters.
* Data is assumed to be ASCII text, UTF-8 will work fine too.
*/
AWS_COMMON_API
bool aws_byte_cursor_starts_with_ignore_case(const struct aws_byte_cursor *input, const struct aws_byte_cursor *prefix);
/**
* Case-insensitive hash function for array containing ASCII or UTF-8 text.
*/
AWS_COMMON_API
uint64_t aws_hash_array_ignore_case(const void *array, const size_t len);
/**
* Case-insensitive hash function for aws_byte_cursors stored in an aws_hash_table.
* For case-sensitive hashing, use aws_hash_byte_cursor_ptr().
*/
AWS_COMMON_API
uint64_t aws_hash_byte_cursor_ptr_ignore_case(const void *item);
/**
* Returns a lookup table for bytes that is the identity transformation with the exception
* of uppercase ascii characters getting replaced with lowercase characters. Used in
* caseless comparisons.
*/
AWS_COMMON_API
const uint8_t *aws_lookup_table_to_lower_get(void);
/**
* Returns lookup table to go from ASCII/UTF-8 hex character to a number (0-15).
* Non-hex characters map to 255.
* Valid examples:
* '0' -> 0
* 'F' -> 15
* 'f' -> 15
* Invalid examples:
* ' ' -> 255
* 'Z' -> 255
* '\0' -> 255
*/
AWS_COMMON_API
const uint8_t *aws_lookup_table_hex_to_num_get(void);
/**
* Lexical (byte value) comparison of two byte cursors
*/
AWS_COMMON_API
int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs);
/**
* Lexical (byte value) comparison of two byte cursors where the raw values are sent through a lookup table first
*/
AWS_COMMON_API
int aws_byte_cursor_compare_lookup(
const struct aws_byte_cursor *lhs,
const struct aws_byte_cursor *rhs,
const uint8_t *lookup_table);
/**
* For creating a byte buffer from a null-terminated string literal.
*/
AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str);
AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len);
AWS_COMMON_API struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity);
AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf);
AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str);
AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len);
/**
* Tests if the given aws_byte_cursor has at least len bytes remaining. If so,
* *buf is advanced by len bytes (incrementing ->ptr and decrementing ->len),
* and an aws_byte_cursor referring to the first len bytes of the original *buf
* is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL, ->len = 0 is
* returned.
*
* Note that if len is above (SIZE_MAX / 2), this function will also treat it as
* a buffer overflow, and return NULL without changing *buf.
*/
AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len);
/**
* Behaves identically to aws_byte_cursor_advance, but avoids speculative
* execution potentially reading out-of-bounds pointers (by returning an
* empty ptr in such speculated paths).
*
* This should generally be done when using an untrusted or
* data-dependent value for 'len', to avoid speculating into a path where
* cursor->ptr points outside the true ptr length.
*/
AWS_COMMON_API struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len);
/**
* Reads specified length of data from byte cursor and copies it to the
* destination array.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read(
struct aws_byte_cursor *AWS_RESTRICT cur,
void *AWS_RESTRICT dest,
const size_t len);
/**
* Reads as many bytes from cursor as size of buffer, and copies them to buffer.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_and_fill_buffer(
struct aws_byte_cursor *AWS_RESTRICT cur,
struct aws_byte_buf *AWS_RESTRICT dest);
/**
* Reads a single byte from cursor, placing it in *var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var);
/**
* Reads a 16-bit value in network byte order from cur, and places it in host
* byte order into var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var);
/**
* Reads an unsigned 24-bit value (3 bytes) in network byte order from cur,
* and places it in host byte order into 32-bit var.
* Ex: if cur's next 3 bytes are {0xAA, 0xBB, 0xCC}, then var becomes 0x00AABBCC.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_be24(struct aws_byte_cursor *cur, uint32_t *var);
/**
* Reads a 32-bit value in network byte order from cur, and places it in host
* byte order into var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var);
/**
* Reads a 64-bit value in network byte order from cur, and places it in host
* byte order into var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var);
/**
* Reads a 32-bit value in network byte order from cur, and places it in host
* byte order into var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var);
/**
* Reads a 64-bit value in network byte order from cur, and places it in host
* byte order into var.
*
* On success, returns true and updates the cursor pointer/length accordingly.
* If there is insufficient space in the cursor, returns false, leaving the
* cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var);
/**
* Reads 2 hex characters from ASCII/UTF-8 text to produce an 8-bit number.
* Accepts both lowercase 'a'-'f' and uppercase 'A'-'F'.
* For example: "0F" produces 15.
*
* On success, returns true and advances the cursor by 2.
* If there is insufficient space in the cursor or an invalid character
* is encountered, returns false, leaving the cursor unchanged.
*/
AWS_COMMON_API bool aws_byte_cursor_read_hex_u8(struct aws_byte_cursor *cur, uint8_t *var);
/**
* Appends a sub-buffer to the specified buffer.
*
* If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len),
* then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output corresponding
* to the last len bytes of the input buffer. The aws_byte_buf at *output will have a null
* allocator, a zero initial length, and a capacity of 'len'. The function then returns true.
*
* If there is insufficient space, then this function nulls all fields in *output and returns
* false.
*/
AWS_COMMON_API bool aws_byte_buf_advance(
struct aws_byte_buf *const AWS_RESTRICT buffer,
struct aws_byte_buf *const AWS_RESTRICT output,
const size_t len);
/**
* Write specified number of bytes from array to byte buffer.
*
* On success, returns true and updates the buffer length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write(
struct aws_byte_buf *AWS_RESTRICT buf,
const uint8_t *AWS_RESTRICT src,
size_t len);
/**
* Copies all bytes from buffer to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_from_whole_buffer(
struct aws_byte_buf *AWS_RESTRICT buf,
struct aws_byte_buf src);
/**
* Copies all bytes from buffer to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_from_whole_cursor(
struct aws_byte_buf *AWS_RESTRICT buf,
struct aws_byte_cursor src);
/**
* Without increasing buf's capacity, write as much as possible from advancing_cursor into buf.
*
* buf's len is updated accordingly.
* advancing_cursor is advanced so it contains the remaining unwritten parts.
* Returns the section of advancing_cursor which was written.
*
 * This function cannot fail. If buf is full (len == capacity) or advancing_cursor has 0 length,
* then buf and advancing_cursor are not altered and a cursor with 0 length is returned.
*
* Example: Given a buf with 2 bytes of space available and advancing_cursor with contents "abc".
* "ab" will be written to buf and buf->len will increase 2 and become equal to buf->capacity.
* advancing_cursor will advance so its contents become the unwritten "c".
* The returned cursor's contents will be the "ab" from the original advancing_cursor.
*/
AWS_COMMON_API struct aws_byte_cursor aws_byte_buf_write_to_capacity(
struct aws_byte_buf *buf,
struct aws_byte_cursor *advancing_cursor);
/**
* Copies one byte to buffer.
*
 * On success, returns true and updates the buffer length accordingly.
*
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c);
/**
* Writes one byte repeatedly to buffer (like memset)
*
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_u8_n(struct aws_byte_buf *buf, uint8_t c, size_t count);
/**
* Writes a 16-bit integer in network byte order (big endian) to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x);
/**
* Writes low 24-bits (3 bytes) of an unsigned integer in network byte order (big endian) to buffer.
* Ex: If x is 0x00AABBCC then {0xAA, 0xBB, 0xCC} is written to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, or x's value cannot fit in 3 bytes,
* returns false, leaving the buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_be24(struct aws_byte_buf *buf, uint32_t x);
/**
* Writes a 32-bit integer in network byte order (big endian) to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x);
/**
* Writes a 32-bit float in network byte order (big endian) to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x);
/**
* Writes a 64-bit integer in network byte order (big endian) to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x);
/**
* Writes a 64-bit float in network byte order (big endian) to buffer.
*
* On success, returns true and updates the buffer /length accordingly.
* If there is insufficient space in the buffer, returns false, leaving the
* buffer unchanged.
*/
AWS_COMMON_API bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x);
/**
* Like isalnum(), but ignores C locale.
* Returns true if ch has the value of ASCII/UTF-8: 'a'-'z', 'A'-'Z', or '0'-'9'.
*/
AWS_COMMON_API bool aws_isalnum(uint8_t ch);
/**
* Like isalpha(), but ignores C locale.
* Returns true if ch has the value of ASCII/UTF-8: 'a'-'z' or 'A'-'Z'.
*/
AWS_COMMON_API bool aws_isalpha(uint8_t ch);
/**
* Like isdigit().
* Returns true if ch has the value of ASCII/UTF-8: '0'-'9'.
*
* Note: C's built-in isdigit() is also supposed to ignore the C locale,
* but cppreference.com claims "some implementations (e.g. Microsoft in 1252 codepage)
* may classify additional single-byte characters as digits"
*/
AWS_COMMON_API bool aws_isdigit(uint8_t ch);
/**
* Like isxdigit().
* Returns true if ch has the value of ASCII/UTF-8: '0'-'9', 'a'-'f', or 'A'-'F'.
*
* Note: C's built-in isxdigit() is also supposed to ignore the C locale,
* but cppreference.com claims "some implementations (e.g. Microsoft in 1252 codepage)
* may classify additional single-byte characters as digits"
*/
AWS_COMMON_API bool aws_isxdigit(uint8_t ch);
/**
* Like isspace(), but ignores C locale.
* Return true if ch has the value of ASCII/UTF-8: space (0x20), form feed (0x0C),
* line feed (0x0A), carriage return (0x0D), horizontal tab (0x09), or vertical tab (0x0B).
*/
AWS_COMMON_API bool aws_isspace(uint8_t ch);
/**
* Read entire cursor as ASCII/UTF-8 unsigned base-10 number.
* Stricter than strtoull(), which allows whitespace and inputs that start with "0x"
*
* Examples:
* "0" -> 0
* "123" -> 123
* "00004" -> 4 // leading zeros ok
*
* Rejects things like:
* "-1" // negative numbers not allowed
* "1,000" // only characters 0-9 allowed
* "" // blank string not allowed
* " 0 " // whitespace not allowed
* "0x0" // hex not allowed
* "FF" // hex not allowed
* "999999999999999999999999999999999999999999" // larger than max u64
*/
AWS_COMMON_API
int aws_byte_cursor_utf8_parse_u64(struct aws_byte_cursor cursor, uint64_t *dst);
/**
* Read entire cursor as ASCII/UTF-8 unsigned base-16 number with NO "0x" prefix.
*
* Examples:
* "F" -> 15
* "000000ff" -> 255 // leading zeros ok
* "Ff" -> 255 // mixed case ok
* "123" -> 291
 *  "FFFFFFFFFFFFFFFF" -> 18446744073709551615 // max u64
*
* Rejects things like:
* "0x0" // 0x prefix not allowed
* "" // blank string not allowed
* " F " // whitespace not allowed
* "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" // larger than max u64
*/
AWS_COMMON_API
int aws_byte_cursor_utf8_parse_u64_hex(struct aws_byte_cursor cursor, uint64_t *dst);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_BYTE_BUF_H */

View File

@@ -0,0 +1,76 @@
#ifndef AWS_COMMON_BYTE_ORDER_H
#define AWS_COMMON_BYTE_ORDER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Returns 1 if machine is big endian, 0 if little endian.
* If you compile with even -O1 optimization, this check is completely optimized
* out at compile time and code which calls "if (aws_is_big_endian())" will do
* the right thing without branching.
*/
AWS_STATIC_IMPL int aws_is_big_endian(void);
/**
* Convert 64 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint64_t aws_hton64(uint64_t x);
/**
* Convert 64 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x);
/**
* Convert 32 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x);
/**
* Convert 32 bit float from host to network byte order.
*/
AWS_STATIC_IMPL float aws_htonf32(float x);
/**
* Convert 64 bit double from host to network byte order.
*/
AWS_STATIC_IMPL double aws_htonf64(double x);
/**
* Convert 32 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x);
/**
* Convert 32 bit float from network to host byte order.
*/
AWS_STATIC_IMPL float aws_ntohf32(float x);
/**
 * Convert 64 bit double from network to host byte order.
 */
AWS_STATIC_IMPL double aws_ntohf64(double x);
/**
* Convert 16 bit integer from host to network byte order.
*/
AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x);
/**
* Convert 16 bit integer from network to host byte order.
*/
AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/byte_order.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_BYTE_ORDER_H */

View File

@@ -0,0 +1,164 @@
#ifndef AWS_COMMON_BYTE_ORDER_INL
#define AWS_COMMON_BYTE_ORDER_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
#ifdef _WIN32
# include <stdlib.h>
#else
# include <netinet/in.h>
#endif /* _WIN32 */
AWS_EXTERN_C_BEGIN
/**
 * Returns 1 if machine is big endian, 0 if little endian.
 * If you compile with even -O1 optimization, this check is completely optimized
 * out at compile time and code which calls "if (aws_is_big_endian())" will do
 * the right thing without branching.
 */
AWS_STATIC_IMPL int aws_is_big_endian(void) {
    /* A big-endian machine stores the most significant byte of 0x0100 at
     * byte offset 0, so the first byte reads as 1 there and as 0 on a
     * little-endian machine. Byte access via uint8_t* is aliasing-safe. */
    const uint16_t probe = 0x100;
    const uint8_t *first_byte = (const uint8_t *)&probe;
    return first_byte[0];
}
/**
 * Convert 64 bit integer from host to network byte order.
 */
AWS_STATIC_IMPL uint64_t aws_hton64(uint64_t x) {
    /* Network byte order is big endian: nothing to do on big-endian hosts. */
    if (aws_is_big_endian()) {
        return x;
    }
#if defined(__x86_64__) && (defined(__GNUC__) || defined(__clang__)) && !defined(CBMC)
    /* Single BSWAP instruction on x86-64. Excluded under CBMC because the
     * model checker cannot reason about inline assembly. */
    uint64_t v;
    __asm__("bswap %q0" : "=r"(v) : "0"(x));
    return v;
#elif defined(_MSC_VER)
    return _byteswap_uint64(x);
#else
    /* Portable fallback: byte-swap each 32-bit half with htonl(), then
     * exchange the halves. */
    uint32_t low = x & UINT32_MAX;
    uint32_t high = (uint32_t)(x >> 32);
    return ((uint64_t)htonl(low)) << 32 | htonl(high);
#endif
}
/**
 * Convert 64 bit integer from network to host byte order.
 */
AWS_STATIC_IMPL uint64_t aws_ntoh64(uint64_t x) {
    /* Byte reversal is its own inverse, so ntoh and hton are the same op. */
    return aws_hton64(x);
}
/**
 * Convert 32 bit integer from host to network byte order.
 */
AWS_STATIC_IMPL uint32_t aws_hton32(uint32_t x) {
#ifdef _WIN32
    /* htonl() lives in winsock on Windows; use the CRT byte-swap intrinsic
     * instead, which is only required on little-endian hosts. */
    if (aws_is_big_endian()) {
        return x;
    }
    return _byteswap_ulong(x);
#else
    return htonl(x);
#endif
}
/**
 * Convert 32 bit float from host to network byte order.
 */
AWS_STATIC_IMPL float aws_htonf32(float x) {
    if (aws_is_big_endian()) {
        return x;
    }
    /* Reverse the 4 bytes of the float using unsigned-char access, which is
     * permitted by the aliasing rules. */
    const uint8_t *src = (const uint8_t *)&x;
    float swapped;
    uint8_t *dst = (uint8_t *)&swapped;
    for (size_t i = 0; i < sizeof(x); ++i) {
        dst[i] = src[sizeof(x) - 1 - i];
    }
    return swapped;
}
/**
 * Convert 64 bit double from host to network byte order.
 */
AWS_STATIC_IMPL double aws_htonf64(double x) {
    if (aws_is_big_endian()) {
        return x;
    }
    /* Reverse the 8 bytes of the double using unsigned-char access, which is
     * permitted by the aliasing rules. */
    const uint8_t *src = (const uint8_t *)&x;
    double swapped;
    uint8_t *dst = (uint8_t *)&swapped;
    for (size_t i = 0; i < sizeof(x); ++i) {
        dst[i] = src[sizeof(x) - 1 - i];
    }
    return swapped;
}
/**
 * Convert 32 bit integer from network to host byte order.
 */
AWS_STATIC_IMPL uint32_t aws_ntoh32(uint32_t x) {
#ifdef _WIN32
    /* ntohl() lives in winsock on Windows; use the CRT byte-swap intrinsic
     * instead, which is only required on little-endian hosts. */
    if (aws_is_big_endian()) {
        return x;
    }
    return _byteswap_ulong(x);
#else
    return ntohl(x);
#endif
}
/**
 * Convert 32 bit float from network to host byte order.
 */
AWS_STATIC_IMPL float aws_ntohf32(float x) {
    /* The byte reversal is symmetric, so reuse the host-to-network path. */
    return aws_htonf32(x);
}
/**
 * Convert 64 bit double from network to host byte order.
 */
AWS_STATIC_IMPL double aws_ntohf64(double x) {
    /* The byte reversal is symmetric, so reuse the host-to-network path. */
    return aws_htonf64(x);
}
/**
 * Convert 16 bit integer from host to network byte order.
 */
AWS_STATIC_IMPL uint16_t aws_hton16(uint16_t x) {
#ifdef _WIN32
    /* htons() lives in winsock on Windows; use the CRT byte-swap intrinsic
     * instead, which is only required on little-endian hosts. */
    if (aws_is_big_endian()) {
        return x;
    }
    return _byteswap_ushort(x);
#else
    return htons(x);
#endif
}
/**
 * Convert 16 bit integer from network to host byte order.
 */
AWS_STATIC_IMPL uint16_t aws_ntoh16(uint16_t x) {
#ifdef _WIN32
    /* ntohs() lives in winsock on Windows; use the CRT byte-swap intrinsic
     * instead, which is only required on little-endian hosts. */
    if (aws_is_big_endian()) {
        return x;
    }
    return _byteswap_ushort(x);
#else
    return ntohs(x);
#endif
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_BYTE_ORDER_INL */

View File

@@ -0,0 +1,87 @@
#ifndef AWS_COMMON_CACHE_H
#define AWS_COMMON_CACHE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/linked_hash_table.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_cache;
struct aws_cache_vtable {
void (*destroy)(struct aws_cache *cache);
int (*find)(struct aws_cache *cache, const void *key, void **p_value);
int (*put)(struct aws_cache *cache, const void *key, void *p_value);
int (*remove)(struct aws_cache *cache, const void *key);
void (*clear)(struct aws_cache *cache);
size_t (*get_element_count)(const struct aws_cache *cache);
};
/**
 * Base structure for caches, built on the linked hash table implementation.
 */
struct aws_cache {
struct aws_allocator *allocator;
const struct aws_cache_vtable *vtable;
struct aws_linked_hash_table table;
size_t max_items;
void *impl;
};
/* Default implementations */
void aws_cache_base_default_destroy(struct aws_cache *cache);
int aws_cache_base_default_find(struct aws_cache *cache, const void *key, void **p_value);
int aws_cache_base_default_remove(struct aws_cache *cache, const void *key);
void aws_cache_base_default_clear(struct aws_cache *cache);
size_t aws_cache_base_default_get_element_count(const struct aws_cache *cache);
AWS_EXTERN_C_BEGIN
/**
* Cleans up the cache. Elements in the cache will be evicted and cleanup
* callbacks will be invoked.
*/
AWS_COMMON_API
void aws_cache_destroy(struct aws_cache *cache);
/**
* Finds element in the cache by key. If found, *p_value will hold the stored value, and AWS_OP_SUCCESS will be
* returned. If not found, AWS_OP_SUCCESS will be returned and *p_value will be NULL.
*
* If any errors occur AWS_OP_ERR will be returned.
*/
AWS_COMMON_API
int aws_cache_find(struct aws_cache *cache, const void *key, void **p_value);
/**
* Puts `p_value` at `key`. If an element is already stored at `key` it will be replaced. If the cache is already full,
* an item will be removed based on the cache policy.
*/
AWS_COMMON_API
int aws_cache_put(struct aws_cache *cache, const void *key, void *p_value);
/**
* Removes item at `key` from the cache.
*/
AWS_COMMON_API
int aws_cache_remove(struct aws_cache *cache, const void *key);
/**
* Clears all items from the cache.
*/
AWS_COMMON_API
void aws_cache_clear(struct aws_cache *cache);
/**
* Returns the number of elements in the cache.
*/
AWS_COMMON_API
size_t aws_cache_get_element_count(const struct aws_cache *cache);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_CACHE_H */

View File

@@ -0,0 +1,449 @@
#ifndef AWS_COMMON_CBOR_H
#define AWS_COMMON_CBOR_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
 * The types used by APIs; not 1:1 with major type.
* It's an extension for cbor major type in RFC8949 section 3.1
* Major type 0 - AWS_CBOR_TYPE_UINT
* Major type 1 - AWS_CBOR_TYPE_NEGINT
* Major type 2 - AWS_CBOR_TYPE_BYTES/AWS_CBOR_TYPE_INDEF_BYTES_START
* Major type 3 - AWS_CBOR_TYPE_TEXT/AWS_CBOR_TYPE_INDEF_TEXT_START
* Major type 4 - AWS_CBOR_TYPE_ARRAY_START/AWS_CBOR_TYPE_INDEF_ARRAY_START
* Major type 5 - AWS_CBOR_TYPE_MAP_START/AWS_CBOR_TYPE_INDEF_MAP_START
* Major type 6 - AWS_CBOR_TYPE_TAG
* Major type 7
* - 20/21 - AWS_CBOR_TYPE_BOOL
* - 22 - AWS_CBOR_TYPE_NULL
* - 23 - AWS_CBOR_TYPE_UNDEFINED
* - 25/26/27 - AWS_CBOR_TYPE_FLOAT
* - 31 - AWS_CBOR_TYPE_BREAK
* - rest of value are not supported.
*/
enum aws_cbor_type {
AWS_CBOR_TYPE_UNKNOWN = 0,
AWS_CBOR_TYPE_UINT,
AWS_CBOR_TYPE_NEGINT,
AWS_CBOR_TYPE_FLOAT,
AWS_CBOR_TYPE_BYTES,
AWS_CBOR_TYPE_TEXT,
AWS_CBOR_TYPE_ARRAY_START,
AWS_CBOR_TYPE_MAP_START,
AWS_CBOR_TYPE_TAG,
AWS_CBOR_TYPE_BOOL,
AWS_CBOR_TYPE_NULL,
AWS_CBOR_TYPE_UNDEFINED,
AWS_CBOR_TYPE_BREAK,
AWS_CBOR_TYPE_INDEF_BYTES_START,
AWS_CBOR_TYPE_INDEF_TEXT_START,
AWS_CBOR_TYPE_INDEF_ARRAY_START,
AWS_CBOR_TYPE_INDEF_MAP_START,
};
/**
* The common tags, refer to RFC8949 section 3.4
* Expected value type followed by the tag:
* AWS_CBOR_TAG_STANDARD_TIME - AWS_CBOR_TYPE_TEXT
* AWS_CBOR_TAG_EPOCH_TIME - AWS_CBOR_TYPE_UINT/AWS_CBOR_TYPE_NEGINT/AWS_CBOR_TYPE_FLOAT
* AWS_CBOR_TAG_UNSIGNED_BIGNUM - AWS_CBOR_TYPE_BYTES
* AWS_CBOR_TAG_NEGATIVE_BIGNUM - AWS_CBOR_TYPE_BYTES
* AWS_CBOR_TAG_DECIMAL_FRACTION - AWS_CBOR_TYPE_ARRAY_START/AWS_CBOR_TYPE_INDEF_ARRAY_START
**/
#define AWS_CBOR_TAG_STANDARD_TIME 0
#define AWS_CBOR_TAG_EPOCH_TIME 1
#define AWS_CBOR_TAG_UNSIGNED_BIGNUM 2
#define AWS_CBOR_TAG_NEGATIVE_BIGNUM 3
#define AWS_CBOR_TAG_DECIMAL_FRACTION 4
struct aws_cbor_encoder;
struct aws_cbor_decoder;
/*******************************************************************************
* ENCODE
******************************************************************************/
/* Return c-string for aws_cbor_type */
AWS_COMMON_API
const char *aws_cbor_type_cstr(enum aws_cbor_type type);
/**
 * @brief Create a new cbor encoder backed by a temporary internal buffer.
 * Every aws_cbor_encoder_write_* call encodes directly into the buffer, appending to the previously encoded data.
*
* @param allocator
* @return aws_cbor_encoder
*/
AWS_COMMON_API
struct aws_cbor_encoder *aws_cbor_encoder_new(struct aws_allocator *allocator);
AWS_COMMON_API
struct aws_cbor_encoder *aws_cbor_encoder_destroy(struct aws_cbor_encoder *encoder);
/**
* @brief Get the current encoded data from encoder. The encoded data has the same lifetime as the encoder, and once
* any other function call invoked for the encoder, the encoded data is no longer valid.
*
* @param encoder
* @return struct aws_byte_cursor from the encoder buffer.
*/
AWS_COMMON_API
struct aws_byte_cursor aws_cbor_encoder_get_encoded_data(const struct aws_cbor_encoder *encoder);
/**
* @brief Clear the current encoded buffer from encoder.
*
* @param encoder
*/
AWS_COMMON_API
void aws_cbor_encoder_reset(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a AWS_CBOR_TYPE_UINT value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1
*
* TODO: maybe add a width of the encoded value.
*
* @param encoder
* @param value value to encode.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_uint(struct aws_cbor_encoder *encoder, uint64_t value);
/**
* @brief Encode a AWS_CBOR_TYPE_NEGINT value to "smallest possible" in encoder's buffer.
* It represents (-1 - value).
* Referring to RFC8949 section 4.2.1
*
*
* @param encoder
* @param value The argument to encode to negative integer, which is (-1 - expected_val)
*/
AWS_COMMON_API
void aws_cbor_encoder_write_negint(struct aws_cbor_encoder *encoder, uint64_t value);
/**
* @brief Encode a AWS_CBOR_TYPE_FLOAT value to "smallest possible", but will not be encoded into half-precision float,
* as it's not well supported cross languages.
*
* To be more specific, it will be encoded into integer/negative/float
 * (order with priority) when the conversion will not cause precision loss.
*
* @param encoder
* @param value value to encode.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_float(struct aws_cbor_encoder *encoder, double value);
/**
* @brief Encode a AWS_CBOR_TYPE_BYTES value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1, the length of "from" will be encoded first and then the value of "from" will
* be followed.
*
* @param encoder
* @param from value to encode.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_bytes(struct aws_cbor_encoder *encoder, struct aws_byte_cursor from);
/**
* @brief Encode a AWS_CBOR_TYPE_TEXT value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1, the length of "from" will be encoded first and then the value of "from" will
* be followed.
*
* @param encoder
* @param from value to encode.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_text(struct aws_cbor_encoder *encoder, struct aws_byte_cursor from);
/**
* @brief Encode a AWS_CBOR_TYPE_ARRAY_START value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1
* The "number_entries" is the cbor data items should be followed as the content of the array.
* Notes: it's user's responsibility to keep the integrity of the array to be encoded.
*
* @param encoder
* @param number_entries The number of data item in array.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_array_start(struct aws_cbor_encoder *encoder, size_t number_entries);
/**
* @brief Encode a AWS_CBOR_TYPE_MAP_START value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1
* The "number_entries" is the number of pair of cbor data items as key and value should be followed as the content of
* the map.
*
* Notes: it's user's responsibility to keep the integrity of the map to be encoded.
*
* @param encoder
* @param number_entries The number of data item in map.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_map_start(struct aws_cbor_encoder *encoder, size_t number_entries);
/**
* @brief Encode a AWS_CBOR_TYPE_TAG value to "smallest possible" in encoder's buffer.
* Referring to RFC8949 section 4.2.1
* The following cbor data item will be the content of the tagged value.
* Notes: it's user's responsibility to keep the integrity of the tagged value to follow the RFC8949 section 3.4
*
* @param encoder
* @param tag_number The tag value to encode.
*/
AWS_COMMON_API
void aws_cbor_encoder_write_tag(struct aws_cbor_encoder *encoder, uint64_t tag_number);
/**
* @brief Encode a simple value AWS_CBOR_TYPE_NULL
*
* @param encoder
*/
AWS_COMMON_API
void aws_cbor_encoder_write_null(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a simple value AWS_CBOR_TYPE_UNDEFINED
*
* @param encoder
*/
AWS_COMMON_API
void aws_cbor_encoder_write_undefined(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a simple value AWS_CBOR_TYPE_BOOL
*
* @param encoder
*/
AWS_COMMON_API
void aws_cbor_encoder_write_bool(struct aws_cbor_encoder *encoder, bool value);
/**
* @brief Encode a simple value AWS_CBOR_TYPE_BREAK
*
* Notes: no error checking, it's user's responsibility to track the break
* to close the corresponding indef_start
*/
AWS_COMMON_API
void aws_cbor_encoder_write_break(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a AWS_CBOR_TYPE_INDEF_BYTES_START
*
* Notes: no error checking, it's user's responsibility to add corresponding data and the break
* to close the indef_start
*/
AWS_COMMON_API
void aws_cbor_encoder_write_indef_bytes_start(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a AWS_CBOR_TYPE_INDEF_TEXT_START
*
* Notes: no error checking, it's user's responsibility to add corresponding data
* and the break to close the indef_start
*/
AWS_COMMON_API
void aws_cbor_encoder_write_indef_text_start(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a AWS_CBOR_TYPE_INDEF_ARRAY_START
*
* Notes: no error checking, it's user's responsibility to add corresponding data
* and the break to close the indef_start
*/
AWS_COMMON_API
void aws_cbor_encoder_write_indef_array_start(struct aws_cbor_encoder *encoder);
/**
* @brief Encode a AWS_CBOR_TYPE_INDEF_MAP_START
*
* Notes: no error checking, it's user's responsibility to add corresponding data
* and the break to close the indef_start
*/
AWS_COMMON_API
void aws_cbor_encoder_write_indef_map_start(struct aws_cbor_encoder *encoder);
/*******************************************************************************
* DECODE
******************************************************************************/
/**
* @brief Create a cbor decoder to take src to decode.
* The typical usage of decoder will be:
* - If the next element type only accept what expected, `aws_cbor_decoder_pop_next_*`
* - If the next element type accept different type, invoke `aws_cbor_decoder_peek_type` first, then based on the type
* to invoke corresponding `aws_cbor_decoder_pop_next_*`
 * - If the next element type doesn't have a corresponding value, specifically: AWS_CBOR_TYPE_NULL,
* AWS_CBOR_TYPE_UNDEFINED, AWS_CBOR_TYPE_INF_*_START, AWS_CBOR_TYPE_BREAK, call
* `aws_cbor_decoder_consume_next_single_element` to consume it and continues for further decoding.
* - To ignore the next data item (the element and the content of it), `aws_cbor_decoder_consume_next_whole_data_item`
*
 * Note: it's the caller's responsibility to keep the src alive for as long as the decoder.
*
* @param allocator
* @param src The src data to decode from.
* @return decoder
*/
AWS_COMMON_API
struct aws_cbor_decoder *aws_cbor_decoder_new(struct aws_allocator *allocator, struct aws_byte_cursor src);
AWS_COMMON_API
struct aws_cbor_decoder *aws_cbor_decoder_destroy(struct aws_cbor_decoder *decoder);
/**
* @brief Get the length of the remaining bytes of the source. Once the source was decoded, it will be consumed,
* and result in decrease of the remaining length of bytes.
*
* @param decoder
* @return The length of bytes remaining of the decoder source.
*/
AWS_COMMON_API
size_t aws_cbor_decoder_get_remaining_length(const struct aws_cbor_decoder *decoder);
/**
* @brief Decode the next element and store it in the decoder cache if there was no element cached.
* If there was element cached, just return the type of the cached element.
*
* @param decoder
* @param out_type
* @return AWS_OP_SUCCESS if succeed, AWS_OP_ERR for any decoding error and corresponding error code will be raised.
*/
AWS_COMMON_API
int aws_cbor_decoder_peek_type(struct aws_cbor_decoder *decoder, enum aws_cbor_type *out_type);
/**
* @brief Consume the next data item, includes all the content within the data item.
*
* As an example for the following cbor, this function will consume all the data
* as it's only one cbor data item, an indefinite map with 2 key, value pair:
* 0xbf6346756ef563416d7421ff
* BF -- Start indefinite-length map
* 63 -- First key, UTF-8 string length 3
* 46756e -- "Fun"
* F5 -- First value, true
* 63 -- Second key, UTF-8 string length 3
* 416d74 -- "Amt"
* 21 -- Second value, -2
* FF -- "break"
*
* Notes: this function will not ensure the data item is well-formed.
*
* @param src The src to parse data from
* @return AWS_OP_SUCCESS successfully consumed the next data item, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_consume_next_whole_data_item(struct aws_cbor_decoder *decoder);
/**
* @brief Consume the next single element, without the content followed by the element.
*
* As an example for the following cbor, this function will only consume the
* 0xBF, "Start indefinite-length map", not any content of the map represented.
* The next element to decode will start from 0x63
* 0xbf6346756ef563416d7421ff
* BF -- Start indefinite-length map
* 63 -- First key, UTF-8 string length 3
* 46756e -- "Fun"
* F5 -- First value, true
* 63 -- Second key, UTF-8 string length 3
* 416d74 -- "Amt"
* 21 -- Second value, -2
* FF -- "break"
*
* @param decoder The decoder to parse data from
* @return AWS_OP_SUCCESS successfully consumed the next element, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_consume_next_single_element(struct aws_cbor_decoder *decoder);
/**
* @brief Get the next element based on the type. If the next element doesn't match the expected type. Error will be
* raised. If the next element already been cached, it will consume the cached item when no error was returned.
* Specifically:
* AWS_CBOR_TYPE_UINT - aws_cbor_decoder_pop_next_unsigned_int_val
* AWS_CBOR_TYPE_NEGINT - aws_cbor_decoder_pop_next_negative_int_val, it represents (-1 - *out)
* AWS_CBOR_TYPE_FLOAT - aws_cbor_decoder_pop_next_double_val
* AWS_CBOR_TYPE_BYTES - aws_cbor_decoder_pop_next_bytes_val
* AWS_CBOR_TYPE_TEXT - aws_cbor_decoder_pop_next_text_val
*
* @param decoder
* @param out
* @return AWS_OP_SUCCESS successfully consumed the next element and get the result, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_pop_next_unsigned_int_val(struct aws_cbor_decoder *decoder, uint64_t *out);
AWS_COMMON_API
int aws_cbor_decoder_pop_next_negative_int_val(struct aws_cbor_decoder *decoder, uint64_t *out);
AWS_COMMON_API
int aws_cbor_decoder_pop_next_float_val(struct aws_cbor_decoder *decoder, double *out);
AWS_COMMON_API
int aws_cbor_decoder_pop_next_boolean_val(struct aws_cbor_decoder *decoder, bool *out);
AWS_COMMON_API
int aws_cbor_decoder_pop_next_bytes_val(struct aws_cbor_decoder *decoder, struct aws_byte_cursor *out);
AWS_COMMON_API
int aws_cbor_decoder_pop_next_text_val(struct aws_cbor_decoder *decoder, struct aws_byte_cursor *out);
/**
* @brief Get the next AWS_CBOR_TYPE_ARRAY_START element. Only consume the AWS_CBOR_TYPE_ARRAY_START element and set the
* size of array to *out_size, not the content of the array. The next *out_size cbor data items will be the content of
 * the array for a valid cbor data.
*
* Notes: For indefinite-length, this function will fail with "AWS_ERROR_CBOR_UNEXPECTED_TYPE". The designed way to
* handle indefinite-length is:
* - Get AWS_CBOR_TYPE_INDEF_ARRAY_START from _peek_type
* - call `aws_cbor_decoder_consume_next_single_element` to pop the indefinite-length start.
* - Decode the next data item until AWS_CBOR_TYPE_BREAK read.
*
* @param decoder
* @param out_size store the size of array if succeed.
* @return AWS_OP_SUCCESS successfully consumed the next element and get the result, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_pop_next_array_start(struct aws_cbor_decoder *decoder, uint64_t *out_size);
/**
* @brief Get the next AWS_CBOR_TYPE_MAP_START element. Only consume the AWS_CBOR_TYPE_MAP_START element and set the
 * size of the map to *out_size, not the content of the map. The next *out_size pairs of cbor data items as key and value
 * will be the content of the map for a valid cbor data.
*
* Notes: For indefinite-length, this function will fail with "AWS_ERROR_CBOR_UNEXPECTED_TYPE". The designed way to
* handle indefinite-length is:
* - Get AWS_CBOR_TYPE_INDEF_MAP_START from _peek_type
* - call `aws_cbor_decoder_consume_next_single_element` to pop the indefinite-length start.
* - Decode the next data item until AWS_CBOR_TYPE_BREAK read.
*
* @param decoder
* @param out_size store the size of map if succeed.
* @return AWS_OP_SUCCESS successfully consumed the next element and get the result, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_pop_next_map_start(struct aws_cbor_decoder *decoder, uint64_t *out_size);
/**
* @brief Get the next AWS_CBOR_TYPE_TAG element. Only consume the AWS_CBOR_TYPE_TAG element and set the
* tag value to *out_tag_val, not the content of the tagged. The next cbor data item will be the content of the tagged
* value for a valid cbor data.
*
* @param decoder
 * @param out_tag_val store the tag value if succeed.
* @return AWS_OP_SUCCESS successfully consumed the next element and get the result, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_cbor_decoder_pop_next_tag_val(struct aws_cbor_decoder *decoder, uint64_t *out_tag_val);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif // AWS_COMMON_CBOR_H

View File

@@ -0,0 +1,65 @@
#ifndef AWS_COMMON_CLOCK_H
#define AWS_COMMON_CLOCK_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
AWS_PUSH_SANE_WARNING_LEVEL
/* Timestamp units; each enum value is the number of ticks per second,
 * so the values can be used directly as frequencies in conversions. */
enum aws_timestamp_unit {
    AWS_TIMESTAMP_SECS = 1,
    AWS_TIMESTAMP_MILLIS = 1000,
    AWS_TIMESTAMP_MICROS = 1000000,
    AWS_TIMESTAMP_NANOS = 1000000000,
};
AWS_EXTERN_C_BEGIN
/**
* Converts 'timestamp' from unit 'convert_from' to unit 'convert_to', if the units are the same then 'timestamp' is
* returned. If 'remainder' is NOT NULL, it will be set to the remainder if convert_from is a more precise unit than
* convert_to. To avoid unnecessary branching, 'remainder' is not zero initialized in this function, be sure to set it
* to 0 first if you care about that kind of thing. If conversion would lead to integer overflow, the timestamp
* returned will be the highest possible time that is representable, i.e. UINT64_MAX.
*/
AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
uint64_t timestamp,
enum aws_timestamp_unit convert_from,
enum aws_timestamp_unit convert_to,
uint64_t *remainder);
/**
* More general form of aws_timestamp_convert that takes arbitrary frequencies rather than the timestamp enum.
*/
AWS_STATIC_IMPL uint64_t
aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder);
/**
* Get ticks in nanoseconds (usually 100 nanosecond precision) on the high resolution clock (most-likely TSC). This
* clock has no bearing on the actual system time. On success, timestamp will be set.
*/
AWS_COMMON_API
int aws_high_res_clock_get_ticks(uint64_t *timestamp);
/**
* Get ticks in nanoseconds (usually 100 nanosecond precision) on the system clock. Reflects actual system time via
* nanoseconds since unix epoch. Use with care since an inaccurately set clock will probably cause bugs. On success,
* timestamp will be set.
*/
AWS_COMMON_API
int aws_sys_clock_get_ticks(uint64_t *timestamp);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/clock.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_CLOCK_H */

View File

@@ -0,0 +1,91 @@
#ifndef AWS_COMMON_CLOCK_INL
#define AWS_COMMON_CLOCK_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/clock.h>
#include <aws/common/common.h>
#include <aws/common/math.h>
AWS_EXTERN_C_BEGIN
/**
 * Converts 'ticks' counted at 'old_frequency' into ticks at 'new_frequency'.
 * If 'remainder' is non-NULL it is zeroed, then set to the leftover ticks —
 * but only when converting from a higher frequency to a lower one that evenly
 * divides it (the only case where a remainder is mathematically meaningful,
 * and the behavior required for backwards compatibility with the original
 * version of this function). If the conversion would overflow, the result
 * saturates to UINT64_MAX.
 */
AWS_STATIC_IMPL uint64_t
aws_timestamp_convert_u64(uint64_t ticks, uint64_t old_frequency, uint64_t new_frequency, uint64_t *remainder) {
    AWS_FATAL_ASSERT(old_frequency > 0 && new_frequency > 0);

    if (remainder != NULL) {
        *remainder = 0;
        /* remainder only applies going high -> low frequency, and only when the
         * old frequency is evenly divisible by the new one */
        if (new_frequency < old_frequency && (old_frequency % new_frequency) == 0) {
            uint64_t ticks_per_new_tick = old_frequency / new_frequency;
            *remainder = ticks % ticks_per_new_tick;
        }
    }

    /*
     * Split the conversion into whole seconds plus a sub-second leftover so the
     * intermediate multiplication stays small.
     */
    uint64_t whole_seconds = ticks / old_frequency;
    uint64_t leftover_ticks = ticks - whole_seconds * old_frequency;
    uint64_t converted_whole = aws_mul_u64_saturating(whole_seconds, new_frequency);

    /*
     * (leftover_ticks * new_frequency) / old_frequency is the most accurate
     * ordering of these operations:
     *
     *   - dividing first, (leftover / old) * new, always truncates to 0 since
     *     leftover_ticks < old_frequency;
     *   - scaling first, leftover * (new / old), is only exact when the new
     *     frequency is a multiple of the old one.
     *
     * The primary concern with multiplying first is overflow; for that to
     * happen both frequencies would need to be in the billions, which no
     * current machine's hardware counters appear to use. Even a nanosecond to
     * nanosecond conversion cannot overflow. If this ever becomes an issue,
     * intrinsics/platform support for 128 bit math would be needed.
     *
     * For review consideration:
     * (1) should we special case frequencies being a multiple of the other?
     * (2) should we special case frequencies being the same? A ns-to-ns
     * conversion does the full math and approaches overflow (but cannot
     * actually do so).
     */
    uint64_t converted_leftover = aws_mul_u64_saturating(leftover_ticks, new_frequency) / old_frequency;

    return aws_add_u64_saturating(converted_whole, converted_leftover);
}
/**
 * Converts 'timestamp' between aws_timestamp_unit units. The enum values are
 * ticks-per-second counts, so they are passed directly as frequencies to
 * aws_timestamp_convert_u64; see that function for remainder and overflow
 * (saturation) semantics.
 */
AWS_STATIC_IMPL uint64_t aws_timestamp_convert(
    uint64_t timestamp,
    enum aws_timestamp_unit convert_from,
    enum aws_timestamp_unit convert_to,
    uint64_t *remainder) {
    return aws_timestamp_convert_u64(timestamp, convert_from, convert_to, remainder);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_CLOCK_INL */

View File

@@ -0,0 +1,108 @@
#ifndef AWS_COMMON_COMMAND_LINE_PARSER_H
#define AWS_COMMON_COMMAND_LINE_PARSER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
enum aws_cli_options_has_arg {
AWS_CLI_OPTIONS_NO_ARGUMENT = 0,
AWS_CLI_OPTIONS_REQUIRED_ARGUMENT = 1,
AWS_CLI_OPTIONS_OPTIONAL_ARGUMENT = 2,
};
/**
* Invoked when a subcommand is encountered. argc and argv[] begins at the command encountered.
* command_name is the name of the command being handled.
*/
typedef int(aws_cli_options_subcommand_fn)(int argc, char *const argv[], const char *command_name, void *user_data);
/**
* Dispatch table to dispatch cli commands from.
* command_name should be the exact string for the command you want to handle from the command line.
*/
struct aws_cli_subcommand_dispatch {
aws_cli_options_subcommand_fn *subcommand_fn;
const char *command_name;
};
/* Ignoring padding since we're trying to maintain getopt.h compatibility */
/* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */
struct aws_cli_option {
const char *name;
enum aws_cli_options_has_arg has_arg;
int *flag;
int val;
};
AWS_EXTERN_C_BEGIN
/**
* Initialized to 1 (for where the first argument would be). As arguments are parsed, this number is the index
* of the next argument to parse. Reset this to 1 to parse another set of arguments, or to rerun the parser.
*/
AWS_COMMON_API extern int aws_cli_optind;
/**
* If an option has an argument, when the option is encountered, this will be set to the argument portion.
*/
AWS_COMMON_API extern const char *aws_cli_optarg;
/**
* If 0x02 was returned by aws_cli_getopt_long(), this value will be set to the argument encountered.
*/
AWS_COMMON_API extern const char *aws_cli_positional_arg;
/**
* A mostly compliant implementation of posix getopt_long(). Parses command-line arguments. argc is the number of
* command line arguments passed in argv. optstring contains the legitimate option characters. The option characters
* correspond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is
* followed by '::', the argument is optional (not implemented yet).
*
* longopts, is an array of struct aws_cli_option. These are the allowed options for the program.
* The last member of the array must be zero initialized.
*
* If longindex is non-null, it will be set to the index in longopts, for the found option.
*
* Returns option val if it was found, '?' if an option was encountered that was not specified in the option string,
* 0x02 (START_OF_TEXT) will be returned if a positional argument was encountered. returns -1 when all arguments that
* can be parsed have been parsed.
*/
AWS_COMMON_API int aws_cli_getopt_long(
int argc,
char *const argv[],
const char *optstring,
const struct aws_cli_option *longopts,
int *longindex);
/**
* Resets global parser state for use in another parser run for the application.
*/
AWS_COMMON_API void aws_cli_reset_state(void);
/**
* Dispatches the current command line arguments with a subcommand from the second input argument in argv[], if
* dispatch table contains a command that matches the argument. When the command is dispatched, argc and argv will be
* updated to reflect the new argument count. The cli options are required to come after the subcommand. If either, no
* dispatch was found or there was no argument passed to the program, this function will return AWS_OP_ERR. Check
* aws_last_error() for details on the error.
* @param argc number of arguments passed to int main()
* @param argv the arguments passed to int main()
* @param parse_cb, optional, specify NULL if you don't want to handle this. This argument is for parsing "meta"
* commands from the command line options prior to dispatch occurring.
* @param dispatch_table table containing functions and command name to dispatch on.
* @param table_length number of entries in dispatch_table.
* @return AWS_OP_SUCCESS(0) on success, AWS_OP_ERR(-1) on failure
*/
AWS_COMMON_API int aws_cli_dispatch_on_subcommand(
int argc,
char *const argv[],
struct aws_cli_subcommand_dispatch *dispatch_table,
int table_length,
void *user_data);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_COMMAND_LINE_PARSER_H */

View File

@@ -0,0 +1,48 @@
#ifndef AWS_COMMON_COMMON_H
#define AWS_COMMON_COMMON_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/config.h>
#include <aws/common/exports.h>
#include <aws/common/allocator.h>
#include <aws/common/assert.h>
#include <aws/common/error.h>
#include <aws/common/macros.h>
#include <aws/common/platform.h>
#include <aws/common/predicates.h>
#include <aws/common/stdbool.h>
#include <aws/common/stdint.h>
#include <aws/common/zero.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h> /* for abort() */
#include <string.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Initializes internal data structures used by aws-c-common.
* Must be called before using any functionality in aws-c-common.
*/
AWS_COMMON_API
void aws_common_library_init(struct aws_allocator *allocator);
/**
* Shuts down the internal data structures used by aws-c-common.
*/
AWS_COMMON_API
void aws_common_library_clean_up(void);
AWS_COMMON_API
void aws_common_fatal_assert_library_initialized(void);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_COMMON_H */

View File

@@ -0,0 +1,118 @@
#ifndef AWS_COMMON_CONDITION_VARIABLE_H
#define AWS_COMMON_CONDITION_VARIABLE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#ifndef _WIN32
# include <pthread.h>
#endif
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_mutex;
struct aws_condition_variable;
typedef bool(aws_condition_predicate_fn)(void *);
struct aws_condition_variable {
#ifdef _WIN32
    /* Opaque storage for the Windows condition variable handle; kept as void*
     * so this header does not need to include windows.h (see the note on
     * AWS_CONDITION_VARIABLE_INIT below). */
    void *condition_handle;
#else
    pthread_cond_t condition_handle;
#endif
    /* Tracks whether condition_handle holds a valid, initialized handle. */
    bool initialized;
};
/**
* Static initializer for condition variable.
* You can do something like struct aws_condition_variable var =
* AWS_CONDITION_VARIABLE_INIT;
*
* If on Windows and you get an error about AWS_CONDITION_VARIABLE_INIT being undefined, please include windows.h to get
* CONDITION_VARIABLE_INIT.
*/
#ifdef _WIN32
# define AWS_CONDITION_VARIABLE_INIT {.condition_handle = NULL, .initialized = true}
#else
# define AWS_CONDITION_VARIABLE_INIT {.condition_handle = PTHREAD_COND_INITIALIZER, .initialized = true}
#endif
AWS_EXTERN_C_BEGIN
/**
* Initializes a condition variable.
*/
AWS_COMMON_API
int aws_condition_variable_init(struct aws_condition_variable *condition_variable);
/**
* Cleans up a condition variable.
*/
AWS_COMMON_API
void aws_condition_variable_clean_up(struct aws_condition_variable *condition_variable);
/**
* Notifies/Wakes one waiting thread
*/
AWS_COMMON_API
int aws_condition_variable_notify_one(struct aws_condition_variable *condition_variable);
/**
* Notifies/Wakes all waiting threads.
*/
AWS_COMMON_API
int aws_condition_variable_notify_all(struct aws_condition_variable *condition_variable);
/**
* Waits the calling thread on a notification from another thread. This function must be called with the mutex locked
* by the calling thread otherwise the behavior is undefined. Spurious wakeups can occur and to avoid this causing
* any problems use the _pred version of this function.
*/
AWS_COMMON_API
int aws_condition_variable_wait(struct aws_condition_variable *condition_variable, struct aws_mutex *mutex);
/**
* Waits the calling thread on a notification from another thread. If predicate returns false, the wait is reentered,
* otherwise control returns to the caller. This function must be called with the mutex locked by the calling thread
* otherwise the behavior is undefined.
*/
AWS_COMMON_API
int aws_condition_variable_wait_pred(
struct aws_condition_variable *condition_variable,
struct aws_mutex *mutex,
aws_condition_predicate_fn *pred,
void *pred_ctx);
/**
* Waits the calling thread on a notification from another thread. Times out after time_to_wait. time_to_wait is in
* nanoseconds. This function must be called with the mutex locked by the calling thread otherwise the behavior is
* undefined. Spurious wakeups can occur and to avoid this causing any problems use the _pred version of this function.
*/
AWS_COMMON_API
int aws_condition_variable_wait_for(
struct aws_condition_variable *condition_variable,
struct aws_mutex *mutex,
int64_t time_to_wait);
/**
* Waits the calling thread on a notification from another thread. Times out after time_to_wait. time_to_wait is in
* nanoseconds. If predicate returns false, the wait is reentered, otherwise control returns to the caller. This
* function must be called with the mutex locked by the calling thread otherwise the behavior is undefined.
*/
AWS_COMMON_API
int aws_condition_variable_wait_for_pred(
struct aws_condition_variable *condition_variable,
struct aws_mutex *mutex,
int64_t time_to_wait,
aws_condition_predicate_fn *pred,
void *pred_ctx);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_CONDITION_VARIABLE_H */

View File

@@ -0,0 +1,33 @@
#ifndef AWS_COMMON_CONFIG_H
#define AWS_COMMON_CONFIG_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header exposes compiler feature test results determined during cmake
* configure time to inline function implementations. The macros defined here
* should be considered to be an implementation detail, and can change at any
* time.
*/
/* #undef AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS */
/* #undef AWS_HAVE_GCC_INLINE_ASM */
#define AWS_HAVE_MSVC_INTRINSICS_X64
/* #undef AWS_HAVE_POSIX_LARGE_FILE_SUPPORT */
/* #undef AWS_HAVE_EXECINFO */
#define AWS_HAVE_WINAPI_DESKTOP
/* #undef AWS_HAVE_LINUX_IF_LINK_H */
#define AWS_HAVE_AVX2_INTRINSICS
#define AWS_HAVE_AVX512_INTRINSICS
#define AWS_HAVE_MM256_EXTRACT_EPI64
#define AWS_HAVE_CLMUL
/* #undef AWS_HAVE_ARM32_CRC */
/* #undef AWS_HAVE_ARMv8_1 */
/* #undef AWS_ARCH_ARM64 */
#define AWS_ARCH_INTEL
#define AWS_ARCH_INTEL_X64
#define AWS_USE_CPU_EXTENSIONS
#endif

View File

@@ -0,0 +1,36 @@
#ifndef AWS_COMMON_CPUID_H
#define AWS_COMMON_CPUID_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
/* CPU features that can be queried at runtime via aws_cpu_has_feature(). */
enum aws_cpu_feature_name {
    AWS_CPU_FEATURE_CLMUL,      /* x86 carry-less multiplication (PCLMULQDQ) */
    AWS_CPU_FEATURE_SSE_4_1,    /* x86 SSE 4.1 */
    AWS_CPU_FEATURE_SSE_4_2,    /* x86 SSE 4.2 */
    AWS_CPU_FEATURE_AVX2,       /* x86 AVX2 */
    AWS_CPU_FEATURE_AVX512,     /* x86 AVX-512 */
    AWS_CPU_FEATURE_ARM_CRC,    /* ARM CRC32 instructions */
    AWS_CPU_FEATURE_BMI2,       /* x86 Bit Manipulation Instruction Set 2 */
    AWS_CPU_FEATURE_VPCLMULQDQ, /* x86 vectorized carry-less multiplication */
    AWS_CPU_FEATURE_ARM_PMULL,  /* ARM polynomial multiply (PMULL) */
    AWS_CPU_FEATURE_ARM_CRYPTO, /* ARM cryptography extensions */
    AWS_CPU_FEATURE_COUNT,      /* number of entries above; not itself a feature */
};
AWS_EXTERN_C_BEGIN
/**
* Returns true if a cpu feature is supported, false otherwise.
*/
AWS_COMMON_API bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_CPUID_H */

View File

@@ -0,0 +1,35 @@
#ifndef AWS_COMMON_CROSS_PROCESS_LOCK_H
#define AWS_COMMON_CROSS_PROCESS_LOCK_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
struct aws_cross_process_lock;
AWS_EXTERN_C_BEGIN
/**
* Attempts to acquire a system-wide (not per process or per user) lock scoped by instance_nonce.
* For any given unique nonce, a lock will be returned by the first caller. Subsequent calls will
* return NULL and raise AWS_ERROR_MUTEX_CALLER_NOT_OWNER
 * until either the process owning the lock exits or the program owning the lock
* calls aws_cross_process_lock_release() explicitly.
*
* If the process exits before the lock is released, the kernel will unlock it for the next consumer.
*/
AWS_COMMON_API
struct aws_cross_process_lock *aws_cross_process_lock_try_acquire(
struct aws_allocator *allocator,
struct aws_byte_cursor instance_nonce);
/**
* Releases the lock so the next caller (may be another process) can get an instance of the lock.
*/
AWS_COMMON_API
void aws_cross_process_lock_release(struct aws_cross_process_lock *instance_lock);
AWS_EXTERN_C_END
#endif /* AWS_COMMON_CROSS_PROCESS_LOCK_H */

View File

@@ -0,0 +1,167 @@
#ifndef AWS_COMMON_DATE_TIME_H
#define AWS_COMMON_DATE_TIME_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <time.h>
AWS_PUSH_SANE_WARNING_LEVEL
enum {
AWS_DATE_TIME_STR_MAX_LEN = 100,
AWS_DATE_TIME_STR_MAX_BASIC_LEN = 20,
};
struct aws_byte_buf;
struct aws_byte_cursor;
enum aws_date_format {
AWS_DATE_FORMAT_RFC822,
AWS_DATE_FORMAT_ISO_8601,
AWS_DATE_FORMAT_ISO_8601_BASIC,
AWS_DATE_FORMAT_AUTO_DETECT,
};
enum aws_date_month {
AWS_DATE_MONTH_JANUARY = 0,
AWS_DATE_MONTH_FEBRUARY,
AWS_DATE_MONTH_MARCH,
AWS_DATE_MONTH_APRIL,
AWS_DATE_MONTH_MAY,
AWS_DATE_MONTH_JUNE,
AWS_DATE_MONTH_JULY,
AWS_DATE_MONTH_AUGUST,
AWS_DATE_MONTH_SEPTEMBER,
AWS_DATE_MONTH_OCTOBER,
AWS_DATE_MONTH_NOVEMBER,
AWS_DATE_MONTH_DECEMBER,
};
enum aws_date_day_of_week {
AWS_DATE_DAY_OF_WEEK_SUNDAY = 0,
AWS_DATE_DAY_OF_WEEK_MONDAY,
AWS_DATE_DAY_OF_WEEK_TUESDAY,
AWS_DATE_DAY_OF_WEEK_WEDNESDAY,
AWS_DATE_DAY_OF_WEEK_THURSDAY,
AWS_DATE_DAY_OF_WEEK_FRIDAY,
AWS_DATE_DAY_OF_WEEK_SATURDAY,
};
/*
 * A point in time plus cached broken-down representations.
 * Initialize with one of the aws_date_time_init_* functions below.
 */
struct aws_date_time {
    time_t timestamp;      /* whole seconds; presumably since unix epoch (see the epoch init functions) */
    uint16_t milliseconds; /* fractional-second component */
    char tz[6];            /* time zone text, e.g. "+0100" -- NOTE(review): exact format assumed from parser docs above; confirm */
    struct tm gmt_time;    /* broken-down UTC time */
    struct tm local_time;  /* broken-down local time */
    bool utc_assumed;      /* NOTE(review): presumably true when input carried no tz info and UTC was assumed -- confirm */
};
AWS_EXTERN_C_BEGIN
/**
* Initializes dt to be the current system time.
*/
AWS_COMMON_API void aws_date_time_init_now(struct aws_date_time *dt);
/**
* Initializes dt to be the time represented in milliseconds since unix epoch.
*/
AWS_COMMON_API void aws_date_time_init_epoch_millis(struct aws_date_time *dt, uint64_t ms_since_epoch);
/**
* Initializes dt to be the time represented in seconds.millis since unix epoch.
*/
AWS_COMMON_API void aws_date_time_init_epoch_secs(struct aws_date_time *dt, double sec_ms);
/**
* Initializes dt to be the time represented by date_str in format 'fmt'. Returns AWS_OP_SUCCESS if the
* string was successfully parsed, returns AWS_OP_ERR if parsing failed.
*
* The parser is lenient regarding AWS_DATE_FORMAT_ISO_8601 vs AWS_DATE_FORMAT_ISO_8601_BASIC.
* Regardless of which you pass in, both "2002-10-02T08:05:09Z" and "20021002T080509Z" would be accepted.
*
* Notes for AWS_DATE_FORMAT_RFC822:
* If no time zone information is provided, it is assumed to be local time (please don't do this).
*
* Only time zones indicating Universal Time (e.g. Z, UT, UTC, or GMT),
* or offsets from UTC (e.g. +0100, -0700), are accepted.
*
* Really, it's just better if you always use Universal Time.
*/
AWS_COMMON_API int aws_date_time_init_from_str(
struct aws_date_time *dt,
const struct aws_byte_buf *date_str,
enum aws_date_format fmt);
/**
* aws_date_time_init variant that takes a byte_cursor rather than a byte_buf
*/
AWS_COMMON_API int aws_date_time_init_from_str_cursor(
struct aws_date_time *dt,
const struct aws_byte_cursor *date_str_cursor,
enum aws_date_format fmt);
/**
* Copies the current time as a formatted date string in local time into output_buf. If buffer is too small, it will
* return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not
* allowed.
*/
AWS_COMMON_API int aws_date_time_to_local_time_str(
const struct aws_date_time *dt,
enum aws_date_format fmt,
struct aws_byte_buf *output_buf);
/**
* Copies the current time as a formatted date string in utc time into output_buf. If buffer is too small, it will
* return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not
* allowed.
*/
AWS_COMMON_API int aws_date_time_to_utc_time_str(
const struct aws_date_time *dt,
enum aws_date_format fmt,
struct aws_byte_buf *output_buf);
/**
* Copies the current time as a formatted short date string in local time into output_buf. If buffer is too small, it
* will return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not
* allowed.
*/
AWS_COMMON_API int aws_date_time_to_local_time_short_str(
const struct aws_date_time *dt,
enum aws_date_format fmt,
struct aws_byte_buf *output_buf);
/**
* Copies the current time as a formatted short date string in utc time into output_buf. If buffer is too small, it will
* return AWS_OP_ERR. A good size suggestion is AWS_DATE_TIME_STR_MAX_LEN bytes. AWS_DATE_FORMAT_AUTO_DETECT is not
* allowed.
*/
AWS_COMMON_API int aws_date_time_to_utc_time_short_str(
const struct aws_date_time *dt,
enum aws_date_format fmt,
struct aws_byte_buf *output_buf);
AWS_COMMON_API double aws_date_time_as_epoch_secs(const struct aws_date_time *dt);
AWS_COMMON_API uint64_t aws_date_time_as_nanos(const struct aws_date_time *dt);
AWS_COMMON_API uint64_t aws_date_time_as_millis(const struct aws_date_time *dt);
AWS_COMMON_API uint16_t aws_date_time_year(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API enum aws_date_month aws_date_time_month(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API uint8_t aws_date_time_month_day(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API enum aws_date_day_of_week aws_date_time_day_of_week(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API uint8_t aws_date_time_hour(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API uint8_t aws_date_time_minute(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API uint8_t aws_date_time_second(const struct aws_date_time *dt, bool local_time);
AWS_COMMON_API bool aws_date_time_dst(const struct aws_date_time *dt, bool local_time);
/**
* returns the difference of a and b (a - b) in seconds.
*/
AWS_COMMON_API time_t aws_date_time_diff(const struct aws_date_time *a, const struct aws_date_time *b);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_DATE_TIME_H */

View File

@@ -0,0 +1,50 @@
#ifndef AWS_COMMON_DEVICE_RANDOM_H
#define AWS_COMMON_DEVICE_RANDOM_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_byte_buf;
AWS_EXTERN_C_BEGIN
/**
* Get an unpredictably random 64bit number, suitable for cryptographic use.
*/
AWS_COMMON_API int aws_device_random_u64(uint64_t *output);
/**
* Get an unpredictably random 32bit number, suitable for cryptographic use.
*/
AWS_COMMON_API int aws_device_random_u32(uint32_t *output);
/**
* Get an unpredictably random 16bit number, suitable for cryptographic use.
*/
AWS_COMMON_API int aws_device_random_u16(uint16_t *output);
/**
* Get an unpredictably random 8bit number, suitable for cryptographic use.
*/
AWS_COMMON_API int aws_device_random_u8(uint8_t *output);
/**
* Fill the rest of a buffer with unpredictably random bytes, suitable for cryptographic use.
*/
AWS_COMMON_API int aws_device_random_buffer(struct aws_byte_buf *output);
/**
* Write N unpredictably random bytes to a buffer, suitable for cryptographic use.
* If there is insufficient space in the buffer, AWS_ERROR_SHORT_BUFFER is raised
* and the buffer will be unchanged.
*/
AWS_COMMON_API int aws_device_random_buffer_append(struct aws_byte_buf *output, size_t n);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_DEVICE_RANDOM_H */

View File

@@ -0,0 +1,235 @@
#ifndef AWS_COMMON_ENCODING_H
#define AWS_COMMON_ENCODING_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
#include <memory.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/*
* computes the length necessary to store the result of aws_hex_encode().
* returns -1 on failure, and 0 on success. encoded_length will be set on
* success.
*/
AWS_COMMON_API
int aws_hex_compute_encoded_len(size_t to_encode_len, size_t *encoded_length);
/*
* Base 16 (hex) encodes the contents of to_encode and stores the result in
* output. 0 terminates the result. Assumes the buffer is empty and does not resize on
* insufficient capacity.
*/
AWS_COMMON_API
int aws_hex_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output);
/*
* Base 16 (hex) encodes the contents of to_encode and appends the result in
* output. Does not 0-terminate. Grows the destination buffer dynamically if necessary.
*/
AWS_COMMON_API
int aws_hex_encode_append_dynamic(
const struct aws_byte_cursor *AWS_RESTRICT to_encode,
struct aws_byte_buf *AWS_RESTRICT output);
/*
* computes the length necessary to store the result of aws_hex_decode().
* returns -1 on failure, and 0 on success. decoded_len will be set on success.
*/
AWS_COMMON_API
int aws_hex_compute_decoded_len(size_t to_decode_len, size_t *decoded_len);
/*
 * Base 16 (hex) decodes the contents of to_decode and stores the result in
 * output. Use aws_hex_compute_decoded_len() to determine the required
 * capacity of the output buffer.
 */
AWS_COMMON_API
int aws_hex_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output);
/*
* Computes the length necessary to store the output of aws_base64_encode call.
* returns -1 on failure, and 0 on success. encoded_length will be set on
* success.
*/
AWS_COMMON_API
int aws_base64_compute_encoded_len(size_t to_encode_len, size_t *encoded_len);
/*
* Base 64 encodes the contents of to_encode and stores the result in output.
*/
AWS_COMMON_API
int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, struct aws_byte_buf *AWS_RESTRICT output);
/*
* Computes the length necessary to store the output of aws_base64_decode call.
* returns -1 on failure, and 0 on success. decoded_len will be set on success.
*/
AWS_COMMON_API
int aws_base64_compute_decoded_len(const struct aws_byte_cursor *AWS_RESTRICT to_decode, size_t *decoded_len);
/*
* Base 64 decodes the contents of to_decode and stores the result in output.
*/
AWS_COMMON_API
int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, struct aws_byte_buf *AWS_RESTRICT output);
/* Add a 64 bit unsigned integer to the buffer, ensuring network - byte order
* Assumes the buffer size is at least 8 bytes.
*/
AWS_STATIC_IMPL void aws_write_u64(uint64_t value, uint8_t *buffer);
/*
* Extracts a 64 bit unsigned integer from buffer. Ensures conversion from
* network byte order to host byte order. Assumes buffer size is at least 8
* bytes.
*/
AWS_STATIC_IMPL uint64_t aws_read_u64(const uint8_t *buffer);
/* Add a 32 bit unsigned integer to the buffer, ensuring network - byte order
* Assumes the buffer size is at least 4 bytes.
*/
AWS_STATIC_IMPL void aws_write_u32(uint32_t value, uint8_t *buffer);
/*
* Extracts a 32 bit unsigned integer from buffer. Ensures conversion from
* network byte order to host byte order. Assumes the buffer size is at least 4
* bytes.
*/
AWS_STATIC_IMPL uint32_t aws_read_u32(const uint8_t *buffer);
/* Add a 24 bit unsigned integer to the buffer, ensuring network byte order.
 * Note, since this uses uint32_t for storage, the 3 least significant bytes
 * will be used. Assumes buffer is at least 3 bytes long.
 */
AWS_STATIC_IMPL void aws_write_u24(uint32_t value, uint8_t *buffer);
/*
* Extracts a 24 bit unsigned integer from buffer. Ensures conversion from
* network byte order to host byte order. Assumes buffer is at least 3 bytes
* long.
*/
AWS_STATIC_IMPL uint32_t aws_read_u24(const uint8_t *buffer);
/* Add a 16 bit unsigned integer to the buffer, ensuring network-byte order.
 * Assumes buffer is at least 2 bytes long.
 */
AWS_STATIC_IMPL void aws_write_u16(uint16_t value, uint8_t *buffer);
/*
* Extracts a 16 bit unsigned integer from buffer. Ensures conversion from
* network byte order to host byte order. Assumes buffer is at least 2 bytes
* long.
*/
AWS_STATIC_IMPL uint16_t aws_read_u16(const uint8_t *buffer);
enum aws_text_encoding {
AWS_TEXT_UNKNOWN,
AWS_TEXT_UTF8,
AWS_TEXT_UTF16,
AWS_TEXT_UTF32,
AWS_TEXT_ASCII,
};
/* Checks the BOM in the buffer to see if encoding can be determined. If there is no BOM or
* it is unrecognizable, then AWS_TEXT_UNKNOWN will be returned.
*/
AWS_STATIC_IMPL enum aws_text_encoding aws_text_detect_encoding(const uint8_t *bytes, size_t size);
/*
* Returns true if aws_text_detect_encoding() determines the text is UTF8 or ASCII.
* Note that this immediately returns true if the UTF8 BOM is seen.
* To fully validate every byte, use aws_decode_utf8().
*/
AWS_STATIC_IMPL bool aws_text_is_utf8(const uint8_t *bytes, size_t size);
/* Options for UTF8 decoding; pass to aws_decode_utf8() or aws_utf8_decoder_new(). */
struct aws_utf8_decoder_options {
    /**
     * Optional.
     * Callback invoked for each Unicode codepoint.
     * Use this callback to store codepoints as they're decoded,
     * or to perform additional validation. RFC-3629 is already enforced,
     * which forbids codepoints between U+D800 and U+DFFF,
     * but you may wish to forbid codepoints like U+0000.
     *
     * @return AWS_OP_SUCCESS to continue processing the string, otherwise
     * return AWS_OP_ERR and raise an error (i.e. AWS_ERROR_INVALID_UTF8)
     * to stop processing the string and report failure.
     */
    int (*on_codepoint)(uint32_t codepoint, void *user_data);
    /* Optional. Pointer passed through to the on_codepoint callback. */
    void *user_data;
};
/**
* Decode a complete string of UTF8/ASCII text.
* Text is always validated according to RFC-3629 (you may perform additional
* validation in the on_codepoint callback).
* The text does not need to begin with a UTF8 BOM.
* If you need to decode text incrementally as you receive it, use aws_utf8_decoder_new() instead.
*
* @param bytes Text to decode.
* @param options Options for decoding. If NULL is passed, the text is simply validated.
*
* @return AWS_OP_SUCCESS if successful.
* An error is raised if the text is not valid, or the on_codepoint callback raises an error.
*/
AWS_COMMON_API int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options);
struct aws_utf8_decoder;
/**
* Create a UTF8/ASCII decoder, which can process text incrementally as you receive it.
* Text is always validated according to RFC-3629 (you may perform additional
* validation in the on_codepoint callback).
* The text does not need to begin with a UTF8 BOM.
* To decode text all at once, simply use aws_decode_utf8().
*
* Feed bytes into the decoder with aws_utf8_decoder_update(),
* and call aws_utf8_decoder_finalize() when the text is complete.
*
* @param allocator Allocator
* @param options Options for decoder. If NULL is passed, the text is simply validated.
*/
AWS_COMMON_API struct aws_utf8_decoder *aws_utf8_decoder_new(
struct aws_allocator *allocator,
const struct aws_utf8_decoder_options *options);
AWS_COMMON_API void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder);
AWS_COMMON_API void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder);
/**
* Update the decoder with more bytes of text.
* The on_codepoint callback will be invoked for each codepoint encountered.
* Raises an error if invalid UTF8 is encountered or the on_codepoint callback reports an error.
*
* Note: You must call aws_utf8_decoder_finalize() when the text is 100% complete,
* to ensure the input was completely valid.
*/
AWS_COMMON_API int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes);
/**
* Tell the decoder that you've reached the end of your text.
* Raises AWS_ERROR_INVALID_UTF8 if the text did not end with a complete UTF8 codepoint.
* This also resets the decoder.
*/
AWS_COMMON_API int aws_utf8_decoder_finalize(struct aws_utf8_decoder *decoder);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/encoding.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ENCODING_H */

View File

@@ -0,0 +1,142 @@
#ifndef AWS_COMMON_ENCODING_INL
#define AWS_COMMON_ENCODING_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/byte_order.h>
#include <aws/common/common.h>
#include <aws/common/encoding.h>
AWS_EXTERN_C_BEGIN
/*
 * Writes a 64 bit unsigned integer into buffer in network byte order.
 * Assumes the buffer holds at least 8 bytes.
 */
AWS_STATIC_IMPL void aws_write_u64(uint64_t value, uint8_t *buffer) {
    const uint64_t be_value = aws_hton64(value);
    memcpy(buffer, &be_value, sizeof(be_value));
}
/*
 * Reads a 64 bit unsigned integer from buffer, converting from network byte
 * order to host byte order. Assumes the buffer holds at least 8 bytes.
 */
AWS_STATIC_IMPL uint64_t aws_read_u64(const uint8_t *buffer) {
    uint64_t be_value = 0;
    memcpy(&be_value, buffer, sizeof(be_value));
    return aws_ntoh64(be_value);
}
/*
 * Writes a 32 bit unsigned integer into buffer in network byte order.
 * Assumes the buffer holds at least 4 bytes.
 */
AWS_STATIC_IMPL void aws_write_u32(uint32_t value, uint8_t *buffer) {
    const uint32_t be_value = aws_hton32(value);
    memcpy(buffer, &be_value, sizeof(be_value));
}
/*
 * Reads a 32 bit unsigned integer from buffer, converting from network byte
 * order to host byte order. Assumes the buffer holds at least 4 bytes.
 */
AWS_STATIC_IMPL uint32_t aws_read_u32(const uint8_t *buffer) {
    uint32_t be_value = 0;
    memcpy(&be_value, buffer, sizeof(be_value));
    return aws_ntoh32(be_value);
}
/*
 * Writes the 3 least significant bytes of value into buffer in network byte
 * order. Assumes the buffer holds at least 3 bytes.
 */
AWS_STATIC_IMPL void aws_write_u24(uint32_t value, uint8_t *buffer) {
    const uint32_t be_value = aws_hton32(value);
    /* The leading byte of the big-endian representation is the unused high
     * byte, so copy starting one byte in. */
    memcpy(buffer, (const uint8_t *)&be_value + 1, sizeof(be_value) - 1);
}
/*
 * Reads a 24 bit unsigned integer from buffer, converting from network byte
 * order to host byte order. Assumes the buffer holds at least 3 bytes.
 */
AWS_STATIC_IMPL uint32_t aws_read_u24(const uint8_t *buffer) {
    uint32_t be_value = 0;
    /* Place the 3 input bytes in the low positions of the big-endian
     * representation; the high byte stays zero. */
    memcpy((uint8_t *)&be_value + 1, buffer, sizeof(be_value) - 1);
    return aws_ntoh32(be_value);
}
/*
 * Writes a 16 bit unsigned integer into buffer in network byte order.
 * Assumes the buffer holds at least 2 bytes.
 */
AWS_STATIC_IMPL void aws_write_u16(uint16_t value, uint8_t *buffer) {
    const uint16_t be_value = aws_hton16(value);
    memcpy(buffer, &be_value, sizeof(be_value));
}
/*
 * Reads a 16 bit unsigned integer from buffer, converting from network byte
 * order to host byte order. Assumes the buffer holds at least 2 bytes.
 */
AWS_STATIC_IMPL uint16_t aws_read_u16(const uint8_t *buffer) {
    uint16_t be_value = 0;
    memcpy(&be_value, buffer, sizeof(be_value));
    return aws_ntoh16(be_value);
}
/* Reference: https://unicodebook.readthedocs.io/guess_encoding.html */
AWS_STATIC_IMPL enum aws_text_encoding aws_text_detect_encoding(const uint8_t *bytes, size_t size) {
    static const char *UTF_8_BOM = "\xEF\xBB\xBF";
    static const char *UTF_16_BE_BOM = "\xFE\xFF";
    static const char *UTF_16_LE_BOM = "\xFF\xFE";
    static const char *UTF_32_BE_BOM = "\x00\x00\xFE\xFF";
    static const char *UTF_32_LE_BOM = "\xFF\xFE\x00\x00";
    /* Check the UTF-8 BOM first, then the 4-byte UTF-32 BOMs before the
     * 2-byte UTF-16 BOMs (the UTF-32 LE BOM begins with the UTF-16 LE BOM). */
    if (size >= 3 && memcmp(bytes, UTF_8_BOM, 3) == 0) {
        return AWS_TEXT_UTF8;
    }
    if (size >= 4 && (memcmp(bytes, UTF_32_LE_BOM, 4) == 0 || memcmp(bytes, UTF_32_BE_BOM, 4) == 0)) {
        return AWS_TEXT_UTF32;
    }
    if (size >= 2 && (memcmp(bytes, UTF_16_LE_BOM, 2) == 0 || memcmp(bytes, UTF_16_BE_BOM, 2) == 0)) {
        return AWS_TEXT_UTF16;
    }
    /* No BOM: classify as ASCII only if every byte is 7-bit. */
    for (size_t i = 0; i < size; ++i) {
        if (bytes[i] & 0x80) {
            return AWS_TEXT_UNKNOWN;
        }
    }
    return AWS_TEXT_ASCII;
}
/*
 * Returns true when BOM-based detection classifies the text as UTF8 or ASCII.
 */
AWS_STATIC_IMPL bool aws_text_is_utf8(const uint8_t *bytes, size_t size) {
    switch (aws_text_detect_encoding(bytes, size)) {
        case AWS_TEXT_UTF8:
        case AWS_TEXT_ASCII:
            return true;
        default:
            return false;
    }
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ENCODING_INL */

View File

@@ -0,0 +1,49 @@
#ifndef AWS_COMMON_ENVIRONMENT_H
#define AWS_COMMON_ENVIRONMENT_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_string;
/*
* Simple shims to the appropriate platform calls for environment variable manipulation.
*
* Not thread safe to use set/unset unsynced with get. Set/unset only used in unit tests.
*/
AWS_EXTERN_C_BEGIN
/*
* Get the value of an environment variable. If the variable is not set, the output string will be set to NULL.
* Not thread-safe
*/
AWS_COMMON_API
int aws_get_environment_value(
struct aws_allocator *allocator,
const struct aws_string *variable_name,
struct aws_string **value_out);
/*
* Set the value of an environment variable. On Windows, setting a variable to the empty string will actually unset it.
* Not thread-safe
*/
AWS_COMMON_API
int aws_set_environment_value(const struct aws_string *variable_name, const struct aws_string *value);
/*
* Unset an environment variable.
* Not thread-safe
*/
AWS_COMMON_API
int aws_unset_environment_value(const struct aws_string *variable_name);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ENVIRONMENT_H */

View File

@@ -0,0 +1,225 @@
#ifndef AWS_COMMON_ERROR_H
#define AWS_COMMON_ERROR_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/assert.h>
#include <aws/common/exports.h>
#include <aws/common/macros.h>
#include <aws/common/package.h>
#include <aws/common/stdint.h>
AWS_PUSH_SANE_WARNING_LEVEL
#define AWS_OP_SUCCESS (0)
#define AWS_OP_ERR (-1)
/* Each library gets space for 2^^10 error entries */
#define AWS_ERROR_ENUM_STRIDE_BITS 10
#define AWS_ERROR_ENUM_STRIDE (1U << AWS_ERROR_ENUM_STRIDE_BITS)
#define AWS_ERROR_ENUM_BEGIN_RANGE(x) ((x) * AWS_ERROR_ENUM_STRIDE)
#define AWS_ERROR_ENUM_END_RANGE(x) (((x) + 1) * AWS_ERROR_ENUM_STRIDE - 1)
/* Describes a single error code: its numeric value and human-readable strings.
 * Typically built with the AWS_DEFINE_ERROR_INFO macro below. */
struct aws_error_info {
    int error_code;             /* numeric error value (see AWS_ERROR_ENUM_* range macros) */
    const char *literal_name;   /* stringified enum identifier, e.g. "AWS_ERROR_OOM" */
    const char *error_str;      /* human-readable description */
    const char *lib_name;       /* name of the library the error belongs to */
    const char *formatted_name; /* "<lib_name>: <literal_name>, <error_str>" */
};
/* A contiguous array of error descriptions, registered via aws_register_error_info(). */
struct aws_error_info_list {
    const struct aws_error_info *error_list;
    uint16_t count;
};
#define AWS_DEFINE_ERROR_INFO(C, ES, LN) \
{ \
.literal_name = #C, \
.error_code = (C), \
.error_str = (ES), \
.lib_name = (LN), \
.formatted_name = LN ": " #C ", " ES, \
}
typedef void(aws_error_handler_fn)(int err, void *ctx);
AWS_EXTERN_C_BEGIN
/*
* Returns the latest error code on the current thread, or 0 if none have
* occurred.
*/
AWS_COMMON_API
int aws_last_error(void);
/*
* Returns the error str corresponding to `err`.
*/
AWS_COMMON_API
const char *aws_error_str(int err);
/*
* Returns the enum name corresponding to `err`.
*/
AWS_COMMON_API
const char *aws_error_name(int err);
/*
* Returns the error lib name corresponding to `err`.
*/
AWS_COMMON_API
const char *aws_error_lib_name(int err);
/*
* Returns libname concatenated with error string.
*/
AWS_COMMON_API
const char *aws_error_debug_str(int err);
/*
* Internal implementation detail.
*/
AWS_COMMON_API
void aws_raise_error_private(int err);
/*
* Raises `err` to the installed callbacks, and sets the thread's error.
*/
AWS_STATIC_IMPL
int aws_raise_error(int err);
/*
* Resets the `err` back to defaults
*/
AWS_COMMON_API
void aws_reset_error(void);
/*
* Sets `err` to the latest error. Does not invoke callbacks.
*/
AWS_COMMON_API
void aws_restore_error(int err);
/*
* Sets an application wide error handler function. This will be overridden by
* the thread local handler. The previous handler is returned, this can be used
* for restoring an error handler if it needs to be overridden temporarily.
* Setting this to NULL will turn off this error callback after it has been
* enabled.
*/
AWS_COMMON_API
aws_error_handler_fn *aws_set_global_error_handler_fn(aws_error_handler_fn *handler, void *ctx);
/*
* Sets a thread-local error handler function. This will override the global
* handler. The previous handler is returned, this can be used for restoring an
* error handler if it needs to be overridden temporarily. Setting this to NULL
* will turn off this error callback after it has been enabled.
*/
AWS_COMMON_API
aws_error_handler_fn *aws_set_thread_local_error_handler_fn(aws_error_handler_fn *handler, void *ctx);
/** TODO: this needs to be a private function (wait till we have the cmake story
* better before moving it though). It should be external for the purpose of
* other libs we own, but customers should not be able to hit it without going
* out of their way to do so.
*/
AWS_COMMON_API
void aws_register_error_info(const struct aws_error_info_list *error_info);
AWS_COMMON_API
void aws_unregister_error_info(const struct aws_error_info_list *error_info);
/**
* Convert a c library io error into an aws error, and raise it.
* If no conversion is found, fallback_aws_error_code is raised.
* Always returns AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_translate_and_raise_io_error_or(int error_no, int fallback_aws_error_code);
/**
* Convert a c library io error into an aws error, and raise it.
* If no conversion is found, AWS_ERROR_SYS_CALL_FAILURE is raised.
* Always returns AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_translate_and_raise_io_error(int error_no);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/error.inl>
#endif /* AWS_NO_STATIC_IMPL */
enum aws_common_error {
AWS_ERROR_SUCCESS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID),
AWS_ERROR_OOM,
AWS_ERROR_NO_SPACE,
AWS_ERROR_UNKNOWN,
AWS_ERROR_SHORT_BUFFER,
AWS_ERROR_OVERFLOW_DETECTED,
AWS_ERROR_UNSUPPORTED_OPERATION,
AWS_ERROR_INVALID_BUFFER_SIZE,
AWS_ERROR_INVALID_HEX_STR,
AWS_ERROR_INVALID_BASE64_STR,
AWS_ERROR_INVALID_INDEX,
AWS_ERROR_THREAD_INVALID_SETTINGS,
AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE,
AWS_ERROR_THREAD_NO_PERMISSIONS,
AWS_ERROR_THREAD_NOT_JOINABLE,
AWS_ERROR_THREAD_NO_SUCH_THREAD_ID,
AWS_ERROR_THREAD_DEADLOCK_DETECTED,
AWS_ERROR_MUTEX_NOT_INIT,
AWS_ERROR_MUTEX_TIMEOUT,
AWS_ERROR_MUTEX_CALLER_NOT_OWNER,
AWS_ERROR_MUTEX_FAILED,
AWS_ERROR_COND_VARIABLE_INIT_FAILED,
AWS_ERROR_COND_VARIABLE_TIMED_OUT,
AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN,
AWS_ERROR_CLOCK_FAILURE,
AWS_ERROR_LIST_EMPTY,
AWS_ERROR_DEST_COPY_TOO_SMALL,
AWS_ERROR_LIST_EXCEEDS_MAX_SIZE,
AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK,
AWS_ERROR_PRIORITY_QUEUE_FULL,
AWS_ERROR_PRIORITY_QUEUE_EMPTY,
AWS_ERROR_PRIORITY_QUEUE_BAD_NODE,
AWS_ERROR_HASHTBL_ITEM_NOT_FOUND,
AWS_ERROR_INVALID_DATE_STR,
AWS_ERROR_INVALID_ARGUMENT,
AWS_ERROR_RANDOM_GEN_FAILED,
AWS_ERROR_MALFORMED_INPUT_STRING,
AWS_ERROR_UNIMPLEMENTED,
AWS_ERROR_INVALID_STATE,
AWS_ERROR_ENVIRONMENT_GET,
AWS_ERROR_ENVIRONMENT_SET,
AWS_ERROR_ENVIRONMENT_UNSET,
AWS_ERROR_STREAM_UNSEEKABLE,
AWS_ERROR_NO_PERMISSION,
AWS_ERROR_FILE_INVALID_PATH,
AWS_ERROR_MAX_FDS_EXCEEDED,
AWS_ERROR_SYS_CALL_FAILURE,
AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
AWS_ERROR_STRING_MATCH_NOT_FOUND,
AWS_ERROR_DIVIDE_BY_ZERO,
AWS_ERROR_INVALID_FILE_HANDLE,
AWS_ERROR_OPERATION_INTERUPTED,
AWS_ERROR_DIRECTORY_NOT_EMPTY,
AWS_ERROR_PLATFORM_NOT_SUPPORTED,
AWS_ERROR_INVALID_UTF8,
AWS_ERROR_GET_HOME_DIRECTORY_FAILED,
AWS_ERROR_INVALID_XML,
AWS_ERROR_FILE_OPEN_FAILURE,
AWS_ERROR_FILE_READ_FAILURE,
AWS_ERROR_FILE_WRITE_FAILURE,
AWS_ERROR_INVALID_CBOR,
AWS_ERROR_CBOR_UNEXPECTED_TYPE,
AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ERROR_H */

View File

@@ -0,0 +1,31 @@
#ifndef AWS_COMMON_ERROR_INL
#define AWS_COMMON_ERROR_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/error.h>
AWS_EXTERN_C_BEGIN
/*
 * Raises `err` to the installed callbacks, and sets the thread's error.
 * Always returns AWS_OP_ERR, enabling the common `return aws_raise_error(code);` idiom.
 */
AWS_STATIC_IMPL
int aws_raise_error(int err) {
    /*
     * Certain static analyzers can't see through the out-of-line call to aws_raise_error,
     * and assume that this might return AWS_OP_SUCCESS. We'll put the return inline just
     * to help with their assumptions.
     */
    aws_raise_error_private(err);
    return AWS_OP_ERR;
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ERROR_INL */

View File

@@ -0,0 +1,40 @@
#ifndef AWS_COMMON_EXPORTS_H
#define AWS_COMMON_EXPORTS_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
 * AWS_COMMON_API decorates the library's public symbols.
 * On Windows (or when Windows DLL semantics are explicitly requested), use
 * __declspec import/export — but only when shared-library builds are enabled
 * via AWS_COMMON_USE_IMPORT_EXPORT; static builds need no decoration.
 */
#if defined(AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined(_WIN32)
# ifdef AWS_COMMON_USE_IMPORT_EXPORT
/* AWS_COMMON_EXPORTS is defined while compiling the library itself;
 * consumers of the DLL see the dllimport side. */
# ifdef AWS_COMMON_EXPORTS
# define AWS_COMMON_API __declspec(dllexport)
# else
# define AWS_COMMON_API __declspec(dllimport)
# endif /* AWS_COMMON_EXPORTS */
# else
# define AWS_COMMON_API
# endif /* AWS_COMMON_USE_IMPORT_EXPORT */
#else /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */
/* GCC/Clang shared builds: expose symbols via default ELF/Mach-O visibility
 * only when building the library as a shared object. */
# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_COMMON_USE_IMPORT_EXPORT) && defined(AWS_COMMON_EXPORTS)
# define AWS_COMMON_API __attribute__((visibility("default")))
# else
# define AWS_COMMON_API
# endif /* __GNUC__ >= 4 || defined(__clang__) */
#endif /* defined (AWS_C_RT_USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32) */
/* AWS_NO_STATIC_IMPL: inlinable helpers are compiled out-of-line in a
 * designated .c file and exported from the shared library instead. */
#ifdef AWS_NO_STATIC_IMPL
# define AWS_STATIC_IMPL AWS_COMMON_API
#endif
#ifndef AWS_STATIC_IMPL
/*
 * In order to allow us to export our inlinable methods in a DLL/.so, we have a designated .c
 * file where this AWS_STATIC_IMPL macro will be redefined to be non-static.
 */
# define AWS_STATIC_IMPL static inline
#endif
#endif /* AWS_COMMON_EXPORTS_H */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,31 @@
#ifndef AWS_COMMON_FIFO_CACHE_H
#define AWS_COMMON_FIFO_CACHE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/cache.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Initializes the first-in-first-out cache. Sets up the underlying linked hash table.
* Once `max_items` elements have been added, the oldest(first-in) item will
* be removed. For the other parameters, see aws/common/hash_table.h. Hash table
* semantics of these arguments are preserved.
*/
AWS_COMMON_API
struct aws_cache *aws_cache_new_fifo(
struct aws_allocator *allocator,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn,
size_t max_items);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_FIFO_CACHE_H */

View File

@@ -0,0 +1,213 @@
#ifndef AWS_COMMON_FILE_H
#define AWS_COMMON_FILE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
#include <aws/common/platform.h>
#include <stdio.h>
AWS_PUSH_SANE_WARNING_LEVEL
#ifdef AWS_OS_WINDOWS
# define AWS_PATH_DELIM '\\'
# define AWS_PATH_DELIM_STR "\\"
#else
# define AWS_PATH_DELIM '/'
# define AWS_PATH_DELIM_STR "/"
#endif
struct aws_string;
struct aws_directory_iterator;
enum aws_file_type {
AWS_FILE_TYPE_FILE = 1,
AWS_FILE_TYPE_SYM_LINK = 2,
AWS_FILE_TYPE_DIRECTORY = 4,
};
struct aws_directory_entry {
/**
* Absolute path to the entry from the current process root.
*/
struct aws_byte_cursor path;
/**
* Path to the entry relative to the current working directory.
*/
struct aws_byte_cursor relative_path;
/**
* Bit-field of enum aws_file_type
*/
int file_type;
/**
* Size of the file on disk.
*/
int64_t file_size;
};
/**
* Invoked during calls to aws_directory_traverse() as an entry is encountered. entry will contain
* the parsed directory entry info.
*
* Return true to continue the traversal, or alternatively, if you have a reason to abort the traversal, return false.
*/
typedef bool(aws_on_directory_entry)(const struct aws_directory_entry *entry, void *user_data);
AWS_EXTERN_C_BEGIN
/**
* Deprecated - Use aws_fopen_safe() instead, avoid const char * in public APIs.
* Opens file at file_path using mode. Returns the FILE pointer if successful.
* Otherwise, aws_last_error() will contain the error that occurred
*/
AWS_COMMON_API FILE *aws_fopen(const char *file_path, const char *mode);
/**
* Opens file at file_path using mode. Returns the FILE pointer if successful.
* Otherwise, aws_last_error() will contain the error that occurred
*/
AWS_COMMON_API FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode);
/**
* Creates a directory if it doesn't currently exist. If the directory already exists, it's ignored and assumed
* successful.
*
* Returns AWS_OP_SUCCESS on success. Otherwise, check aws_last_error().
*/
AWS_COMMON_API int aws_directory_create(const struct aws_string *dir_path);
/**
* Returns true if the directory currently exists. Otherwise, it returns false.
*/
AWS_COMMON_API bool aws_directory_exists(const struct aws_string *dir_path);
/**
* Deletes a directory. If the directory is not empty, this will fail unless the recursive parameter is set to true.
* If recursive is true then the entire directory and all of its contents will be deleted. If it is set to false,
* the directory will be deleted only if it is empty. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
* aws_last_error() will contain the error that occurred. If the directory doesn't exist, AWS_OP_SUCCESS is still
* returned.
*/
AWS_COMMON_API int aws_directory_delete(const struct aws_string *dir_path, bool recursive);
/**
* Deletes a file. Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
* aws_last_error() will contain the error that occurred. If the file doesn't exist, AWS_OP_SUCCESS is still returned.
*/
AWS_COMMON_API int aws_file_delete(const struct aws_string *file_path);
/**
* Moves directory at from to to.
* Returns AWS_OP_SUCCESS if the operation was successful. Otherwise,
* aws_last_error() will contain the error that occurred.
*/
AWS_COMMON_API int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to);
/**
* Traverse a directory starting at path.
*
* If you want the traversal to recurse the entire directory, pass recursive as true. Passing false for this parameter
* will only iterate the contents of the directory, but will not descend into any directories it encounters.
*
* If recursive is set to true, the traversal is performed post-order, depth-first
* (for practical reasons such as deleting a directory that contains subdirectories or files).
*
* returns AWS_OP_SUCCESS(0) on success.
*/
AWS_COMMON_API int aws_directory_traverse(
struct aws_allocator *allocator,
const struct aws_string *path,
bool recursive,
aws_on_directory_entry *on_entry,
void *user_data);
/**
* Creates a read-only iterator of a directory starting at path. If path is invalid or there's any other error
* condition, NULL will be returned. Call aws_last_error() for the exact error in that case.
*/
AWS_COMMON_API struct aws_directory_iterator *aws_directory_entry_iterator_new(
struct aws_allocator *allocator,
const struct aws_string *path);
/**
* Moves the iterator to the next entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
* AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
*/
AWS_COMMON_API int aws_directory_entry_iterator_next(struct aws_directory_iterator *iterator);
/**
* Moves the iterator to the previous entry. Returns AWS_OP_SUCCESS if another entry is available, or AWS_OP_ERR with
* AWS_ERROR_LIST_EMPTY as the value for aws_last_error() if no more entries are available.
*/
AWS_COMMON_API int aws_directory_entry_iterator_previous(struct aws_directory_iterator *iterator);
/**
* Cleanup and deallocate iterator
*/
AWS_COMMON_API void aws_directory_entry_iterator_destroy(struct aws_directory_iterator *iterator);
/**
* Gets the aws_directory_entry value for iterator at the current position. Returns NULL if the iterator contains no
* entries.
*/
AWS_COMMON_API const struct aws_directory_entry *aws_directory_entry_iterator_get_value(
const struct aws_directory_iterator *iterator);
/**
* Returns true iff the character is a directory separator on ANY supported platform.
*/
AWS_COMMON_API
bool aws_is_any_directory_separator(char value);
/**
* Returns the directory separator used by the local platform
*/
AWS_COMMON_API
char aws_get_platform_directory_separator(void);
/**
* Normalizes the path by replacing any directory separator with the local platform's directory separator.
* @param path path to normalize. Must be writeable.
*/
AWS_COMMON_API
void aws_normalize_directory_separator(struct aws_byte_buf *path);
/**
* Returns the current user's home directory.
*/
AWS_COMMON_API
struct aws_string *aws_get_home_directory(struct aws_allocator *allocator);
/**
* Returns true if a file or path exists, otherwise, false.
*/
AWS_COMMON_API
bool aws_path_exists(const struct aws_string *path);
/*
* Wrapper for highest-resolution platform-dependent seek implementation.
* Maps to:
*
* _fseeki64() on windows
* fseeko() on linux
*
* whence can either be SEEK_SET or SEEK_END
*
* Returns AWS_OP_SUCCESS, or AWS_OP_ERR (after an error has been raised).
*/
AWS_COMMON_API
int aws_fseek(FILE *file, int64_t offset, int whence);
/*
* Wrapper for os-specific file length query. We can't use fseek(END, 0)
* because support for it is not technically required.
*
* Unix flavors call fstat, while Windows variants use GetFileSize on a
* HANDLE queried from the libc FILE pointer.
*/
AWS_COMMON_API
int aws_file_get_length(FILE *file, int64_t *length);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_FILE_H */

View File

@@ -0,0 +1,454 @@
#ifndef AWS_COMMON_HASH_TABLE_H
#define AWS_COMMON_HASH_TABLE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <stddef.h>
AWS_PUSH_SANE_WARNING_LEVEL
enum {
AWS_COMMON_HASH_TABLE_ITER_CONTINUE = (1 << 0),
AWS_COMMON_HASH_TABLE_ITER_DELETE = (1 << 1),
AWS_COMMON_HASH_TABLE_ITER_ERROR = (1 << 2),
};
/**
* Hash table data structure. This module provides an automatically resizing
* hash table implementation for general purpose use. The hash table stores a
* mapping between void * keys and values; it is expected that in most cases,
* these will point to a structure elsewhere in the heap, instead of inlining a
* key or value into the hash table element itself.
*
* Currently, this hash table implements a variant of robin hood hashing, but
* we do not guarantee that this won't change in the future.
*
* Associated with each hash function are four callbacks:
*
* hash_fn - A hash function from the keys to a uint64_t. It is critical that
* the hash function for a key does not change while the key is in the hash
* table; violating this results in undefined behavior. Collisions are
* tolerated, though naturally with reduced performance.
*
* equals_fn - An equality comparison function. This function must be
* reflexive and consistent with hash_fn.
*
* destroy_key_fn, destroy_value_fn - Optional callbacks invoked when the
* table is cleared or cleaned up and at the caller's option when an element
* is removed from the table. Either or both may be set to NULL, which
* has the same effect as a no-op destroy function.
*
* This datastructure can be safely moved between threads, subject to the
* requirements of the underlying allocator. It is also safe to invoke
* non-mutating operations on the hash table from multiple threads. A suitable
* memory barrier must be used when transitioning from single-threaded mutating
* usage to multithreaded usage.
*/
struct hash_table_state; /* Opaque pointer */
struct aws_hash_table {
struct hash_table_state *p_impl;
};
/**
* Represents an element in the hash table. Various operations on the hash
* table may provide pointers to elements stored within the hash table;
* generally, calling code may alter value, but must not alter key (or any
* information used to compute key's hash code).
*
* Pointers to elements within the hash are invalidated whenever an operation
* which may change the number of elements in the hash is invoked (i.e. put,
* delete, clear, and clean_up), regardless of whether the number of elements
* actually changes.
*/
struct aws_hash_element {
const void *key;
void *value;
};
enum aws_hash_iter_status {
AWS_HASH_ITER_STATUS_DONE,
AWS_HASH_ITER_STATUS_DELETE_CALLED,
AWS_HASH_ITER_STATUS_READY_FOR_USE,
};
struct aws_hash_iter {
const struct aws_hash_table *map;
struct aws_hash_element element;
size_t slot;
size_t limit;
enum aws_hash_iter_status status;
/*
* Reserving extra fields for binary compatibility with future expansion of
* iterator in case hash table implementation changes.
*/
int unused_0;
void *unused_1;
void *unused_2;
};
/**
* Prototype for a key hashing function pointer.
*/
typedef uint64_t(aws_hash_fn)(const void *key);
/**
* Prototype for a hash table equality check function pointer.
*
* This type is usually used for a function that compares two hash table
* keys, but note that the same type is used for a function that compares
* two hash table values in aws_hash_table_eq.
*
* Equality functions used in a hash table must be reflexive (a == a),
* symmetric (a == b => b == a), transitive (a == b, b == c => a == c)
* and consistent (result does not change with time).
*/
typedef bool(aws_hash_callback_eq_fn)(const void *a, const void *b);
/**
* Prototype for a hash table key or value destructor function pointer.
*
* This function is used to destroy elements in the hash table when the
* table is cleared or cleaned up.
*
* Note that functions which remove individual elements from the hash
* table provide options of whether or not to invoke the destructors
* on the key and value of a removed element.
*/
typedef void(aws_hash_callback_destroy_fn)(void *key_or_value);
AWS_EXTERN_C_BEGIN
/**
* Initializes a hash map with initial capacity for 'size' elements
* without resizing. Uses hash_fn to compute the hash of each element.
* equals_fn to compute equality of two keys. Whenever an element is
* removed without being returned, destroy_key_fn is run on the pointer
* to the key and destroy_value_fn is run on the pointer to the value.
* Either or both may be NULL if a callback is not desired in this case.
*/
AWS_COMMON_API
int aws_hash_table_init(
struct aws_hash_table *map,
struct aws_allocator *alloc,
size_t size,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn);
/**
* Deletes every element from map and frees all associated memory.
* destroy_fn will be called for each element. aws_hash_table_init
* must be called before reusing the hash table.
*
* This method is idempotent.
*/
AWS_COMMON_API
void aws_hash_table_clean_up(struct aws_hash_table *map);
/**
* Safely swaps two hash tables. Note that we swap the entirety of the hash
* table, including which allocator is associated.
*
* Neither hash table is required to be initialized; if one or both is
* uninitialized, then the uninitialized state is also swapped.
*/
AWS_COMMON_API
void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_table *AWS_RESTRICT b);
/**
* Moves the hash table in 'from' to 'to'. After this move, 'from' will
* be identical to the state of the original 'to' hash table, and 'to'
* will be in the same state as if it had been passed to aws_hash_table_clean_up
* (that is, it will have no memory allocated, and it will be safe to
* either discard it or call aws_hash_table_clean_up again).
*
* Note that 'to' will not be cleaned up. You should make sure that 'to'
* is either uninitialized or cleaned up before moving a hashtable into
* it.
*/
AWS_COMMON_API
void aws_hash_table_move(struct aws_hash_table *AWS_RESTRICT to, struct aws_hash_table *AWS_RESTRICT from);
/**
* Returns the current number of entries in the table.
*/
AWS_COMMON_API
size_t aws_hash_table_get_entry_count(const struct aws_hash_table *map);
/**
* Returns an iterator to be used for iterating through a hash table.
* Iterator will already point to the first element of the table it finds,
* which can be accessed as iter.element.
*
* This function cannot fail, but if there are no elements in the table,
* the returned iterator will return true for aws_hash_iter_done(&iter).
*/
AWS_COMMON_API
struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map);
/**
* Returns true if iterator is done iterating through table, false otherwise.
* If this is true, the iterator will not include an element of the table.
*/
AWS_COMMON_API
bool aws_hash_iter_done(const struct aws_hash_iter *iter);
/**
* Updates iterator so that it points to next element of hash table.
*
* This and the two previous functions are designed to be used together with
* the following idiom:
*
* for (struct aws_hash_iter iter = aws_hash_iter_begin(&map);
* !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) {
* const key_type key = *(const key_type *)iter.element.key;
* value_type value = *(value_type *)iter.element.value;
* // etc.
* }
*
* Note that calling this on an iter which is "done" is idempotent:
* i.e. it will return another iter which is "done".
*/
AWS_COMMON_API
void aws_hash_iter_next(struct aws_hash_iter *iter);
/**
* Deletes the element currently pointed-to by the hash iterator.
* After calling this method, the element member of the iterator
* should not be accessed until the next call to aws_hash_iter_next.
*
* @param destroy_contents If true, the destructors for the key and value
* will be called.
*/
AWS_COMMON_API
void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents);
/**
* Attempts to locate an element at key. If the element is found, a
* pointer to the value is placed in *p_elem; if it is not found,
* *p_elem is set to NULL. Either way, AWS_OP_SUCCESS is returned.
*
* This method does not change the state of the hash table. Therefore, it
* is safe to call _find from multiple threads on the same hash table,
* provided no mutating operations happen in parallel.
*
* Calling code may update the value in the hash table by modifying **p_elem
* after a successful find. However, this pointer is not guaranteed to
* remain usable after a subsequent call to _put, _delete, _clear, or
* _clean_up.
*/
AWS_COMMON_API
int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem);
/**
* Attempts to locate an element at key. If no such element was found,
* creates a new element, with value initialized to NULL. In either case, a
* pointer to the element is placed in *p_elem.
*
* If was_created is non-NULL, *was_created is set to 0 if an existing
* element was found, or 1 if a new element was created.
*
* Returns AWS_OP_SUCCESS if an item was found or created.
* Raises AWS_ERROR_OOM if hash table expansion was required and memory
* allocation failed.
*/
AWS_COMMON_API
int aws_hash_table_create(
struct aws_hash_table *map,
const void *key,
struct aws_hash_element **p_elem,
int *was_created);
/**
* Inserts a new element at key, with the given value. If another element
* exists at that key, the old element will be overwritten; both old key and
* value objects will be destroyed.
*
* If was_created is non-NULL, *was_created is set to 0 if an existing
* element was found, or 1 if a new element was created.
*
* Returns AWS_OP_SUCCESS if an item was found or created.
* Raises AWS_ERROR_OOM if hash table expansion was required and memory
* allocation failed.
*/
AWS_COMMON_API
int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created);
/**
* Removes element at key. Always returns AWS_OP_SUCCESS.
*
* If p_value is non-NULL, the existing value (if any) is moved into
* (*p_value) before removing from the table, and destroy_fn is _not_
* invoked. If p_value is NULL, then (if the element existed) destroy_fn
* will be invoked on the element being removed.
*
* If was_present is non-NULL, it is set to 0 if the element was
* not present, or 1 if it was present (and is now removed).
*/
AWS_COMMON_API
int aws_hash_table_remove(
struct aws_hash_table *map,
const void *key,
struct aws_hash_element *p_value,
int *was_present);
/**
* Removes element already known (typically by find()).
*
* p_value should point to a valid element returned by create() or find().
*
* NOTE: DO NOT call this method from inside of a aws_hash_table_foreach callback, return
* AWS_COMMON_HASH_TABLE_ITER_DELETE instead.
*/
AWS_COMMON_API
int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value);
/**
* Iterates through every element in the map and invokes the callback on
* that item. Iteration is performed in an arbitrary, implementation-defined
* order, and is not guaranteed to be consistent across invocations.
*
* The callback may change the value associated with the key by overwriting
* the value pointed-to by value. In this case, the on_element_removed
* callback will not be invoked, unless the callback invokes
* AWS_COMMON_HASH_TABLE_ITER_DELETE (in which case the on_element_removed
* is given the updated value).
*
* The callback must return a bitmask of zero or more of the following values
* ORed together:
*
* # AWS_COMMON_HASH_TABLE_ITER_CONTINUE - Continues iteration to the next
* element (if not set, iteration stops)
* # AWS_COMMON_HASH_TABLE_ITER_DELETE - Deletes the current value and
* continues iteration. destroy_fn will NOT be invoked.
* # AWS_COMMON_HASH_TABLE_ITER_ERROR - Stop iteration with error.
* No action will be taken for the current value or the values visited before it.
* There is no rollback: values already deleted during iteration stay deleted.
* aws_hash_table_foreach returns AWS_OP_ERR after stopping the iteration.
*
* Invoking any method which may change the contents of the hashtable
* during iteration results in undefined behavior. However, you may safely
* invoke non-mutating operations during an iteration.
*
* This operation is mutating only if AWS_COMMON_HASH_TABLE_ITER_DELETE
* is returned at some point during iteration. Otherwise, it is non-mutating
* and is safe to invoke in parallel with other non-mutating operations.
*/
AWS_COMMON_API
int aws_hash_table_foreach(
struct aws_hash_table *map,
int (*callback)(void *context, struct aws_hash_element *p_element),
void *context);
/**
* Compares two hash tables for equality. Both hash tables must have equivalent
* key comparators; values will be compared using the comparator passed into this
* function. The key hash function does not need to be equivalent between the
* two hash tables.
*/
AWS_COMMON_API
bool aws_hash_table_eq(
const struct aws_hash_table *a,
const struct aws_hash_table *b,
aws_hash_callback_eq_fn *value_eq);
/**
* Removes every element from the hash map. destroy_fn will be called for
* each element.
*/
AWS_COMMON_API
void aws_hash_table_clear(struct aws_hash_table *map);
/**
* Convenience hash function for NULL-terminated C-strings
*/
AWS_COMMON_API
uint64_t aws_hash_c_string(const void *item);
/**
* Convenience hash function for struct aws_strings.
* Hash is same as used on the string bytes by aws_hash_c_string.
*/
AWS_COMMON_API
uint64_t aws_hash_string(const void *item);
/**
* Convenience hash function for struct aws_byte_cursor.
* Hash is same as used on the string bytes by aws_hash_c_string.
*/
AWS_COMMON_API
uint64_t aws_hash_byte_cursor_ptr(const void *item);
/**
* Convenience hash function which hashes the pointer value directly,
* without dereferencing. This can be used in cases where pointer identity
* is desired, or where a uintptr_t is encoded into a const void *.
*/
AWS_COMMON_API
uint64_t aws_hash_ptr(const void *item);
AWS_COMMON_API
uint64_t aws_hash_combine(uint64_t item1, uint64_t item2);
/**
* Convenience eq callback for NULL-terminated C-strings
*/
AWS_COMMON_API
bool aws_hash_callback_c_str_eq(const void *a, const void *b);
/**
* Convenience eq callback for AWS strings
*/
AWS_COMMON_API
bool aws_hash_callback_string_eq(const void *a, const void *b);
/**
* Convenience destroy callback for AWS strings
*/
AWS_COMMON_API
void aws_hash_callback_string_destroy(void *a);
/**
* Equality function which compares pointer equality.
*/
AWS_COMMON_API
bool aws_ptr_eq(const void *a, const void *b);
/**
* Best-effort check of hash_table_state data-structure invariants
*/
AWS_COMMON_API
bool aws_hash_table_is_valid(const struct aws_hash_table *map);
/**
* Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants.
*/
AWS_COMMON_API
bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter);
/**
* Helper function to hash keys that are uint64_t values.
*
* The function is not a strong hash function in any sense; it merely reflects
* the uint64 value back. Do not use this function as a hash if you need
* the properties of a strong hash function.
*/
AWS_COMMON_API uint64_t aws_hash_uint64_t_by_identity(const void *item);
/**
* Helper function to compare hash keys that are uint64_t values.
*/
AWS_COMMON_API bool aws_hash_compare_uint64_t_eq(const void *a, const void *b);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_HASH_TABLE_H */

View File

@@ -0,0 +1,28 @@
#ifndef AWS_COMMON_HOST_UTILS_H
#define AWS_COMMON_HOST_UTILS_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
struct aws_byte_cursor;
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/*
* Determine whether host cursor is IPv4 string.
*/
AWS_COMMON_API bool aws_host_utils_is_ipv4(struct aws_byte_cursor host);
/*
* Determine whether host cursor is IPv6 string.
* Supports checking for uri encoded strings and scoped literals.
*/
AWS_COMMON_API bool aws_host_utils_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_HOST_UTILS_H */

View File

@@ -0,0 +1,489 @@
#ifndef AWS_COMMON_JSON_H
#define AWS_COMMON_JSON_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_json_value;
AWS_EXTERN_C_BEGIN
// ====================
// Create and pass type
/**
* Creates a new string aws_json_value with the given string and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* Note: might be slower than c_str version due to internal copy
* @param string A byte cursor you want to store in the aws_json_value
* @param allocator The allocator to use when creating the value
* @return A new string aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
/**
* Creates a new string aws_json_value with the given string and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* @param string c string pointer you want to store in the aws_json_value
* @param allocator The allocator to use when creating the value
* @return A new string aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_string_from_c_str(struct aws_allocator *allocator, const char *string);
/**
* Creates a new number aws_json_value with the given number and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* @param number The number you want to store in the aws_json_value
* @param allocator The allocator to use when creating the value
* @return A new number aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_number(struct aws_allocator *allocator, double number);
/**
* Creates a new array aws_json_value and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* Deleting this array will also destroy any aws_json_values it contains.
* @param allocator The allocator to use when creating the value
* @return A new array aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_array(struct aws_allocator *allocator);
/**
* Creates a new boolean aws_json_value with the given boolean and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* @param boolean The boolean you want to store in the aws_json_value
* @param allocator The allocator to use when creating the value
* @return A new boolean aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_boolean(struct aws_allocator *allocator, bool boolean);
/**
* Creates a new null aws_json_value and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* @param allocator The allocator to use when creating the value
* @return A new null aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_null(struct aws_allocator *allocator);
/**
* Creates a new object aws_json_value and returns a pointer to it.
*
* Note: You will need to free the memory for the aws_json_value using aws_json_destroy on the aws_json_value or
* on the object/array containing the aws_json_value.
* Deleting this object will also destroy any aws_json_values it contains.
* @param allocator The allocator to use when creating the value
* @return A new object aws_json_value
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator);
// ====================
// ====================
// Value getters
/**
* Gets the string of a string aws_json_value.
* @param value The string aws_json_value.
* @param output The string
* @return AWS_OP_SUCCESS if the value is a string, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output);
/**
* Gets the number of a number aws_json_value.
* @param value The number aws_json_value.
* @param output The number
* @return AWS_OP_SUCCESS if the value is a number, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_json_value_get_number(const struct aws_json_value *value, double *output);
/**
* Gets the boolean of a boolean aws_json_value.
* @param value The boolean aws_json_value.
* @param output The boolean
* @return AWS_OP_SUCCESS if the value is a boolean, otherwise AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output);
// ====================
// ====================
// Object API
/**
* Adds a aws_json_value to a object aws_json_value.
*
* Note that the aws_json_value will be destroyed when the aws_json_value object is destroyed
* by calling "aws_json_destroy()"
* Note: might be slower than c_str version due to internal copy
* @param object The object aws_json_value you want to add a value to.
* @param key The key to add the aws_json_value at.
* @param value The aws_json_value you want to add.
* @return AWS_OP_SUCCESS if adding was successful.
* Will return AWS_OP_ERR if the object passed is invalid or if the passed key
* is already in use in the object.
*/
AWS_COMMON_API
int aws_json_value_add_to_object(
struct aws_json_value *object,
struct aws_byte_cursor key,
struct aws_json_value *value);
/**
* Adds a aws_json_value to a object aws_json_value.
*
* Note that the aws_json_value will be destroyed when the aws_json_value object is destroyed
* by calling "aws_json_destroy()"
* @param object The object aws_json_value you want to add a value to.
* @param key The key to add the aws_json_value at.
* @param value The aws_json_value you want to add.
* @return AWS_OP_SUCCESS if adding was successful.
 * Will return AWS_OP_ERR if the object passed is invalid or if the passed key
* is already in use in the object.
*/
AWS_COMMON_API
int aws_json_value_add_to_object_c_str(struct aws_json_value *object, const char *key, struct aws_json_value *value);
/**
* Returns the aws_json_value at the given key.
* Note: might be slower than c_str version due to internal copy
* @param object The object aws_json_value you want to get the value from.
* @param key The key that the aws_json_value is at. Is case sensitive.
* @return The aws_json_value at the given key, otherwise NULL.
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_value *object, struct aws_byte_cursor key);
/**
* Returns the aws_json_value at the given key.
* Note: same as aws_json_value_get_from_object but with key as const char *.
 * Prefer this method if you have a key that's already a valid char * as it is likely to be faster.
* @param object The object aws_json_value you want to get the value from.
* @param key The key that the aws_json_value is at. Is case sensitive.
* @return The aws_json_value at the given key, otherwise NULL.
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_get_from_object_c_str(const struct aws_json_value *object, const char *key);
/**
* Checks if there is a aws_json_value at the given key.
* Note: might be slower than c_str version due to internal copy
* @param object The value aws_json_value you want to check a key in.
* @param key The key that you want to check. Is case sensitive.
* @return True if a aws_json_value is found.
*/
AWS_COMMON_API
bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte_cursor key);
/**
* Checks if there is a aws_json_value at the given key.
* Note: same as aws_json_value_has_key but with key as const char *.
 * Prefer this method if you have a key that's already a valid char * as it is likely to be faster.
* @param object The value aws_json_value you want to check a key in.
* @param key The key that you want to check. Is case sensitive.
* @return True if a aws_json_value is found.
*/
AWS_COMMON_API
bool aws_json_value_has_key_c_str(const struct aws_json_value *object, const char *key);
/**
* Removes the aws_json_value at the given key.
* Note: might be slower than c_str version due to internal copy
* @param object The object aws_json_value you want to remove a aws_json_value in.
* @param key The key that the aws_json_value is at. Is case sensitive.
* @return AWS_OP_SUCCESS if the aws_json_value was removed.
* Will return AWS_OP_ERR if the object passed is invalid or if the value
* at the key cannot be found.
*/
AWS_COMMON_API
int aws_json_value_remove_from_object(struct aws_json_value *object, struct aws_byte_cursor key);
/**
* Removes the aws_json_value at the given key.
* Note: same as aws_json_value_remove_from_object but with key as const char *.
 * Prefer this method if you have a key that's already a valid char * as it is likely to be faster.
* @param object The object aws_json_value you want to remove a aws_json_value in.
* @param key The key that the aws_json_value is at. Is case sensitive.
* @return AWS_OP_SUCCESS if the aws_json_value was removed.
* Will return AWS_OP_ERR if the object passed is invalid or if the value
* at the key cannot be found.
*/
AWS_COMMON_API
int aws_json_value_remove_from_object_c_str(struct aws_json_value *object, const char *key);
/**
* @brief callback for iterating members of an object
* Iteration can be controlled as follows:
* - return AWS_OP_SUCCESS and out_should_continue is set to true (default value) -
* continue iteration without error
 * - return AWS_OP_SUCCESS and out_should_continue is set to false -
* stop iteration without error
* - return AWS_OP_ERR - stop iteration with error
*/
typedef int(aws_json_on_member_encountered_const_fn)(
const struct aws_byte_cursor *key,
const struct aws_json_value *value,
bool *out_should_continue,
void *user_data);
/**
* @brief iterates through members of the object.
* iteration is sequential in order fields were initially parsed.
* @param object object to iterate over.
* @param on_member callback for when member is encountered.
* @param user_data user data to pass back in callback.
* @return AWS_OP_SUCCESS when iteration finishes completely or exits early,
* AWS_OP_ERR if value is not an object.
*/
AWS_COMMON_API
int aws_json_const_iterate_object(
const struct aws_json_value *object,
aws_json_on_member_encountered_const_fn *on_member,
void *user_data);
// ====================
// ====================
// Array API
/**
* Adds a aws_json_value to the given array aws_json_value.
*
* Note that the aws_json_value will be destroyed when the aws_json_value array is destroyed
* by calling "aws_json_destroy()"
* @param array The array aws_json_value you want to add an aws_json_value to.
* @param value The aws_json_value you want to add.
* @return AWS_OP_SUCCESS if adding the aws_json_value was successful.
* Will return AWS_OP_ERR if the array passed is invalid.
*/
AWS_COMMON_API
int aws_json_value_add_array_element(struct aws_json_value *array, const struct aws_json_value *value);
/**
* Returns the aws_json_value at the given index in the array aws_json_value.
* @param array The array aws_json_value.
* @param index The index of the aws_json_value you want to access.
* @return A pointer to the aws_json_value at the given index in the array, otherwise NULL.
*/
AWS_COMMON_API
struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index);
/**
* Returns the number of items in the array aws_json_value.
* @param array The array aws_json_value.
* @return The number of items in the array_json_value.
*/
AWS_COMMON_API
size_t aws_json_get_array_size(const struct aws_json_value *array);
/**
* Removes the aws_json_value at the given index in the array aws_json_value.
* @param array The array aws_json_value.
* @param index The index containing the aws_json_value you want to remove.
* @return AWS_OP_SUCCESS if the aws_json_value at the index was removed.
* Will return AWS_OP_ERR if the array passed is invalid or if the index
* passed is out of range.
*/
AWS_COMMON_API
int aws_json_value_remove_array_element(struct aws_json_value *array, size_t index);
/**
* @brief callback for iterating values of an array.
* Iteration can be controlled as follows:
* - return AWS_OP_SUCCESS and out_should_continue is set to true (default value) -
* continue iteration without error
 * - return AWS_OP_SUCCESS and out_should_continue is set to false -
* stop iteration without error
* - return AWS_OP_ERR - stop iteration with error
*/
typedef int(aws_json_on_value_encountered_const_fn)(
size_t index,
const struct aws_json_value *value,
bool *out_should_continue,
void *user_data);
/**
* @brief iterates through values of an array.
* iteration is sequential starting with 0th element.
* @param array array to iterate over.
* @param on_value callback for when value is encountered.
* @param user_data user data to pass back in callback.
* @return AWS_OP_SUCCESS when iteration finishes completely or exits early,
* AWS_OP_ERR if value is not an array.
*/
AWS_COMMON_API
int aws_json_const_iterate_array(
const struct aws_json_value *array,
aws_json_on_value_encountered_const_fn *on_value,
void *user_data);
// ====================
// ====================
// Checks
/**
* Checks whether two json values are equivalent.
* @param a first value to compare.
* @param b second value to compare.
* @param is_case_sensitive case sensitive compare or not.
 * @return True if values are equal, false otherwise.
*/
AWS_COMMON_API
bool aws_json_value_compare(const struct aws_json_value *a, const struct aws_json_value *b, bool is_case_sensitive);
/**
* Duplicates json value.
 * @param value the value to duplicate.
* @return duplicated value. NULL and last error set if value cannot be duplicated.
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a string.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a string aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_string(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a number.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a number aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_number(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a array.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a array aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_array(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a boolean.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a boolean aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_boolean(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a null aws_json_value.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a null aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_null(const struct aws_json_value *value);
/**
* Checks if the aws_json_value is a object aws_json_value.
* @param value The aws_json_value to check.
* @return True if the aws_json_value is a object aws_json_value, otherwise false.
*/
AWS_COMMON_API
bool aws_json_value_is_object(const struct aws_json_value *value);
// ====================
// ====================
// Memory Management
/**
* Removes the aws_json_value from memory. If the aws_json_value is a object or array, it will also destroy
* attached aws_json_values as well.
*
* For example, if you called "aws_json_array_add(b, a)" to add an object "a" to an array "b", if you call
* "aws_json_destroy(b)" then it will also free "a" automatically. All children/attached aws_json_values are freed
* when the parent/root aws_json_value is destroyed.
* @param value The aws_json_value to destroy.
*/
AWS_COMMON_API
void aws_json_value_destroy(struct aws_json_value *value);
// ====================
// ====================
// Utility
/**
* Appends a unformatted JSON string representation of the aws_json_value into the passed byte buffer.
* The byte buffer is expected to be already initialized so the function can append the JSON into it.
*
* Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
* buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
* have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
*
* Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
* the memory used, as it will NOT be called automatically.
* @param value The aws_json_value to format.
* @param output The destination for the JSON string
* @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors
* Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there
* was an error appending the JSON into the byte buffer.
*/
AWS_COMMON_API
int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output);
/**
* Appends a formatted JSON string representation of the aws_json_value into the passed byte buffer.
* The byte buffer is expected to already be initialized so the function can append the JSON into it.
*
* Note: The byte buffer will automatically have its size extended if the JSON string is over the byte
* buffer capacity AND the byte buffer has an allocator associated with it. If the byte buffer does not
* have an allocator associated and the JSON string is over capacity, AWS_OP_ERR will be returned.
*
* Note: When you are finished with the aws_byte_buf, you must call "aws_byte_buf_clean_up_secure" to free
* the memory used, as it will NOT be called automatically.
* @param value The aws_json_value to format.
* @param output The destination for the JSON string
* @return AWS_OP_SUCCESS if the JSON string was allocated to output without any errors
* Will return AWS_OP_ERR if the value passed is not an aws_json_value or if there
 * was an error appending the JSON into the byte buffer.
*/
AWS_COMMON_API
int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output);
/**
* Parses the JSON string and returns a aws_json_value containing the root of the JSON.
* @param allocator The allocator used to create the value
* @param string The string containing the JSON.
* @return The root aws_json_value of the JSON.
*/
AWS_COMMON_API
struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
// ====================
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif // AWS_COMMON_JSON_H

View File

@@ -0,0 +1,31 @@
#ifndef AWS_COMMON_LIFO_CACHE_H
#define AWS_COMMON_LIFO_CACHE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/cache.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Initializes the last-in-first-out cache. Sets up the underlying linked hash table.
 * Once `max_items` elements have been added, the latest (last-in) item will
* be removed. For the other parameters, see aws/common/hash_table.h. Hash table
* semantics of these arguments are preserved.
*/
AWS_COMMON_API
struct aws_cache *aws_cache_new_lifo(
struct aws_allocator *allocator,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn,
size_t max_items);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LIFO_CACHE_H */

View File

@@ -0,0 +1,125 @@
#ifndef AWS_COMMON_LINKED_HASH_TABLE_H
#define AWS_COMMON_LINKED_HASH_TABLE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/hash_table.h>
#include <aws/common/linked_list.h>
AWS_PUSH_SANE_WARNING_LEVEL
/**
* Simple linked hash table. Preserves insertion order, and can be iterated in insertion order.
*
* You can also change the order safely without altering the shape of the underlying hash table.
*/
struct aws_linked_hash_table {
    struct aws_allocator *allocator;                     /* allocator used for internal node storage */
    struct aws_linked_list list;                         /* preserves insertion order for iteration */
    struct aws_hash_table table;                         /* key -> node lookup */
    aws_hash_callback_destroy_fn *user_on_value_destroy; /* destroy callback forwarded from init (hash table semantics) */
    aws_hash_callback_destroy_fn *user_on_key_destroy;   /* destroy callback forwarded from init (hash table semantics) */
};
/**
* Linked-List node stored in the table. This is the node type that will be returned in
* aws_linked_hash_table_get_iteration_list().
*/
struct aws_linked_hash_table_node {
    struct aws_linked_list_node node;    /* intrusive list hook; recover this struct via AWS_CONTAINER_OF */
    struct aws_linked_hash_table *table; /* back-pointer to the owning table */
    const void *key;                     /* key as stored in the hash table */
    void *value;                         /* user value stored at the key */
};
AWS_EXTERN_C_BEGIN
/**
* Initializes the table. Sets up the underlying hash table and linked list.
* For the other parameters, see aws/common/hash_table.h. Hash table
* semantics of these arguments are preserved.
*/
AWS_COMMON_API
int aws_linked_hash_table_init(
struct aws_linked_hash_table *table,
struct aws_allocator *allocator,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn,
size_t initial_item_count);
/**
* Cleans up the table. Elements in the table will be evicted and cleanup
* callbacks will be invoked.
*/
AWS_COMMON_API
void aws_linked_hash_table_clean_up(struct aws_linked_hash_table *table);
/**
* Finds element in the table by key. If found, AWS_OP_SUCCESS will be
* returned. If not found, AWS_OP_SUCCESS will be returned and *p_value will be
* NULL.
*
* If any errors occur AWS_OP_ERR will be returned.
*/
AWS_COMMON_API
int aws_linked_hash_table_find(struct aws_linked_hash_table *table, const void *key, void **p_value);
/**
* Finds element in the table by key. If found, AWS_OP_SUCCESS will be returned and the item will be moved to the back
* of the list.
* If not found, AWS_OP_SUCCESS will be returned and *p_value will be NULL.
*
* Note: this will change the order of elements
*/
AWS_COMMON_API
int aws_linked_hash_table_find_and_move_to_back(struct aws_linked_hash_table *table, const void *key, void **p_value);
/**
* Puts `p_value` at `key`. If an element is already stored at `key` it will be replaced.
*/
AWS_COMMON_API
int aws_linked_hash_table_put(struct aws_linked_hash_table *table, const void *key, void *p_value);
/**
* Removes item at `key` from the table.
*/
AWS_COMMON_API
int aws_linked_hash_table_remove(struct aws_linked_hash_table *table, const void *key);
/**
* Clears all items from the table.
*/
AWS_COMMON_API
void aws_linked_hash_table_clear(struct aws_linked_hash_table *table);
/**
* returns number of elements in the table.
*/
AWS_COMMON_API
size_t aws_linked_hash_table_get_element_count(const struct aws_linked_hash_table *table);
/**
* Move the aws_linked_hash_table_node to the end of the list.
*
* Note: this will change the order of elements
*/
AWS_COMMON_API
void aws_linked_hash_table_move_node_to_end_of_list(
struct aws_linked_hash_table *table,
struct aws_linked_hash_table_node *node);
/**
* returns the underlying linked list for iteration.
*
* The returned list has nodes of the type: aws_linked_hash_table_node. Use AWS_CONTAINER_OF for access to the element.
*/
AWS_COMMON_API
const struct aws_linked_list *aws_linked_hash_table_get_iteration_list(const struct aws_linked_hash_table *table);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LINKED_HASH_TABLE_H */

View File

@@ -0,0 +1,196 @@
#ifndef AWS_COMMON_LINKED_LIST_H
#define AWS_COMMON_LINKED_LIST_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <stddef.h>
AWS_PUSH_SANE_WARNING_LEVEL
/* A node in an intrusive doubly-linked list; embed it in the element struct. */
struct aws_linked_list_node {
    struct aws_linked_list_node *next;
    struct aws_linked_list_node *prev;
};
/* The list itself. head/tail are sentinel nodes: an empty list has
 * head.next == &tail; head.prev and tail.next are always NULL. */
struct aws_linked_list {
    struct aws_linked_list_node head;
    struct aws_linked_list_node tail;
};
AWS_EXTERN_C_BEGIN
/**
* Set node's next and prev pointers to NULL.
*/
AWS_STATIC_IMPL void aws_linked_list_node_reset(struct aws_linked_list_node *node);
/**
* These functions need to be defined first as they are used in pre
* and post conditions.
*/
/**
* Tests if the list is empty.
*/
AWS_STATIC_IMPL bool aws_linked_list_empty(const struct aws_linked_list *list);
/**
* Checks that a linked list is valid.
*/
AWS_STATIC_IMPL bool aws_linked_list_is_valid(const struct aws_linked_list *list);
/**
* Checks that the prev of the next pointer of a node points to the
* node. As this checks whether the [next] connection of a node is
* bidirectional, it returns false if used for the list tail.
*/
AWS_STATIC_IMPL bool aws_linked_list_node_next_is_valid(const struct aws_linked_list_node *node);
/**
* Checks that the next of the prev pointer of a node points to the
* node. Similarly to the above, this returns false if used for the
* head of a list.
*/
AWS_STATIC_IMPL bool aws_linked_list_node_prev_is_valid(const struct aws_linked_list_node *node);
/**
* Checks that a linked list satisfies double linked list connectivity
* constraints. This check is O(n) as it traverses the whole linked
* list to ensure that tail is reachable from head (and vice versa)
* and that every connection is bidirectional.
*
* Note: This check *cannot* go into an infinite loop, because we
* ensure that the connection to the next node is
* bidirectional. Therefore, if a node's [a] a.next is a previous node
* [b] in the list, b.prev != &a and so this check would fail, thus
* terminating the loop.
*/
AWS_STATIC_IMPL bool aws_linked_list_is_valid_deep(const struct aws_linked_list *list);
/**
* Initializes the list. List will be empty after this call.
*/
AWS_STATIC_IMPL void aws_linked_list_init(struct aws_linked_list *list);
/**
* Returns an iteration pointer for the first element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_begin(const struct aws_linked_list *list);
/**
* Returns an iteration pointer for one past the last element in the list.
*/
AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_end(const struct aws_linked_list *list);
/**
* Returns a pointer for the last element in the list.
* Used to begin iterating the list in reverse. Ex:
* for (i = aws_linked_list_rbegin(list); i != aws_linked_list_rend(list); i = aws_linked_list_prev(i)) {...}
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_rbegin(const struct aws_linked_list *list);
/**
* Returns the pointer to one before the first element in the list.
* Used to end iterating the list in reverse.
*/
AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_rend(const struct aws_linked_list *list);
/**
* Returns the next element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_next(const struct aws_linked_list_node *node);
/**
* Returns the previous element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_prev(const struct aws_linked_list_node *node);
/**
* Inserts to_add immediately after after.
*/
AWS_STATIC_IMPL void aws_linked_list_insert_after(
struct aws_linked_list_node *after,
struct aws_linked_list_node *to_add);
/**
* Swaps the order two nodes in the linked list.
*/
AWS_STATIC_IMPL void aws_linked_list_swap_nodes(struct aws_linked_list_node *a, struct aws_linked_list_node *b);
/**
* Inserts to_add immediately before before.
*/
AWS_STATIC_IMPL void aws_linked_list_insert_before(
struct aws_linked_list_node *before,
struct aws_linked_list_node *to_add);
/**
 * Removes the specified node from the list: its former neighbors are linked
 * directly to each other and the node's own next/prev pointers are reset.
*/
AWS_STATIC_IMPL void aws_linked_list_remove(struct aws_linked_list_node *node);
/**
* Append new_node.
*/
AWS_STATIC_IMPL void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node);
/**
* Returns the element in the back of the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_back(const struct aws_linked_list *list);
/**
* Returns the element in the back of the list and removes it
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_back(struct aws_linked_list *list);
/**
* Prepend new_node.
*/
AWS_STATIC_IMPL void aws_linked_list_push_front(struct aws_linked_list *list, struct aws_linked_list_node *node);
/**
* Returns the element in the front of the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_front(const struct aws_linked_list *list);
/**
* Returns the element in the front of the list and removes it
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list);
AWS_STATIC_IMPL void aws_linked_list_swap_contents(
struct aws_linked_list *AWS_RESTRICT a,
struct aws_linked_list *AWS_RESTRICT b);
/**
* Remove all nodes from one list, and add them to the back of another.
*
* Example: if dst={1,2} and src={3,4}, they become dst={1,2,3,4} and src={}
*/
AWS_STATIC_IMPL void aws_linked_list_move_all_back(
struct aws_linked_list *AWS_RESTRICT dst,
struct aws_linked_list *AWS_RESTRICT src);
/**
* Remove all nodes from one list, and add them to the front of another.
*
* Example: if dst={2,1} and src={4,3}, they become dst={4,3,2,1} and src={}
*/
AWS_STATIC_IMPL void aws_linked_list_move_all_front(
struct aws_linked_list *AWS_RESTRICT dst,
struct aws_linked_list *AWS_RESTRICT src);
/**
* Returns true if the node is currently in a list, false otherwise.
*/
AWS_STATIC_IMPL bool aws_linked_list_node_is_in_list(struct aws_linked_list_node *node);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/linked_list.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LINKED_LIST_H */

View File

@@ -0,0 +1,440 @@
#ifndef AWS_COMMON_LINKED_LIST_INL
#define AWS_COMMON_LINKED_LIST_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/linked_list.h>
#include <stddef.h>
AWS_EXTERN_C_BEGIN
/**
* Set node's next and prev pointers to NULL.
*/
AWS_STATIC_IMPL void aws_linked_list_node_reset(struct aws_linked_list_node *node) {
    AWS_PRECONDITION(node != NULL);
    /* Zero the whole struct (not just NULL the fields) so the
     * AWS_IS_ZEROED postcondition holds regardless of the platform's
     * null-pointer representation. */
    AWS_ZERO_STRUCT(*node);
    AWS_POSTCONDITION(AWS_IS_ZEROED(*node));
}
/**
* These functions need to be defined first as they are used in pre
* and post conditions.
*/
/**
* Tests if the list is empty.
*/
AWS_STATIC_IMPL bool aws_linked_list_empty(const struct aws_linked_list *list) {
    AWS_PRECONDITION(list);
    /* With sentinel head/tail nodes, "empty" is exactly head linked directly to tail. */
    return list->head.next == &list->tail;
}
/**
* Checks that a linked list is valid.
*/
AWS_STATIC_IMPL bool aws_linked_list_is_valid(const struct aws_linked_list *list) {
    /* Reject NULL lists and malformed sentinels: head must have a successor
     * and a NULL prev; tail must have a predecessor and a NULL next. */
    if (list == NULL || list->head.next == NULL || list->head.prev != NULL || list->tail.prev == NULL ||
        list->tail.next != NULL) {
        return false;
    }
#if defined(AWS_DEEP_CHECKS) && (AWS_DEEP_CHECKS == 1)
    /* Optionally walk the whole list to verify full bidirectional connectivity. */
    return aws_linked_list_is_valid_deep(list);
#else
    return true;
#endif
}
/**
* Checks that the prev of the next pointer of a node points to the
* node. As this checks whether the [next] connection of a node is
* bidirectional, it returns false if used for the list tail.
*/
AWS_STATIC_IMPL bool aws_linked_list_node_next_is_valid(const struct aws_linked_list_node *node) {
    /* True only when the [node -> node->next] link is bidirectional; false for
     * NULL, for a detached node, and for the tail sentinel (whose next is NULL). */
    return node && node->next && node->next->prev == node;
}
/**
* Checks that the next of the prev pointer of a node points to the
* node. Similarly to the above, this returns false if used for the
* head of a list.
*/
AWS_STATIC_IMPL bool aws_linked_list_node_prev_is_valid(const struct aws_linked_list_node *node) {
    /* Mirror of the check above for the [node->prev -> node] link; false for
     * NULL, for a detached node, and for the head sentinel (whose prev is NULL). */
    return node && node->prev && node->prev->next == node;
}
/**
* Checks that a linked list satisfies double linked list connectivity
* constraints. This check is O(n) as it traverses the whole linked
* list to ensure that tail is reachable from head (and vice versa)
* and that every connection is bidirectional.
*
* Note: This check *cannot* go into an infinite loop, because we
* ensure that the connection to the next node is
* bidirectional. Therefore, if a node's [a] a.next is a previous node
* [b] in the list, b.prev != &a and so this check would fail, thus
* terminating the loop.
*/
AWS_STATIC_IMPL bool aws_linked_list_is_valid_deep(const struct aws_linked_list *list) {
    if (!list) {
        return false;
    }
    /* Walk forward from the head sentinel. Every hop must be bidirectional
     * (next->prev points back at us), which both proves connectivity and
     * guarantees termination: a back-edge to an earlier node would fail the
     * bidirectionality check. Because all edges are bidirectional, reaching
     * the tail also implies the tail reaches the head via prev pointers. */
    for (const struct aws_linked_list_node *cursor = &list->head; cursor != NULL; cursor = cursor->next) {
        if (cursor == &list->tail) {
            return true; /* head reached tail: list is well-formed */
        }
        if (!aws_linked_list_node_next_is_valid(cursor)) {
            return false; /* next/prev pointers disagree */
        }
    }
    return false; /* walk fell off a NULL next without reaching the tail */
}
/**
* Initializes the list. List will be empty after this call.
*/
AWS_STATIC_IMPL void aws_linked_list_init(struct aws_linked_list *list) {
    AWS_PRECONDITION(list);
    /* Link the two sentinels directly to each other; the outward-facing
     * pointers (head.prev, tail.next) stay NULL as termination markers. */
    list->head.next = &list->tail;
    list->head.prev = NULL;
    list->tail.prev = &list->head;
    list->tail.next = NULL;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(aws_linked_list_empty(list));
}
/**
* Returns an iteration pointer for the first element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_begin(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    /* First real element, or the tail sentinel when the list is empty. */
    struct aws_linked_list_node *first = list->head.next;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(first == list->head.next);
    return first;
}
/**
* Returns an iteration pointer for one past the last element in the list.
*/
AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_end(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    /* The tail sentinel: one past the last real element. */
    const struct aws_linked_list_node *end_sentinel = &list->tail;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(end_sentinel == &list->tail);
    return end_sentinel;
}
/**
* Returns a pointer for the last element in the list.
* Used to begin iterating the list in reverse. Ex:
* for (i = aws_linked_list_rbegin(list); i != aws_linked_list_rend(list); i = aws_linked_list_prev(i)) {...}
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_rbegin(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    /* Last real element, or the head sentinel when the list is empty. */
    struct aws_linked_list_node *last = list->tail.prev;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(last == list->tail.prev);
    return last;
}
/**
* Returns the pointer to one before the first element in the list.
* Used to end iterating the list in reverse.
*/
AWS_STATIC_IMPL const struct aws_linked_list_node *aws_linked_list_rend(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    /* The head sentinel: one before the first real element. */
    const struct aws_linked_list_node *rend_sentinel = &list->head;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(rend_sentinel == &list->head);
    return rend_sentinel;
}
/**
* Returns the next element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_next(const struct aws_linked_list_node *node) {
    AWS_PRECONDITION(aws_linked_list_node_next_is_valid(node));
    /* Step one element forward; valid for any node except the tail sentinel. */
    struct aws_linked_list_node *successor = node->next;
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(node));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(successor));
    AWS_POSTCONDITION(successor == node->next);
    return successor;
}
/**
* Returns the previous element in the list.
*/
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_prev(const struct aws_linked_list_node *node) {
    AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(node));
    /* Step one element backward; valid for any node except the head sentinel. */
    struct aws_linked_list_node *predecessor = node->prev;
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(node));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(predecessor));
    AWS_POSTCONDITION(predecessor == node->prev);
    return predecessor;
}
/**
* Inserts to_add immediately after after.
*/
AWS_STATIC_IMPL void aws_linked_list_insert_after(
    struct aws_linked_list_node *after,
    struct aws_linked_list_node *to_add) {
    AWS_PRECONDITION(aws_linked_list_node_next_is_valid(after));
    AWS_PRECONDITION(to_add != NULL);
    /* Splice to_add between [after] and its current successor. */
    struct aws_linked_list_node *successor = after->next;
    to_add->prev = after;
    to_add->next = successor;
    successor->prev = to_add;
    after->next = to_add;
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(after));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(to_add));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(to_add));
    AWS_POSTCONDITION(after->next == to_add);
}
/**
* Swaps the order two nodes in the linked list.
*/
AWS_STATIC_IMPL void aws_linked_list_swap_nodes(struct aws_linked_list_node *a, struct aws_linked_list_node *b) {
    AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(a));
    AWS_PRECONDITION(aws_linked_list_node_next_is_valid(a));
    AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(b));
    AWS_PRECONDITION(aws_linked_list_node_next_is_valid(b));
    if (a == b) {
        return;
    }
    /* snapshot b's value to avoid clobbering its next/prev pointers if a/b are adjacent */
    struct aws_linked_list_node tmp = *b;
    /* Re-aim a's neighbors at b, and b's old neighbors (read from the snapshot,
     * since b's own links may already have been overwritten) at a. */
    a->prev->next = b;
    a->next->prev = b;
    tmp.prev->next = a;
    tmp.next->prev = a;
    /* Finally exchange the two nodes' own next/prev fields via the same temp.
     * NOTE(review): the statement order here is load-bearing for adjacent
     * nodes — do not reorder. */
    tmp = *a;
    *a = *b;
    *b = tmp;
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(a));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(a));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(b));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(b));
}
/**
* Inserts to_add immediately before before.
*/
AWS_STATIC_IMPL void aws_linked_list_insert_before(
    struct aws_linked_list_node *before,
    struct aws_linked_list_node *to_add) {
    AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(before));
    AWS_PRECONDITION(to_add != NULL);
    /* Splice to_add between [before]'s current predecessor and [before]. */
    struct aws_linked_list_node *predecessor = before->prev;
    to_add->next = before;
    to_add->prev = predecessor;
    predecessor->next = to_add;
    before->prev = to_add;
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(before));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(to_add));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(to_add));
    AWS_POSTCONDITION(before->prev == to_add);
}
/**
 * Detaches node from its list (its neighbors are linked to each other) and
 * resets node's own pointers so it reads as "not in a list".
 */
AWS_STATIC_IMPL void aws_linked_list_remove(struct aws_linked_list_node *node) {
    AWS_PRECONDITION(aws_linked_list_node_prev_is_valid(node));
    AWS_PRECONDITION(aws_linked_list_node_next_is_valid(node));
    /* The two bypass writes are independent of each other. */
    node->next->prev = node->prev;
    node->prev->next = node->next;
    aws_linked_list_node_reset(node);
    AWS_POSTCONDITION(node->next == NULL && node->prev == NULL);
}
/**
 * Appends node as the new last element of list.
 */
AWS_STATIC_IMPL void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    AWS_PRECONDITION(node != NULL);
    /* The element just before the tail sentinel is the back of the list. */
    struct aws_linked_list_node *tail_sentinel = &list->tail;
    aws_linked_list_insert_before(tail_sentinel, node);
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(list->tail.prev == node, "[node] is the new last element of [list]");
}
/**
 * Returns (without removing) the last element of a non-empty list.
 */
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_back(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    AWS_PRECONDITION(!aws_linked_list_empty(list));
    /* The tail sentinel's predecessor is the back of the list. */
    struct aws_linked_list_node *last = list->tail.prev;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(last));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(last));
    return last;
}
/**
 * Removes the last element of a non-empty list and returns it.
 * The returned node's links are reset (not in any list).
 */
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_back(struct aws_linked_list *list) {
    AWS_PRECONDITION(!aws_linked_list_empty(list));
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    struct aws_linked_list_node *last = aws_linked_list_back(list);
    aws_linked_list_remove(last);
    AWS_POSTCONDITION(last->next == NULL && last->prev == NULL);
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    return last;
}
/**
 * Prepends node as the new first element of list.
 */
AWS_STATIC_IMPL void aws_linked_list_push_front(struct aws_linked_list *list, struct aws_linked_list_node *node) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    AWS_PRECONDITION(node != NULL);
    /* Insert directly in front of the current first element (the head
     * sentinel's successor — the tail sentinel itself when the list is empty). */
    struct aws_linked_list_node *old_front = list->head.next;
    aws_linked_list_insert_before(old_front, node);
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(list->head.next == node, "[node] is the new first element of [list]");
}
/**
 * Returns (without removing) the first element of a non-empty list.
 */
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_front(const struct aws_linked_list *list) {
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    AWS_PRECONDITION(!aws_linked_list_empty(list));
    /* The head sentinel's successor is the front of the list. */
    struct aws_linked_list_node *first = list->head.next;
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    AWS_POSTCONDITION(aws_linked_list_node_prev_is_valid(first));
    AWS_POSTCONDITION(aws_linked_list_node_next_is_valid(first));
    return first;
}
/**
 * Removes the first element of a non-empty list and returns it.
 * The returned node's links are reset (not in any list).
 */
AWS_STATIC_IMPL struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list) {
    AWS_PRECONDITION(!aws_linked_list_empty(list));
    AWS_PRECONDITION(aws_linked_list_is_valid(list));
    struct aws_linked_list_node *first = aws_linked_list_front(list);
    aws_linked_list_remove(first);
    AWS_POSTCONDITION(first->next == NULL && first->prev == NULL);
    AWS_POSTCONDITION(aws_linked_list_is_valid(list));
    return first;
}
/* Exchanges the entire contents of lists a and b (sentinels stay put; the
 * node chains are re-homed). a and b must be distinct lists (see AWS_RESTRICT
 * and the a != b precondition). a's old endpoints are snapshotted first
 * because the first splice overwrites them. */
AWS_STATIC_IMPL void aws_linked_list_swap_contents(
    struct aws_linked_list *AWS_RESTRICT a,
    struct aws_linked_list *AWS_RESTRICT b) {
    AWS_PRECONDITION(aws_linked_list_is_valid(a));
    AWS_PRECONDITION(aws_linked_list_is_valid(b));
    AWS_PRECONDITION(a != b);
    struct aws_linked_list_node *a_first = a->head.next;
    struct aws_linked_list_node *a_last = a->tail.prev;
    /* Move B's contents into A */
    if (aws_linked_list_empty(b)) {
        aws_linked_list_init(a);
    } else {
        a->head.next = b->head.next;
        a->head.next->prev = &a->head;
        a->tail.prev = b->tail.prev;
        a->tail.prev->next = &a->tail;
    }
    /* Move A's old contents into B */
    if (a_first == &a->tail) {
        /* a was empty (its head's successor was its own tail sentinel) */
        aws_linked_list_init(b);
    } else {
        b->head.next = a_first;
        b->head.next->prev = &b->head;
        b->tail.prev = a_last;
        b->tail.prev->next = &b->tail;
    }
    AWS_POSTCONDITION(aws_linked_list_is_valid(a));
    AWS_POSTCONDITION(aws_linked_list_is_valid(b));
}
/* Splices every node of src onto the end of dst, leaving src empty.
 * Validation aside, a no-op when src is already empty. */
AWS_STATIC_IMPL void aws_linked_list_move_all_back(
    struct aws_linked_list *AWS_RESTRICT dst,
    struct aws_linked_list *AWS_RESTRICT src) {
    AWS_PRECONDITION(aws_linked_list_is_valid(src));
    AWS_PRECONDITION(aws_linked_list_is_valid(dst));
    AWS_PRECONDITION(dst != src);
    if (!aws_linked_list_empty(src)) {
        struct aws_linked_list_node *splice_point = dst->tail.prev; /* current back of dst */
        struct aws_linked_list_node *first = src->head.next;
        struct aws_linked_list_node *last = src->tail.prev;
        /* Link dst's old back to src's first node ... */
        splice_point->next = first;
        first->prev = splice_point;
        /* ... and src's last node to dst's tail sentinel. */
        last->next = &dst->tail;
        dst->tail.prev = last;
        /* Leave src as a valid empty list. */
        src->head.next = &src->tail;
        src->tail.prev = &src->head;
    }
    AWS_POSTCONDITION(aws_linked_list_is_valid(src));
    AWS_POSTCONDITION(aws_linked_list_is_valid(dst));
}
/* Splices every node of src onto the front of dst, leaving src empty.
 * Validation aside, a no-op when src is already empty. */
AWS_STATIC_IMPL void aws_linked_list_move_all_front(
    struct aws_linked_list *AWS_RESTRICT dst,
    struct aws_linked_list *AWS_RESTRICT src) {
    AWS_PRECONDITION(aws_linked_list_is_valid(src));
    AWS_PRECONDITION(aws_linked_list_is_valid(dst));
    AWS_PRECONDITION(dst != src);
    if (!aws_linked_list_empty(src)) {
        struct aws_linked_list_node *old_front = dst->head.next; /* current front of dst */
        struct aws_linked_list_node *first = src->head.next;
        struct aws_linked_list_node *last = src->tail.prev;
        /* Link dst's head sentinel to src's first node ... */
        dst->head.next = first;
        first->prev = &dst->head;
        /* ... and src's last node to dst's old front. */
        last->next = old_front;
        old_front->prev = last;
        /* Leave src as a valid empty list. */
        src->head.next = &src->tail;
        src->tail.prev = &src->head;
    }
    AWS_POSTCONDITION(aws_linked_list_is_valid(src));
    AWS_POSTCONDITION(aws_linked_list_is_valid(dst));
}
/* True when both of the node's neighbor links are well-formed, i.e. the node
 * is currently attached to a list. */
AWS_STATIC_IMPL bool aws_linked_list_node_is_in_list(struct aws_linked_list_node *node) {
    if (!aws_linked_list_node_prev_is_valid(node)) {
        return false;
    }
    return aws_linked_list_node_next_is_valid(node);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_LINKED_LIST_INL */

View File

@@ -0,0 +1,75 @@
#ifndef AWS_COMMON_LOG_CHANNEL_H
#define AWS_COMMON_LOG_CHANNEL_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/logging.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_string;
struct aws_log_writer;
/*
* Log channel interface and default implementations
*
* A log channel is an abstraction for the transfer of formatted log data between a source (formatter)
* and a sink (writer).
*/
struct aws_log_channel;
typedef int(aws_log_channel_send_fn)(struct aws_log_channel *channel, struct aws_string *output);
typedef void(aws_log_channel_clean_up_fn)(struct aws_log_channel *channel);
struct aws_log_channel_vtable {
    aws_log_channel_send_fn *send;         /* deliver one formatted line (aws_string) to the channel */
    aws_log_channel_clean_up_fn *clean_up; /* release resources owned by the channel implementation */
};
struct aws_log_channel {
    struct aws_log_channel_vtable *vtable;
    struct aws_allocator *allocator;
    struct aws_log_writer *writer; /* sink lines are forwarded to; not owned by the channel */
    void *impl;                    /* implementation-private state */
};
AWS_EXTERN_C_BEGIN
/*
* Simple channel that results in log lines being written in the same thread they were generated in.
*
* The passed in log writer is not an ownership transfer. The log channel does not clean up the writer.
*/
AWS_COMMON_API
int aws_log_channel_init_foreground(
struct aws_log_channel *channel,
struct aws_allocator *allocator,
struct aws_log_writer *writer);
/*
* Simple channel that sends log lines to a background thread.
*
* The passed in log writer is not an ownership transfer. The log channel does not clean up the writer.
*/
AWS_COMMON_API
int aws_log_channel_init_background(
struct aws_log_channel *channel,
struct aws_allocator *allocator,
struct aws_log_writer *writer);
/*
* Channel cleanup function
*/
AWS_COMMON_API
void aws_log_channel_clean_up(struct aws_log_channel *channel);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LOG_CHANNEL_H */

View File

@@ -0,0 +1,98 @@
#ifndef AWS_COMMON_LOG_FORMATTER_H
#define AWS_COMMON_LOG_FORMATTER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/date_time.h>
#include <aws/common/logging.h>
#include <stdarg.h>
#include <stdio.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_allocator;
struct aws_string;
/*
* Log formatter interface and default implementation
*
* Log formatters are invoked by the LOGF_* macros to transform a set of arguments into
* one or more lines of text to be output to a logging sink (writer).
*/
struct aws_log_formatter;
typedef int(aws_log_formatter_format_fn)(
struct aws_log_formatter *formatter,
struct aws_string **formatted_output,
enum aws_log_level level,
aws_log_subject_t subject,
const char *format,
va_list args);
typedef void(aws_log_formatter_clean_up_fn)(struct aws_log_formatter *logger);
struct aws_log_formatter_vtable {
    aws_log_formatter_format_fn *format;     /* build a formatted line from level/subject/format + varargs */
    aws_log_formatter_clean_up_fn *clean_up; /* release resources owned by the formatter implementation */
};
struct aws_log_formatter {
    struct aws_log_formatter_vtable *vtable;
    struct aws_allocator *allocator;
    void *impl; /* implementation-private state */
};
struct aws_log_formatter_standard_options {
    enum aws_date_format date_format; /* timestamp style used by the default formatter */
};
/* In/out state for aws_format_standard_log_line(): the formatter writes into a
 * caller-supplied fixed-size buffer (see that function's doc comment below). */
struct aws_logging_standard_formatting_data {
    char *log_line_buffer; /* caller-supplied output buffer */
    size_t total_length;   /* NOTE(review): presumably the capacity of log_line_buffer — confirm in impl */
    enum aws_log_level level;
    const char *subject_name;
    const char *format; /* printf-style user format string */
    enum aws_date_format date_format;
    struct aws_allocator *allocator; /* not used, just there to make byte_bufs valid */
    size_t amount_written; /* NOTE(review): presumably bytes produced into log_line_buffer — confirm in impl */
};
AWS_EXTERN_C_BEGIN
/*
* Initializes the default log formatter which outputs lines in the format:
*
* [<LogLevel>] [<Timestamp>] [<ThreadId>] - <User content>\n
*/
AWS_COMMON_API
int aws_log_formatter_init_default(
struct aws_log_formatter *formatter,
struct aws_allocator *allocator,
struct aws_log_formatter_standard_options *options);
/*
* Cleans up a log formatter (minus the base structure memory) by calling the formatter's clean_up function
* via the vtable.
*/
AWS_COMMON_API
void aws_log_formatter_clean_up(struct aws_log_formatter *formatter);
/*
* Formats a single log line based on the input + the var args list. Output is written to a fixed-size
* buffer supplied in the data struct.
*/
AWS_COMMON_API
int aws_format_standard_log_line(struct aws_logging_standard_formatting_data *formatting_data, va_list args);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LOG_FORMATTER_H */

View File

@@ -0,0 +1,76 @@
#ifndef AWS_COMMON_LOG_WRITER_H
#define AWS_COMMON_LOG_WRITER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_allocator;
struct aws_string;
/*
* Log writer interface and default implementation(s)
*
* A log writer functions as a sink for formatted log lines. We provide
* default implementations that go to stdout, stderr, and a specified file.
*/
struct aws_log_writer;
typedef int(aws_log_writer_write_fn)(struct aws_log_writer *writer, const struct aws_string *output);
typedef void(aws_log_writer_clean_up_fn)(struct aws_log_writer *writer);
struct aws_log_writer_vtable {
    aws_log_writer_write_fn *write;       /* output one formatted line (aws_string) to the sink */
    aws_log_writer_clean_up_fn *clean_up; /* release resources owned by the writer implementation */
};
struct aws_log_writer {
    struct aws_log_writer_vtable *vtable;
    struct aws_allocator *allocator;
    void *impl; /* implementation-private state */
};
/* NOTE(review): presumably set exactly one of `filename` (writer opens/closes it)
 * or `file` (already-open stream, e.g. stderr) — confirm against aws_log_writer_init_file. */
struct aws_log_writer_file_options {
    const char *filename;
    FILE *file;
};
AWS_EXTERN_C_BEGIN
/*
* Initialize a log writer that sends log lines to stdout. Uses C library IO.
*/
AWS_COMMON_API
int aws_log_writer_init_stdout(struct aws_log_writer *writer, struct aws_allocator *allocator);
/*
* Initialize a log writer that sends log lines to stderr. Uses C library IO.
*/
AWS_COMMON_API
int aws_log_writer_init_stderr(struct aws_log_writer *writer, struct aws_allocator *allocator);
/*
* Initialize a log writer that sends log lines to a file. Uses C library IO.
*/
AWS_COMMON_API
int aws_log_writer_init_file(
struct aws_log_writer *writer,
struct aws_allocator *allocator,
struct aws_log_writer_file_options *options);
/*
* Frees all resources used by a log writer with the exception of the base structure memory
*/
AWS_COMMON_API
void aws_log_writer_clean_up(struct aws_log_writer *writer);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LOG_WRITER_H */

View File

@@ -0,0 +1,360 @@
#ifndef AWS_COMMON_LOGGING_H
#define AWS_COMMON_LOGGING_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/atomics.h>
#include <aws/common/common.h>
#include <aws/common/thread.h>
AWS_PUSH_SANE_WARNING_LEVEL
#define AWS_LOG_LEVEL_NONE 0
#define AWS_LOG_LEVEL_FATAL 1
#define AWS_LOG_LEVEL_ERROR 2
#define AWS_LOG_LEVEL_WARN 3
#define AWS_LOG_LEVEL_INFO 4
#define AWS_LOG_LEVEL_DEBUG 5
#define AWS_LOG_LEVEL_TRACE 6
/**
* Controls what log calls pass through the logger and what log calls get filtered out.
* If a log level has a value of X, then all log calls using a level <= X will appear, while
* those using a value > X will not occur.
*
* You can filter both dynamically (by setting the log level on the logger object) or statically
* (by defining AWS_STATIC_LOG_LEVEL to be an appropriate integer module-wide). Statically filtered
* log calls will be completely compiled out but require a rebuild if you want to get more detail
* about what's happening.
*/
enum aws_log_level {
AWS_LL_NONE = AWS_LOG_LEVEL_NONE,
AWS_LL_FATAL = AWS_LOG_LEVEL_FATAL,
AWS_LL_ERROR = AWS_LOG_LEVEL_ERROR,
AWS_LL_WARN = AWS_LOG_LEVEL_WARN,
AWS_LL_INFO = AWS_LOG_LEVEL_INFO,
AWS_LL_DEBUG = AWS_LOG_LEVEL_DEBUG,
AWS_LL_TRACE = AWS_LOG_LEVEL_TRACE,
AWS_LL_COUNT
};
/**
* Log subject is a way of designating the topic of logging.
*
* The general idea is to support a finer-grained approach to log level control. The primary use case
* is for situations that require more detailed logging within a specific domain, where enabling that detail
* globally leads to an untenable flood of information.
*
* For example, enable TRACE logging for tls-related log statements (handshake binary payloads), but
* only WARN logging everywhere else (because http payloads would blow up the log files).
*
* Log subject is an enum similar to aws error: each library has its own value-space and someone is
* responsible for registering the value <-> string connections.
*/
typedef uint32_t aws_log_subject_t;
/* Each library gets space for 2^10 log subject entries */
enum {
AWS_LOG_SUBJECT_STRIDE_BITS = 10,
};
#define AWS_LOG_SUBJECT_STRIDE (1U << AWS_LOG_SUBJECT_STRIDE_BITS)
#define AWS_LOG_SUBJECT_BEGIN_RANGE(x) ((x) * AWS_LOG_SUBJECT_STRIDE)
#define AWS_LOG_SUBJECT_END_RANGE(x) (((x) + 1) * AWS_LOG_SUBJECT_STRIDE - 1)
struct aws_log_subject_info {
aws_log_subject_t subject_id;
const char *subject_name;
const char *subject_description;
};
#define DEFINE_LOG_SUBJECT_INFO(id, name, desc) \
{.subject_id = (id), .subject_name = (name), .subject_description = (desc)}
struct aws_log_subject_info_list {
struct aws_log_subject_info *subject_list;
size_t count;
};
enum aws_common_log_subject {
AWS_LS_COMMON_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID),
AWS_LS_COMMON_TASK_SCHEDULER,
AWS_LS_COMMON_THREAD,
AWS_LS_COMMON_MEMTRACE,
AWS_LS_COMMON_XML_PARSER,
AWS_LS_COMMON_IO,
AWS_LS_COMMON_BUS,
AWS_LS_COMMON_TEST,
AWS_LS_COMMON_JSON_PARSER,
AWS_LS_COMMON_CBOR,
AWS_LS_COMMON_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
struct aws_logger;
struct aws_log_formatter;
struct aws_log_channel;
struct aws_log_writer;
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4623) /* default constructor was implicitly defined as deleted */
# pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */
# pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */
#endif
/**
* We separate the log level function from the log call itself so that we can do the filter check in the macros (see
* below)
*
* By doing so, we make it so that the variadic format arguments are not even evaluated if the filter check does not
* succeed.
*/
struct aws_logger_vtable {
    /* Emit a single log statement; printf-style format string + varargs. */
    int (*const log)(
        struct aws_logger *logger,
        enum aws_log_level log_level,
        aws_log_subject_t subject,
        const char *format,
        ...)
#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
    /* lets the compiler type-check the format string against the varargs */
    __attribute__((format(printf, 4, 5)))
#endif /* non-ms compilers: TODO - find out what versions format support was added in */
    ;
    /* Current level filter for the given subject; AWS_LOGF consults this before
     * evaluating any of the log call's arguments. */
    enum aws_log_level (*const get_log_level)(struct aws_logger *logger, aws_log_subject_t subject);
    /* Release resources owned by the logger implementation. */
    void (*const clean_up)(struct aws_logger *logger);
    /* Dynamic level change; optional — loggers are not required to support this
     * (see aws_logger_set_log_level below). */
    int (*set_log_level)(struct aws_logger *logger, enum aws_log_level);
};
#ifdef _MSC_VER
# pragma warning(pop)
#endif
struct aws_logger {
struct aws_logger_vtable *vtable;
struct aws_allocator *allocator;
void *p_impl;
};
/**
* The base formatted logging macro that all other formatted logging macros resolve to.
* Checks for a logger and filters based on log level.
*/
#define AWS_LOGF(log_level, subject, ...) \
do { \
AWS_ASSERT(log_level > 0); \
struct aws_logger *logger = aws_logger_get(); \
if (logger != NULL && logger->vtable->get_log_level(logger, (subject)) >= (log_level)) { \
logger->vtable->log(logger, log_level, subject, __VA_ARGS__); \
} \
} while (0)
/**
 * Unconditional logging macro that takes a logger and does not do a level check or a null check. Intended for
 * situations when you need to log many things and do a single manual level check before beginning.
 *
 * Wrapped in do { } while (0) so the expansion is a single statement: a bare
 * { } block followed by the caller's ';' breaks unbraced if/else chains
 * (CERT C PRE10-C). Arguments are parenthesized against operator-precedence
 * surprises when callers pass expressions.
 */
#define AWS_LOGUF(logger, log_level, subject, ...)                                                                     \
    do {                                                                                                               \
        (logger)->vtable->log((logger), (log_level), (subject), __VA_ARGS__);                                          \
    } while (0)
/**
* LOGF_<level> variants for each level. These are what should be used directly to do all logging.
*
* i.e.
*
* LOGF_FATAL("Device \"%s\" not found", device->name);
*
*
* Later we will likely expose Subject-aware variants
*/
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_FATAL)
# define AWS_LOGF_FATAL(subject, ...) AWS_LOGF(AWS_LL_FATAL, subject, __VA_ARGS__)
#else
# define AWS_LOGF_FATAL(subject, ...)
#endif
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_ERROR)
# define AWS_LOGF_ERROR(subject, ...) AWS_LOGF(AWS_LL_ERROR, subject, __VA_ARGS__)
#else
# define AWS_LOGF_ERROR(subject, ...)
#endif
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_WARN)
# define AWS_LOGF_WARN(subject, ...) AWS_LOGF(AWS_LL_WARN, subject, __VA_ARGS__)
#else
# define AWS_LOGF_WARN(subject, ...)
#endif
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_INFO)
# define AWS_LOGF_INFO(subject, ...) AWS_LOGF(AWS_LL_INFO, subject, __VA_ARGS__)
#else
# define AWS_LOGF_INFO(subject, ...)
#endif
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_DEBUG)
# define AWS_LOGF_DEBUG(subject, ...) AWS_LOGF(AWS_LL_DEBUG, subject, __VA_ARGS__)
#else
# define AWS_LOGF_DEBUG(subject, ...)
#endif
#if !defined(AWS_STATIC_LOG_LEVEL) || (AWS_STATIC_LOG_LEVEL >= AWS_LOG_LEVEL_TRACE)
# define AWS_LOGF_TRACE(subject, ...) AWS_LOGF(AWS_LL_TRACE, subject, __VA_ARGS__)
#else
# define AWS_LOGF_TRACE(subject, ...)
#endif
/*
* Standard logger implementation composing three sub-components:
*
* The formatter takes var args input from the user and produces a formatted log line
* The writer takes a formatted log line and outputs it somewhere
* The channel is the transport between the two
*/
struct aws_logger_pipeline {
struct aws_log_formatter *formatter;
struct aws_log_channel *channel;
struct aws_log_writer *writer;
struct aws_allocator *allocator;
struct aws_atomic_var level;
};
/**
* Options for aws_logger_init_standard().
* Set `filename` to open a file for logging and close it when the logger cleans up.
* Set `file` to use a file that is already open, such as `stderr` or `stdout`.
*/
struct aws_logger_standard_options {
enum aws_log_level level;
const char *filename;
FILE *file;
};
AWS_EXTERN_C_BEGIN
/**
* Sets the aws logger used globally across the process. Not thread-safe. Must only be called once.
*/
AWS_COMMON_API
void aws_logger_set(struct aws_logger *logger);
/**
* Gets the aws logger used globally across the process.
*/
AWS_COMMON_API
struct aws_logger *aws_logger_get(void);
/**
* Gets the aws logger used globally across the process if the logging level is at least the inputted level.
*
* @param subject log subject to perform the level check versus, not currently used
* @param level logging level to check against in order to return the logger
* @return the current logger if the current logging level is at or more detailed than the supplied logging level
*/
AWS_COMMON_API
struct aws_logger *aws_logger_get_conditional(aws_log_subject_t subject, enum aws_log_level level);
/**
* Cleans up all resources used by the logger; simply invokes the clean_up v-function
*/
AWS_COMMON_API
void aws_logger_clean_up(struct aws_logger *logger);
/**
* Sets the current logging level for the logger. Loggers are not required to support this.
* @param logger logger to set the log level for
* @param level new log level for the logger
* @return AWS_OP_SUCCESS if the level was successfully set, AWS_OP_ERR otherwise
*/
AWS_COMMON_API
int aws_logger_set_log_level(struct aws_logger *logger, enum aws_log_level level);
/**
* Converts a log level to a c-string constant. Intended primarily to support building log lines that
* include the level in them, i.e.
*
* [ERROR] 10:34:54.642 01-31-19 - Json parse error....
*/
AWS_COMMON_API
int aws_log_level_to_string(enum aws_log_level log_level, const char **level_string);
/**
* Converts a c-string constant to a log level value. Uses case-insensitive comparison
* and simply iterates all possibilities until a match or nothing remains. If no match
* is found, AWS_OP_ERR is returned.
*/
AWS_COMMON_API
int aws_string_to_log_level(const char *level_string, enum aws_log_level *log_level);
/**
* Converts an aws_thread_id_t to a c-string. For portability, aws_thread_id_t
* must not be printed directly. Intended primarily to support building log
* lines that include the thread id in them. The parameter `buffer` must
* point to a char buffer of length `bufsz == AWS_THREAD_ID_T_REPR_BUFSZ`. The
* thread id representation is returned in `buffer`.
*/
AWS_COMMON_API
int aws_thread_id_t_to_string(aws_thread_id_t thread_id, char *buffer, size_t bufsz);
/**
* Get subject name from log subject.
*/
AWS_COMMON_API
const char *aws_log_subject_name(aws_log_subject_t subject);
/**
* Connects log subject strings with log subject integer values
*/
AWS_COMMON_API
void aws_register_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list);
/**
* Disconnects log subject strings with log subject integer values
*/
AWS_COMMON_API
void aws_unregister_log_subject_info_list(struct aws_log_subject_info_list *log_subject_list);
/*
* Initializes a pipeline logger that is built from the default formatter, a background thread-based channel, and
* a file writer. The default logger in almost all circumstances.
*/
AWS_COMMON_API
int aws_logger_init_standard(
struct aws_logger *logger,
struct aws_allocator *allocator,
struct aws_logger_standard_options *options);
/*
* Initializes a pipeline logger from components that have already been initialized. This is not an ownership transfer.
* After the pipeline logger is cleaned up, the components will have to manually be cleaned up by the user.
*/
AWS_COMMON_API
int aws_logger_init_from_external(
struct aws_logger *logger,
struct aws_allocator *allocator,
struct aws_log_formatter *formatter,
struct aws_log_channel *channel,
struct aws_log_writer *writer,
enum aws_log_level level);
/*
* Pipeline logger vtable for custom configurations
*/
AWS_COMMON_API
extern struct aws_logger_vtable g_pipeline_logger_owned_vtable;
/*
* Initializes a logger that does not perform any allocation during logging. Log lines larger than the internal
* constant are truncated. Formatting matches the standard logger. Used for memory tracing logging.
* If no file or filename is set in the aws_logger_standard_options, then it will use stderr.
*/
AWS_COMMON_API
int aws_logger_init_noalloc(
struct aws_logger *logger,
struct aws_allocator *allocator,
struct aws_logger_standard_options *options);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LOGGING_H */

View File

@@ -0,0 +1,44 @@
#ifndef AWS_COMMON_LRU_CACHE_H
#define AWS_COMMON_LRU_CACHE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/cache.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Initializes the Least-recently-used cache. Sets up the underlying linked hash table.
* Once `max_items` elements have been added, the least recently used item will be removed. For the other parameters,
* see aws/common/hash_table.h. Hash table semantics of these arguments are preserved. (Yes, the one that was the
* answer to that interview question that one time.)
*/
AWS_COMMON_API
struct aws_cache *aws_cache_new_lru(
struct aws_allocator *allocator,
aws_hash_fn *hash_fn,
aws_hash_callback_eq_fn *equals_fn,
aws_hash_callback_destroy_fn *destroy_key_fn,
aws_hash_callback_destroy_fn *destroy_value_fn,
size_t max_items);
/**
* Accesses the least-recently-used element, sets it to most-recently-used
* element, and returns the value.
*/
AWS_COMMON_API
void *aws_lru_cache_use_lru_element(struct aws_cache *cache);
/**
* Accesses the most-recently-used element and returns its value.
*/
AWS_COMMON_API
void *aws_lru_cache_get_mru_element(const struct aws_cache *cache);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_LRU_CACHE_H */

View File

@@ -0,0 +1,183 @@
#ifndef AWS_COMMON_MACROS_H
#define AWS_COMMON_MACROS_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/* clang-format off */
/* Use these macros in public header files to suppress unreasonable compiler
* warnings. Public header files are included by external applications,
* which may set their warning levels pedantically high.
*
* Developers of AWS libraries should hesitate before adding more warnings to this macro.
* Prefer disabling the warning within a .c file, or in the library's CFLAGS,
* or push/pop the warning around a single problematic declaration. */
#if defined(_MSC_VER)
# define AWS_PUSH_SANE_WARNING_LEVEL \
__pragma(warning(push)) \
__pragma(warning(disable : 4820)) /* padding added to struct */ \
__pragma(warning(disable : 4514)) /* unreferenced inline function has been removed */ \
__pragma(warning(disable : 5039)) /* reference to potentially throwing function passed to extern C function */
# define AWS_POP_SANE_WARNING_LEVEL __pragma(warning(pop))
#else
# define AWS_PUSH_SANE_WARNING_LEVEL
# define AWS_POP_SANE_WARNING_LEVEL
#endif
/* clang-format on */
#ifdef __cplusplus
# define AWS_EXTERN_C_BEGIN extern "C" {
# define AWS_EXTERN_C_END }
#else
# define AWS_EXTERN_C_BEGIN
# define AWS_EXTERN_C_END
#endif /* __cplusplus */
#define AWS_CONCAT(A, B) A##B
#define AWS_STATIC_ASSERT0(cond, msg) typedef char AWS_CONCAT(static_assertion_, msg)[(!!(cond)) * 2 - 1]
#define AWS_STATIC_ASSERT1(cond, line) AWS_STATIC_ASSERT0(cond, AWS_CONCAT(at_line_, line))
#define AWS_STATIC_ASSERT(cond) AWS_STATIC_ASSERT1(cond, __LINE__)
/* https://stackoverflow.com/questions/9183993/msvc-variadic-macro-expansion */
#define GLUE(x, y) x y
#define RETURN_ARG_COUNT(_1_, _2_, _3_, _4_, _5_, count, ...) count
#define EXPAND_ARGS(args) RETURN_ARG_COUNT args
#define COUNT_ARGS_MAX5(...) EXPAND_ARGS((__VA_ARGS__, 5, 4, 3, 2, 1, 0))
#define OVERLOAD_MACRO2(name, count) name##count
#define OVERLOAD_MACRO1(name, count) OVERLOAD_MACRO2(name, count)
#define OVERLOAD_MACRO(name, count) OVERLOAD_MACRO1(name, count)
#define CALL_OVERLOAD(name, ...) GLUE(OVERLOAD_MACRO(name, COUNT_ARGS_MAX5(__VA_ARGS__)), (__VA_ARGS__))
#define CALL_OVERLOAD_TEST1(x) x
#define CALL_OVERLOAD_TEST2(x, y) y
#define CALL_OVERLOAD_TEST3(x, y, z) z
#define CALL_OVERLOAD_TEST(...) CALL_OVERLOAD(CALL_OVERLOAD_TEST, __VA_ARGS__)
AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1) == 1);
AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2) == 2);
AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3);
enum { AWS_CACHE_LINE = 64 };
/**
* Format macro for strings of a specified length.
* Allows non null-terminated strings to be used with the printf family of functions.
* Ex: printf("scheme is " PRInSTR, 4, "http://example.org"); // outputs: "scheme is http"
*/
#define PRInSTR "%.*s"
#if defined(_MSC_VER)
# include <malloc.h>
# define AWS_ALIGNED_TYPEDEF(from, to, alignment) typedef __declspec(align(alignment)) from to
# define AWS_LIKELY(x) x
# define AWS_UNLIKELY(x) x
# define AWS_FORCE_INLINE __forceinline
# define AWS_NO_INLINE __declspec(noinline)
# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = _alloca(sizeof(type) * (length))
# define AWS_DECLSPEC_NORETURN __declspec(noreturn)
# define AWS_ATTRIBUTE_NORETURN
#else
# if defined(__GNUC__) || defined(__clang__)
# define AWS_ALIGNED_TYPEDEF(from, to, alignment) typedef from to __attribute__((aligned(alignment)))
# define AWS_TYPE_OF(a) __typeof__(a)
# define AWS_LIKELY(x) __builtin_expect(!!(x), 1)
# define AWS_UNLIKELY(x) __builtin_expect(!!(x), 0)
# define AWS_FORCE_INLINE __attribute__((always_inline))
# define AWS_NO_INLINE __attribute__((noinline))
# define AWS_DECLSPEC_NORETURN
# define AWS_ATTRIBUTE_NORETURN __attribute__((noreturn))
# if defined(__cplusplus)
# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = alloca(sizeof(type) * (length))
# else
# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type name[length]
# endif /* defined(__cplusplus) */
# endif /* defined(__GNUC__) || defined(__clang__) */
#endif /* defined(_MSC_VER) */
#if defined(__has_feature)
# if __has_feature(address_sanitizer)
# define AWS_SUPPRESS_ASAN __attribute__((no_sanitize("address")))
# endif
#elif defined(__SANITIZE_ADDRESS__)
# if defined(__GNUC__)
# define AWS_SUPPRESS_ASAN __attribute__((no_sanitize_address))
# elif defined(_MSC_VER)
# define AWS_SUPPRESS_ASAN __declspec(no_sanitize_address)
# endif
#endif
#if !defined(AWS_SUPPRESS_ASAN)
# define AWS_SUPPRESS_ASAN
#endif
#if defined(__has_feature)
# if __has_feature(thread_sanitizer)
# define AWS_SUPPRESS_TSAN __attribute__((no_sanitize("thread")))
# endif
#elif defined(__SANITIZE_THREAD__)
# if defined(__GNUC__)
# define AWS_SUPPRESS_TSAN __attribute__((no_sanitize_thread))
# else
# define AWS_SUPPRESS_TSAN
# endif
#else
# define AWS_SUPPRESS_TSAN
#endif
#if !defined(AWS_SUPPRESS_TSAN)
# define AWS_SUPPRESS_TSAN
#endif
#if defined(__has_feature)
# if __has_feature(undefined_behavior_sanitizer)
# define AWS_SUPPRESS_UBSAN __attribute__((no_sanitize("undefined")))
# endif
#elif defined(__SANITIZE_UNDEFINED__)
# if defined(__GNUC__)
# define AWS_SUPPRESS_UBSAN __attribute__((no_sanitize_undefined))
# else
# define AWS_SUPPRESS_UBSAN
# endif
#else
# define AWS_SUPPRESS_UBSAN
#endif
#if !defined(AWS_SUPPRESS_UBSAN)
# define AWS_SUPPRESS_UBSAN
#endif
/* If this is C++, restrict isn't supported. If this is not at least C99 on gcc and clang, it isn't supported.
 * If visual C++ building in C mode, the restrict definition is __restrict.
 * This just figures all of that out based on who's including this header file. */
#if defined(__cplusplus)
#    define AWS_RESTRICT
#else
#    if defined(_MSC_VER)
#        define AWS_RESTRICT __restrict
#    else
#        if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#            define AWS_RESTRICT restrict
#        else
#            define AWS_RESTRICT
#        endif /* defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L */
#    endif /* defined(_MSC_VER) */
#endif /* defined(__cplusplus) */
/*
 * AWS_THREAD_LOCAL: thread-local storage specifier. MSVC spells it
 * __declspec(thread); GCC/clang use __thread.
 */
#if defined(_MSC_VER)
#    define AWS_THREAD_LOCAL __declspec(thread)
#else
#    define AWS_THREAD_LOCAL __thread
#endif
/*
 * Number of elements in a true array. NOTE: only valid on actual arrays —
 * a pointer (including an array function parameter, which decays to a
 * pointer) gives a meaningless result.
 */
#define AWS_ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
/**
 * from a pointer and a type of the struct containing the node
 * this will get you back to the pointer of the object. member is the name of
 * the instance of struct aws_linked_list_node in your struct.
 * (Standard container_of idiom: subtract the member's byte offset from the
 * member pointer to recover the enclosing struct pointer.)
 */
#define AWS_CONTAINER_OF(ptr, type, member) ((type *)((uint8_t *)(ptr) - offsetof(type, member)))
#endif /* AWS_COMMON_MACROS_H */

View File

@@ -0,0 +1,107 @@
#ifndef AWS_COMMON_MATH_CBMC_INL
#define AWS_COMMON_MATH_CBMC_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
AWS_EXTERN_C_BEGIN
/* This header does safe operations. Suppressing the checks within these functions
 * avoids unnecessary CBMC assertions.
 */
#pragma CPROVER check push
#pragma CPROVER check disable "unsigned-overflow"
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    /* __CPROVER_overflow_mult is a CBMC intrinsic: true iff a * b overflows. */
    if (__CPROVER_overflow_mult(a, b))
        return UINT64_MAX;
    return a * b;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* On overflow, *r is left untouched (matches the fallback implementation). */
    if (__CPROVER_overflow_mult(a, b))
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    *r = a * b;
    return AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
    if (__CPROVER_overflow_mult(a, b))
        return UINT32_MAX;
    return a * b;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    if (__CPROVER_overflow_mult(a, b))
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    *r = a * b;
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
    /* __CPROVER_overflow_plus is a CBMC intrinsic: true iff a + b overflows. */
    if (__CPROVER_overflow_plus(a, b))
        return UINT64_MAX;
    return a + b;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* On overflow, *r is left untouched (matches the fallback implementation). */
    if (__CPROVER_overflow_plus(a, b))
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    *r = a + b;
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
    if (__CPROVER_overflow_plus(a, b))
        return UINT32_MAX;
    return a + b;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    if (__CPROVER_overflow_plus(a, b))
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    *r = a + b;
    return AWS_OP_SUCCESS;
}
#pragma CPROVER check pop
AWS_EXTERN_C_END
#endif /* AWS_COMMON_MATH_CBMC_INL */

View File

@@ -0,0 +1,207 @@
#ifndef AWS_COMMON_MATH_FALLBACK_INL
#define AWS_COMMON_MATH_FALLBACK_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
AWS_EXTERN_C_BEGIN
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    /* a * b overflows exactly when b != 0 and a > UINT64_MAX / b
     * (when a == 0 the comparison is trivially false). */
    if (b != 0 && a > UINT64_MAX / b) {
        return UINT64_MAX;
    }
    return a * b;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* Division-based overflow test; *r is untouched on failure. */
    if (b != 0 && a > UINT64_MAX / b) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = a * b;
    return AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
    /* a * b overflows exactly when b != 0 and a > UINT32_MAX / b. */
    if (b != 0 && a > UINT32_MAX / b) {
        return UINT32_MAX;
    }
    return a * b;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* Division-based overflow test; *r is untouched on failure. */
    if (b != 0 && a > UINT32_MAX / b) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = a * b;
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
    /* Unsigned addition wraps (defined behavior); the wrapped sum is
     * smaller than either operand iff overflow occurred. */
    uint64_t sum = a + b;
    return (sum < a) ? UINT64_MAX : sum;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* Wrap-around detection; *r is untouched on failure. */
    uint64_t sum = a + b;
    if (sum < a) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = sum;
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
    /* The sum, reduced mod 2^32 by the uint32_t store, is smaller than
     * either operand iff overflow occurred. */
    uint32_t sum = a + b;
    return (sum < a) ? UINT32_MAX : sum;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* Wrap-around detection; *r is untouched on failure. */
    uint32_t sum = a + b;
    if (sum < a) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = sum;
    return AWS_OP_SUCCESS;
}
/*
* These are pure C implementations of the count leading/trailing zeros calls
* They should not be necessary unless using a really esoteric compiler with
* no intrinsics for these functions whatsoever.
*/
#if !defined(__clang__) && !defined(__GNUC__)
/**
 * Search from the MSB to LSB, looking for a 1
 */
/* Delegates to the signed variant, which returns 0 when the (sign) top bit
 * is set — the correct clz for any value >= 0x80000000. Conversion of such
 * values to int32_t is implementation-defined pre-C23, though two's
 * complement wrap is universal in practice. */
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) {
    return aws_clz_i32((int32_t)n);
}
/* Count leading zero bits of n; returns 32 for n == 0, 0 for negative n. */
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    /* sign bit is the first bit */
    if (n < 0) {
        return 0;
    }
    /* Shift in unsigned arithmetic: the original signed `n <<= 1` eventually
     * shifts a 1 into the sign bit, which is undefined behavior in C. */
    size_t idx = 0;
    uint32_t v = (uint32_t)n;
    while ((v & 0x80000000u) == 0) {
        ++idx;
        v <<= 1;
    }
    return idx;
}
/* Delegates to the signed variant, which returns 0 when the top bit is set. */
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) {
    return aws_clz_i64((int64_t)n);
}
/* Count leading zero bits of n; returns 64 for n == 0, 0 for negative n. */
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    /* sign bit is the first bit */
    if (n < 0) {
        return 0;
    }
    /* Shift in unsigned arithmetic: the original signed `n <<= 1` eventually
     * shifts a 1 into the sign bit, which is undefined behavior in C. */
    size_t idx = 0;
    uint64_t v = (uint64_t)n;
    while ((v & 0x8000000000000000ULL) == 0) {
        ++idx;
        v <<= 1;
    }
    return idx;
}
/* Width-dispatching clz for size_t (SIZE_BITS is 32 or 64, from math.h). */
AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
#    if SIZE_BITS == 64
    return aws_clz_u64(n);
#    else
    return aws_clz_u32(n);
#    endif
}
/**
 * Search from the LSB to MSB, looking for a 1
 */
/* Delegates to the signed variant; the bit pattern (and therefore the
 * trailing-zero count) is unchanged by the cast. */
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) {
    return aws_ctz_i32((int32_t)n);
}
/* Count trailing zero bits of n; returns 32 for n == 0. */
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    /* Probe with an unsigned mask: the original `1 << idx` is undefined
     * behavior at idx == 31 (signed shift into the sign bit). The bound is
     * the type's width (32), not SIZE_BITS / sizeof(uint8_t), which
     * wrongly depended on the target's pointer size. */
    size_t idx = 0;
    while (idx < 32 && ((uint32_t)n & ((uint32_t)1 << idx)) == 0) {
        ++idx;
    }
    return idx;
}
/* Delegates to the signed variant; the bit pattern is unchanged by the cast. */
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) {
    return aws_ctz_i64((int64_t)n);
}
/* Count trailing zero bits of n; returns 64 for n == 0. */
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    /* The bound must be the type's width (64). The original used
     * SIZE_BITS / sizeof(uint8_t), which is 32 on 32-bit targets and would
     * miss any set bit at position >= 32 (returning 32 instead of the
     * true trailing-zero count). */
    size_t idx = 0;
    while (idx < 64 && ((uint64_t)n & (1ULL << idx)) == 0) {
        ++idx;
    }
    return idx;
}
/* Width-dispatching ctz for size_t (SIZE_BITS is 32 or 64, from math.h). */
AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) {
#    if SIZE_BITS == 64
    return aws_ctz_u64(n);
#    else
    return aws_ctz_u32(n);
#    endif
}
#endif
AWS_EXTERN_C_END
#endif /* AWS_COMMON_MATH_FALLBACK_INL */

View File

@@ -0,0 +1,184 @@
#ifndef AWS_COMMON_MATH_GCC_ARM64_ASM_INL
#define AWS_COMMON_MATH_GCC_ARM64_ASM_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
/* clang-format off */
AWS_EXTERN_C_BEGIN
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    /* We can use inline assembly to do this efficiently on arm64 by doing
       a high-mul and checking the upper 64 bits of a 64x64->128b multiply
       are zero */
    uint64_t tmp = 0, res = 0;
    /* csinv picks res when eq (high half zero), else ~xzr == UINT64_MAX,
     * which performs the saturation in a single branch-free instruction. */
    __asm__("umulh %x[hmul], %x[arga], %x[argb]\n"
            "mul %x[res], %x[arga], %x[argb]\n"
            "cmp %x[hmul], #0\n"
            "csinv %x[res], %x[res], xzr, eq\n"
            : /* inout: hmul is upper 64b, r is the result */ [hmul] "+&r"(tmp), [res]"+&r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    return res;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* We can use inline assembly to do this efficiently on arm64 by doing
       a high-mul and checking the upper 64 bits of a 64x64->128b multiply
       are zero */
    uint64_t tmp, res;
    /* tmp receives the high 64 bits; any non-zero high half means overflow.
     * Note: *r is written (with the wrapped product) even on overflow. */
    __asm__("umulh %x[hmul], %x[arga], %x[argb]\n"
            "mul %x[res], %x[arga], %x[argb]\n"
            : /* inout: hmul is upper 64b, r is the result */ [hmul] "=&r"(tmp), [res]"=&r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b));
    *r = res;
    if (tmp) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
    /* We can use inline assembly to do this efficiently on arm64 by doing
       a high-mul and checking the upper 32 bits of a 32x32->64b multiply
       are zero */
    uint64_t res = 0;
    /* umull widens to 64 bits; the cmp tests the high 32 bits, and csinv
     * substitutes ~wzr == UINT32_MAX on overflow. */
    __asm__("umull %x[res], %w[arga], %w[argb]\n"
            "cmp xzr, %x[res], lsr #32\n"
            "csinv %w[res], %w[res], wzr, eq\n"
            : /* inout: res contains both lower/upper 32b */ [res]"+&r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    return res & 0xffffffff;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* We can use inline assembly to do this efficiently on arm64 by doing
       a high-mul and checking the upper 32 bits of a 32x32->64b multiply
       are zero */
    uint64_t res;
    /* Note: *r is written (with the low 32 bits) even on overflow. */
    __asm__("umull %x[res], %w[arga], %w[argb]\n"
            : /* inout: res is both upper/lower 32b */ [res]"=r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b));
    *r = res & 0xffffffff;
    if (res >> 32) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* We can use inline assembly to do this efficiently on arm64 by doing a
     * 64b + 64b add and checking the carry out */
    uint64_t res, flag;
    /* csinv with condition cc (carry clear) yields 0 on no-carry, ~xzr (-1)
     * on carry. Note: *r is written (wrapped sum) even on overflow. */
    __asm__("adds %x[res], %x[arga], %x[argb]\n"
            "csinv %x[flag], xzr, xzr, cc\n"
            : /* inout: res is the result of addition; flag is -1 if carry happened */
              [res]"=&r"(res), [flag] "=r"(flag)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    *r = res;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
    /* We can use inline assembly to do this efficiently on arm64 by doing a
     * 64b + 64b add and checking the carry out */
    uint64_t res;
    /* csinv keeps the sum on no-carry (cc), else substitutes ~xzr == UINT64_MAX. */
    __asm__("adds %x[res], %x[arga], %x[argb]\n"
            "csinv %x[res], %x[res], xzr, cc\n"
            : /* inout: res is the result */ [res]"=&r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    return res;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* We can use inline assembly to do this efficiently on arm64 by doing a
     * 32b + 32b add and checking the carry out */
    uint32_t res, flag;
    /* Note: *r is written (wrapped sum) even on overflow. */
    __asm__("adds %w[res], %w[arga], %w[argb]\n"
            "csinv %w[flag], wzr, wzr, cc\n"
            : /* inout: res is 32b result */ [res]"=&r"(res), [flag] "=r"(flag)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    *r = res;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
    /* We can use inline assembly to do this efficiently on arm64 by doing a
     * 32b + 32b add and checking the carry out */
    uint32_t res = 0;
    /* csinv keeps the sum on no-carry (cc), else substitutes ~wzr == UINT32_MAX. */
    __asm__("adds %w[res], %w[arga], %w[argb]\n"
            "csinv %w[res], %w[res], wzr, cc\n"
            : /* inout: res is the result */ [res]"+&r"(res)
            : /* in: a and b */ [arga] "r"(a), [argb] "r"(b)
            : /* clobbers: cc (cmp clobbers condition codes) */ "cc");
    return res;
}
AWS_EXTERN_C_END
/* clang-format on */
#endif /* AWS_COMMON_MATH_GCC_ARM64_ASM_INL */

View File

@@ -0,0 +1,102 @@
#ifndef AWS_COMMON_MATH_GCC_BUILTIN_INL
#define AWS_COMMON_MATH_GCC_BUILTIN_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
/* clang-format off */
AWS_EXTERN_C_BEGIN
/**
 * Search from the MSB to LSB, looking for a 1
 */
/* __builtin_clz*(0) is undefined, so zero is handled explicitly, returning
 * the full bit-width of the type. */
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_clz(n);
}
/* Negative n converts to an unsigned value with the top bit set, so the
 * builtin returns 0 — "sign bit is the first bit", matching the fallback. */
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_clz(n);
}
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_clzll(n);
}
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_clzll(n);
}
/* Width-dispatching clz for size_t (SIZE_BITS is 32 or 64, from math.h). */
AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
#if SIZE_BITS == 64
    return aws_clz_u64(n);
#else
    return aws_clz_u32(n);
#endif
}
/**
 * Search from the LSB to MSB, looking for a 1
 */
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) {
    /* __builtin_ctz(0) is undefined; return the full bit-width instead. */
    if (n == 0) {
        return sizeof(n) * 8;
    }
    /* Use the int-width builtin: the previous __builtin_ctzl takes unsigned
     * long (64-bit on LP64) — same result for non-zero n after zero
     * extension, but inconsistent with the other 32-bit helpers here. */
    return __builtin_ctz(n);
}
/* __builtin_ctz*(0) is undefined, so zero is handled explicitly, returning
 * the full bit-width. The cast to the builtin's unsigned parameter type
 * preserves the bit pattern, so the trailing-zero count is unchanged. */
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_ctz(n);
}
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_ctzll(n);
}
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) {
    if (n == 0) {
        return sizeof(n) * 8;
    }
    return __builtin_ctzll(n);
}
/* Width-dispatching ctz for size_t (SIZE_BITS is 32 or 64, from math.h). */
AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) {
#if SIZE_BITS == 64
    return aws_ctz_u64(n);
#else
    return aws_ctz_u32(n);
#endif
}
AWS_EXTERN_C_END
/* clang-format on */
#endif /* AWS_COMMON_MATH_GCC_BUILTIN_INL */

View File

@@ -0,0 +1,115 @@
#ifndef AWS_COMMON_MATH_GCC_OVERFLOW_INL
#define AWS_COMMON_MATH_GCC_OVERFLOW_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
AWS_EXTERN_C_BEGIN
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    /* The builtin reports overflow and stores the wrapped product. */
    uint64_t product;
    return __builtin_mul_overflow(a, b, &product) ? UINT64_MAX : product;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* *r receives the (possibly wrapped) product either way; the return
     * value alone signals overflow. */
    return __builtin_mul_overflow(a, b, r) ? aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED) : AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
    uint32_t product;
    return __builtin_mul_overflow(a, b, &product) ? UINT32_MAX : product;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    return __builtin_mul_overflow(a, b, r) ? aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED) : AWS_OP_SUCCESS;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* *r receives the (possibly wrapped) sum either way; the return value
     * alone signals overflow. */
    return __builtin_add_overflow(a, b, r) ? aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED) : AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
    /* The builtin reports overflow and stores the wrapped sum. */
    uint64_t sum;
    return __builtin_add_overflow(a, b, &sum) ? UINT64_MAX : sum;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    return __builtin_add_overflow(a, b, r) ? aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED) : AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
    uint32_t sum;
    return __builtin_add_overflow(a, b, &sum) ? UINT32_MAX : sum;
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_MATH_GCC_OVERFLOW_INL */

View File

@@ -0,0 +1,184 @@
#ifndef AWS_COMMON_MATH_GCC_X64_ASM_INL
#define AWS_COMMON_MATH_GCC_X64_ASM_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/math.h>
/* clang-format off */
AWS_EXTERN_C_BEGIN
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86.
       we specify rdx as an output, rather than a clobber, because we want to
       allow it to be allocated as an input register */
    uint64_t rdx;
    /* mulq sets CF iff the high half (RDX) is non-zero, so cmovc replaces
     * RAX with the all-ones saturation value exactly on overflow. */
    __asm__("mulq %q[arg2]\n" /* rax * b, result is in RDX:RAX, OF=CF=(RDX != 0) */
            "cmovc %q[saturate], %%rax\n"
            : /* in/out: %rax = a, out: rdx (ignored) */ "+&a"(a), "=&d"(rdx)
            : /* in: register only */ [arg2] "r"(b),
              /* in: saturation value (reg/memory) */ [saturate] "rm"(~0LL)
            : /* clobbers: cc */ "cc");
    (void)rdx; /* suppress unused warnings */
    return a;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    char flag;
    uint64_t result = a;
    /* seto writes a byte register; the "=&d" constraint places flag in %dl,
     * which also keeps %rdx (the high half of the product) away from the
     * other operands. Note: *r is written (low 64 bits) even on overflow. */
    __asm__("mulq %q[arg2]\n" /* rax * b, result is in RDX:RAX, OF=CF=(RDX != 0) */
            "seto %[flag]\n" /* flag = overflow_bit */
            : /* in/out: %rax (first arg & result), %d (flag) */ "+&a"(result), [flag] "=&d"(flag)
            : /* in: reg for 2nd operand */
              [arg2] "r"(b)
            : /* clobbers: cc (d is used for flag so no need to clobber)*/ "cc");
    *r = result;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86.
       we specify edx as an output, rather than a clobber, because we want to
       allow it to be allocated as an input register */
    uint32_t edx;
    /* A local label + jnc is used instead of cmov so the sequence also
     * assembles for pre-cmov 32-bit targets. */
    __asm__("mull %k[arg2]\n" /* eax * b, result is in EDX:EAX, OF=CF=(EDX != 0) */
            /* cmov isn't guaranteed to be available on x86-32 */
            "jnc .1f%=\n"
            "mov $0xFFFFFFFF, %%eax\n"
            ".1f%=:"
            : /* in/out: %eax = result/a, out: edx (ignored) */ "+&a"(a), "=&d"(edx)
            : /* in: operand 2 in reg */ [arg2] "r"(b)
            : /* clobbers: cc */ "cc");
    (void)edx; /* suppress unused warnings */
    return a;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    uint32_t result = a;
    char flag;
    /**
     * Note: seto only takes a byte register; the "=&d" constraint places the
     * flag in %dl, which also keeps %edx (the high half of the product) away
     * from the other operands. *r is written (low 32 bits) even on overflow.
     */
    __asm__("mull %k[arg2]\n" /* eax * b, result is in EDX:EAX, OF=CF=(EDX != 0) */
            "seto %[flag]\n" /* flag = overflow_bit */
            : /* in/out: %eax (first arg & result), %d (flag) */ "+&a"(result), [flag] "=&d"(flag)
            : /* in: reg for 2nd operand */
              [arg2] "r"(b)
            : /* clobbers: cc (d is used for flag so no need to clobber)*/ "cc");
    *r = result;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    char flag;
    /* Note: *r is written (wrapped sum) even on overflow. */
    __asm__("addq %[argb], %[arga]\n" /* [arga] = [arga] + [argb] */
            "setc %[flag]\n" /* [flag] = 1 if overflow, 0 otherwise */
            : /* in/out: */ [arga] "+r"(a), [flag] "=&r"(flag)
            : /* in: */ [argb] "r"(b)
            : /* clobbers: */ "cc");
    *r = a;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    /* cmovc substitutes the all-ones saturation value when the add carries. */
    __asm__("addq %[arg1], %[arg2]\n" /* [arg2] = [arg1] + [arg2] */
            "cmovc %q[saturate], %[arg2]\n"
            : /* in/out: arg2 holds the (saturated) sum */ [arg2] "+r"(b)
            : /* in: register only */ [arg1] "r"(a),
              /* in: saturation value (reg/memory) */ [saturate] "rm"(~0LL)
            : /* clobbers: cc */ "cc");
    return b;
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    char flag;
    /* Note: *r is written (wrapped sum) even on overflow. */
    __asm__("addl %[argb], %[arga]\n" /* [arga] = [arga] + [argb] */
            "setc %[flag]\n" /* [flag] = 1 if overflow, 0 otherwise */
            : /* in/out: */ [arga] "+r"(a), [flag] "=&r"(flag)
            : /* in: */ [argb] "r"(b)
            : /* clobbers: */ "cc");
    *r = a;
    if (flag) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    return AWS_OP_SUCCESS;
}
/**
 * Adds a + b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
    /* We can use inline assembly to do this efficiently on x86-64 and x86. */
    /* b is pinned to %eax by the "+a" constraint, so the mov into %%eax on
     * the carry path overwrites the sum with the saturation value. */
    __asm__("addl %[arg1], %[arg2]\n" /* [arg2] = [arg1] + [arg2] */
            /* cmov isn't guaranteed to be available on x86-32 */
            "jnc .1f%=\n"
            "mov $0xFFFFFFFF, %%eax\n"
            ".1f%=:"
            : /* in/out: %eax = b, holds the (saturated) sum */ [arg2] "+a"(b)
            : /* in: register only */ [arg1] "r"(a)
            : /* clobbers: cc */ "cc");
    return b;
}
AWS_EXTERN_C_END
/* clang-format on */
#endif /* AWS_COMMON_MATH_GCC_X64_ASM_INL */

View File

@@ -0,0 +1,210 @@
#ifndef AWS_COMMON_MATH_H
#define AWS_COMMON_MATH_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/config.h>
#include <limits.h>
#include <stdlib.h>
AWS_PUSH_SANE_WARNING_LEVEL
/* The number of bits in a size_t variable */
#if SIZE_MAX == UINT32_MAX
#    define SIZE_BITS 32
#elif SIZE_MAX == UINT64_MAX
#    define SIZE_BITS 64
#else
#    error "Target not supported"
#endif
/* The largest power of two that can be stored in a size_t */
#define SIZE_MAX_POWER_OF_TWO (((size_t)1) << (SIZE_BITS - 1))
AWS_EXTERN_C_BEGIN
/*
 * Select linkage for the overflow helpers: static inline whenever any fast
 * inline implementation (compiler overflow builtins in C/clang, x64/arm64
 * inline asm, MSVC intrinsics, CBMC) is usable, or when no overflow
 * extensions exist at all (pure-C fallback). The only remaining case —
 * handled by the #else — is a GCC C++ build where the extensions exist but
 * cannot be used from C++.
 */
#if defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) || \
    (defined(__x86_64__) || defined(__aarch64__)) && defined(AWS_HAVE_GCC_INLINE_ASM) || \
    defined(AWS_HAVE_MSVC_INTRINSICS_X64) || defined(CBMC) || !defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
/* In all these cases, we can use fast static inline versions of this code */
#    define AWS_COMMON_MATH_API AWS_STATIC_IMPL
#else
/*
 * We got here because we are building in C++ mode but we only support overflow extensions
 * in C mode. Because the fallback is _slow_ (involving a division), we'd prefer to make a
 * non-inline call to the fast C intrinsics.
 */
#    define AWS_COMMON_MATH_API AWS_COMMON_API
#endif
/**
* Multiplies a * b. If the result overflows, returns 2^64 - 1.
*/
AWS_COMMON_MATH_API uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b);
/**
* If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
* a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_COMMON_MATH_API int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r);
/**
* Multiplies a * b. If the result overflows, returns 2^32 - 1.
*/
AWS_COMMON_MATH_API uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b);
/**
* If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
* a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_COMMON_MATH_API int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r);
/**
* Adds a + b. If the result overflows returns 2^64 - 1.
*/
AWS_COMMON_MATH_API uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b);
/**
* If a + b overflows, returns AWS_OP_ERR; otherwise adds
* a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_COMMON_MATH_API int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r);
/**
* Adds a + b. If the result overflows returns 2^32 - 1.
*/
AWS_COMMON_MATH_API uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b);
/**
* If a + b overflows, returns AWS_OP_ERR; otherwise adds
* a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_COMMON_MATH_API int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r);
/**
* Subtracts a - b. If the result overflows returns 0.
*/
AWS_STATIC_IMPL uint64_t aws_sub_u64_saturating(uint64_t a, uint64_t b);
/**
* If a - b overflows, returns AWS_OP_ERR; otherwise subtracts
* a - b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_sub_u64_checked(uint64_t a, uint64_t b, uint64_t *r);
/**
* Subtracts a - b. If the result overflows returns 0.
*/
AWS_STATIC_IMPL uint32_t aws_sub_u32_saturating(uint32_t a, uint32_t b);
/**
* If a - b overflows, returns AWS_OP_ERR; otherwise subtracts
* a - b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_sub_u32_checked(uint32_t a, uint32_t b, uint32_t *r);
/**
* Multiplies a * b. If the result overflows, returns SIZE_MAX.
*/
AWS_STATIC_IMPL size_t aws_mul_size_saturating(size_t a, size_t b);
/**
* Multiplies a * b and returns the result in *r. If the result
* overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_mul_size_checked(size_t a, size_t b, size_t *r);
/**
* Adds a + b. If the result overflows returns SIZE_MAX.
*/
AWS_STATIC_IMPL size_t aws_add_size_saturating(size_t a, size_t b);
/**
* Adds a + b and returns the result in *r. If the result
* overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_add_size_checked(size_t a, size_t b, size_t *r);
/**
* Adds [num] arguments (expected to be of size_t), and returns the result in *r.
* If the result overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS.
*/
AWS_COMMON_API int aws_add_size_checked_varargs(size_t num, size_t *r, ...);
/**
* Subtracts a - b. If the result overflows returns 0.
*/
AWS_STATIC_IMPL size_t aws_sub_size_saturating(size_t a, size_t b);
/**
* If a - b overflows, returns AWS_OP_ERR; otherwise subtracts
* a - b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_sub_size_checked(size_t a, size_t b, size_t *r);
/**
* Function to check if x is power of 2
*/
AWS_STATIC_IMPL bool aws_is_power_of_two(const size_t x);
/**
* Function to find the smallest result that is power of 2 >= n. Returns AWS_OP_ERR if this cannot
* be done without overflow
*/
AWS_STATIC_IMPL int aws_round_up_to_power_of_two(size_t n, size_t *result);
/**
* Counts the number of leading 0 bits in an integer. 0 will return the size of the integer in bits.
*/
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n);
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n);
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n);
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n);
AWS_STATIC_IMPL size_t aws_clz_size(size_t n);
/**
* Counts the number of trailing 0 bits in an integer. 0 will return the size of the integer in bits.
*/
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n);
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n);
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n);
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n);
AWS_STATIC_IMPL size_t aws_ctz_size(size_t n);
AWS_STATIC_IMPL uint8_t aws_min_u8(uint8_t a, uint8_t b);
AWS_STATIC_IMPL uint8_t aws_max_u8(uint8_t a, uint8_t b);
AWS_STATIC_IMPL int8_t aws_min_i8(int8_t a, int8_t b);
AWS_STATIC_IMPL int8_t aws_max_i8(int8_t a, int8_t b);
AWS_STATIC_IMPL uint16_t aws_min_u16(uint16_t a, uint16_t b);
AWS_STATIC_IMPL uint16_t aws_max_u16(uint16_t a, uint16_t b);
AWS_STATIC_IMPL int16_t aws_min_i16(int16_t a, int16_t b);
AWS_STATIC_IMPL int16_t aws_max_i16(int16_t a, int16_t b);
AWS_STATIC_IMPL uint32_t aws_min_u32(uint32_t a, uint32_t b);
AWS_STATIC_IMPL uint32_t aws_max_u32(uint32_t a, uint32_t b);
AWS_STATIC_IMPL int32_t aws_min_i32(int32_t a, int32_t b);
AWS_STATIC_IMPL int32_t aws_max_i32(int32_t a, int32_t b);
AWS_STATIC_IMPL uint64_t aws_min_u64(uint64_t a, uint64_t b);
AWS_STATIC_IMPL uint64_t aws_max_u64(uint64_t a, uint64_t b);
AWS_STATIC_IMPL int64_t aws_min_i64(int64_t a, int64_t b);
AWS_STATIC_IMPL int64_t aws_max_i64(int64_t a, int64_t b);
AWS_STATIC_IMPL size_t aws_min_size(size_t a, size_t b);
AWS_STATIC_IMPL size_t aws_max_size(size_t a, size_t b);
AWS_STATIC_IMPL int aws_min_int(int a, int b);
AWS_STATIC_IMPL int aws_max_int(int a, int b);
AWS_STATIC_IMPL float aws_min_float(float a, float b);
AWS_STATIC_IMPL float aws_max_float(float a, float b);
AWS_STATIC_IMPL double aws_min_double(double a, double b);
AWS_STATIC_IMPL double aws_max_double(double a, double b);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/math.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_MATH_H */

View File

@@ -0,0 +1,300 @@
#ifndef AWS_COMMON_MATH_INL
#define AWS_COMMON_MATH_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/config.h>
#include <aws/common/math.h>
#include <limits.h>
#include <stdlib.h>
#if defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus))
/*
* GCC and clang have these super convenient overflow checking builtins...
* but (in the case of GCC) they're only available when building C source.
* We'll fall back to one of the other inlinable variants (or a non-inlined version)
* if we are building this header on G++.
*/
# include <aws/common/math.gcc_overflow.inl>
#elif defined(__x86_64__) && defined(AWS_HAVE_GCC_INLINE_ASM)
# include <aws/common/math.gcc_x64_asm.inl>
#elif defined(__aarch64__) && defined(AWS_HAVE_GCC_INLINE_ASM)
# include <aws/common/math.gcc_arm64_asm.inl>
#elif defined(AWS_HAVE_MSVC_INTRINSICS_X64)
# include <aws/common/math.msvc.inl>
#elif defined(CBMC)
# include <aws/common/math.cbmc.inl>
#else
# ifndef AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS
/* Fall back to the pure-C implementations */
# include <aws/common/math.fallback.inl>
# else
/*
* We got here because we are building in C++ mode but we only support overflow extensions
* in C mode. Because the fallback is _slow_ (involving a division), we'd prefer to make a
* non-inline call to the fast C intrinsics.
*/
# endif /* AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS */
#endif /* defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) */
#if defined(__clang__) || defined(__GNUC__)
# include <aws/common/math.gcc_builtin.inl>
#endif
AWS_EXTERN_C_BEGIN
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4127) /*Disable "conditional expression is constant" */
#elif defined(__GNUC__)
# pragma GCC diagnostic push
# if defined(__cplusplus) && !defined(__clang__)
# pragma GCC diagnostic ignored "-Wuseless-cast" /* Warning is C++ only (not C), and GCC only (not clang) */
# endif
#endif
/**
 * Subtracts b from a, clamping to 0 instead of wrapping on underflow.
 */
AWS_STATIC_IMPL uint64_t aws_sub_u64_saturating(uint64_t a, uint64_t b) {
    if (a > b) {
        return a - b;
    }
    return 0;
}
/**
 * Subtracts b from a into *r. Raises AWS_ERROR_OVERFLOW_DETECTED and returns
 * AWS_OP_ERR on underflow; otherwise returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_sub_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    /* Underflow occurs exactly when the subtrahend exceeds the minuend. */
    if (b > a) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = a - b;
    return AWS_OP_SUCCESS;
}
/**
 * Subtracts b from a, clamping to 0 instead of wrapping on underflow.
 */
AWS_STATIC_IMPL uint32_t aws_sub_u32_saturating(uint32_t a, uint32_t b) {
    if (a > b) {
        return a - b;
    }
    return 0;
}
/**
 * Subtracts b from a into *r. Raises AWS_ERROR_OVERFLOW_DETECTED and returns
 * AWS_OP_ERR on underflow; otherwise returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_sub_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
    /* Underflow occurs exactly when the subtrahend exceeds the minuend. */
    if (b > a) {
        return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
    }
    *r = a - b;
    return AWS_OP_SUCCESS;
}
/**
 * Multiplies a * b. If the result overflows, returns SIZE_MAX.
 *
 * Pure dispatch: SIZE_BITS selects the fixed-width variant whose width
 * matches size_t on this target.
 */
AWS_STATIC_IMPL size_t aws_mul_size_saturating(size_t a, size_t b) {
#if SIZE_BITS == 32
return (size_t)aws_mul_u32_saturating(a, b);
#elif SIZE_BITS == 64
return (size_t)aws_mul_u64_saturating(a, b);
#else
# error "Target not supported"
#endif
}
/**
 * Multiplies a * b and returns the result in *r. If the result
 * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS.
 *
 * The cast of r is width-correct because the selected branch guarantees
 * size_t has exactly SIZE_BITS bits; assumes size_t and the fixed-width
 * type are layout-compatible on supported targets.
 */
AWS_STATIC_IMPL int aws_mul_size_checked(size_t a, size_t b, size_t *r) {
#if SIZE_BITS == 32
return aws_mul_u32_checked(a, b, (uint32_t *)r);
#elif SIZE_BITS == 64
return aws_mul_u64_checked(a, b, (uint64_t *)r);
#else
# error "Target not supported"
#endif
}
/**
 * Adds a + b. If the result overflows returns SIZE_MAX.
 */
AWS_STATIC_IMPL size_t aws_add_size_saturating(size_t a, size_t b) {
#if SIZE_BITS == 32
return (size_t)aws_add_u32_saturating(a, b);
#elif SIZE_BITS == 64
return (size_t)aws_add_u64_saturating(a, b);
#else
# error "Target not supported"
#endif
}
/**
 * Adds a + b and returns the result in *r. If the result
 * overflows, returns AWS_OP_ERR; otherwise returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_size_checked(size_t a, size_t b, size_t *r) {
#if SIZE_BITS == 32
return aws_add_u32_checked(a, b, (uint32_t *)r);
#elif SIZE_BITS == 64
return aws_add_u64_checked(a, b, (uint64_t *)r);
#else
# error "Target not supported"
#endif
}
/* Subtracts b from a, clamping to 0 on underflow (see the u32/u64 variants above). */
AWS_STATIC_IMPL size_t aws_sub_size_saturating(size_t a, size_t b) {
#if SIZE_BITS == 32
return (size_t)aws_sub_u32_saturating(a, b);
#elif SIZE_BITS == 64
return (size_t)aws_sub_u64_saturating(a, b);
#else
# error "Target not supported"
#endif
}
/* Subtracts b from a into *r; raises AWS_ERROR_OVERFLOW_DETECTED on underflow. */
AWS_STATIC_IMPL int aws_sub_size_checked(size_t a, size_t b, size_t *r) {
#if SIZE_BITS == 32
return aws_sub_u32_checked(a, b, (uint32_t *)r);
#elif SIZE_BITS == 64
return aws_sub_u64_checked(a, b, (uint64_t *)r);
#else
# error "Target not supported"
#endif
}
/**
 * Returns true if x is a power of 2 (zero is not a power of two).
 */
AWS_STATIC_IMPL bool aws_is_power_of_two(const size_t x) {
    if (x == 0) {
        return false;
    }
    /* A power of two has exactly one bit set, so clearing its lowest set bit yields zero. */
    return (x & (x - 1)) == 0;
}
/**
 * Function to find the smallest result that is power of 2 >= n. Returns AWS_OP_ERR if this cannot
 * be done without overflow
 */
AWS_STATIC_IMPL int aws_round_up_to_power_of_two(size_t n, size_t *result) {
if (n == 0) {
/* The smallest power of two is 2^0 == 1. */
*result = 1;
return AWS_OP_SUCCESS;
}
if (n > SIZE_MAX_POWER_OF_TWO) {
/* No representable power of two >= n exists in size_t. */
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
/* Classic round-up sequence: decrement first so that an exact power of two
 * maps to itself rather than the next power up. */
n--;
/* Smear the highest set bit into every lower position; after the cascade,
 * n has the form 2^k - 1. */
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
#if SIZE_BITS == 64
/* One extra smear step is needed to cover all 64 bits. */
n |= n >> 32;
#endif
/* 2^k - 1 + 1 == 2^k, the smallest power of two >= the original n. */
n++;
*result = n;
return AWS_OP_SUCCESS;
}
#ifdef _MSC_VER
# pragma warning(pop)
#elif defined(__GNUC__)
# pragma GCC diagnostic pop
#endif /* _MSC_VER */
/* Min/max helpers for every common scalar type. Each returns the second
 * argument when the comparison is false; for the float/double variants this
 * matches the original ternary form even when an operand is NaN. */
AWS_STATIC_IMPL uint8_t aws_min_u8(uint8_t a, uint8_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint8_t aws_max_u8(uint8_t a, uint8_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int8_t aws_min_i8(int8_t a, int8_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int8_t aws_max_i8(int8_t a, int8_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint16_t aws_min_u16(uint16_t a, uint16_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint16_t aws_max_u16(uint16_t a, uint16_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int16_t aws_min_i16(int16_t a, int16_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int16_t aws_max_i16(int16_t a, int16_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint32_t aws_min_u32(uint32_t a, uint32_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint32_t aws_max_u32(uint32_t a, uint32_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int32_t aws_min_i32(int32_t a, int32_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int32_t aws_max_i32(int32_t a, int32_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint64_t aws_min_u64(uint64_t a, uint64_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL uint64_t aws_max_u64(uint64_t a, uint64_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int64_t aws_min_i64(int64_t a, int64_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int64_t aws_max_i64(int64_t a, int64_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL size_t aws_min_size(size_t a, size_t b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL size_t aws_max_size(size_t a, size_t b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int aws_min_int(int a, int b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL int aws_max_int(int a, int b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL float aws_min_float(float a, float b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL float aws_max_float(float a, float b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL double aws_min_double(double a, double b) {
    if (a < b) {
        return a;
    }
    return b;
}
AWS_STATIC_IMPL double aws_max_double(double a, double b) {
    if (a > b) {
        return a;
    }
    return b;
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_MATH_INL */

View File

@@ -0,0 +1,273 @@
#ifndef AWS_COMMON_MATH_MSVC_INL
#define AWS_COMMON_MATH_MSVC_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* This header is already included, but include it again to make editor
* highlighting happier.
*/
#include <aws/common/common.h>
#include <aws/common/cpuid.h>
#include <aws/common/math.h>
/* This file generates level 4 compiler warnings in Visual Studio 2017 and older */
#pragma warning(push, 3)
#include <intrin.h>
#pragma warning(pop)
AWS_EXTERN_C_BEGIN
/**
 * Multiplies a * b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_mul_u64_saturating(uint64_t a, uint64_t b) {
    uint64_t high_bits;
    uint64_t low_bits = _umul128(a, b, &high_bits);
    /* Any nonzero high half means the true product does not fit in 64 bits. */
    if (high_bits != 0) {
        return UINT64_MAX;
    }
    return low_bits;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 *
 * Note: *r receives the low 64 bits of the product even on overflow.
 */
AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
    uint64_t high_bits;
    *r = _umul128(a, b, &high_bits);
    return (high_bits == 0) ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
/* Cached implementation pointer, selected on first call based on CPU features.
 * NOTE(review): the lazy init below is unsynchronized; concurrent first calls
 * would write the same value, but confirm pointer stores are atomic on target
 * platforms before relying on this from multiple threads. */
static uint32_t (*s_mul_u32_saturating_fn_ptr)(uint32_t a, uint32_t b) = NULL;
/* BMI2 path: _mulx_u32 exposes the high 32 bits of the product directly. */
static uint32_t s_mulx_u32_saturating(uint32_t a, uint32_t b) {
uint32_t high_32;
uint32_t ret_val = _mulx_u32(a, b, &high_32);
return (high_32 == 0) ? ret_val : UINT32_MAX;
}
/* Generic MSVC path: widen to a 64-bit product with __emulu and range-check. */
static uint32_t s_emulu_saturating(uint32_t a, uint32_t b) {
uint64_t result = __emulu(a, b);
return (result > UINT32_MAX) ? UINT32_MAX : (uint32_t)result;
}
/**
 * Multiplies a * b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
if (AWS_UNLIKELY(!s_mul_u32_saturating_fn_ptr)) {
if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) {
s_mul_u32_saturating_fn_ptr = s_mulx_u32_saturating;
} else {
/* If BMI2 unavailable, use __emulu instead */
s_mul_u32_saturating_fn_ptr = s_emulu_saturating;
}
}
return s_mul_u32_saturating_fn_ptr(a, b);
}
/* Cached implementation pointer, selected on first call based on CPU features.
 * NOTE(review): lazy init is unsynchronized, same caveat as the saturating
 * variant above — both racers would store the same value. */
static int (*s_mul_u32_checked_fn_ptr)(uint32_t a, uint32_t b, uint32_t *r) = NULL;
/* BMI2 path. Note *r is written before the overflow check, so it holds the
 * truncated low 32 bits when AWS_OP_ERR is returned. */
static int s_mulx_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
uint32_t high_32;
*r = _mulx_u32(a, b, &high_32);
if (high_32 != 0) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
return AWS_OP_SUCCESS;
}
/* Generic MSVC path: widen with __emulu; *r is only written on success. */
static int s_emulu_checked(uint32_t a, uint32_t b, uint32_t *r) {
uint64_t result = __emulu(a, b);
if (result > UINT32_MAX) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
*r = (uint32_t)result;
return AWS_OP_SUCCESS;
}
/**
 * If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
 * a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
if (AWS_UNLIKELY(!s_mul_u32_checked_fn_ptr)) {
if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) {
s_mul_u32_checked_fn_ptr = s_mulx_u32_checked;
} else {
/* If BMI2 unavailable, use __emulu instead */
s_mul_u32_checked_fn_ptr = s_emulu_checked;
}
}
return s_mul_u32_checked_fn_ptr(a, b, r);
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
#if !defined(_MSC_VER) || _MSC_VER < 1920
/* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
if ((b > 0) && (a > (UINT64_MAX - b))) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
*r = a + b;
return AWS_OP_SUCCESS;
#else
/* _addcarry_u64 returns the carry-out flag, i.e. nonzero exactly on overflow. */
if (_addcarry_u64((uint8_t)0, a, b, r)) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
return AWS_OP_SUCCESS;
#endif
}
/**
 * Adds a + b. If the result overflows, returns 2^64 - 1.
 */
AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
#if !defined(_MSC_VER) || _MSC_VER < 1920
/* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
if ((b > 0) && (a > (UINT64_MAX - b))) {
return UINT64_MAX;
}
return a + b;
#else
uint64_t res = 0;
/* Carry-out from _addcarry_u64 signals overflow; clamp to the max value. */
if (_addcarry_u64((uint8_t)0, a, b, &res)) {
res = UINT64_MAX;
}
return res;
#endif
}
/**
 * If a + b overflows, returns AWS_OP_ERR; otherwise adds
 * a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
 */
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
#if !defined(_MSC_VER) || _MSC_VER < 1920
/* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
if ((b > 0) && (a > (UINT32_MAX - b))) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
*r = a + b;
return AWS_OP_SUCCESS;
#else
/* _addcarry_u32 returns the carry-out flag, i.e. nonzero exactly on overflow. */
if (_addcarry_u32((uint8_t)0, a, b, r)) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
return AWS_OP_SUCCESS;
#endif
}
/**
 * Adds a + b. If the result overflows, returns 2^32 - 1.
 */
AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
#if !defined(_MSC_VER) || _MSC_VER < 1920
    /* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
    if ((b > 0) && (a > (UINT32_MAX - b))) {
        /* Braced for consistency with every sibling function in this file
         * (the original left this single statement unbraced). */
        return UINT32_MAX;
    }
    return a + b;
#else
    uint32_t res = 0;
    /* Carry-out from _addcarry_u32 signals overflow; clamp to the max value. */
    if (_addcarry_u32((uint8_t)0, a, b, &res)) {
        res = UINT32_MAX;
    }
    return res;
#endif
}
/**
 * Search from the MSB to LSB, looking for a 1
 */
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) {
    unsigned long msb_index = 0;
    /* _BitScanReverse returns 0 when n is 0; report the full bit width then. */
    return _BitScanReverse(&msb_index, n) ? (size_t)(31 - msb_index) : (size_t)32;
}
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) {
    unsigned long msb_index = 0;
    return _BitScanReverse(&msb_index, (unsigned long)n) ? (size_t)(31 - msb_index) : (size_t)32;
}
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) {
    unsigned long msb_index = 0;
    return _BitScanReverse64(&msb_index, n) ? (size_t)(63 - msb_index) : (size_t)64;
}
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) {
    unsigned long msb_index = 0;
    return _BitScanReverse64(&msb_index, (uint64_t)n) ? (size_t)(63 - msb_index) : (size_t)64;
}
AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
#if SIZE_BITS == 64
    return aws_clz_u64(n);
#else
    return aws_clz_u32(n);
#endif
}
/**
 * Search from the LSB to MSB, looking for a 1
 */
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) {
    unsigned long lsb_index = 0;
    /* _BitScanForward returns 0 when n is 0; report the full bit width then. */
    return _BitScanForward(&lsb_index, n) ? (size_t)lsb_index : (size_t)32;
}
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) {
    unsigned long lsb_index = 0;
    return _BitScanForward(&lsb_index, (uint32_t)n) ? (size_t)lsb_index : (size_t)32;
}
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) {
    unsigned long lsb_index = 0;
    return _BitScanForward64(&lsb_index, n) ? (size_t)lsb_index : (size_t)64;
}
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) {
    unsigned long lsb_index = 0;
    return _BitScanForward64(&lsb_index, (uint64_t)n) ? (size_t)lsb_index : (size_t)64;
}
AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) {
#if SIZE_BITS == 64
    return aws_ctz_u64(n);
#else
    return aws_ctz_u32(n);
#endif
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_MATH_MSVC_INL */

View File

@@ -0,0 +1,74 @@
#ifndef AWS_COMMON_MUTEX_H
#define AWS_COMMON_MUTEX_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#ifdef _WIN32
/* NOTE: Do not use this macro before including windows.h */
# define AWSMUTEX_TO_WINDOWS(pMutex) (PSRWLOCK) & (pMutex)->mutex_handle
#else
# include <pthread.h>
#endif
AWS_PUSH_SANE_WARNING_LEVEL
/* Platform mutex wrapper. Treat as opaque; manipulate only via the aws_mutex_* functions. */
struct aws_mutex {
#ifdef _WIN32
/* Backing storage reinterpreted as a Windows SRWLOCK (see AWSMUTEX_TO_WINDOWS above). */
void *mutex_handle;
#else
pthread_mutex_t mutex_handle;
#endif
/* Set true by AWS_MUTEX_INIT / aws_mutex_init — presumably consulted during
 * clean-up to detect uninitialized mutexes; confirm in the platform .c files. */
bool initialized;
};
#ifdef _WIN32
# define AWS_MUTEX_INIT {.mutex_handle = NULL, .initialized = true}
#else
# define AWS_MUTEX_INIT {.mutex_handle = PTHREAD_MUTEX_INITIALIZER, .initialized = true}
#endif
AWS_EXTERN_C_BEGIN
/**
* Initializes a new platform instance of mutex.
*/
AWS_COMMON_API
int aws_mutex_init(struct aws_mutex *mutex);
/**
* Cleans up internal resources.
*/
AWS_COMMON_API
void aws_mutex_clean_up(struct aws_mutex *mutex);
/**
 * Blocks until it acquires the lock. While on some platforms such as Windows,
 * this may behave as a reentrant mutex, you should not treat it like one. On
 * platforms where it is possible for it to be non-reentrant, it will be non-reentrant.
 */
AWS_COMMON_API
int aws_mutex_lock(struct aws_mutex *mutex);
/**
* Attempts to acquire the lock but returns immediately if it can not.
* While on some platforms such as Windows, this may behave as a reentrant mutex,
 * you should not treat it like one. On platforms where it is possible for it to be non-reentrant, it will be non-reentrant.
* Note: For windows, minimum support server version is Windows Server 2008 R2 [desktop apps | UWP apps]
*/
AWS_COMMON_API
int aws_mutex_try_lock(struct aws_mutex *mutex);
/**
* Releases the lock.
*/
AWS_COMMON_API
int aws_mutex_unlock(struct aws_mutex *mutex);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_MUTEX_H */

View File

@@ -0,0 +1,21 @@
#ifndef AWS_COMMON_PACKAGE_H
#define AWS_COMMON_PACKAGE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/*
* Preliminary cap on the number of possible aws-c-libraries participating in shared enum ranges for
* errors, log subjects, and other cross-library enums. Expandable as needed
*/
#define AWS_PACKAGE_SLOTS 32
/*
* Each aws-c-* and aws-crt-* library has a unique package id starting from zero. These are used to macro-calculate
* correct ranges for the cross-library enumerations.
*/
#define AWS_C_COMMON_PACKAGE_ID 0
#endif /* AWS_COMMON_PACKAGE_H */

View File

@@ -0,0 +1,38 @@
#ifndef AWS_COMMON_PLATFORM_H
#define AWS_COMMON_PLATFORM_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/config.h>
#ifdef _WIN32
# define AWS_OS_WINDOWS
/* indicate whether this is for Windows desktop, or UWP or Windows S, or other Windows-like devices */
# if defined(AWS_HAVE_WINAPI_DESKTOP)
# define AWS_OS_WINDOWS_DESKTOP
# endif
#elif __APPLE__
# define AWS_OS_APPLE
# include "TargetConditionals.h"
# if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
# define AWS_OS_IOS
# elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH
# define AWS_OS_WATCHOS
# elif defined(TARGET_OS_TV) && TARGET_OS_TV
# define AWS_OS_TVOS
# else
# define AWS_OS_MACOS
# endif
#elif __linux__
# define AWS_OS_LINUX
#endif
#if defined(_POSIX_VERSION)
# define AWS_OS_POSIX
#endif
#endif /* AWS_COMMON_PLATFORM_H */

View File

@@ -0,0 +1,27 @@
#ifndef AWS_COMMON_PREDICATES_H
#define AWS_COMMON_PREDICATES_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
/**
* Returns whether all bytes of the two byte arrays match.
*/
#if defined(AWS_DEEP_CHECKS) && (AWS_DEEP_CHECKS == 1)
# ifdef CBMC
/* clang-format off */
# define AWS_BYTES_EQ(arr1, arr2, len) \
__CPROVER_forall { \
int i; \
(i >= 0 && i < len) ==> ((const uint8_t *)&arr1)[i] == ((const uint8_t *)&arr2)[i] \
}
/* clang-format on */
# else
# define AWS_BYTES_EQ(arr1, arr2, len) (memcmp(arr1, arr2, len) == 0)
# endif /* CBMC */
#else
# define AWS_BYTES_EQ(arr1, arr2, len) (1)
#endif /* (AWS_DEEP_CHECKS == 1) */
#endif /* AWS_COMMON_PREDICATES_H */

View File

@@ -0,0 +1,205 @@
#ifndef AWS_COMMON_PRIORITY_QUEUE_H
#define AWS_COMMON_PRIORITY_QUEUE_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/array_list.h>
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
/* The comparator should return a positive value if the second argument has a
* higher priority than the first; Otherwise, it should return a negative value
* or zero. NOTE: priority_queue pops its highest priority element first. For
* example: int cmp(const void *a, const void *b) { return a < b; } would result
* in a max heap, while: int cmp(const void *a, const void *b) { return a > b; }
* would result in a min heap.
*/
typedef int(aws_priority_queue_compare_fn)(const void *a, const void *b);
struct aws_priority_queue {
/**
* predicate that determines the priority of the elements in the queue.
*/
aws_priority_queue_compare_fn *pred;
/**
* The underlying container storing the queue elements.
*/
struct aws_array_list container;
/**
* An array of pointers to backpointer elements. This array is initialized when
* the first call to aws_priority_queue_push_bp is made, and is subsequently maintained
* through any heap node manipulations.
*
* Each element is a struct aws_priority_queue_node *, pointing to a backpointer field
* owned by the calling code, or a NULL. The backpointer field is continually updated
* with information needed to locate and remove a specific node later on.
*/
struct aws_array_list backpointers;
};
struct aws_priority_queue_node {
/** The current index of the node in question, or SIZE_MAX if the node has been removed. */
size_t current_index;
};
AWS_EXTERN_C_BEGIN
/**
* Initializes a priority queue struct for use. This mode will grow memory automatically (exponential model)
 * Default size is the initial size of the queue
* item_size is the size of each element in bytes. Mixing items types is not supported by this API.
* pred is the function that will be used to determine priority.
*/
AWS_COMMON_API
int aws_priority_queue_init_dynamic(
struct aws_priority_queue *queue,
struct aws_allocator *alloc,
size_t default_size,
size_t item_size,
aws_priority_queue_compare_fn *pred);
/**
* Initializes a priority queue struct for use. This mode will not allocate any additional memory. When the heap fills
* new enqueue operations will fail with AWS_ERROR_PRIORITY_QUEUE_FULL.
*
* Heaps initialized using this call do not support the aws_priority_queue_push_ref call with a non-NULL backpointer
* parameter.
*
* heap is the raw memory allocated for this priority_queue
* item_count is the maximum number of elements the raw heap can contain
* item_size is the size of each element in bytes. Mixing items types is not supported by this API.
* pred is the function that will be used to determine priority.
*/
AWS_COMMON_API
void aws_priority_queue_init_static(
struct aws_priority_queue *queue,
void *heap,
size_t item_count,
size_t item_size,
aws_priority_queue_compare_fn *pred);
/**
* Checks that the backpointer at a specific index of the queue is
* NULL or points to a correctly allocated aws_priority_queue_node.
*/
bool aws_priority_queue_backpointer_index_valid(const struct aws_priority_queue *const queue, size_t index);
/**
* Checks that the backpointers of the priority queue are either NULL
* or correctly allocated to point at aws_priority_queue_nodes. This
* check is O(n), as it accesses every backpointer in a loop, and thus
* shouldn't be used carelessly.
*/
bool aws_priority_queue_backpointers_valid_deep(const struct aws_priority_queue *const queue);
/**
* Checks that the backpointers of the priority queue satisfy validity
* constraints.
*/
bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *const queue);
/**
* Set of properties of a valid aws_priority_queue.
*/
AWS_COMMON_API
bool aws_priority_queue_is_valid(const struct aws_priority_queue *const queue);
/**
* Cleans up any internally allocated memory and resets the struct for reuse or deletion.
*/
AWS_COMMON_API
void aws_priority_queue_clean_up(struct aws_priority_queue *queue);
/**
* Copies item into the queue and places it in the proper priority order. Complexity: O(log(n)).
*/
AWS_COMMON_API
int aws_priority_queue_push(struct aws_priority_queue *queue, void *item);
/**
* Copies item into the queue and places it in the proper priority order. Complexity: O(log(n)).
*
* If the backpointer parameter is non-null, the heap will continually update the pointed-to field
* with information needed to remove the node later on. *backpointer must remain valid until the node
* is removed from the heap, and may be updated on any mutating operation on the priority queue.
*
* If the node is removed, the backpointer will be set to a sentinel value that indicates that the
* node has already been removed. It is safe (and a no-op) to call aws_priority_queue_remove with
* such a sentinel value.
*/
AWS_COMMON_API
int aws_priority_queue_push_ref(
struct aws_priority_queue *queue,
void *item,
struct aws_priority_queue_node *backpointer);
/**
 * Copies the element of the highest priority, and removes it from the queue. Complexity: O(log(n)).
* If queue is empty, AWS_ERROR_PRIORITY_QUEUE_EMPTY will be raised.
*/
AWS_COMMON_API
int aws_priority_queue_pop(struct aws_priority_queue *queue, void *item);
/**
* Removes a specific node from the priority queue. Complexity: O(log(n))
* After removing a node (using either _remove or _pop), the backpointer set at push_ref time is set
* to a sentinel value. If this sentinel value is passed to aws_priority_queue_remove,
* AWS_ERROR_PRIORITY_QUEUE_BAD_NODE will be raised. Note, however, that passing uninitialized
* aws_priority_queue_nodes, or ones from different priority queues, results in undefined behavior.
*/
AWS_COMMON_API
int aws_priority_queue_remove(struct aws_priority_queue *queue, void *item, const struct aws_priority_queue_node *node);
/**
* Obtains a pointer to the element of the highest priority. Complexity: constant time.
* If queue is empty, AWS_ERROR_PRIORITY_QUEUE_EMPTY will be raised.
*/
AWS_COMMON_API
int aws_priority_queue_top(const struct aws_priority_queue *queue, void **item);
/**
* Removes all elements from the queue, but does not free internal memory.
*/
AWS_COMMON_API
void aws_priority_queue_clear(struct aws_priority_queue *queue);
/**
* Current number of elements in the queue
*/
AWS_COMMON_API
size_t aws_priority_queue_size(const struct aws_priority_queue *queue);
/**
* Current allocated capacity for the queue, in dynamic mode this grows over time, in static mode, this will never
* change.
*/
AWS_COMMON_API
size_t aws_priority_queue_capacity(const struct aws_priority_queue *queue);
/**
* Initializes a queue node to a default value that indicates the node is not in the queue.
*
* @param node priority queue node to initialize with a default value
*/
AWS_COMMON_API
void aws_priority_queue_node_init(struct aws_priority_queue_node *node);
/**
* Checks if a priority queue node is currently in a priority queue.
*
* @param node priority queue node to check usage for
*
* @return true if the node is in a queue, false otherwise
*/
AWS_COMMON_API
bool aws_priority_queue_node_is_in_queue(const struct aws_priority_queue_node *node);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_PRIORITY_QUEUE_H */

View File

@@ -0,0 +1,84 @@
#ifndef AWS_COMMON_PROCESS_H
#define AWS_COMMON_PROCESS_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_run_command_result {
/* return code from running the command. */
int ret_code;
/**
* captured stdout message from running the command,
* caller is responsible for releasing the memory.
*/
struct aws_string *std_out;
/**
* captured stderr message from running the command,
* caller is responsible for releasing the memory.
* It's currently not implemented and the value will be set to NULL.
*/
struct aws_string *std_err;
};
struct aws_run_command_options {
/**
* command path and commandline options of running that command.
*/
const char *command;
};
AWS_EXTERN_C_BEGIN
/**
* Returns the current process's PID (process id).
* @return PID as int
*/
AWS_COMMON_API int aws_get_pid(void);
/**
* Returns the soft limit for max io handles (max fds in unix terminology). This limit is one more than the actual
* limit. The soft limit can be changed up to the hard limit by any process regardless of permissions.
*/
AWS_COMMON_API size_t aws_get_soft_limit_io_handles(void);
/**
* Returns the hard limit for max io handles (max fds in unix terminology). This limit is one more than the actual
* limit. This limit cannot be increased without sudo permissions.
*/
AWS_COMMON_API size_t aws_get_hard_limit_io_handles(void);
/**
* Sets the new soft limit for io_handles (max fds). This can be up to the hard limit but may not exceed it.
*
* This operation will always fail with AWS_ERROR_UNIMPLEMENTED error code on Windows.
*/
AWS_COMMON_API int aws_set_soft_limit_io_handles(size_t max_handles);
AWS_COMMON_API int aws_run_command_result_init(struct aws_allocator *allocator, struct aws_run_command_result *result);
AWS_COMMON_API void aws_run_command_result_cleanup(struct aws_run_command_result *result);
/**
* Currently this API is implemented using popen on Posix system and
* _popen on Windows to capture output from running a command. Note
* that popen only captures stdout, and doesn't provide an option to
* capture stderr. We will add more options, such as acquire stderr
* in the future so probably will alter the underlying implementation
* as well.
*/
AWS_COMMON_API int aws_run_command(
struct aws_allocator *allocator,
struct aws_run_command_options *options,
struct aws_run_command_result *result);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_PROCESS_H */

View File

@@ -0,0 +1,62 @@
#ifndef AWS_COMMON_REF_COUNT_H
#define AWS_COMMON_REF_COUNT_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/atomics.h>
AWS_PUSH_SANE_WARNING_LEVEL
typedef void(aws_simple_completion_callback)(void *);
/*
 * A utility type for making ref-counted types, reminiscent of std::shared_ptr in C++
 */
struct aws_ref_count {
/* Current reference count; starts at 1 after aws_ref_count_init. */
struct aws_atomic_var ref_count;
/* The object whose lifetime is tracked; returned by aws_ref_count_acquire. */
void *object;
/* Invoked when the count reaches zero — presumably passed `object`;
 * confirm the argument in the implementation. */
aws_simple_completion_callback *on_zero_fn;
};
/* Pairs a completion callback with its user data. NOTE(review): presumably
 * invoked when the owning subsystem finishes shutting down — confirm at call sites. */
struct aws_shutdown_callback_options {
/* Callback to invoke on completion; receives shutdown_callback_user_data. */
aws_simple_completion_callback *shutdown_callback_fn;
void *shutdown_callback_user_data;
};
AWS_EXTERN_C_BEGIN
/**
* Initializes a ref-counter structure. After initialization, the ref count will be 1.
*
* @param ref_count ref-counter to initialize
* @param object object being ref counted
* @param on_zero_fn function to invoke when the ref count reaches zero
*/
AWS_COMMON_API void aws_ref_count_init(
struct aws_ref_count *ref_count,
void *object,
aws_simple_completion_callback *on_zero_fn);
/**
* Increments a ref-counter's ref count
*
* @param ref_count ref-counter to increment the count for
* @return the object being ref-counted
*/
AWS_COMMON_API void *aws_ref_count_acquire(struct aws_ref_count *ref_count);
/**
* Decrements a ref-counter's ref count. Invokes the on_zero callback if the ref count drops to zero
* @param ref_count ref-counter to decrement the count for
* @return the value of the decremented ref count
*/
AWS_COMMON_API size_t aws_ref_count_release(struct aws_ref_count *ref_count);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_REF_COUNT_H */

View File

@@ -0,0 +1,104 @@
#ifndef AWS_COMMON_RING_BUFFER_H
#define AWS_COMMON_RING_BUFFER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/atomics.h>
AWS_PUSH_SANE_WARNING_LEVEL
/**
* Lockless ring buffer implementation that is thread safe assuming a single thread acquires and a single thread
* releases. For any other use case (other than the single-threaded use-case), you must manage thread-safety manually.
*
* Also, a very important note: release must happen in the same order as acquire. If you do not your application, and
* possibly computers within a thousand mile radius, may die terrible deaths, and the local drinking water will be
* poisoned for generations with fragments of what is left of your radioactive corrupted memory.
*/
struct aws_ring_buffer {
struct aws_allocator *allocator;
/* Start of the backing allocation. */
uint8_t *allocation;
/* Acquire/release cursors into the allocation, stored atomically to support
 * the single-acquirer / single-releaser usage described above. */
struct aws_atomic_var head;
struct aws_atomic_var tail;
/* End boundary of the backing allocation; valid pointers p satisfy
 * allocation <= p <= allocation_end (see aws_ring_buffer_check_atomic_ptr). */
uint8_t *allocation_end;
};
struct aws_byte_buf;
AWS_EXTERN_C_BEGIN
/**
* Initializes a ring buffer with an allocation of size `size`. Returns AWS_OP_SUCCESS on a successful initialization,
* AWS_OP_ERR otherwise.
*/
AWS_COMMON_API int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size);
/*
* Checks whether atomic_ptr correctly points to a memory location within the bounds of the aws_ring_buffer
*/
AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr(
const struct aws_ring_buffer *ring_buf,
const uint8_t *atomic_ptr);
/**
* Checks whether the ring buffer is empty
*/
AWS_STATIC_IMPL bool aws_ring_buffer_is_empty(const struct aws_ring_buffer *ring_buf);
/**
* Evaluates the set of properties that define the shape of all valid aws_ring_buffer structures.
* It is also a cheap check, in the sense it run in constant time (i.e., no loops or recursion).
*/
AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring_buf);
/**
* Cleans up the ring buffer's resources.
*/
AWS_COMMON_API void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf);
/**
* Attempts to acquire `requested_size` buffer and stores the result in `dest` if successful. Returns AWS_OP_SUCCESS if
* the requested size was available for use, AWS_OP_ERR otherwise.
*/
AWS_COMMON_API int aws_ring_buffer_acquire(
struct aws_ring_buffer *ring_buf,
size_t requested_size,
struct aws_byte_buf *dest);
/**
* Attempts to acquire `requested_size` buffer and stores the result in `dest` if successful. If not available, it will
* attempt to acquire anywhere from 1 byte to `requested_size`. Returns AWS_OP_SUCCESS if some buffer space is available
* for use, AWS_OP_ERR otherwise.
*/
AWS_COMMON_API int aws_ring_buffer_acquire_up_to(
struct aws_ring_buffer *ring_buf,
size_t minimum_size,
size_t requested_size,
struct aws_byte_buf *dest);
/**
* Releases `buf` back to the ring buffer for further use. RELEASE MUST HAPPEN in the SAME ORDER AS ACQUIRE.
* If you do not, your application, and possibly computers within a thousand mile radius, may die terrible deaths,
* and the local drinking water will be poisoned for generations
* with fragments of what is left of your radioactive corrupted memory.
*/
AWS_COMMON_API void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf);
/**
* Returns true if the memory in `buf` was vended by this ring buffer, false otherwise.
* Make sure `buf->buffer` and `ring_buffer->allocation` refer to the same memory region.
*/
AWS_COMMON_API bool aws_ring_buffer_buf_belongs_to_pool(
const struct aws_ring_buffer *ring_buffer,
const struct aws_byte_buf *buf);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/ring_buffer.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_RING_BUFFER_H */

View File

@@ -0,0 +1,44 @@
#ifndef AWS_COMMON_RING_BUFFER_INL
#define AWS_COMMON_RING_BUFFER_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/ring_buffer.h>
AWS_EXTERN_C_BEGIN
/*
* Checks whether atomic_ptr correctly points to a memory location within the bounds of the aws_ring_buffer
*/
AWS_STATIC_IMPL bool aws_ring_buffer_check_atomic_ptr(
    const struct aws_ring_buffer *ring_buf,
    const uint8_t *atomic_ptr) {
    /* A NULL pointer is never in range. */
    if (atomic_ptr == NULL) {
        return false;
    }
    /* Otherwise the pointer must fall within [allocation, allocation_end] (end inclusive). */
    return atomic_ptr >= ring_buf->allocation && atomic_ptr <= ring_buf->allocation_end;
}
/**
* Checks whether the ring buffer is empty
*/
AWS_STATIC_IMPL bool aws_ring_buffer_is_empty(const struct aws_ring_buffer *ring_buf) {
    /* The ring buffer is empty exactly when the head and tail cursors coincide. */
    uint8_t *head_ptr = (uint8_t *)aws_atomic_load_ptr(&ring_buf->head);
    uint8_t *tail_ptr = (uint8_t *)aws_atomic_load_ptr(&ring_buf->tail);
    return head_ptr == tail_ptr;
}
/**
* Evaluates the set of properties that define the shape of all valid aws_ring_buffer structures.
 * It is also a cheap check, in the sense that it runs in constant time (i.e., no loops or recursion).
*/
AWS_STATIC_IMPL bool aws_ring_buffer_is_valid(const struct aws_ring_buffer *ring_buf) {
    /* Guard against NULL before touching any field: the previous version loaded
     * ring_buf->head/tail via aws_atomic_load_ptr() and only afterwards checked
     * `ring_buf &&`, which dereferences a NULL pointer (undefined behavior)
     * before the check could help. */
    if (ring_buf == NULL) {
        return false;
    }
    uint8_t *head = (uint8_t *)aws_atomic_load_ptr(&ring_buf->head);
    uint8_t *tail = (uint8_t *)aws_atomic_load_ptr(&ring_buf->tail);
    bool head_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, head);
    bool tail_in_range = aws_ring_buffer_check_atomic_ptr(ring_buf, tail);
    /* if head points-to the first element of the buffer then tail must too */
    bool valid_head_tail = (head != ring_buf->allocation) || (tail == ring_buf->allocation);
    return (ring_buf->allocation != NULL) && head_in_range && tail_in_range && valid_head_tail &&
           (ring_buf->allocator != NULL);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_RING_BUFFER_INL */

View File

@@ -0,0 +1,71 @@
#ifndef AWS_COMMON_RW_LOCK_H
#define AWS_COMMON_RW_LOCK_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#ifdef _WIN32
/* NOTE: Do not use this macro before including windows.h */
# define AWSSRW_TO_WINDOWS(pCV) (PSRWLOCK) pCV
#else
# include <pthread.h>
#endif
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_rw_lock {
#ifdef _WIN32
    /* Backing storage for a Windows slim reader/writer lock; cast via AWSSRW_TO_WINDOWS. */
    void *lock_handle;
#else
    /* POSIX read-write lock. */
    pthread_rwlock_t lock_handle;
#endif
};
#ifdef _WIN32
# define AWS_RW_LOCK_INIT {.lock_handle = NULL}
#else
# define AWS_RW_LOCK_INIT {.lock_handle = PTHREAD_RWLOCK_INITIALIZER}
#endif
AWS_EXTERN_C_BEGIN
/**
 * Initializes a new platform instance of a read-write lock.
*/
AWS_COMMON_API int aws_rw_lock_init(struct aws_rw_lock *lock);
/**
* Cleans up internal resources.
*/
AWS_COMMON_API void aws_rw_lock_clean_up(struct aws_rw_lock *lock);
/**
 * Blocks until it acquires the lock. While on some platforms, such as Windows,
 * this may behave as a reentrant lock, you should not treat it like one. On
 * platforms where it is possible for it to be non-reentrant, it will be non-reentrant.
*/
AWS_COMMON_API int aws_rw_lock_rlock(struct aws_rw_lock *lock);
AWS_COMMON_API int aws_rw_lock_wlock(struct aws_rw_lock *lock);
/**
* Attempts to acquire the lock but returns immediately if it can not.
 * While on some platforms, such as Windows, this may behave as a reentrant lock,
 * you should not treat it like one. On platforms where it is possible for it to be non-reentrant, it will be
 * non-reentrant.
* Note: For windows, minimum support server version is Windows Server 2008 R2 [desktop apps | UWP apps]
*/
AWS_COMMON_API int aws_rw_lock_try_rlock(struct aws_rw_lock *lock);
AWS_COMMON_API int aws_rw_lock_try_wlock(struct aws_rw_lock *lock);
/**
* Releases the lock.
*/
AWS_COMMON_API int aws_rw_lock_runlock(struct aws_rw_lock *lock);
AWS_COMMON_API int aws_rw_lock_wunlock(struct aws_rw_lock *lock);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_RW_LOCK_H */

View File

@@ -0,0 +1,146 @@
#ifndef AWS_COMMON_STATISTICS_H
#define AWS_COMMON_STATISTICS_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/package.h>
#include <aws/common/stdint.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_array_list;
typedef uint32_t aws_crt_statistics_category_t;
/* Each library gets space for 2^^8 category entries */
enum {
AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS = 8,
};
#define AWS_CRT_STATISTICS_CATEGORY_STRIDE (1U << AWS_CRT_STATISTICS_CATEGORY_STRIDE_BITS)
#define AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(x) ((x) * AWS_CRT_STATISTICS_CATEGORY_STRIDE)
#define AWS_CRT_STATISTICS_CATEGORY_END_RANGE(x) (((x) + 1) * AWS_CRT_STATISTICS_CATEGORY_STRIDE - 1)
/**
* The common-specific range of the aws_crt_statistics_category cross-library enum.
*
* This enum functions as an RTTI value that lets statistics handler's interpret (via cast) a
* specific statistics structure if the RTTI value is understood.
*
* Common doesn't have any statistics structures presently, so its range is essentially empty.
*
*/
enum aws_crt_common_statistics_category {
AWSCRT_STAT_CAT_INVALID = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
/**
* Pattern-struct that functions as a base "class" for all statistics structures. To conform
* to the pattern, a statistics structure must have its first member be the category. In that
* case it becomes "safe" to cast from aws_crt_statistics_base to the specific statistics structure
* based on the category value.
*/
struct aws_crt_statistics_base {
aws_crt_statistics_category_t category;
};
/**
* The start and end time, in milliseconds-since-epoch, that a set of statistics was gathered over.
*/
struct aws_crt_statistics_sample_interval {
uint64_t begin_time_ms;
uint64_t end_time_ms;
};
struct aws_crt_statistics_handler;
/*
* Statistics intake function. The array_list is a list of pointers to aws_crt_statistics_base "derived" (via
* pattern) objects. The handler should iterate the list and downcast elements whose RTTI category it understands,
* while skipping those it does not understand.
*/
typedef void(aws_crt_statistics_handler_process_statistics_fn)(
struct aws_crt_statistics_handler *handler,
struct aws_crt_statistics_sample_interval *interval,
struct aws_array_list *stats,
void *context);
/*
* Destroys a statistics handler implementation
*/
typedef void(aws_crt_statistics_handler_destroy_fn)(struct aws_crt_statistics_handler *handler);
/*
* The period, in milliseconds, that the handler would like to be informed of statistics. Statistics generators are
* not required to honor this value, but should if able.
*/
typedef uint64_t(aws_crt_statistics_handler_get_report_interval_ms_fn)(struct aws_crt_statistics_handler *);
/**
* Vtable for functions that all statistics handlers must implement
*/
struct aws_crt_statistics_handler_vtable {
aws_crt_statistics_handler_process_statistics_fn *process_statistics;
aws_crt_statistics_handler_destroy_fn *destroy;
aws_crt_statistics_handler_get_report_interval_ms_fn *get_report_interval_ms;
};
/**
* Base structure for all statistics handler implementations.
*
* A statistics handler is an object that listens to a stream of polymorphic (via the category RTTI enum) statistics
* structures emitted from some arbitrary source. In the initial implementation, statistics handlers are primarily
* attached to channels, where they monitor IO throughput and state data (from channel handlers) to determine a
* connection's health.
*
* Statistics handlers are a generalization of the timeout and bandwidth filters that are often associated with
* SDK network connections. Configurable, default implementations are defined at the protocol level (http, etc...)
* where they can be attached at connection (channel) creation time.
*/
struct aws_crt_statistics_handler {
struct aws_crt_statistics_handler_vtable *vtable;
struct aws_allocator *allocator;
void *impl;
};
AWS_EXTERN_C_BEGIN
/**
* Submits a list of statistics objects to a statistics handler for processing
*
* handler - the statistics handler that will process the statistics objects
* interval - time period over which the statistics were gathered
 * stats - list of pointers to structures that can be cast to aws_crt_statistics_base (i.e. have category as a first
* member)
* context - (optional) additional context specific to where the statistics handler has been attached
*/
AWS_COMMON_API
void aws_crt_statistics_handler_process_statistics(
struct aws_crt_statistics_handler *handler,
struct aws_crt_statistics_sample_interval *interval,
struct aws_array_list *stats,
void *context);
/**
* Queries the frequency (via an interval in milliseconds) which a statistics handler would like to be informed
* of statistics.
*/
AWS_COMMON_API
uint64_t aws_crt_statistics_handler_get_report_interval_ms(struct aws_crt_statistics_handler *handler);
/**
* completely destroys a statistics handler. The handler's cleanup function must clean up the impl portion completely
* (including its allocation, if done separately).
*/
AWS_COMMON_API
void aws_crt_statistics_handler_destroy(struct aws_crt_statistics_handler *handler);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_STATISTICS_H */

View File

@@ -0,0 +1,30 @@
/* clang-format off */
/* clang-format gets confused by the #define bool line, and gives crazy indenting */
#ifndef AWS_COMMON_STDBOOL_H
#define AWS_COMMON_STDBOOL_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#ifndef NO_STDBOOL
# include <stdbool.h> /* NOLINT(fuchsia-restrict-system-includes) */
#else
# ifndef __cplusplus
# define bool _Bool
# define true 1
# define false 0
# elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
# define _Bool bool
# if __cplusplus < 201103L
/* For C++98, define bool, false, true as a GNU extension. */
# define bool bool
# define false false
# define true true
# endif /* __cplusplus < 201103L */
# endif /* __cplusplus */
#endif /* NO_STDBOOL */
#endif /* AWS_COMMON_STDBOOL_H */
/* clang-format on */

View File

@@ -0,0 +1,93 @@
#ifndef AWS_COMMON_STDINT_H
#define AWS_COMMON_STDINT_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef NO_STDINT
# include <stdint.h> /* NOLINT(fuchsia-restrict-system-includes) */
/* Android defines SIZE_MAX in limits.h, not stdint.h */
# ifdef ANDROID
# include <limits.h>
# endif
#else
# if defined(__x86_64__) || defined(_M_AMD64) || defined(__aarch64__) || defined(__ia64__) || defined(__powerpc64__)
# define PTR_SIZE 8
# else
# define PTR_SIZE 4
# endif
typedef signed char int8_t;
typedef short int int16_t;
typedef int int32_t;
# if (PTR_SIZE == 8)
typedef long int int64_t;
# else
typedef long long int int64_t;
# endif /* (PTR_SIZE == 8) */
typedef unsigned char uint8_t;
typedef unsigned short int uint16_t;
typedef unsigned int uint32_t;
# if (PTR_SIZE == 8)
typedef unsigned long int uint64_t;
# else
typedef unsigned long long int uint64_t;
# endif /* (PTR_SIZE == 8) */
# if (PTR_SIZE == 8)
typedef long int intptr_t;
typedef unsigned long int uintptr_t;
# else
typedef int intptr_t;
typedef unsigned int uintptr_t;
# endif
# if (PTR_SIZE == 8)
# define __INT64_C(c) c##L
# define __UINT64_C(c) c##UL
# else
# define __INT64_C(c) c##LL
# define __UINT64_C(c) c##ULL
# endif
# define INT8_MIN (-128)
# define INT16_MIN (-32767 - 1)
# define INT32_MIN (-2147483647 - 1)
# define INT64_MIN (-__INT64_C(9223372036854775807) - 1)
# define INT8_MAX (127)
# define INT16_MAX (32767)
# define INT32_MAX (2147483647)
# define INT64_MAX (__INT64_C(9223372036854775807))
# define UINT8_MAX (255)
# define UINT16_MAX (65535)
# define UINT32_MAX (4294967295U)
# define UINT64_MAX (__UINT64_C(18446744073709551615))
AWS_STATIC_ASSERT(sizeof(uint64_t) == 8);
AWS_STATIC_ASSERT(sizeof(uint32_t) == 4);
AWS_STATIC_ASSERT(sizeof(uint16_t) == 2);
AWS_STATIC_ASSERT(sizeof(uint8_t) == 1);
AWS_STATIC_ASSERT(sizeof(int64_t) == 8);
AWS_STATIC_ASSERT(sizeof(int32_t) == 4);
AWS_STATIC_ASSERT(sizeof(int16_t) == 2);
AWS_STATIC_ASSERT(sizeof(int8_t) == 1);
AWS_STATIC_ASSERT(sizeof(uintptr_t) == sizeof(void *));
AWS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *));
AWS_STATIC_ASSERT(sizeof(char) == 1);
#endif /* NO_STDINT */
/**
* @deprecated Use int64_t instead for offsets in public APIs.
*/
typedef int64_t aws_off_t;
#endif /* AWS_COMMON_STDINT_H */

View File

@@ -0,0 +1,384 @@
#ifndef AWS_COMMON_STRING_H
#define AWS_COMMON_STRING_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
/**
* Represents an immutable string holding either text or binary data. If the
* string is in constant memory or memory that should otherwise not be freed by
* this struct, set allocator to NULL and destroy function will be a no-op.
*
* This is for use cases where the entire struct and the data bytes themselves
* need to be held in dynamic memory, such as when held by a struct
* aws_hash_table. The data bytes themselves are always held in contiguous
* memory immediately after the end of the struct aws_string, and the memory for
* both the header and the data bytes is allocated together.
*
* Use the aws_string_bytes function to access the data bytes. A null byte is
* always included immediately after the data but not counted in the length, so
* that the output of aws_string_bytes can be treated as a C-string in cases
 * where none of the data bytes are null.
*
* Note that the fields of this structure are const; this ensures not only that
* they cannot be modified, but also that you can't assign the structure using
* the = operator accidentally.
*/
/* Using a flexible array member is the C99 compliant way to have the bytes of
* the string immediately follow the header.
*
* MSVC doesn't know this for some reason so we need to use a pragma to make
* it happy.
*/
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4623) /* default constructor was implicitly defined as deleted */
# pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */
# pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */
#endif
struct aws_string {
struct aws_allocator *const allocator;
/* size in bytes of `bytes` minus any null terminator.
* NOTE: This is not the number of characters in the string. */
const size_t len;
/* give this a storage specifier for C++ purposes. It will likely be larger after init. */
const uint8_t bytes[1];
};
#ifdef AWS_OS_WINDOWS
struct aws_wstring {
struct aws_allocator *const allocator;
/* number of characters in the string not including the null terminator. */
const size_t len;
/* give this a storage specifier for C++ purposes. It will likely be larger after init. */
const wchar_t bytes[1];
};
#endif /* AWS_OS_WINDOWS */
#ifdef _MSC_VER
# pragma warning(pop)
#endif
AWS_EXTERN_C_BEGIN
#ifdef AWS_OS_WINDOWS
/**
 * For windows only. Converts `to_convert` to a windows wchar format (UTF-16) for use with windows OS interop.
*
* Note: `to_convert` is assumed to be UTF-8 or ASCII.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wstring(
struct aws_allocator *allocator,
const struct aws_string *to_convert);
/**
 * For windows only. Converts `to_convert` to a windows wchar format (UTF-16) for use with windows OS interop.
*
* Note: `to_convert` is assumed to be UTF-8 or ASCII.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_wstring *aws_string_convert_to_wchar_from_byte_cursor(
struct aws_allocator *allocator,
const struct aws_byte_cursor *to_convert);
/**
* clean up str.
*/
AWS_COMMON_API
void aws_wstring_destroy(struct aws_wstring *str);
/**
 * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
*
* Note: `to_convert` is assumed to be wchar already.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_str(
struct aws_allocator *allocator,
const struct aws_wstring *to_convert);
/**
 * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
*
* Note: `to_convert` is assumed to be wchar already.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_byte_cursor(
struct aws_allocator *allocator,
const struct aws_byte_cursor *to_convert);
/**
 * For windows only. Converts `to_convert` from a windows wchar format (UTF-16) to UTF-8.
*
* Note: `to_convert` is assumed to be wchar already.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_string *aws_string_convert_from_wchar_c_str(
struct aws_allocator *allocator,
const wchar_t *to_convert);
/**
* Create a new wide string from a byte cursor. This assumes that w_str_cur is already in utf-16.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_cursor(
struct aws_allocator *allocator,
const struct aws_byte_cursor *w_str_cur);
/**
* Create a new wide string from a utf-16 string enclosing array. The length field is in number of characters not
* counting the null terminator.
*
* returns NULL on failure.
*/
AWS_COMMON_API struct aws_wstring *aws_wstring_new_from_array(
struct aws_allocator *allocator,
const wchar_t *w_str,
size_t length);
/**
* Returns a wchar_t * pointer for use with windows OS interop.
*/
AWS_COMMON_API const wchar_t *aws_wstring_c_str(const struct aws_wstring *str);
/**
* Returns the number of characters in the wchar string. NOTE: This is not the length in bytes or the buffer size.
*/
AWS_COMMON_API size_t aws_wstring_num_chars(const struct aws_wstring *str);
/**
* Returns the length in bytes for the buffer.
*/
AWS_COMMON_API size_t aws_wstring_size_bytes(const struct aws_wstring *str);
/**
* Verifies that str is a valid string. Returns true if it's valid and false otherwise.
*/
AWS_COMMON_API bool aws_wstring_is_valid(const struct aws_wstring *str);
#endif /* AWS_OS_WINDOWS */
/**
* Returns true if bytes of string are the same, false otherwise.
*/
AWS_COMMON_API
bool aws_string_eq(const struct aws_string *a, const struct aws_string *b);
/**
* Returns true if bytes of string are equivalent, using a case-insensitive comparison.
*/
AWS_COMMON_API
bool aws_string_eq_ignore_case(const struct aws_string *a, const struct aws_string *b);
/**
* Returns true if bytes of string and cursor are the same, false otherwise.
*/
AWS_COMMON_API
bool aws_string_eq_byte_cursor(const struct aws_string *str, const struct aws_byte_cursor *cur);
/**
* Returns true if bytes of string and cursor are equivalent, using a case-insensitive comparison.
*/
AWS_COMMON_API
bool aws_string_eq_byte_cursor_ignore_case(const struct aws_string *str, const struct aws_byte_cursor *cur);
/**
* Returns true if bytes of string and buffer are the same, false otherwise.
*/
AWS_COMMON_API
bool aws_string_eq_byte_buf(const struct aws_string *str, const struct aws_byte_buf *buf);
/**
* Returns true if bytes of string and buffer are equivalent, using a case-insensitive comparison.
*/
AWS_COMMON_API
bool aws_string_eq_byte_buf_ignore_case(const struct aws_string *str, const struct aws_byte_buf *buf);
AWS_COMMON_API
bool aws_string_eq_c_str(const struct aws_string *str, const char *c_str);
/**
* Returns true if bytes of strings are equivalent, using a case-insensitive comparison.
*/
AWS_COMMON_API
bool aws_string_eq_c_str_ignore_case(const struct aws_string *str, const char *c_str);
/**
* Constructor functions which copy data from null-terminated C-string or array of bytes.
*/
AWS_COMMON_API
struct aws_string *aws_string_new_from_c_str(struct aws_allocator *allocator, const char *c_str);
/**
* Allocate a new string with the same contents as array.
*/
AWS_COMMON_API
struct aws_string *aws_string_new_from_array(struct aws_allocator *allocator, const uint8_t *bytes, size_t len);
/**
* Allocate a new string with the same contents as another string.
*/
AWS_COMMON_API
struct aws_string *aws_string_new_from_string(struct aws_allocator *allocator, const struct aws_string *str);
/**
* Allocate a new string with the same contents as cursor.
*/
AWS_COMMON_API
struct aws_string *aws_string_new_from_cursor(struct aws_allocator *allocator, const struct aws_byte_cursor *cursor);
/**
* Allocate a new string with the same contents as buf.
*/
AWS_COMMON_API
struct aws_string *aws_string_new_from_buf(struct aws_allocator *allocator, const struct aws_byte_buf *buf);
/**
* Deallocate string.
*/
AWS_COMMON_API
void aws_string_destroy(struct aws_string *str);
/**
* Zeroes out the data bytes of string and then deallocates the memory.
* Not safe to run on a string created with AWS_STATIC_STRING_FROM_LITERAL.
*/
AWS_COMMON_API
void aws_string_destroy_secure(struct aws_string *str);
/**
* Compares lexicographical ordering of two strings. This is a binary
* byte-by-byte comparison, treating bytes as unsigned integers. It is suitable
* for either textual or binary data and is unaware of unicode or any other byte
* encoding. If both strings are identical in the bytes of the shorter string,
* then the longer string is lexicographically after the shorter.
*
* Returns a positive number if string a > string b. (i.e., string a is
* lexicographically after string b.) Returns zero if string a = string b.
* Returns negative number if string a < string b.
*/
AWS_COMMON_API
int aws_string_compare(const struct aws_string *a, const struct aws_string *b);
/**
* A convenience function for sorting lists of (const struct aws_string *) elements. This can be used as a
* comparator for aws_array_list_sort. It is just a simple wrapper around aws_string_compare.
*/
AWS_COMMON_API
int aws_array_list_comparator_string(const void *a, const void *b);
/**
* Defines a (static const struct aws_string *) with name specified in first
* argument that points to constant memory and has data bytes containing the
* string literal in the second argument.
*
 * GCC allows direct initialization of structs with variable length final fields.
* However, this might not be portable, so we can do this instead
* This will have to be updated whenever the aws_string structure changes
*/
#define AWS_STATIC_STRING_FROM_LITERAL(name, literal) \
static const struct { \
struct aws_allocator *const allocator; \
const size_t len; \
const uint8_t bytes[sizeof(literal)]; \
} name##_s = {NULL, sizeof(literal) - 1, literal}; \
static const struct aws_string *name = (struct aws_string *)(&name##_s) /* NOLINT(bugprone-macro-parentheses) */
/* NOLINT above is because clang-tidy complains that (name) isn't in parentheses,
* but gcc8-c++ complains that the parentheses are unnecessary */
/*
* A related macro that declares the string pointer without static, allowing it to be externed as a global constant
*/
#define AWS_STRING_FROM_LITERAL(name, literal) \
static const struct { \
struct aws_allocator *const allocator; \
const size_t len; \
const uint8_t bytes[sizeof(literal)]; \
} name##_s = {NULL, sizeof(literal) - 1, literal}; \
const struct aws_string *(name) = (struct aws_string *)(&name##_s)
/**
* Copies all bytes from string to buf.
*
* On success, returns true and updates the buf pointer/length
* accordingly. If there is insufficient space in the buf, returns
* false, leaving the buf unchanged.
*/
AWS_COMMON_API
bool aws_byte_buf_write_from_whole_string(
struct aws_byte_buf *AWS_RESTRICT buf,
const struct aws_string *AWS_RESTRICT src);
/**
* Creates an aws_byte_cursor from an existing string.
*/
AWS_COMMON_API
struct aws_byte_cursor aws_byte_cursor_from_string(const struct aws_string *src);
/**
* If the string was dynamically allocated, clones it. If the string was statically allocated (i.e. has no allocator),
* returns the original string.
*/
AWS_COMMON_API
struct aws_string *aws_string_clone_or_reuse(struct aws_allocator *allocator, const struct aws_string *str);
/** Computes the length of a c string in bytes assuming the character set is either ASCII or UTF-8. If no NULL character
* is found within max_read_len of str, AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED is raised. Otherwise, str_len
* will contain the string length minus the NULL character, and AWS_OP_SUCCESS will be returned. */
AWS_COMMON_API
int aws_secure_strlen(const char *str, size_t max_read_len, size_t *str_len);
/**
* Equivalent to str->bytes.
*/
AWS_STATIC_IMPL
const uint8_t *aws_string_bytes(const struct aws_string *str);
/**
* Equivalent to `(const char *)str->bytes`.
*/
AWS_STATIC_IMPL
const char *aws_string_c_str(const struct aws_string *str);
/**
* Evaluates the set of properties that define the shape of all valid aws_string structures.
 * It is also a cheap check, in the sense that it runs in constant time (i.e., no loops or recursion).
*/
AWS_STATIC_IMPL
bool aws_string_is_valid(const struct aws_string *str);
/**
* Best-effort checks aws_string invariants, when the str->len is unknown
*/
AWS_STATIC_IMPL
bool aws_c_string_is_valid(const char *str);
/**
* Evaluates if a char is a white character.
*/
AWS_STATIC_IMPL
bool aws_char_is_space(uint8_t c);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/string.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_STRING_H */

View File

@@ -0,0 +1,60 @@
#ifndef AWS_COMMON_STRING_INL
#define AWS_COMMON_STRING_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/string.h>
#include <ctype.h>
AWS_EXTERN_C_BEGIN
/**
* Equivalent to str->bytes.
*/
AWS_STATIC_IMPL
const uint8_t *aws_string_bytes(const struct aws_string *str) {
    AWS_PRECONDITION(aws_string_is_valid(str));
    /* Return the address of the data bytes that trail the header. */
    return &str->bytes[0];
}
/**
* Equivalent to `(const char *)str->bytes`.
*/
AWS_STATIC_IMPL
const char *aws_string_c_str(const struct aws_string *str) {
    AWS_PRECONDITION(aws_string_is_valid(str));
    /* bytes[] always carries a trailing NUL (see aws_string_is_valid), so it
     * can be viewed as a C string when the data itself contains no NULs. */
    return (const char *)&str->bytes[0];
}
/**
* Evaluates the set of properties that define the shape of all valid aws_string structures.
 * It is also a cheap check, in the sense that it runs in constant time (i.e., no loops or recursion).
*/
AWS_STATIC_IMPL
bool aws_string_is_valid(const struct aws_string *str) {
    /* Same short-circuit order as before: NULL check, readability of the
     * len + 1 bytes (data plus NUL terminator), then the terminator itself. */
    if (!str) {
        return false;
    }
    if (!AWS_MEM_IS_READABLE(&str->bytes[0], str->len + 1)) {
        return false;
    }
    return str->bytes[str->len] == 0;
}
/**
* Best-effort checks aws_string invariants, when the str->len is unknown
*/
AWS_STATIC_IMPL
bool aws_c_string_is_valid(const char *str) {
    /* A strlen() here would be linear in the string length and could itself
     * fault on a non-terminated string. So only check the minimal invariant:
     * a non-NULL c-string has at least one readable byte (if nothing else,
     * the NUL terminator). */
    if (str == NULL) {
        return false;
    }
    return AWS_MEM_IS_READABLE(str, 1);
}
/**
* Evaluates if a char is a white character.
*/
AWS_STATIC_IMPL
bool aws_char_is_space(uint8_t c) {
    /* Delegates to aws_isspace; taking uint8_t (rather than char) sidesteps
     * the negative-value pitfalls of ctype-style classifiers. */
    return aws_isspace(c);
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_STRING_INL */

View File

@@ -0,0 +1,155 @@
#ifndef AWS_COMMON_SYSTEM_INFO_H
#define AWS_COMMON_SYSTEM_INFO_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
enum aws_platform_os {
AWS_PLATFORM_OS_WINDOWS,
AWS_PLATFORM_OS_MAC,
AWS_PLATFORM_OS_UNIX,
};
struct aws_cpu_info {
int32_t cpu_id;
bool suspected_hyper_thread;
};
struct aws_system_environment;
AWS_EXTERN_C_BEGIN
/**
* Allocates and initializes information about the system the current process is executing on.
* If successful returns an instance of aws_system_environment. If it fails, it will return NULL.
*
* Note: This api is used internally and is still early in its evolution.
* It may change in incompatible ways in the future.
*/
AWS_COMMON_API
struct aws_system_environment *aws_system_environment_load(struct aws_allocator *allocator);
AWS_COMMON_API
struct aws_system_environment *aws_system_environment_acquire(struct aws_system_environment *env);
AWS_COMMON_API
void aws_system_environment_release(struct aws_system_environment *env);
/**
* Returns the virtualization vendor for the specified compute environment, e.g. "Xen, Amazon EC2, etc..."
*
* The return value may be empty and in that case no vendor was detected.
*/
AWS_COMMON_API
struct aws_byte_cursor aws_system_environment_get_virtualization_vendor(const struct aws_system_environment *env);
/**
* Returns the product name for the specified compute environment. For example, the Amazon EC2 Instance type.
*
 * The return value may be empty and in that case no product name was detected.
*/
AWS_COMMON_API
struct aws_byte_cursor aws_system_environment_get_virtualization_product_name(const struct aws_system_environment *env);
/**
* Returns the number of processors for the specified compute environment.
*/
AWS_COMMON_API
size_t aws_system_environment_get_processor_count(struct aws_system_environment *env);
/**
* Returns the number of separate cpu groupings (multi-socket configurations or NUMA).
*/
AWS_COMMON_API
size_t aws_system_environment_get_cpu_group_count(const struct aws_system_environment *env);
/* Returns the OS this was built under */
AWS_COMMON_API
enum aws_platform_os aws_get_platform_build_os(void);
/* Returns the number of online processors available for usage. */
AWS_COMMON_API
size_t aws_system_info_processor_count(void);
/**
* Returns the logical processor groupings on the system (such as multiple numa nodes).
*/
AWS_COMMON_API
uint16_t aws_get_cpu_group_count(void);
/**
* For a group, returns the number of CPUs it contains.
*/
AWS_COMMON_API
size_t aws_get_cpu_count_for_group(uint16_t group_idx);
/**
* Fills in cpu_ids_array with the cpu_id's for the group. To obtain the size to allocate for cpu_ids_array
* and the value for argument for cpu_ids_array_length, call aws_get_cpu_count_for_group().
*/
AWS_COMMON_API
void aws_get_cpu_ids_for_group(uint16_t group_idx, struct aws_cpu_info *cpu_ids_array, size_t cpu_ids_array_length);
/* Returns true if a debugger is currently attached to the process. */
AWS_COMMON_API
bool aws_is_debugger_present(void);
/* If a debugger is attached to the process, trip a breakpoint. */
AWS_COMMON_API
void aws_debug_break(void);
#if defined(AWS_HAVE_EXECINFO) || defined(_WIN32) || defined(__APPLE__)
# define AWS_BACKTRACE_STACKS_AVAILABLE
#endif
/*
* Records a stack trace from the call site.
* Returns the number of stack entries/stack depth captured, or 0 if the operation
* is not supported on this platform
*/
AWS_COMMON_API
size_t aws_backtrace(void **stack_frames, size_t num_frames);
/*
* Converts stack frame pointers to symbols, if symbols are available
* Returns an array up to stack_depth long, that needs to be free()ed.
* stack_depth should be the length of frames.
* Returns NULL if the platform does not support stack frame translation
* or an error occurs
*/
char **aws_backtrace_symbols(void *const *stack_frames, size_t stack_depth);
/*
* Converts stack frame pointers to symbols, using all available system
* tools to try to produce a human readable result. This call will not be
* quick, as it shells out to addr2line or similar tools.
* On Windows, this is the same as aws_backtrace_symbols()
* Returns an array up to stack_depth long that needs to be free()ed. Missing
* frames will be NULL.
* Returns NULL if the platform does not support stack frame translation
* or an error occurs
*/
char **aws_backtrace_addr2line(void *const *stack_frames, size_t stack_depth);
/**
* Print a backtrace from either the current stack, or (if provided) the current exception/signal
* call_site_data is siginfo_t* on POSIX, and LPEXCEPTION_POINTERS on Windows, and can be null
*/
AWS_COMMON_API
void aws_backtrace_print(FILE *fp, void *call_site_data);
/* Log the callstack from the current stack to the currently configured aws_logger */
AWS_COMMON_API
void aws_backtrace_log(int log_level);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_SYSTEM_INFO_H */

View File

@@ -0,0 +1,30 @@
#ifndef AWS_COMMON_SYSTEM_RESOURCE_UTIL_H
#define AWS_COMMON_SYSTEM_RESOURCE_UTIL_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
struct aws_memory_usage_stats {
    size_t maxrss;      /* max resident set size in kilobytes since program start */
    size_t page_faults; /* num of page faults since program start */

    size_t _reserved[8]; /* reserved so fields can be added later without breaking ABI */
};
/*
* Get memory usage for current process.
* Raises AWS_ERROR_SYS_CALL_FAILURE on failure.
*/
AWS_COMMON_API int aws_init_memory_usage_for_current_process(struct aws_memory_usage_stats *memory_usage);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_SYSTEM_RESOURCE_UTIL_H */

View File

@@ -0,0 +1,132 @@
#ifndef AWS_COMMON_TASK_SCHEDULER_H
#define AWS_COMMON_TASK_SCHEDULER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <aws/common/linked_list.h>
#include <aws/common/priority_queue.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_task;
typedef enum aws_task_status {
AWS_TASK_STATUS_RUN_READY,
AWS_TASK_STATUS_CANCELED,
} aws_task_status;
/**
* A scheduled function.
*/
typedef void(aws_task_fn)(struct aws_task *task, void *arg, enum aws_task_status);
/*
 * A task object.
 * Once added to the scheduler, a task must remain in memory until its function is executed.
 */
struct aws_task {
    aws_task_fn *fn;    /* function invoked when the task is run or canceled */
    void *arg;          /* user data forwarded to fn */
    uint64_t timestamp; /* absolute time at which a timed task should run */
    struct aws_linked_list_node node;                   /* intrusive node for the scheduler's list storage */
    struct aws_priority_queue_node priority_queue_node; /* intrusive node for the scheduler's timed queue */
    const char *type_tag; /* human-readable label for logging/debugging; not owned by the task */
    /* honor the ABI compat */
    union {
        bool scheduled;
        size_t reserved;
    } abi_extension;
};
struct aws_task_scheduler {
    struct aws_allocator *alloc;
    struct aws_priority_queue timed_queue; /* Tasks scheduled to run at specific times */
    struct aws_linked_list timed_list; /* If timed_queue runs out of memory, further timed tasks are stored here */
    struct aws_linked_list asap_list;  /* Tasks scheduled to run as soon as possible */
};
AWS_EXTERN_C_BEGIN
/**
* Init an aws_task
*/
AWS_COMMON_API
void aws_task_init(struct aws_task *task, aws_task_fn *fn, void *arg, const char *type_tag);
/*
* Runs or cancels a task
*/
AWS_COMMON_API
void aws_task_run(struct aws_task *task, enum aws_task_status status);
/**
* Initializes a task scheduler instance.
*/
AWS_COMMON_API
int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *alloc);
/**
* Empties and executes all queued tasks, passing the AWS_TASK_STATUS_CANCELED status to the task function.
* Cleans up any memory allocated, and prepares the instance for reuse or deletion.
*/
AWS_COMMON_API
void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler);
AWS_COMMON_API
bool aws_task_scheduler_is_valid(const struct aws_task_scheduler *scheduler);
/**
* Returns whether the scheduler has any scheduled tasks.
* next_task_time (optional) will be set to time of the next task, note that 0 will be set if tasks were
* added via aws_task_scheduler_schedule_now() and UINT64_MAX will be set if no tasks are scheduled at all.
*/
AWS_COMMON_API
bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time);
/**
* Schedules a task to run immediately.
* The task should not be cleaned up or modified until its function is executed.
*/
AWS_COMMON_API
void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struct aws_task *task);
/**
* Schedules a task to run at time_to_run.
* The task should not be cleaned up or modified until its function is executed.
*/
AWS_COMMON_API
void aws_task_scheduler_schedule_future(
struct aws_task_scheduler *scheduler,
struct aws_task *task,
uint64_t time_to_run);
/**
* Removes task from the scheduler and invokes the task with the AWS_TASK_STATUS_CANCELED status.
*/
AWS_COMMON_API
void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct aws_task *task);
/**
* Sequentially execute all tasks scheduled to run at, or before current_time.
* AWS_TASK_STATUS_RUN_READY will be passed to the task function as the task status.
*
* If a task schedules another task, the new task will not be executed until the next call to this function.
*/
AWS_COMMON_API
void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time);
/**
* Convert a status value to a c-string suitable for logging
*/
AWS_COMMON_API
const char *aws_task_status_to_c_str(enum aws_task_status status);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_TASK_SCHEDULER_H */

View File

@@ -0,0 +1,270 @@
#ifndef AWS_COMMON_THREAD_H
#define AWS_COMMON_THREAD_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
#include <aws/common/string.h>
#ifndef _WIN32
# include <pthread.h>
#endif
AWS_PUSH_SANE_WARNING_LEVEL
enum aws_thread_detach_state {
    AWS_THREAD_NOT_CREATED = 1, /* no OS-level thread has been launched yet */
    AWS_THREAD_JOINABLE,        /* thread launched; user may call aws_thread_join() */
    AWS_THREAD_JOIN_COMPLETED,  /* join has finished; safe to clean up */
    AWS_THREAD_MANAGED,         /* joined lazily by the managed thread system; do not join manually */
};
/**
* Specifies the join strategy used on an aws_thread, which in turn controls whether or not a thread participates
* in the managed thread system. The managed thread system provides logic to guarantee a join on all participating
* threads at the cost of laziness (the user cannot control when joins happen).
*
* Manual - thread does not participate in the managed thread system; any joins must be done by the user. This
* is the default. The user must call aws_thread_clean_up(), but only after any desired join operation has completed.
* Not doing so will cause the windows handle to leak.
*
* Managed - the managed thread system will automatically perform a join some time after the thread's run function
* has completed. It is an error to call aws_thread_join on a thread configured with the managed join strategy. The
* managed thread system will call aws_thread_clean_up() on the thread after the background join has completed.
*
* Additionally, an API exists, aws_thread_join_all_managed(), which blocks and returns when all outstanding threads
* with the managed strategy have fully joined. This API is useful for tests (rather than waiting for many individual
* signals) and program shutdown or DLL unload. This API is automatically invoked by the common library clean up
* function. If the common library clean up is called from a managed thread, this will cause deadlock.
*
* Lazy thread joining is done only when threads finish their run function or when the user calls
* aws_thread_join_all_managed(). This means it may be a long time between thread function completion and the join
* being applied, but the queue of unjoined threads is always one or fewer so there is no critical resource
* backlog.
*
* Currently, only event loop group async cleanup and host resolver threads participate in the managed thread system.
* Additionally, event loop threads will increment and decrement the pending join count (they are manually joined
* internally) in order to have an accurate view of internal thread usage and also to prevent failure to release
* an event loop group fully from allowing aws_thread_join_all_managed() from running to completion when its
* intent is such that it should block instead.
*/
enum aws_thread_join_strategy {
AWS_TJS_MANUAL = 0,
AWS_TJS_MANAGED,
};
/**
* Thread names should be 15 characters or less.
* Longer names will not display on Linux.
* This length does not include a null terminator.
*/
#define AWS_THREAD_NAME_RECOMMENDED_STRLEN 15
struct aws_thread_options {
    /* Requested stack size in bytes for the thread. NOTE(review): presumably 0
     * selects the platform default stack size — confirm against the implementation. */
    size_t stack_size;

    /* default is -1. If you set this to anything >= 0, and the platform supports it, the thread will be pinned to
     * that cpu. Also, we assume you're doing this for memory throughput purposes. On unix systems,
     * If libnuma.so is available, upon the thread launching, the memory policy for that thread will be set to
     * allocate on the numa node that cpu-core is on.
     *
     * On windows, this will cause the thread affinity to be set, but currently we don't do anything to tell the OS
     * how to allocate memory on a node.
     *
     * On Apple and Android platforms, this setting doesn't do anything at all.
     */
    int32_t cpu_id;

    /* Manual vs managed join; AWS_TJS_MANUAL is the default (see enum aws_thread_join_strategy docs above). */
    enum aws_thread_join_strategy join_strategy;

    /**
     * Thread name, for debugging purpose.
     * The length should not exceed AWS_THREAD_NAME_RECOMMENDED_STRLEN(15)
     * if you want it to display properly on all platforms.
     */
    struct aws_byte_cursor name;
};
#ifdef _WIN32
typedef union {
void *ptr;
} aws_thread_once;
# define AWS_THREAD_ONCE_STATIC_INIT {NULL}
typedef unsigned long aws_thread_id_t;
#else
typedef pthread_once_t aws_thread_once;
# define AWS_THREAD_ONCE_STATIC_INIT PTHREAD_ONCE_INIT
typedef pthread_t aws_thread_id_t;
#endif
/*
* Buffer size needed to represent aws_thread_id_t as a string (2 hex chars per byte
* plus '\0' terminator). Needed for portable printing because pthread_t is
* opaque.
*/
#define AWS_THREAD_ID_T_REPR_BUFSZ (sizeof(aws_thread_id_t) * 2 + 1)
struct aws_thread {
struct aws_allocator *allocator;
enum aws_thread_detach_state detach_state;
#ifdef _WIN32
void *thread_handle;
#endif
aws_thread_id_t thread_id;
};
AWS_EXTERN_C_BEGIN
/**
* Returns an instance of system default thread options.
*/
AWS_COMMON_API
const struct aws_thread_options *aws_default_thread_options(void);
AWS_COMMON_API void aws_thread_call_once(aws_thread_once *flag, void (*call_once)(void *), void *user_data);
/**
* Initializes a new platform specific thread object struct (not the os-level
* thread itself).
*/
AWS_COMMON_API
int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator);
/**
* Creates an OS level thread and associates it with func. context will be passed to func when it is executed.
* options will be applied to the thread if they are applicable for the platform.
*
* After launch, you may join on the thread. A successfully launched thread must have clean_up called on it in order
* to avoid a handle leak. If you do not join before calling clean_up, the thread will become detached.
*
* Managed threads must not have join or clean_up called on them by external code.
*/
AWS_COMMON_API
int aws_thread_launch(
struct aws_thread *thread,
void (*func)(void *arg),
void *arg,
const struct aws_thread_options *options);
/**
* Gets the id of thread
*/
AWS_COMMON_API
aws_thread_id_t aws_thread_get_id(struct aws_thread *thread);
/**
* Gets the detach state of the thread. For example, is it safe to call join on
* this thread? Has it been detached()?
*/
AWS_COMMON_API
enum aws_thread_detach_state aws_thread_get_detach_state(struct aws_thread *thread);
/**
* Joins the calling thread to a thread instance. Returns when thread is
* finished. Calling this from the associated OS thread will cause a deadlock.
*/
AWS_COMMON_API
int aws_thread_join(struct aws_thread *thread);
/**
* Blocking call that waits for all managed threads to complete their join call. This can only be called
* from the main thread or a non-managed thread.
*
* This gets called automatically from library cleanup.
*
* By default the wait is unbounded, but that default can be overridden via aws_thread_set_managed_join_timeout_ns()
*/
AWS_COMMON_API
int aws_thread_join_all_managed(void);
/**
* Overrides how long, in nanoseconds, that aws_thread_join_all_managed will wait for threads to complete.
* A value of zero will result in an unbounded wait.
*/
AWS_COMMON_API
void aws_thread_set_managed_join_timeout_ns(uint64_t timeout_in_ns);
/**
* Cleans up the thread handle. Don't call this on a managed thread. If you wish to join the thread, you must join
* before calling this function.
*/
AWS_COMMON_API
void aws_thread_clean_up(struct aws_thread *thread);
/**
* Returns the thread id of the calling thread.
*/
AWS_COMMON_API
aws_thread_id_t aws_thread_current_thread_id(void);
/**
* Compare thread ids.
*/
AWS_COMMON_API
bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2);
/**
* Sleeps the current thread by nanos.
*/
AWS_COMMON_API
void aws_thread_current_sleep(uint64_t nanos);
typedef void(aws_thread_atexit_fn)(void *user_data);
/**
* Adds a callback to the chain to be called when the current thread joins.
* Callbacks are called from the current thread, in the reverse order they
* were added, after the thread function returns.
* If not called from within an aws_thread, has no effect.
*/
AWS_COMMON_API
int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data);
/**
* Increments the count of unjoined threads in the managed thread system. Used by managed threads and
* event loop threads. Additional usage requires the user to join corresponding threads themselves and
* correctly increment/decrement even in the face of launch/join errors.
*
* aws_thread_join_all_managed() will not return until this count has gone to zero.
*/
AWS_COMMON_API void aws_thread_increment_unjoined_count(void);
/**
* Decrements the count of unjoined threads in the managed thread system. Used by managed threads and
* event loop threads. Additional usage requires the user to join corresponding threads themselves and
* correctly increment/decrement even in the face of launch/join errors.
*
* aws_thread_join_all_managed() will not return until this count has gone to zero.
*/
AWS_COMMON_API void aws_thread_decrement_unjoined_count(void);
/**
* Gets name of the current thread.
* Caller is responsible for destroying returned string.
* If thread does not have a name, AWS_OP_SUCCESS is returned and out_name is
* set to NULL.
* If underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised
* If OS does not support getting thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED
* will be raised
*/
AWS_COMMON_API int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name);
/**
* Gets name of the thread.
* Caller is responsible for destroying returned string.
* If thread does not have a name, AWS_OP_SUCCESS is returned and out_name is
* set to NULL.
* If underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised
* If OS does not support getting thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED
* will be raised
*/
AWS_COMMON_API int aws_thread_name(
struct aws_allocator *allocator,
aws_thread_id_t thread_id,
struct aws_string **out_name);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_THREAD_H */

View File

@@ -0,0 +1,63 @@
#ifndef AWS_COMMON_THREAD_SCHEDULER_H
#define AWS_COMMON_THREAD_SCHEDULER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_thread_scheduler;
struct aws_thread_options;
struct aws_task;
AWS_EXTERN_C_BEGIN
/**
* Creates a new instance of a thread scheduler. This object receives scheduled tasks and executes them inside a
* background thread. On success, this function returns an instance with a ref-count of 1. On failure it returns NULL.
*
* thread_options are optional.
*
* The semantics of this interface conform to the semantics of aws_task_scheduler.
*/
AWS_COMMON_API
struct aws_thread_scheduler *aws_thread_scheduler_new(
struct aws_allocator *allocator,
const struct aws_thread_options *thread_options);
/**
* Acquire a reference to the scheduler.
*/
AWS_COMMON_API void aws_thread_scheduler_acquire(struct aws_thread_scheduler *scheduler);
/**
* Release a reference to the scheduler.
*/
AWS_COMMON_API void aws_thread_scheduler_release(const struct aws_thread_scheduler *scheduler);
/**
* Schedules a task to run in the future. time_to_run is the absolute time from the system hw_clock.
*/
AWS_COMMON_API void aws_thread_scheduler_schedule_future(
struct aws_thread_scheduler *scheduler,
struct aws_task *task,
uint64_t time_to_run);
/**
* Schedules a task to run as soon as possible.
*/
AWS_COMMON_API void aws_thread_scheduler_schedule_now(struct aws_thread_scheduler *scheduler, struct aws_task *task);
/**
* Cancel a task that has been scheduled. The cancellation callback will be invoked in the background thread.
* This function is slow, so please don't do it in the hot path for your code.
*/
AWS_COMMON_API void aws_thread_scheduler_cancel_task(struct aws_thread_scheduler *scheduler, struct aws_task *task);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_THREAD_SCHEDULER_H */

View File

@@ -0,0 +1,32 @@
#ifndef AWS_COMMON_TIME_H
#define AWS_COMMON_TIME_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
#include <time.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Cross platform friendly version of timegm
*/
AWS_COMMON_API time_t aws_timegm(struct tm *const t);
/**
* Cross platform friendly version of localtime_r
*/
AWS_COMMON_API void aws_localtime(time_t time, struct tm *t);
/**
* Cross platform friendly version of gmtime_r
*/
AWS_COMMON_API void aws_gmtime(time_t time, struct tm *t);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_TIME_H */

View File

@@ -0,0 +1,187 @@
#ifndef AWS_COMMON_URI_H
#define AWS_COMMON_URI_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
AWS_PUSH_SANE_WARNING_LEVEL
/**
 * Data representing a URI. uri_str is always allocated and filled in.
 * The other portions are merely storing offsets into uri_str.
 */
struct aws_uri {
    size_t self_size; /* NOTE(review): presumably sizeof(struct aws_uri), set at init — confirm */
    struct aws_allocator *allocator; /* allocator that owns uri_str */
    struct aws_byte_buf uri_str;     /* full URI text; every cursor below points into this buffer */
    struct aws_byte_cursor scheme;    /* e.g. http, https, ftp; empty if not present in the uri */
    struct aws_byte_cursor authority; /* host[:port]; empty if this was a request uri */
    struct aws_byte_cursor userinfo;  /* user[:password] portion of the authority, if any */
    struct aws_byte_cursor user;
    struct aws_byte_cursor password;
    struct aws_byte_cursor host_name; /* host portion of the authority */
    uint32_t port;                    /* 0 when no port was present */
    struct aws_byte_cursor path;           /* includes any leading '/' */
    struct aws_byte_cursor query_string;   /* without the leading '?' */
    struct aws_byte_cursor path_and_query; /* the portion sent across the wire */
};
/**
* key/value pairs for a query string. If the query fragment was not in format key=value, the fragment value
* will be stored in key
*/
struct aws_uri_param {
struct aws_byte_cursor key;
struct aws_byte_cursor value;
};
/**
* Arguments for building a URI instance. All members must
* be initialized before passing them to aws_uri_init().
*
* query_string and query_params are exclusive to each other. If you set
* query_string, do not prepend it with '?'
*/
struct aws_uri_builder_options {
struct aws_byte_cursor scheme;
struct aws_byte_cursor path;
struct aws_byte_cursor host_name;
uint32_t port;
struct aws_array_list *query_params;
struct aws_byte_cursor query_string;
};
AWS_EXTERN_C_BEGIN
/**
* Parses 'uri_str' and initializes uri. Returns AWS_OP_SUCCESS, on success, AWS_OP_ERR on failure.
* After calling this function, the parts can be accessed.
*/
AWS_COMMON_API int aws_uri_init_parse(
struct aws_uri *uri,
struct aws_allocator *allocator,
const struct aws_byte_cursor *uri_str);
/**
* Initializes uri to values specified in options. Returns AWS_OP_SUCCESS, on success, AWS_OP_ERR on failure.
* After calling this function, the parts can be accessed.
*/
AWS_COMMON_API int aws_uri_init_from_builder_options(
struct aws_uri *uri,
struct aws_allocator *allocator,
struct aws_uri_builder_options *options);
AWS_COMMON_API void aws_uri_clean_up(struct aws_uri *uri);
/**
* Returns the scheme portion of the uri (e.g. http, https, ftp, ftps, etc...). If the scheme was not present
* in the uri, the returned value will be empty. It is the users job to determine the appropriate defaults
* if this field is empty, based on protocol, port, etc...
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_scheme(const struct aws_uri *uri);
/**
* Returns the authority portion of the uri (host[:port]). If it was not present, this was a request uri. In that
* case, the value will be empty.
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_authority(const struct aws_uri *uri);
/**
* Returns the path portion of the uri, including any leading '/'. If not present, this value will be empty.
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path(const struct aws_uri *uri);
/**
* Returns the query string portion of the uri, minus the '?'. If not present, this value will be empty.
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_query_string(const struct aws_uri *uri);
/**
* Returns the 'host_name' portion of the authority. If no authority was present, this value will be empty.
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_host_name(const struct aws_uri *uri);
/**
* Returns the port portion of the authority if it was present, otherwise, returns 0.
* If this is 0, it is the users job to determine the correct port based on scheme and protocol.
*/
AWS_COMMON_API uint32_t aws_uri_port(const struct aws_uri *uri);
/**
* Returns the path and query portion of the uri (i.e., the thing you send across the wire).
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path_and_query(const struct aws_uri *uri);
/**
* For iterating over the params in the query string.
* `param` is an in/out argument used to track progress, it MUST be zeroed out to start.
* If true is returned, `param` contains the value of the next param.
* If false is returned, there are no further params.
*
* Edge cases:
* 1) Entries without '=' sign are treated as having a key and no value.
* Example: First param in query string "a&b=c" has key="a" value=""
*
* 2) Blank entries are skipped.
* Example: The only param in query string "&&a=b" is key="a" value="b"
*/
AWS_COMMON_API bool aws_query_string_next_param(struct aws_byte_cursor query_string, struct aws_uri_param *param);
/**
* Parses query string and stores the parameters in 'out_params'. Returns AWS_OP_SUCCESS on success and
* AWS_OP_ERR on failure. The user is responsible for initializing out_params with item size of struct aws_query_param.
* The user is also responsible for cleaning up out_params when finished.
*/
AWS_COMMON_API int aws_query_string_params(struct aws_byte_cursor query_string, struct aws_array_list *out_params);
/**
* For iterating over the params in the uri query string.
* `param` is an in/out argument used to track progress, it MUST be zeroed out to start.
* If true is returned, `param` contains the value of the next param.
* If false is returned, there are no further params.
*
* Edge cases:
* 1) Entries without '=' sign are treated as having a key and no value.
* Example: First param in query string "a&b=c" has key="a" value=""
*
* 2) Blank entries are skipped.
* Example: The only param in query string "&&a=b" is key="a" value="b"
*/
AWS_COMMON_API bool aws_uri_query_string_next_param(const struct aws_uri *uri, struct aws_uri_param *param);
/**
* Parses query string and stores the parameters in 'out_params'. Returns AWS_OP_SUCCESS on success and
* AWS_OP_ERR on failure. The user is responsible for initializing out_params with item size of struct aws_query_param.
* The user is also responsible for cleaning up out_params when finished.
*/
AWS_COMMON_API int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list *out_params);
/**
* Writes the uri path encoding of a cursor to a buffer. This is the modified version of rfc3986 used by
* sigv4 signing.
*/
AWS_COMMON_API int aws_byte_buf_append_encoding_uri_path(
struct aws_byte_buf *buffer,
const struct aws_byte_cursor *cursor);
/**
* Writes the uri query param encoding (passthrough alnum + '-' '_' '~' '.') of a UTF-8 cursor to a buffer
* For example, reading "a b_c" would write "a%20b_c".
*/
AWS_COMMON_API int aws_byte_buf_append_encoding_uri_param(
struct aws_byte_buf *buffer,
const struct aws_byte_cursor *cursor);
/**
* Writes the uri decoding of a UTF-8 cursor to a buffer,
* replacing %xx escapes by their single byte equivalent.
* For example, reading "a%20b_c" would write "a b_c".
*/
AWS_COMMON_API int aws_byte_buf_append_decoding_uri(struct aws_byte_buf *buffer, const struct aws_byte_cursor *cursor);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_URI_H */

View File

@@ -0,0 +1,32 @@
#ifndef AWS_COMMON_UUID_H
#define AWS_COMMON_UUID_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/common.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_byte_cursor;
struct aws_byte_buf;
struct aws_uuid {
    uint8_t uuid_data[16]; /* raw 128-bit UUID value */
};

/* 36 bytes for the UUID plus one more for the null terminator. */
enum { AWS_UUID_STR_LEN = 37 };
AWS_EXTERN_C_BEGIN
AWS_COMMON_API int aws_uuid_init(struct aws_uuid *uuid);
AWS_COMMON_API int aws_uuid_init_from_str(struct aws_uuid *uuid, const struct aws_byte_cursor *uuid_str);
AWS_COMMON_API int aws_uuid_to_str(const struct aws_uuid *uuid, struct aws_byte_buf *output);
AWS_COMMON_API bool aws_uuid_equals(const struct aws_uuid *a, const struct aws_uuid *b);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_UUID_H */

View File

@@ -0,0 +1,96 @@
#ifndef AWS_COMMON_XML_PARSER_H
#define AWS_COMMON_XML_PARSER_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/array_list.h>
#include <aws/common/byte_buf.h>
#include <aws/common/exports.h>
AWS_PUSH_SANE_WARNING_LEVEL
struct aws_xml_node;
struct aws_xml_attribute {
struct aws_byte_cursor name;
struct aws_byte_cursor value;
};
/**
* Callback for when an xml node is encountered in the document. As a user you have a few options:
*
* 1. fail the parse by returning AWS_OP_ERR (after an error has been raised). This will stop any further parsing.
* 2. call aws_xml_node_traverse() on the node to descend into the node with a new callback and user_data.
* 3. call aws_xml_node_as_body() to retrieve the contents of the node as text.
*
* You MUST NOT call both aws_xml_node_traverse() and aws_xml_node_as_body() on the same node.
*
 * Return AWS_OP_SUCCESS to continue the parsing operation (see option 1 for how to stop it).
*/
typedef int(aws_xml_parser_on_node_encountered_fn)(struct aws_xml_node *node, void *user_data);
struct aws_xml_parser_options {
/* xml document to parse. */
struct aws_byte_cursor doc;
/* Max node depth used for parsing document. */
size_t max_depth;
/* Callback invoked on the root node */
aws_xml_parser_on_node_encountered_fn *on_root_encountered;
/* User data for callback */
void *user_data;
};
AWS_EXTERN_C_BEGIN
/**
* Parse an XML document.
* WARNING: This is not a public API. It is only intended for use within the aws-c libraries.
*/
AWS_COMMON_API
int aws_xml_parse(struct aws_allocator *allocator, const struct aws_xml_parser_options *options);
/**
* Writes the contents of the body of node into out_body. out_body is an output parameter in this case. Upon success,
* out_body will contain the body of the node.
*/
AWS_COMMON_API
int aws_xml_node_as_body(struct aws_xml_node *node, struct aws_byte_cursor *out_body);
/**
* Traverse node and invoke on_node_encountered when a nested node is encountered.
*/
AWS_COMMON_API
int aws_xml_node_traverse(
struct aws_xml_node *node,
aws_xml_parser_on_node_encountered_fn *on_node_encountered,
void *user_data);
/*
* Get the name of an xml node.
*/
AWS_COMMON_API
struct aws_byte_cursor aws_xml_node_get_name(const struct aws_xml_node *node);
/*
* Get the number of attributes for an xml node.
*/
AWS_COMMON_API
size_t aws_xml_node_get_num_attributes(const struct aws_xml_node *node);
/*
* Get an attribute for an xml node by its index.
*/
AWS_COMMON_API
struct aws_xml_attribute aws_xml_node_get_attribute(const struct aws_xml_node *node, size_t attribute_index);
AWS_EXTERN_C_END
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_XML_PARSER_H */

View File

@@ -0,0 +1,67 @@
#ifndef AWS_COMMON_ZERO_H
#define AWS_COMMON_ZERO_H
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/stdbool.h>
#include <aws/common/stdint.h>
#include <string.h>
AWS_PUSH_SANE_WARNING_LEVEL
AWS_EXTERN_C_BEGIN
/**
* Set each byte in the struct to zero.
*/
#define AWS_ZERO_STRUCT(object) \
do { \
memset(&(object), 0, sizeof(object)); \
} while (0)
/**
 * Set each byte in the array to zero.
 * Does not work with arrays of unknown bound.
 * WARNING: the argument must be a true array, not a pointer; on a pointer,
 * sizeof yields the pointer size and only that many bytes would be zeroed.
 */
#define AWS_ZERO_ARRAY(array) memset((void *)(array), 0, sizeof(array))
/**
* Returns whether each byte in the object is zero.
*/
#ifdef CBMC
/* clang-format off */
# define AWS_IS_ZEROED(object) \
__CPROVER_forall { \
int i; \
(i >= 0 && i < sizeof(object)) ==> ((const uint8_t *)&object)[i] == 0 \
}
/* clang-format on */
#else
# define AWS_IS_ZEROED(object) aws_is_mem_zeroed(&(object), sizeof(object))
#endif
/**
* Returns whether each byte is zero.
*/
AWS_STATIC_IMPL
bool aws_is_mem_zeroed(const void *buf, size_t bufsize);
/**
* Securely zeroes a memory buffer. This function will attempt to ensure that
* the compiler will not optimize away this zeroing operation.
*/
AWS_COMMON_API
void aws_secure_zero(void *pBuf, size_t bufsize);
AWS_EXTERN_C_END
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/zero.inl>
#endif /* AWS_NO_STATIC_IMPL */
AWS_POP_SANE_WARNING_LEVEL
#endif /* AWS_COMMON_ZERO_H */

View File

@@ -0,0 +1,49 @@
#ifndef AWS_COMMON_ZERO_INL
#define AWS_COMMON_ZERO_INL
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/stdbool.h>
#include <aws/common/stdint.h>
#include <aws/common/zero.h>
#include <string.h>
AWS_EXTERN_C_BEGIN
/**
 * Returns whether each byte of the first bufsize bytes at buf is zero.
 *
 * Scans 8 bytes at a time, then finishes byte-by-byte. The 8-byte words are
 * loaded via memcpy rather than through a casted uint64_t pointer: the cast
 * form is undefined behavior when buf is not 8-byte aligned and also violates
 * strict aliasing (CERT EXP39-C). memcpy of a known small constant size
 * compiles to the same single load on mainstream compilers.
 */
AWS_STATIC_IMPL
bool aws_is_mem_zeroed(const void *buf, size_t bufsize) {
    /* Optimization idea: vectorized instructions to check more than 64 bits at a time. */

    const uint8_t *cursor = (const uint8_t *)buf;
    size_t remaining = bufsize;

    /* Check 64 bits at a time */
    while (remaining >= 8) {
        uint64_t word;
        memcpy(&word, cursor, sizeof(word)); /* alignment- and aliasing-safe load */
        if (word != 0) {
            return false;
        }
        cursor += 8;
        remaining -= 8;
    }

    /* Check remaining tail (fewer than 8 bytes) one byte at a time */
    while (remaining > 0) {
        if (*cursor != 0) {
            return false;
        }
        ++cursor;
        --remaining;
    }

    return true;
}
AWS_EXTERN_C_END
#endif /* AWS_COMMON_ZERO_INL */