Commit 3b6633ba authored by Marek Vavrusa's avatar Marek Vavrusa

Atomics compatibility wrapper; it should work with most recent compilers.

Fallback for x86/amd64 architectures using full barrier.

Change-Id: I80e3beaa721653f760f2d0a6d26a80dd422ab618
parent f37df93b
......@@ -22,6 +22,7 @@ samples/Makefile.am
src/Makefile.am
src/common/acl.c
src/common/acl.h
src/common/atomic.h
src/common/base32hex.c
src/common/base32hex.h
src/common/base64.c
......
......@@ -15,6 +15,10 @@ Optional packages:
Dependencies for building documentation:
* texinfo
Knot DNS requires compiler to support atomic intrinsics.
GCC 4.1 or newer supports the legacy atomic builtins; however, version 4.7 or newer is preferred.
Clang supports atomics since version 2.9.
Installation
============
......
......@@ -205,6 +205,7 @@ libknots_la_SOURCES = \
common/libtap/tap.c \
common/libtap/tap.h \
common/libtap/tap_unit.h \
common/atomic.h \
common/mempattern.h \
common/mempattern.c \
common/descriptor.h \
......
/* Copyright (C) 2011 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*!
* \file atomic.h
*
* \author Marek Vavrusa <marek.vavrusa@nic.cz>
*
* \brief Compatibility layer for atomic operations.
*
* Supports both __atomic and __sync legacy code.
* Based on the code bits from queue by Rusty Russell <rusty@rustcorp.com.au>.
*
* \addtogroup common_lib
* @{
*/
#ifndef _KNOTD_ATOMIC_H_
#define _KNOTD_ATOMIC_H_
#include <stdbool.h>
#if defined(__ATOMIC_SEQ_CST) /* GCC4.7+ supports C11 atomics. */
/*! \brief Atomically load *ptr under the given __ATOMIC_* memory model. */
static inline unsigned int read_once(unsigned int *ptr, int memmodel)
{
	unsigned int observed;
	__atomic_load(ptr, &observed, memmodel);
	return observed;
}
/*! \brief Atomically load a pointer value under the given memory model. */
static inline void *read_ptr(void **ptr, int memmodel)
{
	void *observed;
	__atomic_load(ptr, &observed, memmodel);
	return observed;
}
/*! \brief Atomically store val into *ptr under the given memory model. */
static inline void store_once(unsigned int *ptr, unsigned int val, int memmodel)
{
	__atomic_store(ptr, &val, memmodel);
}
/*! \brief Atomically store a pointer value under the given memory model. */
static inline void store_ptr(void **ptr, void *val, int memmodel)
{
	__atomic_store(ptr, &val, memmodel);
}
/*! \brief Atomically increment *val by one; the result is discarded. */
static inline void atomic_inc(unsigned int *val, int memmodel)
{
	(void)__atomic_fetch_add(val, 1u, memmodel);
}
/*! \brief Atomically decrement *val by one; the result is discarded. */
static inline void atomic_dec(unsigned int *val, int memmodel)
{
	(void)__atomic_fetch_sub(val, 1u, memmodel);
}
/*! \brief Strong compare-and-swap: if *ptr == old, write nval.
 *  \return true when the swap took place. */
static inline bool compare_and_swap(unsigned int *ptr,
		unsigned int old, unsigned int nval, int memmodel)
{
	unsigned int expected = old;
	return __atomic_compare_exchange_n(ptr, &expected, nval, false,
	                                   memmodel, memmodel);
}
#else /* Legacy __sync interface */
#if defined(__i386__) || defined(__i686__) || defined(__amd64__)
/* Full memory barrier for x86/amd64: MFENCE orders all earlier loads and
 * stores before all later ones; the "memory" clobber additionally stops the
 * compiler from reordering accesses across this point. */
static inline void mb(void) /* mfence compatible */
{
asm volatile ("mfence" : : : "memory");
}
#else /* last resort for other architectures */
/* Full memory barrier fallback for other architectures:
 * __sync_synchronize() emits the target's full-barrier sequence. */
static inline void mb(void)
{
__sync_synchronize();
}
#endif
/* Define as full barrier. */
#undef __ATOMIC_SEQ_CST
#undef __ATOMIC_RELAXED
#undef __ATOMIC_ACQUIRE
#undef __ATOMIC_RELEASE
#define __ATOMIC_SEQ_CST 1
#define __ATOMIC_RELAXED 1
#define __ATOMIC_ACQUIRE 1
#define __ATOMIC_RELEASE 0
/*! \brief Atomic read of *ptr via add-of-zero; the __sync operation implies
 *  a full barrier, so memmodel is ignored on this legacy path. */
static inline unsigned int read_once(unsigned int *ptr, int memmodel)
{
	(void)memmodel;
	unsigned int cur = __sync_fetch_and_add(ptr, 0u);
	return cur;
}
/*! \brief Atomic read of a pointer on the legacy __sync path.
 *
 *  Implemented as a compare-and-swap of NULL with NULL: the CAS returns the
 *  current value atomically and never changes it (swapping NULL for NULL is
 *  a no-op).  The previous __sync_fetch_and_add(ptr, 0) form performed
 *  arithmetic on a void * operand, which is a GNU extension rejected or
 *  warned about by some compilers; the CAS form is portable across GCC and
 *  Clang.  The __sync operation implies a full barrier, so memmodel is
 *  ignored here, as elsewhere on this path. */
static inline void *read_ptr(void **ptr, int memmodel)
{
	(void)memmodel;
	return __sync_val_compare_and_swap(ptr, (void *)0, (void *)0);
}
/* Store val to *ptr through a volatile lvalue, then optionally issue a full
 * barrier.  With the redefined models above, every model except
 * __ATOMIC_RELEASE (0) gets the trailing barrier.
 * NOTE(review): release ordering normally requires ordering BEFORE the
 * store; skipping the barrier appears to rely on x86 TSO store ordering —
 * confirm before other architectures take this branch. */
static inline void store_once(unsigned int *ptr, unsigned int val, int memmodel)
{
*(volatile unsigned int *)ptr = val;
if (memmodel)
mb();
}
/* Store a pointer through a volatile lvalue, then optionally issue a full
 * barrier (skipped only for __ATOMIC_RELEASE, redefined to 0 above).
 * NOTE(review): as with store_once, release semantics here seem to rely on
 * x86 store ordering — confirm for non-x86 targets on this path. */
static inline void store_ptr(void **ptr, void *val, int memmodel)
{
*(void * volatile *)ptr = val;
if (memmodel)
mb();
}
/*! \brief Atomically increment *val; __sync ops imply a full barrier, so
 *  memmodel is ignored on this legacy path. */
static inline void atomic_inc(unsigned int *val, int memmodel)
{
	(void)memmodel;
	(void)__sync_add_and_fetch(val, 1u);
}
/*! \brief Atomically decrement *val; __sync ops imply a full barrier, so
 *  memmodel is ignored on this legacy path. */
static inline void atomic_dec(unsigned int *val, int memmodel)
{
	(void)memmodel;
	(void)__sync_sub_and_fetch(val, 1u);
}
/*! \brief Strong compare-and-swap on the legacy __sync path.
 *  \return true when *ptr was equal to old and has been set to nval. */
static inline bool compare_and_swap(unsigned int *ptr,
		unsigned int old, unsigned int nval, int memmodel)
{
	(void)memmodel;
	unsigned int witnessed = __sync_val_compare_and_swap(ptr, old, nval);
	return witnessed == old;
}
#endif
#endif /* _KNOTD_ATOMIC_H_ */
/*! @} */
......@@ -21,134 +21,13 @@
* THE SOFTWARE.
*/
#include "queue.h"
#include "atomic.h"
#include "config.h"
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#define HAVE_GCC_ATOMICS 1
#if HAVE_GCC_ATOMICS
/* Even these will go away with stdatomic.h */
/* Atomic load of an unsigned int with the requested memory model. */
static unsigned int read_once(unsigned int *ptr, int memmodel)
{
	unsigned int seen;
	__atomic_load(ptr, &seen, memmodel);
	return seen;
}
/* Atomic load of a pointer with the requested memory model. */
static void *read_ptr(void **ptr, int memmodel)
{
	void *seen;
	__atomic_load(ptr, &seen, memmodel);
	return seen;
}
/* Atomic store of an unsigned int with the requested memory model. */
static void store_once(unsigned int *ptr, unsigned int val, int memmodel)
{
	__atomic_store(ptr, &val, memmodel);
}
/* Atomic store of a pointer with the requested memory model. */
static void store_ptr(void **ptr, void *val, int memmodel)
{
	__atomic_store(ptr, &val, memmodel);
}
/* Atomic increment of *val; the updated value is discarded. */
static void atomic_inc(unsigned int *val, int memmodel)
{
	(void)__atomic_fetch_add(val, 1u, memmodel);
}
/* Atomic decrement of *val; the updated value is discarded. */
static void atomic_dec(unsigned int *val, int memmodel)
{
	(void)__atomic_fetch_sub(val, 1u, memmodel);
}
/* Strong compare-and-swap: when *ptr equals old, replace it with new.
 * Returns true only when the exchange happened. */
static bool compare_and_swap(unsigned int *ptr,
		unsigned int old, unsigned int new, int memmodel)
{
	unsigned int expect = old;
	return __atomic_compare_exchange_n(ptr, &expect, new, false,
	                                   memmodel, memmodel);
}
#else
#undef __ATOMIC_SEQ_CST
#undef __ATOMIC_RELAXED
#undef __ATOMIC_ACQUIRE
#undef __ATOMIC_RELEASE
/* Overkill, but all-or-nothing keeps it simple. */
#define __ATOMIC_SEQ_CST 1
#define __ATOMIC_RELAXED 1
#define __ATOMIC_ACQUIRE 1
#define __ATOMIC_RELEASE 0
#ifdef __i386__
/* Full memory barrier: MFENCE orders all prior loads/stores before all
 * later ones; the "memory" clobber also blocks compiler reordering. */
static inline void mb(void)
{
asm volatile ("mfence" : : : "memory");
}
#else
/* Deliberate build failure: port mb() before using this fallback path. */
#error implement mb
#endif
/* Read *ptr with an optional full barrier issued BEFORE the load, so the
 * read cannot be satisfied ahead of earlier accesses.
 * NOTE(review): single-copy atomicity of the volatile read is assumed from
 * natural int alignment on x86 — confirm; the standard does not guarantee
 * it. */
static unsigned int read_once(unsigned int *ptr, bool barrier)
{
if (barrier)
mb();
return *(volatile unsigned int *)ptr;
}
/* Read a pointer with an optional full barrier BEFORE the load, keeping it
 * from being satisfied ahead of earlier accesses.
 * NOTE(review): atomicity of the volatile pointer load is assumed from
 * natural alignment on x86 — confirm. */
static void *read_ptr(void **ptr, bool barrier)
{
if (barrier)
mb();
return *(void * volatile *)ptr;
}
/* Volatile store followed by an optional full barrier (callers pass
 * barrier==false for release, per the redefined __ATOMIC_RELEASE == 0).
 * NOTE(review): without the trailing barrier this appears to rely on x86
 * store ordering for release semantics — confirm. */
static void store_once(unsigned int *ptr, unsigned int val, bool barrier)
{
*(volatile unsigned int *)ptr = val;
if (barrier)
mb();
}
/* Volatile pointer store followed by an optional full barrier.
 * NOTE(review): same x86-ordering assumption as store_once — confirm before
 * porting. */
static void store_ptr(void **ptr, void *val, bool barrier)
{
*(void * volatile *)ptr = val;
if (barrier)
mb();
}
/* Atomic increment via LOCK ADD (x86 only); the "memory" clobber doubles as
 * a compiler barrier.  The barrier argument is ignored — presumably because
 * LOCK-prefixed operations already provide the needed ordering on x86;
 * confirm. */
static void atomic_inc(unsigned int *val, bool barrier)
{
#ifdef __i386__
asm volatile ("lock addl $1, (%0)" : : "r"(val) : "memory");
#else
#error implement atomic_inc
#endif
}
/* Atomic decrement via LOCK SUB (x86 only); mirrors atomic_inc, including
 * the ignored barrier argument — see the note there. */
static void atomic_dec(unsigned int *val, bool barrier)
{
#ifdef __i386__
asm volatile ("lock subl $1, (%0)" : : "r"(val) : "memory");
#else
#error implement atomic_dec
#endif
}
/* Compare-and-swap via LOCK CMPXCHG (x86 only): if *ptr == old, store new;
 * returns true when the swap happened.  Register EAX ("a" constraint)
 * carries the expected value in and the observed value out, per the
 * CMPXCHG contract.  The memmodel argument is ignored on this path. */
static bool compare_and_swap(unsigned int *ptr,
unsigned int old, unsigned int new,
int memmodel)
{
#ifdef __i386__
unsigned int prev;
asm volatile ("lock cmpxchgl %1, (%2)"
: "=a"(prev) : "r"(new), "r"(ptr), "a"(old) : "memory");
return prev == old;
#else
#error implement compare_and_swap
#endif
}
#endif /* ! GCC 4.7 or above */
static void wait_for_change(unsigned int *ptr, unsigned int val)
{
while (read_once(ptr, __ATOMIC_RELAXED) == val);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment