aboutsummaryrefslogtreecommitdiff
path: root/src/rt/sync
diff options
context:
space:
mode:
Diffstat (limited to 'src/rt/sync')
-rw-r--r--src/rt/sync/fair_ticket_lock.cpp43
-rw-r--r--src/rt/sync/fair_ticket_lock.h15
-rw-r--r--src/rt/sync/lock_free_queue.cpp37
-rw-r--r--src/rt/sync/lock_free_queue.h15
-rw-r--r--src/rt/sync/spin_lock.cpp47
-rw-r--r--src/rt/sync/spin_lock.h14
6 files changed, 171 insertions, 0 deletions
diff --git a/src/rt/sync/fair_ticket_lock.cpp b/src/rt/sync/fair_ticket_lock.cpp
new file mode 100644
index 00000000..0306ee1d
--- /dev/null
+++ b/src/rt/sync/fair_ticket_lock.cpp
@@ -0,0 +1,43 @@
+/*
+ * This works well as long as the number of contending threads
+ * is less than the number of processors. This is because of
+ * the fair locking scheme. If the thread that is next in line
+ * for acquiring the lock is not currently running, no other
+ * thread can acquire the lock. This is terrible for performance,
+ * and it seems that all fair locking schemes suffer from this
+ * behavior.
+ */
+
+// #define TRACE
+
+fair_ticket_lock::fair_ticket_lock() {
+ next_ticket = now_serving = 0;
+}
+
+// No resources to release; the destructor exists only because the
+// header declares it virtual.
+fair_ticket_lock::~fair_ticket_lock() {}
+
+// Acquire the lock in strict FIFO order: atomically take the next
+// ticket, then spin until now_serving reaches it.
+void fair_ticket_lock::lock() {
+    // __sync_fetch_and_add is a full barrier, so grabbing the ticket
+    // also acts as the acquire fence for the critical section.
+    unsigned ticket = __sync_fetch_and_add(&next_ticket, 1);
+    while (now_serving != ticket) {
+        // pause() carries a "memory" clobber, which forces the compiler
+        // to re-read now_serving on every iteration of this loop.
+        pause();
+    }
+#ifdef TRACE
+    printf("locked nextTicket: %d nowServing: %d",
+            next_ticket, now_serving);
+#endif
+}
+
+// Release the lock, handing it to the next ticket in line.
+// Fix: a plain now_serving++ has no compiler/release barrier, so stores
+// made inside the critical section could be reordered past the release.
+// Use the same __sync full-barrier primitive as lock(); only the lock
+// holder increments, so atomicity itself is not the issue — ordering is.
+void fair_ticket_lock::unlock() {
+    __sync_fetch_and_add(&now_serving, 1);
+#ifdef TRACE
+    printf("unlocked nextTicket: %d nowServing: %d",
+            next_ticket, now_serving);
+#endif
+}
+
+// Spin-wait hint: the x86 PAUSE instruction relaxes the core during
+// busy-waiting; the "memory" clobber doubles as a compiler barrier so
+// callers' spin loops re-read shared state each iteration.
+void fair_ticket_lock::pause() {
+    asm volatile("pause\n" : : : "memory");
+}
+
diff --git a/src/rt/sync/fair_ticket_lock.h b/src/rt/sync/fair_ticket_lock.h
new file mode 100644
index 00000000..c34c9041
--- /dev/null
+++ b/src/rt/sync/fair_ticket_lock.h
@@ -0,0 +1,15 @@
+#ifndef FAIR_TICKET_LOCK_H
+#define FAIR_TICKET_LOCK_H
+
+// FIFO ticket lock: callers draw monotonically increasing tickets and
+// are served strictly in order (see fair_ticket_lock.cpp for the
+// caveat about fairness vs. preempted waiters).
+class fair_ticket_lock {
+    unsigned next_ticket;   // next ticket to hand out (atomically bumped)
+    unsigned now_serving;   // ticket currently allowed to hold the lock
+    void pause();           // CPU relax hint used while spinning
+public:
+    fair_ticket_lock();
+    // NOTE(review): virtual dtor but no virtual methods — confirm the
+    // vtable overhead is intended.
+    virtual ~fair_ticket_lock();
+    void lock();
+    void unlock();
+};
+
+#endif /* FAIR_TICKET_LOCK_H */
diff --git a/src/rt/sync/lock_free_queue.cpp b/src/rt/sync/lock_free_queue.cpp
new file mode 100644
index 00000000..9d1081de
--- /dev/null
+++ b/src/rt/sync/lock_free_queue.cpp
@@ -0,0 +1,37 @@
+/*
+ * Interrupt transparent queue, Schoen et al., "On Interrupt-Transparent
+ * Synchronization in an Embedded Object-Oriented Operating System", 2000.
+ * enqueue() is allowed to interrupt enqueue() and dequeue(), however,
+ * dequeue() is not allowed to interrupt itself.
+ */
+
+#include "lock_free_queue.h"
+
+// The queue object itself serves as the sentinel head node: 'tail'
+// starts at 'this' and the head pointer 'next' starts null (empty).
+// Fix: 'next' was never initialized, so the first dequeue() on a fresh
+// queue read an indeterminate pointer.
+lock_free_queue::lock_free_queue() :
+    tail(this) {
+    next = (lock_free_queue_node *) 0;
+}
+
+// Append 'item'.  Publication order matters: tail is swung to the new
+// item first, then the predecessor chain is walked and patched, so an
+// enqueue() that interrupts another enqueue() is repaired by the walk.
+// NOTE(review): 'last = tail; tail = item;' is not an atomic exchange —
+// this is interrupt-transparent on a single CPU per the paper cited at
+// the top of the file, but not safe for truly parallel enqueuers;
+// confirm the intended execution model.
+void lock_free_queue::enqueue(lock_free_queue_node *item) {
+    item->next = (lock_free_queue_node *) 0;
+    lock_free_queue_node *last = tail;
+    tail = item;
+    while (last->next)
+        last = last->next;
+    last->next = item;
+}
+
+// Remove and return the head item, or null when empty.  dequeue() must
+// never interrupt itself; enqueue() may interrupt it at any point.
+// Fix: the qualifier read 'lockfree_queue::', which does not name the
+// declared class 'lock_free_queue' and cannot compile.
+lock_free_queue_node *lock_free_queue::dequeue() {
+    lock_free_queue_node *item = next;
+    if (item && !(next = item->next)) {
+        // Queue looked empty after unlinking 'item': reset tail to the
+        // sentinel (the queue object itself).
+        tail = (lock_free_queue_node *) this;
+        if (item->next) {
+            // An enqueue() interrupted us between reading item->next and
+            // resetting tail, appending behind 'item'; those nodes were
+            // orphaned by the tail reset, so re-enqueue them in order.
+            lock_free_queue_node *lost = item->next;
+            lock_free_queue_node *help;
+            do {
+                help = lost->next;
+                enqueue(lost);
+            } while ((lost = help) != (lock_free_queue_node *) 0);
+        }
+    }
+    return item;
+}
diff --git a/src/rt/sync/lock_free_queue.h b/src/rt/sync/lock_free_queue.h
new file mode 100644
index 00000000..fba4aa9a
--- /dev/null
+++ b/src/rt/sync/lock_free_queue.h
@@ -0,0 +1,15 @@
+#ifndef LOCK_FREE_QUEUE_H
+#define LOCK_FREE_QUEUE_H
+
+// Singly linked queue node.  'next' is public: the queue implementation
+// links nodes directly.
+// Fix: 'next' was private (class default) with no friend or accessor,
+// so lock_free_queue.cpp could not touch it.
+class lock_free_queue_node {
+public:
+    lock_free_queue_node *next;
+};
+
+// Interrupt-transparent FIFO queue (Schoen et al., 2000).
+// Fix: the class declared none of the members its .cpp uses.  Deriving
+// from the node type gives it the inherited 'next' as the head pointer,
+// makes the object itself usable as the sentinel node, and legitimizes
+// the (lock_free_queue_node *) this casts; 'tail' is declared here.
+class lock_free_queue : public lock_free_queue_node {
+    lock_free_queue_node *tail;   // last node; == this when empty
+public:
+    lock_free_queue();
+    void enqueue(lock_free_queue_node *item);
+    lock_free_queue_node *dequeue();
+};
+
+#endif /* LOCK_FREE_QUEUE_H */
diff --git a/src/rt/sync/spin_lock.cpp b/src/rt/sync/spin_lock.cpp
new file mode 100644
index 00000000..11a5cb20
--- /dev/null
+++ b/src/rt/sync/spin_lock.cpp
@@ -0,0 +1,47 @@
+/*
+ * Your average spin lock.
+ */
+
+#include "globals.h"
+
+// #define TRACE
+
+// A spin lock starts out free; reuse unlock() to initialize the flag.
+spin_lock::spin_lock() {
+    unlock();
+}
+
+// No resources to release; the destructor exists only because the
+// header declares it virtual.
+spin_lock::~spin_lock() {}
+
+// Atomically exchange *ptr with x and return the previous value.  On
+// x86, xchg with a memory operand has an implicit LOCK prefix, so this
+// is a full barrier.
+// Fix: the output operand was written "=r" ((unsigned) x); a cast
+// expression is not an lvalue and is invalid as an asm output operand.
+static inline unsigned xchg32(void *ptr, unsigned x) {
+    __asm__ __volatile__("xchgl %0,%1"
+            :"=r" (x)
+            :"m" (*(volatile unsigned *)ptr), "0" (x)
+            :"memory");
+    return x;
+}
+
+// Test-and-test-and-set acquire: try the atomic exchange; while the
+// lock is held, spin on a plain read (no bus locking) until it looks
+// free, then retry the exchange.
+// Fix: the TRACE printf sat after the infinite while(true) loop and was
+// unreachable; it now runs on the successful acquire path.
+void spin_lock::lock() {
+    while (true) {
+        if (!xchg32(&ticket, 1)) {
+#ifdef TRACE
+            printf(" lock: %d", ticket);
+#endif
+            return;
+        }
+        while (ticket) {
+            pause();
+        }
+    }
+}
+
+// Release the lock.  A plain store suffices as a release on x86 (TSO),
+// but without a compiler barrier the compiler may sink stores from
+// inside the critical section below the unlocking store.
+// Fix: add an explicit compiler barrier before clearing the flag.
+void spin_lock::unlock() {
+    __asm__ __volatile__("" : : : "memory");
+    ticket = 0;
+#ifdef TRACE
+    printf("unlock:");
+#endif
+}
+
+// Spin-wait hint: the x86 PAUSE instruction relaxes the core during
+// busy-waiting; the "memory" clobber doubles as a compiler barrier so
+// the spin loop in lock() re-reads 'ticket' each iteration.
+void spin_lock::pause() {
+    asm volatile("pause\n" : : : "memory");
+}
diff --git a/src/rt/sync/spin_lock.h b/src/rt/sync/spin_lock.h
new file mode 100644
index 00000000..3684c23a
--- /dev/null
+++ b/src/rt/sync/spin_lock.h
@@ -0,0 +1,14 @@
+#ifndef UNFAIR_TICKET_LOCK_H
+#define UNFAIR_TICKET_LOCK_H
+
+// Plain (unfair) test-and-set spin lock.
+class spin_lock {
+    // 0 = free, 1 = held.  NOTE(review): the name 'ticket' is a
+    // leftover from the ticket-lock variant — this is just a flag.
+    unsigned ticket;
+    void pause();      // CPU relax hint for the spin loop
+public:
+    spin_lock();
+    // NOTE(review): virtual dtor but no virtual methods — confirm the
+    // vtable overhead is intended.
+    virtual ~spin_lock();
+    void lock();
+    void unlock();
+};
+
+#endif /* UNFAIR_TICKET_LOCK_H */