Introducing the Big Tinc Lock.
authorGuus Sliepen <guus@tinc-vpn.org>
Fri, 14 Jan 2011 21:18:59 +0000 (22:18 +0100)
committerGuus Sliepen <guus@tinc-vpn.org>
Fri, 14 Jan 2011 21:18:59 +0000 (22:18 +0100)
Now that tinc is multi-threaded, we need some form of synchronisation.  For
now, there is a single mutex which we lock whenever we do something with global
data structures. The main thread, which handles timer events, always holds the
mutex except while it is sleeping until the next event; the other threads
receive incoming I/O without the lock held, and acquire it to process the
received data.

In the future, more fine-grained locking should be used, probably in the form
of rwlocks or lock-free algorithms (maybe RCU).

src/linux/device.c
src/meta.c
src/net.c
src/net.h
src/net_packet.c
src/net_socket.c
src/threads.h
src/tincd.c

index c1ca0a21dcde9915de6f0b9f2ea5d22148f524bd..40711cccca5b8f4d1b730ee29821fca9eb2e750e 100644 (file)
@@ -61,7 +61,9 @@ static void read_thread(void *arg) {
 
        while(true) {
                if(read_packet(&packet)) {
+                       mutex_lock(&mutex);
                        route(myself, &packet);
+                       mutex_unlock(&mutex);
                } else {
                        if(errno == EAGAIN || errno == EINTR) {
                                errno = 0;
index 43e1e19e7f8b990b1eeb822020cd170ef11a5e8c..b556e1750fa585b759d38084891de07aae4ba444 100644 (file)
@@ -77,7 +77,9 @@ static bool process_meta(connection_t *c, char *reqbuf, int *len) {
                        if(c->tcplen > *len)
                                break;
 
+                       mutex_lock(&mutex);
                        receive_tcppacket(c, reqbuf, c->tcplen);
+                       mutex_unlock(&mutex);
 
                        memmove(reqbuf, reqbuf, *len - c->tcplen);
                        *len -= c->tcplen;
@@ -88,7 +90,11 @@ static bool process_meta(connection_t *c, char *reqbuf, int *len) {
                        else
                                *end++ = 0;
 
-                       if(!receive_request(c, reqbuf))
+                       mutex_lock(&mutex);
+                       bool success = receive_request(c, reqbuf);
+                       mutex_unlock(&mutex);
+
+                       if(!success)
                                return false;
 
                        memmove(reqbuf, end, *len - (end - reqbuf));
index 39b467df7bccf11ed79a36e1998cf485b90881a2..2ffe03a7b39a7985fdb01a7ab1108880a62c0184 100644 (file)
--- a/src/net.c
+++ b/src/net.c
@@ -220,14 +220,18 @@ void handle_meta_connection_data(void *data) {
 
                getsockopt(c->socket, SOL_SOCKET, SO_ERROR, &result, &len);
 
-               if(!result)
+               if(!result) {
+                       mutex_lock(&mutex);
                        finish_connecting(c);
-               else {
+                       mutex_unlock(&mutex);
+               } else {
                        ifdebug(CONNECTIONS) logger(LOG_DEBUG,
                                           "Error while connecting to %s (%s): %s",
                                           c->name, c->hostname, sockstrerror(result));
                        closesocket(c->socket);
+                       mutex_lock(&mutex);
                        do_outgoing_connection(c);
+                       mutex_unlock(&mutex);
                        return;
                }
        }
@@ -235,7 +239,7 @@ void handle_meta_connection_data(void *data) {
        while(true) {
                if (!receive_meta(c)) {
                        terminate_connection(c, c->status.active);
-                       return;
+                       break;
                }
        }
 }
@@ -368,7 +372,10 @@ int main_loop(void) {
 #endif
 
        while(true) {
-               usleep(1000);
+               mutex_unlock(&mutex);
+               usleep(1000000);
+               mutex_lock(&mutex);
+
                struct event *event;
                while((event = get_expired_event())) {
                        event->handler(event->data);
index d4523fca5f658eb52baba80d4d5023221c0e81ea..2be797d947c55a2ea3c2eca3df1eff8905a917de 100644 (file)
--- a/src/net.h
+++ b/src/net.h
@@ -155,8 +155,8 @@ extern void load_all_subnets();
 
 #ifndef HAVE_MINGW
 #define closesocket(s) close(s)
-#else
-extern CRITICAL_SECTION mutex;
 #endif
 
+extern mutex_t mutex;
+
 #endif                                                 /* __TINC_NET_H__ */
index 2856c616bb99b9cdb3cdbfe7ab363dec1e416795..a985390ca905ddbe70e737e94c97510b0208c1e1 100644 (file)
@@ -599,23 +599,26 @@ void handle_incoming_vpn_data(void *arg) {
 
                sockaddrunmap(&from);           /* Some braindead IPv6 implementations do stupid things. */
 
+               mutex_lock(&mutex);
                n = lookup_node_udp(&from);
 
                if(!n) {
                        n = try_harder(&from, &pkt);
                        if(n)
                                update_node_udp(n, &from);
-                       else ifdebug(PROTOCOL) {
+               }
+
+               if(n) {
+                       receive_udppacket(n, &pkt);
+               } else {
+                       ifdebug(PROTOCOL) {
                                hostname = sockaddr2hostname(&from);
                                logger(LOG_WARNING, "Received UDP packet from unknown source %s", hostname);
                                free(hostname);
-                               continue;
                        }
-                       else
-                               continue;
                }
 
-               receive_udppacket(n, &pkt);
+               mutex_unlock(&mutex);
        }
 }
 
index 28b606798672d0ff31c16b15a905aea442db8859..3c79ceeabacd401210c4ba3a21c004cb251c3874 100644 (file)
@@ -498,6 +498,7 @@ void handle_new_meta_connection(void *data) {
 
                configure_tcp(c);
 
+               mutex_lock(&mutex);
                connection_add(c);
 
                c->allow_request = ID;
@@ -507,6 +508,7 @@ void handle_new_meta_connection(void *data) {
                        logger(LOG_ERR, "create_thread() failed: %s", strerror(errno));
                        abort();
                }
+               mutex_unlock(&mutex);
        }
 }
 
index d55e59ed9eb3f7c15051b470b284fb36dca6bd12..85aaced7ecc843f5cdf0b2c998660ab22a979139 100644 (file)
@@ -38,6 +38,10 @@ static inline void thread_destroy(thread_t *tid) {
 static inline void mutex_create(mutex_t *mutex) {
        pthread_mutex_init(mutex, NULL);
 }
+#if 1
+#define mutex_lock(m) logger(LOG_DEBUG, "mutex_lock() at " __FILE__ " line %d", __LINE__); pthread_mutex_lock(m)
+#define mutex_unlock(m) logger(LOG_DEBUG, "mutex_unlock() at " __FILE__ " line %d", __LINE__); pthread_mutex_unlock(m)
+#else
 static inline void mutex_lock(mutex_t *mutex) {
        pthread_mutex_lock(mutex);
 }
@@ -45,5 +49,6 @@ static inline void mutex_unlock(mutex_t *mutex) {
        pthread_mutex_unlock(mutex);
 }
 #endif
+#endif
 
 #endif
index aab250ae707ba9589031273e8a0d2f735ae706da..951867643ea37fdcbcd79de305d4a5f493c944f4 100644 (file)
@@ -110,6 +110,8 @@ static struct option const long_options[] = {
        {NULL, 0, NULL, 0}
 };
 
+mutex_t mutex;
+
 #ifdef HAVE_MINGW
 static struct WSAData wsa_state;
 CRITICAL_SECTION mutex;
@@ -383,6 +385,8 @@ int main(int argc, char **argv) {
 
        g_argv = argv;
 
+       mutex_create(&mutex);
+       mutex_lock(&mutex);
        init_events();
        init_configuration(&config_tree);