00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019 #ifndef _POSIX_MUTEX_H
00020 #define _POSIX_MUTEX_H
00021
00022 #include <asm/xenomai/atomic.h>
00023 #include <pthread.h>
00024
00025 struct pse51_mutex;
00026
/*
 * User-visible mutex object of the POSIX skin.
 *
 * The union overlays the kernel-maintained shadow descriptor on the
 * storage of a libc pthread_mutex_t, so userland and the nucleus view
 * the same memory.  The field layout is ABI between userland and the
 * kernel module — do not reorder or resize members.
 */
union __xeno_mutex {
	pthread_mutex_t native_mutex;		/* libc view; size/alignment donor */
	struct __shadow_mutex {
		unsigned magic;			/* PSE51_MUTEX_MAGIC when initialized */
		unsigned lockcnt;		/* recursion count held by the owner */
		struct pse51_mutex *mutex;	/* kernel-side companion object */
		xnarch_atomic_t lock;
		/*
		 * NOTE(review): `lock` looks like fast-path state, yet it sits
		 * outside the CONFIG_XENO_FASTSYNCH guard below — confirm
		 * whether it should be inside the #ifdef.
		 */
#ifdef CONFIG_XENO_FASTSYNCH
		union {
			unsigned owner_offset;	/* owner word as a shared-heap offset */
			xnarch_atomic_t *owner;	/* owner word, resolved address */
		};
		struct pse51_mutexattr attr;	/* attributes cached for the fast path */
#endif /* CONFIG_XENO_FASTSYNCH */
	} shadow_mutex;
};
00043
00044 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00045
00046 #include <posix/internal.h>
00047 #include <posix/thread.h>
00048 #include <posix/cb_lock.h>
00049
00050 typedef struct pse51_mutex {
00051 unsigned magic;
00052 xnsynch_t synchbase;
00053 xnholder_t link;
00054
00055 #define link2mutex(laddr) \
00056 ((pse51_mutex_t *)(((char *)laddr) - offsetof(pse51_mutex_t, link)))
00057
00058 pthread_mutexattr_t attr;
00059 pse51_kqueues_t *owningq;
00060 } pse51_mutex_t;
00061
/* Skin-wide default attributes — presumably applied when callers pass a
 * NULL attribute pointer; confirm against mutex.c. */
extern pthread_mutexattr_t pse51_default_mutex_attr;

/* Clean up every mutex still registered on queue set @q. */
void pse51_mutexq_cleanup(pse51_kqueues_t *q);

/* One-time initialization of the mutex package. */
void pse51_mutex_pkg_init(void);

/* Tear down the mutex package (counterpart of pse51_mutex_pkg_init). */
void pse51_mutex_pkg_cleanup(void);

/* Acquire @shadow, waiting until the absolute deadline @to when @timed
 * is non-zero; "break" suggests the wait may be interrupted — confirm
 * semantics in the definition. */
int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
				int timed, xnticks_t to);

/* Validate @shadow before (re-)initialization with @attr. */
int pse51_mutex_check_init(struct __shadow_mutex *shadow,
			   const pthread_mutexattr_t *attr);

/* Bind @shadow to the kernel object @mutex with owner word @ownerp and
 * attributes @attr. */
int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
			      pse51_mutex_t *mutex,
			      xnarch_atomic_t *ownerp,
			      const pthread_mutexattr_t *attr);

/* Destroy @mutex and unlink it from queue set @q. */
void pse51_mutex_destroy_internal(pse51_mutex_t *mutex,
				  pse51_kqueues_t *q);
00084
00085
00086 static inline int pse51_mutex_timedlock_internal(xnthread_t *cur,
00087 struct __shadow_mutex *shadow,
00088 unsigned count,
00089 int timed,
00090 xnticks_t abs_to)
00091
00092 {
00093 pse51_mutex_t *mutex = shadow->mutex;
00094
00095 if (xnpod_unblockable_p())
00096 return -EPERM;
00097
00098 if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex)
00099 || !pse51_obj_active(mutex, PSE51_MUTEX_MAGIC, struct pse51_mutex))
00100 return -EINVAL;
00101
00102 #if XENO_DEBUG(POSIX)
00103 if (mutex->owningq != pse51_kqueues(mutex->attr.pshared))
00104 return -EPERM;
00105 #endif
00106
00107 if (xnsynch_owner_check(&mutex->synchbase, cur) == 0)
00108 return -EBUSY;
00109
00110 if (timed)
00111 xnsynch_acquire(&mutex->synchbase, abs_to, XN_REALTIME);
00112 else
00113 xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE);
00114
00115 if (unlikely(xnthread_test_info(cur, XNBREAK | XNRMID | XNTIMEO))) {
00116 if (xnthread_test_info(cur, XNBREAK))
00117 return -EINTR;
00118 else if (xnthread_test_info(cur, XNTIMEO))
00119 return -ETIMEDOUT;
00120 else
00121 return -EINVAL;
00122 }
00123
00124 shadow->lockcnt = count;
00125
00126 return 0;
00127 }
00128
00129 #endif
00130
00131 #endif