00001
00026 #ifndef _RTDM_DRIVER_H
00027 #define _RTDM_DRIVER_H
00028
00029 #ifndef __KERNEL__
00030 #error This header is for kernel space usage only. \
00031 You are likely looking for rtdm/rtdm.h...
00032 #endif
00033
00034 #include <asm/atomic.h>
00035 #include <linux/list.h>
00036
00037 #include <nucleus/xenomai.h>
00038 #include <nucleus/heap.h>
00039 #include <nucleus/pod.h>
00040 #include <nucleus/synch.h>
00041 #include <nucleus/select.h>
00042 #include <rtdm/rtdm.h>
00043
00044
00045 #include <nucleus/assert.h>
00046 #ifdef CONFIG_PCI
00047 #include <asm-generic/xenomai/pci_ids.h>
00048 #endif
00049
/* RTDM debug instrumentation defaults to off when the Kconfig symbol
 * is not provided by the build. */
#ifndef CONFIG_XENO_OPT_DEBUG_RTDM
#define CONFIG_XENO_OPT_DEBUG_RTDM 0
#endif

/* Forward declarations so the handler typedefs below can reference
 * these types before their full definitions appear. */
struct rtdm_dev_context;
typedef struct xnselector rtdm_selector_t; /* selector used by rtdm_select_bind() */
enum rtdm_selecttype;
00057
/*
 * Device flags (rtdm_device.device_flags).
 */
/* Only a single open instance of the device allowed at a time */
#define RTDM_EXCLUSIVE 0x0001

/* Device is addressed by a name (open()-style access) */
#define RTDM_NAMED_DEVICE 0x0010

/* Device is addressed by protocol family/type (socket()-style access) */
#define RTDM_PROTOCOL_DEVICE 0x0020

/* Mask selecting the device-type bits above */
#define RTDM_DEVICE_TYPE_MASK 0x00F0

/*
 * Context flags (bit numbers within rtdm_dev_context.context_flags;
 * RTDM_CLOSING is tested via test_bit() below).
 */
#define RTDM_CREATED_IN_NRT 0

#define RTDM_CLOSING 1

/* First bit number available for driver-defined context flags */
#define RTDM_USER_CONTEXT_FLAG 8

/* Expected values of the struct_version fields below */
#define RTDM_DEVICE_STRUCT_VER 5

#define RTDM_CONTEXT_STRUCT_VER 3

/* NOTE(review): exact semantics not evident from this header — confirm
 * against the RTDM core before relying on this flag. */
#define RTDM_SECURE_DEVICE 0x80000000

/*
 * Pack a major/minor/patch triple (each truncated to 8 bits) into a
 * single driver version word. Arguments are parenthesized so that
 * expression arguments (e.g. RTDM_DRIVER_VER(maj, min, rev | FLAG))
 * expand correctly — the original left them bare.
 */
#define RTDM_DRIVER_VER(major, minor, patch) \
	((((major) & 0xFF) << 16) | (((minor) & 0xFF) << 8) | ((patch) & 0xFF))

/* Extract the major component from a packed driver version */
#define RTDM_DRIVER_MAJOR_VER(ver) (((ver) >> 16) & 0xFF)

/* Extract the minor component from a packed driver version */
#define RTDM_DRIVER_MINOR_VER(ver) (((ver) >> 8) & 0xFF)

/* Extract the patch component from a packed driver version */
#define RTDM_DRIVER_PATCH_VER(ver) ((ver) & 0xFF)
00126
/* Event types an instance can be bound to via rtdm_select_bind();
 * values map 1:1 onto the nucleus select channel types. */
enum rtdm_selecttype {
	/* Readability event */
	RTDM_SELECTTYPE_READ = XNSELECT_READ,

	/* Writability event */
	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,

	/* Exceptional-condition event */
	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
};

/* Handler invoked on open() of a named device instance;
 * @oflag carries the caller's open flags. */
typedef int (*rtdm_open_handler_t)(struct rtdm_dev_context *context,
				   rtdm_user_info_t *user_info, int oflag);

/* Handler invoked on socket() creation of a protocol device instance. */
typedef int (*rtdm_socket_handler_t)(struct rtdm_dev_context *context,
				     rtdm_user_info_t *user_info, int protocol);

/* Handler invoked when an instance is closed. */
typedef int (*rtdm_close_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info);

/* IOCTL handler; @arg may point into user space (__user). */
typedef int (*rtdm_ioctl_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info,
				    unsigned int request, void __user *arg);

/* Binds an instance to @selector for event @type at position @fd_index. */
typedef int (*rtdm_select_bind_handler_t)(struct rtdm_dev_context *context,
					  rtdm_selector_t *selector,
					  enum rtdm_selecttype type,
					  unsigned fd_index);

/* read() handler. */
typedef ssize_t (*rtdm_read_handler_t)(struct rtdm_dev_context *context,
				       rtdm_user_info_t *user_info,
				       void *buf, size_t nbyte);

/* write() handler. */
typedef ssize_t (*rtdm_write_handler_t)(struct rtdm_dev_context *context,
					rtdm_user_info_t *user_info,
					const void *buf, size_t nbyte);

/* recvmsg() handler. */
typedef ssize_t (*rtdm_recvmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  struct msghdr *msg, int flags);

/* sendmsg() handler. */
typedef ssize_t (*rtdm_sendmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  const struct msghdr *msg, int flags);

/* Handler executed in real-time context via rtdm_exec_in_rt(). */
typedef int (*rtdm_rt_handler_t)(struct rtdm_dev_context *context,
				 rtdm_user_info_t *user_info, void *arg);
/* Per-instance operation handlers of an RTDM device. The _rt/_nrt
 * suffix pairs presumably select the handler according to the caller's
 * context (real-time vs. non-real-time) — RTDM convention; any slot may
 * be left NULL if unsupported. */
struct rtdm_operations {
	/* Close handlers */
	rtdm_close_handler_t close_rt;
	rtdm_close_handler_t close_nrt;

	/* IOCTL handlers */
	rtdm_ioctl_handler_t ioctl_rt;
	rtdm_ioctl_handler_t ioctl_nrt;

	/* Select binding handler (single variant) */
	rtdm_select_bind_handler_t select_bind;

	/* Read handlers */
	rtdm_read_handler_t read_rt;
	rtdm_read_handler_t read_nrt;

	/* Write handlers */
	rtdm_write_handler_t write_rt;
	rtdm_write_handler_t write_nrt;

	/* Message receive handlers */
	rtdm_recvmsg_handler_t recvmsg_rt;
	rtdm_recvmsg_handler_t recvmsg_nrt;

	/* Message send handlers */
	rtdm_sendmsg_handler_t sendmsg_rt;
	rtdm_sendmsg_handler_t sendmsg_nrt;
};
00379
/* RTDM-internal per-context bookkeeping; not for driver use. */
struct rtdm_devctx_reserved {
	void *owner;
	struct list_head cleanup;
};

/* Runtime state of an open device instance. A driver-private area of
 * device->context_size bytes follows the structure (dev_private[]). */
struct rtdm_dev_context {
	/* Bit field: RTDM_CREATED_IN_NRT, RTDM_CLOSING, and driver bits
	 * from RTDM_USER_CONTEXT_FLAG upward. */
	unsigned long context_flags;

	/* Associated file descriptor */
	int fd;

	/* Reference counter guarding destruction; see
	 * rtdm_context_lock()/rtdm_context_unlock(). */
	atomic_t close_lock_count;

	/* Operation handlers in effect for this instance */
	struct rtdm_operations *ops;

	/* Owning device */
	struct rtdm_device *device;

	/* Reserved for the RTDM core */
	struct rtdm_devctx_reserved reserved;

	/* Start of the driver-private area (pre-C99 flexible array idiom) */
	char dev_private[0];
};
00419
00428 static inline void *
00429 rtdm_context_to_private(struct rtdm_dev_context *context)
00430 {
00431 return (void *)context->dev_private;
00432 }
00433
/*
 * Map a driver-private area pointer back to its enclosing device
 * context; inverse of rtdm_context_to_private().
 */
static inline struct rtdm_dev_context *
rtdm_private_to_context(void *dev_private)
{
	return container_of(dev_private, struct rtdm_dev_context, dev_private);
}
00447
/* RTDM-internal per-device bookkeeping; not for driver use. */
struct rtdm_dev_reserved {
	struct list_head entry;
	atomic_t refcount;
	struct rtdm_dev_context *exclusive_context;
};

/* Static description of an RTDM device, filled in by the driver and
 * passed to rtdm_dev_register(). */
struct rtdm_device {
	/* Must be set to RTDM_DEVICE_STRUCT_VER */
	int struct_version;

	/* RTDM_NAMED_DEVICE or RTDM_PROTOCOL_DEVICE, optionally or'ed
	 * with RTDM_EXCLUSIVE etc. */
	int device_flags;
	/* Size of the driver-private context area (dev_private[]) */
	size_t context_size;

	/* Name under which a named device is addressed */
	char device_name[RTDM_MAX_DEVNAME_LEN + 1];

	/* Protocol family and socket type a protocol device answers to */
	int protocol_family;
	int socket_type;

	/* open() handlers for named devices (RT / NRT caller) */
	rtdm_open_handler_t open_rt;
	rtdm_open_handler_t open_nrt;

	/* socket() handlers for protocol devices (RT / NRT caller) */
	rtdm_socket_handler_t socket_rt;
	rtdm_socket_handler_t socket_nrt;

	/* Remaining operation handlers */
	struct rtdm_operations ops;

	/* Device profile identification */
	int device_class;
	int device_sub_class;
	int profile_version;
	const char *driver_name;
	/* Pack with RTDM_DRIVER_VER() */
	int driver_version;
	const char *peripheral_name;
	const char *provider_name;

	/* /proc entry name and handle */
	const char *proc_name;
	struct proc_dir_entry *proc_entry;

	/* Driver-defined device id and private data hook */
	int device_id;
	void *device_data;

	/* Reserved for the RTDM core */
	struct rtdm_dev_reserved reserved;
};
00533
00534
/* Register / unregister a device. NOTE(review): @poll_delay presumably
 * is the polling interval while waiting for open instances to drain —
 * confirm against the implementation. */
int rtdm_dev_register(struct rtdm_device *device);
int rtdm_dev_unregister(struct rtdm_device *device, unsigned int poll_delay);

/* Kernel-side aliases of the rt_dev_* inter-driver API, so kernel
 * drivers can use the same rtdm_* names as user space. */
#define rtdm_open rt_dev_open
#define rtdm_socket rt_dev_socket
#define rtdm_close rt_dev_close
#define rtdm_ioctl rt_dev_ioctl
#define rtdm_read rt_dev_read
#define rtdm_write rt_dev_write
#define rtdm_recvmsg rt_dev_recvmsg
#define rtdm_recv rt_dev_recv
#define rtdm_recvfrom rt_dev_recvfrom
#define rtdm_sendmsg rt_dev_sendmsg
#define rtdm_send rt_dev_send
#define rtdm_sendto rt_dev_sendto
#define rtdm_bind rt_dev_bind
#define rtdm_listen rt_dev_listen
#define rtdm_accept rt_dev_accept
#define rtdm_getsockopt rt_dev_getsockopt
#define rtdm_setsockopt rt_dev_setsockopt
#define rtdm_getsockname rt_dev_getsockname
#define rtdm_getpeername rt_dev_getpeername
#define rtdm_shutdown rt_dev_shutdown

/* Look up the context behind @fd, taking a reference on it; release
 * with rtdm_context_put(). */
struct rtdm_dev_context *rtdm_context_get(int fd);
00562
#ifndef DOXYGEN_CPP

/* True if @context holds at least one extra reference beyond the
 * implicit one, or is closing while still referenced — i.e. the caller
 * may legitimately manipulate the lock count. */
#define CONTEXT_IS_LOCKED(context) \
	(atomic_read(&(context)->close_lock_count) > 1 || \
	 (test_bit(RTDM_CLOSING, &(context)->context_flags) && \
	  atomic_read(&(context)->close_lock_count) > 0))

/* Take an additional reference on @context, delaying its destruction.
 * The (debug-only) assertion requires an existing reference. */
static inline void rtdm_context_lock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    /* empty assertion fallback */);
	atomic_inc(&context->close_lock_count);
}

/* APC handle used to defer final context cleanup; scheduled when the
 * last reference is dropped (see rtdm_context_unlock()). */
extern int rtdm_apc;

/* Drop a reference on @context; on the final drop, schedule the
 * deferred cleanup APC. The barrier orders the caller's prior writes
 * before the decrement. */
static inline void rtdm_context_unlock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    /* empty assertion fallback */);
	smp_mb__before_atomic_dec();
	if (unlikely(atomic_dec_and_test(&context->close_lock_count)))
		rthal_apc_schedule(rtdm_apc);
}

/* Counterpart to rtdm_context_get(). */
static inline void rtdm_context_put(struct rtdm_dev_context *context)
{
	rtdm_context_unlock(context);
}
00592
00593
/* Time base all RTDM time services operate on. */
struct xntbase;
extern struct xntbase *rtdm_tbase;

/* Current time of the RTDM time base, converted to nanoseconds. */
static inline nanosecs_abs_t rtdm_clock_read(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_time(rtdm_tbase));
}

/* Monotonic reading (jiffy counter of the time base), in nanoseconds. */
static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_jiffies(rtdm_tbase));
}
#endif /* !DOXYGEN_CPP */

/* Bind file descriptor @fd to @selector for event @type; dispatches to
 * the device's select_bind handler. */
int rtdm_select_bind(int fd, rtdm_selector_t *selector,
		     enum rtdm_selecttype type, unsigned fd_index);

/* Execute code_block atomically by holding the nucleus lock with
 * hardware IRQs off. Keep the block short — it runs with interrupts
 * disabled. */
#ifdef DOXYGEN_CPP
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
{ \
	<ENTER_ATOMIC_SECTION> \
	code_block; \
	<LEAVE_ATOMIC_SECTION> \
}
#else
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
{ \
	spl_t __rtdm_s; \
 \
	xnlock_get_irqsave(&nklock, __rtdm_s); \
	code_block; \
	xnlock_put_irqrestore(&nklock, __rtdm_s); \
}
#endif

/* Static initializer for rtdm_lock_t */
#define RTDM_LOCK_UNLOCKED RTHAL_SPIN_LOCK_UNLOCKED

/* Spin lock usable from real-time context */
typedef rthal_spinlock_t rtdm_lock_t;

/* Saved IRQ state for the *_irqsave/_irqrestore variants */
typedef unsigned long rtdm_lockctx_t;

/* Initialise @lock to the unlocked state */
#define rtdm_lock_init(lock) rthal_spin_lock_init(lock)

/* Acquire @lock; IRQs must already be disabled (the debug build
 * asserts this). */
#ifdef DOXYGEN_CPP
#define rtdm_lock_get(lock) rthal_spin_lock(lock)
#else
#define rtdm_lock_get(lock) \
	do { \
		XENO_BUGON(RTDM, !rthal_local_irq_disabled()); \
		rthal_spin_lock(lock); \
	} while (0)
#endif

/* Release @lock acquired via rtdm_lock_get() */
#define rtdm_lock_put(lock) rthal_spin_unlock(lock)

/* Acquire @lock, saving and disabling the hardware IRQ state in
 * @context */
#define rtdm_lock_get_irqsave(lock, context) \
	rthal_spin_lock_irqsave(lock, context)

/* Release @lock and restore the IRQ state saved in @context */
#define rtdm_lock_put_irqrestore(lock, context) \
	rthal_spin_unlock_irqrestore(lock, context)

/* Disable local hardware IRQs, saving the previous state in @context */
#define rtdm_lock_irqsave(context) \
	rthal_local_irq_save(context)

/* Restore the local hardware IRQ state saved in @context */
#define rtdm_lock_irqrestore(context) \
	rthal_local_irq_restore(context)
00827
00831
/* IRQ handle, wrapping a nucleus interrupt descriptor. */
typedef xnintr_t rtdm_irq_t;

/* Registration flags (map onto nucleus XN_ISR_* bits) */

/* The line may be shared with other handlers */
#define RTDM_IRQTYPE_SHARED XN_ISR_SHARED

/* Edge-triggered line */
#define RTDM_IRQTYPE_EDGE XN_ISR_EDGE

/* Interrupt handler; returns one of the RTDM_IRQ_* codes below. */
typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);

/* Interrupt was not issued by this device */
#define RTDM_IRQ_NONE XN_ISR_NONE

/* Interrupt was handled */
#define RTDM_IRQ_HANDLED XN_ISR_HANDLED

/* Retrieve the cookie stored in the handle, cast to @type *. */
#define rtdm_irq_get_arg(irq_handle, type) ((type *)irq_handle->cookie)

/* Attach @handler to interrupt line @irq_no; @arg is retrievable from
 * within the handler via rtdm_irq_get_arg(). */
int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
		     rtdm_irq_handler_t handler, unsigned long flags,
		     const char *device_name, void *arg);

#ifndef DOXYGEN_CPP
/* Detach the handler registered via rtdm_irq_request(). */
static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
{
	return xnintr_detach(irq_handle);
}

/* Enable the interrupt line behind @irq_handle. */
static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
{
	return xnintr_enable(irq_handle);
}

/* Disable the interrupt line behind @irq_handle. */
static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
{
	return xnintr_disable(irq_handle);
}
#endif
00911
00912
00913
/* Non-RT signal: a virtual IRQ number used to trigger deferred work in
 * the Linux (root) domain. */
typedef unsigned rtdm_nrtsig_t;

/* Handler invoked when a pended non-RT signal is delivered. */
typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t nrt_sig, void *arg);

#ifndef DOXYGEN_CPP
/* Allocate a virtual IRQ and attach @handler to it in the root domain.
 * Returns -EAGAIN when no virtual IRQ is available. */
static inline int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
				   rtdm_nrtsig_handler_t handler, void *arg)
{
	*nrt_sig = rthal_alloc_virq();

	if (*nrt_sig == 0)
		return -EAGAIN;

	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, handler, arg, NULL,
			     IPIPE_HANDLE_MASK);
	return 0;
}

/* Release the virtual IRQ backing @nrt_sig. */
static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
{
	rthal_free_virq(*nrt_sig);
}

/* Trigger the signal; the handler runs once the root domain resumes. */
static inline void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
{
	rthal_trigger_irq(*nrt_sig);
}
#endif
00958
00959
00960
/* Timer handle, wrapping a nucleus timer. */
typedef xntimer_t rtdm_timer_t;

/* Handler invoked on timer expiry. */
typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);

/* Expiry interpretation for the timer/sleep services; values map 1:1
 * onto the nucleus timer modes. */
enum rtdm_timer_mode {
	/* Expiry is relative to the current time */
	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,

	/* Expiry is an absolute date (XN_ABSOLUTE clock) */
	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,

	/* Expiry is an absolute date on the real-time clock
	 * (XN_REALTIME) */
	RTDM_TIMERMODE_REALTIME = XN_REALTIME
};

#ifndef DOXYGEN_CPP
/* Initialise @timer on the RTDM time base; always evaluates to 0. */
#define rtdm_timer_init(timer, handler, name) \
({ \
	xntimer_init((timer), rtdm_tbase, handler); \
	xntimer_set_name((timer), (name)); \
	0; \
})
#endif

void rtdm_timer_destroy(rtdm_timer_t *timer);

/* Arm @timer: first expiry at @expiry (interpreted per @mode), then
 * periodically every @interval nanoseconds. */
int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);

void rtdm_timer_stop(rtdm_timer_t *timer);

#ifndef DOXYGEN_CPP
/* Variants callable from within a timer handler: convert nanoseconds
 * to time-base ticks (rounding up) and drive the nucleus timer
 * directly. */
static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
					      nanosecs_abs_t expiry,
					      nanosecs_rel_t interval,
					      enum rtdm_timer_mode mode)
{
	return xntimer_start(timer, xntbase_ns2ticks_ceil(rtdm_tbase, expiry),
			     xntbase_ns2ticks_ceil(rtdm_tbase, interval),
			     (xntmode_t)mode);
}

static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
{
	xntimer_stop(timer);
}
#endif
01026
01027
/* Task handle, wrapping a nucleus thread. */
typedef xnthread_t rtdm_task_t;

/* Task entry routine. */
typedef void (*rtdm_task_proc_t)(void *arg);

/* Priority range accepted by rtdm_task_init() and
 * rtdm_task_set_priority(). */
#define RTDM_TASK_LOWEST_PRIORITY XNSCHED_LOW_PRIO
#define RTDM_TASK_HIGHEST_PRIORITY XNSCHED_HIGH_PRIO

/* Relative priority adjustments. */
#define RTDM_TASK_RAISE_PRIORITY (+1)
#define RTDM_TASK_LOWER_PRIORITY (-1)

/* Create and start a task; a non-zero @period presumably makes it
 * periodic (see rtdm_task_wait_period()). */
int rtdm_task_init(rtdm_task_t *task, const char *name,
		   rtdm_task_proc_t task_proc, void *arg,
		   int priority, nanosecs_rel_t period);
/* Internal helper backing the rtdm_task_sleep*() family. */
int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
/* Busy-wait (no blocking) for @delay nanoseconds. */
void rtdm_task_busy_sleep(nanosecs_rel_t delay);

#ifndef DOXYGEN_CPP
/* Delete @task via the nucleus. */
static inline void rtdm_task_destroy(rtdm_task_t *task)
{
	xnpod_delete_thread(task);
}

/* Wait from non-RT context until @task has terminated, presumably
 * polling every @poll_delay — confirm units in the implementation. */
void rtdm_task_join_nrt(rtdm_task_t *task, unsigned int poll_delay);
01073
01074 static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
01075 {
01076 union xnsched_policy_param param = { .rt = { .prio = priority } };
01077 xnpod_set_thread_schedparam(task, &xnsched_class_rt, ¶m);
01078 xnpod_schedule();
01079 }
01080
01081 static inline int rtdm_task_set_period(rtdm_task_t *task,
01082 nanosecs_rel_t period)
01083 {
01084 if (period < 0)
01085 period = 0;
01086 return xnpod_set_thread_periodic(task, XN_INFINITE,
01087 xntbase_ns2ticks_ceil
01088 (xnthread_time_base(task), period));
01089 }
01090
01091 static inline int rtdm_task_unblock(rtdm_task_t *task)
01092 {
01093 int res = xnpod_unblock_thread(task);
01094
01095 xnpod_schedule();
01096 return res;
01097 }
01098
/* Return the handle of the currently running task. */
static inline rtdm_task_t *rtdm_task_current(void)
{
	return xnpod_current_thread();
}

/* Wait for the next release point of the calling periodic task; not
 * callable from an unblockable context (asserted in debug builds). */
static inline int rtdm_task_wait_period(void)
{
	XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);
	return xnpod_wait_thread_period(NULL);
}

/* Sleep for @delay nanoseconds relative to now. */
static inline int rtdm_task_sleep(nanosecs_rel_t delay)
{
	return __rtdm_task_sleep(delay, XN_RELATIVE);
}
01114
01115 static inline int
01116 rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
01117 {
01118
01119 if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
01120 return -EINVAL;
01121 return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
01122 }
01123
01124
/* Deprecated forerunner of rtdm_task_sleep_abs(); always uses the
 * real-time clock mode. */
static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
{
	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
}
#endif
01130
01131
01132
/* Timeout sequence: presumably an absolute deadline derived once from
 * a relative timeout, so a chain of waits shares one overall limit. */
typedef nanosecs_abs_t rtdm_toseq_t;

void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);

/* Event service: wait/signal object with select support. */
typedef struct {
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_event_t;

/* Synch-object spare bit encoding the "signaled" state. */
#define RTDM_EVENT_PENDING XNSYNCH_SPARE1

void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
			   enum rtdm_selecttype type, unsigned fd_index);
#else
/* Select support compiled out: binding always fails with -EBADF. */
#define rtdm_event_select_bind(e, s, t, i) ({ (void)(e); -EBADF; })
#endif
int rtdm_event_wait(rtdm_event_t *event);
int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);
void rtdm_event_signal(rtdm_event_t *event);

void rtdm_event_clear(rtdm_event_t *event);

#ifndef DOXYGEN_CPP
/* Flush (wake) all waiters of @synch, tagging them with @reason. */
void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason);

/* Wake all current waiters without leaving the event signaled. */
static inline void rtdm_event_pulse(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_pulse, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, 0);
}

/* Destroy @event, flushing waiters with XNRMID (object removed). */
static inline void rtdm_event_destroy(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_destroy, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, XNRMID);
	xnselect_destroy(&event->select_block);
}
#endif

/* Semaphore service: counting semaphore with select support. */
typedef struct {
	unsigned long value;
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_sem_t;

void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
			 enum rtdm_selecttype type, unsigned fd_index);
#else
/* Select support compiled out: binding always fails with -EBADF. */
#define rtdm_sem_select_bind(s, se, t, i) ({ (void)(s); -EBADF; })
#endif
int rtdm_sem_down(rtdm_sem_t *sem);
int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
		       rtdm_toseq_t *timeout_seq);
void rtdm_sem_up(rtdm_sem_t *sem);

#ifndef DOXYGEN_CPP
/* Destroy @sem, flushing waiters with XNRMID. */
static inline void rtdm_sem_destroy(rtdm_sem_t *sem)
{
	trace_mark(xn_rtdm, sem_destroy, "sem %p", sem);
	__rtdm_synch_flush(&sem->synch_base, XNRMID);
	xnselect_destroy(&sem->select_block);
}
#endif

/* Mutex service. */
typedef struct {
	xnsynch_t synch_base;
} rtdm_mutex_t;

void rtdm_mutex_init(rtdm_mutex_t *mutex);
int rtdm_mutex_lock(rtdm_mutex_t *mutex);
int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

#ifndef DOXYGEN_CPP
/* Release @mutex, rescheduling when ownership was handed over to a
 * waiter. Not callable from asynchronous (interrupt) context. */
static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;);

	trace_mark(xn_rtdm, mutex_unlock, "mutex %p", mutex);

	if (unlikely(xnsynch_release(&mutex->synch_base) != NULL))
		xnpod_schedule();
}

/* Destroy @mutex, flushing waiters with XNRMID. */
static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
{
	trace_mark(xn_rtdm, mutex_destroy, "mutex %p", mutex);

	__rtdm_synch_flush(&mutex->synch_base, XNRMID);
}
#endif
01235
01236
01237
/* Log helper; maps directly onto the kernel's printk(). */
#define rtdm_printk(format, ...) printk(format, ##__VA_ARGS__)

#ifndef DOXYGEN_CPP
/* Allocate memory from the nucleus heap. */
static inline void *rtdm_malloc(size_t size)
{
	return xnmalloc(size);
}

/* Release memory obtained via rtdm_malloc(). */
static inline void rtdm_free(void *ptr)
{
	xnfree(ptr);
}

#ifdef CONFIG_XENO_OPT_PERVASIVE
/* Map kernel memory (or, for the iomap variant, physical I/O memory)
 * into the caller's address space; *pptr receives the user address. */
int rtdm_mmap_to_user(rtdm_user_info_t *user_info,
		      void *src_addr, size_t len,
		      int prot, void **pptr,
		      struct vm_operations_struct *vm_ops,
		      void *vm_private_data);
int rtdm_iomap_to_user(rtdm_user_info_t *user_info,
		       phys_addr_t src_addr, size_t len,
		       int prot, void **pptr,
		       struct vm_operations_struct *vm_ops,
		       void *vm_private_data);
int rtdm_munmap(rtdm_user_info_t *user_info, void *ptr, size_t len);

/* Non-zero if [ptr, ptr+size) passes the read access check. */
static inline int rtdm_read_user_ok(rtdm_user_info_t *user_info,
				    const void __user *ptr, size_t size)
{
	return access_rok(ptr, size);
}

/* Non-zero if [ptr, ptr+size) passes the write access check. */
static inline int rtdm_rw_user_ok(rtdm_user_info_t *user_info,
				  const void __user *ptr, size_t size)
{
	return access_wok(ptr, size);
}

/* Copy from user space without a prior access check;
 * returns 0 or -EFAULT. */
static inline int rtdm_copy_from_user(rtdm_user_info_t *user_info,
				      void *dst, const void __user *src,
				      size_t size)
{
	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
}

/* As above, but verifies readability of @src first. */
static inline int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info,
					   void *dst, const void __user *src,
					   size_t size)
{
	return (!access_rok(src, size) ||
		__xn_copy_from_user(dst, src, size)) ? -EFAULT : 0;
}

/* Copy to user space without a prior access check;
 * returns 0 or -EFAULT. */
static inline int rtdm_copy_to_user(rtdm_user_info_t *user_info,
				    void __user *dst, const void *src,
				    size_t size)
{
	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
}

/* As above, but verifies writability of @dst first. */
static inline int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info,
					 void __user *dst, const void *src,
					 size_t size)
{
	return (!access_wok(dst, size) ||
		__xn_copy_to_user(dst, src, size)) ? -EFAULT : 0;
}

/* Copy a NUL-terminated string from user space (at most @count bytes).
 * Note: only the first byte's accessibility is pre-checked here. */
static inline int rtdm_strncpy_from_user(rtdm_user_info_t *user_info,
					 char *dst,
					 const char __user *src, size_t count)
{
	if (unlikely(!access_rok(src, 1)))
		return -EFAULT;
	return __xn_strncpy_from_user(dst, src, count);
}
01314
/* Non-zero if the caller may execute real-time services: a shadowed
 * user-space thread, or — for kernel callers — any non-root thread. */
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return (user_info ? xnshadow_thread(user_info) != NULL
		: !xnpod_root_p());
}

#else /* !CONFIG_XENO_OPT_PERVASIVE */

/* Without pervasive real-time support the user-space memory services
 * are unavailable; access checks conservatively report failure.
 * NOTE(review): no rtdm_iomap_to_user() stub is provided here although
 * it is declared in the CONFIG_XENO_OPT_PERVASIVE branch — callers
 * using it will not build in this configuration; confirm upstream. */
#define rtdm_mmap_to_user(...) ({ -ENOSYS; })
#define rtdm_munmap(...) ({ -ENOSYS; })
#define rtdm_read_user_ok(...) ({ 0; })
#define rtdm_rw_user_ok(...) ({ 0; })
#define rtdm_copy_from_user(...) ({ -ENOSYS; })
#define rtdm_safe_copy_from_user(...) ({ -ENOSYS; })
#define rtdm_copy_to_user(...) ({ -ENOSYS; })
#define rtdm_safe_copy_to_user(...) ({ -ENOSYS; })
#define rtdm_strncpy_from_user(...) ({ -ENOSYS; })

/* No shadow threads exist in this configuration; only the scheduler
 * state of the caller decides RT capability. */
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return !xnpod_root_p();
}

#endif /* CONFIG_XENO_OPT_PERVASIVE */

/* Non-zero when currently executing over a non-root (real-time)
 * domain. */
static inline int rtdm_in_rt_context(void)
{
	return (rthal_current_domain != rthal_root_domain);
}

#endif /* !DOXYGEN_CPP */

/* Invoke @handler from real-time context on behalf of @context,
 * migrating the caller if necessary. */
int rtdm_exec_in_rt(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info, void *arg,
		    rtdm_rt_handler_t handler);

#endif /* _RTDM_DRIVER_H */