00001
00023 #ifndef _XENO_NUCLEUS_SCHED_RT_H
00024 #define _XENO_NUCLEUS_SCHED_RT_H
00025
00026 #ifndef _XENO_NUCLEUS_SCHED_H
00027 #error "please don't include nucleus/sched-rt.h directly"
00028 #endif
00029
00030
/* Priority range of the real-time scheduling class. */
#define XNSCHED_RT_MIN_PRIO 0
#define XNSCHED_RT_MAX_PRIO 257
/* Number of distinct priority levels, both bounds included (258). */
#define XNSCHED_RT_NR_PRIO (XNSCHED_RT_MAX_PRIO - XNSCHED_RT_MIN_PRIO + 1)
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
/*
 * Conventional priority sub-ranges within the RT class.
 * XNSCHED_IRQ_PRIO aliases the RT ceiling; presumably reserved for
 * interrupt handling -- confirm against the users of this macro.
 */
#define XNSCHED_LOW_PRIO 0
#define XNSCHED_HIGH_PRIO 99
#define XNSCHED_IRQ_PRIO XNSCHED_RT_MAX_PRIO
00048
00049 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00050
/*
 * With the scalable (multi-level queue) scheduler, every RT priority
 * must map to a dedicated queue level; fail the build when the RT
 * range exceeds the available MLQ levels.
 */
#if defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \
	XNSCHED_RT_NR_PRIO > XNSCHED_MLQ_LEVELS
#error "RT class cannot use multi-level queue (too many priority levels)"
#endif

/* Descriptor of the real-time scheduling class (defined out of line). */
extern struct xnsched_class xnsched_class_rt;

/* Descriptor of the idle scheduling class (defined out of line). */
extern struct xnsched_class xnsched_class_idle;

/* Threads with no explicit scheduling class fall back to the RT class. */
#define xnsched_class_default xnsched_class_rt
00061
/*
 * Put @thread back into its scheduler's RT run queue at its current
 * priority (cprio). sched_insertpql() presumably links at the head of
 * the equal-priority group, so a preempted thread resumes before its
 * peers -- compare __xnsched_rt_enqueue() below, which uses the FIFO
 * variant. TODO(review): confirm against the pqueue implementation.
 */
static inline void __xnsched_rt_requeue(struct xnthread *thread)
{
	sched_insertpql(&thread->sched->rt.runnable,
			&thread->rlink, thread->cprio);
}
00067
/*
 * Add @thread to its scheduler's RT run queue at its current priority
 * (cprio). sched_insertpqf() presumably queues at the tail of the
 * equal-priority group (FIFO among equals) -- TODO(review): confirm
 * against the pqueue implementation.
 */
static inline void __xnsched_rt_enqueue(struct xnthread *thread)
{
	sched_insertpqf(&thread->sched->rt.runnable,
			&thread->rlink, thread->cprio);
}
00073
/* Remove @thread from its scheduler's RT run queue. */
static inline void __xnsched_rt_dequeue(struct xnthread *thread)
{
	sched_removepq(&thread->sched->rt.runnable, &thread->rlink);
}
00078
00079 static inline struct xnthread *__xnsched_rt_pick(struct xnsched *sched)
00080 {
00081 struct xnpholder *h = sched_getpq(&sched->rt.runnable);
00082 return h ? link2thread(h, rlink) : NULL;
00083 }
00084
/* Apply the RT policy parameters to @thread: set its current priority. */
static inline void __xnsched_rt_setparam(struct xnthread *thread,
					 const union xnsched_policy_param *p)
{
	thread->cprio = p->rt.prio;
}
00090
/* Report @thread's RT policy parameters: its current priority. */
static inline void __xnsched_rt_getparam(struct xnthread *thread,
					 union xnsched_policy_param *p)
{
	p->rt.prio = thread->cprio;
}
00096
00097 static inline void __xnsched_rt_trackprio(struct xnthread *thread,
00098 const union xnsched_policy_param *p)
00099 {
00100 if (p)
00101 __xnsched_rt_setparam(thread, p);
00102 else
00103 thread->cprio = thread->bprio;
00104 }
00105
/* Class hook: nothing to release when a thread leaves the RT class. */
static inline void __xnsched_rt_forget(struct xnthread *thread)
{
}
00109
/*
 * Class hook: per-thread control block setup. The RT class keeps no
 * extra TCB state, so this always succeeds.
 */
static inline int xnsched_rt_init_tcb(struct xnthread *thread)
{
	return 0;
}
00114
/*
 * Per-tick hook for the RT class, defined out of line; presumably
 * handles round-robin time-slice accounting for @curr -- see its
 * definition.
 */
void xnsched_rt_tick(struct xnthread *curr);
00116
00117 #ifdef CONFIG_XENO_OPT_PRIOCPL
00118
00119 static inline struct xnthread *__xnsched_rt_push_rpi(struct xnsched *sched,
00120 struct xnthread *thread)
00121 {
00122 sched_insertpqf(&sched->rt.relaxed, &thread->xlink, thread->cprio);
00123 return link2thread(sched_getheadpq(&sched->rt.relaxed), xlink);
00124 }
00125
00126 static inline void __xnsched_rt_pop_rpi(struct xnthread *thread)
00127 {
00128 struct xnsched *sched = thread->rpi;
00129 sched_removepq(&sched->rt.relaxed, &thread->xlink);
00130 }
00131
00132 static inline struct xnthread *__xnsched_rt_peek_rpi(struct xnsched *sched)
00133 {
00134 struct xnpholder *h = sched_getheadpq(&sched->rt.relaxed);
00135 return h ? link2thread(h, xlink) : NULL;
00136 }
00137
/* RPI hook: no class-specific action when a relaxed thread suspends. */
static inline void __xnsched_rt_suspend_rpi(struct xnthread *thread)
{
}
00141
/* RPI hook: no class-specific action when a relaxed thread resumes. */
static inline void __xnsched_rt_resume_rpi(struct xnthread *thread)
{
}
00145
00146 #endif
00147
00148 #endif
00149
00150 #endif