#ifndef _XENO_NUCLEUS_POD_H
#define _XENO_NUCLEUS_POD_H

#include <nucleus/sched.h>

/* Pod status flags */
#define XNFATAL  0x00000001	/* Fatal error in progress */
#define XNPEXEC  0x00000002	/* Pod is active (executable) */

/* These flags are available to the real-time interfaces */
#define XNPOD_SPARE0	0x01000000
#define XNPOD_SPARE1	0x02000000
#define XNPOD_SPARE2	0x04000000
#define XNPOD_SPARE3	0x08000000
#define XNPOD_SPARE4	0x10000000
#define XNPOD_SPARE5	0x20000000
#define XNPOD_SPARE6	0x40000000
#define XNPOD_SPARE7	0x80000000

#define XNPOD_NORMAL_EXIT	0x0
#define XNPOD_FATAL_EXIT	0x1

#define XNPOD_ALL_CPUS		XNARCH_CPU_MASK_ALL

#define XNPOD_FATAL_BUFSZ	16384

/* The one and only pod instance. */
#define nkpod (&nkpod_struct)

struct xnsynch;

/*
 * Real-time pod descriptor: holds the system-wide state of the
 * nucleus.
 */
struct xnpod {

	xnflags_t status;	/* Status bitmask */

	xnsched_t sched[XNARCH_NR_CPUS]; /* Per-cpu scheduler slots */

	xnqueue_t threadq;	/* All existing threads */
	int threadq_rev;	/* Modification counter of threadq */

	xnqueue_t tstartq,	/* Thread start hook queue */
		tswitchq,	/* Thread switch hook queue */
		tdeleteq;	/* Thread delete hook queue */

	atomic_counter_t timerlck; /* Timer lock depth */

	xntimer_t tslicer;	/* Time-slicing timer */
	int tsliced;		/* Number of time-sliced threads */

	int refcnt;		/* Reference count */

#ifdef __XENO_SIM__
	void (*schedhook) (xnthread_t *thread, xnflags_t mask); /* Internal scheduling hook */
#endif /* __XENO_SIM__ */
};

typedef struct xnpod xnpod_t;
DECLARE_EXTERN_XNLOCK(nklock);		/* Big nucleus lock */

extern u_long nklatency;		/* Scheduling latency correction */

extern u_long nktimerlat;		/* Timer programming latency */

extern char *nkmsgbuf;			/* Fatal message buffer */

extern xnarch_cpumask_t nkaffinity;	/* Default thread affinity mask */

extern xnpod_t nkpod_struct;		/* The pod instance (see nkpod) */
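
/*
 * Illustrative sketch (not part of the original header): nucleus and
 * scheduler state is commonly guarded by the big nucleus lock
 * declared above. Assuming the usual spl_t/xnlock helpers from the
 * arch-dependent layer, a critical section typically looks like:
 *
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	... inspect or update pod/scheduler state ...
 *	xnlock_put_irqrestore(&nklock, s);
 */
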
#ifdef CONFIG_PROC_FS
void xnpod_init_proc(void);
void xnpod_cleanup_proc(void);
#else /* !CONFIG_PROC_FS */
static inline void xnpod_init_proc(void) {}
static inline void xnpod_cleanup_proc(void) {}
#endif /* CONFIG_PROC_FS */

static inline void xnpod_mount(void)
{
	xnsched_register_classes();
	xnpod_init_proc();
}

static inline void xnpod_umount(void)
{
	xnpod_cleanup_proc();
}

#ifdef __cplusplus
extern "C" {
#endif

int __xnpod_set_thread_schedparam(struct xnthread *thread,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param,
				  int propagate);

void __xnpod_reset_thread(struct xnthread *thread);

#ifdef CONFIG_XENO_HW_FPU
void xnpod_switch_fpu(xnsched_t *sched);
#endif /* CONFIG_XENO_HW_FPU */

void __xnpod_schedule(struct xnsched *sched);

#define xnpod_sched_slot(cpu) \
    (&nkpod->sched[cpu])

#define xnpod_current_sched() \
    xnpod_sched_slot(xnarch_current_cpu())

#define xnpod_active_p() \
    testbits(nkpod->status, XNPEXEC)

#define xnpod_fatal_p() \
    testbits(nkpod->status, XNFATAL)

#define xnpod_interrupt_p() \
    testbits(xnpod_current_sched()->lflags, XNINIRQ)

#define xnpod_callout_p() \
    testbits(xnpod_current_sched()->status, XNKCOUT)

#define xnpod_asynch_p()						\
    ({									\
	    xnsched_t *sched = xnpod_current_sched();			\
	    testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ);	\
    })

#define xnpod_current_thread() \
    (xnpod_current_sched()->curr)

#define xnpod_current_root() \
    (&xnpod_current_sched()->rootcb)

#ifdef CONFIG_XENO_OPT_PERVASIVE
#define xnpod_current_p(thread)						\
    ({ int __shadow_p = xnthread_test_state(thread, XNSHADOW);		\
       int __curr_p = __shadow_p ? xnshadow_thread(current) == thread	\
	       : thread == xnpod_current_thread();			\
       __curr_p; })
#else /* !CONFIG_XENO_OPT_PERVASIVE */
#define xnpod_current_p(thread) \
    (xnpod_current_thread() == (thread))
#endif /* CONFIG_XENO_OPT_PERVASIVE */

#define xnpod_locked_p() \
    xnthread_test_state(xnpod_current_thread(), XNLOCK)

#define xnpod_unblockable_p() \
    (xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

#define xnpod_root_p() \
    xnthread_test_state(xnpod_current_thread(), XNROOT)

#define xnpod_shadow_p() \
    xnthread_test_state(xnpod_current_thread(), XNSHADOW)

#define xnpod_userspace_p() \
    xnthread_test_state(xnpod_current_thread(), XNROOT|XNSHADOW)

#define xnpod_primary_p() \
    (!(xnpod_asynch_p() || xnpod_root_p()))

#define xnpod_secondary_p()	xnpod_root_p()

#define xnpod_idle_p()		xnpod_root_p()

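/*
 * Illustrative sketch (assumption, not from the original header):
 * blocking service entry points typically use the predicates above to
 * reject calls from contexts that may not sleep, e.g.
 *
 *	if (xnpod_unblockable_p())
 *		return -EPERM;
 *
 * i.e. bail out when running over an IRQ handler, a callout, or the
 * root thread.
 */
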
int xnpod_init(void);

int xnpod_enable_timesource(void);

void xnpod_disable_timesource(void);

void xnpod_shutdown(int xtype);

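/*
 * Illustrative lifecycle sketch (assumption, not from the original
 * header; error handling simplified). Only services declared in this
 * file are used:
 *
 *	int err = xnpod_init();
 *	if (err)
 *		return err;
 *
 *	err = xnpod_enable_timesource();
 *	if (err) {
 *		xnpod_shutdown(XNPOD_NORMAL_EXIT);
 *		return err;
 *	}
 *
 *	... run the system ...
 *
 *	xnpod_shutdown(XNPOD_NORMAL_EXIT);
 */
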
int xnpod_init_thread(struct xnthread *thread,
		      const struct xnthread_init_attr *attr,
		      struct xnsched_class *sched_class,
		      const union xnsched_policy_param *sched_param);

int xnpod_start_thread(xnthread_t *thread,
		       const struct xnthread_start_attr *attr);

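/*
 * Illustrative sketch (assumptions: the attribute field names, the
 * nktbase time base and the xnsched_class_rt class come from other
 * nucleus headers; check nucleus/thread.h and nucleus/sched.h for the
 * authoritative definitions). The caller is assumed to provide the
 * thread descriptor, an entry routine and an err variable:
 *
 *	struct xnthread_init_attr iattr = {
 *		.tbase = &nktbase,
 *		.flags = 0,
 *		.stacksize = 0,
 *		.name = "worker",
 *	};
 *	struct xnthread_start_attr sattr = {
 *		.mode = 0,
 *		.imask = 0,
 *		.affinity = XNPOD_ALL_CPUS,
 *		.entry = worker_entry,
 *		.cookie = NULL,
 *	};
 *	union xnsched_policy_param param = { .rt = { .prio = 10 } };
 *
 *	err = xnpod_init_thread(&thread, &iattr, &xnsched_class_rt, &param);
 *	if (!err)
 *		err = xnpod_start_thread(&thread, &sattr);
 */
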
void xnpod_stop_thread(xnthread_t *thread);

void xnpod_restart_thread(xnthread_t *thread);

void xnpod_delete_thread(xnthread_t *thread);

void xnpod_abort_thread(xnthread_t *thread);

xnflags_t xnpod_set_thread_mode(xnthread_t *thread,
				xnflags_t clrmask,
				xnflags_t setmask);

void xnpod_suspend_thread(xnthread_t *thread,
			  xnflags_t mask,
			  xnticks_t timeout,
			  xntmode_t timeout_mode,
			  struct xnsynch *wchan);

void xnpod_resume_thread(xnthread_t *thread,
			 xnflags_t mask);

int xnpod_unblock_thread(xnthread_t *thread);

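/*
 * Illustrative sketch (assumption, not from the original header): a
 * typical wakeup path resumes a thread with the same condition bit it
 * was suspended with, then asks for rescheduling, all under nklock:
 *
 *	spl_t s;
 *
 *	xnlock_get_irqsave(&nklock, s);
 *	xnpod_resume_thread(thread, XNSUSP);
 *	xnpod_schedule();
 *	xnlock_put_irqrestore(&nklock, s);
 */
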
int xnpod_set_thread_schedparam(struct xnthread *thread,
				struct xnsched_class *sched_class,
				const union xnsched_policy_param *sched_param);

int xnpod_migrate_thread(int cpu);

void xnpod_dispatch_signals(void);

static inline void xnpod_schedule(void)
{
	struct xnsched *sched;

	sched = xnpod_current_sched();

	/*
	 * No immediate rescheduling may take place on behalf of an
	 * interrupt handler or a callout, or while the scheduler is
	 * locked. In the non-debug case, the costly call is also
	 * skipped when no rescheduling request is pending for the
	 * current CPU.
	 */
#if XENO_DEBUG(NUCLEUS)
	if (testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ|XNSWLOCK))
		return;
#else /* !XENO_DEBUG(NUCLEUS) */
	if (testbits(sched->status | sched->lflags,
		     XNKCOUT|XNINIRQ|XNSWLOCK|XNRESCHED) != XNRESCHED)
		return;
#endif /* !XENO_DEBUG(NUCLEUS) */

	__xnpod_schedule(sched);
}

00293
00294 void xnpod_lock_sched(void);
00295
00296 void xnpod_unlock_sched(void);
00297
00298 void xnpod_fire_callouts(xnqueue_t *hookq,
00299 xnthread_t *thread);
00300
static inline void xnpod_run_hooks(struct xnqueue *q,
				   struct xnthread *thread, const char *type)
{
	if (!emptyq_p(q)) {
		trace_mark(xn_nucleus, thread_callout,
			   "thread %p thread_name %s hook %s",
			   thread, xnthread_name(thread), type);
		xnpod_fire_callouts(q, thread);
	}
}

int xnpod_set_thread_periodic(xnthread_t *thread,
			      xnticks_t idate,
			      xnticks_t period);

int xnpod_wait_thread_period(unsigned long *overruns_r);

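/*
 * Illustrative sketch (assumption, not from the original header): a
 * periodic worker arms its period once, then blocks on it in a loop,
 * collecting overruns. do_periodic_work() is a hypothetical routine:
 *
 *	unsigned long overruns;
 *	int err;
 *
 *	err = xnpod_set_thread_periodic(xnpod_current_thread(),
 *					XN_INFINITE, period);
 *	while (!err) {
 *		err = xnpod_wait_thread_period(&overruns);
 *		if (!err)
 *			do_periodic_work(overruns);
 *	}
 */
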
int xnpod_set_thread_tslice(struct xnthread *thread,
			    xnticks_t quantum);

static inline xntime_t xnpod_get_cpu_time(void)
{
	return xnarch_get_cpu_time();
}

int xnpod_add_hook(int type, void (*routine) (xnthread_t *));

int xnpod_remove_hook(int type, void (*routine) (xnthread_t *));

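/*
 * Illustrative sketch (assumption: the XNHOOK_THREAD_* hook type
 * constants are defined elsewhere in the nucleus headers): a skin may
 * track thread deletions by registering a hook routine:
 *
 *	static void delete_hook(xnthread_t *thread)
 *	{
 *		... release per-thread resources ...
 *	}
 *
 *	xnpod_add_hook(XNHOOK_THREAD_DELETE, delete_hook);
 *	...
 *	xnpod_remove_hook(XNHOOK_THREAD_DELETE, delete_hook);
 */
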
static inline void xnpod_yield(void)
{
	xnpod_resume_thread(xnpod_current_thread(), 0);
	xnpod_schedule();
}

static inline void xnpod_delay(xnticks_t timeout)
{
	xnpod_suspend_thread(xnpod_current_thread(), XNDELAY,
			     timeout, XN_RELATIVE, NULL);
}

static inline void xnpod_suspend_self(void)
{
	xnpod_suspend_thread(xnpod_current_thread(), XNSUSP,
			     XN_INFINITE, XN_RELATIVE, NULL);
}

static inline void xnpod_delete_self(void)
{
	xnpod_delete_thread(xnpod_current_thread());
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* !_XENO_NUCLEUS_POD_H */