00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022 #ifndef _XENO_NUCLEUS_HEAP_H
00023 #define _XENO_NUCLEUS_HEAP_H
00024
00025 #include <nucleus/queue.h>
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00048
/* Heap page geometry. */
#define XNHEAP_PAGE_SIZE 512
#define XNHEAP_PAGE_MASK (~(XNHEAP_PAGE_SIZE-1))
#define XNHEAP_PAGE_ALIGN(addr) (((addr)+XNHEAP_PAGE_SIZE-1)&XNHEAP_PAGE_MASK)

/* Block sizes are powers of two in [2^MINLOG2 .. 2^MAXLOG2]. */
#define XNHEAP_MINLOG2 3
#define XNHEAP_MAXLOG2 22
#define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
#define XNHEAP_MINALIGNSZ (1 << 4)	/* i.e. 16 bytes */
#define XNHEAP_NBUCKETS (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
/*
 * Maximum size of a single extent: 2Gb.  The literal must be unsigned:
 * (1 << 31) shifts into the sign bit of a 32-bit int, which is
 * undefined behavior, and on 64-bit targets would sign-extend to
 * 0xFFFFFFFF80000000 when compared against a u_long, defeating the
 * intended limit.  (1U << 31) yields the intended 0x80000000.
 */
#define XNHEAP_MAXEXTSZ (1U << 31)

/* States of a page, as tracked by struct xnpagemap.type. */
#define XNHEAP_PFREE 0
#define XNHEAP_PCONT 1
#define XNHEAP_PLIST 2

/* Request non-cached memory; bit beyond the regular kernel GFP flags. */
#define XNHEAP_GFP_NONCACHED (1 << __GFP_BITS_SHIFT)
00065
/*
 * Per-page state descriptor.  One entry per heap page, stored in the
 * owning extent's pagemap[] array.
 */
struct xnpagemap {
	/*
	 * Page state: XNHEAP_PFREE/PCONT/PLIST (presumably also a
	 * bucket size index for pages split into blocks — confirm
	 * against the allocator in heap.c).
	 */
	unsigned int type : 8;
	/*
	 * 24-bit counter.  NOTE(review): looks like the number of busy
	 * blocks within the page — verify in heap.c.
	 */
	unsigned int bcount : 24;
};
00070
/*
 * Memory extent descriptor.  A heap is made of one or more extents,
 * each covering a contiguous range of pages.
 */
typedef struct xnextent {

	xnholder_t link;	/* Link into the heap's extent queue. */

#define link2extent(ln) container_of(ln, xnextent_t, link)

	caddr_t membase,	/* Base address of the page area. */
		memlim,		/* Upper limit of the page area (presumably one past the end — confirm). */
		freelist;	/* Head of the free page list. */

	/*
	 * Start of the page map, one struct xnpagemap per page.
	 * Pre-C99 trailing-array idiom: the remaining entries are
	 * allocated past the end of this structure (the [1] is part of
	 * the sizeof arithmetic in xnheap_external_overhead() and must
	 * not be changed to a flexible array member).
	 */
	struct xnpagemap pagemap[1];

} xnextent_t;
00084
/* Heap control block. */
typedef struct xnheap {

	xnholder_t link;	/* Linkage into a heap queue (presumably the global one). */

#define link2heap(ln) container_of(ln, xnheap_t, link)

	/*
	 * Heap geometry and accounting.  NOTE(review): roles inferred
	 * from field names and the accessor macros below — confirm
	 * against heap.c.
	 */
	u_long extentsize,	/* Size of each extent, in bytes. */
		pagesize,	/* Allocation page size, in bytes. */
		pageshift,	/* Presumably log2(pagesize). */
		hdrsize,	/* Presumably size of the extent header. */
		npages,		/* Pages per extent. */
		ubytes,		/* Used byte count (see xnheap_used_mem()). */
		maxcont;	/* Max contiguous block (see xnheap_max_contiguous()). */

	xnqueue_t extents;	/* Queue of attached extents. */

	DECLARE_XNLOCK(lock);	/* Serializes access to the heap. */

	/* One free-list bucket per power-of-two size class. */
	struct xnbucket {
		caddr_t freelist;	/* Head of free blocks for this class. */
		int fcount;		/* Presumably count of free blocks — confirm. */
	} buckets[XNHEAP_NBUCKETS];

	/*
	 * Per-CPU queues of blocks pending deferred release; drained by
	 * xnheap_finalize_free().
	 */
	xnholder_t *idleq[XNARCH_NR_CPUS];

	xnarch_heapcb_t archdep;	/* Arch-specific control block. */

	XNARCH_DECL_DISPLAY_CONTEXT();

	xnholder_t stat_link;	/* Linkage into a statistics queue (presumably). */

	/* Human-readable name, set via xnheap_set_label(). */
	char label[XNOBJECT_NAME_LEN+16];

} xnheap_t;
00119
/* The kernel system heap, backing xnmalloc()/xnfree(). */
extern xnheap_t kheap;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
/* Dedicated pool, presumably for thread stacks (name-based — confirm). */
extern xnheap_t kstacks;
#endif

/* Read-only accessors for heap geometry and accounting. */
#define xnheap_extentsize(heap) ((heap)->extentsize)
#define xnheap_page_size(heap) ((heap)->pagesize)
#define xnheap_page_count(heap) ((heap)->npages)
/* Total usable memory: per-extent capacity times the number of extents. */
#define xnheap_usable_mem(heap) ((heap)->maxcont * countq(&(heap)->extents))
#define xnheap_used_mem(heap) ((heap)->ubytes)
#define xnheap_max_contiguous(heap) ((heap)->maxcont)
00132
/*
 * Round "size" up to the next multiple of "al".
 * "al" must be a power of two.
 */
static inline size_t xnheap_align(size_t size, size_t al)
{
	size_t mask = al - 1;

	return (size + mask) & ~mask;
}
00138
00139 static inline size_t xnheap_external_overhead(size_t hsize, size_t psize)
00140 {
00141 size_t pages = (hsize + psize - 1) / psize;
00142 return xnheap_align(sizeof(xnextent_t)
00143 + pages * sizeof(struct xnpagemap), psize);
00144 }
00145
00146 static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
00147 {
00148
00149
00150
00151
00152
00153 return xnheap_align((sizeof(xnextent_t) * psize
00154 + sizeof(struct xnpagemap) * hsize)
00155 / (psize + sizeof(struct xnpagemap)), psize);
00156 }
00157
/* Shorthands allocating from / releasing to the kernel system heap. */
#define xnmalloc(size) xnheap_alloc(&kheap,size)
#define xnfree(ptr) xnheap_free(&kheap,ptr)
/* Drain this CPU's deferred-release queue of the system heap. */
#define xnfreesync() xnheap_finalize_free(&kheap)
/*
 * Free a block associated with "thread": when called from that very
 * thread, defer the release through the heap's idle queue instead of
 * freeing immediately — presumably because the memory may still be in
 * use by the caller (e.g. its own control block); confirm at call
 * sites.  "ln" is the holder used for the queue linkage.
 */
#define xnfreesafe(thread, ptr, ln) \
do { \
	if (xnpod_current_p(thread)) \
		xnheap_schedule_free(&kheap, ptr, ln); \
	else \
		xnheap_free(&kheap,ptr); \
} while(0)
00168
/*
 * Smallest actual heap size able to satisfy a request of "hsize" bytes
 * with "psize"-byte pages: enforce the two-page minimum, add the
 * external meta-data overhead, then round up to a whole number of
 * pages.
 */
static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
{
	size_t minsize = 2 * psize;
	size_t size = hsize < minsize ? minsize : hsize;

	size += xnheap_external_overhead(size, psize);

	return xnheap_align(size, psize);
}
00184
00185 #ifdef __cplusplus
00186 extern "C" {
00187 #endif
00188
00189
00190
#ifdef __KERNEL__

/* Subsystem setup/teardown. */
int xnheap_mount(void);

void xnheap_umount(void);

/* /proc interface setup/teardown (presumably; name-based — confirm). */
void xnheap_init_proc(void);

void xnheap_cleanup_proc(void);

/*
 * Create a heap whose storage can be mapped to user-space, presumably
 * through the /dev/rtheap device (see XNHEAP_DEV_NAME) — confirm in
 * heap.c.
 */
int xnheap_init_mapped(xnheap_t *heap,
		       u_long heapsize,
		       int memflags);

/* Destroy a mapped heap; "release" is called back with the heap. */
void xnheap_destroy_mapped(xnheap_t *heap,
			   void (*release)(struct xnheap *heap),
			   void __user *mapaddr);

/* Base address of a mapped heap's storage. */
#define xnheap_base_memory(heap) \
	((caddr_t)(heap)->archdep.heapbase)

/* Convert a heap pointer to an offset from the base, and back. */
#define xnheap_mapped_offset(heap,ptr) \
	(((caddr_t)(ptr)) - xnheap_base_memory(heap))

#define xnheap_mapped_address(heap,off) \
	(xnheap_base_memory(heap) + (off))

/* Non-NULL heapbase presumably means xnheap_init_mapped() set it up. */
#define xnheap_mapped_p(heap) \
	(xnheap_base_memory(heap) != NULL)

#endif
00222
00223
00224
/*
 * Initialize a heap over a caller-supplied memory area.  Returns 0 on
 * success or a negative error code (presumably — confirm in heap.c).
 */
int xnheap_init(xnheap_t *heap,
		void *heapaddr,
		u_long heapsize,
		u_long pagesize);

/* Set the heap's display label; printf-style format string. */
void xnheap_set_label(xnheap_t *heap, const char *name, ...);

/*
 * Destroy a heap.  "flushfn", when given, is invoked per extent with
 * "cookie" so the caller can release the backing memory.
 */
void xnheap_destroy(xnheap_t *heap,
		    void (*flushfn)(xnheap_t *heap,
				    void *extaddr,
				    u_long extsize,
				    void *cookie),
		    void *cookie);

/* Attach an additional memory extent to the heap. */
int xnheap_extend(xnheap_t *heap,
		  void *extaddr,
		  u_long extsize);

/* Allocate "size" bytes from the heap (presumably NULL on failure). */
void *xnheap_alloc(xnheap_t *heap,
		   u_long size);

/* Free "block" only if the "ckfn" validation callback accepts it. */
int xnheap_test_and_free(xnheap_t *heap,
			 void *block,
			 int (*ckfn)(void *block));

/* Return a block to the heap; returns a status code. */
int xnheap_free(xnheap_t *heap,
		void *block);

/*
 * Queue "block" for deferred release on the current CPU's idle queue,
 * using "link" as the queue linkage (see xnheap_finalize_free()).
 */
void xnheap_schedule_free(xnheap_t *heap,
			  void *block,
			  xnholder_t *link);

/* Drain the deferred-release queue of the given CPU. */
void xnheap_finalize_free_inner(xnheap_t *heap,
				int cpu);
00259
00260 static inline void xnheap_finalize_free(xnheap_t *heap)
00261 {
00262 int cpu = xnarch_current_cpu();
00263
00264 XENO_ASSERT(NUCLEUS,
00265 spltest() != 0,
00266 xnpod_fatal("%s called in unsafe context", __FUNCTION__));
00267
00268 if (heap->idleq[cpu])
00269 xnheap_finalize_free_inner(heap, cpu);
00270 }
00271
/*
 * Check whether "block" is a valid allocation from "heap" (presumably;
 * name-based — confirm the return convention in heap.c).
 */
int xnheap_check_block(xnheap_t *heap,
		       void *block);
00274
00275 #ifdef __cplusplus
00276 }
00277 #endif
00278
00279 #endif
00280
/* Device node through which mappable heaps are exported. */
#define XNHEAP_DEV_NAME "/dev/rtheap"
#define XNHEAP_DEV_MINOR 254

#ifdef CONFIG_MMU
/*
 * With an MMU, no area address needs to be carried in the descriptor:
 * both macros expand to nothing.  NOTE: the body of xnheap_area_decl()
 * must be empty — the original "#define xnheap_area_decl();" carried a
 * trailing semicolon in the macro body, so the use site expanded to a
 * spurious ";;" double empty declaration inside struct xnheap_desc.
 */
#define xnheap_area_decl()
#define xnheap_area_set(p, val)
#else
/* MMU-less: record the kernel address of the heap area in the descriptor. */
#define xnheap_area_decl() unsigned long area
#define xnheap_area_set(p, val) (p)->area = (unsigned long)(val)
#endif

/*
 * Heap descriptor, presumably exchanged with user-space when binding to
 * a mapped heap — confirm against the skin/bind code.
 */
struct xnheap_desc {
	unsigned long handle;	/* Opaque heap identifier (presumably). */
	unsigned int size;	/* Size of the heap area, in bytes. */
	xnheap_area_decl();
};
00298
00299 #endif